hexsha
stringlengths 40
40
| size
int64 2
1.02M
| ext
stringclasses 10
values | lang
stringclasses 1
value | max_stars_repo_path
stringlengths 4
245
| max_stars_repo_name
stringlengths 6
130
| max_stars_repo_head_hexsha
stringlengths 40
40
| max_stars_repo_licenses
listlengths 1
10
| max_stars_count
int64 1
191k
⌀ | max_stars_repo_stars_event_min_datetime
stringlengths 24
24
⌀ | max_stars_repo_stars_event_max_datetime
stringlengths 24
24
⌀ | max_issues_repo_path
stringlengths 4
245
| max_issues_repo_name
stringlengths 6
130
| max_issues_repo_head_hexsha
stringlengths 40
40
| max_issues_repo_licenses
listlengths 1
10
| max_issues_count
int64 1
67k
⌀ | max_issues_repo_issues_event_min_datetime
stringlengths 24
24
⌀ | max_issues_repo_issues_event_max_datetime
stringlengths 24
24
⌀ | max_forks_repo_path
stringlengths 4
245
| max_forks_repo_name
stringlengths 6
130
| max_forks_repo_head_hexsha
stringlengths 40
40
| max_forks_repo_licenses
listlengths 1
10
| max_forks_count
int64 1
105k
⌀ | max_forks_repo_forks_event_min_datetime
stringlengths 24
24
⌀ | max_forks_repo_forks_event_max_datetime
stringlengths 24
24
⌀ | content
stringlengths 2
1.02M
| avg_line_length
float64 1
417k
| max_line_length
int64 1
987k
| alphanum_fraction
float64 0
1
| content_no_comment
stringlengths 0
1.01M
| is_comment_constant_removed
bool 1
class | is_sharp_comment_removed
bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
1c424d2b0340b27ca4ef99b29114c2e41034020c
| 6,816
|
py
|
Python
|
scipy/linalg/decomp_lu.py
|
magnusja/scipy
|
c4a5a1f984e28840010f20a7e41caa21b8f41979
|
[
"FSFAP"
] | 366
|
2019-04-07T20:34:48.000Z
|
2022-03-29T07:35:38.000Z
|
scipy/linalg/decomp_lu.py
|
magnusja/scipy
|
c4a5a1f984e28840010f20a7e41caa21b8f41979
|
[
"FSFAP"
] | 26
|
2020-03-24T18:07:06.000Z
|
2022-03-12T00:12:27.000Z
|
scipy/linalg/decomp_lu.py
|
magnusja/scipy
|
c4a5a1f984e28840010f20a7e41caa21b8f41979
|
[
"FSFAP"
] | 61
|
2019-04-08T00:58:14.000Z
|
2022-03-20T23:04:28.000Z
|
"""LU decomposition functions."""
from __future__ import division, print_function, absolute_import
from warnings import warn
from numpy import asarray, asarray_chkfinite
# Local imports
from .misc import _datacopied, LinAlgWarning
from .lapack import get_lapack_funcs
from .flinalg import get_flinalg_funcs
__all__ = ['lu', 'lu_solve', 'lu_factor']
def lu_factor(a, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is::

        A = P L U

    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.

    Parameters
    ----------
    a : (M, M) array_like
        Matrix to decompose
    overwrite_a : bool, optional
        Whether to overwrite data in A (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    lu : (N, N) ndarray
        Matrix containing U in its upper triangle, and L in its lower triangle.
        The unit diagonal elements of L are not stored.
    piv : (N,) ndarray
        Pivot indices representing the permutation matrix P:
        row i of matrix was interchanged with row piv[i].

    See also
    --------
    lu_solve : solve an equation system using the LU factorization of a matrix

    Notes
    -----
    This is a wrapper to the ``*GETRF`` routines from LAPACK.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import lu_factor
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> lu, piv = lu_factor(A)
    >>> piv
    array([2, 2, 3, 3], dtype=int32)

    Convert LAPACK's ``piv`` array to NumPy index and test the permutation

    >>> piv_py = [2, 0, 3, 1]
    >>> L, U = np.tril(lu, k=-1) + np.eye(4), np.triu(lu)
    >>> np.allclose(A[piv_py] - L @ U, np.zeros((4, 4)))
    True
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    # GETRF factors square matrices only (unlike lu(), which allows M != N).
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    # If asarray*/chkfinite already copied the input, LAPACK may safely
    # factor in place without clobbering the caller's array.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    getrf, = get_lapack_funcs(('getrf',), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        # info > 0: factorization completed but U has an exact zero on the
        # diagonal, i.e. the matrix is singular -- warn rather than raise.
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             LinAlgWarning, stacklevel=2)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve an equation system, a x = b, given the LU factorization of a

    Parameters
    ----------
    (lu, piv)
        Factorization of the coefficient matrix a, as given by lu_factor
    b : array
        Right-hand side
    trans : {0, 1, 2}, optional
        Type of system to solve:

        ===== =========
        trans system
        ===== =========
        0     a x   = b
        1     a^T x = b
        2     a^H x = b
        ===== =========
    overwrite_b : bool, optional
        Whether to overwrite data in b (may increase performance)
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    x : array
        Solution to the system

    See also
    --------
    lu_factor : LU factorize a matrix

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import lu_factor, lu_solve
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> b = np.array([1, 1, 1, 1])
    >>> lu, piv = lu_factor(A)
    >>> x = lu_solve((lu, piv), b)
    >>> np.allclose(A @ x - b, np.zeros((4,)))
    True
    """
    (lu, piv) = lu_and_piv
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    # A copy made by asarray*/chkfinite may be safely overwritten by GETRS.
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # info < 0 identifies the offending argument.  Name the routine that is
    # actually called here (GETRS); the previous message blamed gesv|posv,
    # which this function never uses.
    raise ValueError('illegal value in %d-th argument of internal getrs'
                     % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """
    Compute pivoted LU decomposition of a matrix.

    The decomposition is::

        A = P L U

    where P is a permutation matrix, L lower triangular with unit
    diagonal elements, and U upper triangular.

    Parameters
    ----------
    a : (M, N) array_like
        Array to decompose
    permute_l : bool, optional
        Perform the multiplication P*L (Default: do not permute)
    overwrite_a : bool, optional
        Whether to overwrite data in a (may improve performance)
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    **(If permute_l == False)**

    p : (M, M) ndarray
        Permutation matrix
    l : (M, K) ndarray
        Lower triangular or trapezoidal matrix with unit diagonal.
        K = min(M, N)
    u : (K, N) ndarray
        Upper triangular or trapezoidal matrix

    **(If permute_l == True)**

    pl : (M, K) ndarray
        Permuted L matrix.
        K = min(M, N)
    u : (K, N) ndarray
        Upper triangular or trapezoidal matrix

    Notes
    -----
    This is a LU factorization routine written for SciPy.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.linalg import lu
    >>> A = np.array([[2, 5, 8, 7], [5, 2, 2, 8], [7, 5, 6, 6], [5, 4, 4, 8]])
    >>> p, l, u = lu(A)
    >>> np.allclose(A - p @ l @ u, np.zeros((4, 4)))
    True
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    # Rectangular input is accepted here, unlike lu_factor.
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    # A copy made by asarray*/chkfinite may be factored in place.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    flu, = get_flinalg_funcs(('lu',), (a1,))
    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal lu.getrf' % -info)
    if permute_l:
        # P has already been multiplied into L by the Fortran routine.
        return l, u
    return p, l, u
| 30.293333
| 79
| 0.591256
|
from __future__ import division, print_function, absolute_import
from warnings import warn
from numpy import asarray, asarray_chkfinite
from .misc import _datacopied, LinAlgWarning
from .lapack import get_lapack_funcs
from .flinalg import get_flinalg_funcs
__all__ = ['lu', 'lu_solve', 'lu_factor']
def lu_factor(a, overwrite_a=False, check_finite=True):
    """Compute the pivoted LU decomposition of a square matrix.

    Returns ``(lu, piv)``: ``lu`` holds U in its upper triangle and the
    unit-diagonal L (diagonal not stored) in its lower triangle; ``piv``
    are LAPACK-style pivot indices (row i was interchanged with row
    piv[i]).  Thin wrapper around the LAPACK ``*GETRF`` routines.
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    # GETRF requires a square matrix.
    if len(a1.shape) != 2 or (a1.shape[0] != a1.shape[1]):
        raise ValueError('expected square matrix')
    # A copy made by asarray*/chkfinite may be safely factored in place.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    getrf, = get_lapack_funcs(('getrf',), (a1,))
    lu, piv, info = getrf(a1, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal getrf (lu_factor)' % -info)
    if info > 0:
        # Factorization succeeded but U has an exact zero on its diagonal:
        # the matrix is singular, so warn instead of raising.
        warn("Diagonal number %d is exactly zero. Singular matrix." % info,
             LinAlgWarning, stacklevel=2)
    return lu, piv
def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True):
    """Solve ``a x = b`` given ``(lu, piv)`` from ``lu_factor``.

    ``trans`` selects the system: 0 -> ``a x = b``, 1 -> ``a^T x = b``,
    2 -> ``a^H x = b``.  Wraps the LAPACK ``*GETRS`` routines.
    """
    (lu, piv) = lu_and_piv
    if check_finite:
        b1 = asarray_chkfinite(b)
    else:
        b1 = asarray(b)
    # A copy made by asarray*/chkfinite may be safely overwritten.
    overwrite_b = overwrite_b or _datacopied(b1, b)
    if lu.shape[0] != b1.shape[0]:
        raise ValueError("incompatible dimensions.")
    getrs, = get_lapack_funcs(('getrs',), (lu, b1))
    x, info = getrs(lu, piv, b1, trans=trans, overwrite_b=overwrite_b)
    if info == 0:
        return x
    # Name the routine actually called (GETRS); the previous message blamed
    # gesv|posv, which this function never uses.
    raise ValueError('illegal value in %d-th argument of internal getrs'
                     % -info)
def lu(a, permute_l=False, overwrite_a=False, check_finite=True):
    """Compute the pivoted LU decomposition ``A = P L U`` of a matrix.

    Returns ``(p, l, u)``, or ``(pl, u)`` when ``permute_l`` is true (the
    permutation is then already multiplied into L).  The input may be
    rectangular; L/U are trapezoidal in that case.
    """
    if check_finite:
        a1 = asarray_chkfinite(a)
    else:
        a1 = asarray(a)
    # Rectangular input is accepted here, unlike lu_factor.
    if len(a1.shape) != 2:
        raise ValueError('expected matrix')
    # A copy made by asarray*/chkfinite may be factored in place.
    overwrite_a = overwrite_a or (_datacopied(a1, a))
    flu, = get_flinalg_funcs(('lu',), (a1,))
    p, l, u, info = flu(a1, permute_l=permute_l, overwrite_a=overwrite_a)
    if info < 0:
        raise ValueError('illegal value in %d-th argument of '
                         'internal lu.getrf' % -info)
    if permute_l:
        return l, u
    return p, l, u
| true
| true
|
1c424d733b42474c1db1ce3755a624674fd7d334
| 221
|
py
|
Python
|
src/diamond/error.py
|
matt-ullmer/Diamond
|
6ea198f3ebe58473467c6dc38b20e683c278192c
|
[
"MIT"
] | 6
|
2015-04-13T21:24:24.000Z
|
2020-05-11T07:43:05.000Z
|
src/diamond/error.py
|
matt-ullmer/Diamond
|
6ea198f3ebe58473467c6dc38b20e683c278192c
|
[
"MIT"
] | 3
|
2015-09-11T16:29:54.000Z
|
2016-05-18T15:31:54.000Z
|
src/diamond/error.py
|
matt-ullmer/Diamond
|
6ea198f3ebe58473467c6dc38b20e683c278192c
|
[
"MIT"
] | 13
|
2015-01-11T12:10:45.000Z
|
2021-01-27T10:55:02.000Z
|
# coding=utf-8
class DiamondException(Exception):
    """Base exception type for diamond errors.

    Carries a single human-readable message; both ``str()`` and
    ``repr()`` return it verbatim.
    """

    def __init__(self, message):
        # Forward the message to Exception.__init__ so cooperative
        # multiple inheritance and the standard exception machinery
        # (args, traceback formatting) behave as expected.
        super(DiamondException, self).__init__(message)
        self.message = message

    def __str__(self):
        return self.message

    def __repr__(self):
        return self.message
| 17
| 34
| 0.647059
|
class DiamondException(Exception):
    """Exception carrying a plain text message.

    ``str()`` and ``repr()`` both yield the stored message unchanged.
    """

    def __init__(self, message):
        self.message = message

    def __str__(self):
        return self.message

    # repr intentionally mirrors str: the message is the whole story.
    __repr__ = __str__
| true
| true
|
1c424e75e7b1b63aeb5f62a9876acbe97e1921a7
| 394
|
py
|
Python
|
meet/management/commands/refresh_thumbnails.py
|
acaciawater/meetsite
|
5008fe8aa333cdcf74df19cbe97fa490af4fcafd
|
[
"Apache-2.0"
] | null | null | null |
meet/management/commands/refresh_thumbnails.py
|
acaciawater/meetsite
|
5008fe8aa333cdcf74df19cbe97fa490af4fcafd
|
[
"Apache-2.0"
] | null | null | null |
meet/management/commands/refresh_thumbnails.py
|
acaciawater/meetsite
|
5008fe8aa333cdcf74df19cbe97fa490af4fcafd
|
[
"Apache-2.0"
] | null | null | null |
'''
Created on Jan 27, 2017
@author: stephane
'''
from django.core.management.base import BaseCommand
from meet.models import Meetpunt
class Command(BaseCommand):
    """Management command that regenerates the thumbnail of every Meetpunt."""

    # Fixed help text: the previous string ("Assign a meetpunt for each
    # meting that does not have one") was copy-pasted from another command
    # and did not describe what handle() does.
    help = "Regenerate and save the thumbnail of every meetpunt"

    def handle(self, *args, **options):
        """Rebuild and persist the thumbnail for all Meetpunt objects."""
        for mp in Meetpunt.objects.all():
            mp.make_thumbnail()
            mp.save()
| 23.176471
| 69
| 0.647208
|
from django.core.management.base import BaseCommand
from meet.models import Meetpunt
class Command(BaseCommand):
    """Management command that regenerates the thumbnail of every Meetpunt."""

    # NOTE(review): this help string appears copy-pasted from another
    # command -- handle() below regenerates thumbnails and never assigns
    # meetpunten.  Confirm and reword.
    help = "Assign a meetpunt for each meting that does not have one"

    def handle(self, *args, **options):
        """Rebuild and save the thumbnail for all Meetpunt objects."""
        mps = Meetpunt.objects.all()
        for mp in mps:
            mp.make_thumbnail()
            mp.save()
| true
| true
|
1c424ea1b06d31a7f970e23a97cce3f2a8152949
| 59,549
|
py
|
Python
|
tech_project/lib/python2.7/site-packages/pylint/lint.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | 1
|
2019-10-16T07:56:31.000Z
|
2019-10-16T07:56:31.000Z
|
tech_project/lib/python2.7/site-packages/pylint/lint.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | 9
|
2019-12-04T23:15:54.000Z
|
2022-02-10T11:05:43.000Z
|
tech_project/lib/python2.7/site-packages/pylint/lint.py
|
priyamshah112/Project-Descripton-Blog
|
8e01016c6be79776c4f5ca75563fa3daa839e39e
|
[
"MIT"
] | 1
|
2021-07-28T20:35:14.000Z
|
2021-07-28T20:35:14.000Z
|
# -*- coding: utf-8 -*-
# Copyright (c) 2006-2015 LOGILAB S.A. (Paris, FRANCE) <contact@logilab.fr>
# Copyright (c) 2008 Fabrice Douchant <Fabrice.Douchant@logilab.fr>
# Copyright (c) 2009 Vincent
# Copyright (c) 2009 Mads Kiilerich <mads@kiilerich.com>
# Copyright (c) 2011-2014 Google, Inc.
# Copyright (c) 2012 David Pursehouse <david.pursehouse@sonymobile.com>
# Copyright (c) 2012 Kevin Jing Qiu <kevin.jing.qiu@gmail.com>
# Copyright (c) 2012 FELD Boris <lothiraldan@gmail.com>
# Copyright (c) 2012 JT Olds <jtolds@xnet5.com>
# Copyright (c) 2014-2017 Claudiu Popa <pcmanticore@gmail.com>
# Copyright (c) 2014-2015 Michal Nowikowski <godfryd@gmail.com>
# Copyright (c) 2014 Brett Cannon <brett@python.org>
# Copyright (c) 2014 Alexandru Coman <fcoman@bitdefender.com>
# Copyright (c) 2014 Daniel Harding <dharding@living180.net>
# Copyright (c) 2014 Arun Persaud <arun@nubati.net>
# Copyright (c) 2014 Dan Goldsmith <djgoldsmith@googlemail.com>
# Copyright (c) 2015-2016 Florian Bruhin <me@the-compiler.org>
# Copyright (c) 2015 Aru Sahni <arusahni@gmail.com>
# Copyright (c) 2015 Steven Myint <hg@stevenmyint.com>
# Copyright (c) 2015 Simu Toni <simutoni@gmail.com>
# Copyright (c) 2015 Mihai Balint <balint.mihai@gmail.com>
# Copyright (c) 2015 Ionel Cristian Maries <contact@ionelmc.ro>
# Copyright (c) 2016-2017 Łukasz Rogalski <rogalski.91@gmail.com>
# Copyright (c) 2016 Glenn Matthews <glenn@e-dad.net>
# Copyright (c) 2016 Alan Evangelista <alanoe@linux.vnet.ibm.com>
# Copyright (c) 2017 Daniel Miller <millerdev@gmail.com>
# Copyright (c) 2017 hippo91 <guillaume.peillex@gmail.com>
# Copyright (c) 2017 Roman Ivanov <me@roivanov.com>
# Copyright (c) 2017 Ned Batchelder <ned@nedbatchelder.com>
# Copyright (c) 2017 Ville Skyttä <ville.skytta@iki.fi>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/master/COPYING
""" %prog [options] modules_or_packages
Check that module(s) satisfy a coding standard (and more !).
%prog --help
Display this help message and exit.
%prog --help-msg <msg-id>[,<msg-id>]
Display help messages about given message identifiers and exit.
"""
from __future__ import print_function
import collections
import contextlib
import operator
import os
try:
import multiprocessing
except ImportError:
multiprocessing = None
import sys
import tokenize
import warnings
import six
import astroid
from astroid.__pkginfo__ import version as astroid_version
from astroid import modutils
from pylint import checkers
from pylint import interfaces
from pylint import reporters
from pylint import exceptions
from pylint import utils
from pylint import config
from pylint.__pkginfo__ import version
from pylint.reporters.ureports import nodes as report_nodes
MANAGER = astroid.MANAGER
def _get_new_args(message):
    """Flatten a pylint message into a plain (picklable) tuple.

    The result is ``(msg_id, symbol, location, msg, confidence)`` where
    ``location`` is ``(abspath, path, module, obj, line, column)``.
    """
    return (
        message.msg_id,
        message.symbol,
        (message.abspath, message.path, message.module,
         message.obj, message.line, message.column),
        message.msg,
        message.confidence,
    )
def _get_python_path(filepath):
    """Return the directory from which *filepath* is importable.

    Walks upward from *filepath* (a file or directory) past every package
    level (directories containing ``__init__.py``) and returns the first
    non-package ancestor.  Falls back to the current working directory if
    the package chain extends all the way to the filesystem root.
    """
    dirname = os.path.realpath(os.path.expanduser(filepath))
    if not os.path.isdir(dirname):
        dirname = os.path.dirname(dirname)
    while True:
        if not os.path.exists(os.path.join(dirname, "__init__.py")):
            return dirname
        old_dirname = dirname
        dirname = os.path.dirname(dirname)
        if old_dirname == dirname:
            # dirname() is a fixed point only at the root.
            return os.getcwd()
    # Removed: an unreachable ``return None`` that followed the infinite
    # loop above -- every path through the loop already returns.
def _merge_stats(stats):
    """Combine per-run stat dicts into one.

    ``by_msg`` counters are summed into a single Counter; for every other
    key, dict values are merged with ``update`` and scalar values are
    added.  NOTE: each input dict has its ``by_msg`` entry popped.
    """
    merged = {}
    by_msg = collections.Counter()
    for stat in stats:
        by_msg.update(stat.pop('by_msg', {}))
        for key, item in six.iteritems(stat):
            if key not in merged:
                merged[key] = item
            elif isinstance(item, dict):
                merged[key].update(item)
            else:
                merged[key] = merged[key] + item
    merged['by_msg'] = by_msg
    return merged
@contextlib.contextmanager
def _patch_sysmodules():
    """Temporarily install this module as ``sys.modules['__main__']``.

    Context manager that permits running pylint, on Windows, with the -m
    switch and with --jobs, as in 'python -2 -m pylint .. --jobs'.
    For more details why this is needed,
    see Python issue http://bugs.python.org/issue10845.
    """
    mock_main = __name__ != '__main__'  # -m switch
    if mock_main:
        sys.modules['__main__'] = sys.modules[__name__]
    try:
        yield
    finally:
        # Always undo the patch, even if the body raised.
        if mock_main:
            sys.modules.pop('__main__')
# Python Linter class #########################################################
MSGS = {
'F0001': ('%s',
'fatal',
'Used when an error occurred preventing the analysis of a \
module (unable to find it for instance).'),
'F0002': ('%s: %s',
'astroid-error',
'Used when an unexpected error occurred while building the '
'Astroid representation. This is usually accompanied by a '
'traceback. Please report such errors !'),
'F0010': ('error while code parsing: %s',
'parse-error',
'Used when an exception occurred while building the Astroid '
'representation which could be handled by astroid.'),
'I0001': ('Unable to run raw checkers on built-in module %s',
'raw-checker-failed',
'Used to inform that a built-in module has not been checked '
'using the raw checkers.'),
'I0010': ('Unable to consider inline option %r',
'bad-inline-option',
'Used when an inline option is either badly formatted or can\'t '
'be used inside modules.'),
'I0011': ('Locally disabling %s (%s)',
'locally-disabled',
'Used when an inline option disables a message or a messages '
'category.'),
'I0012': ('Locally enabling %s (%s)',
'locally-enabled',
'Used when an inline option enables a message or a messages '
'category.'),
'I0013': ('Ignoring entire file',
'file-ignored',
'Used to inform that the file will not be checked'),
'I0020': ('Suppressed %s (from line %d)',
'suppressed-message',
'A message was triggered on a line, but suppressed explicitly '
'by a disable= comment in the file. This message is not '
'generated for messages that are ignored due to configuration '
'settings.'),
'I0021': ('Useless suppression of %s',
'useless-suppression',
'Reported when a message is explicitly disabled for a line or '
'a block of code, but never triggered.'),
'I0022': ('Pragma "%s" is deprecated, use "%s" instead',
'deprecated-pragma',
'Some inline pylint options have been renamed or reworked, '
'only the most recent form should be used. '
'NOTE:skip-all is only available with pylint >= 0.26',
{'old_names': [('I0014', 'deprecated-disable-all')]}),
'E0001': ('%s',
'syntax-error',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'unrecognized-inline-option',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'bad-option-value',
'Used when a bad value for an inline option is encountered.'),
}
if multiprocessing is not None:
    class ChildLinter(multiprocessing.Process):
        """Worker process used by ``--jobs``: lints modules pulled from a
        task queue and pushes picklable results back to the parent."""

        def run(self):
            # pylint: disable=no-member, unbalanced-tuple-unpacking
            tasks_queue, results_queue, self._config = self._args
            self._config["jobs"] = 1  # Child does not parallelize any further.
            self._python3_porting_mode = self._config.pop(
                'python3_porting_mode', None)
            self._plugins = self._config.pop('plugins', None)
            # Run linter for received files/modules until the 'STOP'
            # sentinel arrives on the queue.
            for file_or_module in iter(tasks_queue.get, 'STOP'):
                try:
                    result = self._run_linter(file_or_module[0])
                    results_queue.put(result)
                except Exception as ex:
                    # Never let one bad module kill the worker: report on
                    # stderr and hand back an empty result instead.
                    print("internal error with sending report for module %s" %
                          file_or_module, file=sys.stderr)
                    print(ex, file=sys.stderr)
                    results_queue.put({})

        def _run_linter(self, file_or_module):
            """Lint one file/module with a fresh PyLinter and return a
            picklable summary tuple for the parent process."""
            linter = PyLinter()
            # Register standard checkers.
            linter.load_default_plugins()
            # Load command line plugins.
            if self._plugins:
                linter.load_plugin_modules(self._plugins)
            linter.load_configuration_from_config(self._config)
            linter.set_reporter(reporters.CollectingReporter())
            # Enable the Python 3 checker mode. This option is
            # passed down from the parent linter up to here, since
            # the Python 3 porting flag belongs to the Run class,
            # instead of the Linter class.
            if self._python3_porting_mode:
                linter.python3_porting_mode()
            # Run the checks.
            linter.check(file_or_module)
            # Messages are flattened to plain tuples so they can cross the
            # process boundary.
            msgs = [_get_new_args(m) for m in linter.reporter.messages]
            return (file_or_module, linter.file_state.base_name, linter.current_name,
                    msgs, linter.stats, linter.msg_status)
class PyLinter(config.OptionsManagerMixIn,
utils.MessagesHandlerMixIn,
utils.ReportsHandlerMixIn,
checkers.BaseTokenChecker):
"""lint Python modules using external checkers.
This is the main checker controlling the other ones and the reports
generation. It is itself both a raw checker and an astroid checker in order
to:
* handle message activation / deactivation at the module level
* handle some basic but necessary stats'data (number of classes, methods...)
IDE plugin developers: you may have to call
`astroid.builder.MANAGER.astroid_cache.clear()` across runs if you want
to ensure the latest code version is actually checked.
"""
__implements__ = (interfaces.ITokenChecker, )
name = 'master'
priority = 0
level = 0
msgs = MSGS
@staticmethod
def make_options():
return (('ignore',
{'type' : 'csv', 'metavar' : '<file>[,<file>...]',
'dest' : 'black_list', 'default' : ('CVS',),
'help' : 'Add files or directories to the blacklist. '
'They should be base names, not paths.'}),
('ignore-patterns',
{'type' : 'regexp_csv', 'metavar' : '<pattern>[,<pattern>...]',
'dest' : 'black_list_re', 'default' : (),
'help' : 'Add files or directories matching the regex patterns to the'
' blacklist. The regex matches against base names, not paths.'}),
('persistent',
{'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
'level': 1,
'help' : 'Pickle collected data for later comparisons.'}),
('load-plugins',
{'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
'level': 1,
'help' : 'List of plugins (as comma separated values of '
'python modules names) to load, usually to register '
'additional checkers.'}),
('output-format',
{'default': 'text', 'type': 'string', 'metavar' : '<format>',
'short': 'f',
'group': 'Reports',
'help' : 'Set the output format. Available formats are text,'
' parseable, colorized, json and msvs (visual studio).'
'You can also give a reporter class, eg mypackage.mymodule.'
'MyReporterClass.'}),
('reports',
{'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
'short': 'r',
'group': 'Reports',
'help' : 'Tells whether to display a full report or only the '
'messages'}),
('evaluation',
{'type' : 'string', 'metavar' : '<python_expression>',
'group': 'Reports', 'level': 1,
'default': '10.0 - ((float(5 * error + warning + refactor + '
'convention) / statement) * 10)',
'help' : 'Python expression which should return a note less '
'than 10 (10 is the highest note). You have access '
'to the variables errors warning, statement which '
'respectively contain the number of errors / '
'warnings messages and the total number of '
'statements analyzed. This is used by the global '
'evaluation report (RP0004).'}),
('score',
{'default': True, 'type': 'yn', 'metavar': '<y_or_n>',
'short': 's',
'group': 'Reports',
'help': 'Activate the evaluation score.'}),
('confidence',
{'type' : 'multiple_choice', 'metavar': '<levels>',
'default': '',
'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS],
'group': 'Messages control',
'help' : 'Only show warnings with the listed confidence levels.'
' Leave empty to show all. Valid levels: %s' % (
', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}),
('enable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'e',
'group': 'Messages control',
'help' : 'Enable the message, report, category or checker with the '
'given id(s). You can either give multiple identifier '
'separated by comma (,) or put this option multiple time '
'(only on the command line, not in the configuration file '
'where it should appear only once). '
'See also the "--disable" option for examples. '}),
('disable',
{'type' : 'csv', 'metavar': '<msg ids>',
'short': 'd',
'group': 'Messages control',
'help' : 'Disable the message, report, category or checker '
'with the given id(s). You can either give multiple identifiers'
' separated by comma (,) or put this option multiple times '
'(only on the command line, not in the configuration file '
'where it should appear only once).'
'You can also use "--disable=all" to disable everything first '
'and then reenable specific checks. For example, if you want '
'to run only the similarities checker, you can use '
'"--disable=all --enable=similarities". '
'If you want to run only the classes checker, but have no '
'Warning level messages displayed, use'
'"--disable=all --enable=classes --disable=W"'}),
('msg-template',
{'type' : 'string', 'metavar': '<template>',
'group': 'Reports',
'help' : ('Template used to display messages. '
'This is a python new-style format string '
'used to format the message information. '
'See doc for all details')
}),
('jobs',
{'type' : 'int', 'metavar': '<n-processes>',
'short': 'j',
'default': 1,
'help' : '''Use multiple processes to speed up Pylint.''',
}),
('unsafe-load-any-extension',
{'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True,
'help': ('Allow loading of arbitrary C extensions. Extensions'
' are imported into the active Python interpreter and'
' may run arbitrary code.')}),
('extension-pkg-whitelist',
{'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [],
'help': ('A comma-separated list of package or module names'
' from where C extensions may be loaded. Extensions are'
' loading into the active Python interpreter and may run'
' arbitrary code')}),
('suggestion-mode',
{'type': 'yn', 'metavar': '<yn>', 'default': True,
'help': ('When enabled, pylint would attempt to guess common '
'misconfiguration and emit user-friendly hints instead '
'of false-positive error messages')}),
('exit-zero',
{'action': 'store_true',
'help': ('Always return a 0 (non-error) status code, even if '
'lint errors are found. This is primarily useful in '
'continuous integration scripts.')}),
)
option_groups = (
('Messages control', 'Options controlling analysis messages'),
('Reports', 'Options related to output formatting and reporting'),
)
    def __init__(self, options=(), reporter=None, option_groups=(),
                 pylintrc=None):
        """Build the linter; *options*/*option_groups* extend the built-in
        ones, *reporter* overrides the default output reporter and
        *pylintrc* points at an alternative configuration file."""
        # some stuff has to be done before ancestors initialization...
        #
        # messages store / checkers / reporter / astroid manager
        self.msgs_store = utils.MessagesStore()
        self.reporter = None
        self._reporter_name = None
        self._reporters = {}  # reporter name -> reporter class (see register_reporter)
        self._checkers = collections.defaultdict(list)
        self._pragma_lineno = {}
        self._ignore_file = False
        # visit variables
        self.file_state = utils.FileState()
        self.current_name = None
        self.current_file = None
        self.stats = None
        # init options
        self._external_opts = options
        self.options = options + PyLinter.make_options()
        self.option_groups = option_groups + PyLinter.option_groups
        self._options_methods = {
            'enable': self.enable,
            'disable': self.disable}
        # Deprecated spellings kept for backwards compatibility.
        self._bw_options_methods = {'disable-msg': self.disable,
                                    'enable-msg': self.enable}
        full_version = '%%prog %s, \nastroid %s\nPython %s' % (
            version, astroid_version, sys.version)
        utils.MessagesHandlerMixIn.__init__(self)
        utils.ReportsHandlerMixIn.__init__(self)
        super(PyLinter, self).__init__(
            usage=__doc__,
            version=full_version,
            config_file=pylintrc or config.PYLINTRC)
        checkers.BaseTokenChecker.__init__(self)
        # provided reports
        self.reports = (('RP0001', 'Messages by category',
                         report_total_messages_stats),
                        ('RP0002', '% errors / warnings by module',
                         report_messages_by_module_stats),
                        ('RP0003', 'Messages',
                         report_messages_stats),
                        )
        # The linter is itself a checker (see register_checker).
        self.register_checker(self)
        self._dynamic_plugins = set()
        self._python3_porting_mode = False
        self._error_mode = False
        self.load_provider_defaults()
        if reporter:
            self.set_reporter(reporter)
    def load_default_plugins(self):
        """Register the built-in checkers and reporters on this linter."""
        checkers.initialize(self)
        reporters.initialize(self)
        # Make sure to load the default reporter, because
        # the option has been set before the plugins had been loaded.
        if not self.reporter:
            self._load_reporter()
    def load_plugin_modules(self, modnames):
        """take a list of module names which are pylint plugins and load
        and register them
        """
        for modname in modnames:
            # Load each plugin at most once, even if listed repeatedly.
            if modname in self._dynamic_plugins:
                continue
            self._dynamic_plugins.add(modname)
            module = modutils.load_module_from_name(modname)
            # A plugin module must expose a module-level register(linter).
            module.register(self)
    def _load_reporter(self):
        """Instantiate the reporter selected via ``self._reporter_name``.

        The (case-insensitive) name is looked up among registered
        reporters first; otherwise it is treated as a dotted path to a
        reporter class.
        """
        name = self._reporter_name.lower()
        if name in self._reporters:
            self.set_reporter(self._reporters[name]())
        else:
            try:
                reporter_class = self._load_reporter_class()
            except (ImportError, AttributeError):
                # Neither a known reporter name nor an importable class.
                raise exceptions.InvalidReporterError(name)
            else:
                self.set_reporter(reporter_class())
    def _load_reporter_class(self):
        """Import and return the reporter class named by the dotted path in
        ``self._reporter_name`` (e.g. ``mypkg.mymod.MyReporter``).

        May raise ImportError or AttributeError; the caller handles both.
        """
        qname = self._reporter_name
        module = modutils.load_module_from_name(
            modutils.get_module_part(qname))
        class_name = qname.split('.')[-1]
        reporter_class = getattr(module, class_name)
        return reporter_class
    def set_reporter(self, reporter):
        """set the reporter used to display messages and reports"""
        self.reporter = reporter
        # Give the reporter a back-reference so it can query linter state.
        reporter.linter = self
    def set_option(self, optname, value, action=None, optdict=None):
        """overridden from config.OptionsProviderMixin to handle some
        special options

        'enable'/'disable' (and their deprecated '-msg' spellings) are
        dispatched to the corresponding methods instead of being stored;
        'output-format' records the reporter name and, when reporters are
        already registered, loads the reporter immediately.
        """
        if optname in self._options_methods or \
                optname in self._bw_options_methods:
            if value:
                try:
                    meth = self._options_methods[optname]
                except KeyError:
                    # Deprecated enable-msg/disable-msg spelling.
                    meth = self._bw_options_methods[optname]
                    warnings.warn('%s is deprecated, replace it by %s' % (optname,
                                                                          optname.split('-')[0]),
                                  DeprecationWarning)
                value = utils._check_csv(value)
                if isinstance(value, (list, tuple)):
                    for _id in value:
                        meth(_id, ignore_unknown=True)
                else:
                    meth(value)
            return  # no need to call set_option, disable/enable methods do it
        elif optname == 'output-format':
            self._reporter_name = value
            # If the reporters are already available, load
            # the reporter class.
            if self._reporters:
                self._load_reporter()
        try:
            checkers.BaseTokenChecker.set_option(self, optname,
                                                 value, action, optdict)
        except config.UnsupportedAction:
            print('option %s can\'t be read from config file' % \
                  optname, file=sys.stderr)
    def register_reporter(self, reporter_class):
        """Make *reporter_class* selectable by its ``name`` attribute."""
        self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, 'name', ''))
try:
# Remove the current reporter and add it
# at the end of the list.
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
# checkers manipulation methods ############################################
    def register_checker(self, checker):
        """register a new checker

        checker is an object implementing IRawChecker or / and IAstroidChecker
        """
        assert checker.priority <= 0, 'checker priority can\'t be >= 0'
        self._checkers[checker.name].append(checker)
        # Hook up the checker's reports, options and messages.
        for r_id, r_title, r_cb in checker.reports:
            self.register_report(r_id, r_title, r_cb, checker)
        self.register_options_provider(checker)
        if hasattr(checker, 'msgs'):
            self.msgs_store.register_messages(checker)
        checker.load_defaults()
        # Register the checker, but disable all of its messages.
        # TODO(cpopa): we should have a better API for this.
        if not getattr(checker, 'enabled', True):
            self.disable(checker.name)
    def disable_noerror_messages(self):
        """Leave only error ('E') and fatal ('F') category messages enabled."""
        for msgcat, msgids in six.iteritems(self.msgs_store._msgs_by_category):
            # enable only messages with 'error' severity and above ('fatal')
            if msgcat in ['E', 'F']:
                for msgid in msgids:
                    self.enable(msgid)
            else:
                for msgid in msgids:
                    self.disable(msgid)
    def disable_reporters(self):
        """disable all reporters"""
        # _reports maps provider -> list of (id, title, callback) entries
        # (presumably populated via register_report -- confirm in mixin).
        for _reporters in six.itervalues(self._reports):
            for report_id, _, _ in _reporters:
                self.disable_report(report_id)
    def error_mode(self):
        """error mode: enable only errors; no reports, no persistent

        Triggered by the -E/--errors-only flag.  Interacts with the --py3k
        flag: when both are given, only the *errors* of the python3 porting
        checker remain enabled.
        """
        self._error_mode = True
        self.disable_noerror_messages()
        self.disable('miscellaneous')
        if self._python3_porting_mode:
            # --py3k together with -E: start from a clean slate and enable
            # only the error-severity messages of the python3 checker.
            self.disable('all')
            for msg_id in self._checker_messages('python3'):
                if msg_id.startswith('E'):
                    self.enable(msg_id)
            # Re-apply any explicit disables from the configuration file so
            # the user can still silence individual porting errors.
            config_parser = self.cfgfile_parser
            if config_parser.has_option('MESSAGES CONTROL', 'disable'):
                value = config_parser.get('MESSAGES CONTROL', 'disable')
                self.global_set_option('disable', value)
        else:
            self.disable('python3')
        self.set_option('reports', False)
        self.set_option('persistent', False)
        self.set_option('score', False)
    def python3_porting_mode(self):
        """Disable all other checkers and enable Python 3 warnings."""
        self.disable('all')
        self.enable('python3')
        if self._error_mode:
            # The error mode was activated, using the -E flag.
            # So we'll need to enable only the errors from the
            # Python 3 porting checker.
            for msg_id in self._checker_messages('python3'):
                if msg_id.startswith('E'):
                    self.enable(msg_id)
                else:
                    self.disable(msg_id)
        # Re-apply explicit disables from the configuration file, since the
        # blanket disable('all') above wiped them out.
        config_parser = self.cfgfile_parser
        if config_parser.has_option('MESSAGES CONTROL', 'disable'):
            value = config_parser.get('MESSAGES CONTROL', 'disable')
            self.global_set_option('disable', value)
        self._python3_porting_mode = True
# block level option handling #############################################
#
# see func_block_disable_msg.py test case for expected behaviour
    def process_tokens(self, tokens):
        """process tokens from the current module to search for module/block
        level options

        Scans comment tokens for ``# pylint: <option>=<values>`` pragmas and
        applies them (enable/disable messages, handle skip-file, warn about
        deprecated or malformed pragmas).
        """
        control_pragmas = {'disable', 'enable'}
        for (tok_type, content, start, _, _) in tokens:
            # Pragmas can only live in comments.
            if tok_type != tokenize.COMMENT:
                continue
            match = utils.OPTION_RGX.search(content)
            if match is None:
                continue
            # "disable-all"/"skip-file" short-circuit the whole module.
            if match.group(1).strip() == "disable-all" or \
                    match.group(1).strip() == 'skip-file':
                if match.group(1).strip() == "disable-all":
                    self.add_message('deprecated-pragma', line=start[0],
                                     args=('disable-all', 'skip-file'))
                self.add_message('file-ignored', line=start[0])
                self._ignore_file = True
                return
            try:
                opt, value = match.group(1).split('=', 1)
            except ValueError:
                self.add_message('bad-inline-option', args=match.group(1).strip(),
                                 line=start[0])
                continue
            opt = opt.strip()
            if opt in self._options_methods or opt in self._bw_options_methods:
                try:
                    meth = self._options_methods[opt]
                except KeyError:
                    # Fall back to the legacy "(dis|en)able-msg" spelling.
                    meth = self._bw_options_methods[opt]
                    # found a "(dis|en)able-msg" pragma deprecated suppression
                    self.add_message('deprecated-pragma', line=start[0],
                                     args=(opt, opt.replace('-msg', '')))
                for msgid in utils._splitstrip(value):
                    # Add the line where a control pragma was encountered.
                    if opt in control_pragmas:
                        self._pragma_lineno[msgid] = start[0]
                    try:
                        # "disable=all" is the deprecated spelling of skip-file.
                        if (opt, msgid) == ('disable', 'all'):
                            self.add_message('deprecated-pragma', line=start[0],
                                             args=('disable=all', 'skip-file'))
                            self.add_message('file-ignored', line=start[0])
                            self._ignore_file = True
                            return
                        meth(msgid, 'module', start[0])
                    except exceptions.UnknownMessageError:
                        self.add_message('bad-option-value', args=msgid, line=start[0])
            else:
                self.add_message('unrecognized-inline-option', args=opt, line=start[0])
# code checking methods ###################################################
def get_checkers(self):
"""return all available checkers as a list"""
return [self] + [c for _checkers in six.itervalues(self._checkers)
for c in _checkers if c is not self]
    def prepare_checkers(self):
        """return checkers needed for activated messages and reports

        A checker is kept only if at least one of its messages is enabled or
        one of its reports is enabled.  Result is sorted by descending
        priority so the linter itself (priority 0) runs first.
        """
        if not self.config.reports:
            self.disable_reporters()
        # get needed checkers
        neededcheckers = [self]
        for checker in self.get_checkers()[1:]:
            messages = set(msg for msg in checker.msgs
                           if self.is_message_enabled(msg))
            if (messages or
                    any(self.report_is_enabled(r[0]) for r in checker.reports)):
                neededcheckers.append(checker)
        # Sort checkers by priority
        neededcheckers = sorted(neededcheckers,
                                key=operator.attrgetter('priority'),
                                reverse=True)
        return neededcheckers
# pylint: disable=unused-argument
@staticmethod
def should_analyze_file(modname, path, is_argument=False):
"""Returns whether or not a module should be checked.
This implementation returns True for all python source file, indicating
that all files should be linted.
Subclasses may override this method to indicate that modules satisfying
certain conditions should not be linted.
:param str modname: The name of the module to be checked.
:param str path: The full path to the source code of the module.
:param bool is_argument: Whetter the file is an argument to pylint or not.
Files which respect this property are always
checked, since the user requested it explicitly.
:returns: True if the module should be checked.
:rtype: bool
"""
if is_argument:
return True
return path.endswith('.py')
# pylint: enable=unused-argument
    def check(self, files_or_modules):
        """main checking entry: check a list of files or modules from their
        name.

        Dispatches to a single-process run or to the multiprocess path
        depending on the ``jobs`` option.
        """
        # initialize msgs_state now that all messages have been registered into
        # the store
        for msg in self.msgs_store.messages:
            if not msg.may_be_emitted():
                self._msgs_state[msg.msgid] = False
        # Accept a bare string as well as a list/tuple of targets.
        if not isinstance(files_or_modules, (list, tuple)):
            files_or_modules = (files_or_modules,)
        if self.config.jobs == 1:
            self._do_check(files_or_modules)
        else:
            # multiprocessing requires a picklable __main__ module.
            with _patch_sysmodules():
                self._parallel_check(files_or_modules)
    def _get_jobs_config(self):
        """Build the option dict forwarded to child linter processes.

        Deprecated options and options the child cannot use (long-help,
        externally provided ones) are filtered out; the python3 porting flag
        and dynamic plugins are passed through explicitly.
        """
        child_config = collections.OrderedDict()
        filter_options = {'long-help'}
        filter_options.update((opt_name for opt_name, _ in self._external_opts))
        for opt_providers in six.itervalues(self._all_options):
            for optname, optdict, val in opt_providers.options_and_values():
                if optdict.get('deprecated'):
                    continue
                if optname not in filter_options:
                    child_config[optname] = utils._format_option_value(
                        optdict, val)
        child_config['python3_porting_mode'] = self._python3_porting_mode
        child_config['plugins'] = self._dynamic_plugins
        return child_config
    def _parallel_task(self, files_or_modules):
        """Generator yielding per-module result tuples from worker processes.

        Spawns at most ``jobs`` ChildLinter processes, feeds them file paths
        through a managed queue and yields each result as it arrives.
        Exits with status 32 if receiving a result fails.
        """
        # Prepare configuration for child linters.
        child_config = self._get_jobs_config()
        children = []
        manager = multiprocessing.Manager()
        tasks_queue = manager.Queue()
        results_queue = manager.Queue()
        # Send files to child linters.
        expanded_files = self.expand_files(files_or_modules)
        # do not start more jobs than needed
        for _ in range(min(self.config.jobs, len(expanded_files))):
            child_linter = ChildLinter(args=(tasks_queue, results_queue,
                                             child_config))
            child_linter.start()
            children.append(child_linter)
        for files_or_module in expanded_files:
            path = files_or_module['path']
            tasks_queue.put([path])
        # collect results from child linters
        failed = False
        for _ in expanded_files:
            try:
                result = results_queue.get()
            except Exception as ex:
                print("internal error while receiving results from child linter",
                      file=sys.stderr)
                print(ex, file=sys.stderr)
                failed = True
                break
            yield result
        # Stop child linters and wait for their completion.
        # NOTE: jobs may exceed the number of started children; the extra
        # 'STOP' sentinels are simply never consumed.
        for _ in range(self.config.jobs):
            tasks_queue.put('STOP')
        for child in children:
            child.join()
        if failed:
            print("Error occurred, stopping the linter.", file=sys.stderr)
            sys.exit(32)
    def _parallel_check(self, files_or_modules):
        """Run the check over worker processes and merge their results."""
        # Reset stats.
        self.open()
        all_stats = []
        module = None
        for result in self._parallel_task(files_or_modules):
            # Workers answer {} on internal failure; skip those.
            if not result:
                continue
            (
                _,
                self.file_state.base_name,
                module,
                messages,
                stats,
                msg_status
            ) = result
            # Replay the child's messages through our own reporter.
            for msg in messages:
                msg = utils.Message(*msg)
                self.set_current_module(module)
                self.reporter.handle_message(msg)
            all_stats.append(stats)
            self.msg_status |= msg_status
        self.stats = _merge_stats(all_stats)
        self.current_name = module
        # Insert stats data to local checkers.
        for checker in self.get_checkers():
            if checker is not self:
                checker.stats = self.stats
    def _do_check(self, files_or_modules):
        """Single-process check: build each module's AST and run checkers."""
        walker = utils.PyLintASTWalker(self)
        _checkers = self.prepare_checkers()
        # Split checkers by the interface they implement; the linter itself
        # handles tokens separately (for pragmas), hence the exclusion.
        tokencheckers = [c for c in _checkers
                         if interfaces.implements(c, interfaces.ITokenChecker)
                         and c is not self]
        rawcheckers = [c for c in _checkers
                       if interfaces.implements(c, interfaces.IRawChecker)]
        # notify global begin
        for checker in _checkers:
            checker.open()
            if interfaces.implements(checker, interfaces.IAstroidChecker):
                walker.add_checker(checker)
        # build ast and check modules or packages
        for descr in self.expand_files(files_or_modules):
            modname, filepath, is_arg = descr['name'], descr['path'], descr['isarg']
            if not self.should_analyze_file(modname, filepath, is_argument=is_arg):
                continue
            self.set_current_module(modname, filepath)
            # get the module representation
            ast_node = self.get_ast(filepath, modname)
            if ast_node is None:
                continue
            # XXX to be correct we need to keep module_msgs_state for every
            # analyzed module (the problem stands with localized messages which
            # are only detected in the .close step)
            self.file_state = utils.FileState(descr['basename'])
            self._ignore_file = False
            # fix the current file (if the source file was not available or
            # if it's actually a c extension)
            self.current_file = ast_node.file # pylint: disable=maybe-no-member
            self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
            # warn about spurious inline messages handling
            spurious_messages = self.file_state.iter_spurious_suppression_messages(self.msgs_store)
            for msgid, line, args in spurious_messages:
                self.add_message(msgid, line, None, args)
        # notify global end
        self.stats['statement'] = walker.nbstatements
        # Close in reverse order so dependent checkers close first.
        for checker in reversed(_checkers):
            checker.close()
def expand_files(self, modules):
"""get modules and errors from a list of modules and handle errors
"""
result, errors = utils.expand_modules(modules, self.config.black_list,
self.config.black_list_re)
for error in errors:
message = modname = error["mod"]
key = error["key"]
self.set_current_module(modname)
if key == "fatal":
message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
self.add_message(key, args=message)
return result
def set_current_module(self, modname, filepath=None):
"""set the name of the currently analyzed module and
init statistics for it
"""
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats['by_module'][modname][msg_cat] = 0
    def get_ast(self, filepath, modname):
        """return a ast(roid) representation for a module

        Returns None (implicitly) when building the AST fails; every failure
        is reported as a message rather than raised.
        """
        try:
            return MANAGER.ast_from_file(filepath, modname, source=True)
        except astroid.AstroidSyntaxError as ex:
            self.add_message('syntax-error',
                             line=getattr(ex.error, 'lineno', 0),
                             args=str(ex.error))
        except astroid.AstroidBuildingException as ex:
            self.add_message('parse-error', args=ex)
        except Exception as ex: # pylint: disable=broad-except
            # Unexpected crash in astroid: show the traceback for debugging
            # and degrade to an astroid-error message.
            import traceback
            traceback.print_exc()
            self.add_message('astroid-error', args=(ex.__class__, ex))
    def check_astroid_module(self, ast_node, walker,
                             rawcheckers, tokencheckers):
        """Check a module from its astroid representation.

        Returns None if tokenization failed, False if the module asked to be
        ignored (skip-file pragma), True otherwise.
        """
        try:
            tokens = utils.tokenize_module(ast_node)
        except tokenize.TokenError as ex:
            self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0])
            return None
        if not ast_node.pure_python:
            # e.g. a C extension: raw/token based checks cannot run on it.
            self.add_message('raw-checker-failed', args=ast_node.name)
        else:
            #assert astroid.file.endswith('.py')
            # invoke ITokenChecker interface on self to fetch module/block
            # level options
            self.process_tokens(tokens)
            if self._ignore_file:
                return False
            # walk ast to collect line numbers
            self.file_state.collect_block_lines(self.msgs_store, ast_node)
            # run raw and tokens checkers
            for checker in rawcheckers:
                checker.process_module(ast_node)
            for checker in tokencheckers:
                checker.process_tokens(tokens)
        # generate events to astroid checkers
        walker.walk(ast_node)
        return True
# IAstroidChecker interface #################################################
def open(self):
"""initialize counters"""
self.stats = {'by_module' : {},
'by_msg' : {},
}
MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
MANAGER.extension_package_whitelist.update(
self.config.extension_pkg_whitelist)
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats[msg_cat] = 0
    def generate_reports(self):
        """close the whole package /module, it's time to make reports !

        if persistent run, pickle results for later comparison
        """
        # Display whatever messages are left on the reporter.
        self.reporter.display_messages(report_nodes.Section())
        # base_name is None when no module was actually analyzed; in that
        # case there is nothing to compare or persist.
        if self.file_state.base_name is not None:
            # load previous results if any
            previous_stats = config.load_results(self.file_state.base_name)
            # XXX code below needs refactoring to be more reporter agnostic
            self.reporter.on_close(self.stats, previous_stats)
            if self.config.reports:
                sect = self.make_reports(self.stats, previous_stats)
            else:
                sect = report_nodes.Section()
            if self.config.reports:
                self.reporter.display_reports(sect)
            self._report_evaluation()
            # save results if persistent run
            if self.config.persistent:
                config.save_results(self.stats, self.file_state.base_name)
        else:
            self.reporter.on_close(self.stats, {})
    def _report_evaluation(self):
        """make the global evaluation report

        Evaluates the configurable 'evaluation' expression against the stats
        dict to produce the /10 score.
        """
        # check with at least check 1 statements (usually 0 when there is a
        # syntax error preventing pylint from further processing)
        previous_stats = config.load_results(self.file_state.base_name)
        if self.stats['statement'] == 0:
            return
        # get a global note for the code
        evaluation = self.config.evaluation
        try:
            # NOTE(review): eval of a user-configurable expression; the
            # rc file is trusted input, but worth keeping in mind.
            note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used
        except Exception as ex: # pylint: disable=broad-except
            msg = 'An exception occurred while rating: %s' % ex
        else:
            self.stats['global_note'] = note
            msg = 'Your code has been rated at %.2f/10' % note
            pnote = previous_stats.get('global_note')
            if pnote is not None:
                msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote)
        if self.config.score:
            sect = report_nodes.EvaluationSection(msg)
            self.reporter.display_reports(sect)
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
    """make total errors / warnings report"""
    lines = ['type', 'number', 'previous', 'difference']
    categories = ('convention', 'refactor', 'warning', 'error')
    lines += checkers.table_lines_from_stats(stats, previous_stats, categories)
    sect.append(report_nodes.Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
    """make messages type report"""
    if not stats['by_msg']:
        # don't print this report when we didn't detect any errors
        raise exceptions.EmptyReportError()
    # Informational ('I') messages are excluded from the table.
    counts = [(value, msg_id)
              for msg_id, value in six.iteritems(stats['by_msg'])
              if not msg_id.startswith('I')]
    lines = ('message id', 'occurrences')
    for value, msg_id in sorted(counts, reverse=True):
        lines += (msg_id, str(value))
    sect.append(report_nodes.Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
    """make errors / warnings by modules report

    Each cell holds the module's share (percent) of that category's total.
    """
    if len(stats['by_module']) == 1:
        # don't print this report when we are analysing a single module
        raise exceptions.EmptyReportError()
    by_mod = collections.defaultdict(dict)
    for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
        total = stats[m_type]
        for module in six.iterkeys(stats['by_module']):
            mod_total = stats['by_module'][module][m_type]
            if total == 0:
                percent = 0
            else:
                percent = float((mod_total)*100) / total
            by_mod[module][m_type] = percent
    # Sort rows by (error, warning, refactor, convention) descending.
    sorted_result = []
    for module, mod_info in six.iteritems(by_mod):
        sorted_result.append((mod_info['error'],
                              mod_info['warning'],
                              mod_info['refactor'],
                              mod_info['convention'],
                              module))
    sorted_result.sort()
    sorted_result.reverse()
    lines = ['module', 'error', 'warning', 'refactor', 'convention']
    for line in sorted_result:
        # Don't report clean modules.
        if all(entry == 0 for entry in line[:-1]):
            continue
        lines.append(line[-1])
        for val in line[:-1]:
            lines.append('%.2f' % val)
    # Only the header row present => nothing to show.
    if len(lines) == 5:
        raise exceptions.EmptyReportError()
    sect.append(report_nodes.Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
class ArgumentPreprocessingError(Exception):
    """Raised if an error occurs during argument preprocessing.

    Emitted by preprocess_options() when a pre-parsed option requires a
    value but none is given, or when a flag option is given a value.
    """
def preprocess_options(args, search_for):
    """look for some options (keys of <search_for>) which have to be processed
    before others

    values of <search_for> are ``(callback, takearg)`` pairs: the callback is
    invoked as ``callback(option, value)`` when the option is found, and
    *takearg* tells whether the option consumes a value.  Matched options
    (and their values) are removed from *args* in place.

    :raises ArgumentPreprocessingError: if an option that requires a value
        has none, or a flag option is given a value.
    """
    i = 0
    while i < len(args):
        arg = args[i]
        if arg.startswith('--'):
            try:
                option, val = arg[2:].split('=', 1)
            except ValueError:
                option, val = arg[2:], None
            try:
                cb, takearg = search_for[option]
            except KeyError:
                i += 1
            else:
                del args[i]
                if takearg and val is None:
                    # value may be supplied as the next argument
                    if i >= len(args) or args[i].startswith('-'):
                        msg = 'Option %s expects a value' % option
                        raise ArgumentPreprocessingError(msg)
                    val = args[i]
                    del args[i]
                elif not takearg and val is not None:
                    # BUG FIX: message previously read "doesn't expects"
                    msg = "Option %s doesn't expect a value" % option
                    raise ArgumentPreprocessingError(msg)
                cb(option, val)
        else:
            i += 1
@contextlib.contextmanager
def fix_import_path(args):
    """Prepare sys.path for running the linter checks.

    Within this context, each of the given arguments is importable.
    Paths are added to sys.path in corresponding order to the arguments.
    We avoid adding duplicate directories to sys.path.
    `sys.path` is reset to its original value upon exiting this context.
    """
    original = sys.path[:]
    prefix = []
    for arg in args:
        python_path = _get_python_path(arg)
        if python_path not in prefix:
            prefix.append(python_path)
    sys.path[:] = prefix + ["."] + sys.path
    try:
        yield
    finally:
        sys.path[:] = original
class Run(object):
    """Command line driver: parse options, configure a PyLinter, run it.

    Mirrors the console entry point: ``Run(sys.argv[1:])``.
    """
    LinterClass = PyLinter
    option_groups = (
        ('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
        )
    def __init__(self, args, reporter=None, exit=True):
        """Parse *args*, build and run the linter; sys.exit unless exit=False."""
        self._rcfile = None
        self._plugins = []
        try:
            # Some options must be handled before regular option parsing
            # because they decide which config file / plugins get loaded.
            preprocess_options(args, {
                # option: (callback, takearg)
                'init-hook': (cb_init_hook, True),
                'rcfile': (self.cb_set_rcfile, True),
                'load-plugins': (self.cb_add_plugins, True),
            })
        except ArgumentPreprocessingError as ex:
            print(ex, file=sys.stderr)
            sys.exit(32)
        self.linter = linter = self.LinterClass((
            ('rcfile',
             {'action' : 'callback', 'callback' : lambda *args: 1,
              'type': 'string', 'metavar': '<file>',
              'help' : 'Specify a configuration file.'}),
            ('init-hook',
             {'action' : 'callback', 'callback' : lambda *args: 1,
              'type' : 'string', 'metavar': '<code>',
              'level': 1,
              'help' : 'Python code to execute, usually for sys.path '
                       'manipulation such as pygtk.require().'}),
            ('help-msg',
             {'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
              'callback' : self.cb_help_message,
              'group': 'Commands',
              'help' : 'Display a help message for the given message id and '
                       'exit. The value may be a comma separated list of message ids.'}),
            ('list-msgs',
             {'action' : 'callback', 'metavar': '<msg-id>',
              'callback' : self.cb_list_messages,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's messages."}),
            ('list-conf-levels',
             {'action' : 'callback',
              'callback' : cb_list_confidence_levels,
              'group': 'Commands', 'level': 1,
              # NOTE(review): help text looks copy-pasted from list-msgs;
              # it actually lists confidence levels (runtime string, left as-is).
              'help' : "Generate pylint's messages."}),
            ('full-documentation',
             {'action' : 'callback', 'metavar': '<msg-id>',
              'callback' : self.cb_full_documentation,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's full documentation."}),
            ('generate-rcfile',
             {'action' : 'callback', 'callback' : self.cb_generate_config,
              'group': 'Commands',
              'help' : 'Generate a sample configuration file according to '
                       'the current configuration. You can put other options '
                       'before this one to get them in the generated '
                       'configuration.'}),
            ('generate-man',
             {'action' : 'callback', 'callback' : self.cb_generate_manpage,
              'group': 'Commands',
              'help' : "Generate pylint's man page.", 'hide': True}),
            ('errors-only',
             {'action' : 'callback', 'callback' : self.cb_error_mode,
              'short': 'E',
              'help' : 'In error mode, checkers without error messages are '
                       'disabled and for others, only the ERROR messages are '
                       'displayed, and no reports are done by default'''}),
            ('py3k',
             {'action' : 'callback', 'callback' : self.cb_python3_porting_mode,
              'help' : 'In Python 3 porting mode, all checkers will be '
                       'disabled and only messages emitted by the porting '
                       'checker will be displayed'}),
            ), option_groups=self.option_groups, pylintrc=self._rcfile)
        # register standard checkers
        linter.load_default_plugins()
        # load command line plugins
        linter.load_plugin_modules(self._plugins)
        # add some help section
        linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
        # pylint: disable=bad-continuation
        linter.add_help_section('Output',
                                'Using the default text output, the message format is : \n'
                                ' \n'
                                ' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n'
                                ' \n'
                                'There are 5 kind of message types : \n'
                                ' * (C) convention, for programming standard violation \n'
                                ' * (R) refactor, for bad code smell \n'
                                ' * (W) warning, for python specific problems \n'
                                ' * (E) error, for probable bugs in the code \n'
                                ' * (F) fatal, if an error occurred which prevented pylint from doing further\n'
                                'processing.\n'
                                , level=1)
        linter.add_help_section('Output status code',
                                'Pylint should leave with following status code: \n'
                                ' * 0 if everything went fine \n'
                                ' * 1 if a fatal message was issued \n'
                                ' * 2 if an error message was issued \n'
                                ' * 4 if a warning message was issued \n'
                                ' * 8 if a refactor message was issued \n'
                                ' * 16 if a convention message was issued \n'
                                ' * 32 on usage error \n'
                                ' \n'
                                'status 1 to 16 will be bit-ORed so you can know which different categories has\n'
                                'been issued by analysing pylint output status code\n',
                                level=1)
        # read configuration
        linter.disable('I')
        linter.enable('c-extension-no-member')
        linter.read_config_file()
        config_parser = linter.cfgfile_parser
        # run init hook, if present, before loading plugins
        if config_parser.has_option('MASTER', 'init-hook'):
            cb_init_hook('init-hook',
                         utils._unquote(config_parser.get('MASTER',
                                                          'init-hook')))
        # is there some additional plugins in the file configuration, in
        if config_parser.has_option('MASTER', 'load-plugins'):
            plugins = utils._splitstrip(
                config_parser.get('MASTER', 'load-plugins'))
            linter.load_plugin_modules(plugins)
        # now we can load file config and command line, plugins (which can
        # provide options) have been registered
        linter.load_config_file()
        if reporter:
            # if a custom reporter is provided as argument, it may be overridden
            # by file parameters, so re-set it here, but before command line
            # parsing so it's still overrideable by command line option
            linter.set_reporter(reporter)
        try:
            args = linter.load_command_line_configuration(args)
        except SystemExit as exc:
            if exc.code == 2: # bad options
                exc.code = 32
            raise
        if not args:
            print(linter.help())
            sys.exit(32)
        if linter.config.jobs < 0:
            print("Jobs number (%d) should be greater than 0"
                  % linter.config.jobs, file=sys.stderr)
            sys.exit(32)
        if linter.config.jobs > 1 or linter.config.jobs == 0:
            if multiprocessing is None:
                print("Multiprocessing library is missing, "
                      "fallback to single process", file=sys.stderr)
                linter.set_option("jobs", 1)
            else:
                # jobs == 0 means "use every available CPU".
                if linter.config.jobs == 0:
                    linter.config.jobs = multiprocessing.cpu_count()
        # insert current working directory to the python path to have a correct
        # behaviour
        with fix_import_path(args):
            linter.check(args)
            linter.generate_reports()
        if exit:
            if linter.config.exit_zero:
                sys.exit(0)
            else:
                sys.exit(self.linter.msg_status)
    def cb_set_rcfile(self, name, value):
        """callback for option preprocessing (i.e. before option parsing)"""
        self._rcfile = value
    def cb_add_plugins(self, name, value):
        """callback for option preprocessing (i.e. before option parsing)"""
        self._plugins.extend(utils._splitstrip(value))
    def cb_error_mode(self, *args, **kwargs):
        """error mode:

        * disable all but error messages
        * disable the 'miscellaneous' checker which can be safely deactivated in
          debug
        * disable reports
        * do not save execution information
        """
        self.linter.error_mode()
    def cb_generate_config(self, *args, **kwargs):
        """optik callback for sample config file generation"""
        self.linter.generate_config(skipsections=('COMMANDS',))
        sys.exit(0)
    def cb_generate_manpage(self, *args, **kwargs):
        """optik callback for man page generation"""
        from pylint import __pkginfo__
        self.linter.generate_manpage(__pkginfo__)
        sys.exit(0)
    def cb_help_message(self, option, optname, value, parser):
        """optik callback for printing some help about a particular message"""
        self.linter.msgs_store.help_message(utils._splitstrip(value))
        sys.exit(0)
    def cb_full_documentation(self, option, optname, value, parser):
        """optik callback for printing full documentation"""
        self.linter.print_full_documentation()
        sys.exit(0)
    def cb_list_messages(self, option, optname, value, parser): # FIXME
        """optik callback for printing available messages"""
        self.linter.msgs_store.list_messages()
        sys.exit(0)
    def cb_python3_porting_mode(self, *args, **kwargs):
        """Activate only the python3 porting checker."""
        self.linter.python3_porting_mode()
def cb_list_confidence_levels(option, optname, value, parser):
    """optik callback: print the available confidence levels and exit."""
    lines = ['%-18s: %s' % level for level in interfaces.CONFIDENCE_LEVELS]
    for line in lines:
        print(line)
    sys.exit(0)
def cb_init_hook(optname, value):
    """exec arbitrary code to set sys.path for instance"""
    # NOTE(review): executes user-supplied configuration code verbatim.
    # This is the documented behaviour of --init-hook, which means the
    # pylintrc / command line are fully trusted input.
    exec(value) # pylint: disable=exec-used
# Script entry point: lint whatever targets are given on the command line.
if __name__ == '__main__':
    Run(sys.argv[1:])
| 41.876934
| 99
| 0.556987
|
from __future__ import print_function
import collections
import contextlib
import operator
import os
try:
import multiprocessing
except ImportError:
multiprocessing = None
import sys
import tokenize
import warnings
import six
import astroid
from astroid.__pkginfo__ import version as astroid_version
from astroid import modutils
from pylint import checkers
from pylint import interfaces
from pylint import reporters
from pylint import exceptions
from pylint import utils
from pylint import config
from pylint.__pkginfo__ import version
from pylint.reporters.ureports import nodes as report_nodes
MANAGER = astroid.MANAGER
def _get_new_args(message):
location = (
message.abspath,
message.path,
message.module,
message.obj,
message.line,
message.column,
)
return (
message.msg_id,
message.symbol,
location,
message.msg,
message.confidence,
)
def _get_python_path(filepath):
dirname = os.path.realpath(os.path.expanduser(filepath))
if not os.path.isdir(dirname):
dirname = os.path.dirname(dirname)
while True:
if not os.path.exists(os.path.join(dirname, "__init__.py")):
return dirname
old_dirname = dirname
dirname = os.path.dirname(dirname)
if old_dirname == dirname:
return os.getcwd()
return None
def _merge_stats(stats):
    """Combine per-process stats dicts into one aggregate dict.

    Numeric values are summed, nested dicts are merged, and the 'by_msg'
    counters are accumulated separately.
    """
    merged = {}
    by_msg = collections.Counter()
    for stat in stats:
        by_msg.update(stat.pop('by_msg', {}))
        for key, item in six.iteritems(stat):
            if key not in merged:
                merged[key] = item
            elif isinstance(item, dict):
                merged[key].update(item)
            else:
                merged[key] = merged[key] + item
    merged['by_msg'] = by_msg
    return merged
@contextlib.contextmanager
def _patch_sysmodules():
    """Temporarily install this module as ``__main__`` in sys.modules.

    Used around the parallel check — presumably so multiprocessing can
    pickle objects defined here when pylint is used as a library rather
    than run as a script (TODO confirm).  No-op when already __main__.
    """
    mock_main = __name__ != '__main__'
    if mock_main:
        sys.modules['__main__'] = sys.modules[__name__]
    try:
        yield
    finally:
        if mock_main:
            sys.modules.pop('__main__')
'suppressed-message',
'A message was triggered on a line, but suppressed explicitly '
'by a disable= comment in the file. This message is not '
'generated for messages that are ignored due to configuration '
'settings.'),
'I0021': ('Useless suppression of %s',
'useless-suppression',
'Reported when a message is explicitly disabled for a line or '
'a block of code, but never triggered.'),
'I0022': ('Pragma "%s" is deprecated, use "%s" instead',
'deprecated-pragma',
'Some inline pylint options have been renamed or reworked, '
'only the most recent form should be used. '
'NOTE:skip-all is only available with pylint >= 0.26',
{'old_names': [('I0014', 'deprecated-disable-all')]}),
'E0001': ('%s',
'syntax-error',
'Used when a syntax error is raised for a module.'),
'E0011': ('Unrecognized file option %r',
'unrecognized-inline-option',
'Used when an unknown inline option is encountered.'),
'E0012': ('Bad option value %r',
'bad-option-value',
'Used when a bad value for an inline option is encountered.'),
}
if multiprocessing is not None:
    class ChildLinter(multiprocessing.Process):
        """Worker process: lints queued files with its own PyLinter instance.

        Receives (tasks_queue, results_queue, config) through Process args,
        consumes file paths until the 'STOP' sentinel, and puts a flattened
        result tuple (or {} on internal failure) on the results queue.
        """
        def run(self):
            # pylint: disable=no-member, unbalanced-tuple-unpacking
            tasks_queue, results_queue, self._config = self._args
            self._config["jobs"] = 1 # Child does not parallelize any further.
            self._python3_porting_mode = self._config.pop(
                'python3_porting_mode', None)
            self._plugins = self._config.pop('plugins', None)
            # Run linter for received files/modules.
            for file_or_module in iter(tasks_queue.get, 'STOP'):
                try:
                    result = self._run_linter(file_or_module[0])
                    results_queue.put(result)
                except Exception as ex:
                    # Report the failure but keep serving further tasks.
                    print("internal error with sending report for module %s" %
                          file_or_module, file=sys.stderr)
                    print(ex, file=sys.stderr)
                    results_queue.put({})
        def _run_linter(self, file_or_module):
            """Lint one target with a fresh PyLinter and return its results."""
            linter = PyLinter()
            # Register standard checkers.
            linter.load_default_plugins()
            # Load command line plugins.
            if self._plugins:
                linter.load_plugin_modules(self._plugins)
            linter.load_configuration_from_config(self._config)
            linter.set_reporter(reporters.CollectingReporter())
            # Enable the Python 3 checker mode. This option is
            # passed down from the parent linter up to here, since
            # the Python 3 porting flag belongs to the Run class,
            # instead of the Linter class.
            if self._python3_porting_mode:
                linter.python3_porting_mode()
            # Run the checks.
            linter.check(file_or_module)
            # Messages are flattened to picklable tuples before transfer.
            msgs = [_get_new_args(m) for m in linter.reporter.messages]
            return (file_or_module, linter.file_state.base_name, linter.current_name,
                    msgs, linter.stats, linter.msg_status)
class PyLinter(config.OptionsManagerMixIn,
utils.MessagesHandlerMixIn,
utils.ReportsHandlerMixIn,
checkers.BaseTokenChecker):
__implements__ = (interfaces.ITokenChecker, )
name = 'master'
priority = 0
level = 0
msgs = MSGS
    @staticmethod
    def make_options():
        """Return the tuple of (name, optdict) master options for PyLinter."""
        return (('ignore',
                 {'type' : 'csv', 'metavar' : '<file>[,<file>...]',
                  'dest' : 'black_list', 'default' : ('CVS',),
                  'help' : 'Add files or directories to the blacklist. '
                           'They should be base names, not paths.'}),
                ('ignore-patterns',
                 {'type' : 'regexp_csv', 'metavar' : '<pattern>[,<pattern>...]',
                  'dest' : 'black_list_re', 'default' : (),
                  'help' : 'Add files or directories matching the regex patterns to the'
                           ' blacklist. The regex matches against base names, not paths.'}),
                ('persistent',
                 {'default': True, 'type' : 'yn', 'metavar' : '<y_or_n>',
                  'level': 1,
                  'help' : 'Pickle collected data for later comparisons.'}),
                ('load-plugins',
                 {'type' : 'csv', 'metavar' : '<modules>', 'default' : (),
                  'level': 1,
                  'help' : 'List of plugins (as comma separated values of '
                           'python modules names) to load, usually to register '
                           'additional checkers.'}),
                ('output-format',
                 {'default': 'text', 'type': 'string', 'metavar' : '<format>',
                  'short': 'f',
                  'group': 'Reports',
                  'help' : 'Set the output format. Available formats are text,'
                           ' parseable, colorized, json and msvs (visual studio).'
                           'You can also give a reporter class, eg mypackage.mymodule.'
                           'MyReporterClass.'}),
                ('reports',
                 {'default': False, 'type' : 'yn', 'metavar' : '<y_or_n>',
                  'short': 'r',
                  'group': 'Reports',
                  'help' : 'Tells whether to display a full report or only the '
                           'messages'}),
                ('evaluation',
                 {'type' : 'string', 'metavar' : '<python_expression>',
                  'group': 'Reports', 'level': 1,
                  'default': '10.0 - ((float(5 * error + warning + refactor + '
                             'convention) / statement) * 10)',
                  'help' : 'Python expression which should return a note less '
                           'than 10 (10 is the highest note). You have access '
                           'to the variables errors warning, statement which '
                           'respectively contain the number of errors / '
                           'warnings messages and the total number of '
                           'statements analyzed. This is used by the global '
                           'evaluation report (RP0004).'}),
                ('score',
                 {'default': True, 'type': 'yn', 'metavar': '<y_or_n>',
                  'short': 's',
                  'group': 'Reports',
                  'help': 'Activate the evaluation score.'}),
                ('confidence',
                 {'type' : 'multiple_choice', 'metavar': '<levels>',
                  'default': '',
                  'choices': [c.name for c in interfaces.CONFIDENCE_LEVELS],
                  'group': 'Messages control',
                  'help' : 'Only show warnings with the listed confidence levels.'
                           ' Leave empty to show all. Valid levels: %s' % (
                               ', '.join(c.name for c in interfaces.CONFIDENCE_LEVELS),)}),
                ('enable',
                 {'type' : 'csv', 'metavar': '<msg ids>',
                  'short': 'e',
                  'group': 'Messages control',
                  'help' : 'Enable the message, report, category or checker with the '
                           'given id(s). You can either give multiple identifier '
                           'separated by comma (,) or put this option multiple time '
                           '(only on the command line, not in the configuration file '
                           'where it should appear only once). '
                           'See also the "--disable" option for examples. '}),
                ('disable',
                 {'type' : 'csv', 'metavar': '<msg ids>',
                  'short': 'd',
                  'group': 'Messages control',
                  'help' : 'Disable the message, report, category or checker '
                           'with the given id(s). You can either give multiple identifiers'
                           ' separated by comma (,) or put this option multiple times '
                           '(only on the command line, not in the configuration file '
                           'where it should appear only once).'
                           'You can also use "--disable=all" to disable everything first '
                           'and then reenable specific checks. For example, if you want '
                           'to run only the similarities checker, you can use '
                           '"--disable=all --enable=similarities". '
                           'If you want to run only the classes checker, but have no '
                           'Warning level messages displayed, use'
                           '"--disable=all --enable=classes --disable=W"'}),
                ('msg-template',
                 {'type' : 'string', 'metavar': '<template>',
                  'group': 'Reports',
                  'help' : ('Template used to display messages. '
                            'This is a python new-style format string '
                            'used to format the message information. '
                            'See doc for all details')
                 }),
                ('jobs',
                 {'type' : 'int', 'metavar': '<n-processes>',
                  'short': 'j',
                  'default': 1,
                  'help' : '''Use multiple processes to speed up Pylint.''',
                 }),
                ('unsafe-load-any-extension',
                 {'type': 'yn', 'metavar': '<yn>', 'default': False, 'hide': True,
                  'help': ('Allow loading of arbitrary C extensions. Extensions'
                           ' are imported into the active Python interpreter and'
                           ' may run arbitrary code.')}),
                ('extension-pkg-whitelist',
                 {'type': 'csv', 'metavar': '<pkg[,pkg]>', 'default': [],
                  'help': ('A comma-separated list of package or module names'
                           ' from where C extensions may be loaded. Extensions are'
                           ' loading into the active Python interpreter and may run'
                           ' arbitrary code')}),
                ('suggestion-mode',
                 {'type': 'yn', 'metavar': '<yn>', 'default': True,
                  'help': ('When enabled, pylint would attempt to guess common '
                           'misconfiguration and emit user-friendly hints instead '
                           'of false-positive error messages')}),
                ('exit-zero',
                 {'action': 'store_true',
                  'help': ('Always return a 0 (non-error) status code, even if '
                           'lint errors are found. This is primarily useful in '
                           'continuous integration scripts.')}),
               )
option_groups = (
('Messages control', 'Options controlling analysis messages'),
('Reports', 'Options related to output formatting and reporting'),
)
    def __init__(self, options=(), reporter=None, option_groups=(),
                 pylintrc=None):
        """Initialize the linter.

        :param options: extra (name, optdict) option definitions
        :param reporter: reporter instance to use (default one is created
            lazily from the output-format option otherwise)
        :param option_groups: extra option groups to register
        :param pylintrc: path to the configuration file to use
        """
        # some stuff has to be done before ancestors initialization...
        #
        # messages store / checkers / reporter / astroid manager
        self.msgs_store = utils.MessagesStore()
        self.reporter = None
        self._reporter_name = None
        self._reporters = {}
        self._checkers = collections.defaultdict(list)
        self._pragma_lineno = {}
        self._ignore_file = False
        # visit variables
        self.file_state = utils.FileState()
        self.current_name = None
        self.current_file = None
        self.stats = None
        # init options
        self._external_opts = options
        self.options = options + PyLinter.make_options()
        self.option_groups = option_groups + PyLinter.option_groups
        self._options_methods = {
            'enable': self.enable,
            'disable': self.disable}
        # backward-compatible spellings of enable/disable
        self._bw_options_methods = {'disable-msg': self.disable,
                                    'enable-msg': self.enable}
        full_version = '%%prog %s, \nastroid %s\nPython %s' % (
            version, astroid_version, sys.version)
        utils.MessagesHandlerMixIn.__init__(self)
        utils.ReportsHandlerMixIn.__init__(self)
        super(PyLinter, self).__init__(
            usage=__doc__,
            version=full_version,
            config_file=pylintrc or config.PYLINTRC)
        checkers.BaseTokenChecker.__init__(self)
        # provided reports
        self.reports = (('RP0001', 'Messages by category',
                         report_total_messages_stats),
                        ('RP0002', '% errors / warnings by module',
                         report_messages_by_module_stats),
                        ('RP0003', 'Messages',
                         report_messages_stats),
                       )
        # the master linter registers itself (it is a checker too)
        self.register_checker(self)
        self._dynamic_plugins = set()
        self._python3_porting_mode = False
        self._error_mode = False
        self.load_provider_defaults()
        if reporter:
            self.set_reporter(reporter)
    def load_default_plugins(self):
        """Register the standard checkers and reporters shipped with pylint."""
        checkers.initialize(self)
        reporters.initialize(self)
        # Make sure to load the default reporter, because
        # the option has been set before the plugins had been loaded.
        if not self.reporter:
            self._load_reporter()
def load_plugin_modules(self, modnames):
for modname in modnames:
if modname in self._dynamic_plugins:
continue
self._dynamic_plugins.add(modname)
module = modutils.load_module_from_name(modname)
module.register(self)
    def _load_reporter(self):
        """Instantiate the reporter selected by --output-format.

        The configured name is either a short registered name (text, json,
        colorized, ...) or a dotted path to a reporter class.
        """
        name = self._reporter_name.lower()
        if name in self._reporters:
            self.set_reporter(self._reporters[name]())
        else:
            try:
                reporter_class = self._load_reporter_class()
            except (ImportError, AttributeError):
                raise exceptions.InvalidReporterError(name)
            else:
                self.set_reporter(reporter_class())
def _load_reporter_class(self):
qname = self._reporter_name
module = modutils.load_module_from_name(
modutils.get_module_part(qname))
class_name = qname.split('.')[-1]
reporter_class = getattr(module, class_name)
return reporter_class
    def set_reporter(self, reporter):
        """Set the reporter used to display messages and reports."""
        self.reporter = reporter
        # give the reporter a back-reference so it can query the linter state
        reporter.linter = self
    def set_option(self, optname, value, action=None, optdict=None):
        """Overridden from config.OptionsProviderMixIn to handle some
        special options: enable/disable (and their deprecated *-msg
        spellings) and output-format.
        """
        if optname in self._options_methods or \
           optname in self._bw_options_methods:
            if value:
                try:
                    meth = self._options_methods[optname]
                except KeyError:
                    # deprecated enable-msg/disable-msg spelling
                    meth = self._bw_options_methods[optname]
                    warnings.warn('%s is deprecated, replace it by %s' % (optname,
                                                                          optname.split('-')[0]),
                                  DeprecationWarning)
                value = utils._check_csv(value)
                if isinstance(value, (list, tuple)):
                    for _id in value:
                        meth(_id, ignore_unknown=True)
                else:
                    meth(value)
                return # no need to call set_option, disable/enable methods do it
        elif optname == 'output-format':
            self._reporter_name = value
            # If the reporters are already available, load
            # the reporter class.
            if self._reporters:
                self._load_reporter()
        try:
            checkers.BaseTokenChecker.set_option(self, optname,
                                                 value, action, optdict)
        except config.UnsupportedAction:
            print('option %s can\'t be read from config file' % \
                  optname, file=sys.stderr)
    def register_reporter(self, reporter_class):
        """Make *reporter_class* selectable under its ``name`` attribute."""
        self._reporters[reporter_class.name] = reporter_class
def report_order(self):
reports = sorted(self._reports, key=lambda x: getattr(x, 'name', ''))
try:
reports.pop(reports.index(self))
except ValueError:
pass
else:
reports.append(self)
return reports
else:
for msgid in msgids:
self.disable(msgid)
    def disable_reporters(self):
        """Disable all registered reports (the individual RPxxxx entries)."""
        for _reporters in six.itervalues(self._reports):
            for report_id, _, _ in _reporters:
                self.disable_report(report_id)
    def error_mode(self):
        """Error mode: enable only errors; no reports, no persistence."""
        self._error_mode = True
        self.disable_noerror_messages()
        self.disable('miscellaneous')
        if self._python3_porting_mode:
            self.disable('all')
            # keep only the error-level messages of the python3 checker
            for msg_id in self._checker_messages('python3'):
                if msg_id.startswith('E'):
                    self.enable(msg_id)
            config_parser = self.cfgfile_parser
            # re-apply the rcfile's explicit disables so they stay effective
            # after the blanket enable above
            if config_parser.has_option('MESSAGES CONTROL', 'disable'):
                value = config_parser.get('MESSAGES CONTROL', 'disable')
                self.global_set_option('disable', value)
        else:
            self.disable('python3')
        self.set_option('reports', False)
        self.set_option('persistent', False)
        self.set_option('score', False)
    def python3_porting_mode(self):
        """Disable all other checkers and enable the Python 3 checker."""
        self.disable('all')
        self.enable('python3')
        if self._error_mode:
            # The error mode was activated, using the -E flag.
            # So we'll need to enable only the errors from the
            # Python 3 porting checker.
            for msg_id in self._checker_messages('python3'):
                if msg_id.startswith('E'):
                    self.enable(msg_id)
                else:
                    self.disable(msg_id)
        config_parser = self.cfgfile_parser
        # re-apply the rcfile's explicit disables so they stay effective
        if config_parser.has_option('MESSAGES CONTROL', 'disable'):
            value = config_parser.get('MESSAGES CONTROL', 'disable')
            self.global_set_option('disable', value)
        self._python3_porting_mode = True
continue
opt = opt.strip()
if opt in self._options_methods or opt in self._bw_options_methods:
try:
meth = self._options_methods[opt]
except KeyError:
meth = self._bw_options_methods[opt]
self.add_message('deprecated-pragma', line=start[0],
args=(opt, opt.replace('-msg', '')))
for msgid in utils._splitstrip(value):
if opt in control_pragmas:
self._pragma_lineno[msgid] = start[0]
try:
if (opt, msgid) == ('disable', 'all'):
self.add_message('deprecated-pragma', line=start[0],
args=('disable=all', 'skip-file'))
self.add_message('file-ignored', line=start[0])
self._ignore_file = True
return
meth(msgid, 'module', start[0])
except exceptions.UnknownMessageError:
self.add_message('bad-option-value', args=msgid, line=start[0])
else:
self.add_message('unrecognized-inline-option', args=opt, line=start[0])
if self.config.jobs == 1:
self._do_check(files_or_modules)
else:
with _patch_sysmodules():
self._parallel_check(files_or_modules)
    def _get_jobs_config(self):
        """Serialize the current configuration to hand to child linters."""
        child_config = collections.OrderedDict()
        # options set only on the command line (and long-help) are not
        # forwarded to the children
        filter_options = {'long-help'}
        filter_options.update((opt_name for opt_name, _ in self._external_opts))
        for opt_providers in six.itervalues(self._all_options):
            for optname, optdict, val in opt_providers.options_and_values():
                if optdict.get('deprecated'):
                    continue
                if optname not in filter_options:
                    child_config[optname] = utils._format_option_value(
                        optdict, val)
        # these two belong to the Run class, not to the option providers
        child_config['python3_porting_mode'] = self._python3_porting_mode
        child_config['plugins'] = self._dynamic_plugins
        return child_config
    def _parallel_task(self, files_or_modules):
        """Spawn child linters and yield their per-module results."""
        # Prepare configuration for child linters.
        child_config = self._get_jobs_config()
        children = []
        manager = multiprocessing.Manager()
        tasks_queue = manager.Queue()
        results_queue = manager.Queue()
        # Start one worker per job, capped at the number of files to check.
        expanded_files = self.expand_files(files_or_modules)
        for _ in range(min(self.config.jobs, len(expanded_files))):
            child_linter = ChildLinter(args=(tasks_queue, results_queue,
                                             child_config))
            child_linter.start()
            children.append(child_linter)
        # Send files to child linters.
        for files_or_module in expanded_files:
            path = files_or_module['path']
            tasks_queue.put([path])
        # Collect one result per submitted file.
        failed = False
        for _ in expanded_files:
            try:
                result = results_queue.get()
            except Exception as ex:
                print("internal error while receiving results from child linter",
                      file=sys.stderr)
                print(ex, file=sys.stderr)
                failed = True
                break
            yield result
        # Stop child linters and wait for their completion.
        for _ in range(self.config.jobs):
            tasks_queue.put('STOP')
        for child in children:
            child.join()
        if failed:
            print("Error occurred, stopping the linter.", file=sys.stderr)
            sys.exit(32)
    def _parallel_check(self, files_or_modules):
        """Merge the results of parallel linting back into this linter."""
        # Reset stats.
        self.open()
        all_stats = []
        module = None
        for result in self._parallel_task(files_or_modules):
            if not result:
                # empty dict: the child failed on that module
                continue
            (
                _,
                self.file_state.base_name,
                module,
                messages,
                stats,
                msg_status
            ) = result
            # replay the child's messages through this linter's reporter
            for msg in messages:
                msg = utils.Message(*msg)
                self.set_current_module(module)
                self.reporter.handle_message(msg)
            all_stats.append(stats)
            self.msg_status |= msg_status
        self.stats = _merge_stats(all_stats)
        self.current_name = module
        # Insert stats data to local checkers.
        for checker in self.get_checkers():
            if checker is not self:
                checker.stats = self.stats
    def _do_check(self, files_or_modules):
        """Lint the given files/modules in-process (single-job path)."""
        walker = utils.PyLintASTWalker(self)
        _checkers = self.prepare_checkers()
        # the master linter handles its own tokens separately
        tokencheckers = [c for c in _checkers
                         if interfaces.implements(c, interfaces.ITokenChecker)
                         and c is not self]
        rawcheckers = [c for c in _checkers
                       if interfaces.implements(c, interfaces.IRawChecker)]
        # notify global begin
        for checker in _checkers:
            checker.open()
            if interfaces.implements(checker, interfaces.IAstroidChecker):
                walker.add_checker(checker)
        # build ast and check modules or packages
        for descr in self.expand_files(files_or_modules):
            modname, filepath, is_arg = descr['name'], descr['path'], descr['isarg']
            if not self.should_analyze_file(modname, filepath, is_argument=is_arg):
                continue
            self.set_current_module(modname, filepath)
            # get the module representation
            ast_node = self.get_ast(filepath, modname)
            if ast_node is None:
                continue
            # reset per-file message state before checking
            self.file_state = utils.FileState(descr['basename'])
            self._ignore_file = False
            self.current_file = ast_node.file # pylint: disable=maybe-no-member
            self.check_astroid_module(ast_node, walker, rawcheckers, tokencheckers)
            # warn about spurious inline messages handling
            spurious_messages = self.file_state.iter_spurious_suppression_messages(self.msgs_store)
            for msgid, line, args in spurious_messages:
                self.add_message(msgid, line, None, args)
        # notify global end
        self.stats['statement'] = walker.nbstatements
        for checker in reversed(_checkers):
            checker.close()
    def expand_files(self, modules):
        """Expand the given module/file names, reporting expansion errors.

        Returns a list of module descriptions (dicts with 'name', 'path',
        'isarg', 'basename' keys, per utils.expand_modules).
        """
        result, errors = utils.expand_modules(modules, self.config.black_list,
                                              self.config.black_list_re)
        for error in errors:
            message = modname = error["mod"]
            key = error["key"]
            self.set_current_module(modname)
            if key == "fatal":
                # strip the cwd prefix from the exception text for readability
                message = str(error["ex"]).replace(os.getcwd() + os.sep, '')
            self.add_message(key, args=message)
        return result
def set_current_module(self, modname, filepath=None):
if not modname and filepath is None:
return
self.reporter.on_set_current_module(modname, filepath)
self.current_name = modname
self.current_file = filepath or modname
self.stats['by_module'][modname] = {}
self.stats['by_module'][modname]['statement'] = 0
for msg_cat in six.itervalues(utils.MSG_TYPES):
self.stats['by_module'][modname][msg_cat] = 0
    def get_ast(self, filepath, modname):
        """Return an astroid representation of the module, or None on error."""
        try:
            return MANAGER.ast_from_file(filepath, modname, source=True)
        except astroid.AstroidSyntaxError as ex:
            # a syntax error in the analysed module: report it and move on
            self.add_message('syntax-error',
                             line=getattr(ex.error, 'lineno', 0),
                             args=str(ex.error))
        except astroid.AstroidBuildingException as ex:
            self.add_message('parse-error', args=ex)
        except Exception as ex: # pylint: disable=broad-except
            # unexpected astroid failure: report it as an internal error
            import traceback
            traceback.print_exc()
            self.add_message('astroid-error', args=(ex.__class__, ex))
    def check_astroid_module(self, ast_node, walker,
                             rawcheckers, tokencheckers):
        """Check a module from its astroid representation.

        Returns True when the module was fully checked, False when inline
        options asked to skip the file, None when it could not be tokenized.
        """
        try:
            tokens = utils.tokenize_module(ast_node)
        except tokenize.TokenError as ex:
            self.add_message('syntax-error', line=ex.args[1][0], args=ex.args[0])
            return None
        if not ast_node.pure_python:
            # C extension: raw/token checkers cannot run on it
            self.add_message('raw-checker-failed', args=ast_node.name)
        else:
            #assert astroid.file.endswith('.py')
            # invoke ITokenChecker interface on self to fetch module/block
            # level options
            self.process_tokens(tokens)
            if self._ignore_file:
                return False
            # walk ast to collect line numbers
            self.file_state.collect_block_lines(self.msgs_store, ast_node)
            # run raw and tokens checkers
            for checker in rawcheckers:
                checker.process_module(ast_node)
            for checker in tokencheckers:
                checker.process_tokens(tokens)
        # generate events to astroid checkers
        walker.walk(ast_node)
        return True
# IAstroidChecker interface #################################################
    def open(self):
        """Initialize counters and the astroid manager for a new run."""
        self.stats = {'by_module' : {},
                      'by_msg' : {},
                     }
        # honour the C-extension loading options for this run
        MANAGER.always_load_extensions = self.config.unsafe_load_any_extension
        MANAGER.extension_package_whitelist.update(
            self.config.extension_pkg_whitelist)
        for msg_cat in six.itervalues(utils.MSG_TYPES):
            self.stats[msg_cat] = 0
    def generate_reports(self):
        """Close the whole package/module: time to make reports!

        If this is a persistent run, pickle results for later comparison.
        """
        # Display whatever messages are left on the reporter.
        self.reporter.display_messages(report_nodes.Section())
        if self.file_state.base_name is not None:
            # load previous results if any
            previous_stats = config.load_results(self.file_state.base_name)
            # XXX code below needs refactoring to be more reporter agnostic
            self.reporter.on_close(self.stats, previous_stats)
            if self.config.reports:
                sect = self.make_reports(self.stats, previous_stats)
            else:
                sect = report_nodes.Section()
            if self.config.reports:
                self.reporter.display_reports(sect)
            self._report_evaluation()
            # save results if persistent run
            if self.config.persistent:
                config.save_results(self.stats, self.file_state.base_name)
        else:
            self.reporter.on_close(self.stats, {})
    def _report_evaluation(self):
        """Make the global evaluation report (the x.xx/10 score)."""
        # check with at least check 1 statements (usually 0 when there is a
        # syntax error preventing pylint from further processing)
        previous_stats = config.load_results(self.file_state.base_name)
        if self.stats['statement'] == 0:
            return
        # get a global note for the code
        # NOTE: the expression comes from the user's own configuration
        # (--evaluation), hence the deliberate eval below.
        evaluation = self.config.evaluation
        try:
            note = eval(evaluation, {}, self.stats) # pylint: disable=eval-used
        except Exception as ex: # pylint: disable=broad-except
            msg = 'An exception occurred while rating: %s' % ex
        else:
            self.stats['global_note'] = note
            msg = 'Your code has been rated at %.2f/10' % note
            pnote = previous_stats.get('global_note')
            if pnote is not None:
                msg += ' (previous run: %.2f/10, %+.2f)' % (pnote, note - pnote)
        if self.config.score:
            sect = report_nodes.EvaluationSection(msg)
            self.reporter.display_reports(sect)
# some reporting functions ####################################################
def report_total_messages_stats(sect, stats, previous_stats):
    """Make total errors / warnings report (RP0001)."""
    lines = ['type', 'number', 'previous', 'difference']
    lines += checkers.table_lines_from_stats(stats, previous_stats,
                                             ('convention', 'refactor',
                                              'warning', 'error'))
    sect.append(report_nodes.Table(children=lines, cols=4, rheaders=1))
def report_messages_stats(sect, stats, _):
    """Make messages type report (RP0003): occurrences per message id."""
    if not stats['by_msg']:
        # don't print this report when we didn't detect any errors
        raise exceptions.EmptyReportError()
    # informational (I*) messages are excluded from the table
    counts = [(value, msg_id)
              for msg_id, value in six.iteritems(stats['by_msg'])
              if not msg_id.startswith('I')]
    counts.sort(reverse=True)
    lines = ('message id', 'occurrences')
    for value, msg_id in counts:
        lines += (msg_id, str(value))
    sect.append(report_nodes.Table(children=lines, cols=2, rheaders=1))
def report_messages_by_module_stats(sect, stats, _):
    """Make errors / warnings by modules report (RP0002)."""
    if len(stats['by_module']) == 1:
        # don't print this report when we are analysing a single module
        raise exceptions.EmptyReportError()
    by_mod = collections.defaultdict(dict)
    # percentage of each message category produced by each module
    for m_type in ('fatal', 'error', 'warning', 'refactor', 'convention'):
        total = stats[m_type]
        for module in six.iterkeys(stats['by_module']):
            mod_total = stats['by_module'][module][m_type]
            if total == 0:
                percent = 0
            else:
                percent = float((mod_total)*100) / total
            by_mod[module][m_type] = percent
    sorted_result = []
    for module, mod_info in six.iteritems(by_mod):
        sorted_result.append((mod_info['error'],
                              mod_info['warning'],
                              mod_info['refactor'],
                              mod_info['convention'],
                              module))
    # worst offenders first
    sorted_result.sort()
    sorted_result.reverse()
    lines = ['module', 'error', 'warning', 'refactor', 'convention']
    for line in sorted_result:
        # skip modules with no message at all
        if all(entry == 0 for entry in line[:-1]):
            continue
        lines.append(line[-1])
        for val in line[:-1]:
            lines.append('%.2f' % val)
    if len(lines) == 5:
        # only the header row: nothing to report
        raise exceptions.EmptyReportError()
    sect.append(report_nodes.Table(children=lines, cols=5, rheaders=1))
# utilities ###################################################################
class ArgumentPreprocessingError(Exception):
    """Raised if an error occurs during argument preprocessing."""
def preprocess_options(args, search_for):
    """Walk *args* (modified in place) and process the options in *search_for*.

    *search_for* maps an option name (without the leading ``--``) to a
    ``(callback, takearg)`` pair.  Every matching ``--option[=value]`` token
    is removed from *args* and ``callback(option, value)`` is invoked;
    unknown tokens are left in place for the regular option parser.

    :raises ArgumentPreprocessingError: if an option requiring a value has
        none, or an option taking no value is given one.
    """
    i = 0
    while i < len(args):
        arg = args[i]
        if arg.startswith('--'):
            try:
                option, val = arg[2:].split('=', 1)
            except ValueError:
                option, val = arg[2:], None
            try:
                cb, takearg = search_for[option]
            except KeyError:
                # not one of ours: leave it for the real parser
                i += 1
            else:
                del args[i]
                if takearg and val is None:
                    # the value must be the next argument (and not an option)
                    if i >= len(args) or args[i].startswith('-'):
                        msg = 'Option %s expects a value' % option
                        raise ArgumentPreprocessingError(msg)
                    val = args[i]
                    del args[i]
                elif not takearg and val is not None:
                    # fixed grammar: "doesn't expects" -> "doesn't expect"
                    msg = "Option %s doesn't expect a value" % option
                    raise ArgumentPreprocessingError(msg)
                cb(option, val)
        else:
            i += 1
@contextlib.contextmanager
def fix_import_path(args):
    """Prepend each argument's python path (plus '.') to ``sys.path``.

    Within the ``with`` block, the modules and packages passed on the
    command line are importable; the original ``sys.path`` is restored on
    exit, even if an exception escapes the block.
    """
    original_path = list(sys.path)
    prefixes = []
    for arg in args:
        candidate = _get_python_path(arg)
        if candidate not in prefixes:
            prefixes.append(candidate)
    sys.path[:] = prefixes + ["."] + sys.path
    try:
        yield
    finally:
        sys.path[:] = original_path
class Run(object):
    """Helper class to use as main for pylint: run(*sys.argv[1:])."""
    LinterClass = PyLinter
    option_groups = (
        ('Commands', 'Options which are actually commands. Options in this \
group are mutually exclusive.'),
        )
    def __init__(self, args, reporter=None, exit=True):
        """Parse options, configure the linter and run it on *args*.

        :param args: command line arguments (without the program name)
        :param reporter: optional reporter instance overriding --output-format
        :param exit: when true, terminate the process with the linter's
            message status (or 0 with --exit-zero)
        """
        self._rcfile = None
        self._plugins = []
        try:
            # options we must know before the regular option parsing
            preprocess_options(args, {
                # option: (callback, takearg)
                'init-hook': (cb_init_hook, True),
                'rcfile': (self.cb_set_rcfile, True),
                'load-plugins': (self.cb_add_plugins, True),
                })
        except ArgumentPreprocessingError as ex:
            print(ex, file=sys.stderr)
            sys.exit(32)
        self.linter = linter = self.LinterClass((
            ('rcfile',
             {'action' : 'callback', 'callback' : lambda *args: 1,
              'type': 'string', 'metavar': '<file>',
              'help' : 'Specify a configuration file.'}),
            ('init-hook',
             {'action' : 'callback', 'callback' : lambda *args: 1,
              'type' : 'string', 'metavar': '<code>',
              'level': 1,
              'help' : 'Python code to execute, usually for sys.path '
                       'manipulation such as pygtk.require().'}),
            ('help-msg',
             {'action' : 'callback', 'type' : 'string', 'metavar': '<msg-id>',
              'callback' : self.cb_help_message,
              'group': 'Commands',
              'help' : 'Display a help message for the given message id and '
                       'exit. The value may be a comma separated list of message ids.'}),
            ('list-msgs',
             {'action' : 'callback', 'metavar': '<msg-id>',
              'callback' : self.cb_list_messages,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's messages."}),
            ('list-conf-levels',
             {'action' : 'callback',
              'callback' : cb_list_confidence_levels,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's messages."}),
            ('full-documentation',
             {'action' : 'callback', 'metavar': '<msg-id>',
              'callback' : self.cb_full_documentation,
              'group': 'Commands', 'level': 1,
              'help' : "Generate pylint's full documentation."}),
            ('generate-rcfile',
             {'action' : 'callback', 'callback' : self.cb_generate_config,
              'group': 'Commands',
              'help' : 'Generate a sample configuration file according to '
                       'the current configuration. You can put other options '
                       'before this one to get them in the generated '
                       'configuration.'}),
            ('generate-man',
             {'action' : 'callback', 'callback' : self.cb_generate_manpage,
              'group': 'Commands',
              'help' : "Generate pylint's man page.", 'hide': True}),
            ('errors-only',
             {'action' : 'callback', 'callback' : self.cb_error_mode,
              'short': 'E',
              'help' : 'In error mode, checkers without error messages are '
                       'disabled and for others, only the ERROR messages are '
                       'displayed, and no reports are done by default'''}),
            ('py3k',
             {'action' : 'callback', 'callback' : self.cb_python3_porting_mode,
              'help' : 'In Python 3 porting mode, all checkers will be '
                       'disabled and only messages emitted by the porting '
                       'checker will be displayed'}),
            ), option_groups=self.option_groups, pylintrc=self._rcfile)
        # register standard checkers
        linter.load_default_plugins()
        # load command line plugins
        linter.load_plugin_modules(self._plugins)
        # add some help section
        linter.add_help_section('Environment variables', config.ENV_HELP, level=1)
        # pylint: disable=bad-continuation
        linter.add_help_section('Output',
                                'Using the default text output, the message format is : \n'
                                ' \n'
                                ' MESSAGE_TYPE: LINE_NUM:[OBJECT:] MESSAGE \n'
                                ' \n'
                                'There are 5 kind of message types : \n'
                                ' * (C) convention, for programming standard violation \n'
                                ' * (R) refactor, for bad code smell \n'
                                ' * (W) warning, for python specific problems \n'
                                ' * (E) error, for probable bugs in the code \n'
                                ' * (F) fatal, if an error occurred which prevented pylint from doing further\n'
                                'processing.\n'
                                , level=1)
        linter.add_help_section('Output status code',
                                'Pylint should leave with following status code: \n'
                                ' * 0 if everything went fine \n'
                                ' * 1 if a fatal message was issued \n'
                                ' * 2 if an error message was issued \n'
                                ' * 4 if a warning message was issued \n'
                                ' * 8 if a refactor message was issued \n'
                                ' * 16 if a convention message was issued \n'
                                ' * 32 on usage error \n'
                                ' \n'
                                'status 1 to 16 will be bit-ORed so you can know which different categories has\n'
                                'been issued by analysing pylint output status code\n',
                                level=1)
        # read configuration
        linter.disable('I')
        linter.enable('c-extension-no-member')
        linter.read_config_file()
        config_parser = linter.cfgfile_parser
        # run init hook, if present, before loading plugins
        if config_parser.has_option('MASTER', 'init-hook'):
            cb_init_hook('init-hook',
                         utils._unquote(config_parser.get('MASTER',
                                                          'init-hook')))
        # is there some additional plugins in the file configuration, in
        # which case they need to be loaded before reading the rest of it
        if config_parser.has_option('MASTER', 'load-plugins'):
            plugins = utils._splitstrip(
                config_parser.get('MASTER', 'load-plugins'))
            linter.load_plugin_modules(plugins)
        # now we can load file config and command line, plugins (which can
        # provide options) have been registered
        linter.load_config_file()
        if reporter:
            # if a custom reporter is provided as argument, it may be overridden
            # by file parameters, so re-set it here, but before command line
            # parsing so it's still overrideable by command line option
            linter.set_reporter(reporter)
        try:
            args = linter.load_command_line_configuration(args)
        except SystemExit as exc:
            if exc.code == 2: # bad options
                exc.code = 32
            raise
        if not args:
            print(linter.help())
            sys.exit(32)
        if linter.config.jobs < 0:
            print("Jobs number (%d) should be greater than 0"
                  % linter.config.jobs, file=sys.stderr)
            sys.exit(32)
        if linter.config.jobs > 1 or linter.config.jobs == 0:
            if multiprocessing is None:
                print("Multiprocessing library is missing, "
                      "fallback to single process", file=sys.stderr)
                linter.set_option("jobs", 1)
            else:
                # --jobs=0 means: one job per available CPU
                if linter.config.jobs == 0:
                    linter.config.jobs = multiprocessing.cpu_count()
        # insert current working directory to the python path to have a correct
        # behaviour
        with fix_import_path(args):
            linter.check(args)
            linter.generate_reports()
        if exit:
            if linter.config.exit_zero:
                sys.exit(0)
            else:
                sys.exit(self.linter.msg_status)
    def cb_set_rcfile(self, name, value):
        """callback for option preprocessing (i.e. before option parsing)"""
        self._rcfile = value
    def cb_add_plugins(self, name, value):
        """callback for option preprocessing (i.e. before option parsing)"""
        self._plugins.extend(utils._splitstrip(value))
    def cb_error_mode(self, *args, **kwargs):
        """error mode: enable only errors; no reports, no persistent"""
        self.linter.error_mode()
    def cb_generate_config(self, *args, **kwargs):
        """optik callback for sample config file generation"""
        self.linter.generate_config(skipsections=('COMMANDS',))
        sys.exit(0)
    def cb_generate_manpage(self, *args, **kwargs):
        """optik callback for man page generation"""
        from pylint import __pkginfo__
        self.linter.generate_manpage(__pkginfo__)
        sys.exit(0)
    def cb_help_message(self, option, optname, value, parser):
        """optik callback for printing some help about a particular message"""
        self.linter.msgs_store.help_message(utils._splitstrip(value))
        sys.exit(0)
    def cb_full_documentation(self, option, optname, value, parser):
        """optik callback for printing full documentation"""
        self.linter.print_full_documentation()
        sys.exit(0)
    def cb_list_messages(self, option, optname, value, parser): # FIXME
        """optik callback for printing available messages"""
        self.linter.msgs_store.list_messages()
        sys.exit(0)
    def cb_python3_porting_mode(self, *args, **kwargs):
        """Activate only the python3 porting checker."""
        self.linter.python3_porting_mode()
def cb_list_confidence_levels(option, optname, value, parser):
    """Print the confidence levels pylint can filter on, then exit."""
    for level in interfaces.CONFIDENCE_LEVELS:
        print('%-18s: %s' % level)
    sys.exit(0)
def cb_init_hook(optname, value):
    """exec arbitrary code to set sys.path for instance

    NOTE: the hook source comes from the user's own command line or rcfile,
    so the exec below runs code the user explicitly asked for.
    """
    exec(value) # pylint: disable=exec-used
if __name__ == '__main__':
    # command line entry point: lint the modules given as arguments
    Run(sys.argv[1:])
| true
| true
|
1c424f68e1e50a13d66542614d95af7c28619862
| 5,135
|
py
|
Python
|
Validation/RecoTrack/python/trackingNtuple_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 1
|
2020-08-12T08:37:04.000Z
|
2020-08-12T08:37:04.000Z
|
Validation/RecoTrack/python/trackingNtuple_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | null | null | null |
Validation/RecoTrack/python/trackingNtuple_cff.py
|
nistefan/cmssw
|
ea13af97f7f2117a4f590a5e654e06ecd9825a5b
|
[
"Apache-2.0"
] | 1
|
2019-03-19T13:44:54.000Z
|
2019-03-19T13:44:54.000Z
|
import FWCore.ParameterSet.Config as cms
from RecoLocalTracker.Configuration.RecoLocalTracker_cff import *
from SimGeneral.TrackingAnalysis.simHitTPAssociation_cfi import *
from SimTracker.TrackerHitAssociation.tpClusterProducer_cfi import *
from SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi import *
from RecoTracker.TransientTrackingRecHit.TTRHBuilders_cff import *
from RecoLocalTracker.SiPixelRecHits.PixelCPEGeneric_cfi import *
from RecoLocalTracker.Phase2TrackerRecHits.Phase2TrackerRecHits_cfi import *
from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *
from Validation.RecoTrack.trackingNtuple_cfi import *
from Validation.RecoTrack.TrackValidation_cff import *
from SimGeneral.TrackingAnalysis.trackingParticleNumberOfLayersProducer_cff import *
import Validation.RecoTrack.TrackValidation_cff as _TrackValidation_cff
# Toggles for what the ntuplizer stores; flip the commented alternatives
# to produce a slimmer ntuple.
_includeHits = True
#_includeHits = False
_includeSeeds = True
#_includeSeeds = False
_includeMVA = True
#_includeMVA = False
_includeTrackingParticles = True
#_includeTrackingParticles = False
from CommonTools.RecoAlgos.trackingParticleRefSelector_cfi import trackingParticleRefSelector as _trackingParticleRefSelector
# Keep only in-time TrackingParticles; every other selection cut is opened
# wide so the in-time requirement is the sole filter.
trackingParticlesIntime = _trackingParticleRefSelector.clone(
    signalOnly = False,
    intimeOnly = True,
    chargedOnly = False,
    tip = 1e5,
    lip = 1e5,
    minRapidity = -10,
    maxRapidity = 10,
    ptMin = 0,
)
# Wire the ntuplizer to the in-time TrackingParticle selection above and
# apply the include-toggles defined at the top of this file.
trackingNtuple.trackingParticles = "trackingParticlesIntime"
trackingNtuple.trackingParticlesRef = True
trackingNtuple.includeAllHits = _includeHits
trackingNtuple.includeSeeds = _includeSeeds
trackingNtuple.includeMVA = _includeMVA
trackingNtuple.includeTrackingParticles = _includeTrackingParticles
def _filterForNtuple(lst):
ret = []
for item in lst:
if "PreSplitting" in item:
continue
if "SeedsA" in item and item.replace("SeedsA", "SeedsB") in lst:
ret.append(item.replace("SeedsA", "Seeds"))
continue
if "SeedsB" in item:
continue
if "SeedsPair" in item and item.replace("SeedsPair", "SeedsTripl") in lst:
ret.append(item.replace("SeedsPair", "Seeds"))
continue
if "SeedsTripl" in item:
continue
ret.append(item)
return ret
# Collapse the per-era seed-producer lists for the ntuple.
_seedProducers = _filterForNtuple(_TrackValidation_cff._seedProducers)
_seedProducers_trackingPhase1 = _filterForNtuple(_TrackValidation_cff._seedProducers_trackingPhase1)
_seedProducers_trackingPhase2PU140 = _filterForNtuple(_TrackValidation_cff._seedProducers_trackingPhase2PU140)
# Create seed-to-track converter modules (injected into this module's
# globals by the helper — presumably one module per seed collection;
# verify against _addSeedToTrackProducers in TrackValidation_cff).
(_seedSelectors, trackingNtupleSeedSelectors) = _TrackValidation_cff._addSeedToTrackProducers(_seedProducers, globals())
(_seedSelectors_trackingPhase1, _trackingNtupleSeedSelectors_trackingPhase1) = _TrackValidation_cff._addSeedToTrackProducers(_seedProducers_trackingPhase1, globals())
(_seedSelectors_trackingPhase2PU140, _trackingNtupleSeedSelectors_trackingPhase2PU140) = _TrackValidation_cff._addSeedToTrackProducers(_seedProducers_trackingPhase2PU140, globals())
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
# swap in the era-specific selector sequences/lists for phase1 and phase2
trackingPhase1.toReplaceWith(trackingNtupleSeedSelectors, _trackingNtupleSeedSelectors_trackingPhase1)
trackingPhase2PU140.toReplaceWith(trackingNtupleSeedSelectors, _trackingNtupleSeedSelectors_trackingPhase2PU140)
trackingNtuple.seedTracks = _seedSelectors
trackingPhase1.toModify(trackingNtuple, seedTracks = _seedSelectors_trackingPhase1)
trackingPhase2PU140.toModify(trackingNtuple, seedTracks = _seedSelectors_trackingPhase2PU140)
def _seedProdToTrackCands(name):
return name.replace("seedTracks", "").replace("Seeds", "TrackCandidates")
# Derive the track-candidate input names from the seed-producer names.
# NOTE(review): ``map`` returns a list only on Python 2; under Python 3 these
# would need ``list(map(...))`` — confirm the target CMSSW Python version.
trackingNtuple.trackCandidates = map(_seedProdToTrackCands, _seedProducers)
trackingPhase1.toModify(trackingNtuple, trackCandidates=map(_seedProdToTrackCands, _seedProducers_trackingPhase1))
trackingPhase2PU140.toModify(trackingNtuple, trackCandidates=map(_seedProdToTrackCands, _seedProducers_trackingPhase2PU140))
trackingNtupleSequence = cms.Sequence()
# reproduce hits because they're not stored in RECO
if _includeHits:
    trackingNtupleSequence += (
        siPixelRecHits +
        siStripMatchedRecHits
    )
    # Phase-2 tracker has no strip detector: replace the strip rechits with
    # the phase-2 outer-tracker rechits in the era-specific copy.
    _phase2_trackingNtupleSequence = trackingNtupleSequence.copy()
    _phase2_trackingNtupleSequence.remove(siStripMatchedRecHits)
    _phase2_trackingNtupleSequence += (siPhase2RecHits)
    trackingPhase2PU140.toReplaceWith(trackingNtupleSequence, _phase2_trackingNtupleSequence)
if _includeSeeds:
    trackingNtupleSequence += trackingNtupleSeedSelectors
trackingNtupleSequence += (
    # sim information
    trackingParticlesIntime +
    simHitTPAssocProducer +
    tpClusterProducer +
    quickTrackAssociatorByHits +
    trackingParticleNumberOfLayersProducer +
    # ntuplizer
    trackingNtuple
)
# Phase-2 digi-sim link sources differ from the default configuration.
trackingPhase2PU140.toModify(trackingNtuple, # FIXME
    pixelDigiSimLink = cms.untracked.InputTag('simSiPixelDigis', "Pixel"),
    stripDigiSimLink = cms.untracked.InputTag(''),
    phase2OTSimLink = cms.untracked.InputTag('simSiPixelDigis', "Tracker")
)
| 43.888889
| 181
| 0.820837
|
import FWCore.ParameterSet.Config as cms
from RecoLocalTracker.Configuration.RecoLocalTracker_cff import *
from SimGeneral.TrackingAnalysis.simHitTPAssociation_cfi import *
from SimTracker.TrackerHitAssociation.tpClusterProducer_cfi import *
from SimTracker.TrackAssociatorProducers.quickTrackAssociatorByHits_cfi import *
from RecoTracker.TransientTrackingRecHit.TTRHBuilders_cff import *
from RecoLocalTracker.SiPixelRecHits.PixelCPEGeneric_cfi import *
from RecoLocalTracker.Phase2TrackerRecHits.Phase2TrackerRecHits_cfi import *
from Geometry.TrackerNumberingBuilder.trackerTopology_cfi import *
from Validation.RecoTrack.trackingNtuple_cfi import *
from Validation.RecoTrack.TrackValidation_cff import *
from SimGeneral.TrackingAnalysis.trackingParticleNumberOfLayersProducer_cff import *
import Validation.RecoTrack.TrackValidation_cff as _TrackValidation_cff
_includeHits = True
_includeSeeds = True
_includeMVA = True
_includeTrackingParticles = True
from CommonTools.RecoAlgos.trackingParticleRefSelector_cfi import trackingParticleRefSelector as _trackingParticleRefSelector
trackingParticlesIntime = _trackingParticleRefSelector.clone(
signalOnly = False,
intimeOnly = True,
chargedOnly = False,
tip = 1e5,
lip = 1e5,
minRapidity = -10,
maxRapidity = 10,
ptMin = 0,
)
trackingNtuple.trackingParticles = "trackingParticlesIntime"
trackingNtuple.trackingParticlesRef = True
trackingNtuple.includeAllHits = _includeHits
trackingNtuple.includeSeeds = _includeSeeds
trackingNtuple.includeMVA = _includeMVA
trackingNtuple.includeTrackingParticles = _includeTrackingParticles
def _filterForNtuple(lst):
ret = []
for item in lst:
if "PreSplitting" in item:
continue
if "SeedsA" in item and item.replace("SeedsA", "SeedsB") in lst:
ret.append(item.replace("SeedsA", "Seeds"))
continue
if "SeedsB" in item:
continue
if "SeedsPair" in item and item.replace("SeedsPair", "SeedsTripl") in lst:
ret.append(item.replace("SeedsPair", "Seeds"))
continue
if "SeedsTripl" in item:
continue
ret.append(item)
return ret
_seedProducers = _filterForNtuple(_TrackValidation_cff._seedProducers)
_seedProducers_trackingPhase1 = _filterForNtuple(_TrackValidation_cff._seedProducers_trackingPhase1)
_seedProducers_trackingPhase2PU140 = _filterForNtuple(_TrackValidation_cff._seedProducers_trackingPhase2PU140)
(_seedSelectors, trackingNtupleSeedSelectors) = _TrackValidation_cff._addSeedToTrackProducers(_seedProducers, globals())
(_seedSelectors_trackingPhase1, _trackingNtupleSeedSelectors_trackingPhase1) = _TrackValidation_cff._addSeedToTrackProducers(_seedProducers_trackingPhase1, globals())
(_seedSelectors_trackingPhase2PU140, _trackingNtupleSeedSelectors_trackingPhase2PU140) = _TrackValidation_cff._addSeedToTrackProducers(_seedProducers_trackingPhase2PU140, globals())
from Configuration.Eras.Modifier_trackingPhase1_cff import trackingPhase1
from Configuration.Eras.Modifier_trackingPhase2PU140_cff import trackingPhase2PU140
trackingPhase1.toReplaceWith(trackingNtupleSeedSelectors, _trackingNtupleSeedSelectors_trackingPhase1)
trackingPhase2PU140.toReplaceWith(trackingNtupleSeedSelectors, _trackingNtupleSeedSelectors_trackingPhase2PU140)
trackingNtuple.seedTracks = _seedSelectors
trackingPhase1.toModify(trackingNtuple, seedTracks = _seedSelectors_trackingPhase1)
trackingPhase2PU140.toModify(trackingNtuple, seedTracks = _seedSelectors_trackingPhase2PU140)
def _seedProdToTrackCands(name):
return name.replace("seedTracks", "").replace("Seeds", "TrackCandidates")
trackingNtuple.trackCandidates = map(_seedProdToTrackCands, _seedProducers)
trackingPhase1.toModify(trackingNtuple, trackCandidates=map(_seedProdToTrackCands, _seedProducers_trackingPhase1))
trackingPhase2PU140.toModify(trackingNtuple, trackCandidates=map(_seedProdToTrackCands, _seedProducers_trackingPhase2PU140))
trackingNtupleSequence = cms.Sequence()
if _includeHits:
trackingNtupleSequence += (
siPixelRecHits +
siStripMatchedRecHits
)
_phase2_trackingNtupleSequence = trackingNtupleSequence.copy()
_phase2_trackingNtupleSequence.remove(siStripMatchedRecHits)
_phase2_trackingNtupleSequence += (siPhase2RecHits)
trackingPhase2PU140.toReplaceWith(trackingNtupleSequence, _phase2_trackingNtupleSequence)
if _includeSeeds:
trackingNtupleSequence += trackingNtupleSeedSelectors
trackingNtupleSequence += (
# sim information
trackingParticlesIntime +
simHitTPAssocProducer +
tpClusterProducer +
quickTrackAssociatorByHits +
trackingParticleNumberOfLayersProducer +
# ntuplizer
trackingNtuple
)
trackingPhase2PU140.toModify(trackingNtuple, # FIXME
pixelDigiSimLink = cms.untracked.InputTag('simSiPixelDigis', "Pixel"),
stripDigiSimLink = cms.untracked.InputTag(''),
phase2OTSimLink = cms.untracked.InputTag('simSiPixelDigis', "Tracker")
)
| true
| true
|
1c425048b7f3e854ac1d0a5ee45198dc6db9d2bb
| 687
|
py
|
Python
|
pycontour/fea/setup.py
|
PingjunChen/pycontour
|
13f64b685740368605db314b0f547f9f8dd4e737
|
[
"BSD-3-Clause"
] | 8
|
2018-08-30T13:13:48.000Z
|
2021-07-24T08:49:35.000Z
|
pycontour/fea/setup.py
|
PingjunChen/pycontour
|
13f64b685740368605db314b0f547f9f8dd4e737
|
[
"BSD-3-Clause"
] | 1
|
2021-09-21T04:11:39.000Z
|
2021-09-21T04:11:39.000Z
|
pycontour/fea/setup.py
|
PingjunChen/pycontour
|
13f64b685740368605db314b0f547f9f8dd4e737
|
[
"BSD-3-Clause"
] | 4
|
2018-09-21T20:38:26.000Z
|
2021-09-17T21:45:37.000Z
|
# -*- coding: utf-8 -*-
import os
# Directory containing this setup.py; the numpy.distutils subpackage name is
# derived from it, so the file works unchanged if the package is renamed.
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
MODULE_NAME = os.path.basename(BASE_PATH)
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for this subpackage.

    ``MODULE_NAME`` (derived from the directory name) is used as the
    package name; ``parent_package`` and ``top_path`` are forwarded as-is.
    """
    # Import lazily so merely importing this module does not require
    # numpy.distutils.
    from numpy.distutils.misc_util import Configuration
    return Configuration(MODULE_NAME, parent_package, top_path)
if __name__ == '__main__':
    from numpy.distutils.core import setup
    # NOTE(review): metadata declares license='MIT' while the repository row
    # lists BSD-3-Clause — confirm which licence is intended.
    setup(maintainer='Pingjun Chen',
          maintainer_email='chenpingjun@gmx.com',
          description='Contour feature calculation.',
          url='https://github.com/PingjunChen/pycontour',
          license='MIT',
          **(configuration(top_path='').todict())
          )
| 27.48
| 65
| 0.679767
|
import os
BASE_PATH = os.path.abspath(os.path.dirname(__file__))
MODULE_NAME = os.path.basename(BASE_PATH)
def configuration(parent_package='', top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration(MODULE_NAME, parent_package, top_path)
return config
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(maintainer='Pingjun Chen',
maintainer_email='chenpingjun@gmx.com',
description='Contour feature calculation.',
url='https://github.com/PingjunChen/pycontour',
license='MIT',
**(configuration(top_path='').todict())
)
| true
| true
|
1c425108a80a803a1e821cb6eb1a16b0511dbce8
| 223
|
py
|
Python
|
python3/day_019/day-019-acquire_a_thread_lock-2.7.6.py
|
king-md/100DaysOfCode
|
ab2e2495e804663ca35f72bbc8d8ec06cb202fac
|
[
"MIT"
] | null | null | null |
python3/day_019/day-019-acquire_a_thread_lock-2.7.6.py
|
king-md/100DaysOfCode
|
ab2e2495e804663ca35f72bbc8d8ec06cb202fac
|
[
"MIT"
] | null | null | null |
python3/day_019/day-019-acquire_a_thread_lock-2.7.6.py
|
king-md/100DaysOfCode
|
ab2e2495e804663ca35f72bbc8d8ec06cb202fac
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python2
import threading, time
def takeANap():
    """Block the calling thread for five seconds, then print a wake-up note."""
    nap_seconds = 5
    time.sleep(nap_seconds)
    print('Wake up!')
print('Start of program.')
# Run takeANap() on a separate thread; start() returns immediately, so the
# final print executes before the 5-second nap finishes.
threadObj = threading.Thread(target=takeANap)
threadObj.start()
print('End of program.')
| 14.866667
| 45
| 0.695067
|
import threading, time
def takeANap():
time.sleep(5)
print('Wake up!')
print('Start of program.')
threadObj = threading.Thread(target=takeANap)
threadObj.start()
print('End of program.')
| true
| true
|
1c42529af57e0cf71cfef4106ffabcce7782fc6f
| 2,258
|
py
|
Python
|
ConverterValueEchchangePythonCourse/main.py
|
Sviatoslav-Lobanov/ConverterValueEchchangePythonCourse
|
1862835c22d7bb9269d7ed7a6b3f704a49c16ce1
|
[
"MIT"
] | null | null | null |
ConverterValueEchchangePythonCourse/main.py
|
Sviatoslav-Lobanov/ConverterValueEchchangePythonCourse
|
1862835c22d7bb9269d7ed7a6b3f704a49c16ce1
|
[
"MIT"
] | null | null | null |
ConverterValueEchchangePythonCourse/main.py
|
Sviatoslav-Lobanov/ConverterValueEchchangePythonCourse
|
1862835c22d7bb9269d7ed7a6b3f704a49c16ce1
|
[
"MIT"
] | null | null | null |
import telebot
from config import keys, TOKEN
from extensions import *
# Single bot instance; TOKEN comes from the local config module.
bot = telebot.TeleBot(TOKEN)
# (A commented-out echo handler used for early testing was removed.)
# Handler for /start and /help: replies with usage instructions (the reply
# text is Russian user-facing output and is left untouched).
# NOTE(review): the function name shadows the builtin ``help``; harmless here
# because it is only invoked through the bot dispatcher.
@bot.message_handler(commands=['start','help'])
def help(message: telebot.types.Message):
    text= 'Чтобы начать работу введите команду боту в следующем формате: \n <имя валюты> \
<в какую валюту первести> \
<количество перводимой валюты> \n Увидеть список всех доступных валют: /values'
    bot.reply_to(message,text)
# Handler for /values: lists every currency key from ``config.keys``,
# one per line, under a Russian header.
@bot.message_handler(commands=['values'])
def values(message: telebot.types.Message):
    text= 'Доступные валюты>'
    for key in keys.keys():
        text = "\n".join((text,key, ))
    bot.reply_to(message,text)
@bot.message_handler(content_types=['text', ])
def convert(message: telebot.types.Message):
    # Free-text handler: expects "<from> <to> <amount>", converts via
    # CryptoConverter and replies with the price.  (An earlier variant that
    # returned user errors as plain strings was dropped in favour of the
    # exception-based flow below.)
    try:
        values = message.text.split(' ')
        if len(values) != 3:
            raise APIException("Неверно введена команда, используйте /help")
        quote, base, amount = values
        total_base = CryptoConverter.get_price(quote, base, amount)
    except APIException as e:
        # Input/validation errors raised by our own code.
        bot.reply_to(message, f'Ошибка пользователя \n {e}')
    except Exception as e:
        # Anything unexpected: report it rather than crashing the poll loop.
        bot.reply_to(message, f'Не удалось обработать команду\n{e}')
    else:
        text = f'Цена {amount} {quote} в {base} - {total_base}'
        bot.reply_to(message, text)
# Start long-polling; blocks forever handling incoming updates.
bot.polling()
| 38.271186
| 137
| 0.649247
|
import telebot
from config import keys, TOKEN
from extensions import *
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start','help'])
def help(message: telebot.types.Message):
text= 'Чтобы начать работу введите команду боту в следующем формате: \n <имя валюты> \
<в какую валюту первести> \
<количество перводимой валюты> \n Увидеть список всех доступных валют: /values'
bot.reply_to(message,text)
@bot.message_handler(commands=['values'])
def values(message: telebot.types.Message):
text= 'Доступные валюты>'
for key in keys.keys():
text = "\n".join((text,key, ))
bot.reply_to(message,text)
@bot.message_handler(content_types=['text', ])
def convert(message: telebot.types.Message):
    try:
        values = message.text.split(' ')
        if len(values) != 3:
raise APIException("Неверно введена команда, используйте /help")
quote, base, amount = values
total_base = CryptoConverter.get_price(quote, base, amount)
except APIException as e:
bot.reply_to(message, f'Ошибка пользователя \n {e}')
except Exception as e:
bot.reply_to(message, f'Не удалось обработать команду\n{e}')
else:
text = f'Цена {amount} {quote} в {base} - {total_base}'
bot.reply_to(message, text)
bot.polling()
| true
| true
|
1c4252c046eb24dc133afc0a0de91116b45c5495
| 30,836
|
py
|
Python
|
ckan/tests/test_coding_standards.py
|
GlobalMaksimum/ckan
|
bdba078d26d485e75554ba9570e292ec480eb9e4
|
[
"Apache-2.0"
] | 1
|
2020-04-24T03:35:16.000Z
|
2020-04-24T03:35:16.000Z
|
ckan/tests/test_coding_standards.py
|
GlobalMaksimum/ckan
|
bdba078d26d485e75554ba9570e292ec480eb9e4
|
[
"Apache-2.0"
] | 7
|
2021-02-02T22:03:03.000Z
|
2021-06-22T02:13:00.000Z
|
ckan/tests/test_coding_standards.py
|
GlobalMaksimum/ckan
|
bdba078d26d485e75554ba9570e292ec480eb9e4
|
[
"Apache-2.0"
] | 3
|
2019-06-21T11:57:57.000Z
|
2020-01-20T12:36:38.000Z
|
# encoding: utf-8
u'''A module for coding standards tests.
These are tests that are not functional- or unit-testing any particular piece
of CKAN code, but are checking coding standards. For example: checking that
there are no errors in the Sphinx build, that there are no PEP8 problems,
etc.
'''
import ast
import io
import os
import os.path
import re
import subprocess
import sys
from six import text_type
from six.moves import xrange
# Preferred filesystem encoding as unicode, falling back to the default
# encoding when Python cannot determine one.
FILESYSTEM_ENCODING = text_type(
    sys.getfilesystemencoding() or sys.getdefaultencoding()
)
# Absolute directory of this module (Python 2: decode the bytes __file__ so
# every derived path below is unicode).
HERE = os.path.abspath(os.path.dirname(__file__.decode(FILESYSTEM_ENCODING)))
PROJECT_ROOT = os.path.normpath(os.path.join(HERE, u'..', u'..'))
# Directories which are ignored when checking Python source code files
IGNORED_DIRS = [
    u'ckan/include',
]
def walk_python_files():
    u'''
    Generator that yields all CKAN Python source files.
    Yields 2-tuples ``(abs_name, rel_name)`` containing the filename in
    absolute and relative (to the project root) form.
    '''
    for abs_root, dirnames, filenames in os.walk(PROJECT_ROOT):
        rel_root = os.path.relpath(abs_root, PROJECT_ROOT)
        if rel_root == u'.':
            rel_root = u''
        # Prune hidden and explicitly ignored directories in place so that
        # os.walk never descends into them.
        dirnames[:] = [
            d for d in dirnames
            if not (d.startswith(u'.')
                    or os.path.join(rel_root, d) in IGNORED_DIRS)
        ]
        for filename in filenames:
            if filename.endswith(u'.py'):
                yield (os.path.join(abs_root, filename),
                       os.path.join(rel_root, filename))
def test_building_the_docs():
    u'''There should be no warnings or errors when building the Sphinx docs.
    This test will also fail if build_sphinx exits with non-zero status.
    '''
    # Run the Sphinx build via setup.py, folding stderr into stdout so a
    # single scan catches everything.
    try:
        output = subprocess.check_output(
            [b'python',
             b'setup.py',
             b'build_sphinx'],
            stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError as err:
        assert False, (
            u"Building the docs failed with return code: {code}".format(
                code=err.returncode))
    output_lines = output.split(u'\n')
    # Any ERROR line fails the test outright.
    errors = [line for line in output_lines if u'ERROR' in line]
    if errors:
        assert False, (u"Don't add any errors to the Sphinx build: "
                       u"{errors}".format(errors=errors))
    warnings = [line for line in output_lines if u'WARNING' in line]
    # Some warnings have been around for a long time and aren't easy to fix.
    # These are allowed, but no more should be added.
    allowed_warnings = [
        u'WARNING: duplicate label ckan.auth.create_user_via_web',
        u'WARNING: duplicate label ckan.auth.create_unowned_dataset',
        u'WARNING: duplicate label ckan.auth.user_create_groups',
        u'WARNING: duplicate label ckan.auth.anon_create_dataset',
        u'WARNING: duplicate label ckan.auth.user_delete_organizations',
        u'WARNING: duplicate label ckan.auth.create_user_via_api',
        u'WARNING: duplicate label ckan.auth.create_dataset_if_not_in_organization',
        u'WARNING: duplicate label ckan.auth.user_delete_groups',
        u'WARNING: duplicate label ckan.auth.user_create_organizations',
        u'WARNING: duplicate label ckan.auth.roles_that_cascade_to_sub_groups',
        u'WARNING: duplicate label ckan.auth.public_user_details',
    ]
    # Remove the allowed warnings from the list of collected warnings.
    # Be sure to only remove one warning for each allowed warning.
    warnings_to_remove = []
    for allowed_warning in allowed_warnings:
        for warning in warnings:
            if allowed_warning in warning:
                warnings_to_remove.append(warning)
                break
    new_warnings = [warning for warning in warnings
                    if warning not in warnings_to_remove]
    if new_warnings:
        assert False, (u"Don't add any new warnings to the Sphinx build: "
                       u"{warnings}".format(warnings=new_warnings))
def test_source_files_specify_encoding():
    u'''
    Test that *.py files have a PEP 263 UTF-8 encoding specification.
    Empty files and files that only contain comments are ignored.
    '''
    encoding_re = re.compile(u'#.*?coding[:=][ \\t]*utf-?8')
    no_specification = []
    decode_errors = []
    for abs_path, rel_path in walk_python_files():
        try:
            with io.open(abs_path, encoding=u'utf-8') as source:
                for raw_line in source:
                    stripped = raw_line.strip()
                    if encoding_re.match(stripped):
                        # Declaration found before any code: file is fine.
                        break
                    if stripped and not stripped.startswith(u'#'):
                        # First real code line reached with no declaration.
                        no_specification.append(rel_path)
                        break
        except UnicodeDecodeError:
            decode_errors.append(rel_path)
    msgs = []
    if no_specification:
        msgs.append(
            u'The following files are missing an encoding specification: '
            u'{}'.format(no_specification)
        )
    if decode_errors:
        msgs.append(
            u'The following files are not valid UTF-8: '
            u'{}'.format(decode_errors)
        )
    if msgs:
        assert False, u'\n\n'.join(msgs)
def renumerate(it):
    u'''
    Reverse enumerate.
    Yields tuples ``(i, x)`` where ``x`` are the items of ``it`` in
    reverse order and ``i`` is the corresponding (decreasing) index.
    ``it`` must support ``len``.
    '''
    top_index = len(it) - 1
    return zip(xrange(top_index, -1, -1), reversed(it))
def find_unprefixed_string_literals(filename):
    u'''
    Find unprefixed string literals in a Python source file.
    Returns a list of ``(line_number, column)`` tuples (both 1-based) of
    positions where string literals without a ``u`` or ``b`` prefix
    start.
    Note: Due to limitations in Python's ``ast`` module this does not
    check the rear parts of auto-concatenated string literals
    (``'foo' 'bar'``).
    '''
    with io.open(filename, encoding=u'utf-8') as f:
        lines = f.readlines()
    # In some versions of Python, the ast module cannot deal with
    # encoding declarations (http://bugs.python.org/issue22221). We
    # therefore replace all comment lines at the beginning of the file
    # with empty lines (to keep the line numbers correct).
    for i, line in enumerate(lines):
        line = line.strip()
        if line.startswith(u'#'):
            lines[i] = u'\n'
        elif line:
            break
    root = ast.parse(u''.join(lines), filename.encode(FILESYSTEM_ENCODING))
    problems = []
    for node in ast.walk(root):
        if isinstance(node, ast.Str):
            # Convert to 0-based indices into ``lines``.
            lineno = node.lineno - 1
            col_offset = node.col_offset
            if col_offset == -1:
                # `lineno` and `col_offset` are broken for literals that span
                # multiple lines: For these, `lineno` contains the line of the
                # *closing* quotes, and `col_offset` is always -1, see
                # https://bugs.python.org/issue16806. We therefore have to
                # find the start of the literal manually, which is difficult
                # since '''-literals can contain """ and vice versa. The
                # following code assumes that no ''' or """ literal begins on
                # the same line where a multi-line literal ends.
                last_line = lines[lineno]
                if last_line.rfind(u'"""') > last_line.rfind(u"'''"):
                    quotes = u'"""'
                else:
                    quotes = u"'''"
                # Scan backwards for the opening quotes; account for an
                # optional 1- or 2-character prefix (r/b/u or 'ur').
                for lineno, line in renumerate(lines[:lineno]):
                    try:
                        i = line.rindex(quotes)
                        if (i > 1) and (line[i - 2:i].lower() == u'ur'):
                            col_offset = i - 2
                        elif (i > 0) and (line[i - 1].lower() in u'rbu'):
                            col_offset = i - 1
                        else:
                            col_offset = 0
                        break
                    except ValueError:
                        continue
            # Inspect the character(s) immediately before the literal to
            # decide whether it carries an acceptable prefix.
            leading = lines[lineno][col_offset - 1:col_offset + 1]
            if leading[:-1] == u'[':  # data['id'] is unambiguous, ignore these
                continue
            if leading[-1:] not in u'ub':  # Don't allow capital U and B either
                problems.append((lineno + 1, col_offset + 1))
    return sorted(problems)
# List of files white-listed for the string literal prefix test. Files on the
# list are expected to be fixed over time and removed from the list. DO NOT ADD
# NEW FILES TO THE LIST.
_STRING_LITERALS_WHITELIST = [
u'bin/running_stats.py',
u'ckan/__init__.py',
u'ckan/authz.py',
u'ckan/ckan_nose_plugin.py',
u'ckan/config/environment.py',
u'ckan/config/install.py',
u'ckan/config/middleware/__init__.py',
u'ckan/config/middleware/common_middleware.py',
u'ckan/config/middleware/flask_app.py',
u'ckan/config/middleware/pylons_app.py',
u'ckan/config/routing.py',
u'ckan/controllers/admin.py',
u'ckan/controllers/api.py',
u'ckan/controllers/error.py',
u'ckan/controllers/feed.py',
u'ckan/controllers/group.py',
u'ckan/controllers/home.py',
u'ckan/controllers/organization.py',
u'ckan/controllers/package.py',
u'ckan/controllers/partyline.py',
u'ckan/controllers/revision.py',
u'ckan/controllers/tag.py',
u'ckan/controllers/user.py',
u'ckan/controllers/util.py',
u'ckan/exceptions.py',
u'ckan/i18n/check_po_files.py',
u'ckan/lib/activity_streams.py',
u'ckan/lib/activity_streams_session_extension.py',
u'ckan/lib/app_globals.py',
u'ckan/lib/auth_tkt.py',
u'ckan/lib/authenticator.py',
u'ckan/lib/base.py',
u'ckan/lib/captcha.py',
u'ckan/lib/cli.py',
u'ckan/lib/config_tool.py',
u'ckan/lib/create_test_data.py',
u'ckan/lib/datapreview.py',
u'ckan/lib/dictization/__init__.py',
u'ckan/lib/dictization/model_dictize.py',
u'ckan/lib/dictization/model_save.py',
u'ckan/lib/email_notifications.py',
u'ckan/lib/extract.py',
u'ckan/lib/fanstatic_extensions.py',
u'ckan/lib/fanstatic_resources.py',
u'ckan/lib/formatters.py',
u'ckan/lib/hash.py',
u'ckan/lib/helpers.py',
u'ckan/lib/i18n.py',
u'ckan/lib/jinja_extensions.py',
u'ckan/lib/jsonp.py',
u'ckan/lib/mailer.py',
u'ckan/lib/maintain.py',
u'ckan/lib/munge.py',
u'ckan/lib/navl/__init__.py',
u'ckan/lib/navl/dictization_functions.py',
u'ckan/lib/navl/validators.py',
u'ckan/lib/plugins.py',
u'ckan/lib/render.py',
u'ckan/lib/search/__init__.py',
u'ckan/lib/search/common.py',
u'ckan/lib/search/index.py',
u'ckan/lib/search/query.py',
u'ckan/lib/search/sql.py',
u'ckan/lib/uploader.py',
u'ckan/logic/__init__.py',
u'ckan/logic/action/__init__.py',
u'ckan/logic/action/create.py',
u'ckan/logic/action/delete.py',
u'ckan/logic/action/get.py',
u'ckan/logic/action/patch.py',
u'ckan/logic/action/update.py',
u'ckan/logic/auth/__init__.py',
u'ckan/logic/auth/create.py',
u'ckan/logic/auth/delete.py',
u'ckan/logic/auth/get.py',
u'ckan/logic/auth/patch.py',
u'ckan/logic/auth/update.py',
u'ckan/logic/converters.py',
u'ckan/logic/schema.py',
u'ckan/logic/validators.py',
u'ckan/migration/manage.py',
u'ckan/migration/versions/001_add_existing_tables.py',
u'ckan/migration/versions/002_add_author_and_maintainer.py',
u'ckan/migration/versions/003_add_user_object.py',
u'ckan/migration/versions/004_add_group_object.py',
u'ckan/migration/versions/005_add_authorization_tables.py',
u'ckan/migration/versions/006_add_ratings.py',
u'ckan/migration/versions/007_add_system_roles.py',
u'ckan/migration/versions/008_update_vdm_ids.py',
u'ckan/migration/versions/009_add_creation_timestamps.py',
u'ckan/migration/versions/010_add_user_about.py',
u'ckan/migration/versions/011_add_package_search_vector.py',
u'ckan/migration/versions/012_add_resources.py',
u'ckan/migration/versions/013_add_hash.py',
u'ckan/migration/versions/014_hash_2.py',
u'ckan/migration/versions/015_remove_state_object.py',
u'ckan/migration/versions/016_uuids_everywhere.py',
u'ckan/migration/versions/017_add_pkg_relationships.py',
u'ckan/migration/versions/018_adjust_licenses.py',
u'ckan/migration/versions/019_pkg_relationships_state.py',
u'ckan/migration/versions/020_add_changeset.py',
u'ckan/migration/versions/022_add_group_extras.py',
u'ckan/migration/versions/023_add_harvesting.py',
u'ckan/migration/versions/024_add_harvested_document.py',
u'ckan/migration/versions/025_add_authorization_groups.py',
u'ckan/migration/versions/026_authorization_group_user_pk.py',
u'ckan/migration/versions/027_adjust_harvester.py',
u'ckan/migration/versions/028_drop_harvest_source_status.py',
u'ckan/migration/versions/029_version_groups.py',
u'ckan/migration/versions/030_additional_user_attributes.py',
u'ckan/migration/versions/031_move_openid_to_new_field.py',
u'ckan/migration/versions/032_add_extra_info_field_to_resources.py',
u'ckan/migration/versions/033_auth_group_user_id_add_conditional.py',
u'ckan/migration/versions/034_resource_group_table.py',
u'ckan/migration/versions/035_harvesting_doc_versioning.py',
u'ckan/migration/versions/036_lockdown_roles.py',
u'ckan/migration/versions/037_role_anon_editor.py',
u'ckan/migration/versions/038_delete_migration_tables.py',
u'ckan/migration/versions/039_add_expired_id_and_dates.py',
u'ckan/migration/versions/040_reset_key_on_user.py',
u'ckan/migration/versions/041_resource_new_fields.py',
u'ckan/migration/versions/042_user_revision_indexes.py',
u'ckan/migration/versions/043_drop_postgres_search.py',
u'ckan/migration/versions/044_add_task_status.py',
u'ckan/migration/versions/045_user_name_unique.py',
u'ckan/migration/versions/046_drop_changesets.py',
u'ckan/migration/versions/047_rename_package_group_member.py',
u'ckan/migration/versions/048_add_activity_streams_tables.py',
u'ckan/migration/versions/049_add_group_approval_status.py',
u'ckan/migration/versions/050_term_translation_table.py',
u'ckan/migration/versions/051_add_tag_vocabulary.py',
u'ckan/migration/versions/052_update_member_capacities.py',
u'ckan/migration/versions/053_add_group_logo.py',
u'ckan/migration/versions/054_add_resource_created_date.py',
u'ckan/migration/versions/055_update_user_and_activity_detail.py',
u'ckan/migration/versions/056_add_related_table.py',
u'ckan/migration/versions/057_tracking.py',
u'ckan/migration/versions/058_add_follower_tables.py',
u'ckan/migration/versions/059_add_related_count_and_flag.py',
u'ckan/migration/versions/060_add_system_info_table.py',
u'ckan/migration/versions/061_add_follower__group_table.py',
u'ckan/migration/versions/062_add_dashboard_table.py',
u'ckan/migration/versions/063_org_changes.py',
u'ckan/migration/versions/064_add_email_last_sent_column.py',
u'ckan/migration/versions/065_add_email_notifications_preference.py',
u'ckan/migration/versions/066_default_package_type.py',
u'ckan/migration/versions/067_turn_extras_to_strings.py',
u'ckan/migration/versions/068_add_package_extras_index.py',
u'ckan/migration/versions/069_resource_url_and_metadata_modified.py',
u'ckan/migration/versions/070_add_activity_and_resource_indexes.py',
u'ckan/migration/versions/071_add_state_column_to_user_table.py',
u'ckan/migration/versions/072_add_resource_view.py',
u'ckan/migration/versions/073_update_resource_view_resource_id_constraint.py',
u'ckan/migration/versions/074_remove_resource_groups.py',
u'ckan/migration/versions/075_rename_view_plugins.py',
u'ckan/migration/versions/076_rename_view_plugins_2.py',
u'ckan/migration/versions/077_add_revisions_to_system_info.py',
u'ckan/migration/versions/078_remove_old_authz_model.py',
u'ckan/migration/versions/079_resource_revision_index.py',
u'ckan/migration/versions/080_continuity_id_indexes.py',
u'ckan/migration/versions/081_set_datastore_active.py',
u'ckan/migration/versions/082_create_index_creator_user_id.py',
u'ckan/migration/versions/083_remove_related_items.py',
u'ckan/migration/versions/084_add_metadata_created.py',
u'ckan/model/__init__.py',
u'ckan/model/activity.py',
u'ckan/model/core.py',
u'ckan/model/dashboard.py',
u'ckan/model/domain_object.py',
u'ckan/model/extension.py',
u'ckan/model/follower.py',
u'ckan/model/group.py',
u'ckan/model/group_extra.py',
u'ckan/model/license.py',
u'ckan/model/meta.py',
u'ckan/model/misc.py',
u'ckan/model/modification.py',
u'ckan/model/package.py',
u'ckan/model/package_extra.py',
u'ckan/model/package_relationship.py',
u'ckan/model/rating.py',
u'ckan/model/resource.py',
u'ckan/model/resource_view.py',
u'ckan/model/system_info.py',
u'ckan/model/tag.py',
u'ckan/model/task_status.py',
u'ckan/model/term_translation.py',
u'ckan/model/tracking.py',
u'ckan/model/types.py',
u'ckan/model/user.py',
u'ckan/model/vocabulary.py',
u'ckan/pastertemplates/__init__.py',
u'ckan/plugins/core.py',
u'ckan/plugins/toolkit.py',
u'ckan/plugins/toolkit_sphinx_extension.py',
u'ckan/tests/config/test_environment.py',
u'ckan/tests/config/test_middleware.py',
u'ckan/tests/controllers/__init__.py',
u'ckan/tests/controllers/test_admin.py',
u'ckan/tests/controllers/test_api.py',
u'ckan/tests/controllers/test_feed.py',
u'ckan/tests/controllers/test_group.py',
u'ckan/tests/controllers/test_home.py',
u'ckan/tests/controllers/test_organization.py',
u'ckan/tests/controllers/test_package.py',
u'ckan/tests/controllers/test_user.py',
u'ckan/tests/controllers/test_util.py',
u'ckan/tests/factories.py',
u'ckan/tests/helpers.py',
u'ckan/tests/i18n/test_check_po_files.py',
u'ckan/tests/legacy/__init__.py',
u'ckan/tests/legacy/ckantestplugins.py',
u'ckan/tests/legacy/functional/api/__init__.py',
u'ckan/tests/legacy/functional/api/base.py',
u'ckan/tests/legacy/functional/api/model/test_group.py',
u'ckan/tests/legacy/functional/api/model/test_licenses.py',
u'ckan/tests/legacy/functional/api/model/test_package.py',
u'ckan/tests/legacy/functional/api/model/test_ratings.py',
u'ckan/tests/legacy/functional/api/model/test_relationships.py',
u'ckan/tests/legacy/functional/api/model/test_revisions.py',
u'ckan/tests/legacy/functional/api/model/test_tag.py',
u'ckan/tests/legacy/functional/api/model/test_vocabulary.py',
u'ckan/tests/legacy/functional/api/test_activity.py',
u'ckan/tests/legacy/functional/api/test_api.py',
u'ckan/tests/legacy/functional/api/test_dashboard.py',
u'ckan/tests/legacy/functional/api/test_email_notifications.py',
u'ckan/tests/legacy/functional/api/test_follow.py',
u'ckan/tests/legacy/functional/api/test_misc.py',
u'ckan/tests/legacy/functional/api/test_package_search.py',
u'ckan/tests/legacy/functional/api/test_resource.py',
u'ckan/tests/legacy/functional/api/test_resource_search.py',
u'ckan/tests/legacy/functional/api/test_user.py',
u'ckan/tests/legacy/functional/api/test_util.py',
u'ckan/tests/legacy/functional/test_activity.py',
u'ckan/tests/legacy/functional/test_admin.py',
u'ckan/tests/legacy/functional/test_error.py',
u'ckan/tests/legacy/functional/test_group.py',
u'ckan/tests/legacy/functional/test_package.py',
u'ckan/tests/legacy/functional/test_pagination.py',
u'ckan/tests/legacy/functional/test_preview_interface.py',
u'ckan/tests/legacy/functional/test_revision.py',
u'ckan/tests/legacy/functional/test_tag.py',
u'ckan/tests/legacy/functional/test_tracking.py',
u'ckan/tests/legacy/functional/test_user.py',
u'ckan/tests/legacy/html_check.py',
u'ckan/tests/legacy/lib/__init__.py',
u'ckan/tests/legacy/lib/test_authenticator.py',
u'ckan/tests/legacy/lib/test_cli.py',
u'ckan/tests/legacy/lib/test_dictization.py',
u'ckan/tests/legacy/lib/test_dictization_schema.py',
u'ckan/tests/legacy/lib/test_email_notifications.py',
u'ckan/tests/legacy/lib/test_hash.py',
u'ckan/tests/legacy/lib/test_helpers.py',
u'ckan/tests/legacy/lib/test_i18n.py',
u'ckan/tests/legacy/lib/test_navl.py',
u'ckan/tests/legacy/lib/test_resource_search.py',
u'ckan/tests/legacy/lib/test_simple_search.py',
u'ckan/tests/legacy/lib/test_solr_package_search.py',
u'ckan/tests/legacy/lib/test_solr_package_search_synchronous_update.py',
u'ckan/tests/legacy/lib/test_solr_schema_version.py',
u'ckan/tests/legacy/lib/test_solr_search_index.py',
u'ckan/tests/legacy/lib/test_tag_search.py',
u'ckan/tests/legacy/logic/test_action.py',
u'ckan/tests/legacy/logic/test_auth.py',
u'ckan/tests/legacy/logic/test_init.py',
u'ckan/tests/legacy/logic/test_member.py',
u'ckan/tests/legacy/logic/test_tag.py',
u'ckan/tests/legacy/logic/test_tag_vocab.py',
u'ckan/tests/legacy/logic/test_validators.py',
u'ckan/tests/legacy/misc/test_format_text.py',
u'ckan/tests/legacy/misc/test_mock_mail_server.py',
u'ckan/tests/legacy/misc/test_sync.py',
u'ckan/tests/legacy/mock_mail_server.py',
u'ckan/tests/legacy/mock_plugin.py',
u'ckan/tests/legacy/models/test_activity.py',
u'ckan/tests/legacy/models/test_extras.py',
u'ckan/tests/legacy/models/test_follower.py',
u'ckan/tests/legacy/models/test_group.py',
u'ckan/tests/legacy/models/test_misc.py',
u'ckan/tests/legacy/models/test_package.py',
u'ckan/tests/legacy/models/test_package_relationships.py',
u'ckan/tests/legacy/models/test_purge_revision.py',
u'ckan/tests/legacy/models/test_resource.py',
u'ckan/tests/legacy/models/test_revision.py',
u'ckan/tests/legacy/models/test_user.py',
u'ckan/tests/legacy/schema/test_schema.py',
u'ckan/tests/legacy/test_coding_standards.py',
u'ckan/tests/legacy/test_plugins.py',
u'ckan/tests/legacy/test_versions.py',
u'ckan/tests/lib/__init__.py',
u'ckan/tests/lib/dictization/test_model_dictize.py',
u'ckan/tests/lib/navl/test_dictization_functions.py',
u'ckan/tests/lib/navl/test_validators.py',
u'ckan/tests/lib/search/test_index.py',
u'ckan/tests/lib/test_app_globals.py',
u'ckan/tests/lib/test_auth_tkt.py',
u'ckan/tests/lib/test_base.py',
u'ckan/tests/lib/test_cli.py',
u'ckan/tests/lib/test_config_tool.py',
u'ckan/tests/lib/test_datapreview.py',
u'ckan/tests/lib/test_helpers.py',
u'ckan/tests/lib/test_mailer.py',
u'ckan/tests/lib/test_munge.py',
u'ckan/tests/lib/test_navl.py',
u'ckan/tests/logic/action/__init__.py',
u'ckan/tests/logic/action/test_create.py',
u'ckan/tests/logic/action/test_delete.py',
u'ckan/tests/logic/action/test_get.py',
u'ckan/tests/logic/action/test_patch.py',
u'ckan/tests/logic/action/test_update.py',
u'ckan/tests/logic/auth/__init__.py',
u'ckan/tests/logic/auth/test_create.py',
u'ckan/tests/logic/auth/test_delete.py',
u'ckan/tests/logic/auth/test_get.py',
u'ckan/tests/logic/auth/test_init.py',
u'ckan/tests/logic/auth/test_update.py',
u'ckan/tests/logic/test_conversion.py',
u'ckan/tests/logic/test_converters.py',
u'ckan/tests/logic/test_schema.py',
u'ckan/tests/logic/test_validators.py',
u'ckan/tests/migration/__init__.py',
u'ckan/tests/model/__init__.py',
u'ckan/tests/model/test_license.py',
u'ckan/tests/model/test_resource.py',
u'ckan/tests/model/test_resource_view.py',
u'ckan/tests/model/test_system_info.py',
u'ckan/tests/model/test_user.py',
u'ckan/tests/plugins/__init__.py',
u'ckan/tests/plugins/test_toolkit.py',
u'ckan/tests/test_authz.py',
u'ckan/tests/test_factories.py',
u'ckan/websetup.py',
u'ckanext/datapusher/cli.py',
u'ckanext/datapusher/helpers.py',
u'ckanext/datapusher/interfaces.py',
u'ckanext/datapusher/logic/action.py',
u'ckanext/datapusher/logic/schema.py',
u'ckanext/datapusher/plugin.py',
u'ckanext/datapusher/tests/test.py',
u'ckanext/datapusher/tests/test_action.py',
u'ckanext/datapusher/tests/test_default_views.py',
u'ckanext/datapusher/tests/test_interfaces.py',
u'ckanext/datastore/controller.py',
u'ckanext/datastore/helpers.py',
u'ckanext/datastore/backend/postgres.py',
u'ckanext/datastore/interfaces.py',
u'ckanext/datastore/logic/action.py',
u'ckanext/datastore/logic/auth.py',
u'ckanext/datastore/logic/schema.py',
u'ckanext/datastore/plugin.py',
u'ckanext/datastore/tests/helpers.py',
u'ckanext/datastore/tests/sample_datastore_plugin.py',
u'ckanext/datastore/tests/test_configure.py',
u'ckanext/datastore/tests/test_create.py',
u'ckanext/datastore/tests/test_db.py',
u'ckanext/datastore/tests/test_delete.py',
u'ckanext/datastore/tests/test_disable.py',
u'ckanext/datastore/tests/test_dump.py',
u'ckanext/datastore/tests/test_helpers.py',
u'ckanext/datastore/tests/test_info.py',
u'ckanext/datastore/tests/test_interface.py',
u'ckanext/datastore/tests/test_plugin.py',
u'ckanext/datastore/tests/test_search.py',
u'ckanext/datastore/tests/test_unit.py',
u'ckanext/datastore/tests/test_upsert.py',
u'ckanext/example_iauthfunctions/plugin_v2.py',
u'ckanext/example_iauthfunctions/plugin_v3.py',
u'ckanext/example_iauthfunctions/plugin_v4.py',
u'ckanext/example_iauthfunctions/plugin_v5_custom_config_setting.py',
u'ckanext/example_iauthfunctions/plugin_v6_parent_auth_functions.py',
u'ckanext/example_iauthfunctions/tests/test_example_iauthfunctions.py',
u'ckanext/example_iconfigurer/controller.py',
u'ckanext/example_iconfigurer/plugin.py',
u'ckanext/example_iconfigurer/plugin_v1.py',
u'ckanext/example_iconfigurer/plugin_v2.py',
u'ckanext/example_iconfigurer/tests/test_example_iconfigurer.py',
u'ckanext/example_iconfigurer/tests/test_iconfigurer_toolkit.py',
u'ckanext/example_iconfigurer/tests/test_iconfigurer_update_config.py',
u'ckanext/example_idatasetform/plugin.py',
u'ckanext/example_idatasetform/plugin_v1.py',
u'ckanext/example_idatasetform/plugin_v2.py',
u'ckanext/example_idatasetform/plugin_v3.py',
u'ckanext/example_idatasetform/plugin_v4.py',
u'ckanext/example_idatasetform/tests/test_controllers.py',
u'ckanext/example_idatasetform/tests/test_example_idatasetform.py',
u'ckanext/example_igroupform/plugin.py',
u'ckanext/example_igroupform/tests/test_controllers.py',
u'ckanext/example_iresourcecontroller/plugin.py',
u'ckanext/example_iresourcecontroller/tests/test_example_iresourcecontroller.py',
u'ckanext/example_itemplatehelpers/plugin.py',
u'ckanext/example_itranslation/plugin.py',
u'ckanext/example_itranslation/plugin_v1.py',
u'ckanext/example_itranslation/tests/test_plugin.py',
u'ckanext/example_iuploader/plugin.py',
u'ckanext/example_iuploader/test/test_plugin.py',
u'ckanext/example_ivalidators/plugin.py',
u'ckanext/example_ivalidators/tests/test_ivalidators.py',
u'ckanext/example_theme_docs/custom_config_setting/plugin.py',
u'ckanext/example_theme_docs/custom_emails/plugin.py',
u'ckanext/example_theme_docs/custom_emails/tests.py',
u'ckanext/example_theme_docs/v01_empty_extension/plugin.py',
u'ckanext/example_theme_docs/v02_empty_template/plugin.py',
u'ckanext/example_theme_docs/v03_jinja/plugin.py',
u'ckanext/example_theme_docs/v04_ckan_extends/plugin.py',
u'ckanext/example_theme_docs/v05_block/plugin.py',
u'ckanext/example_theme_docs/v06_super/plugin.py',
u'ckanext/example_theme_docs/v07_helper_function/plugin.py',
u'ckanext/example_theme_docs/v08_custom_helper_function/plugin.py',
u'ckanext/example_theme_docs/v09_snippet/plugin.py',
u'ckanext/example_theme_docs/v10_custom_snippet/plugin.py',
u'ckanext/example_theme_docs/v11_HTML_and_CSS/plugin.py',
u'ckanext/example_theme_docs/v12_extra_public_dir/plugin.py',
u'ckanext/example_theme_docs/v13_custom_css/plugin.py',
u'ckanext/example_theme_docs/v14_more_custom_css/plugin.py',
u'ckanext/example_theme_docs/v15_fanstatic/plugin.py',
u'ckanext/example_theme_docs/v16_initialize_a_javascript_module/plugin.py',
u'ckanext/example_theme_docs/v17_popover/plugin.py',
u'ckanext/example_theme_docs/v18_snippet_api/plugin.py',
u'ckanext/example_theme_docs/v19_01_error/plugin.py',
u'ckanext/example_theme_docs/v19_02_error_handling/plugin.py',
u'ckanext/example_theme_docs/v20_pubsub/plugin.py',
u'ckanext/example_theme_docs/v21_custom_jquery_plugin/plugin.py',
u'ckanext/imageview/plugin.py',
u'ckanext/imageview/tests/test_view.py',
u'ckanext/multilingual/plugin.py',
u'ckanext/multilingual/tests/test_multilingual_plugin.py',
u'ckanext/reclineview/plugin.py',
u'ckanext/reclineview/tests/test_view.py',
u'ckanext/resourceproxy/controller.py',
u'ckanext/resourceproxy/plugin.py',
u'ckanext/resourceproxy/tests/test_proxy.py',
u'ckanext/stats/__init__.py',
u'ckanext/stats/controller.py',
u'ckanext/stats/plugin.py',
u'ckanext/stats/stats.py',
u'ckanext/stats/tests/__init__.py',
u'ckanext/stats/tests/test_stats_lib.py',
u'ckanext/stats/tests/test_stats_plugin.py',
u'ckanext/test_tag_vocab_plugin.py',
u'ckanext/textview/plugin.py',
u'ckanext/textview/tests/test_view.py',
u'ckanext/webpageview/plugin.py',
u'ckanext/webpageview/tests/test_view.py',
u'doc/conf.py',
u'profile_tests.py',
u'setup.py',
]
def test_string_literals_are_prefixed():
u'''
Test that string literals are prefixed by ``u``, ``b`` or ``ur``.
See http://docs.ckan.org/en/latest/contributing/unicode.html.
'''
errors = []
for abs_path, rel_path in walk_python_files():
if rel_path in _STRING_LITERALS_WHITELIST:
continue
problems = find_unprefixed_string_literals(abs_path)
if problems:
errors.append((rel_path, problems))
if errors:
lines = [u'Unprefixed string literals:']
for filename, problems in errors:
lines.append(u' ' + filename)
for line_no, col_no in problems:
lines.append(u' line {}, column {}'.format(line_no, col_no))
raise AssertionError(u'\n'.join(lines))
| 43.677054
| 85
| 0.70395
|
import ast
import io
import os
import os.path
import re
import subprocess
import sys
from six import text_type
from six.moves import xrange
FILESYSTEM_ENCODING = text_type(
sys.getfilesystemencoding() or sys.getdefaultencoding()
)
HERE = os.path.abspath(os.path.dirname(__file__.decode(FILESYSTEM_ENCODING)))
PROJECT_ROOT = os.path.normpath(os.path.join(HERE, u'..', u'..'))
IGNORED_DIRS = [
u'ckan/include',
]
def walk_python_files():
def _is_dir_ignored(root, d):
if d.startswith(u'.'):
return True
return os.path.join(rel_root, d) in IGNORED_DIRS
for abs_root, dirnames, filenames in os.walk(PROJECT_ROOT):
rel_root = os.path.relpath(abs_root, PROJECT_ROOT)
if rel_root == u'.':
rel_root = u''
dirnames[:] = [d for d in dirnames if not _is_dir_ignored(rel_root, d)]
for filename in filenames:
if not filename.endswith(u'.py'):
continue
abs_name = os.path.join(abs_root, filename)
rel_name = os.path.join(rel_root, filename)
yield abs_name, rel_name
def test_building_the_docs():
try:
output = subprocess.check_output(
[b'python',
b'setup.py',
b'build_sphinx'],
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as err:
assert False, (
u"Building the docs failed with return code: {code}".format(
code=err.returncode))
output_lines = output.split(u'\n')
errors = [line for line in output_lines if u'ERROR' in line]
if errors:
assert False, (u"Don't add any errors to the Sphinx build: "
u"{errors}".format(errors=errors))
warnings = [line for line in output_lines if u'WARNING' in line]
# Some warnings have been around for a long time and aren't easy to fix.
allowed_warnings = [
u'WARNING: duplicate label ckan.auth.create_user_via_web',
u'WARNING: duplicate label ckan.auth.create_unowned_dataset',
u'WARNING: duplicate label ckan.auth.user_create_groups',
u'WARNING: duplicate label ckan.auth.anon_create_dataset',
u'WARNING: duplicate label ckan.auth.user_delete_organizations',
u'WARNING: duplicate label ckan.auth.create_user_via_api',
u'WARNING: duplicate label ckan.auth.create_dataset_if_not_in_organization',
u'WARNING: duplicate label ckan.auth.user_delete_groups',
u'WARNING: duplicate label ckan.auth.user_create_organizations',
u'WARNING: duplicate label ckan.auth.roles_that_cascade_to_sub_groups',
u'WARNING: duplicate label ckan.auth.public_user_details',
]
warnings_to_remove = []
for allowed_warning in allowed_warnings:
for warning in warnings:
if allowed_warning in warning:
warnings_to_remove.append(warning)
break
new_warnings = [warning for warning in warnings
if warning not in warnings_to_remove]
if new_warnings:
assert False, (u"Don't add any new warnings to the Sphinx build: "
u"{warnings}".format(warnings=new_warnings))
def test_source_files_specify_encoding():
pattern = re.compile(u'
decode_errors = []
no_specification = []
for abs_path, rel_path in walk_python_files():
try:
with io.open(abs_path, encoding=u'utf-8') as f:
for line in f:
line = line.strip()
if pattern.match(line):
# Pattern found
break
elif line and not line.startswith(u'
# File contains non-empty non-comment line
no_specification.append(rel_path)
break
except UnicodeDecodeError:
decode_errors.append(rel_path)
msgs = []
if no_specification:
msgs.append(
u'The following files are missing an encoding specification: '
u'{}'.format(no_specification)
)
if decode_errors:
msgs.append(
u'The following files are not valid UTF-8: '
u'{}'.format(decode_errors)
)
if msgs:
assert False, u'\n\n'.join(msgs)
def renumerate(it):
return zip(xrange(len(it) - 1, -1, -1), reversed(it))
def find_unprefixed_string_literals(filename):
with io.open(filename, encoding=u'utf-8') as f:
lines = f.readlines()
# In some versions of Python, the ast module cannot deal with
# encoding declarations (http://bugs.python.org/issue22221). We
# therefore replace all comment lines at the beginning of the file
# with empty lines (to keep the line numbers correct).
for i, line in enumerate(lines):
line = line.strip()
if line.startswith(u'
lines[i] = u'\n'
elif line:
break
root = ast.parse(u''.join(lines), filename.encode(FILESYSTEM_ENCODING))
problems = []
for node in ast.walk(root):
if isinstance(node, ast.Str):
lineno = node.lineno - 1
col_offset = node.col_offset
if col_offset == -1:
# `lineno` and `col_offset` are broken for literals that span
# multiple lines: For these, `lineno` contains the line of the
# *closing* quotes, and `col_offset` is always -1, see
# https://bugs.python.org/issue16806. We therefore have to
# find the start of the literal manually, which is difficult
# since '''-literals can contain """ and vice versa. The
# following code assumes that no ''' or """ literal begins on
# the same line where a multi-line literal ends.
last_line = lines[lineno]
if last_line.rfind(u'"""') > last_line.rfind(u"'''"):
quotes = u'"""'
else:
quotes = u"'''"
for lineno, line in renumerate(lines[:lineno]):
try:
i = line.rindex(quotes)
if (i > 1) and (line[i - 2:i].lower() == u'ur'):
col_offset = i - 2
elif (i > 0) and (line[i - 1].lower() in u'rbu'):
col_offset = i - 1
else:
col_offset = 0
break
except ValueError:
continue
leading = lines[lineno][col_offset - 1:col_offset + 1]
if leading[:-1] == u'[': # data['id'] is unambiguous, ignore these
continue
if leading[-1:] not in u'ub': # Don't allow capital U and B either
problems.append((lineno + 1, col_offset + 1))
return sorted(problems)
_STRING_LITERALS_WHITELIST = [
u'bin/running_stats.py',
u'ckan/__init__.py',
u'ckan/authz.py',
u'ckan/ckan_nose_plugin.py',
u'ckan/config/environment.py',
u'ckan/config/install.py',
u'ckan/config/middleware/__init__.py',
u'ckan/config/middleware/common_middleware.py',
u'ckan/config/middleware/flask_app.py',
u'ckan/config/middleware/pylons_app.py',
u'ckan/config/routing.py',
u'ckan/controllers/admin.py',
u'ckan/controllers/api.py',
u'ckan/controllers/error.py',
u'ckan/controllers/feed.py',
u'ckan/controllers/group.py',
u'ckan/controllers/home.py',
u'ckan/controllers/organization.py',
u'ckan/controllers/package.py',
u'ckan/controllers/partyline.py',
u'ckan/controllers/revision.py',
u'ckan/controllers/tag.py',
u'ckan/controllers/user.py',
u'ckan/controllers/util.py',
u'ckan/exceptions.py',
u'ckan/i18n/check_po_files.py',
u'ckan/lib/activity_streams.py',
u'ckan/lib/activity_streams_session_extension.py',
u'ckan/lib/app_globals.py',
u'ckan/lib/auth_tkt.py',
u'ckan/lib/authenticator.py',
u'ckan/lib/base.py',
u'ckan/lib/captcha.py',
u'ckan/lib/cli.py',
u'ckan/lib/config_tool.py',
u'ckan/lib/create_test_data.py',
u'ckan/lib/datapreview.py',
u'ckan/lib/dictization/__init__.py',
u'ckan/lib/dictization/model_dictize.py',
u'ckan/lib/dictization/model_save.py',
u'ckan/lib/email_notifications.py',
u'ckan/lib/extract.py',
u'ckan/lib/fanstatic_extensions.py',
u'ckan/lib/fanstatic_resources.py',
u'ckan/lib/formatters.py',
u'ckan/lib/hash.py',
u'ckan/lib/helpers.py',
u'ckan/lib/i18n.py',
u'ckan/lib/jinja_extensions.py',
u'ckan/lib/jsonp.py',
u'ckan/lib/mailer.py',
u'ckan/lib/maintain.py',
u'ckan/lib/munge.py',
u'ckan/lib/navl/__init__.py',
u'ckan/lib/navl/dictization_functions.py',
u'ckan/lib/navl/validators.py',
u'ckan/lib/plugins.py',
u'ckan/lib/render.py',
u'ckan/lib/search/__init__.py',
u'ckan/lib/search/common.py',
u'ckan/lib/search/index.py',
u'ckan/lib/search/query.py',
u'ckan/lib/search/sql.py',
u'ckan/lib/uploader.py',
u'ckan/logic/__init__.py',
u'ckan/logic/action/__init__.py',
u'ckan/logic/action/create.py',
u'ckan/logic/action/delete.py',
u'ckan/logic/action/get.py',
u'ckan/logic/action/patch.py',
u'ckan/logic/action/update.py',
u'ckan/logic/auth/__init__.py',
u'ckan/logic/auth/create.py',
u'ckan/logic/auth/delete.py',
u'ckan/logic/auth/get.py',
u'ckan/logic/auth/patch.py',
u'ckan/logic/auth/update.py',
u'ckan/logic/converters.py',
u'ckan/logic/schema.py',
u'ckan/logic/validators.py',
u'ckan/migration/manage.py',
u'ckan/migration/versions/001_add_existing_tables.py',
u'ckan/migration/versions/002_add_author_and_maintainer.py',
u'ckan/migration/versions/003_add_user_object.py',
u'ckan/migration/versions/004_add_group_object.py',
u'ckan/migration/versions/005_add_authorization_tables.py',
u'ckan/migration/versions/006_add_ratings.py',
u'ckan/migration/versions/007_add_system_roles.py',
u'ckan/migration/versions/008_update_vdm_ids.py',
u'ckan/migration/versions/009_add_creation_timestamps.py',
u'ckan/migration/versions/010_add_user_about.py',
u'ckan/migration/versions/011_add_package_search_vector.py',
u'ckan/migration/versions/012_add_resources.py',
u'ckan/migration/versions/013_add_hash.py',
u'ckan/migration/versions/014_hash_2.py',
u'ckan/migration/versions/015_remove_state_object.py',
u'ckan/migration/versions/016_uuids_everywhere.py',
u'ckan/migration/versions/017_add_pkg_relationships.py',
u'ckan/migration/versions/018_adjust_licenses.py',
u'ckan/migration/versions/019_pkg_relationships_state.py',
u'ckan/migration/versions/020_add_changeset.py',
u'ckan/migration/versions/022_add_group_extras.py',
u'ckan/migration/versions/023_add_harvesting.py',
u'ckan/migration/versions/024_add_harvested_document.py',
u'ckan/migration/versions/025_add_authorization_groups.py',
u'ckan/migration/versions/026_authorization_group_user_pk.py',
u'ckan/migration/versions/027_adjust_harvester.py',
u'ckan/migration/versions/028_drop_harvest_source_status.py',
u'ckan/migration/versions/029_version_groups.py',
u'ckan/migration/versions/030_additional_user_attributes.py',
u'ckan/migration/versions/031_move_openid_to_new_field.py',
u'ckan/migration/versions/032_add_extra_info_field_to_resources.py',
u'ckan/migration/versions/033_auth_group_user_id_add_conditional.py',
u'ckan/migration/versions/034_resource_group_table.py',
u'ckan/migration/versions/035_harvesting_doc_versioning.py',
u'ckan/migration/versions/036_lockdown_roles.py',
u'ckan/migration/versions/037_role_anon_editor.py',
u'ckan/migration/versions/038_delete_migration_tables.py',
u'ckan/migration/versions/039_add_expired_id_and_dates.py',
u'ckan/migration/versions/040_reset_key_on_user.py',
u'ckan/migration/versions/041_resource_new_fields.py',
u'ckan/migration/versions/042_user_revision_indexes.py',
u'ckan/migration/versions/043_drop_postgres_search.py',
u'ckan/migration/versions/044_add_task_status.py',
u'ckan/migration/versions/045_user_name_unique.py',
u'ckan/migration/versions/046_drop_changesets.py',
u'ckan/migration/versions/047_rename_package_group_member.py',
u'ckan/migration/versions/048_add_activity_streams_tables.py',
u'ckan/migration/versions/049_add_group_approval_status.py',
u'ckan/migration/versions/050_term_translation_table.py',
u'ckan/migration/versions/051_add_tag_vocabulary.py',
u'ckan/migration/versions/052_update_member_capacities.py',
u'ckan/migration/versions/053_add_group_logo.py',
u'ckan/migration/versions/054_add_resource_created_date.py',
u'ckan/migration/versions/055_update_user_and_activity_detail.py',
u'ckan/migration/versions/056_add_related_table.py',
u'ckan/migration/versions/057_tracking.py',
u'ckan/migration/versions/058_add_follower_tables.py',
u'ckan/migration/versions/059_add_related_count_and_flag.py',
u'ckan/migration/versions/060_add_system_info_table.py',
u'ckan/migration/versions/061_add_follower__group_table.py',
u'ckan/migration/versions/062_add_dashboard_table.py',
u'ckan/migration/versions/063_org_changes.py',
u'ckan/migration/versions/064_add_email_last_sent_column.py',
u'ckan/migration/versions/065_add_email_notifications_preference.py',
u'ckan/migration/versions/066_default_package_type.py',
u'ckan/migration/versions/067_turn_extras_to_strings.py',
u'ckan/migration/versions/068_add_package_extras_index.py',
u'ckan/migration/versions/069_resource_url_and_metadata_modified.py',
u'ckan/migration/versions/070_add_activity_and_resource_indexes.py',
u'ckan/migration/versions/071_add_state_column_to_user_table.py',
u'ckan/migration/versions/072_add_resource_view.py',
u'ckan/migration/versions/073_update_resource_view_resource_id_constraint.py',
u'ckan/migration/versions/074_remove_resource_groups.py',
u'ckan/migration/versions/075_rename_view_plugins.py',
u'ckan/migration/versions/076_rename_view_plugins_2.py',
u'ckan/migration/versions/077_add_revisions_to_system_info.py',
u'ckan/migration/versions/078_remove_old_authz_model.py',
u'ckan/migration/versions/079_resource_revision_index.py',
u'ckan/migration/versions/080_continuity_id_indexes.py',
u'ckan/migration/versions/081_set_datastore_active.py',
u'ckan/migration/versions/082_create_index_creator_user_id.py',
u'ckan/migration/versions/083_remove_related_items.py',
u'ckan/migration/versions/084_add_metadata_created.py',
u'ckan/model/__init__.py',
u'ckan/model/activity.py',
u'ckan/model/core.py',
u'ckan/model/dashboard.py',
u'ckan/model/domain_object.py',
u'ckan/model/extension.py',
u'ckan/model/follower.py',
u'ckan/model/group.py',
u'ckan/model/group_extra.py',
u'ckan/model/license.py',
u'ckan/model/meta.py',
u'ckan/model/misc.py',
u'ckan/model/modification.py',
u'ckan/model/package.py',
u'ckan/model/package_extra.py',
u'ckan/model/package_relationship.py',
u'ckan/model/rating.py',
u'ckan/model/resource.py',
u'ckan/model/resource_view.py',
u'ckan/model/system_info.py',
u'ckan/model/tag.py',
u'ckan/model/task_status.py',
u'ckan/model/term_translation.py',
u'ckan/model/tracking.py',
u'ckan/model/types.py',
u'ckan/model/user.py',
u'ckan/model/vocabulary.py',
u'ckan/pastertemplates/__init__.py',
u'ckan/plugins/core.py',
u'ckan/plugins/toolkit.py',
u'ckan/plugins/toolkit_sphinx_extension.py',
u'ckan/tests/config/test_environment.py',
u'ckan/tests/config/test_middleware.py',
u'ckan/tests/controllers/__init__.py',
u'ckan/tests/controllers/test_admin.py',
u'ckan/tests/controllers/test_api.py',
u'ckan/tests/controllers/test_feed.py',
u'ckan/tests/controllers/test_group.py',
u'ckan/tests/controllers/test_home.py',
u'ckan/tests/controllers/test_organization.py',
u'ckan/tests/controllers/test_package.py',
u'ckan/tests/controllers/test_user.py',
u'ckan/tests/controllers/test_util.py',
u'ckan/tests/factories.py',
u'ckan/tests/helpers.py',
u'ckan/tests/i18n/test_check_po_files.py',
u'ckan/tests/legacy/__init__.py',
u'ckan/tests/legacy/ckantestplugins.py',
u'ckan/tests/legacy/functional/api/__init__.py',
u'ckan/tests/legacy/functional/api/base.py',
u'ckan/tests/legacy/functional/api/model/test_group.py',
u'ckan/tests/legacy/functional/api/model/test_licenses.py',
u'ckan/tests/legacy/functional/api/model/test_package.py',
u'ckan/tests/legacy/functional/api/model/test_ratings.py',
u'ckan/tests/legacy/functional/api/model/test_relationships.py',
u'ckan/tests/legacy/functional/api/model/test_revisions.py',
u'ckan/tests/legacy/functional/api/model/test_tag.py',
u'ckan/tests/legacy/functional/api/model/test_vocabulary.py',
u'ckan/tests/legacy/functional/api/test_activity.py',
u'ckan/tests/legacy/functional/api/test_api.py',
u'ckan/tests/legacy/functional/api/test_dashboard.py',
u'ckan/tests/legacy/functional/api/test_email_notifications.py',
u'ckan/tests/legacy/functional/api/test_follow.py',
u'ckan/tests/legacy/functional/api/test_misc.py',
u'ckan/tests/legacy/functional/api/test_package_search.py',
u'ckan/tests/legacy/functional/api/test_resource.py',
u'ckan/tests/legacy/functional/api/test_resource_search.py',
u'ckan/tests/legacy/functional/api/test_user.py',
u'ckan/tests/legacy/functional/api/test_util.py',
u'ckan/tests/legacy/functional/test_activity.py',
u'ckan/tests/legacy/functional/test_admin.py',
u'ckan/tests/legacy/functional/test_error.py',
u'ckan/tests/legacy/functional/test_group.py',
u'ckan/tests/legacy/functional/test_package.py',
u'ckan/tests/legacy/functional/test_pagination.py',
u'ckan/tests/legacy/functional/test_preview_interface.py',
u'ckan/tests/legacy/functional/test_revision.py',
u'ckan/tests/legacy/functional/test_tag.py',
u'ckan/tests/legacy/functional/test_tracking.py',
u'ckan/tests/legacy/functional/test_user.py',
u'ckan/tests/legacy/html_check.py',
u'ckan/tests/legacy/lib/__init__.py',
u'ckan/tests/legacy/lib/test_authenticator.py',
u'ckan/tests/legacy/lib/test_cli.py',
u'ckan/tests/legacy/lib/test_dictization.py',
u'ckan/tests/legacy/lib/test_dictization_schema.py',
u'ckan/tests/legacy/lib/test_email_notifications.py',
u'ckan/tests/legacy/lib/test_hash.py',
u'ckan/tests/legacy/lib/test_helpers.py',
u'ckan/tests/legacy/lib/test_i18n.py',
u'ckan/tests/legacy/lib/test_navl.py',
u'ckan/tests/legacy/lib/test_resource_search.py',
u'ckan/tests/legacy/lib/test_simple_search.py',
u'ckan/tests/legacy/lib/test_solr_package_search.py',
u'ckan/tests/legacy/lib/test_solr_package_search_synchronous_update.py',
u'ckan/tests/legacy/lib/test_solr_schema_version.py',
u'ckan/tests/legacy/lib/test_solr_search_index.py',
u'ckan/tests/legacy/lib/test_tag_search.py',
u'ckan/tests/legacy/logic/test_action.py',
u'ckan/tests/legacy/logic/test_auth.py',
u'ckan/tests/legacy/logic/test_init.py',
u'ckan/tests/legacy/logic/test_member.py',
u'ckan/tests/legacy/logic/test_tag.py',
u'ckan/tests/legacy/logic/test_tag_vocab.py',
u'ckan/tests/legacy/logic/test_validators.py',
u'ckan/tests/legacy/misc/test_format_text.py',
u'ckan/tests/legacy/misc/test_mock_mail_server.py',
u'ckan/tests/legacy/misc/test_sync.py',
u'ckan/tests/legacy/mock_mail_server.py',
u'ckan/tests/legacy/mock_plugin.py',
u'ckan/tests/legacy/models/test_activity.py',
u'ckan/tests/legacy/models/test_extras.py',
u'ckan/tests/legacy/models/test_follower.py',
u'ckan/tests/legacy/models/test_group.py',
u'ckan/tests/legacy/models/test_misc.py',
u'ckan/tests/legacy/models/test_package.py',
u'ckan/tests/legacy/models/test_package_relationships.py',
u'ckan/tests/legacy/models/test_purge_revision.py',
u'ckan/tests/legacy/models/test_resource.py',
u'ckan/tests/legacy/models/test_revision.py',
u'ckan/tests/legacy/models/test_user.py',
u'ckan/tests/legacy/schema/test_schema.py',
u'ckan/tests/legacy/test_coding_standards.py',
u'ckan/tests/legacy/test_plugins.py',
u'ckan/tests/legacy/test_versions.py',
u'ckan/tests/lib/__init__.py',
u'ckan/tests/lib/dictization/test_model_dictize.py',
u'ckan/tests/lib/navl/test_dictization_functions.py',
u'ckan/tests/lib/navl/test_validators.py',
u'ckan/tests/lib/search/test_index.py',
u'ckan/tests/lib/test_app_globals.py',
u'ckan/tests/lib/test_auth_tkt.py',
u'ckan/tests/lib/test_base.py',
u'ckan/tests/lib/test_cli.py',
u'ckan/tests/lib/test_config_tool.py',
u'ckan/tests/lib/test_datapreview.py',
u'ckan/tests/lib/test_helpers.py',
u'ckan/tests/lib/test_mailer.py',
u'ckan/tests/lib/test_munge.py',
u'ckan/tests/lib/test_navl.py',
u'ckan/tests/logic/action/__init__.py',
u'ckan/tests/logic/action/test_create.py',
u'ckan/tests/logic/action/test_delete.py',
u'ckan/tests/logic/action/test_get.py',
u'ckan/tests/logic/action/test_patch.py',
u'ckan/tests/logic/action/test_update.py',
u'ckan/tests/logic/auth/__init__.py',
u'ckan/tests/logic/auth/test_create.py',
u'ckan/tests/logic/auth/test_delete.py',
u'ckan/tests/logic/auth/test_get.py',
u'ckan/tests/logic/auth/test_init.py',
u'ckan/tests/logic/auth/test_update.py',
u'ckan/tests/logic/test_conversion.py',
u'ckan/tests/logic/test_converters.py',
u'ckan/tests/logic/test_schema.py',
u'ckan/tests/logic/test_validators.py',
u'ckan/tests/migration/__init__.py',
u'ckan/tests/model/__init__.py',
u'ckan/tests/model/test_license.py',
u'ckan/tests/model/test_resource.py',
u'ckan/tests/model/test_resource_view.py',
u'ckan/tests/model/test_system_info.py',
u'ckan/tests/model/test_user.py',
u'ckan/tests/plugins/__init__.py',
u'ckan/tests/plugins/test_toolkit.py',
u'ckan/tests/test_authz.py',
u'ckan/tests/test_factories.py',
u'ckan/websetup.py',
u'ckanext/datapusher/cli.py',
u'ckanext/datapusher/helpers.py',
u'ckanext/datapusher/interfaces.py',
u'ckanext/datapusher/logic/action.py',
u'ckanext/datapusher/logic/schema.py',
u'ckanext/datapusher/plugin.py',
u'ckanext/datapusher/tests/test.py',
u'ckanext/datapusher/tests/test_action.py',
u'ckanext/datapusher/tests/test_default_views.py',
u'ckanext/datapusher/tests/test_interfaces.py',
u'ckanext/datastore/controller.py',
u'ckanext/datastore/helpers.py',
u'ckanext/datastore/backend/postgres.py',
u'ckanext/datastore/interfaces.py',
u'ckanext/datastore/logic/action.py',
u'ckanext/datastore/logic/auth.py',
u'ckanext/datastore/logic/schema.py',
u'ckanext/datastore/plugin.py',
u'ckanext/datastore/tests/helpers.py',
u'ckanext/datastore/tests/sample_datastore_plugin.py',
u'ckanext/datastore/tests/test_configure.py',
u'ckanext/datastore/tests/test_create.py',
u'ckanext/datastore/tests/test_db.py',
u'ckanext/datastore/tests/test_delete.py',
u'ckanext/datastore/tests/test_disable.py',
u'ckanext/datastore/tests/test_dump.py',
u'ckanext/datastore/tests/test_helpers.py',
u'ckanext/datastore/tests/test_info.py',
u'ckanext/datastore/tests/test_interface.py',
u'ckanext/datastore/tests/test_plugin.py',
u'ckanext/datastore/tests/test_search.py',
u'ckanext/datastore/tests/test_unit.py',
u'ckanext/datastore/tests/test_upsert.py',
u'ckanext/example_iauthfunctions/plugin_v2.py',
u'ckanext/example_iauthfunctions/plugin_v3.py',
u'ckanext/example_iauthfunctions/plugin_v4.py',
u'ckanext/example_iauthfunctions/plugin_v5_custom_config_setting.py',
u'ckanext/example_iauthfunctions/plugin_v6_parent_auth_functions.py',
u'ckanext/example_iauthfunctions/tests/test_example_iauthfunctions.py',
u'ckanext/example_iconfigurer/controller.py',
u'ckanext/example_iconfigurer/plugin.py',
u'ckanext/example_iconfigurer/plugin_v1.py',
u'ckanext/example_iconfigurer/plugin_v2.py',
u'ckanext/example_iconfigurer/tests/test_example_iconfigurer.py',
u'ckanext/example_iconfigurer/tests/test_iconfigurer_toolkit.py',
u'ckanext/example_iconfigurer/tests/test_iconfigurer_update_config.py',
u'ckanext/example_idatasetform/plugin.py',
u'ckanext/example_idatasetform/plugin_v1.py',
u'ckanext/example_idatasetform/plugin_v2.py',
u'ckanext/example_idatasetform/plugin_v3.py',
u'ckanext/example_idatasetform/plugin_v4.py',
u'ckanext/example_idatasetform/tests/test_controllers.py',
u'ckanext/example_idatasetform/tests/test_example_idatasetform.py',
u'ckanext/example_igroupform/plugin.py',
u'ckanext/example_igroupform/tests/test_controllers.py',
u'ckanext/example_iresourcecontroller/plugin.py',
u'ckanext/example_iresourcecontroller/tests/test_example_iresourcecontroller.py',
u'ckanext/example_itemplatehelpers/plugin.py',
u'ckanext/example_itranslation/plugin.py',
u'ckanext/example_itranslation/plugin_v1.py',
u'ckanext/example_itranslation/tests/test_plugin.py',
u'ckanext/example_iuploader/plugin.py',
u'ckanext/example_iuploader/test/test_plugin.py',
u'ckanext/example_ivalidators/plugin.py',
u'ckanext/example_ivalidators/tests/test_ivalidators.py',
u'ckanext/example_theme_docs/custom_config_setting/plugin.py',
u'ckanext/example_theme_docs/custom_emails/plugin.py',
u'ckanext/example_theme_docs/custom_emails/tests.py',
u'ckanext/example_theme_docs/v01_empty_extension/plugin.py',
u'ckanext/example_theme_docs/v02_empty_template/plugin.py',
u'ckanext/example_theme_docs/v03_jinja/plugin.py',
u'ckanext/example_theme_docs/v04_ckan_extends/plugin.py',
u'ckanext/example_theme_docs/v05_block/plugin.py',
u'ckanext/example_theme_docs/v06_super/plugin.py',
u'ckanext/example_theme_docs/v07_helper_function/plugin.py',
u'ckanext/example_theme_docs/v08_custom_helper_function/plugin.py',
u'ckanext/example_theme_docs/v09_snippet/plugin.py',
u'ckanext/example_theme_docs/v10_custom_snippet/plugin.py',
u'ckanext/example_theme_docs/v11_HTML_and_CSS/plugin.py',
u'ckanext/example_theme_docs/v12_extra_public_dir/plugin.py',
u'ckanext/example_theme_docs/v13_custom_css/plugin.py',
u'ckanext/example_theme_docs/v14_more_custom_css/plugin.py',
u'ckanext/example_theme_docs/v15_fanstatic/plugin.py',
u'ckanext/example_theme_docs/v16_initialize_a_javascript_module/plugin.py',
u'ckanext/example_theme_docs/v17_popover/plugin.py',
u'ckanext/example_theme_docs/v18_snippet_api/plugin.py',
u'ckanext/example_theme_docs/v19_01_error/plugin.py',
u'ckanext/example_theme_docs/v19_02_error_handling/plugin.py',
u'ckanext/example_theme_docs/v20_pubsub/plugin.py',
u'ckanext/example_theme_docs/v21_custom_jquery_plugin/plugin.py',
u'ckanext/imageview/plugin.py',
u'ckanext/imageview/tests/test_view.py',
u'ckanext/multilingual/plugin.py',
u'ckanext/multilingual/tests/test_multilingual_plugin.py',
u'ckanext/reclineview/plugin.py',
u'ckanext/reclineview/tests/test_view.py',
u'ckanext/resourceproxy/controller.py',
u'ckanext/resourceproxy/plugin.py',
u'ckanext/resourceproxy/tests/test_proxy.py',
u'ckanext/stats/__init__.py',
u'ckanext/stats/controller.py',
u'ckanext/stats/plugin.py',
u'ckanext/stats/stats.py',
u'ckanext/stats/tests/__init__.py',
u'ckanext/stats/tests/test_stats_lib.py',
u'ckanext/stats/tests/test_stats_plugin.py',
u'ckanext/test_tag_vocab_plugin.py',
u'ckanext/textview/plugin.py',
u'ckanext/textview/tests/test_view.py',
u'ckanext/webpageview/plugin.py',
u'ckanext/webpageview/tests/test_view.py',
u'doc/conf.py',
u'profile_tests.py',
u'setup.py',
]
def test_string_literals_are_prefixed():
errors = []
for abs_path, rel_path in walk_python_files():
if rel_path in _STRING_LITERALS_WHITELIST:
continue
problems = find_unprefixed_string_literals(abs_path)
if problems:
errors.append((rel_path, problems))
if errors:
lines = [u'Unprefixed string literals:']
for filename, problems in errors:
lines.append(u' ' + filename)
for line_no, col_no in problems:
lines.append(u' line {}, column {}'.format(line_no, col_no))
raise AssertionError(u'\n'.join(lines))
| true
| true
|
1c42536e439f6f04c16663eefff72cc4898eaacc
| 32,905
|
py
|
Python
|
rule-based-agents/v4_1_avrdistance_rule.py
|
guocongyun/gfootball-competition
|
e922d8afbe41b346e894be2f2ad10434ae7ed56d
|
[
"MIT"
] | null | null | null |
rule-based-agents/v4_1_avrdistance_rule.py
|
guocongyun/gfootball-competition
|
e922d8afbe41b346e894be2f2ad10434ae7ed56d
|
[
"MIT"
] | null | null | null |
rule-based-agents/v4_1_avrdistance_rule.py
|
guocongyun/gfootball-competition
|
e922d8afbe41b346e894be2f2ad10434ae7ed56d
|
[
"MIT"
] | null | null | null |
from kaggle_environments.envs.football.helpers import *
import math
import random
def find_patterns(obs, player_x, player_y):
    """ return the memory patterns of the first group that fits the environment """
    for build_group in groups_of_memory_patterns:
        candidate = build_group(obs, player_x, player_y)
        if candidate["environment_fits"](obs, player_x, player_y):
            return candidate["get_memory_patterns"](obs, player_x, player_y)
def get_action_of_agent(obs, player_x, player_y):
    """ pick the action of the first fitting pattern in the agent's memory """
    # patterns are ordered by priority: the first one that fits wins
    for build_pattern in find_patterns(obs, player_x, player_y):
        candidate = build_pattern(obs, player_x, player_y)
        if candidate["environment_fits"](obs, player_x, player_y):
            return candidate["get_action"](obs, player_x, player_y)
#%%
def get_average_distance_to_opponents(obs, player_x, player_y, distance=0.03):
    """ average distance to opponents closer than `distance`.

    Opponents clearly behind the player (x <= player_x - 0.02) and the
    goalkeeper (index 0) are ignored. Returns (average, count); when no
    opponent is that close, returns the sentinel (2, 0). """
    close_distances = []
    for opponent in obs["right_team"][1:]:
        # only opponents not clearly behind the player matter
        if opponent[0] > (player_x - 0.02):
            d = get_distance(player_x, player_y, opponent[0], opponent[1])
            if d < distance:
                close_distances.append(d)
    if not close_distances:
        # sentinel: no opponents close around
        return 2, 0
    return sum(close_distances) / len(close_distances), len(close_distances)
def check_legal_move(obs, player_x, player_y):
    """ True when at least one opponent (goalkeeper excluded) is not clearly
    behind the player on the x axis. """
    return any(
        opponent[0] > (player_x - 0.02)
        for opponent in obs["right_team"][1:]
    )
def get_distance(x1, y1, x2, y2):
    """ two-dimensional Euclidean distance; the y axis is stretched by 2.38
    to account for the field's aspect ratio. """
    dx = x1 - x2
    dy = y1 * 2.38 - y2 * 2.38
    return math.sqrt(dx ** 2 + dy ** 2)
#%%
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def bad_angle_short_pass(obs, player_x, player_y):
    """ perform a short pass, if player is at bad angle to opponent's goal """
    def _fits(obs, player_x, player_y):
        """ player is deep in a corner zone, or near the goal line while
        drifting further away from the goal's y center. """
        player_dir_y = obs["left_team_direction"][obs["active"]][1]
        if abs(player_y) > 0.15 and player_x > 0.85:
            return True
        if player_x > 0.7:
            if player_y > 0.07 and player_dir_y > 0:
                return True
            if player_y < -0.07 and player_dir_y < 0:
                return True
        return False
    def _action(obs, player_x, player_y):
        """ first face the y center of the field, then slow down and pass. """
        sticky = obs["sticky_actions"]
        if player_y > 0:
            if Action.Top not in sticky:
                return Action.Top
        else:
            if Action.Bottom not in sticky:
                return Action.Bottom
        if Action.Sprint in sticky:
            return Action.ReleaseSprint
        return Action.ShortPass
    return {"environment_fits": _fits, "get_action": _action}
def close_to_goalkeeper_shot(obs, player_x, player_y):
    """ shot if close to the goalkeeper """
    def _fits(obs, player_x, player_y):
        """ player is close to the goalkeeper's position extrapolated 13
        steps along his movement direction. """
        keeper = obs["right_team"][0]
        keeper_dir = obs["right_team_direction"][0]
        future_keeper_x = keeper[0] + keeper_dir[0] * 13
        future_keeper_y = keeper[1] + keeper_dir[1] * 13
        return get_distance(player_x, player_y, future_keeper_x, future_keeper_y) < 0.3
    def _action(obs, player_x, player_y):
        """ angle toward a goal corner if nearly level with it, then shoot. """
        sticky = obs["sticky_actions"]
        if 0 < player_y < 0.03 and Action.BottomRight not in sticky:
            return Action.BottomRight
        if -0.03 < player_y < 0 and Action.TopRight not in sticky:
            return Action.TopRight
        if Action.Sprint in sticky:
            return Action.ReleaseSprint
        return Action.Shot
    return {"environment_fits": _fits, "get_action": _action}
def close_to_opponent_short_pass(obs, player_x, player_y):
    """ perform a short pass, if close to opponent's player and close to teammate """
    def environment_fits(obs, player_x, player_y):
        """ True when an opponent is within 0.03 of the player and some nearby
        teammate (< 0.2 away) is under less pressure and closer to the
        opponent's goal. Only the first close opponent is considered. """
        for i in range(1, len(obs["right_team"])):
            opponent_x, opponent_y = obs["right_team"][i][0], obs["right_team"][i][1]
            distance_to_opponent = get_distance(player_x, player_y, opponent_x, opponent_y)
            if distance_to_opponent < 0.03:
                # loop-invariant values, computed once for all teammates
                player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)
                legal_move = check_legal_move(obs, player_x, player_y)
                player_distance_to_goal = get_distance(player_x, player_y, 1, 0)
                for j in range(1, len(obs["left_team"])):
                    teammate_x, teammate_y = obs["left_team"][j][0], obs["left_team"][j][1]
                    # BUG FIX: pressure on the teammate is measured at the
                    # TEAMMATE's position. Previously it was measured at the
                    # player's own position, so the strict comparison below
                    # compared equal values and could never hold.
                    teammate_to_opponents = get_average_distance_to_opponents(obs, teammate_x, teammate_y)
                    distance_to_teammate = get_distance(player_x, player_y, teammate_x, teammate_y)
                    if distance_to_teammate < 0.2 and player_to_opponents[0] < teammate_to_opponents[0] and legal_move:
                        teammate_distance_to_goal = get_distance(teammate_x, teammate_y, 1, 0)
                        if teammate_distance_to_goal < player_distance_to_goal:
                            return True
                break
        return False
    def get_action(obs, player_x, player_y):
        """ stop sprinting, then play a short pass. """
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_opponent_long_pass(obs, player_x, player_y):
    """ perform a long pass, if close to opponent's player and close to teammate """
    def environment_fits(obs, player_x, player_y):
        """ True when an opponent is within (0.03, 0.06) of the player and a
        medium-range teammate (0.2..0.4 away) is under less pressure and
        meaningfully (> 1/3) closer to the opponent's goal. """
        for i in range(1, len(obs["right_team"])):
            opponent_x, opponent_y = obs["right_team"][i][0], obs["right_team"][i][1]
            distance_to_opponent = get_distance(player_x, player_y, opponent_x, opponent_y)
            if distance_to_opponent < 0.06 and distance_to_opponent > 0.03:
                # loop-invariant values, computed once for all teammates
                player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)
                legal_move = check_legal_move(obs, player_x, player_y)
                player_distance_to_goal = get_distance(player_x, player_y, 1, 0)
                for j in range(1, len(obs["left_team"])):
                    teammate_x, teammate_y = obs["left_team"][j][0], obs["left_team"][j][1]
                    # BUG FIX: pressure on the teammate is measured at the
                    # TEAMMATE's position. Previously it was measured at the
                    # player's own position, so the strict comparison below
                    # compared equal values and could never hold.
                    teammate_to_opponents = get_average_distance_to_opponents(obs, teammate_x, teammate_y)
                    distance_to_teammate = get_distance(player_x, player_y, teammate_x, teammate_y)
                    if distance_to_teammate < 0.4 and distance_to_teammate > 0.2 and player_to_opponents[0] < teammate_to_opponents[0] and legal_move:
                        teammate_distance_to_goal = get_distance(teammate_x, teammate_y, 1, 0)
                        if teammate_distance_to_goal < player_distance_to_goal - 1/3:
                            return True
                break
        return False
    def get_action(obs, player_x, player_y):
        """ stop sprinting, then play a long pass. """
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.LongPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_opponent_high_pass(obs, player_x, player_y):
    """ perform a high pass, if close to opponent's player and close to teammate """
    def environment_fits(obs, player_x, player_y):
        """ True when an opponent is within (0.06, 0.09) of the player and a
        far teammate (0.4..0.6 away) is under less pressure and much (> 2/3)
        closer to the opponent's goal. """
        for i in range(1, len(obs["right_team"])):
            opponent_x, opponent_y = obs["right_team"][i][0], obs["right_team"][i][1]
            distance_to_opponent = get_distance(player_x, player_y, opponent_x, opponent_y)
            if distance_to_opponent > 0.06 and distance_to_opponent < 0.09:
                # loop-invariant values, computed once for all teammates
                player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)
                legal_move = check_legal_move(obs, player_x, player_y)
                player_distance_to_goal = get_distance(player_x, player_y, 1, 0)
                for j in range(1, len(obs["left_team"])):
                    teammate_x, teammate_y = obs["left_team"][j][0], obs["left_team"][j][1]
                    # BUG FIX: pressure on the teammate is measured at the
                    # TEAMMATE's position. Previously it was measured at the
                    # player's own position, so the strict comparison below
                    # compared equal values and could never hold.
                    teammate_to_opponents = get_average_distance_to_opponents(obs, teammate_x, teammate_y)
                    distance_to_teammate = get_distance(player_x, player_y, teammate_x, teammate_y)
                    if distance_to_teammate < 0.6 and distance_to_teammate > 0.4 and player_to_opponents[0] < teammate_to_opponents[0] and legal_move:
                        teammate_distance_to_goal = get_distance(teammate_x, teammate_y, 1, 0)
                        if teammate_distance_to_goal < player_distance_to_goal - 2/3:
                            return True
                break
        return False
    def get_action(obs, player_x, player_y):
        """ stop sprinting, then play a high pass. """
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
#%%
def far_from_goal_shot(obs, player_x, player_y):
    """ perform a shot, if far from opponent's goal """
    def _fits(obs, player_x, player_y):
        """ player is deep in his own half, or is the goalkeeper (index 0). """
        return player_x < -0.6 or obs["ball_owned_player"] == 0
    def _action(obs, player_x, player_y):
        """ stop sprinting, then shoot (clears the ball upfield). """
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.Shot
    return {"environment_fits": _fits, "get_action": _action}
def far_from_goal_high_pass(obs, player_x, player_y):
    """ perform a high pass, if far from opponent's goal """
    def _fits(obs, player_x, player_y):
        """ player is in his own half (behind -0.3), or is the goalkeeper. """
        return player_x < -0.3 or obs["ball_owned_player"] == 0
    def _action(obs, player_x, player_y):
        """ face right, stop sprinting, then play a high pass forward. """
        sticky = obs["sticky_actions"]
        if Action.Right not in sticky:
            return Action.Right
        if Action.Sprint in sticky:
            return Action.ReleaseSprint
        return Action.HighPass
    return {"environment_fits": _fits, "get_action": _action}
def go_through_opponents(obs, player_x, player_y):
    """ avoid closest opponents by going around them """
    def environment_fits(obs, player_x, player_y):
        """ Always fits (returns True). Side effect: probes three forward
        directions one step ahead and stores the safest one in
        obs["memory_patterns"]["go_around_opponent"], plus a "surrounded"
        flag. Note: get_average_distance_to_opponents returns the sentinel
        (2, 0) when no opponent is nearby, so a value of 2 means "free". """
        # right direction is safest
        biggest_distance, final_opponents_amount = get_average_distance_to_opponents(obs, player_x + 0.01, player_y)
        obs["memory_patterns"]["go_around_opponent"] = Action.Right
        # if top right direction is safest
        # (second clause: direction is completely free AND player is low
        # enough on the field that moving up makes sense)
        top_right, opponents_amount = get_average_distance_to_opponents(obs, player_x + 0.01, player_y - 0.01)
        if (top_right > biggest_distance and player_y > -0.15) or (top_right == 2 and player_y > 0.07):
            biggest_distance = top_right
            final_opponents_amount = opponents_amount
            obs["memory_patterns"]["go_around_opponent"] = Action.TopRight
        # if bottom right direction is safest (mirror of the case above)
        bottom_right, opponents_amount = get_average_distance_to_opponents(obs, player_x + 0.01, player_y + 0.01)
        if (bottom_right > biggest_distance and player_y < 0.15) or (bottom_right == 2 and player_y < -0.07):
            biggest_distance = bottom_right
            final_opponents_amount = opponents_amount
            obs["memory_patterns"]["go_around_opponent"] = Action.BottomRight
        # is player surrounded? (three or more opponents close in the
        # chosen direction)
        if final_opponents_amount >= 3:
            obs["memory_patterns"]["go_around_opponent_surrounded"] = True
        else:
            obs["memory_patterns"]["go_around_opponent_surrounded"] = False
        return True
    def get_action(obs, player_x, player_y):
        """ High pass when surrounded; otherwise sprint, then move in the
        direction chosen by environment_fits above. """
        # if player is surrounded
        if obs["memory_patterns"]["go_around_opponent_surrounded"]:
            return Action.HighPass
        if Action.Sprint not in obs["sticky_actions"]:
            return Action.Sprint
        return obs["memory_patterns"]["go_around_opponent"]
    return {"environment_fits": environment_fits, "get_action": get_action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def khorne_slide(obs, berzerker_x, berzerker_y):
    """ BLOOD FOR THE BLOOD GOD!!! SKULLS FOR THE SKULL THRONE!!!

    Slide tackle the opponent ball carrier when it is safe and the tackle
    is likely to connect. """
    def environment_fits(obs, berzerker_x, berzerker_y):
        """ True when the opponent team owns the ball, the player is outside
        his own penalty zone, not too many teammates are already behind the
        ball carrier, and one step from now the player will be both very
        close to the carrier and closing in on him. """
        # if prey has the ball
        if obs["ball_owned_team"] == 1:
            prey_x = obs["right_team"][obs["ball_owned_player"]][0]
            prey_y = obs["right_team"][obs["ball_owned_player"]][1]
            # by x position, amount of berzerker's team players between prey
            # and goal of berzerker's team
            players_amount = 0
            for i in range(1, len(obs["left_team"])):
                if obs["left_team"][i][0] < prey_x:
                    players_amount += 1
            # positions extrapolated one step along current movement
            # (removed an unused `prey_x_direction` local that duplicated
            # this computation)
            future_prey_x = prey_x + obs["right_team_direction"][obs["ball_owned_player"]][0]
            future_prey_y = prey_y + obs["right_team_direction"][obs["ball_owned_player"]][1]
            future_berzerker_x = berzerker_x + obs["left_team_direction"][obs["active"]][0]
            future_berzerker_y = berzerker_y + obs["left_team_direction"][obs["active"]][1]
            distance_to_prey = get_distance(berzerker_x, berzerker_y, prey_x, prey_y)
            future_distance_to_prey = get_distance(future_berzerker_x, future_berzerker_y, future_prey_x, future_prey_y)
            # if berzerker is not close to his own penalty zone
            # and prey is beyond x position of too many players of berzerker's team
            # and berzerker is close enough to prey
            # and berzerker is running in direction of prey
            if ((berzerker_x > -0.65 or abs(berzerker_y) > 0.3) and
                    players_amount <= 7 and
                    future_distance_to_prey < 0.015 and
                    distance_to_prey > future_distance_to_prey):
                return True
        return False
    def get_action(obs, berzerker_x, berzerker_y):
        """ slide tackle """
        return Action.Slide
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom(obs, player_x, player_y):
    """ run to the ball if it is to the bottom from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is below the player at (almost) the same x. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_y > player_y and abs(ball_x - player_x) < 0.01
    def _action(obs, player_x, player_y):
        """ move straight down. """
        return Action.Bottom
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_bottom_left(obs, player_x, player_y):
    """ run to the ball if it is to the bottom left from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is below and to the left of the player. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x < player_x and ball_y > player_y
    def _action(obs, player_x, player_y):
        """ move diagonally down-left. """
        return Action.BottomLeft
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_bottom_right(obs, player_x, player_y):
    """ run to the ball if it is to the bottom right from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is below and to the right of the player. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x > player_x and ball_y > player_y
    def _action(obs, player_x, player_y):
        """ move diagonally down-right. """
        return Action.BottomRight
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_left(obs, player_x, player_y):
    """ run to the ball if it is to the left from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is to the left of the player at (almost) the same y. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x < player_x and abs(ball_y - player_y) < 0.01
    def _action(obs, player_x, player_y):
        """ move straight left. """
        return Action.Left
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_right(obs, player_x, player_y):
    """ run to the ball if it is to the right from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is to the right of the player at (almost) the same y. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x > player_x and abs(ball_y - player_y) < 0.01
    def _action(obs, player_x, player_y):
        """ move straight right. """
        return Action.Right
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_top(obs, player_x, player_y):
    """ run to the ball if it is to the top from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is above the player at (almost) the same x. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_y < player_y and abs(ball_x - player_x) < 0.01
    def _action(obs, player_x, player_y):
        """ move straight up. """
        return Action.Top
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_top_left(obs, player_x, player_y):
    """ run to the ball if it is to the top left from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is above and to the left of the player. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x < player_x and ball_y < player_y
    def _action(obs, player_x, player_y):
        """ move diagonally up-left. """
        return Action.TopLeft
    return {"environment_fits": _fits, "get_action": _action}
def run_to_ball_top_right(obs, player_x, player_y):
    """ run to the ball if it is to the top right from player's position """
    def _fits(obs, player_x, player_y):
        """ ball is above and to the right of the player. """
        ball_x, ball_y = obs["ball"][0], obs["ball"][1]
        return ball_x > player_x and ball_y < player_y
    def _action(obs, player_x, player_y):
        """ move diagonally up-right. """
        return Action.TopRight
    return {"environment_fits": _fits, "get_action": _action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
#%%
def idle(obs, player_x, player_y):
    """ fallback pattern that always fits.

    NOTE: despite its name, it does not stand still — it returns
    Action.HighPass as a last-resort move. """
    def _fits(obs, player_x, player_y):
        """ always fits. """
        return True
    def _action(obs, player_x, player_y):
        """ last-resort high pass. """
        return Action.HighPass
    return {"environment_fits": _fits, "get_action": _action}
#%%
def start_sprinting(obs, player_x, player_y):
    """ make sure player is sprinting """
    def _fits(obs, player_x, player_y):
        """ fits when the sprint sticky action is not already active. """
        return Action.Sprint not in obs["sticky_actions"]
    def _action(obs, player_x, player_y):
        """ start sprinting. """
        return Action.Sprint
    return {"environment_fits": _fits, "get_action": _action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def corner(obs, player_x, player_y):
    """ perform a shot in corner game mode """
    def _fits(obs, player_x, player_y):
        """ fits only in corner game mode. """
        return obs['game_mode'] == GameMode.Corner
    def _action(obs, player_x, player_y):
        """ aim toward the goal area (up or down depending on the side of
        the corner), then shoot. """
        desired = Action.TopRight if player_y > 0 else Action.BottomRight
        if desired not in obs["sticky_actions"]:
            return desired
        return Action.Shot
    return {"environment_fits": _fits, "get_action": _action}
def free_kick(obs, player_x, player_y):
    """ perform a high pass or a shot in free kick game mode """
    def _fits(obs, player_x, player_y):
        """ fits only in free kick game mode. """
        return obs['game_mode'] == GameMode.FreeKick
    def _action(obs, player_x, player_y):
        """ shoot when close to the goal, otherwise play a high pass. """
        sticky = obs["sticky_actions"]
        if player_x > 0.5:
            # close enough: face a corner of the goal first, then shoot
            desired = Action.TopRight if player_y > 0 else Action.BottomRight
            if desired not in sticky:
                return desired
            return Action.Shot
        # far from goal: face right, then deliver a high pass
        if Action.Right not in sticky:
            return Action.Right
        return Action.HighPass
    return {"environment_fits": _fits, "get_action": _action}
def goal_kick(obs, player_x, player_y):
    """ perform a short pass in goal kick game mode """
    def _fits(obs, player_x, player_y):
        """ fits only in goal kick game mode. """
        return obs['game_mode'] == GameMode.GoalKick
    def _action(obs, player_x, player_y):
        """ randomly aim top-right or bottom-right, then short pass. """
        sticky = obs["sticky_actions"]
        # roughly half the time try the top-right direction first, but only
        # when neither diagonal is already active
        if (random.random() < 0.5
                and Action.TopRight not in sticky
                and Action.BottomRight not in sticky):
            return Action.TopRight
        if Action.BottomRight not in sticky:
            return Action.BottomRight
        return Action.ShortPass
    return {"environment_fits": _fits, "get_action": _action}
def kick_off(obs, player_x, player_y):
    """ perform a short pass in kick off game mode """
    def _fits(obs, player_x, player_y):
        """ fits only in kick off game mode. """
        return obs['game_mode'] == GameMode.KickOff
    def _action(obs, player_x, player_y):
        """ face the y center of the field, then play a short pass. """
        desired = Action.Top if player_y > 0 else Action.Bottom
        if desired not in obs["sticky_actions"]:
            return desired
        return Action.ShortPass
    return {"environment_fits": _fits, "get_action": _action}
def penalty(obs, player_x, player_y):
    """ perform a shot in penalty game mode """
    def _fits(obs, player_x, player_y):
        """ fits only in penalty game mode. """
        return obs['game_mode'] == GameMode.Penalty
    def _action(obs, player_x, player_y):
        """ randomly aim at the top-right or bottom-right corner, then shoot. """
        sticky = obs["sticky_actions"]
        # roughly half the time try the top-right corner first, but only
        # when neither diagonal is already active
        if (random.random() < 0.5
                and Action.TopRight not in sticky
                and Action.BottomRight not in sticky):
            return Action.TopRight
        if Action.BottomRight not in sticky:
            return Action.BottomRight
        return Action.Shot
    return {"environment_fits": _fits, "get_action": _action}
def throw_in(obs, player_x, player_y):
    """ perform a short pass in throw in game mode """
    def _fits(obs, player_x, player_y):
        """ fits only in throw in game mode. """
        return obs['game_mode'] == GameMode.ThrowIn
    def _action(obs, player_x, player_y):
        """ face right, then throw with a short pass. """
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        return Action.ShortPass
    return {"environment_fits": _fits, "get_action": _action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def defence_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for environments in which opponent's team has the ball """
    def _fits(obs, player_x, player_y):
        """ fits whenever our team does not own the ball. """
        return obs["ball_owned_team"] != 0
    def _patterns(obs, player_x, player_y):
        """ return chase patterns; mutates obs["ball"] to an intercept point. """
        # project the ball forward along its direction of movement
        obs["ball"][0] += obs["ball_direction"][0] * 7
        obs["ball"][1] += obs["ball_direction"][1] * 3
        # when an opponent controls the ball away from the y center, aim
        # slightly behind the ball and toward the center of the field
        if abs(obs["ball"][1]) > 0.07 and obs["ball_owned_team"] == 1:
            obs["ball"][0] -= 0.01
            if obs["ball"][1] > 0:
                obs["ball"][1] -= 0.01
            else:
                obs["ball"][1] += 0.01
        # checked in order; first fitting pattern wins
        return [
            start_sprinting,
            khorne_slide,
            run_to_ball_right,
            run_to_ball_left,
            run_to_ball_bottom,
            run_to_ball_top,
            run_to_ball_top_right,
            run_to_ball_top_left,
            run_to_ball_bottom_right,
            run_to_ball_bottom_left,
            idle,
        ]
    return {"environment_fits": _fits, "get_memory_patterns": _patterns}
def goalkeeper_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for goalkeeper """
    def _fits(obs, player_x, player_y):
        """ fits when the controlled player is our goalkeeper (index 0)
        and he owns the ball. """
        return (obs["ball_owned_team"] == 0
                and obs["ball_owned_player"] == 0
                and obs["ball_owned_player"] == obs["active"])
    def _patterns(obs, player_x, player_y):
        """ checked in order; first fitting pattern wins. """
        return [
            close_to_opponent_short_pass,
            close_to_opponent_long_pass,
            close_to_opponent_high_pass,
            far_from_goal_high_pass,
            idle,
        ]
    return {"environment_fits": _fits, "get_memory_patterns": _patterns}
#%%
def offence_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for environments in which player's team has the ball """
    def _fits(obs, player_x, player_y):
        """ fits when the controlled player himself owns the ball. """
        return (obs["ball_owned_team"] == 0
                and obs["ball_owned_player"] == obs["active"])
    def _patterns(obs, player_x, player_y):
        """ checked in order; first fitting pattern wins. """
        return [
            far_from_goal_shot,
            far_from_goal_high_pass,
            bad_angle_short_pass,
            close_to_goalkeeper_shot,
            close_to_opponent_short_pass,
            close_to_opponent_long_pass,
            go_through_opponents,
            start_sprinting,
            idle,
        ]
    return {"environment_fits": _fits, "get_memory_patterns": _patterns}
def other_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for all other environments """
    def _fits(obs, player_x, player_y):
        """ always fits (catch-all group). """
        return True
    def _patterns(obs, player_x, player_y):
        """ only the fallback pattern. """
        return [idle]
    return {"environment_fits": _fits, "get_memory_patterns": _patterns}
def special_game_modes_memory_patterns(obs, player_x, player_y):
    """ group of memory patterns for special game mode environments """
    def _fits(obs, player_x, player_y):
        """ fits whenever the game mode is anything but normal play. """
        return obs['game_mode'] != GameMode.Normal
    def _patterns(obs, player_x, player_y):
        """ one pattern per special game mode, plus the fallback. """
        return [
            corner,
            free_kick,
            goal_kick,
            kick_off,
            penalty,
            throw_in,
            idle,
        ]
    return {"environment_fits": _fits, "get_memory_patterns": _patterns}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
# list of groups of memory patterns
# Groups are checked in priority order by find_patterns: the first group
# whose environment_fits returns True supplies the memory patterns, so
# special game modes override everything and other_memory_patterns is the
# always-fitting fallback. Do not reorder.
groups_of_memory_patterns = [
    special_game_modes_memory_patterns,
    goalkeeper_memory_patterns,
    offence_memory_patterns,
    defence_memory_patterns,
    other_memory_patterns
]
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
# @human_readable_agent wrapper modifies raw observations
# provided by the environment:
# https://github.com/google-research/football/blob/master/gfootball/doc/observation.md#raw-observations
# into a form easier to work with by humans.
# Following modifications are applied:
# - Action, PlayerRole and GameMode enums are introduced.
# - 'sticky_actions' are turned into a set of active actions (Action enum)
# see usage example below.
# - 'game_mode' is turned into GameMode enum.
# - 'designated' field is removed, as it always equals to 'active'
# when a single player is controlled on the team.
# - 'left_team_roles'/'right_team_roles' are turned into PlayerRole enums.
# - Action enum is to be returned by the agent function.
@human_readable_agent
def agent(obs):
    """ Ole ole ole ole """
    # per-step scratch space shared between memory patterns
    obs["memory_patterns"] = {}
    # We always control the left team: the environment mirrors observations
    # and actions when our team actually plays on the right side.
    controlled_x = obs["left_team"][obs["active"]][0]
    controlled_y = obs["left_team"][obs["active"]][1]
    # delegate to the first fitting memory pattern
    return get_action_of_agent(obs, controlled_x, controlled_y)
| 43.013072
| 181
| 0.632123
|
from kaggle_environments.envs.football.helpers import *
import math
import random
def find_patterns(obs, player_x, player_y):
for get_group in groups_of_memory_patterns:
group = get_group(obs, player_x, player_y)
if group["environment_fits"](obs, player_x, player_y):
return group["get_memory_patterns"](obs, player_x, player_y)
def get_action_of_agent(obs, player_x, player_y):
memory_patterns = find_patterns(obs, player_x, player_y)
for get_pattern in memory_patterns:
pattern = get_pattern(obs, player_x, player_y)
if pattern["environment_fits"](obs, player_x, player_y):
return pattern["get_action"](obs, player_x, player_y)
def get_average_distance_to_opponents(obs, player_x, player_y, distance=0.03):
distances_sum = 0
distances_amount = 0
for i in range(1, len(obs["right_team"])):
if obs["right_team"][i][0] > (player_x - 0.02):
distance_to_opponent = get_distance(player_x, player_y, obs["right_team"][i][0], obs["right_team"][i][1])
if distance_to_opponent < distance:
distances_sum += distance_to_opponent
distances_amount += 1
if distances_amount == 0:
return 2, distances_amount
return distances_sum / distances_amount, distances_amount
def check_legal_move(obs, player_x, player_y):
for i in range(1, len(obs["right_team"])):
if obs["right_team"][i][0] > (player_x - 0.02):
return True
return False
def get_distance(x1, y1, x2, y2):
return math.sqrt((x1 - x2) ** 2 + (y1 * 2.38 - y2 * 2.38) ** 2)
def bad_angle_short_pass(obs, player_x, player_y):
def environment_fits(obs, player_x, player_y):
if ((abs(player_y) > 0.15 and player_x > 0.85) or
(player_x > 0.7 and player_y > 0.07 and obs["left_team_direction"][obs["active"]][1] > 0) or
(player_x > 0.7 and player_y < -0.07 and obs["left_team_direction"][obs["active"]][1] < 0)):
return True
return False
def get_action(obs, player_x, player_y):
if player_y > 0:
if Action.Top not in obs["sticky_actions"]:
return Action.Top
else:
if Action.Bottom not in obs["sticky_actions"]:
return Action.Bottom
if Action.Sprint in obs["sticky_actions"]:
return Action.ReleaseSprint
return Action.ShortPass
return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_goalkeeper_shot(obs, player_x, player_y):
def environment_fits(obs, player_x, player_y):
goalkeeper_x = obs["right_team"][0][0] + obs["right_team_direction"][0][0] * 13
goalkeeper_y = obs["right_team"][0][1] + obs["right_team_direction"][0][1] * 13
# player located close to the goalkeeper
if get_distance(player_x, player_y, goalkeeper_x, goalkeeper_y) < 0.3:
return True
return False
def get_action(obs, player_x, player_y):
if player_y > 0 and player_y < 0.03:
if Action.BottomRight not in obs["sticky_actions"]:
return Action.BottomRight
if player_y < 0 and player_y > -0.03:
if Action.TopRight not in obs["sticky_actions"]:
return Action.TopRight
if Action.Sprint in obs["sticky_actions"]:
return Action.ReleaseSprint
return Action.Shot
return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_opponent_short_pass(obs, player_x, player_y):
    """Memory pattern: short pass to a freer, better-placed teammate when an
    opponent is very close (< 0.03).

    Fixes two defects of the original implementation:
    - ``teammate_to_opponents`` was computed with the *player's* coordinates,
      so ``player_to_opponents[0] < teammate_to_opponents[0]`` compared a value
      with itself and could never be True, making the pattern dead code; it now
      uses the teammate's coordinates.
    - ``player_to_opponents`` is loop-invariant and is hoisted out of the
      teammate loop.
    """
    def environment_fits(obs, player_x, player_y):
        for i in range(1, len(obs["right_team"])):
            distance_to_opponent = get_distance(
                player_x, player_y, obs["right_team"][i][0], obs["right_team"][i][1])
            if distance_to_opponent < 0.03:
                # Average distance from the player to opponents (loop-invariant).
                player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)
                for j in range(1, len(obs["left_team"])):
                    teammate_x = obs["left_team"][j][0]
                    teammate_y = obs["left_team"][j][1]
                    teammate_to_opponents = get_average_distance_to_opponents(
                        obs, teammate_x, teammate_y)
                    distance_to_teammate = get_distance(player_x, player_y, teammate_x, teammate_y)
                    # Teammate must be close enough for a short pass, be less
                    # pressed than the player, and the pass must be legal.
                    if (distance_to_teammate < 0.2 and
                            player_to_opponents[0] < teammate_to_opponents[0] and
                            check_legal_move(obs, player_x, player_y)):
                        teammate_distance_to_goal = get_distance(teammate_x, teammate_y, 1, 0)
                        player_distance_to_goal = get_distance(player_x, player_y, 1, 0)
                        if teammate_distance_to_goal < player_distance_to_goal:
                            return True
                # Only the first sufficiently close opponent is considered.
                break
        return False
    def get_action(obs, player_x, player_y):
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_opponent_long_pass(obs, player_x, player_y):
    """Memory pattern: long pass to a clearly better-placed teammate when an
    opponent is near (0.03 < distance < 0.06).

    Fixes the same defects as in close_to_opponent_short_pass: the teammate's
    average distance to opponents was computed with the player's own
    coordinates (so the freedom comparison could never be True), and the
    player's loop-invariant value was recomputed per teammate.
    """
    def environment_fits(obs, player_x, player_y):
        for i in range(1, len(obs["right_team"])):
            distance_to_opponent = get_distance(
                player_x, player_y, obs["right_team"][i][0], obs["right_team"][i][1])
            if 0.03 < distance_to_opponent < 0.06:
                player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)
                for j in range(1, len(obs["left_team"])):
                    teammate_x = obs["left_team"][j][0]
                    teammate_y = obs["left_team"][j][1]
                    teammate_to_opponents = get_average_distance_to_opponents(
                        obs, teammate_x, teammate_y)
                    distance_to_teammate = get_distance(player_x, player_y, teammate_x, teammate_y)
                    # Mid-range teammate, freer than the player, legal pass.
                    if (0.2 < distance_to_teammate < 0.4 and
                            player_to_opponents[0] < teammate_to_opponents[0] and
                            check_legal_move(obs, player_x, player_y)):
                        teammate_distance_to_goal = get_distance(teammate_x, teammate_y, 1, 0)
                        player_distance_to_goal = get_distance(player_x, player_y, 1, 0)
                        # Teammate must be noticeably closer to the goal.
                        if teammate_distance_to_goal < player_distance_to_goal - 1/3:
                            return True
                # Only the first opponent in range is considered.
                break
        return False
    def get_action(obs, player_x, player_y):
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.LongPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def close_to_opponent_high_pass(obs, player_x, player_y):
    """Memory pattern: high pass to a distant, much better-placed teammate when
    an opponent is at medium range (0.06 < distance < 0.09).

    Fixes the same defects as in close_to_opponent_short_pass: the teammate's
    average distance to opponents was computed with the player's own
    coordinates (dead comparison), and the player's loop-invariant value was
    recomputed inside the teammate loop.
    """
    def environment_fits(obs, player_x, player_y):
        for i in range(1, len(obs["right_team"])):
            distance_to_opponent = get_distance(
                player_x, player_y, obs["right_team"][i][0], obs["right_team"][i][1])
            if 0.06 < distance_to_opponent < 0.09:
                player_to_opponents = get_average_distance_to_opponents(obs, player_x, player_y)
                for j in range(1, len(obs["left_team"])):
                    teammate_x = obs["left_team"][j][0]
                    teammate_y = obs["left_team"][j][1]
                    teammate_to_opponents = get_average_distance_to_opponents(
                        obs, teammate_x, teammate_y)
                    distance_to_teammate = get_distance(player_x, player_y, teammate_x, teammate_y)
                    # Far teammate, freer than the player, legal pass.
                    if (0.4 < distance_to_teammate < 0.6 and
                            player_to_opponents[0] < teammate_to_opponents[0] and
                            check_legal_move(obs, player_x, player_y)):
                        teammate_distance_to_goal = get_distance(teammate_x, teammate_y, 1, 0)
                        player_distance_to_goal = get_distance(player_x, player_y, 1, 0)
                        # Teammate must be much closer to the goal.
                        if teammate_distance_to_goal < player_distance_to_goal - 2/3:
                            return True
                # Only the first opponent in range is considered.
                break
        return False
    def get_action(obs, player_x, player_y):
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
#%%
def far_from_goal_shot(obs, player_x, player_y):
    """Memory pattern: clear the ball with a shot when deep in our own half or
    when the controlled player is the goalkeeper."""
    def environment_fits(obs, player_x, player_y):
        return player_x < -0.6 or obs["ball_owned_player"] == 0
    def get_action(obs, player_x, player_y):
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.Shot
    return {"environment_fits": environment_fits, "get_action": get_action}
def far_from_goal_high_pass(obs, player_x, player_y):
    """Memory pattern: launch a forward high pass when far from the opponent
    goal or when the controlled player is the goalkeeper."""
    def environment_fits(obs, player_x, player_y):
        return player_x < -0.3 or obs["ball_owned_player"] == 0
    def get_action(obs, player_x, player_y):
        # Face the opponent goal before kicking.
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        if Action.Sprint in obs["sticky_actions"]:
            return Action.ReleaseSprint
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def go_through_opponents(obs, player_x, player_y):
    """Memory pattern: dribble toward goal along the safest right-ward direction
    (right / top-right / bottom-right), or clear the ball when surrounded."""
    def environment_fits(obs, player_x, player_y):
        # Default candidate: straight right.
        best_distance, best_amount = get_average_distance_to_opponents(
            obs, player_x + 0.01, player_y)
        obs["memory_patterns"]["go_around_opponent"] = Action.Right
        # Candidate: top right.
        top_right, amount = get_average_distance_to_opponents(
            obs, player_x + 0.01, player_y - 0.01)
        if (top_right > best_distance and player_y > -0.15) or (top_right == 2 and player_y > 0.07):
            best_distance, best_amount = top_right, amount
            obs["memory_patterns"]["go_around_opponent"] = Action.TopRight
        # Candidate: bottom right.
        bottom_right, amount = get_average_distance_to_opponents(
            obs, player_x + 0.01, player_y + 0.01)
        if (bottom_right > best_distance and player_y < 0.15) or (bottom_right == 2 and player_y < -0.07):
            best_distance, best_amount = bottom_right, amount
            obs["memory_patterns"]["go_around_opponent"] = Action.BottomRight
        # Remember whether three or more opponents crowd the chosen direction.
        obs["memory_patterns"]["go_around_opponent_surrounded"] = best_amount >= 3
        return True
    def get_action(obs, player_x, player_y):
        if obs["memory_patterns"]["go_around_opponent_surrounded"]:
            # No safe dribbling lane: clear the ball forward instead.
            return Action.HighPass
        if Action.Sprint not in obs["sticky_actions"]:
            return Action.Sprint
        return obs["memory_patterns"]["go_around_opponent"]
    return {"environment_fits": environment_fits, "get_action": get_action}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
def khorne_slide(obs, berzerker_x, berzerker_y):
    """Memory pattern: sliding tackle on the ball carrier ("prey") while the
    controlled player ("berzerker") is closing in on him.

    Change vs. original: removed the unused local ``prey_x_direction``.
    """
    def environment_fits(obs, berzerker_x, berzerker_y):
        # Only relevant while the right team possesses the ball.
        if obs["ball_owned_team"] == 1:
            prey_x = obs["right_team"][obs["ball_owned_player"]][0]
            prey_y = obs["right_team"][obs["ball_owned_player"]][1]
            # Count our players that are (by x) goal-side of the prey.
            players_amount = 0
            for i in range(1, len(obs["left_team"])):
                if obs["left_team"][i][0] < prey_x:
                    players_amount += 1
            # One-step extrapolation of both players' positions.
            future_prey_x = prey_x + obs["right_team_direction"][obs["ball_owned_player"]][0]
            future_prey_y = prey_y + obs["right_team_direction"][obs["ball_owned_player"]][1]
            future_berzerker_x = berzerker_x + obs["left_team_direction"][obs["active"]][0]
            future_berzerker_y = berzerker_y + obs["left_team_direction"][obs["active"]][1]
            distance_to_prey = get_distance(berzerker_x, berzerker_y, prey_x, prey_y)
            future_distance_to_prey = get_distance(
                future_berzerker_x, future_berzerker_y, future_prey_x, future_prey_y)
            # Slide only away from our own penalty area, when not too many of
            # our players are already goal-side, and while actually closing in.
            if ((berzerker_x > -0.65 or abs(berzerker_y) > 0.3) and
                    players_amount <= 7 and
                    future_distance_to_prey < 0.015 and
                    distance_to_prey > future_distance_to_prey):
                return True
        return False
    def get_action(obs, berzerker_x, berzerker_y):
        return Action.Slide
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom(obs, player_x, player_y):
    """Memory pattern: run straight down when the ball is directly below."""
    def environment_fits(obs, player_x, player_y):
        # Ball below the player and roughly at the same x coordinate.
        return obs["ball"][1] > player_y and abs(obs["ball"][0] - player_x) < 0.01
    def get_action(obs, player_x, player_y):
        return Action.Bottom
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom_left(obs, player_x, player_y):
    """Memory pattern: run bottom-left when the ball is below and to the left."""
    def environment_fits(obs, player_x, player_y):
        return obs["ball"][0] < player_x and obs["ball"][1] > player_y
    def get_action(obs, player_x, player_y):
        return Action.BottomLeft
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_bottom_right(obs, player_x, player_y):
    """Memory pattern: run bottom-right when the ball is below and to the right."""
    def environment_fits(obs, player_x, player_y):
        return obs["ball"][0] > player_x and obs["ball"][1] > player_y
    def get_action(obs, player_x, player_y):
        return Action.BottomRight
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_left(obs, player_x, player_y):
    """Memory pattern: run straight left when the ball is directly to the left."""
    def environment_fits(obs, player_x, player_y):
        # Ball to the left and roughly at the same y coordinate.
        return obs["ball"][0] < player_x and abs(obs["ball"][1] - player_y) < 0.01
    def get_action(obs, player_x, player_y):
        return Action.Left
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_right(obs, player_x, player_y):
    """Memory pattern: run straight right when the ball is directly to the right."""
    def environment_fits(obs, player_x, player_y):
        # Ball to the right and roughly at the same y coordinate.
        return obs["ball"][0] > player_x and abs(obs["ball"][1] - player_y) < 0.01
    def get_action(obs, player_x, player_y):
        return Action.Right
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_top(obs, player_x, player_y):
    """Memory pattern: run straight up when the ball is directly above."""
    def environment_fits(obs, player_x, player_y):
        # Ball above the player and roughly at the same x coordinate.
        return obs["ball"][1] < player_y and abs(obs["ball"][0] - player_x) < 0.01
    def get_action(obs, player_x, player_y):
        return Action.Top
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_top_left(obs, player_x, player_y):
    """Memory pattern: run top-left when the ball is above and to the left."""
    def environment_fits(obs, player_x, player_y):
        return obs["ball"][0] < player_x and obs["ball"][1] < player_y
    def get_action(obs, player_x, player_y):
        return Action.TopLeft
    return {"environment_fits": environment_fits, "get_action": get_action}
def run_to_ball_top_right(obs, player_x, player_y):
    """Memory pattern: run top-right when the ball is above and to the right."""
    def environment_fits(obs, player_x, player_y):
        return obs["ball"][0] > player_x and obs["ball"][1] < player_y
    def get_action(obs, player_x, player_y):
        return Action.TopRight
    return {"environment_fits": environment_fits, "get_action": get_action}
def idle(obs, player_x, player_y):
    """Fallback memory pattern that always fits. Despite the name it does not
    stand still: it clears the ball with a high pass."""
    def environment_fits(obs, player_x, player_y):
        return True
    def get_action(obs, player_x, player_y):
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def start_sprinting(obs, player_x, player_y):
    """Memory pattern: enable sprint whenever it is not already active."""
    def environment_fits(obs, player_x, player_y):
        return Action.Sprint not in obs["sticky_actions"]
    def get_action(obs, player_x, player_y):
        return Action.Sprint
    return {"environment_fits": environment_fits, "get_action": get_action}
def corner(obs, player_x, player_y):
    """Memory pattern for corner kicks: aim diagonally toward goal, then shoot."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] == GameMode.Corner
    def get_action(obs, player_x, player_y):
        # Aim toward the goal: up-right from the bottom corner, down-right from the top.
        aim = Action.TopRight if player_y > 0 else Action.BottomRight
        if aim not in obs["sticky_actions"]:
            return aim
        return Action.Shot
    return {"environment_fits": environment_fits, "get_action": get_action}
def free_kick(obs, player_x, player_y):
    """Memory pattern for free kicks: shoot from close range, otherwise launch
    a forward high pass."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] == GameMode.FreeKick
    def get_action(obs, player_x, player_y):
        if player_x > 0.5:
            # Close to the goal: aim diagonally at it and shoot.
            aim = Action.TopRight if player_y > 0 else Action.BottomRight
            if aim not in obs["sticky_actions"]:
                return aim
            return Action.Shot
        # Far from the goal: face it and clear forward.
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        return Action.HighPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def goal_kick(obs, player_x, player_y):
    """Memory pattern for goal kicks: pick a random forward diagonal, then pass short."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] == GameMode.GoalKick
    def get_action(obs, player_x, player_y):
        # Coin flip for the top-right diagonal, but only before any diagonal is set.
        if (random.random() < 0.5 and
                Action.TopRight not in obs["sticky_actions"] and
                Action.BottomRight not in obs["sticky_actions"]):
            return Action.TopRight
        if Action.BottomRight not in obs["sticky_actions"]:
            return Action.BottomRight
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def kick_off(obs, player_x, player_y):
    """Memory pattern for kick-offs: turn toward the center line, then pass short."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] == GameMode.KickOff
    def get_action(obs, player_x, player_y):
        # Face toward the horizontal axis before kicking.
        aim = Action.Top if player_y > 0 else Action.Bottom
        if aim not in obs["sticky_actions"]:
            return aim
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def penalty(obs, player_x, player_y):
    """Memory pattern for penalties: pick a random corner, then shoot."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] == GameMode.Penalty
    def get_action(obs, player_x, player_y):
        # Coin flip for the top corner, but only before any diagonal is set.
        if (random.random() < 0.5 and
                Action.TopRight not in obs["sticky_actions"] and
                Action.BottomRight not in obs["sticky_actions"]):
            return Action.TopRight
        if Action.BottomRight not in obs["sticky_actions"]:
            return Action.BottomRight
        return Action.Shot
    return {"environment_fits": environment_fits, "get_action": get_action}
def throw_in(obs, player_x, player_y):
    """Memory pattern for throw-ins: face forward, then pass short."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] == GameMode.ThrowIn
    def get_action(obs, player_x, player_y):
        if Action.Right not in obs["sticky_actions"]:
            return Action.Right
        return Action.ShortPass
    return {"environment_fits": environment_fits, "get_action": get_action}
def defence_memory_patterns(obs, player_x, player_y):
    """Group of memory patterns used while our team does not own the ball."""
    def environment_fits(obs, player_x, player_y):
        return obs["ball_owned_team"] != 0
    def get_memory_patterns(obs, player_x, player_y):
        # Chase a projected future ball position instead of the current one.
        obs["ball"][0] += obs["ball_direction"][0] * 7
        obs["ball"][1] += obs["ball_direction"][1] * 3
        # When an opponent carries the ball away from the horizontal axis,
        # aim slightly behind him and toward the middle of the pitch.
        if abs(obs["ball"][1]) > 0.07 and obs["ball_owned_team"] == 1:
            obs["ball"][0] -= 0.01
            obs["ball"][1] += -0.01 if obs["ball"][1] > 0 else 0.01
        return [
            start_sprinting,
            khorne_slide,
            run_to_ball_right,
            run_to_ball_left,
            run_to_ball_bottom,
            run_to_ball_top,
            run_to_ball_top_right,
            run_to_ball_top_left,
            run_to_ball_bottom_right,
            run_to_ball_bottom_left,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def goalkeeper_memory_patterns(obs, player_x, player_y):
    """Group of memory patterns for when our goalkeeper owns the ball."""
    def environment_fits(obs, player_x, player_y):
        # Active player is our keeper (index 0) and he has the ball.
        return (obs["ball_owned_player"] == obs["active"] and
                obs["ball_owned_team"] == 0 and
                obs["ball_owned_player"] == 0)
    def get_memory_patterns(obs, player_x, player_y):
        return [
            close_to_opponent_short_pass,
            close_to_opponent_long_pass,
            close_to_opponent_high_pass,
            far_from_goal_high_pass,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
#%%
def offence_memory_patterns(obs, player_x, player_y):
    """Group of memory patterns for when the active player owns the ball."""
    def environment_fits(obs, player_x, player_y):
        return obs["ball_owned_player"] == obs["active"] and obs["ball_owned_team"] == 0
    def get_memory_patterns(obs, player_x, player_y):
        return [
            far_from_goal_shot,
            far_from_goal_high_pass,
            bad_angle_short_pass,
            close_to_goalkeeper_shot,
            close_to_opponent_short_pass,
            close_to_opponent_long_pass,
            go_through_opponents,
            start_sprinting,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def other_memory_patterns(obs, player_x, player_y):
    """Catch-all group: always fits and only offers the idle fallback pattern."""
    def environment_fits(obs, player_x, player_y):
        return True
    def get_memory_patterns(obs, player_x, player_y):
        return [idle]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
def special_game_modes_memory_patterns(obs, player_x, player_y):
    """Group of memory patterns for every non-normal game mode (set pieces)."""
    def environment_fits(obs, player_x, player_y):
        return obs["game_mode"] != GameMode.Normal
    def get_memory_patterns(obs, player_x, player_y):
        return [
            corner,
            free_kick,
            goal_kick,
            kick_off,
            penalty,
            throw_in,
            idle,
        ]
    return {"environment_fits": environment_fits, "get_memory_patterns": get_memory_patterns}
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
# list of groups of memory patterns
# List of groups of memory patterns. Groups appear in priority order —
# presumably the first group whose environment_fits() accepts the current
# observation supplies this step's patterns (see get_action_of_agent) —
# TODO confirm against get_action_of_agent.
groups_of_memory_patterns = [
    special_game_modes_memory_patterns,
    goalkeeper_memory_patterns,
    offence_memory_patterns,
    defence_memory_patterns,
    other_memory_patterns
]
# "%%writefile -a submission.py" will append the code below to submission.py,
# it WILL NOT rewrite submission.py
# @human_readable_agent wrapper modifies raw observations
# provided by the environment:
# https://github.com/google-research/football/blob/master/gfootball/doc/observation.md#raw-observations
# into a form easier to work with by humans.
# Following modifications are applied:
# - Action, PlayerRole and GameMode enums are introduced.
# - 'sticky_actions' are turned into a set of active actions (Action enum)
# see usage example below.
# - 'game_mode' is turned into GameMode enum.
# - 'designated' field is removed, as it always equals to 'active'
# when a single player is controlled on the team.
# - 'left_team_roles'/'right_team_roles' are turned into PlayerRole enums.
# - Action enum is to be returned by the agent function.
@human_readable_agent
def agent(obs):
    """Entry point: choose an action for the controlled left-team player."""
    # Scratch space shared between memory patterns within this step.
    obs["memory_patterns"] = {}
    # We always control the left team (observations and actions are
    # mirrored appropriately by the environment).
    pos = obs["left_team"][obs["active"]]
    return get_action_of_agent(obs, pos[0], pos[1])
| true
| true
|
1c425380f5523969791e6ad7dabba1dbd1fdcea1
| 370
|
py
|
Python
|
src/seirsplus/__init__.py
|
SEIRS-Plus/v2
|
3adc155400deaa4093e523ae81d2a25989888654
|
[
"MIT"
] | 1
|
2022-03-04T08:05:58.000Z
|
2022-03-04T08:05:58.000Z
|
src/seirsplus/__init__.py
|
SEIRS-Plus/v2
|
3adc155400deaa4093e523ae81d2a25989888654
|
[
"MIT"
] | null | null | null |
src/seirsplus/__init__.py
|
SEIRS-Plus/v2
|
3adc155400deaa4093e523ae81d2a25989888654
|
[
"MIT"
] | null | null | null |
# Standard Libraries
try:
    from importlib import metadata
except ImportError:
    # Try backported to PY<37 `importlib_metadata`.
    import importlib_metadata as metadata
# Internal Libraries
from .dev_tools.logging_utils import _configure_seirsplus_loggers

# Wire up the package-wide logging configuration at import time.
_configure_seirsplus_loggers(root_module_name=__name__)
# Single source of truth for the version: read it from the installed
# distribution's metadata rather than hard-coding it here.
__version__ = metadata.version(__package__)
# No names are re-exported from the package root.
__all__ = []
| 23.125
| 65
| 0.832432
|
try:
from importlib import metadata
except ImportError:
import importlib_metadata as metadata
from .dev_tools.logging_utils import _configure_seirsplus_loggers
_configure_seirsplus_loggers(root_module_name=__name__)
__version__ = metadata.version(__package__)
__all__ = []
| true
| true
|
1c4253d90edbc6466778a935a566ed598ee8df09
| 4,649
|
py
|
Python
|
mainwindow/theme.py
|
philliphqs/Safey
|
a60a4f379083373c4615e51f09e70a4fd4e68ef3
|
[
"MIT"
] | 2
|
2020-11-13T20:02:53.000Z
|
2021-06-27T16:05:18.000Z
|
mainwindow/theme.py
|
philliphqs/Safey
|
a60a4f379083373c4615e51f09e70a4fd4e68ef3
|
[
"MIT"
] | null | null | null |
mainwindow/theme.py
|
philliphqs/Safey
|
a60a4f379083373c4615e51f09e70a4fd4e68ef3
|
[
"MIT"
] | null | null | null |
from dearpygui.core import *
from assets import properties
import json
def main():
    # Configure the application window: title from product metadata, fixed size.
    set_main_window_title(f'{properties.ProductName}')
    set_main_window_size(1308, 759)
def check_theme_on_startup():
    """Read the saved theme name from user settings and apply it.

    Any of 'Dark', 'dark', 'Default', 'default' or 'None' selects the built-in
    DefaultTheme(); any other name is forwarded to dearpygui's set_theme(),
    falling back to DefaultTheme() on failure.

    Changes vs. original: the five always-equivalent if/elif branches are
    collapsed into one membership test, the settings file is closed before the
    theme is applied, and the bare ``except`` is narrowed to ``Exception``
    (the best-effort fallback behavior is preserved).
    """
    with open('user/settings.hqs', 'r') as settings:
        theme = json.load(settings)
    name = theme["theme"]
    if name in ('Dark', 'dark', 'Default', 'default', 'None'):
        DefaultTheme()
    else:
        try:
            set_theme(name)
            style()
        except Exception:
            DefaultTheme()
def style():
    """Apply the application's font and all dearpygui style metrics."""
    # Custom font with extra glyph ranges (accented Latin characters).
    add_additional_font(properties.DefaultFont, 15, custom_glyph_chars=[[0x00C1, 0x00C3], [0x00C4, 0x00C8]])
    # Padding and spacing.
    set_style_window_padding(8.00, 8.00)
    set_style_frame_padding(4.00, 3.00)
    set_style_item_spacing(8.00, 4.00)
    set_style_item_inner_spacing(4.00, 4.00)
    set_style_touch_extra_padding(0.00, 0.00)
    set_style_indent_spacing(21.00)
    set_style_scrollbar_size(14.00)
    set_style_grab_min_size(10.00)
    # Border widths.
    set_style_window_border_size(1.00)
    set_style_child_border_size(1.00)
    set_style_popup_border_size(1.00)
    set_style_frame_border_size(0.00)
    set_style_tab_border_size(0.00)
    # Corner rounding.
    set_style_window_rounding(7.00)
    set_style_child_rounding(0.00)
    set_style_frame_rounding(2.00)
    set_style_popup_rounding(0.00)
    set_style_scrollbar_rounding(9.00)
    set_style_grab_rounding(2.00)
    set_style_tab_rounding(4.00)
    # Alignment and widget placement.
    set_style_window_title_align(0.00, 0.50)
    set_style_window_menu_button_position(mvDir_Left)
    set_style_color_button_position(mvDir_Right)
    set_style_button_text_align(0.50, 0.50)
    set_style_selectable_text_align(0.00, 0.00)
    set_style_display_safe_area_padding(3.00, 3.00)
    # Rendering quality.
    set_style_global_alpha(1.00)
    set_style_antialiased_lines(True)
    set_style_antialiased_fill(True)
    set_style_curve_tessellation_tolerance(1.25)
    set_style_circle_segment_max_error(1.60)
def colors():
    """Apply the dark color palette; all values are RGBA (0-255)."""
    # Text.
    set_theme_item(mvGuiCol_Text, 255, 255, 255, 255)
    set_theme_item(mvGuiCol_TextDisabled, 128, 128, 128, 255)
    # Window backgrounds and borders.
    set_theme_item(mvGuiCol_WindowBg, 15, 15, 15, 240)
    set_theme_item(mvGuiCol_PopupBg, 20, 20, 20, 240)
    set_theme_item(mvGuiCol_Border, 110, 110, 128, 128)
    # Frames (input widgets).
    set_theme_item(mvGuiCol_FrameBg, 49, 49, 49, 138)
    set_theme_item(mvGuiCol_FrameBgHovered, 41, 41, 41, 138)
    set_theme_item(mvGuiCol_FrameBgActive, 37, 36, 36, 138)
    # Title and menu bars.
    set_theme_item(mvGuiCol_TitleBg, 10, 10, 10, 255)
    set_theme_item(mvGuiCol_TitleBgActive, 23, 23, 23, 255)
    set_theme_item(mvGuiCol_TitleBgCollapsed, 0, 0, 0, 130)
    set_theme_item(mvGuiCol_MenuBarBg, 36, 36, 36, 255)
    # Scrollbars.
    set_theme_item(mvGuiCol_ScrollbarBg, 5, 5, 5, 135)
    set_theme_item(mvGuiCol_ScrollbarGrab, 30, 30, 30, 255)
    set_theme_item(mvGuiCol_ScrollbarGrabHovered, 31, 31, 31, 255)
    set_theme_item(mvGuiCol_ScrollbarGrabActive, 32, 32, 32, 255)
    # Interactive widgets.
    set_theme_item(mvGuiCol_CheckMark, 139, 139, 139, 255)
    set_theme_item(mvGuiCol_SliderGrab, 86, 86, 86, 255)
    set_theme_item(mvGuiCol_SliderGrabActive, 52, 52, 52, 255)
    set_theme_item(mvGuiCol_Button, 114, 114, 114, 102)
    set_theme_item(mvGuiCol_ButtonHovered, 99, 99, 99, 102)
    set_theme_item(mvGuiCol_ButtonActive, 85, 85, 85, 102)
    # Headers (collapsing headers, selectables).
    set_theme_item(mvGuiCol_Header, 87, 87, 87, 79)
    set_theme_item(mvGuiCol_HeaderHovered, 70, 70, 70, 204)
    set_theme_item(mvGuiCol_HeaderActive, 62, 62, 62, 255)
    # Separators and resize grips.
    set_theme_item(mvGuiCol_Separator, 110, 110, 128, 128)
    set_theme_item(mvGuiCol_SeparatorHovered, 89, 89, 89, 199)
    set_theme_item(mvGuiCol_SeparatorActive, 62, 62, 62, 255)
    set_theme_item(mvGuiCol_ResizeGrip, 130, 130, 130, 64)
    set_theme_item(mvGuiCol_ResizeGripHovered, 99, 99, 99, 171)
    set_theme_item(mvGuiCol_ResizeGripActive, 113, 113, 113, 242)
    # Tabs.
    set_theme_item(mvGuiCol_Tab, 56, 56, 56, 220)
    set_theme_item(mvGuiCol_TabHovered, 73, 73, 73, 204)
    set_theme_item(mvGuiCol_TabActive, 82, 82, 82, 255)
    set_theme_item(mvGuiCol_TabUnfocused, 17, 26, 38, 248)
    set_theme_item(mvGuiCol_TabUnfocusedActive, 35, 67, 108, 255)
    # Plots, selection and overlays.
    set_theme_item(mvGuiCol_PlotLines, 156, 156, 156, 255)
    set_theme_item(mvGuiCol_PlotLinesHovered, 255, 110, 89, 255)
    set_theme_item(mvGuiCol_TextSelectedBg, 66, 150, 250, 89)
    set_theme_item(mvGuiCol_NavHighlight, 66, 150, 250, 255)
    set_theme_item(mvGuiCol_ModalWindowDimBg, 204, 204, 204, 89)
def DefaultTheme():
    # Apply the built-in dark theme: style metrics plus the color palette.
    style()
    colors()
| 41.141593
| 108
| 0.718434
|
from dearpygui.core import *
from assets import properties
import json
def main():
set_main_window_title(f'{properties.ProductName}')
set_main_window_size(1308, 759)
def check_theme_on_startup():
with open('user/settings.hqs', 'r') as settings:
theme = json.load(settings)
if theme["theme"] == 'Dark':
DefaultTheme()
elif theme["theme"] == 'dark':
DefaultTheme()
elif theme["theme"] == 'Default':
DefaultTheme()
elif theme["theme"] == 'default':
DefaultTheme()
elif theme["theme"] == 'None':
DefaultTheme()
else:
try:
set_theme(theme["theme"])
style()
except:
DefaultTheme()
def style():
add_additional_font(properties.DefaultFont, 15, custom_glyph_chars=[[0x00C1, 0x00C3], [0x00C4, 0x00C8]])
set_style_window_padding(8.00, 8.00)
set_style_frame_padding(4.00, 3.00)
set_style_item_spacing(8.00, 4.00)
set_style_item_inner_spacing(4.00, 4.00)
set_style_touch_extra_padding(0.00, 0.00)
set_style_indent_spacing(21.00)
set_style_scrollbar_size(14.00)
set_style_grab_min_size(10.00)
set_style_window_border_size(1.00)
set_style_child_border_size(1.00)
set_style_popup_border_size(1.00)
set_style_frame_border_size(0.00)
set_style_tab_border_size(0.00)
set_style_window_rounding(7.00)
set_style_child_rounding(0.00)
set_style_frame_rounding(2.00)
set_style_popup_rounding(0.00)
set_style_scrollbar_rounding(9.00)
set_style_grab_rounding(2.00)
set_style_tab_rounding(4.00)
set_style_window_title_align(0.00, 0.50)
set_style_window_menu_button_position(mvDir_Left)
set_style_color_button_position(mvDir_Right)
set_style_button_text_align(0.50, 0.50)
set_style_selectable_text_align(0.00, 0.00)
set_style_display_safe_area_padding(3.00, 3.00)
set_style_global_alpha(1.00)
set_style_antialiased_lines(True)
set_style_antialiased_fill(True)
set_style_curve_tessellation_tolerance(1.25)
set_style_circle_segment_max_error(1.60)
def colors():
set_theme_item(mvGuiCol_Text, 255, 255, 255, 255)
set_theme_item(mvGuiCol_TextDisabled, 128, 128, 128, 255)
set_theme_item(mvGuiCol_WindowBg, 15, 15, 15, 240)
set_theme_item(mvGuiCol_PopupBg, 20, 20, 20, 240)
set_theme_item(mvGuiCol_Border, 110, 110, 128, 128)
set_theme_item(mvGuiCol_FrameBg, 49, 49, 49, 138)
set_theme_item(mvGuiCol_FrameBgHovered, 41, 41, 41, 138)
set_theme_item(mvGuiCol_FrameBgActive, 37, 36, 36, 138)
set_theme_item(mvGuiCol_TitleBg, 10, 10, 10, 255)
set_theme_item(mvGuiCol_TitleBgActive, 23, 23, 23, 255)
set_theme_item(mvGuiCol_TitleBgCollapsed, 0, 0, 0, 130)
set_theme_item(mvGuiCol_MenuBarBg, 36, 36, 36, 255)
set_theme_item(mvGuiCol_ScrollbarBg, 5, 5, 5, 135)
set_theme_item(mvGuiCol_ScrollbarGrab, 30, 30, 30, 255)
set_theme_item(mvGuiCol_ScrollbarGrabHovered, 31, 31, 31, 255)
set_theme_item(mvGuiCol_ScrollbarGrabActive, 32, 32, 32, 255)
set_theme_item(mvGuiCol_CheckMark, 139, 139, 139, 255)
set_theme_item(mvGuiCol_SliderGrab, 86, 86, 86, 255)
set_theme_item(mvGuiCol_SliderGrabActive, 52, 52, 52, 255)
set_theme_item(mvGuiCol_Button, 114, 114, 114, 102)
set_theme_item(mvGuiCol_ButtonHovered, 99, 99, 99, 102)
set_theme_item(mvGuiCol_ButtonActive, 85, 85, 85, 102)
set_theme_item(mvGuiCol_Header, 87, 87, 87, 79)
set_theme_item(mvGuiCol_HeaderHovered, 70, 70, 70, 204)
set_theme_item(mvGuiCol_HeaderActive, 62, 62, 62, 255)
set_theme_item(mvGuiCol_Separator, 110, 110, 128, 128)
set_theme_item(mvGuiCol_SeparatorHovered, 89, 89, 89, 199)
set_theme_item(mvGuiCol_SeparatorActive, 62, 62, 62, 255)
set_theme_item(mvGuiCol_ResizeGrip, 130, 130, 130, 64)
set_theme_item(mvGuiCol_ResizeGripHovered, 99, 99, 99, 171)
set_theme_item(mvGuiCol_ResizeGripActive, 113, 113, 113, 242)
set_theme_item(mvGuiCol_Tab, 56, 56, 56, 220)
set_theme_item(mvGuiCol_TabHovered, 73, 73, 73, 204)
set_theme_item(mvGuiCol_TabActive, 82, 82, 82, 255)
set_theme_item(mvGuiCol_TabUnfocused, 17, 26, 38, 248)
set_theme_item(mvGuiCol_TabUnfocusedActive, 35, 67, 108, 255)
set_theme_item(mvGuiCol_PlotLines, 156, 156, 156, 255)
set_theme_item(mvGuiCol_PlotLinesHovered, 255, 110, 89, 255)
set_theme_item(mvGuiCol_TextSelectedBg, 66, 150, 250, 89)
set_theme_item(mvGuiCol_NavHighlight, 66, 150, 250, 255)
set_theme_item(mvGuiCol_ModalWindowDimBg, 204, 204, 204, 89)
def DefaultTheme():
style()
colors()
| true
| true
|
1c4254c676ab2e70c68aeec16b48c70c687f05e7
| 397
|
py
|
Python
|
PythonClub/PythonClub/wsgi.py
|
mmhansen137/itc172-redux
|
155fffa57fc6b8db6a9b55c95d1a1458921b8aaa
|
[
"CC0-1.0"
] | null | null | null |
PythonClub/PythonClub/wsgi.py
|
mmhansen137/itc172-redux
|
155fffa57fc6b8db6a9b55c95d1a1458921b8aaa
|
[
"CC0-1.0"
] | null | null | null |
PythonClub/PythonClub/wsgi.py
|
mmhansen137/itc172-redux
|
155fffa57fc6b8db6a9b55c95d1a1458921b8aaa
|
[
"CC0-1.0"
] | null | null | null |
"""
WSGI config for PythonClub project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PythonClub.settings')
application = get_wsgi_application()
| 23.352941
| 78
| 0.788413
|
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'PythonClub.settings')
application = get_wsgi_application()
| true
| true
|
1c4256799c51e59fbfabea4b7ab2f7ba3a5a7dfb
| 13,764
|
py
|
Python
|
micronet/compression/quantization/wqaq/iao/main.py
|
RiccardoRuggiero/micronet
|
bfdac2a50a5f0f8484a253b356c06a166bf7e6a0
|
[
"MIT"
] | null | null | null |
micronet/compression/quantization/wqaq/iao/main.py
|
RiccardoRuggiero/micronet
|
bfdac2a50a5f0f8484a253b356c06a166bf7e6a0
|
[
"MIT"
] | null | null | null |
micronet/compression/quantization/wqaq/iao/main.py
|
RiccardoRuggiero/micronet
|
bfdac2a50a5f0f8484a253b356c06a166bf7e6a0
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("../../../..")
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
import quantize
def setup_seed(seed):
    """Seed all relevant RNGs so training runs are reproducible.

    Seeds torch's CPU generator, every visible CUDA device, and numpy, and
    forces cuDNN into deterministic mode.

    Change vs. original: removed the commented-out ``torch.cuda.manual_seed``
    call — ``manual_seed_all`` already covers every GPU.

    Args:
        seed: integer seed applied to all generators.
    """
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)  # no-op when CUDA is unavailable
    np.random.seed(seed)
    torch.backends.cudnn.deterministic = True
def save_state(model, best_acc):
    """Persist the model's state_dict (plus the pruning cfg when applicable)
    under models_save/, with the file name derived from the model variant.

    Relies on module-level globals ``args`` and ``cfg`` set in __main__.
    """
    print('==> Saving model ...')
    state = {
        'best_acc': best_acc,
        'state_dict': model.state_dict(),
    }
    # Strip DataParallel's 'module.' prefix from parameter names.
    for key in list(state['state_dict'].keys()):
        if 'module' in key:
            state['state_dict'][key.replace('module.', '')] = \
                state['state_dict'].pop(key)
    # File name encodes model type (nin / nin_gc) and whether BN was fused.
    name = 'nin' if args.model_type == 0 else 'nin_gc'
    if args.bn_fuse == 1:
        name += '_bn_fused'
    path = 'models_save/{}.pth'.format(name)
    if args.prune_qat or args.qaft:
        # Pruned models additionally need their channel cfg to rebuild later.
        torch.save({'cfg': cfg, 'best_acc': best_acc,
                    'state_dict': state['state_dict']}, path)
    else:
        torch.save(state, path)
def adjust_learning_rate(optimizer, epoch):
    """Decay every param group's learning rate by 10x at milestone epochs."""
    milestones = [80, 130, 180, 230, 280]
    if epoch in milestones:
        for group in optimizer.param_groups:
            group['lr'] *= 0.1
def train(epoch):
    """Run one optimization pass over the training set.

    Relies on module-level globals: model, trainloader, criterion,
    optimizer and args (set up in the __main__ block).
    """
    model.train()
    for step, (inputs, labels) in enumerate(trainloader):
        if not args.cpu:
            inputs, labels = inputs.cuda(), labels.cuda()
        inputs, labels = Variable(inputs), Variable(labels)
        loss = criterion(model(inputs), labels)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if step % 100 == 0:
            # Periodic progress line: samples seen, loss and current LR.
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
                epoch, step * len(inputs), len(trainloader.dataset),
                100. * step / len(trainloader), loss.data.item(),
                optimizer.param_groups[0]['lr']))
def test():
global best_acc
model.eval()
test_loss = 0
correct = 0
for data, target in testloader:
if not args.cpu:
data, target = data.cuda(), target.cuda()
data, target = Variable(data), Variable(target)
output = model(data)
test_loss += criterion(output, target).data.item()
pred = output.data.max(1, keepdim=True)[1]
correct += pred.eq(target.data.view_as(pred)).cpu().sum()
acc = 100. * float(correct) / len(testloader.dataset)
if acc > best_acc:
best_acc = acc
save_state(model, best_acc)
average_test_loss = test_loss / (len(testloader.dataset) / args.eval_batch_size)
print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
average_test_loss, correct, len(testloader.dataset),
100. * float(correct) / len(testloader.dataset)))
print('Best Accuracy: {:.2f}%\n'.format(best_acc))
return
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--cpu', action='store_true',
help='set if only CPU is available')
parser.add_argument('--gpu_id', action='store', default='',
help='gpu_id')
parser.add_argument('--data', action='store', default='../../../../data',
help='dataset path')
parser.add_argument('--lr', action='store', default=0.01,
help='the intial learning rate')
parser.add_argument('--wd', action='store', default=1e-5,
help='the intial learning rate')
# prune_qat
parser.add_argument('--prune_qat', default='', type=str, metavar='PATH',
help='the path to the prune_qat model')
# refine
parser.add_argument('--refine', default='', type=str, metavar='PATH',
help='the path to the float_refine model')
# resume
parser.add_argument('--resume', default='', type=str, metavar='PATH',
help='the path to the resume model')
parser.add_argument('--train_batch_size', type=int, default=64)
parser.add_argument('--eval_batch_size', type=int, default=64)
parser.add_argument('--num_workers', type=int, default=2)
parser.add_argument('--start_epochs', type=int, default=1, metavar='N',
help='number of epochs to train_start')
parser.add_argument('--end_epochs', type=int, default=300, metavar='N',
help='number of epochs to train_end')
# W/A — bits
parser.add_argument('--w_bits', type=int, default=8)
parser.add_argument('--a_bits', type=int, default=8)
# bn融合标志位
parser.add_argument('--bn_fuse', type=int, default=0,
help='bn_fuse:1')
# 量化方法选择
parser.add_argument('--q_type', type=int, default=0,
help='quant_type:0-symmetric, 1-asymmetric')
# 量化级别选择
parser.add_argument('--q_level', type=int, default=0,
help='quant_level:0-per_channel, 1-per_layer')
# weight_observer选择
parser.add_argument('--weight_observer', type=int, default=0,
help='quant_weight_observer:0-MinMaxObserver, 1-MovingAverageMinMaxObserver')
# pretrained_model标志位
parser.add_argument('--pretrained_model', action='store_true',
help='pretrained_model')
# qaft标志位
parser.add_argument('--qaft', action='store_true',
help='quantization-aware-finetune')
# prune_qaft
parser.add_argument('--prune_qaft', default='', type=str, metavar='PATH',
help='the path to the prune_qaft model')
parser.add_argument('--model_type', type=int, default=1,
help='model type:0-nin,1-nin_gc')
args = parser.parse_args()
print('==> Options:', args)
if args.gpu_id:
os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
if not args.cpu:
device = 'cuda'
else:
device = 'cpu'
setup_seed(1)
print('==> Preparing data..')
transform_train = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
transform_test = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
trainset = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True,
transform=transform_train)
trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch_size,
shuffle=True, num_workers=args.num_workers)
testset = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True,
transform=transform_test)
testloader = torch.utils.data.DataLoader(testset, batch_size=args.eval_batch_size,
shuffle=False, num_workers=args.num_workers)
classes = ('plane', 'car', 'bird', 'cat', 'deer',
'dog', 'frog', 'horse', 'ship', 'truck')
if args.prune_qat:
print('******Prune QAT model******')
#checkpoint = torch.load('../prune/models_save/nin_refine.pth')
checkpoint = torch.load(args.prune_qat)
cfg = checkpoint['cfg']
if args.model_type == 0:
model = nin.Net(cfg=checkpoint['cfg'])
else:
model = nin_gc.Net(cfg=checkpoint['cfg'])
model.load_state_dict(checkpoint['state_dict'])
best_acc = 0
print('***ori_model***\n', model)
quantize.prepare(model, inplace=True, a_bits=args.a_bits,
w_bits=args.w_bits, q_type=args.q_type,
q_level=args.q_level, device=device,
weight_observer=args.weight_observer,
bn_fuse=args.bn_fuse,
pretrained_model=args.pretrained_model,
qaft=args.qaft)
print('\n***quant_model***\n', model)
elif args.prune_qaft:
print('******Prune QAFT model******')
#checkpoint = torch.load('models_save/nin_bn_fused.pth')
checkpoint = torch.load(args.prune_qaft)
cfg = checkpoint['cfg']
if args.model_type == 0:
model = nin.Net(cfg=checkpoint['cfg'])
else:
model = nin_gc.Net(cfg=checkpoint['cfg'])
print('***ori_model***\n', model)
quantize.prepare(model, inplace=True, a_bits=args.a_bits,
w_bits=args.w_bits, q_type=args.q_type,
q_level=args.q_level, device=device,
weight_observer=args.weight_observer,
bn_fuse=args.bn_fuse,
pretrained_model=args.pretrained_model,
qaft=args.qaft)
print('\n***quant_model***\n', model)
model.load_state_dict(checkpoint['state_dict'])
best_acc = checkpoint['best_acc']
elif args.refine:
print('******Float Refine model******')
#checkpoint = torch.load('models_save/nin.pth')
state_dict = torch.load(args.refine)
if args.model_type == 0:
model = nin.Net()
else:
model = nin_gc.Net()
model.load_state_dict(state_dict)
best_acc = 0
print('***ori_model***\n', model)
quantize.prepare(model, inplace=True, a_bits=args.a_bits,
w_bits=args.w_bits, q_type=args.q_type,
q_level=args.q_level, device=device,
weight_observer=args.weight_observer,
bn_fuse=args.bn_fuse,
pretrained_model=args.pretrained_model,
qaft=args.qaft)
print('\n***quant_model***\n', model)
elif args.resume:
print('******Reume model******')
#checkpoint = torch.load('models_save/nin.pth')
checkpoint = torch.load(args.resume)
if args.model_type == 0:
model = nin.Net()
else:
model = nin_gc.Net()
print('***ori_model***\n', model)
quantize.prepare(model, inplace=True, a_bits=args.a_bits,
w_bits=args.w_bits, q_type=args.q_type,
q_level=args.q_level, device=device,
weight_observer=args.weight_observer,
bn_fuse=args.bn_fuse,
pretrained_model=args.pretrained_model,
qaft=args.qaft)
print('\n***quant_model***\n', model)
model.load_state_dict(checkpoint['state_dict'])
best_acc = checkpoint['best_acc']
else:
print('******Initializing model******')
if args.model_type == 0:
model = nin.Net()
else:
model = nin_gc.Net()
best_acc = 0
for m in model.modules():
if isinstance(m, nn.Conv2d):
init.xavier_uniform_(m.weight)
if m.bias is not None:
init.zeros_(m.bias)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, 0, 0.01)
if m.bias is not None:
init.zeros_(m.bias)
print('***ori_model***\n', model)
quantize.prepare(model, inplace=True, a_bits=args.a_bits,
w_bits=args.w_bits, q_type=args.q_type,
q_level=args.q_level, device=device,
weight_observer=args.weight_observer,
bn_fuse=args.bn_fuse,
pretrained_model=args.pretrained_model,
qaft=args.qaft)
print('\n***quant_model***\n', model)
if not args.cpu:
model.cuda()
model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
base_lr = float(args.lr)
param_dict = dict(model.named_parameters())
params = []
for key, value in param_dict.items():
params += [{'params': [value], 'lr': base_lr, 'weight_decay':args.wd}]
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(params, lr=base_lr, weight_decay=args.wd)
for epoch in range(args.start_epochs, args.end_epochs):
adjust_learning_rate(optimizer, epoch)
train(epoch)
test()
| 40.60177
| 101
| 0.56873
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
sys.path.append("../../../..")
import math
import os
import argparse
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.autograd import Variable
from torch.nn import init
from models import nin_gc, nin
import quantize
def setup_seed(seed):
    """Seed every RNG in use (torch CPU, torch CUDA, numpy) and force
    deterministic cuDNN kernels so training runs are reproducible."""
    for seeder in (torch.manual_seed, torch.cuda.manual_seed_all, np.random.seed):
        seeder(seed)
    torch.backends.cudnn.deterministic = True
def save_state(model, best_acc):
    """Persist the current best checkpoint under models_save/.

    The target filename is derived from the global CLI options: model_type
    selects nin vs nin_gc, and bn_fuse==1 appends a _bn_fused suffix.  For
    pruned (prune_qat) or QAFT runs the pruning ``cfg`` is stored alongside
    the weights so the slimmed network can be rebuilt on load.

    Relies on module-level globals ``args`` (and ``cfg`` when prune_qat or
    qaft is active).
    """
    print('==> Saving model ...')
    state = {
        'best_acc': best_acc,
        'state_dict': model.state_dict(),
    }
    # Strip the 'module.' prefix DataParallel adds so the weights also load
    # into a bare (non-parallel) model.  Iterate a snapshot of the keys
    # because we pop from the dict while renaming.
    for key in list(state['state_dict'].keys()):
        if 'module' in key:
            state['state_dict'][key.replace('module.', '')] = \
                state['state_dict'].pop(key)
    # Derive the filename once instead of duplicating torch.save() across
    # an 8-way nested if/else (previous implementation).
    base = 'nin' if args.model_type == 0 else 'nin_gc'
    suffix = '_bn_fused' if args.bn_fuse == 1 else ''
    path = 'models_save/{}{}.pth'.format(base, suffix)
    if args.prune_qat or args.qaft:
        # Pruned topology: keep cfg so the model can be reconstructed.
        torch.save({'cfg': cfg, 'best_acc': best_acc,
                    'state_dict': state['state_dict']}, path)
    else:
        torch.save(state, path)
def adjust_learning_rate(optimizer, epoch):
    """Decay every param-group learning rate by 10x at scheduled epochs."""
    milestones = (80, 130, 180, 230, 280)
    if epoch not in milestones:
        return
    for group in optimizer.param_groups:
        group['lr'] *= 0.1
def train(epoch):
    """Run one training epoch over ``trainloader``.

    Uses module-level globals (model, trainloader, criterion, optimizer,
    args) and logs progress every 100 batches.
    """
    model.train()
    for batch_idx, (data, target) in enumerate(trainloader):
        if not args.cpu:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        loss = criterion(model(data), target)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if batch_idx % 100 != 0:
            continue
        print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format(
            epoch, batch_idx * len(data), len(trainloader.dataset),
            100. * batch_idx / len(trainloader), loss.data.item(),
            optimizer.param_groups[0]['lr']))
    return
def test():
    # Evaluate the (possibly quantized) model on the CIFAR-10 test set.
    # Uses module globals: model, testloader, criterion, args, best_acc.
    # Side effect: calls save_state() whenever a new best accuracy is hit.
    global best_acc
    model.eval()
    test_loss = 0
    correct = 0
    # NOTE(review): evaluation runs without torch.no_grad(); accuracy is
    # unaffected but autograd state is kept alive -- consider wrapping.
    for data, target in testloader:
        if not args.cpu:
            data, target = data.cuda(), target.cuda()
        data, target = Variable(data), Variable(target)
        output = model(data)
        test_loss += criterion(output, target).data.item()
        # Predicted class = argmax over logits, kept as a column vector.
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()
    acc = 100. * float(correct) / len(testloader.dataset)
    if acc > best_acc:
        best_acc = acc
        save_state(model, best_acc)
    # Average loss per batch: dataset size / batch size = number of batches.
    average_test_loss = test_loss / (len(testloader.dataset) / args.eval_batch_size)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)'.format(
        average_test_loss, correct, len(testloader.dataset),
        100. * float(correct) / len(testloader.dataset)))
    print('Best Accuracy: {:.2f}%\n'.format(best_acc))
    return
if __name__ == '__main__':
    # ----- CLI options ------------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--cpu', action='store_true',
                        help='set if only CPU is available')
    parser.add_argument('--gpu_id', action='store', default='',
                        help='gpu_id')
    parser.add_argument('--data', action='store', default='../../../../data',
                        help='dataset path')
    # NOTE(review): --lr/--wd have no type=float, so values given on the
    # command line arrive as strings; base_lr below compensates with
    # float(args.lr), but args.wd is passed to the optimizer as-is.
    parser.add_argument('--lr', action='store', default=0.01,
                        help='the intial learning rate')
    parser.add_argument('--wd', action='store', default=1e-5,
                        help='the intial learning rate')
    # Checkpoint paths selecting the resume/finetune flow below.
    parser.add_argument('--prune_qat', default='', type=str, metavar='PATH',
                        help='the path to the prune_qat model')
    parser.add_argument('--refine', default='', type=str, metavar='PATH',
                        help='the path to the float_refine model')
    parser.add_argument('--resume', default='', type=str, metavar='PATH',
                        help='the path to the resume model')
    parser.add_argument('--train_batch_size', type=int, default=64)
    parser.add_argument('--eval_batch_size', type=int, default=64)
    parser.add_argument('--num_workers', type=int, default=2)
    parser.add_argument('--start_epochs', type=int, default=1, metavar='N',
                        help='number of epochs to train_start')
    parser.add_argument('--end_epochs', type=int, default=300, metavar='N',
                        help='number of epochs to train_end')
    # Quantization bit-widths for weights (w) and activations (a).
    parser.add_argument('--w_bits', type=int, default=8)
    parser.add_argument('--a_bits', type=int, default=8)
    parser.add_argument('--bn_fuse', type=int, default=0,
                        help='bn_fuse:1')
    parser.add_argument('--q_type', type=int, default=0,
                        help='quant_type:0-symmetric, 1-asymmetric')
    parser.add_argument('--q_level', type=int, default=0,
                        help='quant_level:0-per_channel, 1-per_layer')
    parser.add_argument('--weight_observer', type=int, default=0,
                        help='quant_weight_observer:0-MinMaxObserver, 1-MovingAverageMinMaxObserver')
    parser.add_argument('--pretrained_model', action='store_true',
                        help='pretrained_model')
    parser.add_argument('--qaft', action='store_true',
                        help='quantization-aware-finetune')
    parser.add_argument('--prune_qaft', default='', type=str, metavar='PATH',
                        help='the path to the prune_qaft model')
    parser.add_argument('--model_type', type=int, default=1,
                        help='model type:0-nin,1-nin_gc')
    args = parser.parse_args()
    print('==> Options:', args)
    # ----- Device selection -------------------------------------------
    if args.gpu_id:
        os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id
    if not args.cpu:
        device = 'cuda'
    else:
        device = 'cpu'
    # Fixed seed for reproducibility across runs.
    setup_seed(1)
    print('==> Preparing data..')
    # ----- CIFAR-10 data: augmented train / plain normalized test -----
    transform_train = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))])
    trainset = torchvision.datasets.CIFAR10(root=args.data, train=True, download=True,
                                            transform=transform_train)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=args.train_batch_size,
                                              shuffle=True, num_workers=args.num_workers)
    testset = torchvision.datasets.CIFAR10(root=args.data, train=False, download=True,
                                           transform=transform_test)
    testloader = torch.utils.data.DataLoader(testset, batch_size=args.eval_batch_size,
                                             shuffle=False, num_workers=args.num_workers)
    # NOTE(review): `classes` appears unused in this script.
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')
    # ----- Model construction: one branch per resume/finetune flow ----
    if args.prune_qat:
        # Pruned float checkpoint -> rebuild slim model, then quantize.
        print('******Prune QAT model******')
        checkpoint = torch.load(args.prune_qat)
        cfg = checkpoint['cfg']
        if args.model_type == 0:
            model = nin.Net(cfg=checkpoint['cfg'])
        else:
            model = nin_gc.Net(cfg=checkpoint['cfg'])
        # Weights loaded BEFORE quantize.prepare (float weights in).
        model.load_state_dict(checkpoint['state_dict'])
        best_acc = 0
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, a_bits=args.a_bits,
                         w_bits=args.w_bits, q_type=args.q_type,
                         q_level=args.q_level, device=device,
                         weight_observer=args.weight_observer,
                         bn_fuse=args.bn_fuse,
                         pretrained_model=args.pretrained_model,
                         qaft=args.qaft)
        print('\n***quant_model***\n', model)
    elif args.prune_qaft:
        # Pruned, already-quantized checkpoint -> quantize first, then
        # load quantized weights (order differs from prune_qat on purpose).
        print('******Prune QAFT model******')
        checkpoint = torch.load(args.prune_qaft)
        cfg = checkpoint['cfg']
        if args.model_type == 0:
            model = nin.Net(cfg=checkpoint['cfg'])
        else:
            model = nin_gc.Net(cfg=checkpoint['cfg'])
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, a_bits=args.a_bits,
                         w_bits=args.w_bits, q_type=args.q_type,
                         q_level=args.q_level, device=device,
                         weight_observer=args.weight_observer,
                         bn_fuse=args.bn_fuse,
                         pretrained_model=args.pretrained_model,
                         qaft=args.qaft)
        print('\n***quant_model***\n', model)
        model.load_state_dict(checkpoint['state_dict'])
        best_acc = checkpoint['best_acc']
    elif args.refine:
        # Unpruned float checkpoint (raw state_dict, no wrapper dict).
        print('******Float Refine model******')
        state_dict = torch.load(args.refine)
        if args.model_type == 0:
            model = nin.Net()
        else:
            model = nin_gc.Net()
        model.load_state_dict(state_dict)
        best_acc = 0
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, a_bits=args.a_bits,
                         w_bits=args.w_bits, q_type=args.q_type,
                         q_level=args.q_level, device=device,
                         weight_observer=args.weight_observer,
                         bn_fuse=args.bn_fuse,
                         pretrained_model=args.pretrained_model,
                         qaft=args.qaft)
        print('\n***quant_model***\n', model)
    elif args.resume:
        # Resume an interrupted quantization-aware run (quantized weights).
        print('******Reume model******')
        checkpoint = torch.load(args.resume)
        if args.model_type == 0:
            model = nin.Net()
        else:
            model = nin_gc.Net()
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, a_bits=args.a_bits,
                         w_bits=args.w_bits, q_type=args.q_type,
                         q_level=args.q_level, device=device,
                         weight_observer=args.weight_observer,
                         bn_fuse=args.bn_fuse,
                         pretrained_model=args.pretrained_model,
                         qaft=args.qaft)
        print('\n***quant_model***\n', model)
        model.load_state_dict(checkpoint['state_dict'])
        best_acc = checkpoint['best_acc']
    else:
        # Fresh model: Xavier init for convs, small-normal init for linears.
        print('******Initializing model******')
        if args.model_type == 0:
            model = nin.Net()
        else:
            model = nin_gc.Net()
        best_acc = 0
        for m in model.modules():
            if isinstance(m, nn.Conv2d):
                init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                init.normal_(m.weight, 0, 0.01)
                if m.bias is not None:
                    init.zeros_(m.bias)
        print('***ori_model***\n', model)
        quantize.prepare(model, inplace=True, a_bits=args.a_bits,
                         w_bits=args.w_bits, q_type=args.q_type,
                         q_level=args.q_level, device=device,
                         weight_observer=args.weight_observer,
                         bn_fuse=args.bn_fuse,
                         pretrained_model=args.pretrained_model,
                         qaft=args.qaft)
        print('\n***quant_model***\n', model)
    # ----- Optimizer / loss / training loop ---------------------------
    if not args.cpu:
        model.cuda()
        model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count()))
    base_lr = float(args.lr)
    # One param group per parameter; all share the same lr/weight decay.
    param_dict = dict(model.named_parameters())
    params = []
    for key, value in param_dict.items():
        params += [{'params': [value], 'lr': base_lr, 'weight_decay':args.wd}]
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(params, lr=base_lr, weight_decay=args.wd)
    for epoch in range(args.start_epochs, args.end_epochs):
        adjust_learning_rate(optimizer, epoch)
        train(epoch)
        test()
| true
| true
|
1c4257770447c53571e0123f20816322805b5207
| 647
|
py
|
Python
|
ansible_config.py
|
arista-netdevops-community/excel-to-avd
|
8762697888b8d411eca65a5bd7e974f963e92956
|
[
"Apache-2.0"
] | 1
|
2021-04-03T03:56:03.000Z
|
2021-04-03T03:56:03.000Z
|
ansible_config.py
|
arista-netdevops-community/excel-to-avd
|
8762697888b8d411eca65a5bd7e974f963e92956
|
[
"Apache-2.0"
] | null | null | null |
ansible_config.py
|
arista-netdevops-community/excel-to-avd
|
8762697888b8d411eca65a5bd7e974f963e92956
|
[
"Apache-2.0"
] | 1
|
2021-07-29T17:04:00.000Z
|
2021-07-29T17:04:00.000Z
|
# Template for the ansible.cfg written out by the generator.  The body is
# emitted verbatim, so everything inside the triple-quoted string (including
# its '#' lines) is ini content, not Python comments.
# NOTE(review): "inventory =./inventory.yml" has no space after '='; ini
# parsing accepts it, so the string is left byte-identical here.
ansible_config = '''[defaults]
host_key_checking = False
inventory =./inventory.yml
gathering = explicit
retry_files_enabled = False
collections_paths = ./collections:./ansible_collections:~/.ansible/collections:/usr/share/ansible/collections:../../../ansible-avd/:../../../ansible-cvp/
jinja2_extensions = jinja2.ext.loopcontrols,jinja2.ext.do,jinja2.ext.i18n
# enable the YAML callback plugin.
stdout_callback = yaml
# enable the stdout_callback when running ad-hoc commands.
bin_ansible_callbacks = True
forks = 15
callback_whitelist = profile_roles, profile_tasks, timer
[persistent_connection]
connect_timeout = 120
command_timeout = 120'''
| 38.058824
| 153
| 0.789799
|
ansible_config = '''[defaults]
host_key_checking = False
inventory =./inventory.yml
gathering = explicit
retry_files_enabled = False
collections_paths = ./collections:./ansible_collections:~/.ansible/collections:/usr/share/ansible/collections:../../../ansible-avd/:../../../ansible-cvp/
jinja2_extensions = jinja2.ext.loopcontrols,jinja2.ext.do,jinja2.ext.i18n
# enable the YAML callback plugin.
stdout_callback = yaml
# enable the stdout_callback when running ad-hoc commands.
bin_ansible_callbacks = True
forks = 15
callback_whitelist = profile_roles, profile_tasks, timer
[persistent_connection]
connect_timeout = 120
command_timeout = 120'''
| true
| true
|
1c4257a2308fbbdb3d1b76d4c2e28baa76179a6c
| 233
|
py
|
Python
|
Solutions/Solution_Problem_7.py
|
GuilhermeLouli/logic-python
|
37193911c09d3362f2ee974f571dbb90ac62d26a
|
[
"MIT"
] | null | null | null |
Solutions/Solution_Problem_7.py
|
GuilhermeLouli/logic-python
|
37193911c09d3362f2ee974f571dbb90ac62d26a
|
[
"MIT"
] | null | null | null |
Solutions/Solution_Problem_7.py
|
GuilhermeLouli/logic-python
|
37193911c09d3362f2ee974f571dbb90ac62d26a
|
[
"MIT"
] | null | null | null |
# Brute-force a 3-digit numeric password (000-999) read from stdin.
password = input("Senha: ")
# range(10) covers digits 0-9.  The original used range(0, 9), which stops
# at 8, so any password containing the digit 9 could never be matched.
for i in range(10):
    for j in range(10):
        for k in range(10):
            holder = str(i) + str(j) + str(k)
            if holder == password:
                print("A senha é: ", holder)
| 25.888889
| 45
| 0.467811
|
password = input("Senha: ")
for i in range(0, 9):
for j in range(0, 9):
for k in range(0, 9):
holder = str(i) + str(j) + str(k)
if holder == password:
print("A senha é: ", holder)
| true
| true
|
1c42596d2a04ac274292a4823bdb5d679270cf98
| 229,676
|
py
|
Python
|
salt/modules/file.py
|
Lesvek/salt
|
6fbd8f66cb34bd5e0fcd6a8105ef0b17f19191ec
|
[
"Apache-2.0"
] | 2
|
2017-12-01T19:20:49.000Z
|
2018-10-03T23:54:12.000Z
|
salt/modules/file.py
|
Lesvek/salt
|
6fbd8f66cb34bd5e0fcd6a8105ef0b17f19191ec
|
[
"Apache-2.0"
] | null | null | null |
salt/modules/file.py
|
Lesvek/salt
|
6fbd8f66cb34bd5e0fcd6a8105ef0b17f19191ec
|
[
"Apache-2.0"
] | 1
|
2019-12-17T13:37:16.000Z
|
2019-12-17T13:37:16.000Z
|
"""
Manage information about regular files, directories,
and special files on the minion, set/read user,
group, mode, and data
"""
# TODO: We should add the capability to do u+r type operations here
# some time in the future
import datetime
import errno
import fnmatch
import glob
import hashlib
import itertools
import logging
import mmap
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import urllib.parse
from collections import namedtuple
from collections.abc import Iterable, Mapping
from functools import reduce
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.http
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
from salt.exceptions import get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
try:
import grp
import pwd
except ImportError:
pass
log = logging.getLogger(__name__)
__func_alias__ = {"makedirs_": "makedirs"}
AttrChanges = namedtuple("AttrChanges", "added,removed")
def __virtual__():
    """
    Only work on POSIX-like systems
    """
    if salt.utils.platform.is_windows():
        # Windows minions use win_file instead of this module.
        reason = (
            "The file execution module cannot be loaded: only available on "
            "non-Windows systems - use win_file instead."
        )
        return False, reason
    return True
def __clean_tmp(sfn):
    """
    Clean out a template temp file
    """
    tmp_prefix = os.path.join(tempfile.gettempdir(), salt.utils.files.TEMPFILE_PREFIX)
    if not sfn.startswith(tmp_prefix):
        return
    # Never delete a file that also lives under file_roots (any saltenv).
    all_roots = itertools.chain.from_iterable(__opts__["file_roots"].values())
    in_roots = any(sfn.startswith(root) for root in all_roots)
    # Only remove files that actually exist.
    if os.path.exists(sfn) and not in_roots:
        os.remove(sfn)
def _error(ret, err_msg):
"""
Common function for setting error information for return dicts
"""
ret["result"] = False
ret["comment"] = err_msg
return ret
def _binary_replace(old, new):
    """
    Return a short phrase describing a replacement involving binary files,
    or an empty string when both *old* and *new* are text.

    This function does NOT diff the files; it should only be called AFTER
    it has been determined that the files differ.
    """
    old_is_text = __utils__["files.is_text"](old)
    new_is_text = __utils__["files.is_text"](new)
    if old_is_text and new_is_text:
        return ""
    if not old_is_text and not new_is_text:
        return "Replace binary file"
    if not old_is_text:
        return "Replace binary file with text file"
    return "Replace text file with binary file"
def _get_bkroot():
    """
    Get the location of the backup dir in the minion cache
    """
    # cachedir comes from the minion config.
    cachedir = __salt__["config.get"]("cachedir")
    return os.path.join(cachedir, "file_backup")
def _splitlines_preserving_trailing_newline(str):
"""
Returns a list of the lines in the string, breaking at line boundaries and
preserving a trailing newline (if present).
Essentially, this works like ``str.striplines(False)`` but preserves an
empty line at the end. This is equivalent to the following code:
.. code-block:: python
lines = str.splitlines()
if str.endswith('\n') or str.endswith('\r'):
lines.append('')
"""
lines = str.splitlines()
if str.endswith("\n") or str.endswith("\r"):
lines.append("")
return lines
def _chattr_version():
    """
    Return the version of chattr installed, or None when it cannot be
    determined.
    """
    # chattr has no reliable version flag of its own.  tune2fs ships in
    # the same e2fsprogs package, so its version is the best proxy --
    # assuming a conventional install.
    # See https://unix.stackexchange.com/a/520399/5788 for discussion.
    tune2fs = salt.utils.path.which("tune2fs")
    if not tune2fs or salt.utils.platform.is_aix():
        return None
    output = __salt__["cmd.run"]([tune2fs], ignore_retcode=True, python_shell=False)
    match = re.search(
        r"tune2fs (?P<version>[0-9\.]+)",
        salt.utils.stringutils.to_str(output),
    )
    return None if match is None else match.group("version")
def _chattr_has_extended_attrs():
    """
    Return ``True`` if chattr supports extended attributes, that is,
    the installed e2fsprogs version is newer than 1.41.12. Otherwise,
    ``False``.

    NOTE(review): the docstring previously said ">1.41.22" while the code
    compares against 1.41.12; the comparison below is taken as
    authoritative.
    """
    ver = _chattr_version()
    if ver is None:
        return False
    needed_version = salt.utils.versions.LooseVersion("1.41.12")
    chattr_version = salt.utils.versions.LooseVersion(ver)
    return chattr_version > needed_version
def gid_to_group(gid):
    """
    Convert the group id to the group name on this system

    gid
        gid to convert to a group name

    CLI Example:

    .. code-block:: bash

        salt '*' file.gid_to_group 0
    """
    try:
        numeric_gid = int(gid)
    except ValueError:
        # Not numeric: perhaps a group name was passed; map it to a gid.
        numeric_gid = group_to_gid(gid)
    if numeric_gid == "":
        # Unknown group name: nothing sensible to hand to grp.
        return ""
    try:
        return grp.getgrgid(numeric_gid).gr_name
    except (KeyError, NameError):
        # Unknown gid (or no grp module): fall back to the gid itself.
        return numeric_gid
def group_to_gid(group):
    """
    Convert the group to the gid on this system

    group
        group to convert to its gid

    CLI Example:

    .. code-block:: bash

        salt '*' file.group_to_gid root
    """
    if group is None:
        return ""
    if isinstance(group, int):
        # Already a gid; pass through unchanged.
        return group
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        return ""
def get_gid(path, follow_symlinks=True):
    """
    Return the id of the group that owns a given file (-1 if unknown).

    path
        file or directory of which to get the gid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_gid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    """
    file_stats = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return file_stats.get("gid", -1)
def get_group(path, follow_symlinks=True):
    """
    Return the group that owns a given file (False if unknown).

    path
        file or directory of which to get the group

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_group /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    """
    file_stats = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return file_stats.get("group", False)
def uid_to_user(uid):
    """
    Convert a uid to a user name

    uid
        uid to convert to a username

    CLI Example:

    .. code-block:: bash

        salt '*' file.uid_to_user 0
    """
    try:
        entry = pwd.getpwuid(uid)
    except (KeyError, NameError):
        # Unknown uid (or no pwd module): fall back to the raw uid.
        return uid
    return entry.pw_name
def user_to_uid(user):
    """
    Convert user name to a uid

    user
        user name to convert to its uid

    CLI Example:

    .. code-block:: bash

        salt '*' file.user_to_uid root
    """
    if user is None:
        # Default to the user the minion is running as.
        user = salt.utils.user.get_user()
    if isinstance(user, int):
        # Already a uid; pass through unchanged.
        return user
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        return ""
def get_uid(path, follow_symlinks=True):
    """
    Return the id of the user that owns a given file (-1 if unknown).

    path
        file or directory of which to get the uid

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_uid /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    """
    file_stats = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return file_stats.get("uid", -1)
def get_user(path, follow_symlinks=True):
    """
    Return the user that owns a given file (False if unknown).

    path
        file or directory of which to get the user

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_user /etc/passwd

    .. versionchanged:: 0.16.4
        ``follow_symlinks`` option added
    """
    file_stats = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return file_stats.get("user", False)
def get_mode(path, follow_symlinks=True):
    """
    Return the mode of a file (empty string if unknown).

    path
        file or directory of which to get the mode

    follow_symlinks
        indicated if symlinks should be followed

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_mode /etc/passwd

    .. versionchanged:: 2014.1.0
        ``follow_symlinks`` option added
    """
    file_stats = stats(os.path.expanduser(path), follow_symlinks=follow_symlinks)
    return file_stats.get("mode", "")
def set_mode(path, mode):
    """
    Set the mode of a file

    path
        file or directory of which to set the mode

    mode
        mode to set the path to

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_mode /etc/passwd 0644
    """
    path = os.path.expanduser(path)
    # Normalize octal prefixes like 0o644 / 0644 down to the bare digits.
    mode = str(mode).lstrip("0Oo") or "0"
    if not os.path.exists(path):
        raise CommandExecutionError("{}: File not found".format(path))
    try:
        os.chmod(path, int(mode, 8))
    except Exception:  # pylint: disable=broad-except
        return "Invalid Mode " + mode
    return get_mode(path)
def lchown(path, user, group):
    """
    Chown a file, pass the file the desired user and group without following
    symlinks.

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.lchown /etc/passwd root root
    """
    path = os.path.expanduser(path)

    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ""
    if uid == "":
        if user:
            err += "User does not exist\n"
        else:
            uid = -1
    if gid == "":
        if group:
            err += "Group does not exist\n"
        else:
            gid = -1
    # BUG FIX: ``err`` was previously collected but never returned, so an
    # unknown user/group fell through to os.lchown(path, "", ...) and
    # raised a TypeError.  Mirror chown() and report the problem instead.
    if err:
        return err

    return os.lchown(path, uid, gid)
def chown(path, user, group):
    """
    Chown a file, pass the file the desired user and group

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chown /etc/passwd root root
    """
    path = os.path.expanduser(path)

    uid = user_to_uid(user)
    gid = group_to_gid(group)
    problems = []
    if uid == "":
        if user:
            problems.append("User does not exist\n")
        else:
            uid = -1
    if gid == "":
        if group:
            problems.append("Group does not exist\n")
        else:
            gid = -1
    err = "".join(problems)
    if not os.path.exists(path):
        try:
            # Broken symlinks report as missing but can still be chowned.
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        err += "File not found"
    if err:
        return err
    return os.chown(path, uid, gid)
def chgrp(path, group):
    """
    Change the group of a file

    path
        path to the file or directory

    group
        group owner

    CLI Example:

    .. code-block:: bash

        salt '*' file.chgrp /etc/passwd root
    """
    expanded = os.path.expanduser(path)
    # Keep the current owner; only the group changes.
    current_user = get_user(expanded)
    return chown(expanded, current_user, group)
def _cmp_attrs(path, attrs):
    """
    .. versionadded:: 2018.3.0

    Compare attributes of a given file to given attributes.
    Returns a pair (list) where first item are attributes to
    add and second item are to be removed.

    Please take into account when using this function that some minions will
    not have lsattr installed.

    path
        path to file to compare attributes with.

    attrs
        string of attributes to compare against a given file
    """
    # AIX ships an unrelated lsattr command, so attribute comparison is
    # not supported there.
    if salt.utils.platform.is_aix():
        return None

    try:
        current = lsattr(path).get(path, "")
    except AttributeError:
        # lsattr not installed (lsattr() returned None)
        return None

    wanted = set(attrs)
    existing = set(current)
    # "e" can be set but never cleared; pretend it was requested whenever
    # the file already carries it, so we never try to remove it.
    if "e" in existing:
        wanted.add("e")

    return AttrChanges(
        added="".join(wanted - existing) or None,
        removed="".join(existing - wanted) or None,
    )
def lsattr(path):
    """
    .. versionadded:: 2018.3.0
    .. versionchanged:: 2018.3.1
        If ``lsattr`` is not installed on the system, ``None`` is returned.
    .. versionchanged:: 2018.3.4
        If on ``AIX``, ``None`` is returned even if in filesystem as lsattr on ``AIX``
        is not the same thing as the linux version.

    Obtain the modifiable attributes of the given file. If path
    is to a directory, an empty list is returned.

    path
        path to file to obtain attributes of. File/directory must exist.

    CLI Example:

    .. code-block:: bash

        salt '*' file.lsattr foo1.txt
    """
    if not salt.utils.path.which("lsattr") or salt.utils.platform.is_aix():
        return None

    if not os.path.exists(path):
        raise SaltInvocationError("File or directory does not exist: " + path)

    output = __salt__["cmd.run"](
        ["lsattr", path], ignore_retcode=True, python_shell=False
    )

    listing = {}
    for line in output.splitlines():
        # Error lines start with "lsattr: "; data lines are "<attrs> <name>".
        if line.startswith("lsattr: "):
            continue
        flags, fname = line.split(None, 1)
        # Pick the attribute alphabet based on chattr capabilities.
        if _chattr_has_extended_attrs():
            attr_pattern = r"[aAcCdDeijPsStTu]"
        else:
            attr_pattern = r"[acdijstuADST]"
        listing[fname] = re.findall(attr_pattern, flags)
    return listing
def chattr(*files, **kwargs):
    """
    .. versionadded:: 2018.3.0

    Change the attributes of files. This function accepts one or more files and
    the following options:

    operator
        Can be either ``add`` or ``remove``. Determines whether attributes
        should be added or removed from files

    attributes
        One or more of the following characters: ``aAcCdDeijPsStTu``,
        representing attributes to add to/remove from files

    version
        a version number to assign to the file(s)

    flags
        One or more of the following characters: ``RVf``, representing
        flags to assign to chattr (recurse, verbose, suppress most errors)

    CLI Example:

    .. code-block:: bash

        salt '*' file.chattr foo1.txt foo2.txt operator=add attributes=ai
        salt '*' file.chattr foo3.txt operator=remove attributes=i version=2
    """
    operator = kwargs.pop("operator", None)
    attributes = kwargs.pop("attributes", None)
    flags = kwargs.pop("flags", None)
    version = kwargs.pop("version", None)

    if (operator is None) or (operator not in ("add", "remove")):
        raise SaltInvocationError(
            "Need an operator: 'add' or 'remove' to modify attributes."
        )
    if attributes is None:
        raise SaltInvocationError("Need attributes: [aAcCdDeijPsStTu]")

    cmd = ["chattr"]

    if operator == "add":
        attrs = "+{}".format(attributes)
    elif operator == "remove":
        attrs = "-{}".format(attributes)
    cmd.append(attrs)

    if flags is not None:
        cmd.append("-{}".format(flags))

    if version is not None:
        # str() so an integer version (e.g. version=2 from the CLI) cannot
        # break command assembly.
        cmd.extend(["-v", str(version)])

    cmd.extend(files)

    result = __salt__["cmd.run"](cmd, python_shell=False)

    # chattr is silent on success; any output indicates failure.
    return not bool(result)
def get_sum(path, form="sha256"):
    """
    Return the checksum for the given file. The following checksum algorithms
    are supported:

    * md5
    * sha1
    * sha224
    * sha256 **(default)**
    * sha384
    * sha512

    path
        path to the file or directory

    form
        desired sum format

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_sum /etc/passwd sha512
    """
    full_path = os.path.expanduser(path)
    if not os.path.isfile(full_path):
        return "File not found"
    # Hash in small (4 KiB) chunks to keep memory use flat.
    return salt.utils.hashutils.get_hash(full_path, form, 4096)
def get_hash(path, form="sha256", chunk_size=65536):
    """
    Get the hash sum of a file

    This is better than ``get_sum`` for the following reasons:
        - It does not read the entire file into memory.
        - It does not return a string on error. The returned value of
          ``get_sum`` cannot really be trusted since it is vulnerable to
          collisions: ``get_sum(..., 'xyz') == 'Hash xyz not supported'``

    path
        path to the file or directory

    form
        desired sum format

    chunk_size
        amount to sum at once

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_hash /etc/shadow
    """
    expanded = os.path.expanduser(path)
    return salt.utils.hashutils.get_hash(expanded, form, chunk_size)
def get_source_sum(
    file_name="",
    source="",
    source_hash=None,
    source_hash_name=None,
    saltenv="base",
    verify_ssl=True,
):
    """
    .. versionadded:: 2016.11.0

    Used by :py:func:`file.get_managed <salt.modules.file.get_managed>` to
    obtain the hash and hash type from the parameters specified below.

    Returns a dict with keys ``hash_type`` and ``hsum`` (lowercased hash).

    file_name
        Optional file name being managed, for matching with
        :py:func:`file.extract_hash <salt.modules.file.extract_hash>`.

    source
        Source file, as used in :py:mod:`file <salt.states.file>` and other
        states. If ``source_hash`` refers to a file containing hashes, then
        this filename will be used to match a filename in that file. If the
        ``source_hash`` is a hash expression, then this argument will be
        ignored.

    source_hash
        Hash file/expression, as used in :py:mod:`file <salt.states.file>` and
        other states. If this value refers to a remote URL or absolute path to
        a local file, it will be cached and :py:func:`file.extract_hash
        <salt.modules.file.extract_hash>` will be used to obtain a hash from
        it.

    source_hash_name
        Specific file name to look for when ``source_hash`` refers to a remote
        file, used to disambiguate ambiguous matches.

    saltenv: base
        Salt fileserver environment from which to retrieve the source_hash. This
        value will only be used when ``source_hash`` refers to a file on the
        Salt fileserver (i.e. one beginning with ``salt://``).

    verify_ssl
        If ``False``, remote https file sources (``https://``) and source_hash
        will not attempt to validate the servers certificate. Default is True.

        .. versionadded:: 3002

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=499ae16dcae71eeb7c3a30c75ea7a1a6
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5
        salt '*' file.get_source_sum /tmp/foo.tar.gz source=http://mydomain.tld/foo.tar.gz source_hash=https://mydomain.tld/hashes.md5 source_hash_name=./dir2/foo.tar.gz
    """

    def _invalid_source_hash_format():
        """
        DRY helper for reporting invalid source_hash input
        """
        raise CommandExecutionError(
            "Source hash {} format is invalid. The supported formats are: "
            "1) a hash, 2) an expression in the format <hash_type>=<hash>, or "
            "3) either a path to a local file containing hashes, or a URI of "
            "a remote hash file. Supported protocols for remote hash files "
            "are: {}. The hash may also not be of a valid length, the "
            "following are supported hash types and lengths: {}.".format(
                source_hash,
                ", ".join(salt.utils.files.VALID_PROTOS),
                ", ".join(
                    [
                        "{} ({})".format(HASHES_REVMAP[x], x)
                        for x in sorted(HASHES_REVMAP)
                    ]
                ),
            )
        )

    # Determine whether source_hash points at a hash *file* (local path or
    # supported remote URL) or is an inline hash expression.
    hash_fn = None
    if os.path.isabs(source_hash):
        hash_fn = source_hash
    else:
        try:
            proto = urllib.parse.urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                # Remote hash file: cache it locally first.
                hash_fn = __salt__["cp.cache_file"](
                    source_hash, saltenv, verify_ssl=verify_ssl
                )
                if not hash_fn:
                    raise CommandExecutionError(
                        "Source hash file {} not found".format(source_hash)
                    )
            else:
                if proto != "":
                    # Some unsupported protocol (e.g. foo://) is being used.
                    # We'll get into this else block if a hash expression
                    # (like md5=<md5 checksum here>), but in those cases, the
                    # protocol will be an empty string, in which case we avoid
                    # this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            # source_hash was not a string (e.g. None)
            _invalid_source_hash_format()

    if hash_fn is not None:
        # source_hash named a file of hashes: extract the matching entry.
        ret = extract_hash(hash_fn, "", file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        ret["hsum"] = ret["hsum"].lower()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            ret["hash_type"], ret["hsum"] = [
                x.strip() for x in source_hash.split("=", 1)
            ]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match("^[{}]+$".format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret["hsum"] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret["hash_type"] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()

        if ret["hash_type"] not in HASHES:
            raise CommandExecutionError(
                "Invalid hash type '{}'. Supported hash types are: {}. "
                "Either remove the hash type and simply use '{}' as the "
                "source_hash, or change the hash type to a supported type.".format(
                    ret["hash_type"], ", ".join(HASHES), ret["hsum"]
                )
            )
        else:
            # Explicit hash type given: cross-check the hash length against it.
            hsum_len = len(ret["hsum"])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret["hash_type"]]:
                raise CommandExecutionError(
                    "Invalid length ({}) for hash type '{}'. Either "
                    "remove the hash type and simply use '{}' as the "
                    "source_hash, or change the hash type to '{}'".format(
                        hsum_len,
                        ret["hash_type"],
                        ret["hsum"],
                        HASHES_REVMAP[hsum_len],
                    )
                )

        ret["hsum"] = ret["hsum"].lower()
        return ret
def check_hash(path, file_hash):
    """
    Check if a file matches the given hash string

    Returns ``True`` if the hash matches, otherwise ``False``.

    path
        Path to a file local to the minion.

    hash
        The hash to check against the file specified in the ``path`` argument.

        .. versionchanged:: 2016.11.4

        For this and newer versions the hash can be specified without an
        accompanying hash type (e.g. ``e138491e9d5b97023cea823fe17bac22``),
        but for earlier releases it is necessary to also specify the hash type
        in the format ``<hash_type>=<hash_value>`` (e.g.
        ``md5=e138491e9d5b97023cea823fe17bac22``).

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_hash /etc/fstab e138491e9d5b97023cea823fe17bac22
        salt '*' file.check_hash /etc/fstab md5=e138491e9d5b97023cea823fe17bac22
    """
    full_path = os.path.expanduser(path)

    if not isinstance(file_hash, str):
        raise SaltInvocationError("hash must be a string")

    # Accept "<type>:<hash>", "<type>=<hash>", or a bare hash whose type is
    # deduced from its length.
    for sep in (":", "="):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    else:
        hash_value = file_hash
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                "Hash {} (length: {}) could not be matched to a supported "
                "hash type. The supported hash types and lengths are: "
                "{}".format(
                    file_hash,
                    hash_len,
                    ", ".join(
                        "{} ({})".format(HASHES_REVMAP[length], length)
                        for length in sorted(HASHES_REVMAP)
                    ),
                )
            )

    return get_hash(full_path, hash_type) == hash_value
def find(path, *args, **kwargs):
    """
    Approximate the Unix ``find(1)`` command and return a list of paths that
    meet the specified criteria.

    The options include match criteria:

    .. code-block:: text

        name    = path-glob                 # case sensitive
        iname   = path-glob                 # case insensitive
        regex   = path-regex                # case sensitive
        iregex  = path-regex                # case insensitive
        type    = file-types                # match any listed type
        user    = users                     # match any listed user
        group   = groups                    # match any listed group
        size    = [+-]number[size-unit]     # default unit = byte
        mtime   = interval                  # modified since date
        grep    = regex                     # search file contents

    and/or actions:

    .. code-block:: text

        delete [= file-types]               # default type = 'f'
        exec    = command [arg ...]         # where {} is replaced by pathname
        print  [= print-opts]

    and/or depth criteria:

    .. code-block:: text

        maxdepth = maximum depth to transverse in path
        mindepth = minimum depth to transverse before checking files or directories

    The default action is ``print=path``

    ``path-glob``:

    .. code-block:: text

        *                = match zero or more chars
        ?                = match any char
        [abc]            = match a, b, or c
        [!abc] or [^abc] = match anything except a, b, and c
        [x-y]            = match chars x through y
        [!x-y] or [^x-y] = match anything except chars x through y
        {a,b,c}          = match a or b or c

    ``path-regex``: a Python Regex (regular expression) pattern to match pathnames

    ``file-types``: a string of one or more of the following:

    .. code-block:: text

        a: all file types
        b: block device
        c: character device
        d: directory
        p: FIFO (named pipe)
        f: plain file
        l: symlink
        s: socket

    ``users``: a space and/or comma separated list of user names and/or uids

    ``groups``: a space and/or comma separated list of group names and/or gids

    ``size-unit``:

    .. code-block:: text

        b: bytes
        k: kilobytes
        m: megabytes
        g: gigabytes
        t: terabytes

    interval:

    .. code-block:: text

        [<num>w] [<num>d] [<num>h] [<num>m] [<num>s]

        where:
            w: week
            d: day
            h: hour
            m: minute
            s: second

    print-opts: a comma and/or space separated list of one or more of the
    following:

    .. code-block:: text

        group: group name
        md5:   MD5 digest of file contents
        mode:  file permissions (as integer)
        mtime: last modification time (as time_t)
        name:  file basename
        path:  file absolute path
        size:  file size in bytes
        type:  file type
        user:  user name

    CLI Examples:

    .. code-block:: bash

        salt '*' file.find / type=f name=\\*.bak size=+10m
        salt '*' file.find /var mtime=+30d size=+10m print=path,size,mtime
        salt '*' file.find /var/log name=\\*.[0-9] mtime=+30d size=+10m delete
    """
    # Positional shorthands for the two common actions.
    if "delete" in args:
        kwargs["delete"] = "f"
    elif "print" in args:
        kwargs["print"] = "path"

    try:
        finder = salt.utils.find.Finder(kwargs)
    except ValueError as ex:
        return "error: {}".format(ex)

    matches = []
    for expanded in glob.glob(os.path.expanduser(path)):
        matches.extend(finder.find(expanded))
    matches.sort()
    return matches
def _sed_esc(string, escape_all=False):
"""
Escape single quotes and forward slashes
"""
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(
    path,
    before,
    after,
    limit="",
    backup=".bak",
    options="-r -e",
    flags="g",
    escape_all=False,
    negate_match=False,
):
    """
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited

    before
        A pattern to find in order to replace with ``after``

    after
        Text that will replace ``before``

    limit: ``''``
        An initial pattern to search for before searching for ``before``

    backup: ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    options: ``-r -e``
        Options to pass to sed

    flags: ``g``
        Flags to modify the sed search; e.g., ``i`` for case-insensitive pattern
        matching

    negate_match: False
        Negate the search command (``!``)

        .. versionadded:: 0.17.0

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    .. code-block:: bash

        salt '*' file.sed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    """
    # Largely inspired by Fabric's contrib.files.sed()
    # XXX:dc: Do we really want to always force escaping?
    #
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        return False

    # Mandate that before and after are strings, then escape them for sed.
    before = _sed_esc(str(before), escape_all)
    after = _sed_esc(str(after), escape_all)
    limit = _sed_esc(limit, escape_all)

    if sys.platform == "darwin":
        # BSD sed spells extended-regex support -E rather than GNU's -r.
        options = options.replace("-r", "-E")

    cmd = ["sed"]
    cmd.append("-i{}".format(backup) if backup else "-i")
    cmd.extend(salt.utils.args.shlex_split(options))
    expression = r"{limit}{negate_match}s/{before}/{after}/{flags}".format(
        limit="/{}/ ".format(limit) if limit else "",
        negate_match="!" if negate_match else "",
        before=before,
        after=after,
        flags=flags,
    )
    cmd.append(expression)
    cmd.append(path)

    return __salt__["cmd.run_all"](cmd, python_shell=False)
def sed_contains(path, text, limit="", flags="g"):
    """
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the file at ``path`` contains ``text``. Utilizes sed to
    perform the search (line-wise search).

    Note: the ``p`` flag will be added to any flags you pass in.

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    """
    # Largely inspired by Fabric's contrib.files.contains()
    expanded = os.path.expanduser(path)
    if not os.path.exists(expanded):
        return False

    before = _sed_esc(str(text), False)
    limit = _sed_esc(str(limit), False)
    options = "-n -r -e"
    if sys.platform == "darwin":
        # BSD sed spells extended-regex support -E rather than GNU's -r.
        options = options.replace("-r", "-E")

    cmd = ["sed"]
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        r"{limit}s/{before}/$/{flags}".format(
            limit="/{}/ ".format(limit) if limit else "",
            before=before,
            flags="p{}".format(flags),
        )
    )
    cmd.append(expanded)

    # sed -n only prints matching lines; any output means a match was found.
    return bool(__salt__["cmd.run"](cmd, python_shell=False))
def psed(
    path,
    before,
    after,
    limit="",
    backup=".bak",
    flags="gMS",
    escape_all=False,
    multi=False,
):
    """
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Make a simple edit to a file (pure Python version)

    Equivalent to:

    .. code-block:: bash

        sed <backup> <options> "/<limit>/ s/<before>/<after>/<flags> <file>"

    path
        The full path to the file to be edited

    before
        A pattern to find in order to replace with ``after``

    after
        Text that will replace ``before``

    limit: ``''``
        An initial pattern to search for before searching for ``before``

    backup: ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    flags: ``gMS``
        Flags to modify the search. Valid values are:
          - ``g``: Replace all occurrences of the pattern, not just the first.
          - ``I``: Ignore case.
          - ``L``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\s`` and ``\\S``
            dependent on the locale.
          - ``M``: Treat multiple lines as a single line.
          - ``S``: Make `.` match all characters, including newlines.
          - ``U``: Make ``\\w``, ``\\W``, ``\\b``, ``\\B``, ``\\d``, ``\\D``,
            ``\\s`` and ``\\S`` dependent on Unicode.
          - ``X``: Verbose (whitespace is ignored).

    multi: ``False``
        If True, treat the entire file as a single line

    Forward slashes and single quotes will be escaped automatically in the
    ``before`` and ``after`` patterns.

    CLI Example:

    .. code-block:: bash

        salt '*' file.psed /etc/httpd/httpd.conf 'LogLevel warn' 'LogLevel info'
    """
    # Largely inspired by Fabric's contrib.files.sed()
    # XXX:dc: Do we really want to always force escaping?
    #
    # Mandate that before and after are strings
    path = os.path.expanduser(path)
    multi = bool(multi)

    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    # The pattern to replace with does not need to be escaped
    limit = _sed_esc(limit, escape_all)

    # Back up the original; the backup is then used as the read source.
    shutil.copy2(path, "{}{}".format(path, backup))

    with salt.utils.files.fopen(path, "w") as ofile:
        with salt.utils.files.fopen("{}{}".format(path, backup), "r") as ifile:
            if multi is True:
                # BUGFIX: this used to be ``for line in ifile.readline():``,
                # which iterated over the *characters* of the first line only.
                # Iterate over the file's lines instead.
                # NOTE(review): the docstring says multi=True treats the whole
                # file as one line, yet this branch works line-by-line while
                # the else branch processes the whole file at once; the
                # branch structure is preserved as-is — confirm the intent.
                for line in ifile:
                    ofile.write(
                        salt.utils.stringutils.to_str(
                            _psed(
                                salt.utils.stringutils.to_unicode(line),
                                before,
                                after,
                                limit,
                                flags,
                            )
                        )
                    )
            else:
                ofile.write(
                    salt.utils.stringutils.to_str(
                        _psed(
                            salt.utils.stringutils.to_unicode(ifile.read()),
                            before,
                            after,
                            limit,
                            flags,
                        )
                    )
                )
# Map single-letter flag names (as accepted by the ``flags`` argument of
# file.psed) to their ``re`` module flag constants.
RE_FLAG_TABLE = {"I": re.I, "L": re.L, "M": re.M, "S": re.S, "U": re.U, "X": re.X}
def _psed(text, before, after, limit, flags):
"""
Does the actual work for file.psed, so that single lines can be passed in
"""
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = "".join(comps[1:])
count = 1
if "g" in flags:
count = 0
flags = flags.replace("g", "")
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path, regex, char="#", backup=".bak"):
    """
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Uncomment specified commented lines in a file

    path
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be uncommented.
        This regex should not include the comment character. A leading ``^``
        character will be stripped for convenience (for easily switching
        between comment() and uncomment()).

    char: ``#``
        The character to remove in order to uncomment a line

    backup: ``.bak``
        The file will be backed up before edit with this file extension;
        **WARNING:** each time ``sed``/``comment``/``uncomment`` is called will
        overwrite this backup

    CLI Example:

    .. code-block:: bash

        salt '*' file.uncomment /etc/hosts.deny 'ALL: PARANOID'
    """
    # Thin wrapper: comment_line() with cmnt=False strips the comment char.
    return comment_line(path=path, regex=regex, char=char, cmnt=False, backup=backup)
def comment(path, regex, char="#", backup=".bak"):
    """
    .. deprecated:: 0.17.0
       Use :py:func:`~salt.modules.file.replace` instead.

    Comment out specified lines in a file

    path
        The full path to the file to be edited

    regex
        A regular expression used to find the lines that are to be commented;
        this pattern will be wrapped in parenthesis and will move any
        preceding/trailing ``^`` or ``$`` characters outside the parenthesis
        (e.g., the pattern ``^foo$`` will be rewritten as ``^(foo)$``)

    char: ``#``
        The character to be inserted at the beginning of a line in order to
        comment it out

    backup: ``.bak``
        The file will be backed up before edit with this file extension

        .. warning::

            This backup will be overwritten each time ``sed`` / ``comment`` /
            ``uncomment`` is called. Meaning the backup will only be useful
            after the first invocation.

    CLI Example:

    .. code-block:: bash

        salt '*' file.comment /etc/modules pcspkr
    """
    # Thin wrapper: comment_line() with cmnt=True prefixes the comment char.
    return comment_line(path=path, regex=regex, char=char, cmnt=True, backup=backup)
def comment_line(path, regex, char="#", cmnt=True, backup=".bak"):
    r"""
    Comment or Uncomment a line in a text file.

    :param path: string
        The full path to the text file.

    :param regex: string
        A regex expression that begins with ``^`` that will find the line you wish
        to comment. Can be as simple as ``^color =``

    :param char: string
        The character used to comment a line in the type of file you're referencing.
        Default is ``#``

    :param cmnt: boolean
        True to comment the line. False to uncomment the line. Default is True.

    :param backup: string
        The file extension to give the backup file. Default is ``.bak``
        Set to False/None to not keep a backup.

    :return: boolean or string
        ``False`` if ``regex`` matched no line in the file; otherwise a diff
        (string) of the changes that were made.

    CLI Example:

    The following example will comment out the ``pcspkr`` line in the
    ``/etc/modules`` file using the default ``#`` character and create a backup
    file named ``modules.bak``

    .. code-block:: bash

        salt '*' file.comment_line '/etc/modules' '^pcspkr'

    CLI Example:

    The following example will uncomment the ``log_level`` setting in ``minion``
    config file if it is set to either ``warning``, ``info``, or ``debug`` using
    the ``#`` character and create a backup file named ``minion.bk``

    .. code-block:: bash

        salt '*' file.comment_line 'C:\salt\conf\minion' '^log_level: (warning|info|debug)' '#' False '.bk'
    """
    # Get the regex for comment or uncomment
    if cmnt:
        # Commenting: wrap the pattern in parens, keeping anchors outside.
        regex = "{}({}){}".format(
            "^" if regex.startswith("^") else "",
            regex.lstrip("^").rstrip("$"),
            "$" if regex.endswith("$") else "",
        )
    else:
        # Uncommenting: match the comment char plus optional whitespace first.
        regex = r"^{}\s*({}){}".format(
            char, regex.lstrip("^").rstrip("$"), "$" if regex.endswith("$") else ""
        )

    # Load the real path to the file
    path = os.path.realpath(os.path.expanduser(path))

    # Make sure the file exists
    if not os.path.isfile(path):
        raise SaltInvocationError("File not found: {}".format(path))

    # Make sure it is a text file
    if not __utils__["files.is_text"](path):
        raise SaltInvocationError(
            "Cannot perform string replacements on a binary file: {}".format(path)
        )

    # First check the whole file, determine whether to make the replacement
    # Searching first avoids modifying the time stamp if there are no changes
    found = False
    # Dictionaries for comparing changes
    orig_file = []
    new_file = []
    # Buffer size for fopen
    bufsize = os.path.getsize(path)
    try:
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path, mode="rb", buffering=bufsize) as r_file:
            # Loop through each line of the file and look for a match
            for line in r_file:
                # Is it in this line
                line = salt.utils.stringutils.to_unicode(line)
                if re.match(regex, line):
                    # Load lines into dictionaries, set found to True
                    orig_file.append(line)
                    if cmnt:
                        new_file.append("{}{}".format(char, line))
                    else:
                        new_file.append(line.lstrip(char))
                    found = True
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to open file '{}'. Exception: {}".format(path, exc)
        )

    # We've searched the whole file. If we didn't find anything, return False
    if not found:
        return False

    if not salt.utils.platform.is_windows():
        # Remember ownership/mode so they can be restored after the rewrite.
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))

    # Create a copy to read from and to use as a backup later
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except OSError as exc:
        raise CommandExecutionError("Exception: {}".format(exc))

    try:
        # Open the file in write mode
        mode = "w"
        with salt.utils.files.fopen(path, mode=mode, buffering=bufsize) as w_file:
            try:
                # Open the temp file in read mode
                with salt.utils.files.fopen(
                    temp_file, mode="rb", buffering=bufsize
                ) as r_file:
                    # Loop through each line of the file and look for a match
                    for line in r_file:
                        line = salt.utils.stringutils.to_unicode(line)
                        try:
                            # Is it in this line
                            if re.match(regex, line):
                                # Write the new line
                                if cmnt:
                                    wline = "{}{}".format(char, line)
                                else:
                                    wline = line.lstrip(char)
                            else:
                                # Write the existing line (no change)
                                wline = line
                            wline = salt.utils.stringutils.to_str(wline)
                            w_file.write(wline)
                        except OSError as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{}'. "
                                "Exception: {}".format(path, temp_file, exc)
                            )
            except OSError as exc:
                raise CommandExecutionError("Exception: {}".format(exc))
    except OSError as exc:
        raise CommandExecutionError("Exception: {}".format(exc))

    if backup:
        # Move the backup file to the original directory
        backup_name = "{}{}".format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{}' to the "
                "backup file '{}'. "
                "Exception: {}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)

    if not salt.utils.platform.is_windows():
        # Restore the ownership/mode captured before the rewrite.
        check_perms(path, None, pre_user, pre_group, pre_mode)

    # Return a diff using the two dictionaries
    return __utils__["stringutils.get_diff"](orig_file, new_file)
def _get_flags(flags):
"""
Return an integer appropriate for use as a flag for the re module from a
list of human-readable strings
.. code-block:: python
>>> _get_flags(['MULTILINE', 'IGNORECASE'])
10
>>> _get_flags('MULTILINE')
8
>>> _get_flags(2)
2
"""
if isinstance(flags, str):
flags = [flags]
if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
_flags_acc = []
for flag in flags:
_flag = getattr(re, str(flag).upper())
if not isinstance(_flag, int):
raise SaltInvocationError("Invalid re flag given: {}".format(flag))
_flags_acc.append(_flag)
return reduce(operator.__or__, _flags_acc)
elif isinstance(flags, int):
return flags
else:
raise SaltInvocationError(
'Invalid re flags: "{}", must be given either as a single flag '
"string, a list of strings, or as an integer".format(flags)
)
def _add_flags(flags, new_flags):
    """
    Combine ``flags`` and ``new_flags`` into a single re-module flag value.
    """
    return _get_flags(flags) | _get_flags(new_flags)
def _mkstemp_copy(path, preserve_inode=True):
    """
    Create a temp file and move/copy the contents of ``path`` to the temp file.
    Return the path to the temp file.

    path
        The full path to the file whose contents will be moved/copied to a temp file.
        Whether it's moved or copied depends on the value of ``preserve_inode``.

    preserve_inode
        Preserve the inode of the file, so that any hard links continue to share the
        inode with the original filename. This works by *copying* the file, reading
        from the copy, and writing to the file at the original inode. If ``False``, the
        file will be *moved* rather than copied, and a new file will be written to a
        new inode, but using the original filename. Hard links will then share an inode
        with the backup, instead (if using ``backup`` to create a backup copy).
        Default is ``True``.
    """
    # Create the temp file
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to create temp file. Exception: {}".format(exc)
        )

    # copy2 keeps the original inode (hard links stay intact); move leaves
    # the original name to be re-created on a brand-new inode afterwards.
    transfer = shutil.copy2 if preserve_inode else shutil.move
    verb = "copy" if preserve_inode else "move"
    try:
        transfer(path, temp_file)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to {} file '{}' to the temp file '{}'. Exception: {}".format(
                verb, path, temp_file, exc
            )
        )

    return temp_file
def _regex_to_static(src, regex):
"""
Expand regular expression to static match.
"""
if not src or not regex:
return None
try:
compiled = re.compile(regex, re.DOTALL)
src = [line for line in src if compiled.search(line) or line.count(regex)]
except Exception as ex: # pylint: disable=broad-except
raise CommandExecutionError("{}: '{}'".format(_get_error_message(ex), regex))
return src
def _assert_occurrence(probe, target, amount=1):
"""
Raise an exception, if there are different amount of specified occurrences in src.
"""
occ = len(probe)
if occ > amount:
msg = "more than"
elif occ < amount:
msg = "less than"
elif not occ:
msg = "no"
else:
msg = None
if msg:
raise CommandExecutionError(
'Found {} expected occurrences in "{}" expression'.format(msg, target)
)
return occ
def _set_line_indent(src, line, indent):
"""
Indent the line with the source line.
"""
if not indent:
return line
idt = []
for c in src:
if c not in ["\t", " "]:
break
idt.append(c)
return "".join(idt) + line.lstrip()
def _get_eol(line):
match = re.search("((?<!\r)\n|\r(?!\n)|\r\n)$", line)
return match and match.group() or ""
def _set_line_eol(src, line):
    """
    Strip ``line``'s trailing whitespace and terminate it with the line
    ending used by ``src`` (falling back to ``os.linesep``).
    """
    eol = _get_eol(src) or os.linesep
    return line.rstrip() + eol
def _set_line(
    lines,
    content=None,
    match=None,
    mode=None,
    location=None,
    before=None,
    after=None,
    indent=True,
):
    """
    Take ``lines`` and insert ``content`` at the correct place. If
    ``mode`` is ``'delete'`` then delete the ``content`` line instead.
    Returns a list of modified lines.
    lines
        The original file lines to modify.
    content
        Content of the line. Allowed to be empty if ``mode='delete'``.
    match
        The regex or contents to seek for on the line.
    mode
        What to do with the matching line. One of the following options
        is required:
        - ensure
            If ``content`` does not exist, it will be added.
        - replace
            If a line matching ``match`` exists, it is replaced with
            ``content`` (keeping the matched line's indentation when
            ``indent`` is set).
        - delete
            Delete the line, if found.
        - insert
            Insert a line if it does not already exist.
        .. note::
            If ``mode=insert`` is used, at least one of the following
            options must also be defined: ``location``, ``before``, or
            ``after``. If ``location`` is used, it takes precedence
            over the other two options
    location
        ``start`` or ``end``. Defines where to place the content in the
        lines. **Note** this option is only used when ``mode='insert`` is
        specified. If a location is passed in, it takes precedence over
        both the ``before`` and ``after`` kwargs.
        - start
            Place the ``content`` at the beginning of the lines.
        - end
            Place the ``content`` at the end of the lines.
    before
        Regular expression or an exact, case-sensitive fragment of the
        line to place the ``content`` before. This option is only used
        when either ``ensure`` or ``insert`` mode is specified.
    after
        Regular expression or an exact, case-sensitive fragment of the
        line to place the ``content`` after. This option is only used
        when either ``ensure`` or ``insert`` mode is specified.
    indent
        Keep indentation to match the previous line. Ignored when
        ``mode='delete'`` is specified.
    """
    # Validate the mode first; everything below depends on it.
    if mode not in ("insert", "ensure", "delete", "replace"):
        if mode is None:
            raise CommandExecutionError(
                "Mode was not defined. How to process the file?"
            )
        else:
            raise CommandExecutionError("Unknown mode: {}".format(mode))
    if mode != "delete" and content is None:
        raise CommandExecutionError("Content can only be empty if mode is delete")
    # Without an explicit anchor, the content itself is the match target.
    if not match and before is None and after is None:
        match = content
    # Resolve regex/fragment anchors to the concrete lines they match.
    after = _regex_to_static(lines, after)
    before = _regex_to_static(lines, before)
    match = _regex_to_static(lines, match)
    if not lines and mode in ("delete", "replace"):
        log.warning("Cannot find text to %s. File is empty.", mode)
        lines = []
    elif mode == "delete" and match:
        # Drop every line identical to the first matched line.
        lines = [line for line in lines if line != match[0]]
    elif mode == "replace" and match:
        # Replace only the first matched line, preserving its indentation.
        idx = lines.index(match[0])
        original_line = lines.pop(idx)
        lines.insert(idx, _set_line_indent(original_line, content, indent))
    elif mode == "insert":
        if before is None and after is None and location is None:
            raise CommandExecutionError(
                'On insert either "location" or "before/after" conditions are'
                " required.",
            )
        if location:
            if location == "end":
                if lines:
                    lines.append(_set_line_indent(lines[-1], content, indent))
                else:
                    lines.append(content)
            elif location == "start":
                if lines:
                    lines.insert(0, _set_line_eol(lines[0], content))
                else:
                    lines = [content + os.linesep]
        else:
            if before and after:
                _assert_occurrence(before, "before")
                _assert_occurrence(after, "after")
                # Insert immediately before the "before" anchor. (The old
                # code also computed lines.index(after[0]) here, but never
                # used it; after[0] is always a member of lines, so the
                # lookup served no validation purpose either.)
                last = lines.index(before[0])
                lines.insert(last, _set_line_indent(lines[last], content, indent))
            elif after:
                _assert_occurrence(after, "after")
                idx = lines.index(after[0])
                next_line = None if idx + 1 >= len(lines) else lines[idx + 1]
                # Skip the insert if the following line already carries the
                # content (ignoring the line terminator).
                if next_line is None or next_line.rstrip("\r\n") != content.rstrip(
                    "\r\n"
                ):
                    lines.insert(idx + 1, _set_line_indent(lines[idx], content, indent))
            elif before:
                _assert_occurrence(before, "before")
                idx = lines.index(before[0])
                prev_line = lines[idx - 1]
                # Skip the insert if the preceding line already carries it.
                if prev_line.rstrip("\r\n") != content.rstrip("\r\n"):
                    lines.insert(idx, _set_line_indent(lines[idx], content, indent))
            else:
                raise CommandExecutionError("Neither before or after was found in file")
    elif mode == "ensure":
        if before and after:
            _assert_occurrence(after, "after")
            _assert_occurrence(before, "before")
            after_index = lines.index(after[0])
            before_index = lines.index(before[0])
            already_there = any(line.lstrip() == content for line in lines)
            if not already_there:
                if after_index + 1 == before_index:
                    # Anchors are adjacent: insert between them.
                    lines.insert(
                        after_index + 1,
                        _set_line_indent(lines[after_index], content, indent),
                    )
                elif after_index + 2 == before_index:
                    # Exactly one non-matching line sits between the
                    # anchors: overwrite it.
                    # TODO: This should change, it doesn't match existing
                    # behavior -W. Werner, 2019-06-28
                    lines[after_index + 1] = _set_line_indent(
                        lines[after_index], content, indent
                    )
                else:
                    raise CommandExecutionError(
                        "Found more than one line between boundaries"
                        ' "before" and "after".'
                    )
        elif before:
            _assert_occurrence(before, "before")
            before_index = lines.index(before[0])
            # Insert unless the preceding line already is the content.
            if before_index == 0 or lines[before_index - 1].rstrip(
                "\r\n"
            ) != content.rstrip("\r\n"):
                lines.insert(
                    before_index,
                    _set_line_indent(lines[before_index - 1], content, indent),
                )
        elif after:
            _assert_occurrence(after, "after")
            after_index = lines.index(after[0])
            is_last_line = after_index + 1 >= len(lines)
            # Insert unless the following line already is the content.
            if is_last_line or lines[after_index + 1].rstrip("\r\n") != content.rstrip(
                "\r\n"
            ):
                lines.insert(
                    after_index + 1,
                    _set_line_indent(lines[after_index], content, indent),
                )
        else:
            raise CommandExecutionError(
                "Wrong conditions? Unable to ensure line without knowing where"
                " to put it before and/or after."
            )
    return lines
def line(
    path,
    content=None,
    match=None,
    mode=None,
    location=None,
    before=None,
    after=None,
    show_changes=True,
    backup=False,
    quiet=False,
    indent=True,
):
    # pylint: disable=W1401
    """
    .. versionadded:: 2015.8.0
    Line-focused editing of a file.
    .. note::
        ``file.line`` exists for historic reasons, and is not
        generally recommended. It has a lot of quirks. You may find
        ``file.replace`` to be more suitable.
    ``file.line`` is most useful if you have single lines in a file
    (potentially a config file) that you would like to manage. It can
    remove, add, and replace a single line at a time.
    path
        Filesystem path to the file to be edited.
    content
        Content of the line. Allowed to be empty if ``mode='delete'``.
    match
        Match the target line for an action by
        a fragment of a string or regular expression.
        If neither ``before`` nor ``after`` are provided, and ``match``
        is also ``None``, match falls back to the ``content`` value.
    mode
        Defines how to edit a line. One of the following options is
        required:
        - ensure
            If line does not exist, it will be added. If ``before``
            and ``after`` are specified either zero lines, or lines
            that contain the ``content`` line are allowed to be in between
            ``before`` and ``after``. If there are lines, and none of
            them match then it will produce an error.
        - replace
            If line already exists, the entire line will be replaced.
        - delete
            Delete the line, if found.
        - insert
            Nearly identical to ``ensure``. If a line does not exist,
            it will be added.
            The differences are that multiple (and non-matching) lines are
            allowed between ``before`` and ``after``, if they are
            specified. The line will always be inserted right before
            ``before``. ``insert`` also allows the use of ``location`` to
            specify that the line should be added at the beginning or end of
            the file.
        .. note::
            If ``mode='insert'`` is used, at least one of ``location``,
            ``before``, or ``after`` is required. If ``location`` is used,
            ``before`` and ``after`` are ignored.
    location
        In ``mode='insert'`` only, whether to place the ``content`` at the
        beginning or end of the file. If ``location`` is provided,
        ``before`` and ``after`` are ignored. Valid locations:
        - start
            Place the content at the beginning of the file.
        - end
            Place the content at the end of the file.
    before
        Regular expression or an exact case-sensitive fragment of the string.
        Will be tried as **both** a regex **and** a part of the line. Must
        match **exactly** one line in the file. This value is only used in
        ``ensure`` and ``insert`` modes. The ``content`` will be inserted just
        before this line, with matching indentation unless ``indent=False``.
    after
        Regular expression or an exact case-sensitive fragment of the string.
        Will be tried as **both** a regex **and** a part of the line. Must
        match **exactly** one line in the file. This value is only used in
        ``ensure`` and ``insert`` modes. The ``content`` will be inserted
        directly after this line, unless ``before`` is also provided. If
        ``before`` is not provided, indentation will match this line, unless
        ``indent=False``.
    show_changes
        Output a unified diff of the old file and the new file.
        If ``False`` return a boolean if any changes were made.
        Default is ``True``
        .. note::
            Using this option will store two copies of the file in-memory
            (the original version and the edited version) in order to generate the diff.
    backup
        Create a backup of the original file with the extension:
        "Year-Month-Day-Hour-Minutes-Seconds".
    quiet
        Do not raise any exceptions. E.g. ignore the fact that the file that is
        tried to be edited does not exist and nothing really happened.
    indent
        Keep indentation with the previous line. This option is not considered when
        the ``delete`` mode is specified. Default is ``True``
    CLI Example:
    .. code-block:: bash
        salt '*' file.line /etc/nsswitch.conf "networks:\tfiles dns" after="hosts:.*?" mode='ensure'
    .. note::
        If an equal sign (``=``) appears in an argument to a Salt command, it is
        interpreted as a keyword argument in the format of ``key=val``. That
        processing can be bypassed in order to pass an equal sign through to the
        remote shell command by manually specifying the kwarg:
        .. code-block:: bash
            salt '*' file.line /path/to/file content="CREATEMAIL_SPOOL=no" match="CREATE_MAIL_SPOOL=yes" mode="replace"
    **Examples:**
    Here's a simple config file.
    .. code-block:: ini
        [some_config]
        # Some config file
        # this line will go away
        here=False
        away=True
        goodbye=away
    .. code-block:: bash
        salt \\* file.line /some/file.conf mode=delete match=away
    This will produce:
    .. code-block:: ini
        [some_config]
        # Some config file
        here=False
        away=True
        goodbye=away
    If that command is executed 2 more times, this will be the result:
    .. code-block:: ini
        [some_config]
        # Some config file
        here=False
    If we reset the file to its original state and run
    .. code-block:: bash
        salt \\* file.line /some/file.conf mode=replace match=away content=here
    Three passes of this state will result in this file:
    .. code-block:: ini
        [some_config]
        # Some config file
        here
        here=False
        here
        here
    Each pass replacing the first line found.
    Given this file:
    .. code-block:: text
        insert after me
        something
        insert before me
    The following command
    .. code-block:: bash
        salt \\* file.line /some/file.txt mode=insert after="insert after me" before="insert before me" content=thrice
    If that command is executed 3 times, the result will be:
    .. code-block:: text
        insert after me
        something
        thrice
        thrice
        thrice
        insert before me
    If the mode is ``ensure`` instead, it will fail each time. To succeed, we
    need to remove the incorrect line between before and after:
    .. code-block:: text
        insert after me
        insert before me
    With an ensure mode, this will insert ``thrice`` the first time and
    make no changes for subsequent calls. For something simple this is
    fine, but if you have instead blocks like this:
    .. code-block:: text
        Begin SomeBlock
            foo = bar
        End
        Begin AnotherBlock
            another = value
        End
    And you try to use ensure this way:
    .. code-block:: bash
        salt \\* file.line /tmp/fun.txt mode="ensure" content="this = should be my content" after="Begin SomeBlock" before="End"
    This will fail because there are multiple ``End`` lines. Without that
    problem, it still would fail because there is a non-matching line,
    ``foo = bar``. Ensure **only** allows either zero, or the matching
    line present to be present in between ``before`` and ``after``.
    """
    # pylint: enable=W1401
    # Resolve ~ and symlinks so we operate on the real file.
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError(
                'File "{}" does not exists or is not a file.'.format(path)
            )
        return False  # No changes had happened
    # Mode names are matched case-insensitively.
    mode = mode and mode.lower() or mode
    if mode not in ["insert", "ensure", "delete", "replace"]:
        if mode is None:
            raise CommandExecutionError(
                "Mode was not defined. How to process the file?"
            )
        else:
            raise CommandExecutionError('Unknown mode: "{}"'.format(mode))
    # We've set the content to be empty in the function params but we want to make sure
    # it gets passed when needed. Feature #37092
    empty_content_modes = ["delete"]
    if mode not in empty_content_modes and content is None:
        raise CommandExecutionError(
            'Content can only be empty if mode is "{}"'.format(
                ", ".join(empty_content_modes)
            )
        )
    del empty_content_modes
    # Before/after has privilege. If nothing defined, match is used by content.
    if before is None and after is None and not match:
        match = content
    with salt.utils.files.fopen(path, mode="r") as fp_:
        body = salt.utils.data.decode_list(fp_.readlines())
    # Fingerprint the original content so changes can be detected later.
    body_before = hashlib.sha256(
        salt.utils.stringutils.to_bytes("".join(body))
    ).hexdigest()
    # Add empty line at the end if last line ends with eol.
    # Allows simpler code
    if body and _get_eol(body[-1]):
        body.append("")
    if os.stat(path).st_size == 0 and mode in ("delete", "replace"):
        log.warning("Cannot find text to %s. File '%s' is empty.", mode, path)
        body = []
    # Delegate the actual line manipulation to _set_line().
    body = _set_line(
        lines=body,
        content=content,
        match=match,
        mode=mode,
        location=location,
        before=before,
        after=after,
        indent=indent,
    )
    if body:
        # Re-terminate any line that lost its EOL, borrowing the
        # terminator style of a neighbouring line.
        for idx, line in enumerate(body):
            if not _get_eol(line) and idx + 1 < len(body):
                # NOTE(review): for idx == 1 this and/or chain yields 1
                # (the line itself), not 0, because idx - 1 == 0 is falsy.
                # Presumably idx - 1 was intended -- confirm before changing.
                prev = idx and idx - 1 or 1
                body[idx] = _set_line_eol(body[prev], line)
        # We do not need empty line at the end anymore
        if "" == body[-1]:
            body.pop()
    # Compare content hashes to decide whether anything changed.
    changed = (
        body_before
        != hashlib.sha256(salt.utils.stringutils.to_bytes("".join(body))).hexdigest()
    )
    if backup and changed and __opts__["test"] is False:
        try:
            # Timestamped backup of the pre-edit file.
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(
                temp_file,
                "{}.{}".format(
                    path, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
                ),
            )
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to create the backup file of {}. Exception: {}".format(
                    path, exc
                )
            )
    changes_diff = None
    if changed:
        if show_changes:
            # Re-read the on-disk content to build the unified diff.
            with salt.utils.files.fopen(path, "r") as fp_:
                path_content = salt.utils.data.decode_list(fp_.read().splitlines(True))
            changes_diff = __utils__["stringutils.get_diff"](path_content, body)
        if __opts__["test"] is False:
            fh_ = None
            try:
                # Make sure we match the file mode from salt.utils.files.fopen
                mode = "w"
                body = salt.utils.data.decode_list(body, to_str=True)
                fh_ = salt.utils.atomicfile.atomic_open(path, mode)
                fh_.writelines(body)
            finally:
                if fh_:
                    fh_.close()
    return show_changes and changes_diff or changed
def replace(
    path,
    pattern,
    repl,
    count=0,
    flags=8,
    bufsize=1,
    append_if_not_found=False,
    prepend_if_not_found=False,
    not_found_content=None,
    backup=".bak",
    dry_run=False,
    search_only=False,
    show_changes=True,
    ignore_if_missing=False,
    preserve_inode=True,
    backslash_literal=False,
):
    """
    .. versionadded:: 0.17.0
    Replace occurrences of a pattern in a file. If ``show_changes`` is
    ``True``, then a diff of what changed will be returned, otherwise a
    ``True`` will be returned when changes are made, and ``False`` when
    no changes are made.
    This is a pure Python implementation that wraps Python's :py:func:`~re.sub`.
    path
        Filesystem path to the file to be edited. If a symlink is specified, it
        will be resolved to its target.
    pattern
        A regular expression, to be matched using Python's
        :py:func:`~re.search`.
    repl
        The replacement text
    count: 0
        Maximum number of pattern occurrences to be replaced. If count is a
        positive integer ``n``, only ``n`` occurrences will be replaced,
        otherwise all occurrences will be replaced.
    flags (list or int)
        A list of flags defined in the ``re`` module documentation from the
        Python standard library. Each list item should be a string that will
        correlate to the human-friendly flag name. E.g., ``['IGNORECASE',
        'MULTILINE']``. Optionally, ``flags`` may be an int, with a value
        corresponding to the XOR (``|``) of all the desired flags. Defaults to
        8 (which supports 'MULTILINE').
    bufsize (int or str)
        How much of the file to buffer into memory at once. The
        default value ``1`` processes one line at a time. The special value
        ``file`` may be specified which will read the entire file into memory
        before processing.
    append_if_not_found: False
        .. versionadded:: 2014.7.0
        If set to ``True``, and pattern is not found, then the content will be
        appended to the file.
    prepend_if_not_found: False
        .. versionadded:: 2014.7.0
        If set to ``True`` and pattern is not found, then the content will be
        prepended to the file.
    not_found_content
        .. versionadded:: 2014.7.0
        Content to use for append/prepend if not found. If None (default), uses
        ``repl``. Useful when ``repl`` uses references to group in pattern.
    backup: .bak
        The file extension to use for a backup of the file before editing. Set
        to ``False`` to skip making a backup.
    dry_run: False
        If set to ``True``, no changes will be made to the file, the function
        will just return the changes that would have been made (or a
        ``True``/``False`` value if ``show_changes`` is set to ``False``).
    search_only: False
        If set to true, no changes will be performed on the file, and this
        function will simply return ``True`` if the pattern was matched, and
        ``False`` if not.
    show_changes: True
        If ``True``, return a diff of changes made. Otherwise, return ``True``
        if changes were made, and ``False`` if not.
        .. note::
            Using this option will store two copies of the file in memory (the
            original version and the edited version) in order to generate the
            diff. This may not normally be a concern, but could impact
            performance if used with large files.
    ignore_if_missing: False
        .. versionadded:: 2015.8.0
        If set to ``True``, this function will simply return ``False``
        if the file doesn't exist. Otherwise, an error will be thrown.
    preserve_inode: True
        .. versionadded:: 2015.8.0
        Preserve the inode of the file, so that any hard links continue to
        share the inode with the original filename. This works by *copying* the
        file, reading from the copy, and writing to the file at the original
        inode. If ``False``, the file will be *moved* rather than copied, and a
        new file will be written to a new inode, but using the original
        filename. Hard links will then share an inode with the backup, instead
        (if using ``backup`` to create a backup copy).
    backslash_literal: False
        .. versionadded:: 2016.11.7
        Interpret backslashes as literal backslashes for the repl and not
        escape characters. This will help when using append/prepend so that
        the backslashes are not interpreted for the repl on the second run of
        the state.
    If an equal sign (``=``) appears in an argument to a Salt command it is
    interpreted as a keyword argument in the format ``key=val``. That
    processing can be bypassed in order to pass an equal sign through to the
    remote shell command by manually specifying the kwarg:
    .. code-block:: bash
        salt '*' file.replace /path/to/file pattern='=' repl=':'
        salt '*' file.replace /path/to/file pattern="bind-address\\s*=" repl='bind-address:'
    CLI Examples:
    .. code-block:: bash
        salt '*' file.replace /etc/httpd/httpd.conf pattern='LogLevel warn' repl='LogLevel info'
        salt '*' file.replace /some/file pattern='before' repl='after' flags='[MULTILINE, IGNORECASE]'
    """
    symlink = False
    if is_link(path):
        # Remember the symlink and its target so the backup can mirror it.
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError("File not found: {}".format(path))
    if not __utils__["files.is_text"](path):
        raise SaltInvocationError(
            "Cannot perform string replacements on a binary file: {}".format(path)
        )
    # Mutually exclusive / nonsensical option combinations.
    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            "search_only cannot be used with append/prepend_if_not_found"
        )
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            "Only one of append and prepend_if_not_found is permitted"
        )
    flags_num = _get_flags(flags)
    cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == "file":
        bufsize = filesize
    # Search the file; track if any changes have been made for the return val
    has_changes = False
    orig_file = []  # used for show_changes and change detection
    new_file = []  # used for show_changes and change detection
    if not salt.utils.platform.is_windows():
        # Capture ownership/mode so they can be restored after the rewrite.
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Avoid TypeErrors by forcing repl to be bytearray related to mmap
    # Replacement text may contains integer: 123 for example
    repl = salt.utils.stringutils.to_bytes(str(repl))
    if not_found_content:
        not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
    found = False
    temp_file = None
    content = (
        salt.utils.stringutils.to_unicode(not_found_content)
        if not_found_content and (prepend_if_not_found or append_if_not_found)
        else salt.utils.stringutils.to_unicode(repl)
    )
    try:
        # First check the whole file, determine whether to make the replacement
        # Searching first avoids modifying the time stamp if there are no changes
        r_data = None
        # Use a read-only handle to open the file
        with salt.utils.files.fopen(path, mode="rb", buffering=bufsize) as r_file:
            try:
                # mmap throws a ValueError if the file is empty.
                r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
            except (ValueError, OSError):
                # size of file in /proc is 0, but contains data.
                # The handle is binary, so the chunks must be joined as
                # bytes (a str "" separator would raise a TypeError here).
                r_data = b"".join(r_file)
            if search_only:
                # Just search; bail as early as a match is found
                if re.search(cpattern, r_data):
                    return True  # `with` block handles file closure
                else:
                    return False
            else:
                result, nrepl = re.subn(
                    cpattern,
                    # repl is bytes at this point, so the escape must use
                    # bytes literals (str args would raise a TypeError).
                    repl.replace(b"\\", b"\\\\") if backslash_literal else repl,
                    r_data,
                    count,
                )
                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # Identity check the potential change
                    # NOTE(review): pattern is str while repl is bytes, so
                    # this comparison is always unequal on Python 3; the
                    # final diff check at the end of the function is what
                    # actually reverts has_changes. Confirm before changing.
                    has_changes = True if pattern != repl else has_changes
                if prepend_if_not_found or append_if_not_found:
                    # Search for content, to avoid pre/appending the
                    # content if it was pre/appended in a previous run.
                    if re.search(
                        salt.utils.stringutils.to_bytes(
                            "^{}($|(?=\r\n))".format(re.escape(content))
                        ),
                        r_data,
                        flags=flags_num,
                    ):
                        # Content was found, so set found.
                        found = True
                # Keep copies of both versions for diffing/change detection.
                orig_file = (
                    r_data.read(filesize).splitlines(True)
                    if isinstance(r_data, mmap.mmap)
                    else r_data.splitlines(True)
                )
                new_file = result.splitlines(True)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to open file '{}'. Exception: {}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()
    if has_changes and not dry_run:
        # Write the replacement text in this block.
        try:
            # Create a copy to read from and to use as a backup later
            temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode)
        except OSError as exc:
            raise CommandExecutionError("Exception: {}".format(exc))
        r_data = None
        try:
            # Open the file in write mode
            with salt.utils.files.fopen(path, mode="w", buffering=bufsize) as w_file:
                try:
                    # Open the temp file in read mode
                    with salt.utils.files.fopen(
                        temp_file, mode="r", buffering=bufsize
                    ) as r_file:
                        r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
                        result, nrepl = re.subn(
                            cpattern,
                            # Same bytes-literal escape as the search pass.
                            repl.replace(b"\\", b"\\\\") if backslash_literal else repl,
                            r_data,
                            count,
                        )
                        try:
                            w_file.write(salt.utils.stringutils.to_str(result))
                        except OSError as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{}'. "
                                "Exception: {}".format(path, temp_file, exc)
                            )
                except OSError as exc:
                    raise CommandExecutionError("Exception: {}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except OSError as exc:
            raise CommandExecutionError("Exception: {}".format(exc))
    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(
                0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep)
            )
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if 0 != len(new_file):
                if not new_file[-1].endswith(
                    salt.utils.stringutils.to_bytes(os.linesep)
                ):
                    new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
            new_file.append(
                not_found_content + salt.utils.stringutils.to_bytes(os.linesep)
            )
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode)
            except OSError as exc:
                raise CommandExecutionError("Exception: {}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, "wb")
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested
        # and only if there were any changes
        backup_name = "{}{}".format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{}' to the "
                "backup file '{}'. "
                "Exception: {}".format(temp_file, backup_name, exc)
            )
        if symlink:
            symlink_backup = "{}{}".format(given_path, backup)
            target_backup = "{}{}".format(target_path, backup)
            # Always clobber any existing symlink backup
            # to match the behaviour of the 'backup' option
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            except Exception as exc:  # pylint: disable=broad-except
                # Binding exc here is required: the earlier `as exc`
                # targets are deleted when their except blocks end, so the
                # message below would otherwise raise a NameError.
                raise CommandExecutionError(
                    "Unable create backup symlink '{}'. "
                    "Target was '{}'. "
                    "Exception: {}".format(symlink_backup, target_backup, exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{}'. Exception: {}".format(temp_file, exc)
            )
    if not dry_run and not salt.utils.platform.is_windows():
        # Restore the ownership/permissions captured before the rewrite.
        check_perms(path, None, pre_user, pre_group, pre_mode)
    differences = __utils__["stringutils.get_diff"](orig_file, new_file)
    if show_changes:
        return differences
    # We may have found a regex line match but don't need to change the line
    # (for situations where the pattern also matches the repl). Revert the
    # has_changes flag to False if the final result is unchanged.
    if not differences:
        has_changes = False
    return has_changes
def blockreplace(
path,
marker_start="#-- start managed zone --",
marker_end="#-- end managed zone --",
content="",
append_if_not_found=False,
prepend_if_not_found=False,
backup=".bak",
dry_run=False,
show_changes=True,
append_newline=False,
insert_before_match=None,
insert_after_match=None,
):
"""
.. versionadded:: 2014.1.0
Replace content of a text block in a file, delimited by line markers
A block of content delimited by comments can help you manage several lines
entries without worrying about old entries removal.
.. note::
This function will store two copies of the file in-memory (the original
version and the edited version) in order to detect changes and only
edit the targeted file if necessary.
path
Filesystem path to the file to be edited
marker_start
The line content identifying a line as the start of the content block.
Note that the whole line containing this marker will be considered, so
whitespace or extra content before or after the marker is included in
final output
marker_end
The line content identifying the end of the content block. As of
versions 2017.7.5 and 2018.3.1, everything up to the text matching the
marker will be replaced, so it's important to ensure that your marker
includes the beginning of the text you wish to replace.
content
The content to be used between the two lines identified by marker_start
and marker_stop.
append_if_not_found: False
If markers are not found and set to ``True`` then, the markers and
content will be appended to the file.
prepend_if_not_found: False
If markers are not found and set to ``True`` then, the markers and
content will be prepended to the file.
insert_before_match
If markers are not found, this parameter can be set to a regex which will
insert the block before the first found occurrence in the file.
.. versionadded:: 3001
insert_after_match
If markers are not found, this parameter can be set to a regex which will
insert the block after the first found occurrence in the file.
.. versionadded:: 3001
backup
The file extension to use for a backup of the file if any edit is made.
Set to ``False`` to skip making a backup.
dry_run: False
If ``True``, do not make any edits to the file and simply return the
changes that *would* be made.
show_changes: True
Controls how changes are presented. If ``True``, this function will
return a unified diff of the changes made. If False, then it will
return a boolean (``True`` if any changes were made, otherwise
``False``).
append_newline: False
Controls whether or not a newline is appended to the content block. If
the value of this argument is ``True`` then a newline will be added to
the content block. If it is ``False``, then a newline will *not* be
added to the content block. If it is ``None`` then a newline will only
be added to the content block if it does not already end in a newline.
.. versionadded:: 2016.3.4
.. versionchanged:: 2017.7.5,2018.3.1
New behavior added when value is ``None``.
.. versionchanged:: 2019.2.0
The default value of this argument will change to ``None`` to match
the behavior of the :py:func:`file.blockreplace state
<salt.states.file.blockreplace>`
CLI Example:
.. code-block:: bash
salt '*' file.blockreplace /etc/hosts '#-- start managed zone foobar : DO NOT EDIT --' \\
'#-- end managed zone foobar --' $'10.0.1.1 foo.foobar\\n10.0.1.2 bar.foobar' True
"""
exclusive_params = [
append_if_not_found,
prepend_if_not_found,
bool(insert_before_match),
bool(insert_after_match),
]
if sum(exclusive_params) > 1:
raise SaltInvocationError(
"Only one of append_if_not_found, prepend_if_not_found,"
" insert_before_match, and insert_after_match is permitted"
)
path = os.path.expanduser(path)
if not os.path.exists(path):
raise SaltInvocationError("File not found: {}".format(path))
try:
file_encoding = __utils__["files.get_encoding"](path)
except CommandExecutionError:
file_encoding = None
if __utils__["files.is_binary"](path):
if not file_encoding:
raise SaltInvocationError(
"Cannot perform string replacements on a binary file: {}".format(path)
)
if insert_before_match or insert_after_match:
if insert_before_match:
if not isinstance(insert_before_match, str):
raise CommandExecutionError(
"RegEx expected in insert_before_match parameter."
)
elif insert_after_match:
if not isinstance(insert_after_match, str):
raise CommandExecutionError(
"RegEx expected in insert_after_match parameter."
)
if append_newline is None and not content.endswith((os.linesep, "\n")):
append_newline = True
# Split the content into a list of lines, removing newline characters. To
# ensure that we handle both Windows and POSIX newlines, first split on
# Windows newlines, and then split on POSIX newlines.
split_content = []
for win_line in content.split("\r\n"):
for content_line in win_line.split("\n"):
split_content.append(content_line)
line_count = len(split_content)
has_changes = False
orig_file = []
new_file = []
in_block = False
block_found = False
linesep = None
def _add_content(linesep, lines=None, include_marker_start=True, end_line=None):
if lines is None:
lines = []
include_marker_start = True
if end_line is None:
end_line = marker_end
end_line = end_line.rstrip("\r\n") + linesep
if include_marker_start:
lines.append(marker_start + linesep)
if split_content:
for index, content_line in enumerate(split_content, 1):
if index != line_count:
lines.append(content_line + linesep)
else:
# We're on the last line of the content block
if append_newline:
lines.append(content_line + linesep)
lines.append(end_line)
else:
lines.append(content_line + end_line)
else:
lines.append(end_line)
return lines
# We do not use in-place editing to avoid file attrs modifications when
# no changes are required and to avoid any file access on a partially
# written file.
try:
with salt.utils.files.fopen(
path, "r", encoding=file_encoding, newline=""
) as fi_file:
for line in fi_file:
write_line_to_new_file = True
if linesep is None:
# Auto-detect line separator
if line.endswith("\r\n"):
linesep = "\r\n"
elif line.endswith("\n"):
linesep = "\n"
else:
# No newline(s) in file, fall back to system's linesep
linesep = os.linesep
if marker_start in line:
# We've entered the content block
in_block = True
else:
if in_block:
# We're not going to write the lines from the old file to
# the new file until we have exited the block.
write_line_to_new_file = False
marker_end_pos = line.find(marker_end)
if marker_end_pos != -1:
# End of block detected
in_block = False
# We've found and exited the block
block_found = True
_add_content(
linesep,
lines=new_file,
include_marker_start=False,
end_line=line[marker_end_pos:],
)
# Save the line from the original file
orig_file.append(line)
if write_line_to_new_file:
new_file.append(line)
except OSError as exc:
raise CommandExecutionError("Failed to read from {}: {}".format(path, exc))
finally:
if linesep is None:
# If the file was empty, we will not have set linesep yet. Assume
# the system's line separator. This is needed for when we
# prepend/append later on.
linesep = os.linesep
try:
fi_file.close()
except Exception: # pylint: disable=broad-except
pass
if in_block:
# unterminated block => bad, always fail
raise CommandExecutionError(
"Unterminated marked block. End of file reached before marker_end."
)
if not block_found:
if prepend_if_not_found:
# add the markers and content at the beginning of file
prepended_content = _add_content(linesep)
prepended_content.extend(new_file)
new_file = prepended_content
block_found = True
elif append_if_not_found:
# Make sure we have a newline at the end of the file
if new_file:
if not new_file[-1].endswith(linesep):
new_file[-1] += linesep
# add the markers and content at the end of file
_add_content(linesep, lines=new_file)
block_found = True
elif insert_before_match or insert_after_match:
match_regex = insert_before_match or insert_after_match
match_idx = [
i for i, item in enumerate(orig_file) if re.search(match_regex, item)
]
if match_idx:
match_idx = match_idx[0]
for line in _add_content(linesep):
if insert_after_match:
match_idx += 1
new_file.insert(match_idx, line)
if insert_before_match:
match_idx += 1
block_found = True
else:
raise CommandExecutionError(
"Cannot edit marked block. Markers were not found in file."
)
if block_found:
diff = __utils__["stringutils.get_diff"](orig_file, new_file)
has_changes = diff != ""
if has_changes and not dry_run:
# changes detected
# backup file attrs
perms = {}
perms["user"] = get_user(path)
perms["group"] = get_group(path)
perms["mode"] = salt.utils.files.normalize_mode(get_mode(path))
# backup old content
if backup is not False:
backup_path = "{}{}".format(path, backup)
shutil.copy2(path, backup_path)
# copy2 does not preserve ownership
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1123
check_perms(path=backup_path, ret=None, owner=perms["user"])
# pylint: enable=E1120,E1123
else:
check_perms(
name=backup_path,
ret=None,
user=perms["user"],
group=perms["group"],
mode=perms["mode"],
)
if not block_found:
raise CommandExecutionError(
"Cannot edit marked block. Markers were not found in file."
)
diff = __utils__["stringutils.get_diff"](orig_file, new_file)
has_changes = diff != ""
if has_changes and not dry_run:
# changes detected
# backup file attrs
perms = {}
perms["user"] = get_user(path)
perms["group"] = get_group(path)
perms["mode"] = salt.utils.files.normalize_mode(get_mode(path))
# backup old content
if backup is not False:
backup_path = "{}{}".format(path, backup)
shutil.copy2(path, backup_path)
# copy2 does not preserve ownership
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1123
check_perms(path=backup_path, ret=None, owner=perms["user"])
# pylint: enable=E1120,E1123
else:
check_perms(
backup_path, None, perms["user"], perms["group"], perms["mode"]
)
# write new content in the file while avoiding partial reads
try:
fh_ = salt.utils.atomicfile.atomic_open(path, "wb")
for line in new_file:
fh_.write(salt.utils.stringutils.to_bytes(line, encoding=file_encoding))
finally:
fh_.close()
# this may have overwritten file attrs
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1123
check_perms(path=path, ret=None, owner=perms["user"])
# pylint: enable=E1120,E1123
else:
check_perms(path, None, perms["user"], perms["group"], perms["mode"])
if show_changes:
return diff
return has_changes
def search(path, pattern, flags=8, bufsize=1, ignore_if_missing=False, multiline=False):
    """
    .. versionadded:: 0.17.0

    Search for occurrences of a pattern in a file

    Except for multiline, params are identical to
    :py:func:`~salt.modules.file.replace`.

    multiline
        If true, inserts 'MULTILINE' into ``flags`` and sets ``bufsize`` to
        'file'.

        .. versionadded:: 2015.8.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.search /etc/crontab 'mymaintenance.sh'
    """
    if multiline:
        flags = _add_flags(flags, "MULTILINE")
        bufsize = "file"

    # Deliberately delegate to file.replace in dry-run/search-only mode so the
    # two functions share regex semantics, expected behavior, *and* bugs.
    # Any enhancement or fix to one should affect the other.
    search_opts = {
        "flags": flags,
        "bufsize": bufsize,
        "dry_run": True,
        "search_only": True,
        "show_changes": False,
        "ignore_if_missing": ignore_if_missing,
    }
    return replace(path, pattern, "", **search_opts)
def patch(originalfile, patchfile, options="", dry_run=False):
    """
    .. versionadded:: 0.10.4

    Apply a patch to a file or directory.

    Equivalent to:

    .. code-block:: bash

        patch <options> -i <patchfile> <originalfile>

    Or, when a directory is patched:

    .. code-block:: bash

        patch <options> -i <patchfile> -d <originalfile> -p0

    originalfile
        The full path to the file or directory to be patched
    patchfile
        A patch file to apply to ``originalfile``
    options
        Options to pass to patch.

    CLI Example:

    .. code-block:: bash

        salt '*' file.patch /opt/file.txt /tmp/file.txt.patch
    """
    patchpath = salt.utils.path.which("patch")
    if not patchpath:
        raise CommandExecutionError(
            "patch executable not found. Is the distribution's patch package installed?"
        )

    cmd = [patchpath]
    cmd.extend(salt.utils.args.shlex_split(options))
    if dry_run:
        # BSD patch spells the dry-run option differently
        if __grains__["kernel"] in ("FreeBSD", "OpenBSD"):
            cmd.append("-C")
        else:
            cmd.append("--dry-run")

    # --forward prevents interactive prompts when the patch fails to apply;
    # the exit code will still be greater than 0 in that case.
    if "-N" not in cmd and "--forward" not in cmd:
        cmd.append("--forward")

    # By default, patch writes rejected hunks to <filename>.rej. Suppress
    # that unless the caller supplied an explicit reject-file option.
    caller_set_rejectfile = any(
        opt == "-r" or opt.startswith("-r ") or opt.startswith("--reject-file")
        for opt in cmd
    )
    if not caller_set_rejectfile:
        cmd.append("--reject-file=-")

    cmd.extend(["-i", patchfile])

    if os.path.isdir(originalfile):
        cmd.extend(["-d", originalfile])
        # When patching a directory, default to stripping no path components
        # unless the caller already chose a strip level.
        caller_set_strip = any(
            opt.startswith("-p") or opt.startswith("--strip=") for opt in cmd
        )
        if not caller_set_strip:
            cmd.append("--strip=0")
    else:
        cmd.append(originalfile)

    return __salt__["cmd.run_all"](cmd, python_shell=False)
def contains(path, text):
    """
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the file at ``path`` contains ``text``

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains /etc/crontab 'mymaintenance.sh'
    """
    full_path = os.path.expanduser(path)
    if not os.path.exists(full_path):
        return False

    needle = str(text).strip()
    try:
        # Scan in buffered chunks rather than loading the whole file
        with salt.utils.filebuffer.BufferedReader(full_path) as reader:
            return any(needle in chunk for chunk in reader)
    except OSError:
        return False
def contains_regex(path, regex, lchar=""):
    """
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return True if the given regular expression matches on any line in the text
    of a given file.

    If the lchar argument (leading char) is specified, it
    will strip `lchar` from the left side of each line before trying to match

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_regex /etc/crontab
    """
    full_path = os.path.expanduser(path)
    if not os.path.exists(full_path):
        return False

    try:
        with salt.utils.files.fopen(full_path, "r") as target:
            for raw_line in target:
                candidate = salt.utils.stringutils.to_unicode(raw_line)
                if lchar:
                    # Drop the leading char(s) before matching
                    candidate = candidate.lstrip(lchar)
                if re.search(regex, candidate):
                    return True
            return False
    except OSError:
        return False
def contains_glob(path, glob_expr):
    """
    .. deprecated:: 0.17.0
       Use :func:`search` instead.

    Return ``True`` if the given glob matches a string in the named file

    CLI Example:

    .. code-block:: bash

        salt '*' file.contains_glob /etc/foobar '*cheese*'
    """
    full_path = os.path.expanduser(path)
    if not os.path.exists(full_path):
        return False

    try:
        # Match the glob against each buffered chunk of the file
        with salt.utils.filebuffer.BufferedReader(full_path) as reader:
            return any(fnmatch.fnmatch(chunk, glob_expr) for chunk in reader)
    except OSError:
        return False
def append(path, *args, **kwargs):
    """
    .. versionadded:: 0.9.5

    Append text to the end of a file

    path
        path to file

    `*args`
        strings to append to file

    CLI Example:

    .. code-block:: bash

        salt '*' file.append /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.append /etc/motd args='cheese=spam'

            salt '*' file.append /etc/motd args="['cheese=spam','spam=cheese']"
    """
    path = os.path.expanduser(path)

    # Largely inspired by Fabric's contrib.files.append()
    if "args" in kwargs:
        args = kwargs["args"] if isinstance(kwargs["args"], list) else [kwargs["args"]]

    # Make sure we have a newline at the end of the file. Do this in binary
    # mode so SEEK_END with nonzero offset will work.
    with salt.utils.files.fopen(path, "rb+") as ofile:
        linesep = salt.utils.stringutils.to_bytes(os.linesep)
        try:
            ofile.seek(-len(linesep), os.SEEK_END)
        except OSError as exc:
            if exc.errno not in (errno.EINVAL, errno.ESPIPE):
                raise
            # Empty file; new lines will simply start at the beginning
        else:
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)

    # Append lines in text mode
    with salt.utils.files.fopen(path, "a") as ofile:
        for new_line in args:
            ofile.write(
                salt.utils.stringutils.to_str("{}{}".format(new_line, os.linesep))
            )

    return 'Wrote {} lines to "{}"'.format(len(args), path)
def prepend(path, *args, **kwargs):
    """
    .. versionadded:: 2014.7.0

    Prepend text to the beginning of a file

    path
        path to file

    `*args`
        strings to prepend to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.prepend /etc/motd \\
                "With all thine offerings thou shalt offer salt." \\
                "Salt is what makes things taste bad when it isn't in them."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.prepend /etc/motd args='cheese=spam'

            salt '*' file.prepend /etc/motd args="['cheese=spam','spam=cheese']"
    """
    path = os.path.expanduser(path)

    if "args" in kwargs:
        args = kwargs["args"] if isinstance(kwargs["args"], list) else [kwargs["args"]]

    # Read the existing body, if any; a missing file means an empty body
    try:
        with salt.utils.files.fopen(path) as fhr:
            body = [salt.utils.stringutils.to_unicode(line) for line in fhr.readlines()]
    except OSError:
        body = []

    preface = ["{}\n".format(line) for line in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        # Rewrite the file as new lines followed by the original body
        ofile.write(salt.utils.stringutils.to_str("".join(preface + body)))
    return 'Prepended {} lines to "{}"'.format(len(args), path)
def write(path, *args, **kwargs):
    """
    .. versionadded:: 2014.7.0

    Write text to a file, overwriting any existing contents.

    path
        path to file

    `*args`
        strings to write to the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.write /etc/motd \\
                "With all thine offerings thou shalt offer salt."

    .. admonition:: Attention

        If you need to pass a string to append and that string contains
        an equal sign, you **must** include the argument name, args.
        For example:

        .. code-block:: bash

            salt '*' file.write /etc/motd args='cheese=spam'

            salt '*' file.write /etc/motd args="['cheese=spam','spam=cheese']"
    """
    path = os.path.expanduser(path)

    if "args" in kwargs:
        args = kwargs["args"] if isinstance(kwargs["args"], list) else [kwargs["args"]]

    # Each argument becomes one newline-terminated line
    lines = ["{}\n".format(line) for line in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(salt.utils.stringutils.to_str("".join(lines)))
    return 'Wrote {} lines to "{}"'.format(len(lines), path)
def touch(name, atime=None, mtime=None):
    """
    .. versionadded:: 0.9.5

    Just like the ``touch`` command, create a file if it doesn't exist or
    simply update the atime and mtime if it already does.

    atime:
        Access time in Unix epoch time
    mtime:
        Last modification in Unix epoch time

    CLI Example:

    .. code-block:: bash

        salt '*' file.touch /var/log/emptyfile
    """
    name = os.path.expanduser(name)
    # Values coming from the CLI are strings; coerce numeric strings to int.
    # Casting through str() first also keeps this from raising AttributeError
    # when atime/mtime are already ints (or floats) from Python callers.
    if atime and str(atime).isdigit():
        atime = int(atime)
    if mtime and str(mtime).isdigit():
        mtime = int(mtime)
    try:
        # Create the file if it does not yet exist
        if not os.path.exists(name):
            with salt.utils.files.fopen(name, "a"):
                pass

        # os.utime(path, None) sets both times to "now"; otherwise it needs
        # a full (atime, mtime) pair, so fill the missing half with now.
        if not atime and not mtime:
            times = None
        elif not mtime and atime:
            times = (atime, time.time())
        elif not atime and mtime:
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)

    except TypeError:
        raise SaltInvocationError("atime and mtime must be integers")
    except OSError as exc:
        raise CommandExecutionError(exc.strerror)

    return os.path.exists(name)
def seek_read(path, size, offset):
    """
    .. versionadded:: 2014.1.0

    Seek to a position on a file and read it

    path
        path to file

    seek
        amount to read at once

    offset
        offset to start into the file

    CLI Example:

    .. code-block:: bash

        salt '*' file.seek_read /path/to/file 4096 0
    """
    target = os.path.expanduser(path)
    fd_ = os.open(target, os.O_RDONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        # The descriptor is closed by the finally clause after the read
        return os.read(fd_, int(size))
    finally:
        os.close(fd_)
def seek_write(path, data, offset):
    """
    .. versionadded:: 2014.1.0

    Seek to a position on a file and write to it

    path
        path to file

    data
        data to write to file

    offset
        position in file to start writing

    CLI Example:

    .. code-block:: bash

        salt '*' file.seek_write /path/to/file 'some data' 4096
    """
    target = os.path.expanduser(path)
    fd_ = os.open(target, os.O_WRONLY)
    try:
        os.lseek(fd_, int(offset), 0)
        written = os.write(fd_, data)
        # Flush to disk before reporting success
        os.fsync(fd_)
    finally:
        os.close(fd_)
    return written
def truncate(path, length):
    """
    .. versionadded:: 2014.1.0

    Seek to a position on a file and delete everything after that point

    path
        path to file

    length
        offset into file to truncate

    CLI Example:

    .. code-block:: bash

        salt '*' file.truncate /path/to/file 512
    """
    target = os.path.expanduser(path)
    # rb+ keeps the existing contents up to `length` intact
    with salt.utils.files.fopen(target, "rb+") as fh_:
        fh_.truncate(int(length))
def link(src, path):
    """
    .. versionadded:: 2014.1.0

    Create a hard link to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.link /path/to/file /path/to/link
    """
    src = os.path.expanduser(src)
    if not os.path.isabs(src):
        raise SaltInvocationError("File path must be absolute.")
    try:
        os.link(src, path)
        return True
    except OSError as exc:
        # Surface the OS error (missing src, cross-device link, perms, ...).
        # Note: the unreachable `return False` that followed this raise has
        # been removed; this function either returns True or raises.
        raise CommandExecutionError("Could not create '{}': {}".format(path, exc))
def is_hardlink(path):
    """
    Check if the path is a hard link by verifying that the number of links
    is larger than 1

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_hardlink /path/to/link
    """
    # An entry whose st_nlink exceeds 1 shares its inode with at least one
    # other directory entry, i.e. it is hardlinked to something.
    info = lstat(os.path.expanduser(path))
    return info and info["st_nlink"] > 1
def is_link(path):
    """
    Check if the path is a symbolic link

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_link /path/to/link
    """
    # os.path.islink does not support Windows, so Salt ships a Windows-side
    # implementation that overrides this one; keeping this wrapper gives a
    # single consistent API on both operating systems.
    expanded = os.path.expanduser(path)
    return os.path.islink(expanded)
def symlink(src, path):
    """
    Create a symbolic link (symlink, soft link) to a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.symlink /path/to/file /path/to/link
    """
    path = os.path.expanduser(path)

    try:
        # Short-circuit when the link already points at the requested target
        if os.path.normpath(os.readlink(path)) == os.path.normpath(src):
            log.debug("link already in correct state: %s -> %s", path, src)
            return True
    except OSError:
        # path does not exist or is not a symlink; fall through and create it
        pass

    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute.")

    try:
        os.symlink(src, path)
        return True
    except OSError:
        # Note: the unreachable `return False` that followed this raise has
        # been removed; this function either returns True or raises.
        raise CommandExecutionError("Could not create '{}'".format(path))
def rename(src, dst):
    """
    Rename a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.rename /path/to/src /path/to/dst
    """
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError("File path must be absolute.")

    try:
        os.rename(src, dst)
        return True
    except OSError:
        # Note: the unreachable `return False` that followed this raise has
        # been removed; this function either returns True or raises.
        raise CommandExecutionError("Could not rename '{}' to '{}'".format(src, dst))
def copy(src, dst, recurse=False, remove_existing=False):
    """
    Copy a file or directory from source to dst

    In order to copy a directory, the recurse flag is required, and
    will by default overwrite files in the destination with the same path,
    and retain all other existing files. (similar to cp -r on unix)

    remove_existing will remove all files in the target directory,
    and then copy files from the source.

    .. note::
        The copy function accepts paths that are local to the Salt minion.
        This function does not support salt://, http://, or the other
        additional file paths that are supported by :mod:`states.file.managed
        <salt.states.file.managed>` and :mod:`states.file.recurse
        <salt.states.file.recurse>`.

    CLI Example:

    .. code-block:: bash

        salt '*' file.copy /path/to/src /path/to/dst
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True
        salt '*' file.copy /path/to/src_dir /path/to/dst_dir recurse=True remove_existing=True

    """
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError("File path must be absolute.")

    if not os.path.exists(src):
        raise CommandExecutionError("No such file or directory '{}'".format(src))

    if not salt.utils.platform.is_windows():
        # Capture the source's ownership and mode before copying so the
        # destination can be made to match afterwards (shutil does not
        # preserve ownership).
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.files.normalize_mode(get_mode(src))

    try:
        # Directory handling applies when either side is (or will be) a dir
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                # Directory copies require explicit opt-in
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to"
                    " true!"
                )
            if remove_existing:
                # Replace the destination tree wholesale
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                # Merge into the existing destination tree, overwriting
                # colliding paths but keeping other existing files
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError("Could not copy '{}' to '{}'".format(src, dst))

    if not salt.utils.platform.is_windows():
        # Restore the ownership/mode captured from the source
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
def lstat(path):
    """
    .. versionadded:: 2014.1.0

    Returns the lstat attributes for the given file or dir. Does not support
    symbolic links.

    CLI Example:

    .. code-block:: bash

        salt '*' file.lstat /path/to/file
    """
    expanded = os.path.expanduser(path)
    if not os.path.isabs(expanded):
        raise SaltInvocationError("Path to file must be absolute.")

    wanted = (
        "st_atime",
        "st_ctime",
        "st_gid",
        "st_mode",
        "st_mtime",
        "st_nlink",
        "st_size",
        "st_uid",
    )
    try:
        info = os.lstat(expanded)
        return {attr: getattr(info, attr) for attr in wanted}
    except Exception:  # pylint: disable=broad-except
        # Legacy contract: any failure yields an empty dict
        return {}
def access(path, mode):
    """
    .. versionadded:: 2014.1.0

    Test whether the Salt process has the specified access to the file. One of
    the following modes must be specified:

    .. code-block:: text

        f: Test the existence of the path
        r: Test the readability of the path
        w: Test the writability of the path
        x: Test whether the path can be executed
    CLI Example:

    .. code-block:: bash

        salt '*' file.access /path/to/file f
        salt '*' file.access /path/to/file x
    """
    expanded = os.path.expanduser(path)
    if not os.path.isabs(expanded):
        raise SaltInvocationError("Path to link must be absolute.")

    mode_map = {"f": os.F_OK, "r": os.R_OK, "w": os.W_OK, "x": os.X_OK}

    # Accept either the single-letter alias or the raw os.*_OK constant
    if mode in mode_map:
        resolved = mode_map[mode]
    elif mode in mode_map.values():
        resolved = mode
    else:
        raise SaltInvocationError("Invalid mode specified.")
    return os.access(expanded, resolved)
def read(path, binary=False):
    """
    .. versionadded:: 2017.7.0

    Return the content of the file.

    :param bool binary:
        Whether to read and return binary data

    CLI Example:

    .. code-block:: bash

        salt '*' file.read /path/to/file
    """
    access_mode = "rb" if binary is True else "r"
    with salt.utils.files.fopen(path, access_mode) as file_obj:
        contents = file_obj.read()
    if binary is True:
        return contents
    # Text mode: normalize to unicode on the way out
    return salt.utils.stringutils.to_unicode(contents)
def readlink(path, canonicalize=False):
    """
    .. versionadded:: 2014.1.0

    Return the path that a symlink points to

    If canonicalize is set to True, then it return the final target

    CLI Example:

    .. code-block:: bash

        salt '*' file.readlink /path/to/link
    """
    expanded = os.path.expanduser(path)
    if not os.path.isabs(expanded):
        raise SaltInvocationError("Path to link must be absolute.")
    if not os.path.islink(expanded):
        raise SaltInvocationError("A valid link was not specified.")
    # realpath resolves the whole chain; readlink only the immediate target
    return os.path.realpath(expanded) if canonicalize else os.readlink(expanded)
def readdir(path):
    """
    .. versionadded:: 2014.1.0

    Return a list containing the contents of a directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.readdir /path/to/dir/
    """
    dir_path = os.path.expanduser(path)
    if not os.path.isabs(dir_path):
        raise SaltInvocationError("Dir path must be absolute.")
    if not os.path.isdir(dir_path):
        raise SaltInvocationError("A valid directory was not specified.")
    # Mimic a raw directory listing by including the . and .. entries
    return [".", ".."] + os.listdir(dir_path)
def statvfs(path):
    """
    .. versionadded:: 2014.1.0

    Perform a statvfs call against the filesystem that the file resides on

    CLI Example:

    .. code-block:: bash

        salt '*' file.statvfs /path/to/file
    """
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute.")
    try:
        stv = os.statvfs(path)
    except OSError:
        # Note: the unreachable `return False` that followed this raise has
        # been removed; this function either returns the dict or raises.
        raise CommandExecutionError("Could not statvfs '{}'".format(path))
    return {
        key: getattr(stv, key)
        for key in (
            "f_bavail",
            "f_bfree",
            "f_blocks",
            "f_bsize",
            "f_favail",
            "f_ffree",
            "f_files",
            "f_flag",
            "f_frsize",
            "f_namemax",
        )
    }
def stats(path, hash_type=None, follow_symlinks=True):
    """
    Return a dict containing the stats for a given file

    CLI Example:

    .. code-block:: bash

        salt '*' file.stats /etc/passwd
    """
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        try:
            # Broken symlinks will return False for os.path.exists(), but still
            # have a uid and gid
            pstat = os.lstat(path)
        except OSError:
            # Not a broken symlink, just a nonexistent path
            # NOTE: The file.directory state checks the content of the error
            # message in this exception. Any changes made to the message for this
            # exception will reflect the file.directory state as well, and will
            # likely require changes there.
            raise CommandExecutionError("Path not found: {}".format(path))
    else:
        pstat = os.stat(path) if follow_symlinks else os.lstat(path)

    ret = {
        "inode": pstat.st_ino,
        "uid": pstat.st_uid,
        "gid": pstat.st_gid,
        "group": gid_to_group(pstat.st_gid),
        "user": uid_to_user(pstat.st_uid),
        "atime": pstat.st_atime,
        "mtime": pstat.st_mtime,
        "ctime": pstat.st_ctime,
        "size": pstat.st_size,
        "mode": salt.utils.files.normalize_mode(oct(stat.S_IMODE(pstat.st_mode))),
    }
    if hash_type:
        ret["sum"] = get_hash(path, hash_type)

    # The stat.S_IS* predicates are mutually exclusive (they all test the
    # same S_IFMT bits), so at most one label applies; "file" doubles as the
    # fallback when none match.
    ret["type"] = "file"
    for predicate, label in (
        (stat.S_ISDIR, "dir"),
        (stat.S_ISCHR, "char"),
        (stat.S_ISBLK, "block"),
        (stat.S_ISREG, "file"),
        (stat.S_ISLNK, "link"),
        (stat.S_ISFIFO, "pipe"),
        (stat.S_ISSOCK, "socket"),
    ):
        if predicate(pstat.st_mode):
            ret["type"] = label
    ret["target"] = os.path.realpath(path)
    return ret
def rmdir(path):
    """
    .. versionadded:: 2014.1.0

    Remove the specified directory. Fails if a directory is not empty.

    CLI Example:

    .. code-block:: bash

        salt '*' file.rmdir /tmp/foo/
    """
    dir_path = os.path.expanduser(path)
    if not os.path.isabs(dir_path):
        raise SaltInvocationError("File path must be absolute.")
    if not os.path.isdir(dir_path):
        raise SaltInvocationError("A valid directory was not specified.")
    try:
        os.rmdir(dir_path)
    except OSError as exc:
        # Return the error text (e.g. "Directory not empty") instead of raising
        return exc.strerror
    return True
def remove(path):
    """
    Remove the named file. If a directory is supplied, it will be recursively
    deleted.

    CLI Example:

    .. code-block:: bash

        salt '*' file.remove /tmp/foo

    .. versionchanged:: 3000
        The method now works on all types of file system entries, not just
        files, directories and symlinks.
    """
    path = os.path.expanduser(path)
    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute: {}".format(path))
    try:
        # A real directory (not a symlink to one) is removed recursively
        if os.path.isdir(path) and not os.path.islink(path):
            shutil.rmtree(path)
            return True
        # Everything else with a directory entry (regular files, symlinks --
        # even broken ones -- FIFOs, sockets) is unlinked directly
        if os.path.islink(path) or os.path.exists(path):
            os.remove(path)
            return True
    except OSError as exc:
        raise CommandExecutionError("Could not remove '{}': {}".format(path, exc))
    return False
def directory_exists(path):
    """
    Tests to see if path is a valid directory.  Returns True/False.

    CLI Example:

    .. code-block:: bash

        salt '*' file.directory_exists /etc

    """
    expanded = os.path.expanduser(path)
    return os.path.isdir(expanded)
def file_exists(path):
    """
    Tests to see if path is a valid file.  Returns True/False.

    CLI Example:

    .. code-block:: bash

        salt '*' file.file_exists /etc/passwd

    """
    expanded = os.path.expanduser(path)
    return os.path.isfile(expanded)
def path_exists_glob(path):
    """
    Tests to see if path after expansion is a valid path (file or directory).
    Expansion allows usage of ? * and character ranges []. Tilde expansion
    is not supported. Returns True/False.

    .. versionadded:: 2014.7.0

    CLI Example:

    .. code-block:: bash

        salt '*' file.path_exists_glob /etc/pam*/pass*

    """
    # bool() replaces the redundant "True if ... else False" ternary; the
    # result is truthy exactly when the glob matched at least one path.
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
    """
    Reset the SELinux context on a given path

    CLI Example:

    .. code-block:: bash

         salt '*' file.restorecon /home/user/.ssh/authorized_keys
    """
    # -F forces the context reset; -R recurses into directories
    flag = "-FR" if recursive else "-F"
    cmd = ["restorecon", flag, path]
    # restorecon exits 0 on success, so invert the retcode into a boolean
    return not __salt__["cmd.retcode"](cmd, python_shell=False)
def get_selinux_context(path):
    """
    Get an SELinux context from a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.get_selinux_context /etc/hosts
    """
    # stat -c %C prints only the SELinux security context of the path
    cmd_ret = __salt__["cmd.run_all"](["stat", "-c", "%C", path], python_shell=False)

    if cmd_ret["retcode"] != 0:
        return "No selinux context information is available for {}".format(path)
    return cmd_ret["stdout"]
def set_selinux_context(
    path,
    user=None,
    role=None,
    type=None,  # pylint: disable=W0622
    range=None,  # pylint: disable=W0622
    persist=False,
):
    """
    .. versionchanged:: 3001

        Added persist option

    Set a specific SELinux label on a given path

    CLI Example:

    .. code-block:: bash

        salt '*' file.set_selinux_context path <user> <role> <type> <range>
        salt '*' file.set_selinux_context /etc/yum.repos.d/epel.repo system_u object_r system_conf_t s0
    """
    # Nothing to do when no part of the context was requested
    if not any((user, role, type, range)):
        return False

    if persist:
        # Record a matching fcontext policy so the label survives a relabel.
        # NOTE(review): role is not forwarded here, only user/type/range —
        # presumably selinux.fcontext_add_policy takes no role; confirm.
        fcontext_result = __salt__["selinux.fcontext_add_policy"](
            path, sel_type=type, sel_user=user, sel_level=range
        )
        if fcontext_result.get("retcode", None) != 0:
            # Problem setting fcontext policy
            raise CommandExecutionError(
                "Problem setting fcontext: {}".format(fcontext_result)
            )

    # Apply the label immediately with chcon, passing only the parts given
    cmd = ["chcon"]
    if user:
        cmd.extend(["-u", user])
    if role:
        cmd.extend(["-r", role])
    if type:
        cmd.extend(["-t", type])
    if range:
        cmd.extend(["-l", range])
    cmd.append(path)

    ret = not __salt__["cmd.retcode"](cmd, python_shell=False)
    if ret:
        # On success, return the context now visible on the path
        return get_selinux_context(path)
    else:
        return ret
def source_list(source, source_hash, saltenv):
    """
    Check the source list and return the source to use

    CLI Example:

    .. code-block:: bash

        salt '*' file.source_list salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' base
    """
    # Results are memoized per (source, hash, saltenv) for the life of this
    # minion process invocation
    contextkey = "{}_|-{}_|-{}".format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]

    # get the master file list
    if isinstance(source, list):
        mfiles = [(f, saltenv) for f in __salt__["cp.list_master"](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__["cp.list_master_dirs"](saltenv)]
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))

            path, senv = salt.utils.url.parse(single)
            if senv:
                # The source pins its own environment (salt://p?saltenv=x);
                # also consider that environment's file/dir lists
                mfiles += [(f, senv) for f in __salt__["cp.list_master"](senv)]
                mdirs += [(d, senv) for d in __salt__["cp.list_master_dirs"](senv)]

        # First source that can be confirmed to exist wins
        ret = None
        for single in source:
            if isinstance(single, dict):
                # check the proto, if it is http or ftp then download the file
                # to check, if it is salt then check the master list
                # if it is a local file, check if the file exists
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                # A per-source hash (``{src: hash}``) overrides source_hash
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = urllib.parse.urlparse(single_src)
                # Fix this for Windows
                if salt.utils.platform.is_windows():
                    # urlparse doesn't handle a local Windows path without the
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol and re-parse
                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_single_src = urllib.parse.urlparse(
                            "file://" + single_src
                        )
                proto = urlparsed_single_src.scheme
                if proto == "salt":
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # NOTE(review): the membership test below uses ``saltenv``
                    # rather than the ``senv`` parsed just above — looks like
                    # a bug, but preserved here; confirm against upstream.
                    if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith("http") or proto == "ftp":
                    # Probe the remote URL cheaply with a HEAD request
                    query_res = salt.utils.http.query(
                        single_src, method="HEAD", decode_body=False
                    )
                    if "error" not in query_res:
                        ret = (single_src, single_hash)
                        break
                elif proto == "file" and (
                    os.path.exists(urlparsed_single_src.netloc)
                    or os.path.exists(urlparsed_single_src.path)
                    or os.path.exists(
                        os.path.join(
                            urlparsed_single_src.netloc, urlparsed_single_src.path
                        )
                    )
                ):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, str):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break

                urlparsed_src = urllib.parse.urlparse(single)
                if salt.utils.platform.is_windows():
                    # urlparse doesn't handle a local Windows path without the
                    # protocol indicator (file://). The scheme will be the
                    # drive letter instead of the protocol. So, we'll add the
                    # protocol and re-parse
                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_src = urllib.parse.urlparse("file://" + single)
                proto = urlparsed_src.scheme
                if proto == "file" and (
                    os.path.exists(urlparsed_src.netloc)
                    or os.path.exists(urlparsed_src.path)
                    or os.path.exists(
                        os.path.join(urlparsed_src.netloc, urlparsed_src.path)
                    )
                ):
                    ret = (single, source_hash)
                    break
                elif proto.startswith("http") or proto == "ftp":
                    query_res = salt.utils.http.query(
                        single, method="HEAD", decode_body=False
                    )
                    if "error" not in query_res:
                        ret = (single, source_hash)
                        break
                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched
            raise CommandExecutionError("none of the specified sources were found")
    else:
        # A single (non-list) source is passed through without validation
        ret = (source, source_hash)

    __context__[contextkey] = ret
    return ret
def apply_template_on_contents(contents, template, context, defaults, saltenv):
    """
    Return the contents after applying the templating engine

    contents
        template string

    template
        template format

    context
        Overrides default context variables passed to the template.

    defaults
        Default context passed to the template.

    CLI Example:

    .. code-block:: bash

        salt '*' file.apply_template_on_contents \\
            contents='This is a {{ template }} string.' \\
            template=jinja \\
            "context={}" "defaults={'template': 'cool'}" \\
            saltenv=base
    """
    # Bail out early on an unknown template engine.
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        return {
            "result": False,
            "comment": "Specified template format {} is not supported".format(
                template
            ),
        }
    # Build the render context: defaults first, then caller overrides.
    tmplctx = defaults if defaults else {}
    if context:
        tmplctx.update(context)
    # Render the string in-memory (from_str/to_str) with the minion's
    # grains, pillar and execution functions exposed to the template.
    rendered = salt.utils.templates.TEMPLATE_REGISTRY[template](
        contents,
        from_str=True,
        to_str=True,
        context=tmplctx,
        saltenv=saltenv,
        grains=__opts__["grains"],
        pillar=__pillar__,
        salt=__salt__,
        opts=__opts__,
    )["data"]
    # Some renderers hand back bytes; normalize to str.
    if isinstance(rendered, bytes):
        rendered = rendered.decode("utf-8")
    return rendered
def get_managed(
    name,
    template,
    source,
    source_hash,
    source_hash_name,
    user,
    group,
    mode,
    attrs,
    saltenv,
    context,
    defaults,
    skip_verify=False,
    verify_ssl=True,
    **kwargs
):
    """
    Return the managed file data for file.managed
    name
        location where the file lives on the server
    template
        template format
    source
        managed source file
    source_hash
        hash of the source file
    source_hash_name
        When ``source_hash`` refers to a remote file, this specifies the
        filename to look for in that file.
        .. versionadded:: 2016.3.5
    user
        Owner of file
    group
        Group owner of file
    mode
        Permissions of file
    attrs
        Attributes of file
        .. versionadded:: 2018.3.0
    context
        Variables to add to the template context
    defaults
        Default values of for context_dict
    skip_verify
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.
        .. versionadded:: 2016.3.0
    verify_ssl
        If ``False``, remote https file sources (``https://``) and source_hash
        will not attempt to validate the servers certificate. Default is True.
        .. versionadded:: 3002

    Returns a 3-tuple ``(sfn, source_sum, comment)``: the local (cached or
    rendered) path of the source, a dict with ``hash_type``/``hsum`` keys
    (``{}`` when no hash was computed), and an error string that is empty
    on success.

    CLI Example:
    .. code-block:: bash
        salt '*' file.get_managed /etc/httpd/conf.d/httpd.conf jinja salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' None root root '755' base None None
    """
    # Copy the file to the minion and templatize it
    sfn = ""
    source_sum = {}
    def _get_local_file_source_sum(path):
        """
        DRY helper for getting the source_sum value from a locally cached
        path.
        """
        return {"hsum": get_hash(path, form="sha256"), "hash_type": "sha256"}
    # If we have a source defined, let's figure out what the hash is
    if source:
        urlparsed_source = urllib.parse.urlparse(source)
        # Only honor the scheme if it is one salt knows how to fetch;
        # anything else is treated as a bare local path.
        if urlparsed_source.scheme in salt.utils.files.VALID_PROTOS:
            parsed_scheme = urlparsed_source.scheme
        else:
            parsed_scheme = ""
        parsed_path = os.path.join(
            urlparsed_source.netloc, urlparsed_source.path
        ).rstrip(os.sep)
        unix_local_source = parsed_scheme in ("file", "")
        # Local sources (no scheme, or file://) must exist on the minion
        # already; fail fast with a comment if they don't.
        if parsed_scheme == "":
            parsed_path = sfn = source
            if not os.path.exists(sfn):
                msg = "Local file source {} does not exist".format(sfn)
                return "", {}, msg
        elif parsed_scheme == "file":
            sfn = parsed_path
            if not os.path.exists(sfn):
                msg = "Local file source {} does not exist".format(sfn)
                return "", {}, msg
        # A one-letter "scheme" is really a Windows drive letter that
        # urlparse mistook for a protocol; stitch it back onto the path.
        if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
            parsed_path = ":".join([parsed_scheme, parsed_path])
            parsed_scheme = "file"
        if parsed_scheme == "salt":
            # Fileserver source: the master computes the hash for us.
            source_sum = __salt__["cp.hash_file"](source, saltenv)
            if not source_sum:
                return (
                    "",
                    {},
                    "Source file {} not found in saltenv '{}'".format(source, saltenv),
                )
        elif not source_hash and unix_local_source:
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            # This should happen on Windows
            source_sum = _get_local_file_source_sum(source)
        else:
            # Remote (http/https/ftp/...) source: a hash is required
            # unless the caller explicitly opted out with skip_verify.
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(
                            name,
                            source,
                            source_hash,
                            source_hash_name,
                            saltenv,
                            verify_ssl=verify_ssl,
                        )
                    except CommandExecutionError as exc:
                        return "", {}, exc.strerror
                else:
                    msg = (
                        "Unable to verify upstream hash of source file {}, "
                        "please set source_hash or set skip_verify to True".format(
                            salt.utils.url.redact_http_basic_auth(source)
                        )
                    )
                    return "", {}, msg
    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__["cp.is_cached"](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get("hash_type", "sha256")
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {"hsum": cached_sum, "hash_type": htype}
            # NOTE(review): the __opts__["hash_type"] default compares a
            # digest against a hash-algorithm name, so a missing 'hsum'
            # always forces a refetch — presumably intentional; confirm.
            elif cached_sum != source_sum.get("hsum", __opts__["hash_type"]):
                cache_refetch = True
            else:
                sfn = cached_dest
        # If we didn't have the template or remote file, or the file has been
        # updated and the cache has to be refreshed, download the file.
        if not sfn or cache_refetch:
            try:
                sfn = __salt__["cp.cache_file"](
                    source,
                    saltenv,
                    source_hash=source_sum.get("hsum"),
                    verify_ssl=verify_ssl,
                )
            except Exception as exc:  # pylint: disable=broad-except
                # A 404 or other error code may raise an exception, catch it
                # and return a comment that will fail the calling state.
                _source = salt.utils.url.redact_http_basic_auth(source)
                return "", {}, "Failed to cache {}: {}".format(_source, exc)
        # If cache failed, sfn will be False, so do a truth check on sfn first
        # as invoking os.path.exists() on a bool raises a TypeError.
        if not sfn or not os.path.exists(sfn):
            _source = salt.utils.url.redact_http_basic_auth(source)
            return sfn, {}, "Source file '{}' not found".format(_source)
        if sfn == name:
            raise SaltInvocationError("Source file cannot be the same as destination")
        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                # Render the cached source with defaults overridden by
                # any caller-supplied context.
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    attrs=attrs,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__["grains"],
                    opts=__opts__,
                    **kwargs
                )
            else:
                return (
                    sfn,
                    {},
                    "Specified template format {} is not supported".format(template),
                )
            if data["result"]:
                # Rendering succeeded: hash the rendered temp file so the
                # caller can compare it against the destination.
                sfn = data["data"]
                hsum = get_hash(sfn, form="sha256")
                source_sum = {"hash_type": "sha256", "hsum": hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data["data"]
    return sfn, source_sum, ""
def extract_hash(
    hash_fn, hash_type="sha256", file_name="", source="", source_hash_name=None
):
    """
    .. versionchanged:: 2016.3.5
        Prior to this version, only the ``file_name`` argument was considered
        for filename matches in the hash file. This would be problematic for
        cases in which the user was relying on a remote checksum file that they
        do not control, and they wished to use a different name for that file
        on the minion from the filename on the remote server (and in the
        checksum file). For example, managing ``/tmp/myfile.tar.gz`` when the
        remote file was at ``https://mydomain.tld/different_name.tar.gz``. The
        :py:func:`file.managed <salt.states.file.managed>` state now also
        passes this function the source URI as well as the ``source_hash_name``
        (if specified). In cases where ``source_hash_name`` is specified, it
        takes precedence over both the ``file_name`` and ``source``. When it is
        not specified, ``file_name`` takes precedence over ``source``. This
        allows for better capability for matching hashes.
    .. versionchanged:: 2016.11.0
        File name and source URI matches are no longer disregarded when
        ``source_hash_name`` is specified. They will be used as fallback
        matches if there is no match to the ``source_hash_name`` value.

    This routine is called from the :mod:`file.managed
    <salt.states.file.managed>` state to pull a hash from a remote file.
    Regular expressions are used line by line on the ``source_hash`` file, to
    find a potential candidate of the indicated hash type. This avoids many
    problems of arbitrary file layout rules. It specifically permits pulling
    hash codes from debian ``*.dsc`` files.

    If no exact match of a hash and filename are found, then the first hash
    found (if any) will be returned. If no hashes at all are found, then
    ``None`` will be returned.

    For example:

    .. code-block:: yaml

        openerp_7.0-latest-1.tar.gz:
          file.managed:
            - name: /tmp/openerp_7.0-20121227-075624-1_all.deb
            - source: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.tar.gz
            - source_hash: http://nightly.openerp.com/7.0/nightly/deb/openerp_7.0-20121227-075624-1.dsc

    CLI Example:

    .. code-block:: bash

        salt '*' file.extract_hash /path/to/hash/file sha512 /etc/foo
    """
    # Map the requested hash type to its expected hex-digest length. An
    # unknown type falls back to matching any supported digest length.
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                "file.extract_hash: Unsupported hash_type '%s', falling "
                "back to matching any supported hash_type",
                hash_type,
            )
        hash_type = ""
        hash_len_expr = "{},{}".format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)

    filename_separators = string.whitespace + r"\/*"

    # Pre-compute negative offsets used to check the character immediately
    # preceding a filename match at end-of-line.
    if source_hash_name:
        if not isinstance(source_hash_name, str):
            source_hash_name = str(source_hash_name)
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            "file.extract_hash: Extracting %s hash for file matching "
            "source_hash_name '%s'",
            "any supported" if not hash_type else hash_type,
            source_hash_name,
        )
    if file_name:
        if not isinstance(file_name, str):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, str):
            source = str(source)
        urlparsed_source = urllib.parse.urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1

    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            "file.extract_hash: %s %s hash for file matching%s: %s",
            "If no source_hash_name match found, will extract"
            if source_hash_name
            else "Extracting",
            "any supported" if not hash_type else hash_type,
            "" if len(basename_searches) == 1 else " either of the following",
            ", ".join(basename_searches),
        )

    # The hash pattern depends only on hash_len_expr, so compile it once
    # instead of rebuilding the pattern string for every line of the file.
    hash_re = re.compile(
        r"(?i)(?<![a-z0-9])([a-f0-9]{" + hash_len_expr + "})(?![a-z0-9])"
    )

    def _add_to_matches(found, line, match_type, value, matched):
        # Record a hash whose line also matched a filename criterion,
        # grouped by which criterion matched. (Hoisted out of the loop —
        # it has no per-line state, so there is no reason to re-create
        # the function object for every line.)
        log.debug(
            "file.extract_hash: Line '%s' matches %s '%s'",
            line,
            match_type,
            value,
        )
        found.setdefault(match_type, []).append(matched)

    partial = None  # first hash seen with no filename match; last-resort result
    found = {}
    with salt.utils.files.fopen(hash_fn, "r") as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line.strip())
            hash_match = hash_re.search(line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # There was a match, but it's not of the correct length
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {"hsum": matched_hsum, "hash_type": matched_type}

            if matched is None:
                log.debug(
                    "file.extract_hash: In line '%s', no %shash found",
                    line,
                    "" if not hash_type else hash_type + " ",
                )
                continue

            if partial is None:
                partial = matched

            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(
                                found,
                                line,
                                "source_hash_name",
                                source_hash_name,
                                matched,
                            )
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source_hash_name) + r"\s+", line):
                    _add_to_matches(
                        found, line, "source_hash_name", source_hash_name, matched
                    )
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(
                                found, line, "file_name", file_name, matched
                            )
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(file_name) + r"\s+", line):
                    _add_to_matches(found, line, "file_name", file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, "source", source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source) + r"\s+", line):
                    _add_to_matches(found, line, "source", source, matched)
                    hash_matched = True

            if not hash_matched:
                log.debug(
                    "file.extract_hash: Line '%s' contains %s hash "
                    "'%s', but line did not meet the search criteria",
                    line,
                    matched["hash_type"],
                    matched["hsum"],
                )

    # Prefer matches in precedence order: source_hash_name, then
    # file_name, then source; within a type, the first match wins.
    for found_type, found_str in (
        ("source_hash_name", source_hash_name),
        ("file_name", file_name),
        ("source", source),
    ):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    "file.extract_hash: Multiple %s matches for %s: %s",
                    found_type,
                    found_str,
                    ", ".join(
                        [
                            "{} ({})".format(x["hsum"], x["hash_type"])
                            for x in found[found_type]
                        ]
                    ),
                )
            ret = found[found_type][0]
            log.debug(
                "file.extract_hash: Returning %s hash '%s' as a match of %s",
                ret["hash_type"],
                ret["hsum"],
                found_str,
            )
            return ret

    if partial:
        log.debug(
            "file.extract_hash: Returning the partially identified %s hash '%s'",
            partial["hash_type"],
            partial["hsum"],
        )
        return partial

    log.debug("file.extract_hash: No matches, returning None")
    return None
def check_perms(
    name,
    ret,
    user,
    group,
    mode,
    attrs=None,
    follow_symlinks=False,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
):
    """
    .. versionchanged:: 3001
        Added selinux options
    Check the permissions on files, modify attributes and chown if needed. File
    attributes are only verified if lsattr(1) is installed.

    Returns a 2-tuple of the (possibly updated) state ``ret`` dict and a
    ``perms`` dict describing the pre-existing user/group/mode.

    CLI Example:
    .. code-block:: bash
        salt '*' file.check_perms /etc/sudoers '{}' root root 400 ai
    .. versionchanged:: 2014.1.3
        ``follow_symlinks`` option added
    """
    name = os.path.expanduser(name)
    # Initialize (or reuse) the state-style return dict; comments are
    # collected as a list and joined into a string at the very end.
    if not ret:
        ret = {"name": name, "changes": {}, "comment": [], "result": True}
        orig_comment = ""
    else:
        orig_comment = ret["comment"]
        ret["comment"] = []
    # Check permissions
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    perms["luser"] = cur["user"]
    perms["lgroup"] = cur["group"]
    perms["lmode"] = salt.utils.files.normalize_mode(cur["mode"])
    is_dir = os.path.isdir(name)
    # NOTE(review): is_link is assigned but never read below; islink() is
    # re-evaluated inline where needed.
    is_link = os.path.islink(name)
    # user/group changes if needed, then check if it worked
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        # On Windows, compare by uid (names may differ in case/format);
        # elsewhere compare the names directly.
        if (
            salt.utils.platform.is_windows()
            and user_to_uid(user) != user_to_uid(perms["luser"])
        ) or (not salt.utils.platform.is_windows() and user != perms["luser"]):
            perms["cuser"] = user
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (
            salt.utils.platform.is_windows()
            and group_to_gid(group) != group_to_gid(perms["lgroup"])
        ) or (not salt.utils.platform.is_windows() and group != perms["lgroup"]):
            perms["cgroup"] = group
    # Apply any pending ownership change (unless running in test mode).
    if "cuser" in perms or "cgroup" in perms:
        if not __opts__["test"]:
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms["luser"]
            if group is None:
                group = perms["lgroup"]
            try:
                chown_func(name, user, group)
                # Python os.chown() does reset the suid and sgid,
                # that's why setting the right mode again is needed here.
                set_mode(name, mode)
            except OSError:
                ret["result"] = False
    # Verify the ownership change actually took effect.
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (
            salt.utils.platform.is_windows()
            and user_to_uid(user)
            != user_to_uid(get_user(name, follow_symlinks=follow_symlinks))
            and user != ""
        ) or (
            not salt.utils.platform.is_windows()
            and user != get_user(name, follow_symlinks=follow_symlinks)
            and user != ""
        ):
            if __opts__["test"] is True:
                ret["changes"]["user"] = user
            else:
                ret["result"] = False
                ret["comment"].append("Failed to change user to {}".format(user))
        elif "cuser" in perms and user != "":
            ret["changes"]["user"] = user
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        # NOTE(review): the ``user != ""`` guards below test ``user`` rather
        # than ``group`` — looks like a copy/paste slip from the user block
        # above; confirm intent before changing, as states may rely on it.
        if (
            salt.utils.platform.is_windows()
            and group_to_gid(group)
            != group_to_gid(get_group(name, follow_symlinks=follow_symlinks))
            and user != ""
        ) or (
            not salt.utils.platform.is_windows()
            and group != get_group(name, follow_symlinks=follow_symlinks)
            and user != ""
        ):
            if __opts__["test"] is True:
                ret["changes"]["group"] = group
            else:
                ret["result"] = False
                ret["comment"].append("Failed to change group to {}".format(group))
        elif "cgroup" in perms and user != "":
            ret["changes"]["group"] = group
    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.files.normalize_mode(mode)
            if mode != perms["lmode"]:
                if __opts__["test"] is True:
                    ret["changes"]["mode"] = mode
                else:
                    set_mode(name, mode)
                    if mode != salt.utils.files.normalize_mode(get_mode(name)):
                        ret["result"] = False
                        ret["comment"].append(
                            "Failed to change mode to {}".format(mode)
                        )
                    else:
                        ret["changes"]["mode"] = mode
    # Modify attributes of file if needed
    if attrs is not None and not is_dir:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs and any(attr for attr in diff_attrs):
                changes = {
                    "old": "".join(lsattr(name)[name]),
                    "new": None,
                }
                if __opts__["test"] is True:
                    changes["new"] = attrs
                else:
                    # Apply additions and removals separately, then re-check
                    # to confirm the final attribute set matches the request.
                    if diff_attrs.added:
                        chattr(
                            name,
                            operator="add",
                            attributes=diff_attrs.added,
                        )
                    if diff_attrs.removed:
                        chattr(
                            name,
                            operator="remove",
                            attributes=diff_attrs.removed,
                        )
                    cmp_attrs = _cmp_attrs(name, attrs)
                    if any(attr for attr in cmp_attrs):
                        ret["result"] = False
                        ret["comment"].append(
                            "Failed to change attributes to {}".format(attrs)
                        )
                        changes["new"] = "".join(lsattr(name)[name])
                    else:
                        changes["new"] = attrs
                if changes["old"] != changes["new"]:
                    ret["changes"]["attrs"] = changes
    # Set selinux attributes if needed
    if salt.utils.platform.is_linux() and (seuser or serole or setype or serange):
        selinux_error = False
        try:
            (
                current_seuser,
                current_serole,
                current_setype,
                current_serange,
            ) = get_selinux_context(name).split(":")
            log.debug(
                "Current selinux context user:%s role:%s type:%s range:%s",
                current_seuser,
                current_serole,
                current_setype,
                current_serange,
            )
        except ValueError:
            log.error("Unable to get current selinux attributes")
            ret["result"] = False
            ret["comment"].append("Failed to get selinux attributes")
            selinux_error = True
        if not selinux_error:
            requested_seuser = None
            requested_serole = None
            requested_setype = None
            requested_serange = None
            # Only set new selinux variables if updates are needed
            if seuser and seuser != current_seuser:
                requested_seuser = seuser
            if serole and serole != current_serole:
                requested_serole = serole
            if setype and setype != current_setype:
                requested_setype = setype
            if serange and serange != current_serange:
                requested_serange = serange
            if (
                requested_seuser
                or requested_serole
                or requested_setype
                or requested_serange
            ):
                # selinux updates needed, prep changes output
                selinux_change_new = ""
                selinux_change_orig = ""
                if requested_seuser:
                    selinux_change_new += "User: {} ".format(requested_seuser)
                    selinux_change_orig += "User: {} ".format(current_seuser)
                if requested_serole:
                    selinux_change_new += "Role: {} ".format(requested_serole)
                    selinux_change_orig += "Role: {} ".format(current_serole)
                if requested_setype:
                    selinux_change_new += "Type: {} ".format(requested_setype)
                    selinux_change_orig += "Type: {} ".format(current_setype)
                if requested_serange:
                    selinux_change_new += "Range: {} ".format(requested_serange)
                    selinux_change_orig += "Range: {} ".format(current_serange)
                if __opts__["test"]:
                    # NOTE(review): this replaces the comment *list* with a
                    # plain string; the final "; ".join / insert below only
                    # expects a list — confirm this path is exercised safely.
                    ret["comment"] = "File {} selinux context to be updated".format(
                        name
                    )
                    ret["result"] = None
                    ret["changes"]["selinux"] = {
                        "Old": selinux_change_orig.strip(),
                        "New": selinux_change_new.strip(),
                    }
                else:
                    try:
                        # set_selinux_context requires type to be set on any other change
                        if (
                            requested_seuser or requested_serole or requested_serange
                        ) and not requested_setype:
                            requested_setype = current_setype
                        result = set_selinux_context(
                            name,
                            user=requested_seuser,
                            role=requested_serole,
                            type=requested_setype,
                            range=requested_serange,
                            persist=True,
                        )
                        log.debug("selinux set result: %s", result)
                        (
                            current_seuser,
                            current_serole,
                            current_setype,
                            current_serange,
                        ) = result.split(":")
                    except ValueError:
                        log.error("Unable to set current selinux attributes")
                        ret["result"] = False
                        ret["comment"].append("Failed to set selinux attributes")
                        selinux_error = True
                    if not selinux_error:
                        ret["comment"].append(
                            "The file {} is set to be changed".format(name)
                        )
                        # Re-verify each requested attribute against the
                        # context reported back by set_selinux_context.
                        if requested_seuser:
                            if current_seuser != requested_seuser:
                                ret["comment"].append("Unable to update seuser context")
                                ret["result"] = False
                        if requested_serole:
                            if current_serole != requested_serole:
                                ret["comment"].append("Unable to update serole context")
                                ret["result"] = False
                        if requested_setype:
                            if current_setype != requested_setype:
                                ret["comment"].append("Unable to update setype context")
                                ret["result"] = False
                        if requested_serange:
                            if current_serange != requested_serange:
                                ret["comment"].append(
                                    "Unable to update serange context"
                                )
                                ret["result"] = False
                        ret["changes"]["selinux"] = {
                            "Old": selinux_change_orig.strip(),
                            "New": selinux_change_new.strip(),
                        }
    # Only combine the comment list into a string
    # after all comments are added above
    if isinstance(orig_comment, str):
        if orig_comment:
            ret["comment"].insert(0, orig_comment)
        ret["comment"] = "; ".join(ret["comment"])
    # Set result to None at the very end of the function,
    # after all changes have been recorded above
    if __opts__["test"] is True and ret["changes"]:
        ret["result"] = None
    return ret, perms
def check_managed(
    name,
    source,
    source_hash,
    source_hash_name,
    user,
    group,
    mode,
    attrs,
    template,
    context,
    defaults,
    saltenv,
    contents=None,
    skip_verify=False,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    **kwargs
):
    """
    Check to see what changes need to be made for a file

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    """
    # A list source resolves to the first entry that actually exists.
    source, source_hash = source_list(
        source, source_hash, saltenv  # pylint: disable=W0633
    )
    sfn = ""
    source_sum = None
    if contents is None:
        # No inline contents given; fetch (and possibly render) the source.
        sfn, source_sum, errs = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs
        )
        if errs:
            __clean_tmp(sfn)
            return False, errs
    pending = check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents,
        seuser=seuser,
        serole=serole,
        setype=setype,
        serange=serange,
    )
    # Ignore permission for files written temporary directories
    # Files in any path will still be set correctly using get_managed()
    if name.startswith(tempfile.gettempdir()):
        for meta_key in ("user", "group", "mode"):
            pending.pop(meta_key, None)
    __clean_tmp(sfn)
    if not pending:
        return True, "The file {} is in the correct state".format(name)
    log.info(pending)
    msg = "The following values are set to be changed:\n" + "".join(
        "{}: {}\n".format(key, val) for key, val in pending.items()
    )
    return None, msg
def check_managed_changes(
    name,
    source,
    source_hash,
    source_hash_name,
    user,
    group,
    mode,
    attrs,
    template,
    context,
    defaults,
    saltenv,
    contents=None,
    skip_verify=False,
    keep_mode=False,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    verify_ssl=True,
    **kwargs
):
    """
    Return a dictionary of what changes need to be made for a file

    .. versionchanged:: 3001
        selinux attributes added

    verify_ssl
        If ``False``, remote https file sources (``https://``) and source_hash
        will not attempt to validate the servers certificate. Default is True.

        .. versionadded:: 3002

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_managed_changes /etc/httpd/conf.d/httpd.conf salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root, root, '755' jinja True None None base
    """
    # If the source is a list then find which file exists
    source, source_hash = source_list(
        source, source_hash, saltenv  # pylint: disable=W0633
    )
    sfn = ""
    source_sum = None
    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            verify_ssl=verify_ssl,
            **kwargs
        )
        # Ensure that user-provided hash string is lowercase
        if source_sum and ("hsum" in source_sum):
            source_sum["hsum"] = source_sum["hsum"].lower()
        if comments:
            # get_managed() failed; clean up and propagate its comment.
            __clean_tmp(sfn)
            return False, comments
        if sfn and source and keep_mode:
            # keep_mode: inherit the mode from local/fileserver sources.
            if (
                urllib.parse.urlparse(source).scheme
                in (
                    "salt",
                    "file",
                )
                or source.startswith("/")
            ):
                try:
                    mode = __salt__["cp.stat_file"](source, saltenv=saltenv, octal=True)
                except Exception as exc:  # pylint: disable=broad-except
                    # Fix: log the path that was actually stat'ed (the
                    # source), not the cached copy (sfn).
                    log.warning("Unable to stat %s: %s", source, exc)
    changes = check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents,
        seuser=seuser,
        serole=serole,
        setype=setype,
        serange=serange,
    )
    __clean_tmp(sfn)
    return changes
def check_file_meta(
    name,
    sfn,
    source,
    source_sum,
    user,
    group,
    mode,
    attrs,
    saltenv,
    contents=None,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    verify_ssl=True,
):
    """
    Check for the changes in the file metadata.

    CLI Example:

    .. code-block:: bash

        salt '*' file.check_file_meta /etc/httpd/conf.d/httpd.conf None salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' None base

    .. note::

        Supported hash types include sha512, sha384, sha256, sha224, sha1, and
        md5.

    name
        Path to file destination

    sfn
        Template-processed source file contents

    source
        URL to file source

    source_sum
        File checksum information as a dictionary

        .. code-block:: yaml

            {hash_type: md5, hsum: <md5sum>}

    user
        Destination file user owner

    group
        Destination file group owner

    mode
        Destination file permissions mode

    attrs
        Destination file attributes

        .. versionadded:: 2018.3.0

    saltenv
        Salt environment used to resolve source files

    contents
        File contents

    seuser
        selinux user attribute

        .. versionadded:: 3001

    serole
        selinux role attribute

        .. versionadded:: 3001

    setype
        selinux type attribute

        .. versionadded:: 3001

    serange
        selinux range attribute

        .. versionadded:: 3001

    verify_ssl
        If ``False``, remote https file sources (``https://``)
        will not attempt to validate the servers certificate. Default is True.

        .. versionadded:: 3002
    """
    changes = {}
    if not source_sum:
        source_sum = dict()

    try:
        lstats = stats(
            name, hash_type=source_sum.get("hash_type", None), follow_symlinks=False
        )
    except CommandExecutionError:
        lstats = {}

    # Destination does not exist yet: everything else is moot.
    if not lstats:
        changes["newfile"] = name
        return changes

    if "hsum" in source_sum:
        if source_sum["hsum"] != lstats["sum"]:
            if not sfn and source:
                sfn = __salt__["cp.cache_file"](
                    source,
                    saltenv,
                    source_hash=source_sum["hsum"],
                    verify_ssl=verify_ssl,
                )
            if sfn:
                try:
                    changes["diff"] = get_diff(
                        name, sfn, template=True, show_filenames=False
                    )
                except CommandExecutionError as exc:
                    changes["diff"] = exc.strerror
            else:
                changes["sum"] = "Checksum differs"

    if contents is not None:
        # Write a tempfile with the static contents
        if isinstance(contents, bytes):
            tmp = salt.utils.files.mkstemp(
                prefix=salt.utils.files.TEMPFILE_PREFIX, text=False
            )
            with salt.utils.files.fopen(tmp, "wb") as tmp_:
                tmp_.write(contents)
        else:
            tmp = salt.utils.files.mkstemp(
                prefix=salt.utils.files.TEMPFILE_PREFIX, text=True
            )
            if salt.utils.platform.is_windows():
                contents = os.linesep.join(
                    _splitlines_preserving_trailing_newline(contents)
                )
            with salt.utils.files.fopen(tmp, "w") as tmp_:
                tmp_.write(salt.utils.stringutils.to_str(contents))
        # Compare the static contents with the named file
        try:
            differences = get_diff(name, tmp, show_filenames=False)
        except CommandExecutionError as exc:
            log.error("Failed to diff files: %s", exc)
            differences = exc.strerror
        __clean_tmp(tmp)
        if differences:
            if __salt__["config.option"]("obfuscate_templates"):
                changes["diff"] = "<Obfuscated Template>"
            else:
                changes["diff"] = differences

    if not salt.utils.platform.is_windows():
        # Check owner
        if user is not None and user != lstats["user"] and user != lstats["uid"]:
            changes["user"] = user
        # Check group
        if group is not None and group != lstats["group"] and group != lstats["gid"]:
            changes["group"] = group
        # Normalize the file mode
        smode = salt.utils.files.normalize_mode(lstats["mode"])
        mode = salt.utils.files.normalize_mode(mode)
        if mode is not None and mode != smode:
            changes["mode"] = mode
        if attrs:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs is not None:
                if attrs is not None and (
                    diff_attrs[0] is not None or diff_attrs[1] is not None
                ):
                    changes["attrs"] = attrs

    # Check selinux
    if seuser or serole or setype or serange:
        try:
            (
                current_seuser,
                current_serole,
                current_setype,
                current_serange,
            ) = get_selinux_context(name).split(":")
            log.debug(
                "Current selinux context user:%s role:%s type:%s range:%s",
                current_seuser,
                current_serole,
                current_setype,
                current_serange,
            )
        except ValueError as exc:
            # Fix: ValueError has no .strerror (that is an OSError
            # attribute), so the previous ``exc.strerror`` raised
            # AttributeError here; use str(exc) instead.
            log.error("Unable to get current selinux attributes")
            changes["selinux"] = str(exc)
        else:
            # Fix: only compare when the context parsed successfully (the
            # current_* names are unbound on failure), and collect all
            # differing attributes instead of each one overwriting the
            # previous entry in changes["selinux"].
            selinux_changes = {}
            if seuser and seuser != current_seuser:
                selinux_changes["user"] = seuser
            if serole and serole != current_serole:
                selinux_changes["role"] = serole
            if setype and setype != current_setype:
                selinux_changes["type"] = setype
            if serange and serange != current_serange:
                selinux_changes["range"] = serange
            if selinux_changes:
                changes["selinux"] = selinux_changes

    return changes
def get_diff(
    file1,
    file2,
    saltenv="base",
    show_filenames=True,
    show_changes=True,
    template=False,
    source_hash_file1=None,
    source_hash_file2=None,
):
    """
    Return unified diff of two files

    file1
        The first file to feed into the diff utility

        .. versionchanged:: 2018.3.0
            Can now be either a local or remote file. In earlier releases,
            this had to be a file local to the minion.

    file2
        The second file to feed into the diff utility

        .. versionchanged:: 2018.3.0
            Can now be either a local or remote file. In earlier releases, this
            had to be a file on the salt fileserver (i.e.
            ``salt://somefile.txt``)

    show_filenames: True
        Set to ``False`` to hide the filenames in the top two lines of the
        diff.

    show_changes: True
        If set to ``False``, and there are differences, then instead of a diff
        a simple message stating that show_changes is set to ``False`` will be
        returned.

    template: False
        Set to ``True`` if two templates are being compared. This is not useful
        except for within states, with the ``obfuscate_templates`` option set
        to ``True``.

        .. versionadded:: 2018.3.0

    source_hash_file1
        If ``file1`` is an http(s)/ftp URL and the file exists in the minion's
        file cache, this option can be passed to keep the minion from
        re-downloading the archive if the cached copy matches the specified
        hash.

        .. versionadded:: 2018.3.0

    source_hash_file2
        If ``file2`` is an http(s)/ftp URL and the file exists in the minion's
        file cache, this option can be passed to keep the minion from
        re-downloading the archive if the cached copy matches the specified
        hash.

        .. versionadded:: 2018.3.0

    CLI Examples:

    .. code-block:: bash

        salt '*' file.get_diff /home/fred/.vimrc salt://users/fred/.vimrc
        salt '*' file.get_diff /tmp/foo.txt /tmp/bar.txt
    """
    files = (file1, file2)
    source_hashes = (source_hash_file1, source_hash_file2)
    paths = []
    errors = []

    # Cache both files locally (remote sources are fetched; local paths pass
    # through unchanged) and collect any failures before diffing.
    for filename, source_hash in zip(files, source_hashes):
        try:
            # Local file paths will just return the same path back when passed
            # to cp.cache_file.
            cached_path = __salt__["cp.cache_file"](
                filename, saltenv, source_hash=source_hash
            )
            if cached_path is False:
                errors.append(
                    "File {} not found".format(
                        salt.utils.stringutils.to_unicode(filename)
                    )
                )
                continue
            paths.append(cached_path)
        except MinionError as exc:
            errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
            continue

    if errors:
        raise CommandExecutionError("Failed to cache one or more files", info=errors)

    # Read both files fully into memory as lists of raw lines.
    args = []
    for filename in paths:
        try:
            with salt.utils.files.fopen(filename, "rb") as fp_:
                args.append(fp_.readlines())
        except OSError as exc:
            raise CommandExecutionError(
                "Failed to read {}: {}".format(
                    salt.utils.stringutils.to_unicode(filename), exc.strerror
                )
            )

    if args[0] != args[1]:
        if template and __salt__["config.option"]("obfuscate_templates"):
            ret = "<Obfuscated Template>"
        elif not show_changes:
            ret = "<show_changes=False>"
        else:
            # If either file is binary, report that instead of a text diff.
            bdiff = _binary_replace(*paths)  # pylint: disable=no-value-for-parameter
            if bdiff:
                ret = bdiff
            else:
                if show_filenames:
                    args.extend(paths)
                ret = __utils__["stringutils.get_diff"](*args)
        return ret
    # Identical contents: no diff to report
    return ""
def manage_file(
    name,
    sfn,
    ret,
    source,
    source_sum,
    user,
    group,
    mode,
    attrs,
    saltenv,
    backup,
    makedirs=False,
    template=None,  # pylint: disable=W0613
    show_changes=True,
    contents=None,
    dir_mode=None,
    follow_symlinks=True,
    skip_verify=False,
    keep_mode=False,
    encoding=None,
    encoding_errors="strict",
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    verify_ssl=True,
    **kwargs
):
    """
    Checks the destination against what was retrieved with get_managed and
    makes the appropriate modifications (if necessary).

    name
        location to place the file

    sfn
        location of cached file on the minion

        This is the path to the file stored on the minion. This file is placed
        on the minion using cp.cache_file.  If the hash sum of that file
        matches the source_sum, we do not transfer the file to the minion
        again.

        This file is then grabbed and if it has template set, it renders the
        file to be placed into the correct place on the system using
        salt.files.utils.copyfile()

    ret
        The initial state return data structure. Pass in ``None`` to use the
        default structure.

    source
        file reference on the master

    source_sum
        sum hash for source

    user
        user owner

    group
        group owner

    backup
        backup_mode

    attrs
        attributes to be set on file: '' means remove all of them

        .. versionadded:: 2018.3.0

    makedirs
        make directories if they do not exist

    template
        format of templating

    show_changes
        Include diff in state return

    contents:
        contents to be placed in the file

    dir_mode
        mode for directories created with makedirs

    skip_verify: False
        If ``True``, hash verification of remote file sources (``http://``,
        ``https://``, ``ftp://``) will be skipped, and the ``source_hash``
        argument will be ignored.

        .. versionadded:: 2016.3.0

    keep_mode: False
        If ``True``, and the ``source`` is a file from the Salt fileserver (or
        a local file on the minion), the mode of the destination file will be
        set to the mode of the source file.

        .. note:: keep_mode does not work with salt-ssh.

            As a consequence of how the files are transferred to the minion, and
            the inability to connect back to the master with salt-ssh, salt is
            unable to stat the file as it exists on the fileserver and thus
            cannot mirror the mode on the salt-ssh minion

    encoding
        If specified, then the specified encoding will be used. Otherwise, the
        file will be encoded using the system locale (usually UTF-8). See
        https://docs.python.org/3/library/codecs.html#standard-encodings for
        the list of available encodings.

        .. versionadded:: 2017.7.0

    encoding_errors: 'strict'
        Default is ``'strict'``.
        See https://docs.python.org/2/library/codecs.html#codec-base-classes
        for the error handling schemes.

        .. versionadded:: 2017.7.0

    seuser
        selinux user attribute

        .. versionadded:: 3001

    serole
        selinux role attribute

        .. versionadded:: 3001

    setype
        selinux type attribute

        .. versionadded:: 3001

    serange
        selinux range attribute

        .. versionadded:: 3001

    verify_ssl
        If ``False``, remote https file sources (``https://``)
        will not attempt to validate the servers certificate. Default is True.

        .. versionadded:: 3002

    CLI Example:

    .. code-block:: bash

        salt '*' file.manage_file /etc/httpd/conf.d/httpd.conf '' '{}' salt://http/httpd.conf '{hash_type: 'md5', 'hsum': <md5sum>}' root root '755' '' base ''

    .. versionchanged:: 2014.7.0
        ``follow_symlinks`` option added
    """
    name = os.path.expanduser(name)

    if not ret:
        ret = {"name": name, "changes": {}, "comment": "", "result": True}

    # Ensure that user-provided hash string is lowercase
    if source_sum and ("hsum" in source_sum):
        source_sum["hsum"] = source_sum["hsum"].lower()

    # Make sure we have a cached copy of the source and a checksum for it
    if source:
        if not sfn:
            # File is not present, cache it
            sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
            if not sfn:
                return _error(ret, "Source file '{}' not found".format(source))
            htype = source_sum.get("hash_type", __opts__["hash_type"])
            # Recalculate source sum now that file has been cached
            source_sum = {"hash_type": htype, "hsum": get_hash(sfn, form=htype)}

        if keep_mode:
            if urllib.parse.urlparse(source).scheme in ("salt", "file", ""):
                try:
                    mode = __salt__["cp.stat_file"](source, saltenv=saltenv, octal=True)
                except Exception as exc:  # pylint: disable=broad-except
                    log.warning("Unable to stat %s: %s", sfn, exc)

    # Check changes if the target file exists
    if os.path.isfile(name) or os.path.islink(name):
        if os.path.islink(name) and follow_symlinks:
            real_name = os.path.realpath(name)
        else:
            real_name = name

        # Only test the checksums on files with managed contents
        if source and not (not follow_symlinks and os.path.islink(real_name)):
            name_sum = get_hash(
                real_name, source_sum.get("hash_type", __opts__["hash_type"])
            )
        else:
            name_sum = None

        # Check if file needs to be replaced
        if source and (
            name_sum is None
            or source_sum.get("hsum", __opts__["hash_type"]) != name_sum
        ):
            if not sfn:
                sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
            if not sfn:
                return _error(ret, "Source file '{}' not found".format(source))
            # If the downloaded file came from a non salt server or local
            # source, and we are not skipping checksum verification, then
            # verify that it matches the specified checksum.
            if not skip_verify and urllib.parse.urlparse(source).scheme != "salt":
                dl_sum = get_hash(sfn, source_sum["hash_type"])
                if dl_sum != source_sum["hsum"]:
                    ret["comment"] = (
                        "Specified {} checksum for {} ({}) does not match "
                        "actual checksum ({}). If the 'source_hash' value "
                        "refers to a remote file with multiple possible "
                        "matches, then it may be necessary to set "
                        "'source_hash_name'.".format(
                            source_sum["hash_type"], source, source_sum["hsum"], dl_sum
                        )
                    )
                    ret["result"] = False
                    return ret

            # Print a diff equivalent to diff -u old new
            if __salt__["config.option"]("obfuscate_templates"):
                ret["changes"]["diff"] = "<Obfuscated Template>"
            elif not show_changes:
                ret["changes"]["diff"] = "<show_changes=False>"
            else:
                try:
                    ret["changes"]["diff"] = get_diff(
                        real_name, sfn, show_filenames=False
                    )
                except CommandExecutionError as exc:
                    ret["changes"]["diff"] = exc.strerror

            # Pre requisites are met, and the file needs to be replaced, do it
            try:
                salt.utils.files.copyfile(
                    sfn,
                    real_name,
                    __salt__["config.backup_mode"](backup),
                    __opts__["cachedir"],
                )
            except OSError as io_error:
                __clean_tmp(sfn)
                return _error(ret, "Failed to commit change: {}".format(io_error))

        # Inline contents were supplied instead of (or in addition to) a
        # source file: diff against a temp file holding those contents.
        if contents is not None:
            # Write the static contents to a temporary file
            tmp = salt.utils.files.mkstemp(
                prefix=salt.utils.files.TEMPFILE_PREFIX, text=True
            )
            with salt.utils.files.fopen(tmp, "wb") as tmp_:
                if encoding:
                    if salt.utils.platform.is_windows():
                        contents = os.linesep.join(
                            _splitlines_preserving_trailing_newline(contents)
                        )
                    log.debug("File will be encoded with %s", encoding)
                    tmp_.write(
                        contents.encode(encoding=encoding, errors=encoding_errors)
                    )
                else:
                    tmp_.write(salt.utils.stringutils.to_bytes(contents))

            try:
                differences = get_diff(
                    real_name,
                    tmp,
                    show_filenames=False,
                    show_changes=show_changes,
                    template=True,
                )
            except CommandExecutionError as exc:
                ret.setdefault("warnings", []).append(
                    "Failed to detect changes to file: {}".format(exc.strerror)
                )
                differences = ""

            if differences:
                ret["changes"]["diff"] = differences

                # Pre requisites are met, the file needs to be replaced, do it
                try:
                    salt.utils.files.copyfile(
                        tmp,
                        real_name,
                        __salt__["config.backup_mode"](backup),
                        __opts__["cachedir"],
                    )
                except OSError as io_error:
                    __clean_tmp(tmp)
                    return _error(ret, "Failed to commit change: {}".format(io_error))
            __clean_tmp(tmp)

        # Check for changing symlink to regular file here
        if os.path.islink(name) and not follow_symlinks:
            if not sfn:
                sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
            if not sfn:
                return _error(ret, "Source file '{}' not found".format(source))
            # If the downloaded file came from a non salt server source verify
            # that it matches the intended sum value
            if not skip_verify and urllib.parse.urlparse(source).scheme != "salt":
                dl_sum = get_hash(sfn, source_sum["hash_type"])
                if dl_sum != source_sum["hsum"]:
                    ret["comment"] = (
                        "Specified {} checksum for {} ({}) does not match "
                        "actual checksum ({})".format(
                            source_sum["hash_type"], name, source_sum["hsum"], dl_sum
                        )
                    )
                    ret["result"] = False
                    return ret

            try:
                salt.utils.files.copyfile(
                    sfn,
                    name,
                    __salt__["config.backup_mode"](backup),
                    __opts__["cachedir"],
                )
            except OSError as io_error:
                __clean_tmp(sfn)
                return _error(ret, "Failed to commit change: {}".format(io_error))
            ret["changes"]["diff"] = "Replace symbolic link with regular file"

        if salt.utils.platform.is_windows():
            # This function resides in win_file.py and will be available
            # on Windows. The local function will be overridden
            # pylint: disable=E1120,E1121,E1123
            ret = check_perms(
                path=name,
                ret=ret,
                owner=kwargs.get("win_owner"),
                grant_perms=kwargs.get("win_perms"),
                deny_perms=kwargs.get("win_deny_perms"),
                inheritance=kwargs.get("win_inheritance", True),
                reset=kwargs.get("win_perms_reset", False),
            )
            # pylint: enable=E1120,E1121,E1123
        else:
            ret, _ = check_perms(
                name,
                ret,
                user,
                group,
                mode,
                attrs,
                follow_symlinks,
                seuser=seuser,
                serole=serole,
                setype=setype,
                serange=serange,
            )

        if ret["changes"]:
            ret["comment"] = "File {} updated".format(salt.utils.data.decode(name))

        elif not ret["changes"] and ret["result"]:
            ret["comment"] = "File {} is in the correct state".format(
                salt.utils.data.decode(name)
            )
        if sfn:
            __clean_tmp(sfn)
        return ret
    else:  # target file does not exist
        contain_dir = os.path.dirname(name)

        def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
            # check for existence of windows drive letter
            if salt.utils.platform.is_windows():
                drive, _ = os.path.splitdrive(name)
                if drive and not os.path.exists(drive):
                    __clean_tmp(sfn)
                    return _error(ret, "{} drive not present".format(drive))
            if dir_mode is None and mode is not None:
                # Add execute bit to each nonzero digit in the mode, if
                # dir_mode was not specified. Otherwise, any
                # directories created with makedirs_() below can't be
                # listed via a shell.
                mode_list = [x for x in str(mode)][-3:]
                for idx, part in enumerate(mode_list):
                    if part != "0":
                        mode_list[idx] = str(int(part) | 1)
                dir_mode = "".join(mode_list)

            if salt.utils.platform.is_windows():
                # This function resides in win_file.py and will be available
                # on Windows. The local function will be overridden
                # pylint: disable=E1120,E1121,E1123
                makedirs_(
                    path=name,
                    owner=kwargs.get("win_owner"),
                    grant_perms=kwargs.get("win_perms"),
                    deny_perms=kwargs.get("win_deny_perms"),
                    inheritance=kwargs.get("win_inheritance", True),
                    reset=kwargs.get("win_perms_reset", False),
                )
                # pylint: enable=E1120,E1121,E1123
            else:
                makedirs_(name, user=user, group=group, mode=dir_mode)

        if source:
            # Apply the new file
            if not sfn:
                sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
            if not sfn:
                return _error(ret, "Source file '{}' not found".format(source))
            # If the downloaded file came from a non salt server source verify
            # that it matches the intended sum value
            if not skip_verify and urllib.parse.urlparse(source).scheme != "salt":
                dl_sum = get_hash(sfn, source_sum["hash_type"])
                if dl_sum != source_sum["hsum"]:
                    ret["comment"] = (
                        "Specified {} checksum for {} ({}) does not match "
                        "actual checksum ({})".format(
                            source_sum["hash_type"], name, source_sum["hsum"], dl_sum
                        )
                    )
                    ret["result"] = False
                    return ret
            # It is a new file, set the diff accordingly
            ret["changes"]["diff"] = "New file"
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret["changes"].pop("diff", None)
                    return _error(ret, "Parent directory not present")
        else:  # source != True
            if not os.path.isdir(contain_dir):
                if makedirs:
                    _set_mode_and_make_dirs(name, dir_mode, mode, user, group)
                else:
                    __clean_tmp(sfn)
                    # No changes actually made
                    ret["changes"].pop("diff", None)
                    return _error(ret, "Parent directory not present")

        # Create the file, user rw-only if mode will be set to prevent
        # a small security race problem before the permissions are set
        with salt.utils.files.set_umask(0o077 if mode else None):
            # Create a new file when test is False and source is None
            if contents is None:
                if not __opts__["test"]:
                    if touch(name):
                        ret["changes"]["new"] = "file {} created".format(name)
                        ret["comment"] = "Empty file"
                    else:
                        return _error(ret, "Empty file {} not created".format(name))
            else:
                if not __opts__["test"]:
                    if touch(name):
                        ret["changes"]["diff"] = "New file"
                    else:
                        return _error(ret, "File {} not created".format(name))

            if contents is not None:
                # Write the static contents to a temporary file
                tmp = salt.utils.files.mkstemp(
                    prefix=salt.utils.files.TEMPFILE_PREFIX, text=True
                )
                with salt.utils.files.fopen(tmp, "wb") as tmp_:
                    if encoding:
                        if salt.utils.platform.is_windows():
                            contents = os.linesep.join(
                                _splitlines_preserving_trailing_newline(contents)
                            )
                        log.debug("File will be encoded with %s", encoding)
                        tmp_.write(
                            contents.encode(encoding=encoding, errors=encoding_errors)
                        )
                    else:
                        tmp_.write(salt.utils.stringutils.to_bytes(contents))

                # Copy into place
                salt.utils.files.copyfile(
                    tmp, name, __salt__["config.backup_mode"](backup), __opts__["cachedir"]
                )
                __clean_tmp(tmp)

            # Now copy the file contents if there is a source file
            elif sfn:
                salt.utils.files.copyfile(
                    sfn, name, __salt__["config.backup_mode"](backup), __opts__["cachedir"]
                )
                __clean_tmp(sfn)

        # This is a new file, if no mode specified, use the umask to figure
        # out what mode to use for the new file.
        if mode is None and not salt.utils.platform.is_windows():
            # Get current umask
            mask = salt.utils.files.get_umask()
            # Calculate the mode value that results from the umask
            mode = oct((0o777 ^ mask) & 0o666)

        if salt.utils.platform.is_windows():
            # This function resides in win_file.py and will be available
            # on Windows. The local function will be overridden
            # pylint: disable=E1120,E1121,E1123
            ret = check_perms(
                path=name,
                ret=ret,
                owner=kwargs.get("win_owner"),
                grant_perms=kwargs.get("win_perms"),
                deny_perms=kwargs.get("win_deny_perms"),
                inheritance=kwargs.get("win_inheritance", True),
                reset=kwargs.get("win_perms_reset", False),
            )
            # pylint: enable=E1120,E1121,E1123
        else:
            ret, _ = check_perms(
                name,
                ret,
                user,
                group,
                mode,
                attrs,
                seuser=seuser,
                serole=serole,
                setype=setype,
                serange=serange,
            )

        if not ret["comment"]:
            ret["comment"] = "File " + name + " updated"

        if __opts__["test"]:
            ret["comment"] = "File " + name + " not updated"
        elif not ret["changes"] and ret["result"]:
            ret["comment"] = "File " + name + " is in the correct state"
        if sfn:
            __clean_tmp(sfn)
        return ret
def mkdir(dir_path, user=None, group=None, mode=None):
    """
    Ensure that a directory is available.

    CLI Example:

    .. code-block:: bash

        salt '*' file.mkdir /opt/jetty/context
    """
    target = os.path.normpath(os.path.expanduser(dir_path))

    if os.path.isdir(target):
        return True

    # If a caller such as managed() is invoked with makedirs=True, make
    # sure that any created dirs are created with the same user and group
    # to follow the principal of least surprise method.
    makedirs_perms(target, user, group, mode)
    return True
def makedirs_(path, user=None, group=None, mode=None):
    """
    Ensure that the directory containing this path is available.

    .. note::

        The path must end with a trailing slash otherwise the directory/directories
        will be created up to the parent directory. For example if path is
        ``/opt/code``, then it would be treated as ``/opt/`` but if the path
        ends with a trailing slash like ``/opt/code/``, then it would be
        treated as ``/opt/code/``.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs /opt/code/
    """
    path = os.path.expanduser(path)

    if mode:
        mode = salt.utils.files.normalize_mode(mode)

    # Only the *parent* of ``path`` is created -- hence the trailing-slash
    # behavior described above.
    dirname = os.path.normpath(os.path.dirname(path))

    if os.path.isdir(dirname):
        # Nothing for us to do: the parent directory already exists.
        msg = "Directory '{}' already exists".format(dirname)
        log.debug(msg)
        return msg

    if os.path.exists(dirname):
        msg = "The path '{}' already exists and is not a directory".format(dirname)
        log.debug(msg)
        return msg

    # Walk upwards until an existing ancestor directory is found, collecting
    # every missing directory along the way (topmost first).
    missing = []
    probe = dirname
    while not os.path.isdir(probe):
        missing.insert(0, probe)
        parent = os.path.dirname(probe)
        if parent == probe:
            raise SaltInvocationError(
                "Recursive creation for path '{}' would result in an "
                "infinite loop. Please use an absolute path.".format(parent)
            )
        probe = parent

    # Create the missing chain from the shallowest directory downwards,
    # applying the requested user, group and mode to each one.
    for newdir in missing:
        log.debug("Creating directory: %s", newdir)
        mkdir(newdir, user=user, group=group, mode=mode)
def makedirs_perms(name, user=None, group=None, mode="0755"):
    """
    Taken and modified from os.makedirs to set user, group and mode for each
    directory created.

    CLI Example:

    .. code-block:: bash

        salt '*' file.makedirs_perms /opt/code
    """
    name = os.path.expanduser(name)
    path = os.path
    head, tail = path.split(name)
    if not tail:
        # Path ended with a separator; split again to get the real leaf
        head, tail = path.split(head)
    if head and tail and not path.exists(head):
        try:
            # Recursively create the missing ancestors first
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # be happy if someone already created the path
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:  # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    # Apply ownership/mode to the directory we just created. NOTE(review):
    # ``int("{}".format(mode))`` parses the mode string as *decimal*
    # (e.g. "0755" -> 755); check_perms appears to expect this form.
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
def get_devmm(name):
    """
    Get major/minor info from a device

    CLI Example:

    .. code-block:: bash

       salt '*' file.get_devmm /dev/chr
    """
    name = os.path.expanduser(name)
    # Paths that are not character or block devices report (0, 0)
    # rather than raising.
    if not (is_chrdev(name) or is_blkdev(name)):
        return (0, 0)
    rdev = os.stat(name).st_rdev
    return (os.major(rdev), os.minor(rdev))
def is_chrdev(name):
    """
    Check if a file exists and is a character device.

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_chrdev /dev/chr
    """
    name = os.path.expanduser(name)
    try:
        st_mode = os.stat(name).st_mode
    except OSError as exc:
        # A missing path is simply "not a character device"
        if exc.errno == errno.ENOENT:
            return False
        raise
    return stat.S_ISCHR(st_mode)
def mknod_chrdev(name, major, minor, user=None, group=None, mode="0660"):
    """
    .. versionadded:: 0.17.0

    Create a character device.

    CLI Example:

    .. code-block:: bash

       salt '*' file.mknod_chrdev /dev/chr 180 31
    """
    name = os.path.expanduser(name)
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    log.debug(
        "Creating character device name:%s major:%s minor:%s mode:%s",
        name,
        major,
        minor,
        mode,
    )
    try:
        if __opts__["test"]:
            # Dry run: report what would happen without touching the system
            ret["changes"] = {"new": "Character device {} created.".format(name)}
            ret["result"] = None
        else:
            # Strip any leading "0"/"o"/"O" prefix characters and parse the
            # remaining digits as octal, then OR in the chrdev type bit.
            if (
                os.mknod(
                    name,
                    int(str(mode).lstrip("0Oo"), 8) | stat.S_IFCHR,
                    os.makedev(major, minor),
                )
                is None
            ):
                ret["changes"] = {"new": "Character device {} created.".format(name)}
                ret["result"] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret["comment"] = "File {} exists and cannot be overwritten".format(name)
    # quick pass at verifying the permissions of the newly created character device
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
    return ret
def is_blkdev(name):
    """
    Check if a file exists and is a block device.

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_blkdev /dev/blk
    """
    name = os.path.expanduser(name)
    try:
        st_mode = os.stat(name).st_mode
    except OSError as exc:
        # A missing path is simply "not a block device"
        if exc.errno == errno.ENOENT:
            return False
        raise
    return stat.S_ISBLK(st_mode)
def mknod_blkdev(name, major, minor, user=None, group=None, mode="0660"):
    """
    .. versionadded:: 0.17.0

    Create a block device.

    CLI Example:

    .. code-block:: bash

       salt '*' file.mknod_blkdev /dev/blk 8 999
    """
    name = os.path.expanduser(name)
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    log.debug(
        "Creating block device name:%s major:%s minor:%s mode:%s",
        name,
        major,
        minor,
        mode,
    )
    try:
        if __opts__["test"]:
            # Dry run: report what would happen without touching the system
            ret["changes"] = {"new": "Block device {} created.".format(name)}
            ret["result"] = None
        else:
            # Strip any leading "0"/"o"/"O" prefix characters and parse the
            # remaining digits as octal, then OR in the blkdev type bit.
            if (
                os.mknod(
                    name,
                    int(str(mode).lstrip("0Oo"), 8) | stat.S_IFBLK,
                    os.makedev(major, minor),
                )
                is None
            ):
                ret["changes"] = {"new": "Block device {} created.".format(name)}
                ret["result"] = True
    except OSError as exc:
        # be happy it is already there....however, if you are trying to change the
        # major/minor, you will need to unlink it first as os.mknod will not overwrite
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret["comment"] = "File {} exists and cannot be overwritten".format(name)
    # quick pass at verifying the permissions of the newly created block device
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
    return ret
def is_fifo(name):
    """
    Check if a file exists and is a FIFO.

    CLI Example:

    .. code-block:: bash

       salt '*' file.is_fifo /dev/fifo
    """
    name = os.path.expanduser(name)
    try:
        st_mode = os.stat(name).st_mode
    except OSError as exc:
        # A missing path is simply "not a FIFO"
        if exc.errno == errno.ENOENT:
            return False
        raise
    return stat.S_ISFIFO(st_mode)
def mknod_fifo(name, user=None, group=None, mode="0660"):
    """
    .. versionadded:: 0.17.0

    Create a FIFO pipe.

    CLI Example:

    .. code-block:: bash

       salt '*' file.mknod_fifo /dev/fifo
    """
    name = os.path.expanduser(name)
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    log.debug("Creating FIFO name: %s", name)
    try:
        if __opts__["test"]:
            # Dry run: report what would happen without touching the system
            ret["changes"] = {"new": "Fifo pipe {} created.".format(name)}
            ret["result"] = None
        else:
            # Strip any leading "0"/"o"/"O" prefix characters and parse the
            # remaining digits as octal for os.mkfifo
            if os.mkfifo(name, int(str(mode).lstrip("0Oo"), 8)) is None:
                ret["changes"] = {"new": "Fifo pipe {} created.".format(name)}
                ret["result"] = True
    except OSError as exc:
        # be happy it is already there
        if exc.errno != errno.EEXIST:
            raise
        else:
            ret["comment"] = "File {} exists and cannot be overwritten".format(name)
    # quick pass at verifying the permissions of the newly created fifo
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
    return ret
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode="0600"):
    """
    .. versionadded:: 0.17.0

    Create a block device, character device, or fifo pipe.
    Identical to the gnu mknod.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.mknod /dev/chr c 180 31
        salt '*' file.mknod /dev/blk b 8 999
        salt '*' file.mknod /dev/fifo p
    """
    ret = False
    # Make sure the parent directories exist before creating the node
    makedirs_(name, user, group)
    # Dispatch on the node type flag, mirroring gnu mknod's c/b/p arguments
    if ntype == "c":
        ret = mknod_chrdev(name, major, minor, user, group, mode)
    elif ntype == "b":
        ret = mknod_blkdev(name, major, minor, user, group, mode)
    elif ntype == "p":
        ret = mknod_fifo(name, user, group, mode)
    else:
        raise SaltInvocationError(
            "Node type unavailable: '{}'. Available node types are "
            "character ('c'), block ('b'), and pipe ('p').".format(ntype)
        )
    return ret
def list_backups(path, limit=None):
    """
    .. versionadded:: 0.17.0

    Lists the previous versions of a file backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    CLI Example:

    .. code-block:: bash

        salt '*' file.list_backups /foo/bar/baz.txt
    """
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        # limit is None: no cap on the number of results
        pass
    except ValueError:
        log.error("file.list_backups: 'limit' value must be numeric")
        limit = None

    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    if salt.utils.platform.is_windows():
        # ':' is an illegal filesystem path character on Windows
        src_dir = parent_dir.replace(":", "_")
    else:
        src_dir = parent_dir[1:]
    # Figure out full path of location of backup file in minion cache
    bkdir = os.path.join(bkroot, src_dir)

    if not os.path.isdir(bkdir):
        return {}

    files = {}
    for fname in [
        x for x in os.listdir(bkdir) if os.path.isfile(os.path.join(bkdir, x))
    ]:
        if salt.utils.platform.is_windows():
            # ':' is an illegal filesystem path character on Windows
            strpfmt = "{}_%a_%b_%d_%H-%M-%S_%f_%Y".format(basename)
        else:
            strpfmt = "{}_%a_%b_%d_%H:%M:%S_%f_%Y".format(basename)
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # File didn't match the strp format string, so it's not a backup
            # for this file. Move on to the next one.
            continue
        if salt.utils.platform.is_windows():
            str_format = "%a %b %d %Y %H-%M-%S.%f"
        else:
            str_format = "%a %b %d %Y %H:%M:%S.%f"
        files.setdefault(timestamp, {})["Backup Time"] = timestamp.strftime(str_format)
        location = os.path.join(bkdir, fname)
        files[timestamp]["Size"] = os.stat(location).st_size
        files[timestamp]["Location"] = location

    # Number the backups from 0 (most recent first) and apply the limit
    return dict(
        list(
            zip(
                list(range(len(files))),
                [files[x] for x in sorted(files, reverse=True)[:limit]],
            )
        )
    )
# Backwards-compatible singular alias for list_backups
list_backup = salt.utils.functools.alias_function(list_backups, "list_backup")
def list_backups_dir(path, limit=None):
    """
    Lists the previous versions of a directory backed up using Salt's :ref:`file
    state backup <file-state-backups>` system.

    path
        The directory on the minion to check for backups
    limit
        Limit the number of results to the most recent N backups

    CLI Example:

    .. code-block:: bash

        salt '*' file.list_backups_dir /foo/bar/baz/
    """
    path = os.path.expanduser(path)

    try:
        limit = int(limit)
    except TypeError:
        # limit is None: no cap on the number of results
        pass
    except ValueError:
        log.error("file.list_backups_dir: 'limit' value must be numeric")
        limit = None

    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    # Figure out full path of location of backup folder in minion cache
    bkdir = os.path.join(bkroot, parent_dir[1:])

    if not os.path.isdir(bkdir):
        return {}

    files = {}
    # Count how many backup entries exist per original name (the portion of
    # each backup filename before the first underscore).
    f = {
        i: len(list(n))
        for i, n in itertools.groupby(
            [x.split("_")[0] for x in sorted(os.listdir(bkdir))]
        )
    }
    ff = os.listdir(bkdir)
    for i, n in f.items():
        ssfile = {}
        for x in sorted(ff):
            # NOTE(review): this rebinds ``basename`` and shadows the value
            # from os.path.split(path) above -- looks intentional here since
            # it is only compared against the groupby key, but verify.
            basename = x.split("_")[0]
            if i == basename:
                strpfmt = "{}_%a_%b_%d_%H:%M:%S_%f_%Y".format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Folder didn't match the strp format string, so it's not a backup
                    # for this folder. Move on to the next one.
                    continue
                ssfile.setdefault(timestamp, {})["Backup Time"] = timestamp.strftime(
                    "%a %b %d %Y %H:%M:%S.%f"
                )
                location = os.path.join(bkdir, x)
                ssfile[timestamp]["Size"] = os.stat(location).st_size
                ssfile[timestamp]["Location"] = location

        # Number this entry's backups from 0 (most recent first), apply the
        # limit, and record them keyed by the original name.
        sfiles = dict(
            list(
                zip(
                    list(range(n)),
                    [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]],
                )
            )
        )
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files
def restore_backup(path, backup_id):
    """
    .. versionadded:: 0.17.0

    Restore a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to restore, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.restore_backup /foo/bar/baz.txt 0
    """
    path = os.path.expanduser(path)

    # Note: This only supports minion backups, so this function will need to be
    # modified if/when master backups are implemented.
    ret = {"result": False, "comment": "Invalid backup_id '{}'".format(backup_id)}
    try:
        # Only accept ids whose string form round-trips through int()
        # (rejects floats like "0.5" via the ValueError below)
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        return ret
    except KeyError:
        ret["comment"] = "backup_id '{}' does not exist for {}".format(backup_id, path)
        return ret

    # Back up the current file before overwriting it with the restored copy
    salt.utils.files.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(backup["Location"], path)
    except OSError as exc:
        ret["comment"] = "Unable to restore {} to {}: {}".format(
            backup["Location"], path, exc
        )
        return ret
    else:
        ret["result"] = True
        ret["comment"] = "Successfully restored {} to {}".format(
            backup["Location"], path
        )

    # Try to set proper ownership
    if not salt.utils.platform.is_windows():
        try:
            fstat = os.stat(path)
        except OSError:
            ret["comment"] += ", but was unable to set ownership"
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)

    return ret
def delete_backup(path, backup_id):
    """
    .. versionadded:: 0.17.0

    Delete a previous version of a file that was backed up using Salt's
    :ref:`file state backup <file-state-backups>` system.

    path
        The path on the minion to check for backups
    backup_id
        The numeric id for the backup you wish to delete, as found using
        :mod:`file.list_backups <salt.modules.file.list_backups>`

    CLI Example:

    .. code-block:: bash

        salt '*' file.delete_backup /var/cache/salt/minion/file_backup/home/foo/bar/baz.txt 0
    """
    path = os.path.expanduser(path)
    ret = {"result": False, "comment": "Invalid backup_id '{}'".format(backup_id)}
    try:
        # Only accept ids whose string form round-trips through int()
        # (rejects floats like "0.5" via the ValueError below)
        if len(str(backup_id)) == len(str(int(backup_id))):
            backup = list_backups(path)[int(backup_id)]
        else:
            return ret
    except ValueError:
        return ret
    except KeyError:
        ret["comment"] = "backup_id '{}' does not exist for {}".format(backup_id, path)
        return ret

    try:
        os.remove(backup["Location"])
    except OSError as exc:
        ret["comment"] = "Unable to remove {}: {}".format(backup["Location"], exc)
    else:
        ret["result"] = True
        ret["comment"] = "Successfully removed {}".format(backup["Location"])

    return ret
# Backwards-compatible alias for delete_backup
remove_backup = salt.utils.functools.alias_function(delete_backup, "remove_backup")
def grep(path, pattern, *opts):
    """
    Grep for a string in the specified file

    .. note::
        This function's return value is slated for refinement in future
        versions of Salt

    path
        Path to the file to be searched

        .. note::
            Globbing is supported (i.e. ``/var/log/foo/*.log``, but if globbing
            is being used then the path should be quoted to keep the shell from
            attempting to expand the glob expression.

    pattern
        Pattern to match. For example: ``test``, or ``a[0-5]``

    opts
        Additional command-line flags to pass to the grep command. For example:
        ``-v``, or ``-i -B2``

        .. note::
            The options should come after a double-dash (as shown in the
            examples below) to keep Salt's own argument parser from
            interpreting them.

    CLI Example:

    .. code-block:: bash

        salt '*' file.grep /etc/passwd nobody
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i
        salt '*' file.grep /etc/sysconfig/network-scripts/ifcfg-eth0 ipaddr -- -i -B2
        salt '*' file.grep "/etc/sysconfig/network-scripts/*" ipaddr -- -i -l
    """
    path = os.path.expanduser(path)
    # Expand any glob in the path. When nothing matches, fall back to the
    # literal path so grep itself reports the error to the caller.
    expanded = glob.glob(path)
    target = expanded if expanded else path
    split_opts = []
    for opt in opts:
        try:
            split = salt.utils.args.shlex_split(opt)
        except AttributeError:
            # Non-string option (e.g. an int parsed by the CLI)
            split = salt.utils.args.shlex_split(str(opt))
        if len(split) > 1:
            raise SaltInvocationError(
                "Passing multiple command line arguments in a single string "
                "is not supported, please pass the following arguments "
                "separately: {}".format(opt)
            )
        split_opts.extend(split)
    cmd = ["grep"] + split_opts + [pattern]
    if isinstance(target, list):
        cmd += target
    else:
        cmd.append(target)
    try:
        ret = __salt__["cmd.run_all"](cmd, python_shell=False)
    except OSError as exc:
        raise CommandExecutionError(exc.strerror)
    return ret
def open_files(by_pid=False):
    """
    Return a list of all physical open files on the system.

    CLI Examples:

    .. code-block:: bash

        salt '*' file.open_files
        salt '*' file.open_files by_pid=True
    """
    # Valid PIDs are the purely-numeric entries under /proc
    pids = {}
    for entry in os.listdir("/proc/"):
        try:
            pids[int(entry)] = []
        except ValueError:
            # Not a valid PID, move on
            pass
    # Then inspect the open files of each PID
    files = {}
    for pid in pids:
        ppath = "/proc/{}".format(pid)
        try:
            tids = os.listdir("{}/task".format(ppath))
        except OSError:
            # The process exited while we were iterating
            continue
        # Collect the names of all of the file descriptors, for the
        # process itself and for each of its threads
        fd_ = [
            "{}/fd/{}".format(ppath, fpath)
            for fpath in os.listdir("{}/fd".format(ppath))
        ]
        for tid in tids:
            try:
                fd_.append(os.path.realpath("{}/task/{}/exe".format(ppath, tid)))
            except OSError:
                continue
            fd_.extend(
                "{}/task/{}/fd/{}".format(ppath, tid, tpath)
                for tpath in os.listdir("{}/task/{}/fd".format(ppath, tid))
            )
        # Resolve each descriptor and record it per file and per PID
        for fdpath in sorted(set(fd_)):
            # PIDs and TIDs can disappear before we query them; stat also
            # filters out sockets and deleted files
            try:
                name = os.path.realpath(fdpath)
                os.stat(name)
            except OSError:
                continue
            files.setdefault(name, []).append(pid)
            # We still want to know which PIDs are using each file
            files[name] = sorted(set(files[name]))
            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))
    return pids if by_pid else files
def pardir():
    """
    Return the relative parent directory path symbol for underlying OS

    .. versionadded:: 2014.7.0

    This can be useful when constructing Salt Formulas.

    .. code-block:: jinja

        {% set pardir = salt['file.pardir']() %}
        {% set final_path = salt['file.join']('subdir', pardir, 'confdir') %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.pardir
    """
    # os.path already knows the platform's parent-dir symbol
    return os.path.pardir
def normpath(path):
    """
    Returns Normalize path, eliminating double slashes, etc.

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.normpath'](tpldir + '/../vars.jinja') import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.normpath 'a/b/c/..'
    """
    # Straight delegation to the stdlib
    return os.path.normpath(path)
def basename(path):
    """
    Returns the final component of a pathname

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- set filename = salt['file.basename'](source_file) %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.basename 'test/test.config'
    """
    # Straight delegation to the stdlib
    return os.path.basename(path)
def dirname(path):
    """
    Returns the directory component of a pathname

    .. versionadded:: 2015.5.0

    This can be useful at the CLI but is frequently useful when scripting.

    .. code-block:: jinja

        {%- from salt['file.dirname'](tpldir) + '/vars.jinja' import parent_vars %}

    CLI Example:

    .. code-block:: bash

        salt '*' file.dirname 'test/path/filename.config'
    """
    # Straight delegation to the stdlib
    return os.path.dirname(path)
def join(*args):
    """
    Return a normalized file system path for the underlying OS

    .. versionadded:: 2014.7.0

    This can be useful at the CLI but is frequently useful when scripting
    combining path variables:

    .. code-block:: jinja

        {% set www_root = '/var' %}
        {% set app_dir = 'myapp' %}

        myapp_config:
          file:
            - managed
            - name: {{ salt['file.join'](www_root, app_dir, 'config.yaml') }}

    CLI Example:

    .. code-block:: bash

        salt '*' file.join '/' 'usr' 'local' 'bin'
    """
    # Straight delegation to the stdlib
    return os.path.join(*args)
def move(src, dst):
    """
    Move a file or directory

    CLI Example:

    .. code-block:: bash

        salt '*' file.move /path/to/src /path/to/dst
    """
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)
    # Relative paths are ambiguous on a minion; require absolute ones
    for label, candidate in (("Source", src), ("Destination", dst)):
        if not os.path.isabs(candidate):
            raise SaltInvocationError("{} path must be absolute.".format(label))
    ret = {
        "result": True,
        "comment": "'{}' moved to '{}'".format(src, dst),
    }
    try:
        shutil.move(src, dst)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to move '{}' to '{}': {}".format(src, dst, exc)
        )
    return ret
def diskusage(path):
    """
    Recursively calculate disk usage of path and return it
    in bytes

    CLI Example:

    .. code-block:: bash

        salt '*' file.diskusage /path/to/check
    """
    # A plain file: just report its own size
    if os.path.isfile(path):
        return os.stat(path).st_size
    total_size = 0
    seen = set()  # inodes already counted, so hardlinked files count once
    for dirpath, dirnames, filenames in salt.utils.path.os_walk(path):
        for fname in filenames:
            try:
                stat_structure = os.stat(os.path.join(dirpath, fname))
            except OSError:
                # File vanished mid-walk; skip it
                continue
            if stat_structure.st_ino not in seen:
                seen.add(stat_structure.st_ino)
                total_size += stat_structure.st_size
    return total_size
# NOTE: removed non-Python residue at this point in the file — stray
# tabular metadata values ("31.522921", "178", "0.560072") left over from
# an automated extraction; they were not part of this module and would be
# a syntax error.
import datetime
import errno
import fnmatch
import glob
import hashlib
import itertools
import logging
import mmap
import operator
import os
import re
import shutil
import stat
import string
import sys
import tempfile
import time
import urllib.parse
from collections import namedtuple
from collections.abc import Iterable, Mapping
from functools import reduce
import salt.utils.args
import salt.utils.atomicfile
import salt.utils.data
import salt.utils.filebuffer
import salt.utils.files
import salt.utils.find
import salt.utils.functools
import salt.utils.hashutils
import salt.utils.http
import salt.utils.itertools
import salt.utils.path
import salt.utils.platform
import salt.utils.stringutils
import salt.utils.templates
import salt.utils.url
import salt.utils.user
import salt.utils.versions
from salt.exceptions import CommandExecutionError, MinionError, SaltInvocationError
from salt.exceptions import get_error_message as _get_error_message
from salt.utils.files import HASHES, HASHES_REVMAP
try:
import grp
import pwd
except ImportError:
pass
log = logging.getLogger(__name__)
# Loader alias: expose the function named ``makedirs_`` under the public
# name ``makedirs``.
__func_alias__ = {"makedirs_": "makedirs"}
# Result pair returned by _cmp_attrs(): attribute chars to add, to remove.
AttrChanges = namedtuple("AttrChanges", "added,removed")
def __virtual__():
    """
    Only load on non-Windows minions; Windows must use win_file instead.
    """
    if not salt.utils.platform.is_windows():
        return True
    return (
        False,
        "The file execution module cannot be loaded: only available on "
        "non-Windows systems - use win_file instead.",
    )
def __clean_tmp(sfn):
    """
    Remove a temporary source file, but only when it is one of ours
    (created under the Salt tempfile prefix) and not inside file_roots.
    """
    prefix = os.path.join(tempfile.gettempdir(), salt.utils.files.TEMPFILE_PREFIX)
    if not sfn.startswith(prefix):
        return
    all_roots = itertools.chain.from_iterable(__opts__["file_roots"].values())
    if any(sfn.startswith(root) for root in all_roots):
        # Never delete files that live in the fileserver roots
        return
    # Only clean up files that exist
    if os.path.exists(sfn):
        os.remove(sfn)
def _error(ret, err_msg):
ret["result"] = False
ret["comment"] = err_msg
return ret
def _binary_replace(old, new):
    """
    Return a diff placeholder string when either *old* or *new* is a
    binary file, or an empty string when both are text.
    """
    old_isbin = not __utils__["files.is_text"](old)
    new_isbin = not __utils__["files.is_text"](new)
    if old_isbin and new_isbin:
        return "Replace binary file"
    if old_isbin:
        return "Replace binary file with text file"
    if new_isbin:
        return "Replace text file with binary file"
    return ""
def _get_bkroot():
    """
    Return the root of the file backup tree under the minion cachedir.
    """
    # Get the cachedir from the minion config
    cachedir = __salt__["config.get"]("cachedir")
    return os.path.join(cachedir, "file_backup")
def _splitlines_preserving_trailing_newline(str):
lines = str.splitlines()
if str.endswith("\n") or str.endswith("\r"):
lines.append("")
return lines
def _chattr_version():
    """
    Return the e2fsprogs version string as reported by ``tune2fs``, or
    None when it cannot be determined (tool missing, or on AIX).
    """
    # There's no really *good* way to get the version of chattr installed;
    # tune2fs ships alongside it, so parse its version banner instead.
    tune2fs = salt.utils.path.which("tune2fs")
    if not tune2fs or salt.utils.platform.is_aix():
        return None
    result = __salt__["cmd.run"]([tune2fs], ignore_retcode=True, python_shell=False)
    match = re.search(
        r"tune2fs (?P<version>[0-9\.]+)",
        salt.utils.stringutils.to_str(result),
    )
    return match.group("version") if match is not None else None
def _chattr_has_extended_attrs():
    """
    Whether the installed chattr supports the extended attribute set
    (e2fsprogs newer than 1.41.12).
    """
    ver = _chattr_version()
    if ver is None:
        return False
    return salt.utils.versions.LooseVersion(ver) > salt.utils.versions.LooseVersion(
        "1.41.12"
    )
def gid_to_group(gid):
    """
    Convert a gid to a group name; the input is handed back unchanged when
    it cannot be resolved.
    """
    try:
        gid = int(gid)
    except ValueError:
        # A group name was passed in; resolve it to a numeric gid first
        gid = group_to_gid(gid)
    if gid == "":
        # Don't even bother to feed it to grp
        return ""
    try:
        return grp.getgrgid(gid).gr_name
    except (KeyError, NameError):
        # Unknown gid, or grp unavailable (non-Unix)
        return gid
def group_to_gid(group):
    """
    Convert a group name to a gid; '' is returned for unknown groups and
    for a None input. Integer input is passed straight through.
    """
    if group is None:
        return ""
    if isinstance(group, int):
        return group
    try:
        return grp.getgrnam(group).gr_gid
    except KeyError:
        return ""
def get_gid(path, follow_symlinks=True):
    """
    Return the numeric gid of *path* (via ``stats()``); -1 when the path
    does not exist.
    """
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get(
        "gid", -1
    )
def get_group(path, follow_symlinks=True):
    """
    Return the group name owning *path* (via ``stats()``); False when the
    path does not exist.
    """
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get(
        "group", False
    )
def uid_to_user(uid):
    """
    Convert a uid to a user name; the uid itself is handed back when it
    cannot be resolved (or when pwd is unavailable, e.g. non-Unix).
    """
    try:
        entry = pwd.getpwuid(uid)
    except (KeyError, NameError):
        return uid
    return entry.pw_name
def user_to_uid(user):
    """
    Convert a user name to a uid; '' is returned for unknown users. A
    None input is resolved to the user the minion is running as. Integer
    input is passed straight through.
    """
    if user is None:
        user = salt.utils.user.get_user()
    if isinstance(user, int):
        return user
    try:
        return pwd.getpwnam(user).pw_uid
    except KeyError:
        return ""
def get_uid(path, follow_symlinks=True):
    """
    Return the numeric uid of *path* (via ``stats()``); -1 when the path
    does not exist.
    """
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get(
        "uid", -1
    )
def get_user(path, follow_symlinks=True):
    """
    Return the user name owning *path* (via ``stats()``); False when the
    path does not exist.
    """
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get(
        "user", False
    )
def get_mode(path, follow_symlinks=True):
    """
    Return the mode of *path* (via ``stats()``); '' when the path does
    not exist.
    """
    return stats(os.path.expanduser(path), follow_symlinks=follow_symlinks).get(
        "mode", ""
    )
def set_mode(path, mode):
    """
    Set the mode of a file and return the resulting mode string, or an
    ``Invalid Mode ...`` message when the mode cannot be applied.

    path
        file or directory on which to set the mode

    mode
        octal mode to apply (e.g. ``0644``)
    """
    path = os.path.expanduser(path)
    # Normalize "0644"/"0o644"-style strings to a bare octal string
    mode = str(mode).lstrip("0Oo")
    if not mode:
        mode = "0"
    if not os.path.exists(path):
        raise CommandExecutionError("{}: File not found".format(path))
    try:
        os.chmod(path, int(mode, 8))
    except (TypeError, ValueError, OSError):
        # Narrowed from a blanket ``except Exception``: a non-octal mode
        # raises ValueError/TypeError, a failed chmod raises OSError
        return "Invalid Mode " + mode
    return get_mode(path)
def lchown(path, user, group):
    """
    Chown a file without following symlinks (the link itself is chowned,
    not its target).

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    Returns the error string when the user or group does not exist,
    otherwise the result of ``os.lchown``.
    """
    path = os.path.expanduser(path)
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    err = ""
    if uid == "":
        if user:
            err += "User does not exist\n"
        else:
            uid = -1
    if gid == "":
        if group:
            err += "Group does not exist\n"
        else:
            gid = -1
    if err:
        # Bug fix: previously the error string was built but ignored, and
        # os.lchown() was then called with uid/gid still set to '',
        # raising TypeError. Report the error like chown() does instead.
        return err
    return os.lchown(path, uid, gid)
def chown(path, user, group):
    """
    Chown a file to the desired user and group.

    path
        path to the file or directory

    user
        user owner

    group
        group owner

    Returns an error string when the user/group does not exist or the
    file is missing, otherwise the result of ``os.chown``.
    """
    path = os.path.expanduser(path)
    uid = user_to_uid(user)
    gid = group_to_gid(group)
    problems = []
    if uid == "":
        if user:
            problems.append("User does not exist\n")
        else:
            uid = -1
    if gid == "":
        if group:
            problems.append("Group does not exist\n")
        else:
            gid = -1
    if not os.path.exists(path):
        # A dangling symlink can still be chowned via lchown
        try:
            return os.lchown(path, uid, gid)
        except OSError:
            pass
        problems.append("File not found")
    if problems:
        return "".join(problems)
    return os.chown(path, uid, gid)
def chgrp(path, group):
    """
    Change the group of a file, keeping its current user owner.

    path
        path to the file or directory

    group
        group owner
    """
    path = os.path.expanduser(path)
    # Re-use chown() with the file's existing user so only the group moves
    return chown(path, get_user(path), group)
def _cmp_attrs(path, attrs):
    """
    Compare the chattr attributes requested in *attrs* with the ones
    currently set on *path*. Returns AttrChanges(added, removed) with
    None members when nothing changes, or None when attributes are
    unsupported (AIX, or lsattr unavailable).
    """
    if salt.utils.platform.is_aix():
        return None
    try:
        current = set(lsattr(path).get(path, ""))
    except AttributeError:
        # lsattr() returned None (tool missing)
        return None
    wanted = set(attrs)
    if "e" in current:
        # An 'e' flag already on disk is carried into the target set so
        # it is never reported as a removal
        wanted.add("e")
    return AttrChanges(
        added="".join(wanted - current) or None,
        removed="".join(current - wanted) or None,
    )
def lsattr(path):
    """
    Return a mapping of file name -> list of chattr attribute characters
    for *path*, or None when lsattr is unavailable or on AIX. Raises
    SaltInvocationError when *path* does not exist.
    """
    if not salt.utils.path.which("lsattr") or salt.utils.platform.is_aix():
        return None
    if not os.path.exists(path):
        raise SaltInvocationError("File or directory does not exist: " + path)
    result = __salt__["cmd.run"](
        ["lsattr", path], ignore_retcode=True, python_shell=False
    )
    results = {}
    for line in result.splitlines():
        if line.startswith("lsattr: "):
            # Error output from lsattr itself; skip it
            continue
        attrs, file_name = line.split(None, 1)
        # Newer e2fsprogs expose a wider attribute alphabet
        if _chattr_has_extended_attrs():
            pattern = r"[aAcCdDeijPsStTu]"
        else:
            pattern = r"[acdijstuADST]"
        results[file_name] = re.findall(pattern, attrs)
    return results
def chattr(*files, **kwargs):
    """
    Change the attributes of one or more files via chattr(1).

    Keyword arguments:

    operator
        'add' or 'remove' (required)
    attributes
        Attribute characters to add/remove, e.g. 'ai' (required)
    flags
        Extra chattr flags, passed as ``-<flags>``
    version
        Value for chattr's ``-v`` option

    Returns True when chattr produced no output, False otherwise.
    """
    operator = kwargs.pop("operator", None)
    attributes = kwargs.pop("attributes", None)
    flags = kwargs.pop("flags", None)
    version = kwargs.pop("version", None)
    if operator not in ("add", "remove"):
        raise SaltInvocationError(
            "Need an operator: 'add' or 'remove' to modify attributes."
        )
    if attributes is None:
        raise SaltInvocationError("Need attributes: [aAcCdDeijPsStTu]")
    sign = "+" if operator == "add" else "-"
    cmd = ["chattr", "{}{}".format(sign, attributes)]
    if flags is not None:
        cmd.append("-{}".format(flags))
    if version is not None:
        cmd.extend(["-v", version])
    cmd.extend(files)
    result = __salt__["cmd.run"](cmd, python_shell=False)
    return not bool(result)
def get_sum(path, form="sha256"):
    """
    Return the checksum of the file at *path* using hash algorithm *form*
    (read in 4096-byte chunks), or the string 'File not found'.
    """
    full_path = os.path.expanduser(path)
    if os.path.isfile(full_path):
        return salt.utils.hashutils.get_hash(full_path, form, 4096)
    return "File not found"
def get_hash(path, form="sha256", chunk_size=65536):
    """
    Return the hash of the file at *path*, computed with algorithm *form*,
    reading the file *chunk_size* bytes at a time.
    """
    return salt.utils.hashutils.get_hash(os.path.expanduser(path), form, chunk_size)
def get_source_sum(
    file_name="",
    source="",
    source_hash=None,
    source_hash_name=None,
    saltenv="base",
    verify_ssl=True,
):
    """
    Resolve a ``source_hash`` specification into a hash type and digest.

    file_name
        Name of the managed file; used to pick the matching line out of a
        hash file.

    source
        Source URI of the managed file; also used to match hash-file lines.

    source_hash
        Either a hash expression (``<hash>`` or ``<hash_type>=<hash>``) or
        the path/URI of a local or remote file containing hashes.

    source_hash_name
        Explicit name to search for when ``source_hash`` is a hash file.

    saltenv
        Fileserver environment used when caching a remote hash file.

    verify_ssl
        Passed through to ``cp.cache_file`` when fetching a remote hash
        file.

    Returns a dict with ``hash_type`` and ``hsum`` keys; raises
    CommandExecutionError when the specification is invalid.
    """

    def _invalid_source_hash_format():
        # All invalid-format paths funnel through here so the long error
        # message only has to be defined once.
        raise CommandExecutionError(
            "Source hash {} format is invalid. The supported formats are: "
            "1) a hash, 2) an expression in the format <hash_type>=<hash>, or "
            "3) either a path to a local file containing hashes, or a URI of "
            "a remote hash file. Supported protocols for remote hash files "
            "are: {}. The hash may also not be of a valid length, the "
            "following are supported hash types and lengths: {}.".format(
                source_hash,
                ", ".join(salt.utils.files.VALID_PROTOS),
                ", ".join(
                    [
                        "{} ({})".format(HASHES_REVMAP[x], x)
                        for x in sorted(HASHES_REVMAP)
                    ]
                ),
            )
        )

    hash_fn = None
    if os.path.isabs(source_hash):
        # An absolute path is treated as a local hash file
        hash_fn = source_hash
    else:
        try:
            proto = urllib.parse.urlparse(source_hash).scheme
            if proto in salt.utils.files.VALID_PROTOS:
                hash_fn = __salt__["cp.cache_file"](
                    source_hash, saltenv, verify_ssl=verify_ssl
                )
                if not hash_fn:
                    raise CommandExecutionError(
                        "Source hash file {} not found".format(source_hash)
                    )
            else:
                if proto != "":
                    # A non-empty but unsupported protocol was given. A
                    # bare hash expression
                    # (like md5=<md5 checksum here>), parses with an empty
                    # protocol will be an empty string, in which case we avoid
                    # this error condition.
                    _invalid_source_hash_format()
        except (AttributeError, TypeError):
            _invalid_source_hash_format()

    if hash_fn is not None:
        # Hash file: let extract_hash() find the matching line
        ret = extract_hash(hash_fn, "", file_name, source, source_hash_name)
        if ret is None:
            _invalid_source_hash_format()
        ret["hsum"] = ret["hsum"].lower()
        return ret
    else:
        # The source_hash is a hash expression
        ret = {}
        try:
            ret["hash_type"], ret["hsum"] = [
                x.strip() for x in source_hash.split("=", 1)
            ]
        except AttributeError:
            _invalid_source_hash_format()
        except ValueError:
            # No hash type, try to figure out by hash length
            if not re.match("^[{}]+$".format(string.hexdigits), source_hash):
                _invalid_source_hash_format()
            ret["hsum"] = source_hash
            source_hash_len = len(source_hash)
            if source_hash_len in HASHES_REVMAP:
                ret["hash_type"] = HASHES_REVMAP[source_hash_len]
            else:
                _invalid_source_hash_format()

        if ret["hash_type"] not in HASHES:
            raise CommandExecutionError(
                "Invalid hash type '{}'. Supported hash types are: {}. "
                "Either remove the hash type and simply use '{}' as the "
                "source_hash, or change the hash type to a supported type.".format(
                    ret["hash_type"], ", ".join(HASHES), ret["hsum"]
                )
            )
        else:
            # Cross-check the digest length against the declared hash type
            hsum_len = len(ret["hsum"])
            if hsum_len not in HASHES_REVMAP:
                _invalid_source_hash_format()
            elif hsum_len != HASHES[ret["hash_type"]]:
                raise CommandExecutionError(
                    "Invalid length ({}) for hash type '{}'. Either "
                    "remove the hash type and simply use '{}' as the "
                    "source_hash, or change the hash type to '{}'".format(
                        hsum_len,
                        ret["hash_type"],
                        ret["hsum"],
                        HASHES_REVMAP[hsum_len],
                    )
                )

        ret["hsum"] = ret["hsum"].lower()
        return ret
def check_hash(path, file_hash):
    """
    Check if a file matches the given hash string. *file_hash* may be
    ``<hash_type>:<hash>``, ``<hash_type>=<hash>``, or a bare hash whose
    algorithm is inferred from its length. Returns True on a match.
    """
    path = os.path.expanduser(path)
    if not isinstance(file_hash, str):
        raise SaltInvocationError("hash must be a string")
    hash_type = None
    hash_value = file_hash
    for sep in (":", "="):
        if sep in file_hash:
            hash_type, hash_value = file_hash.split(sep, 1)
            break
    if hash_type is None:
        # No separator: infer the algorithm from the digest length
        hash_len = len(file_hash)
        hash_type = HASHES_REVMAP.get(hash_len)
        if hash_type is None:
            raise SaltInvocationError(
                "Hash {} (length: {}) could not be matched to a supported "
                "hash type. The supported hash types and lengths are: "
                "{}".format(
                    file_hash,
                    hash_len,
                    ", ".join(
                        [
                            "{} ({})".format(HASHES_REVMAP[x], x)
                            for x in sorted(HASHES_REVMAP)
                        ]
                    ),
                )
            )
    return get_hash(path, hash_type) == hash_value
def find(path, *args, **kwargs):
    """
    Approximate the Unix ``find(1)`` command: return a sorted list of
    matching paths, or an ``error: ...`` string when the criteria are
    invalid.
    """
    # Bare 'delete'/'print' options map to their default forms
    if "delete" in args:
        kwargs["delete"] = "f"
    elif "print" in args:
        kwargs["print"] = "path"
    try:
        finder = salt.utils.find.Finder(kwargs)
    except ValueError as ex:
        return "error: {}".format(ex)
    matches = []
    for expanded in glob.glob(os.path.expanduser(path)):
        matches.extend(finder.find(expanded))
    matches.sort()
    return matches
def _sed_esc(string, escape_all=False):
special_chars = "^.[$()|*+?{"
string = string.replace("'", "'\"'\"'").replace("/", "\\/")
if escape_all is True:
for char in special_chars:
string = string.replace(char, "\\" + char)
return string
def sed(
    path,
    before,
    after,
    limit="",
    backup=".bak",
    options="-r -e",
    flags="g",
    escape_all=False,
    negate_match=False,
):
    """
    Make a simple edit to a file using sed (largely inspired by Fabric's
    contrib.files.sed()).

    path
        The full path to the file to be edited
    before
        A pattern to find in order to replace with ``after``
    after
        Text that will replace ``before``
    limit
        An initial pattern to search for before searching for ``before``
    backup
        The suffix for the in-place backup copy ('' for no backup)
    options
        Options passed to sed
    flags
        Flags appended to the sed substitution expression
    escape_all
        Escape all regex special characters in the patterns
    negate_match
        Negate the match with sed's ``!`` operator

    Returns False when the file does not exist, otherwise the
    ``cmd.run_all`` result of the sed invocation.
    """
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    before = _sed_esc(str(before), escape_all)
    after = _sed_esc(str(after), escape_all)
    limit = _sed_esc(limit, escape_all)
    if sys.platform == "darwin":
        # BSD sed spells extended-regex support -E instead of GNU's -r
        options = options.replace("-r", "-E")
    cmd = ["sed", "-i{}".format(backup) if backup else "-i"]
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        r"{limit}{negate_match}s/{before}/{after}/{flags}".format(
            limit="/{}/ ".format(limit) if limit else "",
            negate_match="!" if negate_match else "",
            before=before,
            after=after,
            flags=flags,
        )
    )
    cmd.append(path)
    return __salt__["cmd.run_all"](cmd, python_shell=False)
def sed_contains(path, text, limit="", flags="g"):
    """
    Return True when the file at *path* contains *text*, checked by
    running a sed print command over the file.
    """
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False
    before = _sed_esc(str(text), False)
    limit = _sed_esc(str(limit), False)
    options = "-n -r -e"
    if sys.platform == "darwin":
        # BSD sed spells extended-regex support -E instead of GNU's -r
        options = options.replace("-r", "-E")
    cmd = ["sed"]
    cmd.extend(salt.utils.args.shlex_split(options))
    cmd.append(
        r"{limit}s/{before}/$/{flags}".format(
            limit="/{}/ ".format(limit) if limit else "",
            before=before,
            flags="p{}".format(flags),
        )
    )
    cmd.append(path)
    # Any output from the print command means the pattern matched
    return bool(__salt__["cmd.run"](cmd, python_shell=False))
def psed(
    path,
    before,
    after,
    limit="",
    backup=".bak",
    flags="gMS",
    escape_all=False,
    multi=False,
):
    """
    Make a simple edit to a file: a pure-Python sed replacement.

    path
        The full path to the file to be edited
    before
        A pattern to find in order to replace with ``after``
    after
        Text that will replace ``before``
    limit
        An initial pattern to search for before searching for ``before``
    backup
        The suffix for the backup copy written before editing
    flags
        Regex flags (see RE_FLAG_TABLE) plus ``g`` for replace-all
    escape_all
        Escape all regex special characters in ``before``
    multi
        Process the file line-by-line instead of as one string
    """
    # Largely inspired by Fabric's contrib.files.sed()
    path = os.path.expanduser(path)
    multi = bool(multi)
    before = str(before)
    after = str(after)
    before = _sed_esc(before, escape_all)
    limit = _sed_esc(limit, escape_all)
    shutil.copy2(path, "{}{}".format(path, backup))
    with salt.utils.files.fopen(path, "w") as ofile:
        with salt.utils.files.fopen("{}{}".format(path, backup), "r") as ifile:
            if multi is True:
                # Bug fix: this previously iterated ``ifile.readline()``,
                # i.e. the characters of the first line only, instead of
                # iterating the file's lines.
                for line in ifile:
                    ofile.write(
                        salt.utils.stringutils.to_str(
                            _psed(
                                salt.utils.stringutils.to_unicode(line),
                                before,
                                after,
                                limit,
                                flags,
                            )
                        )
                    )
            else:
                ofile.write(
                    salt.utils.stringutils.to_str(
                        _psed(
                            salt.utils.stringutils.to_unicode(ifile.read()),
                            before,
                            after,
                            limit,
                            flags,
                        )
                    )
                )
# Map of single-character flag names (as accepted in psed/_psed ``flags``)
# to the corresponding ``re`` module flag constants.
RE_FLAG_TABLE = {"I": re.I, "L": re.L, "M": re.M, "S": re.S, "U": re.U, "X": re.X}
def _psed(text, before, after, limit, flags):
atext = text
if limit:
limit = re.compile(limit)
comps = text.split(limit)
atext = "".join(comps[1:])
count = 1
if "g" in flags:
count = 0
flags = flags.replace("g", "")
aflags = 0
for flag in flags:
aflags |= RE_FLAG_TABLE[flag]
before = re.compile(before, flags=aflags)
text = re.sub(before, after, atext, count=count)
return text
def uncomment(path, regex, char="#", backup=".bak"):
    """
    Uncomment lines in a file matching *regex*; thin wrapper around
    ``comment_line`` with ``cmnt=False``.

    path
        The full path to the file to be edited
    regex
        A regular expression matching the commented lines
    char
        The character prefix that comments a line
    backup
        Backup extension for the pre-edit copy
    """
    return comment_line(path=path, regex=regex, char=char, cmnt=False, backup=backup)
def comment(path, regex, char="#", backup=".bak"):
    """
    Comment out lines in a file matching *regex*; thin wrapper around
    ``comment_line`` with ``cmnt=True``.

    path
        The full path to the file to be edited
    regex
        A regular expression matching the lines to comment
    char
        The character prefix used to comment a line
    backup
        Backup extension for the pre-edit copy
    """
    return comment_line(path=path, regex=regex, char=char, cmnt=True, backup=backup)
def comment_line(path, regex, char="#", cmnt=True, backup=".bak"):
    """
    Comment or uncomment lines in a text file.

    path
        The full path to the file to be edited.

    regex
        Regular expression identifying the target lines; anchoring is
        normalized below depending on *cmnt*.

    char
        The character(s) that comment a line.

    cmnt
        True to comment matching lines, False to uncomment them.

    backup
        Extension for the pre-edit backup copy; falsy to skip the backup.

    Returns a diff of the matched lines before/after the edit, or False
    when no line matched.
    """
    # Normalize the regex: when commenting, group the pattern as given;
    # when uncommenting, look for the comment char followed by the pattern.
    if cmnt:
        regex = "{}({}){}".format(
            "^" if regex.startswith("^") else "",
            regex.lstrip("^").rstrip("$"),
            "$" if regex.endswith("$") else "",
        )
    else:
        regex = r"^{}\s*({}){}".format(
            char, regex.lstrip("^").rstrip("$"), "$" if regex.endswith("$") else ""
        )
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        raise SaltInvocationError("File not found: {}".format(path))
    if not __utils__["files.is_text"](path):
        raise SaltInvocationError(
            "Cannot perform string replacements on a binary file: {}".format(path)
        )
    # First pass: collect matching lines to build the before/after diff
    found = False
    orig_file = []
    new_file = []
    bufsize = os.path.getsize(path)
    try:
        with salt.utils.files.fopen(path, mode="rb", buffering=bufsize) as r_file:
            for line in r_file:
                line = salt.utils.stringutils.to_unicode(line)
                if re.match(regex, line):
                    orig_file.append(line)
                    if cmnt:
                        new_file.append("{}{}".format(char, line))
                    else:
                        new_file.append(line.lstrip(char))
                    found = True
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to open file '{}'. Exception: {}".format(path, exc)
        )
    if not found:
        # No matching line: nothing to do
        return False
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # Second pass: rewrite the file from a temp copy so a failure cannot
    # silently truncate the original
    try:
        temp_file = _mkstemp_copy(path=path, preserve_inode=False)
    except OSError as exc:
        raise CommandExecutionError("Exception: {}".format(exc))
    try:
        mode = "w"
        with salt.utils.files.fopen(path, mode=mode, buffering=bufsize) as w_file:
            try:
                with salt.utils.files.fopen(
                    temp_file, mode="rb", buffering=bufsize
                ) as r_file:
                    for line in r_file:
                        line = salt.utils.stringutils.to_unicode(line)
                        try:
                            if re.match(regex, line):
                                if cmnt:
                                    wline = "{}{}".format(char, line)
                                else:
                                    wline = line.lstrip(char)
                            else:
                                wline = line
                            wline = salt.utils.stringutils.to_str(wline)
                            w_file.write(wline)
                        except OSError as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{}'. "
                                "Exception: {}".format(path, temp_file, exc)
                            )
            except OSError as exc:
                raise CommandExecutionError("Exception: {}".format(exc))
    except OSError as exc:
        raise CommandExecutionError("Exception: {}".format(exc))
    if backup:
        # Keep the pre-edit copy as <path><backup>
        backup_name = "{}{}".format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to move the temp file '{}' to the "
                "backup file '{}'. "
                "Exception: {}".format(path, temp_file, exc)
            )
    else:
        os.remove(temp_file)
    if not salt.utils.platform.is_windows():
        # Restore pre-edit ownership/mode
        check_perms(path, None, pre_user, pre_group, pre_mode)
    return __utils__["stringutils.get_diff"](orig_file, new_file)
def _get_flags(flags):
if isinstance(flags, str):
flags = [flags]
if isinstance(flags, Iterable) and not isinstance(flags, Mapping):
_flags_acc = []
for flag in flags:
_flag = getattr(re, str(flag).upper())
if not isinstance(_flag, int):
raise SaltInvocationError("Invalid re flag given: {}".format(flag))
_flags_acc.append(_flag)
return reduce(operator.__or__, _flags_acc)
elif isinstance(flags, int):
return flags
else:
raise SaltInvocationError(
'Invalid re flags: "{}", must be given either as a single flag '
"string, a list of strings, or as an integer".format(flags)
)
def _add_flags(flags, new_flags):
    """
    Combine two flag specifications (any form accepted by ``_get_flags``)
    into a single integer flag value.
    """
    return _get_flags(flags) | _get_flags(new_flags)
def _mkstemp_copy(path, preserve_inode=True):
    """
    Create a temp file and move/copy the contents of ``path`` into it,
    returning the temp file's path.

    path
        The file whose contents are transferred to the temp file.

    preserve_inode
        When True the original file is left in place (copied); when False
        it is moved, so the original inode disappears.
    """
    try:
        temp_file = salt.utils.files.mkstemp(prefix=salt.utils.files.TEMPFILE_PREFIX)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to create temp file. Exception: {}".format(exc)
        )
    # Copy keeps the source (and its inode) intact; move does not
    transfer = shutil.copy2 if preserve_inode else shutil.move
    verb = "copy" if preserve_inode else "move"
    try:
        transfer(path, temp_file)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to {} file '{}' to the temp file '{}'. Exception: {}".format(
                verb, path, temp_file, exc
            )
        )
    return temp_file
def _regex_to_static(src, regex):
if not src or not regex:
return None
try:
compiled = re.compile(regex, re.DOTALL)
src = [line for line in src if compiled.search(line) or line.count(regex)]
except Exception as ex:
raise CommandExecutionError("{}: '{}'".format(_get_error_message(ex), regex))
return src
def _assert_occurrence(probe, target, amount=1):
occ = len(probe)
if occ > amount:
msg = "more than"
elif occ < amount:
msg = "less than"
elif not occ:
msg = "no"
else:
msg = None
if msg:
raise CommandExecutionError(
'Found {} expected occurrences in "{}" expression'.format(msg, target)
)
return occ
def _set_line_indent(src, line, indent):
if not indent:
return line
idt = []
for c in src:
if c not in ["\t", " "]:
break
idt.append(c)
return "".join(idt) + line.lstrip()
def _get_eol(line):
match = re.search("((?<!\r)\n|\r(?!\n)|\r\n)$", line)
return match and match.group() or ""
def _set_line_eol(src, line):
    """
    Return *line* terminated with the EOL style found on *src*, falling
    back to ``os.linesep`` when *src* carries none.
    """
    eol = _get_eol(src) or os.linesep
    return line.rstrip() + eol
def _set_line(
    lines,
    content=None,
    match=None,
    mode=None,
    location=None,
    before=None,
    after=None,
    indent=True,
):
    """
    Drive the line-editing logic for ``line()``: return a new list of
    lines with *content* inserted/ensured/replaced/deleted per *mode*.

    lines
        Current lines of the file.

    content
        The line content to apply (may be None only for mode='delete').

    match
        Fragment/regex identifying the target line; defaults to *content*
        when no before/after anchors are given.

    mode
        One of 'insert', 'ensure', 'delete', 'replace'.

    location
        'start' or 'end' (insert mode only); overrides before/after.

    before / after
        Regex/fragment anchors positioning the content.

    indent
        Re-indent *content* to match the line it is placed next to.
    """
    if mode not in ("insert", "ensure", "delete", "replace"):
        if mode is None:
            raise CommandExecutionError(
                "Mode was not defined. How to process the file?"
            )
        else:
            raise CommandExecutionError("Unknown mode: {}".format(mode))
    if mode != "delete" and content is None:
        raise CommandExecutionError("Content can only be empty if mode is delete")
    if not match and before is None and after is None:
        match = content
    # Resolve the regex/fragment anchors to the actual matching lines
    after = _regex_to_static(lines, after)
    before = _regex_to_static(lines, before)
    match = _regex_to_static(lines, match)
    if not lines and mode in ("delete", "replace"):
        log.warning("Cannot find text to %s. File is empty.", mode)
        lines = []
    elif mode == "delete" and match:
        lines = [line for line in lines if line != match[0]]
    elif mode == "replace" and match:
        idx = lines.index(match[0])
        original_line = lines.pop(idx)
        # Keep the replaced line's indentation when requested
        lines.insert(idx, _set_line_indent(original_line, content, indent))
    elif mode == "insert":
        if before is None and after is None and location is None:
            raise CommandExecutionError(
                'On insert either "location" or "before/after" conditions are'
                " required.",
            )
        if location:
            # An explicit location wins over before/after anchors
            if location == "end":
                if lines:
                    lines.append(_set_line_indent(lines[-1], content, indent))
                else:
                    lines.append(content)
            elif location == "start":
                if lines:
                    lines.insert(0, _set_line_eol(lines[0], content))
                else:
                    lines = [content + os.linesep]
        else:
            if before and after:
                _assert_occurrence(before, "before")
                _assert_occurrence(after, "after")
                first = lines.index(after[0])
                last = lines.index(before[0])
                lines.insert(last, _set_line_indent(lines[last], content, indent))
            elif after:
                _assert_occurrence(after, "after")
                idx = lines.index(after[0])
                next_line = None if idx + 1 >= len(lines) else lines[idx + 1]
                # Skip the insert when the following line already carries
                # the content (modulo line ending)
                if next_line is None or next_line.rstrip("\r\n") != content.rstrip(
                    "\r\n"
                ):
                    lines.insert(idx + 1, _set_line_indent(lines[idx], content, indent))
            elif before:
                _assert_occurrence(before, "before")
                idx = lines.index(before[0])
                prev_line = lines[idx - 1]
                if prev_line.rstrip("\r\n") != content.rstrip("\r\n"):
                    lines.insert(idx, _set_line_indent(lines[idx], content, indent))
            else:
                raise CommandExecutionError("Neither before or after was found in file")
    elif mode == "ensure":
        if before and after:
            _assert_occurrence(after, "after")
            _assert_occurrence(before, "before")
            after_index = lines.index(after[0])
            before_index = lines.index(before[0])
            already_there = any(line.lstrip() == content for line in lines)
            if not already_there:
                if after_index + 1 == before_index:
                    lines.insert(
                        after_index + 1,
                        _set_line_indent(lines[after_index], content, indent),
                    )
                elif after_index + 2 == before_index:
                    # Exactly one line between the anchors: overwrite it.
                    # TODO(review): unclear if replacing is the intended
                    # behavior -W. Werner, 2019-06-28
                    lines[after_index + 1] = _set_line_indent(
                        lines[after_index], content, indent
                    )
                else:
                    raise CommandExecutionError(
                        "Found more than one line between boundaries"
                        ' "before" and "after".'
                    )
        elif before:
            _assert_occurrence(before, "before")
            before_index = lines.index(before[0])
            if before_index == 0 or lines[before_index - 1].rstrip(
                "\r\n"
            ) != content.rstrip("\r\n"):
                lines.insert(
                    before_index,
                    _set_line_indent(lines[before_index - 1], content, indent),
                )
        elif after:
            _assert_occurrence(after, "after")
            after_index = lines.index(after[0])
            is_last_line = after_index + 1 >= len(lines)
            if is_last_line or lines[after_index + 1].rstrip("\r\n") != content.rstrip(
                "\r\n"
            ):
                lines.insert(
                    after_index + 1,
                    _set_line_indent(lines[after_index], content, indent),
                )
        else:
            raise CommandExecutionError(
                "Wrong conditions? Unable to ensure line without knowing where"
                " to put it before and/or after."
            )
    return lines
def line(
    path,
    content=None,
    match=None,
    mode=None,
    location=None,
    before=None,
    after=None,
    show_changes=True,
    backup=False,
    quiet=False,
    indent=True,
):
    """
    Edit a line in the configured file.

    path
        Filesystem path to the file to be edited.

    content
        Content of the line. Allowed to be empty only if mode is 'delete'.

    match
        Fragment or regular expression identifying the target line;
        defaults to ``content`` when no ``before``/``after`` is given.

    mode
        One of 'insert', 'ensure', 'delete' or 'replace'.

    location
        For 'insert' mode only: 'start' or 'end' of the file; overrides
        ``before``/``after``.

    before
        Regex/fragment of the line before which the content is placed.

    after
        Regex/fragment of the line after which the content is placed.

    show_changes
        Return a diff of the changes (True) or just a changed boolean.

    backup
        Create a timestamped backup copy before changing anything.

    quiet
        Return False instead of raising when the file does not exist.

    indent
        Keep the indentation of the neighboring line for the new content.

    CLI Example:

    .. code-block:: bash

        salt '*' file.line /etc/cron.d/job mode='delete' match='job_name'
    """
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.isfile(path):
        if not quiet:
            raise CommandExecutionError(
                'File "{}" does not exists or is not a file.'.format(path)
            )
        return False  # No changes had happened
    mode = mode and mode.lower() or mode
    if mode not in ["insert", "ensure", "delete", "replace"]:
        if mode is None:
            raise CommandExecutionError(
                "Mode was not defined. How to process the file?"
            )
        else:
            raise CommandExecutionError('Unknown mode: "{}"'.format(mode))
    # We've set the content to be empty in the function params but we want
    # to make sure the user supplied content for every mode except delete.
    # Bug fix: this list was previously misspelled ``mpty_content_modes``,
    # which made the following check raise NameError.
    empty_content_modes = ["delete"]
    if mode not in empty_content_modes and content is None:
        raise CommandExecutionError(
            'Content can only be empty if mode is "{}"'.format(
                ", ".join(empty_content_modes)
            )
        )
    del empty_content_modes
    if before is None and after is None and not match:
        match = content
    with salt.utils.files.fopen(path, mode="r") as fp_:
        body = salt.utils.data.decode_list(fp_.readlines())
    body_before = hashlib.sha256(
        salt.utils.stringutils.to_bytes("".join(body))
    ).hexdigest()
    # Append an empty marker line when the file ends with an EOL so the
    # editing logic in _set_line does not need a special final-line case
    if body and _get_eol(body[-1]):
        body.append("")
    if os.stat(path).st_size == 0 and mode in ("delete", "replace"):
        log.warning("Cannot find text to %s. File '%s' is empty.", mode, path)
        body = []
    body = _set_line(
        lines=body,
        content=content,
        match=match,
        mode=mode,
        location=location,
        before=before,
        after=after,
        indent=indent,
    )
    if body:
        # Normalize line endings on every line except the last one
        for idx, line in enumerate(body):
            if not _get_eol(line) and idx + 1 < len(body):
                prev = idx and idx - 1 or 1
                body[idx] = _set_line_eol(body[prev], line)
        # Drop the empty marker line added above
        if "" == body[-1]:
            body.pop()
    changed = (
        body_before
        != hashlib.sha256(salt.utils.stringutils.to_bytes("".join(body))).hexdigest()
    )
    if backup and changed and __opts__["test"] is False:
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=True)
            shutil.move(
                temp_file,
                "{}.{}".format(
                    path, time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
                ),
            )
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to create the backup file of {}. Exception: {}".format(
                    path, exc
                )
            )
    changes_diff = None
    if changed:
        if show_changes:
            with salt.utils.files.fopen(path, "r") as fp_:
                path_content = salt.utils.data.decode_list(fp_.read().splitlines(True))
            changes_diff = __utils__["stringutils.get_diff"](path_content, body)
        if __opts__["test"] is False:
            fh_ = None
            try:
                mode = "w"
                body = salt.utils.data.decode_list(body, to_str=True)
                fh_ = salt.utils.atomicfile.atomic_open(path, mode)
                fh_.writelines(body)
            finally:
                if fh_:
                    fh_.close()
    return show_changes and changes_diff or changed
def replace(
    path,
    pattern,
    repl,
    count=0,
    flags=8,
    bufsize=1,
    append_if_not_found=False,
    prepend_if_not_found=False,
    not_found_content=None,
    backup=".bak",
    dry_run=False,
    search_only=False,
    show_changes=True,
    ignore_if_missing=False,
    preserve_inode=True,
    backslash_literal=False,
):
    """
    Replace occurrences of a regex ``pattern`` in the file at ``path`` with
    ``repl``, optionally keeping a backup copy of the original file.

    path
        File to modify. Symlinks are resolved; when a symlink was given, a
        matching backup symlink is created alongside the backup file.
    pattern
        Regular expression, compiled with ``flags`` (default 8 == MULTILINE).
    repl
        Replacement text (coerced to bytes).
    count
        Maximum number of substitutions; 0 means unlimited.
    bufsize
        Read buffer size, or ``'file'`` to use the whole file size.
    append_if_not_found / prepend_if_not_found
        When the pattern never matched, add ``not_found_content`` (falling
        back to ``repl``) at the end / beginning of the file.
    backup
        Extension for the backup copy, or ``False`` to keep no backup.
    dry_run
        Compute changes but do not write them.
    search_only
        Only report whether the pattern matches; returns a bool.
    show_changes
        Return a unified diff instead of a plain changed flag.
    ignore_if_missing
        Return ``False`` instead of raising when the file does not exist.
    preserve_inode
        Write via copy/restore so the original inode (and hard links) are
        preserved.
    backslash_literal
        Treat backslashes in ``repl`` literally instead of as regex escapes.

    Returns a bool in ``search_only`` mode; otherwise a diff string when
    ``show_changes`` is set, else a bool indicating whether changes were made.
    Raises SaltInvocationError for invalid arguments and
    CommandExecutionError on I/O failures.
    """
    symlink = False
    if is_link(path):
        symlink = True
        target_path = os.readlink(path)
        given_path = os.path.expanduser(path)
    path = os.path.realpath(os.path.expanduser(path))
    if not os.path.exists(path):
        if ignore_if_missing:
            return False
        else:
            raise SaltInvocationError("File not found: {}".format(path))
    if not __utils__["files.is_text"](path):
        raise SaltInvocationError(
            "Cannot perform string replacements on a binary file: {}".format(path)
        )
    if search_only and (append_if_not_found or prepend_if_not_found):
        raise SaltInvocationError(
            "search_only cannot be used with append/prepend_if_not_found"
        )
    if append_if_not_found and prepend_if_not_found:
        raise SaltInvocationError(
            "Only one of append and prepend_if_not_found is permitted"
        )
    flags_num = _get_flags(flags)
    cpattern = re.compile(salt.utils.stringutils.to_bytes(pattern), flags_num)
    filesize = os.path.getsize(path)
    if bufsize == "file":
        bufsize = filesize
    # Search and replace locals
    has_changes = False
    orig_file = []  # used for diff output
    new_file = []
    if not salt.utils.platform.is_windows():
        pre_user = get_user(path)
        pre_group = get_group(path)
        pre_mode = salt.utils.files.normalize_mode(get_mode(path))
    # The file is read in binary mode; keep the replacement as bytes too.
    repl = salt.utils.stringutils.to_bytes(str(repl))
    if not_found_content:
        not_found_content = salt.utils.stringutils.to_bytes(not_found_content)
    found = False
    temp_file = None
    content = (
        salt.utils.stringutils.to_unicode(not_found_content)
        if not_found_content and (prepend_if_not_found or append_if_not_found)
        else salt.utils.stringutils.to_unicode(repl)
    )
    try:
        # First pass: detect matches / compute the replaced content.
        r_data = None
        with salt.utils.files.fopen(path, mode="rb", buffering=bufsize) as r_file:
            try:
                # mmap throws ValueError for an empty file and may fail on
                # special files; fall back to reading the file into memory.
                r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
            except (ValueError, OSError):
                # FIX: the file is open in binary mode, so its lines are
                # bytes; joining them with a str separator raised TypeError
                # for any non-empty file on this fallback path.
                r_data = salt.utils.stringutils.to_bytes(b"".join(r_file))
            if search_only:
                # Just search; bail out early
                if re.search(cpattern, r_data):
                    return True
                else:
                    return False
            else:
                result, nrepl = re.subn(
                    cpattern,
                    repl.replace("\\", "\\\\") if backslash_literal else repl,
                    r_data,
                    count,
                )
                # found anything? (even if no change)
                if nrepl > 0:
                    found = True
                    # NOTE(review): pattern is str and repl is bytes here, so
                    # pattern != repl is always True; has_changes is set
                    # whenever a match occurred — confirm intended semantics.
                    has_changes = True if pattern != repl else has_changes
                if prepend_if_not_found or append_if_not_found:
                    # Pattern not found and content is to be added; check if
                    # the exact content line already exists to stay idempotent.
                    if re.search(
                        salt.utils.stringutils.to_bytes(
                            "^{}($|(?=\r\n))".format(re.escape(content))
                        ),
                        r_data,
                        flags=flags_num,
                    ):
                        # Content was found, so set found.
                        found = True
                orig_file = (
                    r_data.read(filesize).splitlines(True)
                    if isinstance(r_data, mmap.mmap)
                    else r_data.splitlines(True)
                )
                new_file = result.splitlines(True)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to open file '{}'. Exception: {}".format(path, exc)
        )
    finally:
        if r_data and isinstance(r_data, mmap.mmap):
            r_data.close()
    if has_changes and not dry_run:
        # Second pass: write the replaced content via a temp-file copy so the
        # original inode is preserved when preserve_inode is set.
        try:
            temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode)
        except OSError as exc:
            raise CommandExecutionError("Exception: {}".format(exc))
        r_data = None
        try:
            with salt.utils.files.fopen(path, mode="w", buffering=bufsize) as w_file:
                try:
                    with salt.utils.files.fopen(
                        temp_file, mode="r", buffering=bufsize
                    ) as r_file:
                        r_data = mmap.mmap(r_file.fileno(), 0, access=mmap.ACCESS_READ)
                        result, nrepl = re.subn(
                            cpattern,
                            repl.replace("\\", "\\\\") if backslash_literal else repl,
                            r_data,
                            count,
                        )
                        try:
                            w_file.write(salt.utils.stringutils.to_str(result))
                        except OSError as exc:
                            raise CommandExecutionError(
                                "Unable to write file '{}'. Contents may "
                                "be truncated. Temporary file contains copy "
                                "at '{}'. "
                                "Exception: {}".format(path, temp_file, exc)
                            )
                except OSError as exc:
                    raise CommandExecutionError("Exception: {}".format(exc))
                finally:
                    if r_data and isinstance(r_data, mmap.mmap):
                        r_data.close()
        except OSError as exc:
            raise CommandExecutionError("Exception: {}".format(exc))
    if not found and (append_if_not_found or prepend_if_not_found):
        if not_found_content is None:
            not_found_content = repl
        if prepend_if_not_found:
            new_file.insert(
                0, not_found_content + salt.utils.stringutils.to_bytes(os.linesep)
            )
        else:
            # append_if_not_found
            # Make sure we have a newline at the end of the file
            if new_file:
                if not new_file[-1].endswith(
                    salt.utils.stringutils.to_bytes(os.linesep)
                ):
                    new_file[-1] += salt.utils.stringutils.to_bytes(os.linesep)
            new_file.append(
                not_found_content + salt.utils.stringutils.to_bytes(os.linesep)
            )
        has_changes = True
        if not dry_run:
            try:
                # Create a copy to read from and for later use as a backup
                temp_file = _mkstemp_copy(path=path, preserve_inode=preserve_inode)
            except OSError as exc:
                raise CommandExecutionError("Exception: {}".format(exc))
            # write new content in the file while avoiding partial reads
            try:
                fh_ = salt.utils.atomicfile.atomic_open(path, "wb")
                for line in new_file:
                    fh_.write(salt.utils.stringutils.to_bytes(line))
            finally:
                fh_.close()
    if backup and has_changes and not dry_run:
        # keep the backup only if it was requested and only if there were
        # changes and this was not a dry run
        backup_name = "{}{}".format(path, backup)
        try:
            shutil.move(temp_file, backup_name)
        except OSError as exc:
            # FIX: the message placeholders describe the temp file and the
            # backup file; the original formatted (path, temp_file).
            raise CommandExecutionError(
                "Unable to move the temp file '{}' to the "
                "backup file '{}'. "
                "Exception: {}".format(temp_file, backup_name, exc)
            )
        if symlink:
            symlink_backup = "{}{}".format(given_path, backup)
            target_backup = "{}{}".format(target_path, backup)
            # Always clobber any existing symlink backup to match the new one
            try:
                os.symlink(target_backup, symlink_backup)
            except OSError:
                os.remove(symlink_backup)
                os.symlink(target_backup, symlink_backup)
            except Exception as exc:  # pylint: disable=broad-except
                # FIX: ``exc`` was referenced without being bound here,
                # turning this error path into a NameError.
                raise CommandExecutionError(
                    "Unable to create backup symlink '{}'. "
                    "Target was '{}'. "
                    "Exception: {}".format(symlink_backup, target_backup, exc)
                )
    elif temp_file:
        try:
            os.remove(temp_file)
        except OSError as exc:
            raise CommandExecutionError(
                "Unable to delete temp file '{}'. Exception: {}".format(temp_file, exc)
            )
    if not dry_run and not salt.utils.platform.is_windows():
        check_perms(path, None, pre_user, pre_group, pre_mode)
    differences = __utils__["stringutils.get_diff"](orig_file, new_file)
    if show_changes:
        return differences
    # We may have found a regex line match but don't need to change the line
    # (for situations where the pattern also matches the repl). Revert the
    # has_changes flag to False if the final result is unchanged.
    if not differences:
        has_changes = False
    return has_changes
def blockreplace(
    path,
    marker_start="#-- start managed zone --",
    marker_end="#-- end managed zone --",
    content="",
    append_if_not_found=False,
    prepend_if_not_found=False,
    backup=".bak",
    dry_run=False,
    show_changes=True,
    append_newline=False,
    insert_before_match=None,
    insert_after_match=None,
):
    """
    Replace the content of a block delimited by ``marker_start`` and
    ``marker_end`` lines in the file at ``path``.

    When the markers are not found, exactly one of ``append_if_not_found``,
    ``prepend_if_not_found``, ``insert_before_match`` or
    ``insert_after_match`` may be used to add the whole marked block;
    otherwise a CommandExecutionError is raised.  An unterminated block
    (start marker without an end marker) always raises.

    backup
        Extension for the backup copy, or ``False`` for no backup.
    dry_run
        Compute but do not write changes.
    show_changes
        Return a unified diff instead of a changed flag.
    append_newline
        Force a trailing newline after the content; ``None`` adds one only
        when the content does not already end with a newline.

    FIX: the diff/backup logic was duplicated — the backup copy and its
    permission fixup executed twice whenever the block was found.  The
    duplicate has been removed; the returned diff/flag is unchanged.
    """
    exclusive_params = [
        append_if_not_found,
        prepend_if_not_found,
        bool(insert_before_match),
        bool(insert_after_match),
    ]
    if sum(exclusive_params) > 1:
        raise SaltInvocationError(
            "Only one of append_if_not_found, prepend_if_not_found,"
            " insert_before_match, and insert_after_match is permitted"
        )
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        raise SaltInvocationError("File not found: {}".format(path))
    try:
        file_encoding = __utils__["files.get_encoding"](path)
    except CommandExecutionError:
        file_encoding = None
    if __utils__["files.is_binary"](path):
        if not file_encoding:
            raise SaltInvocationError(
                "Cannot perform string replacements on a binary file: {}".format(path)
            )
    if insert_before_match or insert_after_match:
        if insert_before_match:
            if not isinstance(insert_before_match, str):
                raise CommandExecutionError(
                    "RegEx expected in insert_before_match parameter."
                )
        elif insert_after_match:
            if not isinstance(insert_after_match, str):
                raise CommandExecutionError(
                    "RegEx expected in insert_after_match parameter."
                )
    if append_newline is None and not content.endswith((os.linesep, "\n")):
        append_newline = True
    # Split the content into a list of lines, removing newline characters. To
    # ensure that we handle both Windows and POSIX newlines, first split on
    # Windows newlines, and then split on POSIX newlines.
    split_content = []
    for win_line in content.split("\r\n"):
        for content_line in win_line.split("\n"):
            split_content.append(content_line)
    line_count = len(split_content)
    has_changes = False
    orig_file = []
    new_file = []
    in_block = False
    block_found = False
    linesep = None

    def _add_content(linesep, lines=None, include_marker_start=True, end_line=None):
        # Build (or extend) the marked block: start marker, content lines,
        # end marker — all terminated with the file's detected line ending.
        if lines is None:
            lines = []
            include_marker_start = True
        if end_line is None:
            end_line = marker_end
        end_line = end_line.rstrip("\r\n") + linesep
        if include_marker_start:
            lines.append(marker_start + linesep)
        if split_content:
            for index, content_line in enumerate(split_content, 1):
                if index != line_count:
                    lines.append(content_line + linesep)
                else:
                    # We're on the last line of the content block
                    if append_newline:
                        lines.append(content_line + linesep)
                        lines.append(end_line)
                    else:
                        lines.append(content_line + end_line)
        else:
            lines.append(end_line)
        return lines

    try:
        with salt.utils.files.fopen(
            path, "r", encoding=file_encoding, newline=""
        ) as fi_file:
            for line in fi_file:
                write_line_to_new_file = True
                if linesep is None:
                    # Auto-detect line separator from the first line
                    if line.endswith("\r\n"):
                        linesep = "\r\n"
                    elif line.endswith("\n"):
                        linesep = "\n"
                    else:
                        # No newline(s) in file, fall back to system linesep
                        linesep = os.linesep
                if marker_start in line:
                    # We've entered the content block
                    in_block = True
                else:
                    if in_block:
                        # We're not going to write the lines from the old
                        # file to the new file until we have exited the block.
                        write_line_to_new_file = False
                        marker_end_pos = line.find(marker_end)
                        if marker_end_pos != -1:
                            # End of block detected
                            in_block = False
                            # We've found and exited the block
                            block_found = True
                            _add_content(
                                linesep,
                                lines=new_file,
                                include_marker_start=False,
                                end_line=line[marker_end_pos:],
                            )
                # Save the line from the original file
                orig_file.append(line)
                if write_line_to_new_file:
                    new_file.append(line)
    except OSError as exc:
        raise CommandExecutionError("Failed to read from {}: {}".format(path, exc))
    finally:
        if linesep is None:
            # If the file was empty, we will not have set linesep yet. Use
            # the system default so we can handle prepend/append later on.
            linesep = os.linesep
        try:
            fi_file.close()
        except Exception:  # pylint: disable=broad-except
            pass
    if in_block:
        # unterminated block => bad, always fail
        raise CommandExecutionError(
            "Unterminated marked block. End of file reached before marker_end."
        )
    if not block_found:
        if prepend_if_not_found:
            # add the markers and content at the beginning of file
            prepended_content = _add_content(linesep)
            prepended_content.extend(new_file)
            new_file = prepended_content
            block_found = True
        elif append_if_not_found:
            # Make sure we have a newline at the end of the file
            if new_file:
                if not new_file[-1].endswith(linesep):
                    new_file[-1] += linesep
            # add the markers and content at the end of file
            _add_content(linesep, lines=new_file)
            block_found = True
        elif insert_before_match or insert_after_match:
            match_regex = insert_before_match or insert_after_match
            match_idx = [
                i for i, item in enumerate(orig_file) if re.search(match_regex, item)
            ]
            if match_idx:
                match_idx = match_idx[0]
                for line in _add_content(linesep):
                    if insert_after_match:
                        match_idx += 1
                    new_file.insert(match_idx, line)
                    if insert_before_match:
                        match_idx += 1
                block_found = True
        else:
            raise CommandExecutionError(
                "Cannot edit marked block. Markers were not found in file."
            )
    if not block_found:
        raise CommandExecutionError(
            "Cannot edit marked block. Markers were not found in file."
        )

    diff = __utils__["stringutils.get_diff"](orig_file, new_file)
    has_changes = diff != ""
    if has_changes and not dry_run:
        # changes detected
        # backup file attrs
        perms = {}
        perms["user"] = get_user(path)
        perms["group"] = get_group(path)
        perms["mode"] = salt.utils.files.normalize_mode(get_mode(path))

        # backup old content
        if backup is not False:
            backup_path = "{}{}".format(path, backup)
            shutil.copy2(path, backup_path)
            # copy2 does not preserve ownership
            if salt.utils.platform.is_windows():
                # This function resides in win_file.py and will be available
                # on Windows. The local function will be overridden
                # pylint: disable=E1120,E1123
                check_perms(path=backup_path, ret=None, owner=perms["user"])
                # pylint: enable=E1120,E1123
            else:
                check_perms(
                    backup_path, None, perms["user"], perms["group"], perms["mode"]
                )

        # write new content in the file while avoiding partial reads
        try:
            fh_ = salt.utils.atomicfile.atomic_open(path, "wb")
            for line in new_file:
                fh_.write(salt.utils.stringutils.to_bytes(line, encoding=file_encoding))
        finally:
            fh_.close()

        # this may have overwritten file attrs
        if salt.utils.platform.is_windows():
            # This function resides in win_file.py and will be available
            # on Windows. The local function will be overridden
            # pylint: disable=E1120,E1123
            check_perms(path=path, ret=None, owner=perms["user"])
            # pylint: enable=E1120,E1123
        else:
            check_perms(path, None, perms["user"], perms["group"], perms["mode"])
    if show_changes:
        return diff
    return has_changes
def search(path, pattern, flags=8, bufsize=1, ignore_if_missing=False, multiline=False):
    """
    Report whether ``pattern`` matches the contents of the file at ``path``.

    With ``multiline`` set, the MULTILINE flag is added and the whole file is
    read in a single buffer.  Returns a bool; with ``ignore_if_missing`` a
    missing file yields False instead of an error.
    """
    if multiline:
        flags = _add_flags(flags, "MULTILINE")
        bufsize = "file"
    # Deliberately delegates to replace() in search-only mode so that both
    # functions share the same regex semantics, behavior, *and* bugs. :)
    return replace(
        path,
        pattern,
        "",
        flags=flags,
        bufsize=bufsize,
        dry_run=True,
        search_only=True,
        show_changes=False,
        ignore_if_missing=ignore_if_missing,
    )
def patch(originalfile, patchfile, options="", dry_run=False):
    """
    Apply ``patchfile`` to ``originalfile`` (or to a directory of files)
    using the system ``patch`` executable; return the ``cmd.run_all`` result.

    Raises CommandExecutionError when no ``patch`` binary is on PATH.
    """
    patchpath = salt.utils.path.which("patch")
    if not patchpath:
        raise CommandExecutionError(
            "patch executable not found. Is the distribution's patch package installed?"
        )

    cmd = [patchpath]
    cmd.extend(salt.utils.args.shlex_split(options))
    if dry_run:
        # BSD patch spells "check only" as -C rather than --dry-run.
        if __grains__["kernel"] in ("FreeBSD", "OpenBSD"):
            cmd.append("-C")
        else:
            cmd.append("--dry-run")

    # this argument prevents interactive prompts when the patch fails to apply.
    # the exit code will still be greater than 0 if that is the case.
    if "-N" not in cmd and "--forward" not in cmd:
        cmd.append("--forward")

    # by default, patch writes rejected hunks to <filename>.rej; discard them
    # unless the caller supplied their own reject-file option.
    if not any(
        opt == "-r" or opt.startswith("-r ") or opt.startswith("--reject-file")
        for opt in cmd
    ):
        cmd.append("--reject-file=-")

    cmd.extend(["-i", patchfile])

    if os.path.isdir(originalfile):
        cmd.extend(["-d", originalfile])
        # Default to no path stripping unless the caller chose a strip level.
        if not any(opt.startswith(("-p", "--strip=")) for opt in cmd):
            cmd.append("--strip=0")
    else:
        cmd.append(originalfile)

    return __salt__["cmd.run_all"](cmd, python_shell=False)
def contains(path, text):
    """
    Return True when the stripped ``text`` occurs anywhere in the file at
    ``path``; False for a missing file or on read errors.
    """
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False

    needle = str(text).strip()
    try:
        with salt.utils.filebuffer.BufferedReader(path) as reader:
            return any(needle in chunk for chunk in reader)
    except OSError:
        return False
def contains_regex(path, regex, lchar=""):
    """
    Return True when ``regex`` matches any line of the file at ``path``.

    ``lchar`` characters are stripped from the left of each line before
    matching.  A missing file or a read error yields False.
    """
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False

    try:
        with salt.utils.files.fopen(path, "r") as target:
            for raw_line in target:
                candidate = salt.utils.stringutils.to_unicode(raw_line)
                if lchar:
                    candidate = candidate.lstrip(lchar)
                if re.search(regex, candidate):
                    return True
            return False
    except OSError:
        return False
def contains_glob(path, glob_expr):
    """
    Return True when any buffered chunk of the file at ``path`` matches the
    shell glob ``glob_expr``; False for a missing file or on read errors.
    """
    path = os.path.expanduser(path)
    if not os.path.exists(path):
        return False

    try:
        with salt.utils.filebuffer.BufferedReader(path) as reader:
            return any(fnmatch.fnmatch(chunk, glob_expr) for chunk in reader)
    except OSError:
        return False
def append(path, *args, **kwargs):
    """
    Append one line per argument to the file at ``path``, first making sure
    the existing content ends with a line separator.

    A ``args`` keyword (string or list) may be used instead of positional
    arguments.  Returns a summary string.
    """
    path = os.path.expanduser(path)

    # Largely inspired by Fabric's contrib.files.append()
    if "args" in kwargs:
        args = kwargs["args"] if isinstance(kwargs["args"], list) else [kwargs["args"]]

    # Ensure the file currently ends with os.linesep before appending.
    with salt.utils.files.fopen(path, "rb+") as ofile:
        linesep = salt.utils.stringutils.to_bytes(os.linesep)
        try:
            ofile.seek(-len(linesep), os.SEEK_END)
        except OSError as exc:
            if exc.errno not in (errno.EINVAL, errno.ESPIPE):
                raise
            # File is shorter than the line separator; nothing to check.
        else:
            if ofile.read(len(linesep)) != linesep:
                ofile.seek(0, os.SEEK_END)
                ofile.write(linesep)

    with salt.utils.files.fopen(path, "a") as ofile:
        for new_line in args:
            ofile.write(
                salt.utils.stringutils.to_str("{}{}".format(new_line, os.linesep))
            )

    return 'Wrote {} lines to "{}"'.format(len(args), path)
def prepend(path, *args, **kwargs):
    """
    Insert one line per argument at the beginning of the file at ``path``,
    creating the file if it does not exist.

    A ``args`` keyword (string or list) may be used instead of positional
    arguments.  Returns a summary string.
    """
    path = os.path.expanduser(path)

    if "args" in kwargs:
        args = kwargs["args"] if isinstance(kwargs["args"], list) else [kwargs["args"]]

    try:
        with salt.utils.files.fopen(path) as fhr:
            contents = [
                salt.utils.stringutils.to_unicode(line) for line in fhr.readlines()
            ]
    except OSError:
        # Missing file: start from empty content.
        contents = []

    preface = ["{}\n".format(line) for line in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(salt.utils.stringutils.to_str("".join(preface + contents)))
    return 'Prepended {} lines to "{}"'.format(len(args), path)
def write(path, *args, **kwargs):
    """
    Overwrite the file at ``path`` with one line per argument.

    A ``args`` keyword (string or list) may be used instead of positional
    arguments.  Returns a summary string.
    """
    path = os.path.expanduser(path)

    if "args" in kwargs:
        args = kwargs["args"] if isinstance(kwargs["args"], list) else [kwargs["args"]]

    lines = ["{}\n".format(line) for line in args]
    with salt.utils.files.fopen(path, "w") as ofile:
        ofile.write(salt.utils.stringutils.to_str("".join(lines)))
    return 'Wrote {} lines to "{}"'.format(len(lines), path)
def touch(name, atime=None, mtime=None):
    """
    Create the file ``name`` if missing and update its access/modification
    times (epoch seconds; digit strings are converted).

    Omitted times default to now.  Returns True when the file exists
    afterwards.  Raises SaltInvocationError for non-integer times and
    CommandExecutionError on OS errors.
    """
    name = os.path.expanduser(name)

    # Digit strings are accepted and coerced to ints.
    if atime and atime.isdigit():
        atime = int(atime)
    if mtime and mtime.isdigit():
        mtime = int(mtime)
    try:
        if not os.path.exists(name):
            with salt.utils.files.fopen(name, "a"):
                pass

        if not atime and not mtime:
            times = None
        elif atime and not mtime:
            times = (atime, time.time())
        elif mtime and not atime:
            times = (time.time(), mtime)
        else:
            times = (atime, mtime)
        os.utime(name, times)
    except TypeError:
        raise SaltInvocationError("atime and mtime must be integers")
    except OSError as exc:
        raise CommandExecutionError(exc.strerror)

    return os.path.exists(name)
def seek_read(path, size, offset):
    """
    Read ``size`` bytes from the file at ``path`` starting at byte
    ``offset`` and return them as bytes.
    """
    path = os.path.expanduser(path)
    fd = os.open(path, os.O_RDONLY)
    try:
        os.lseek(fd, int(offset), 0)
        return os.read(fd, int(size))
    finally:
        # Always release the descriptor, even if the read fails.
        os.close(fd)
def seek_write(path, data, offset):
    """
    Write ``data`` into the file at ``path`` starting at byte ``offset``,
    fsync the file, and return the number of bytes written.
    """
    path = os.path.expanduser(path)
    fd = os.open(path, os.O_WRONLY)
    try:
        os.lseek(fd, int(offset), 0)
        written = os.write(fd, data)
        # Flush the write to disk before reporting success.
        os.fsync(fd)
    finally:
        os.close(fd)
    return written
def truncate(path, length):
    """Truncate the file at ``path`` to exactly ``length`` bytes."""
    expanded = os.path.expanduser(path)
    with salt.utils.files.fopen(expanded, "rb+") as handle:
        handle.truncate(int(length))
def link(src, path):
    """
    Create a hard link at ``path`` pointing to ``src`` (absolute path
    required).  Returns True on success; raises CommandExecutionError when
    the link cannot be created.
    """
    src = os.path.expanduser(src)
    if not os.path.isabs(src):
        raise SaltInvocationError("File path must be absolute.")
    try:
        os.link(src, path)
    except OSError as E:
        raise CommandExecutionError("Could not create '{}': {}".format(path, E))
    return True
def is_hardlink(path):
    """Return True when the file at ``path`` has more than one hard link."""
    info = lstat(os.path.expanduser(path))
    # lstat() returns {} for a missing path, which is falsy.
    return info and info["st_nlink"] > 1
def is_link(path):
    """Return True when ``path`` (after ~ expansion) is a symbolic link."""
    expanded = os.path.expanduser(path)
    return os.path.islink(expanded)
def symlink(src, path):
    """
    Create a symlink at ``path`` (absolute path required) pointing to
    ``src``.  A link that already points to ``src`` is left alone.  Returns
    True on success; raises CommandExecutionError on failure.
    """
    path = os.path.expanduser(path)

    try:
        # Idempotence: an existing link to the same target is success.
        if os.path.normpath(os.readlink(path)) == os.path.normpath(src):
            log.debug("link already in correct state: %s -> %s", path, src)
            return True
    except OSError:
        pass

    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute.")

    try:
        os.symlink(src, path)
    except OSError:
        raise CommandExecutionError("Could not create '{}'".format(path))
    return True
def rename(src, dst):
    """
    Rename ``src`` (absolute path required) to ``dst``.  Returns True on
    success; raises CommandExecutionError when the rename fails.
    """
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError("File path must be absolute.")

    try:
        os.rename(src, dst)
    except OSError:
        raise CommandExecutionError("Could not rename '{}' to '{}'".format(src, dst))
    return True
def copy(src, dst, recurse=False, remove_existing=False):
    """
    Copy ``src`` (absolute path required) to ``dst``, preserving the
    source's owner/group/mode on non-Windows platforms.

    Directories require ``recurse``; with ``remove_existing`` the
    destination tree is removed first, otherwise files are merged into it.
    Returns True; raises CommandExecutionError on failure.
    """
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError("File path must be absolute.")
    if not os.path.exists(src):
        raise CommandExecutionError("No such file or directory '{}'".format(src))

    if not salt.utils.platform.is_windows():
        # Remember ownership/mode so the copy can be restored to match.
        pre_user = get_user(src)
        pre_group = get_group(src)
        pre_mode = salt.utils.files.normalize_mode(get_mode(src))

    try:
        if (os.path.exists(dst) and os.path.isdir(dst)) or os.path.isdir(src):
            if not recurse:
                raise SaltInvocationError(
                    "Cannot copy overwriting a directory without recurse flag set to"
                    " true!"
                )
            if remove_existing:
                if os.path.exists(dst):
                    shutil.rmtree(dst)
                shutil.copytree(src, dst)
            else:
                salt.utils.files.recursive_copy(src, dst)
        else:
            shutil.copyfile(src, dst)
    except OSError:
        raise CommandExecutionError("Could not copy '{}' to '{}'".format(src, dst))

    if not salt.utils.platform.is_windows():
        check_perms(dst, None, pre_user, pre_group, pre_mode)
    return True
def lstat(path):
    """
    Return selected ``os.lstat`` fields for ``path`` (absolute path
    required) as a dict; an empty dict when the path cannot be stat'ed.

    Raises SaltInvocationError for a relative path.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("Path to file must be absolute.")

    try:
        lst = os.lstat(path)
        return {
            key: getattr(lst, key)
            for key in (
                "st_atime",
                "st_ctime",
                "st_gid",
                "st_mode",
                "st_mtime",
                "st_nlink",
                "st_size",
                "st_uid",
            )
        }
    except OSError:
        # FIX: was a bare ``except Exception`` which would also swallow
        # programming errors; os.lstat only raises OSError here, and a
        # missing/unreadable path still yields the documented empty dict.
        return {}
def access(path, mode):
    """
    Test access to ``path`` (absolute path required).

    ``mode`` is one of the letters f/r/w/x or the corresponding ``os``
    constant.  Returns the ``os.access`` result; raises
    SaltInvocationError for an unknown mode.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("Path to link must be absolute.")

    mode_map = {"f": os.F_OK, "r": os.R_OK, "w": os.W_OK, "x": os.X_OK}

    if mode in mode_map:
        return os.access(path, mode_map[mode])
    if mode in mode_map.values():
        return os.access(path, mode)
    raise SaltInvocationError("Invalid mode specified.")
def read(path, binary=False):
    """
    Return the contents of the file at ``path`` — bytes when ``binary`` is
    True, otherwise a unicode string.
    """
    access_mode = "rb" if binary is True else "r"
    with salt.utils.files.fopen(path, access_mode) as file_obj:
        data = file_obj.read()
    return data if binary is True else salt.utils.stringutils.to_unicode(data)
def readlink(path, canonicalize=False):
    """
    Return the target of the symlink at ``path`` (absolute path required).

    With ``canonicalize`` the fully-resolved real path is returned instead
    of the link's literal target.  Raises SaltInvocationError when ``path``
    is relative or not a symlink.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("Path to link must be absolute.")
    if not os.path.islink(path):
        raise SaltInvocationError("A valid link was not specified.")

    return os.path.realpath(path) if canonicalize else os.readlink(path)
def readdir(path):
    """
    Return the entries of the directory at ``path`` (absolute path
    required), prefixed with the conventional "." and ".." entries.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("Dir path must be absolute.")
    if not os.path.isdir(path):
        raise SaltInvocationError("A valid directory was not specified.")

    return [".", ".."] + os.listdir(path)
def statvfs(path):
    """
    Return selected ``os.statvfs`` fields for the filesystem containing
    ``path`` (absolute path required) as a dict.

    Raises CommandExecutionError when the statvfs call fails.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute.")

    fields = (
        "f_bavail",
        "f_bfree",
        "f_blocks",
        "f_bsize",
        "f_favail",
        "f_ffree",
        "f_files",
        "f_flag",
        "f_frsize",
        "f_namemax",
    )
    try:
        stv = os.statvfs(path)
    except OSError:
        raise CommandExecutionError("Could not statvfs '{}'".format(path))
    return {key: getattr(stv, key) for key in fields}
def stats(path, hash_type=None, follow_symlinks=True):
    """
    Return a dict of metadata for the file at ``path``: inode, ownership,
    timestamps, size, mode, type, canonical target and (optionally) a
    checksum computed with ``hash_type``.

    With ``follow_symlinks`` the link target is stat'ed; otherwise the link
    itself.  Raises CommandExecutionError when the path cannot be stat'ed.
    """
    path = os.path.expanduser(path)

    if not os.path.exists(path):
        # os.path.exists is False for dangling symlinks, but lstat can
        # still describe the link itself.
        try:
            pstat = os.lstat(path)
        except OSError:
            raise CommandExecutionError("Path not found: {}".format(path))
    else:
        pstat = os.stat(path) if follow_symlinks else os.lstat(path)

    ret = {
        "inode": pstat.st_ino,
        "uid": pstat.st_uid,
        "gid": pstat.st_gid,
        "group": gid_to_group(pstat.st_gid),
        "user": uid_to_user(pstat.st_uid),
        "atime": pstat.st_atime,
        "mtime": pstat.st_mtime,
        "ctime": pstat.st_ctime,
        "size": pstat.st_size,
        "mode": salt.utils.files.normalize_mode(oct(stat.S_IMODE(pstat.st_mode))),
    }
    if hash_type:
        ret["sum"] = get_hash(path, hash_type)

    # Classify the entry; the file-type bits in st_mode match at most one
    # predicate, with "file" as the fallback label.
    ret["type"] = "file"
    for predicate, label in (
        (stat.S_ISDIR, "dir"),
        (stat.S_ISCHR, "char"),
        (stat.S_ISBLK, "block"),
        (stat.S_ISREG, "file"),
        (stat.S_ISLNK, "link"),
        (stat.S_ISFIFO, "pipe"),
        (stat.S_ISSOCK, "socket"),
    ):
        if predicate(pstat.st_mode):
            ret["type"] = label
    ret["target"] = os.path.realpath(path)
    return ret
def rmdir(path):
    """
    Remove the empty directory at ``path`` (absolute path required).

    Returns True on success, or the OS error string on failure.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute.")
    if not os.path.isdir(path):
        raise SaltInvocationError("A valid directory was not specified.")

    try:
        os.rmdir(path)
    except OSError as exc:
        return exc.strerror
    return True
def remove(path):
    """
    Remove the file, symlink or directory tree at ``path`` (absolute path
    required).  Returns True when something was removed, False when nothing
    existed; raises CommandExecutionError on failure.
    """
    path = os.path.expanduser(path)

    if not os.path.isabs(path):
        raise SaltInvocationError("File path must be absolute: {}".format(path))

    try:
        # Symlinks and regular files are unlinked; directories are recursed.
        if os.path.islink(path) or (os.path.exists(path) and not os.path.isdir(path)):
            os.remove(path)
            return True
        if os.path.isdir(path):
            shutil.rmtree(path)
            return True
    except OSError as exc:
        raise CommandExecutionError("Could not remove '{}': {}".format(path, exc))
    return False
def directory_exists(path):
    """Return True when ``path`` (after ~ expansion) is an existing directory."""
    expanded = os.path.expanduser(path)
    return os.path.isdir(expanded)
def file_exists(path):
    """Return True when ``path`` (after ~ expansion) is an existing regular file."""
    expanded = os.path.expanduser(path)
    return os.path.isfile(expanded)
def path_exists_glob(path):
    """Return True when the glob pattern ``path`` (after ~ expansion) matches anything."""
    return bool(glob.glob(os.path.expanduser(path)))
def restorecon(path, recursive=False):
    """
    Reset the SELinux context of ``path`` with ``restorecon -F`` (``-FR``
    when ``recursive``).  Returns True when the command exited zero.
    """
    flag = "-FR" if recursive else "-F"
    return not __salt__["cmd.retcode"](["restorecon", flag, path], python_shell=False)
def get_selinux_context(path):
    """
    Return the SELinux context of ``path`` as reported by ``stat -c %C``,
    or a human-readable message when it cannot be determined.
    """
    cmd_ret = __salt__["cmd.run_all"](["stat", "-c", "%C", path], python_shell=False)

    if cmd_ret["retcode"] != 0:
        return "No selinux context information is available for {}".format(path)
    return cmd_ret["stdout"]
def set_selinux_context(
    path,
    user=None,
    role=None,
    type=None,
    range=None,
    persist=False,
):
    """
    Set SELinux context attributes on ``path`` via ``chcon``; with
    ``persist`` the matching fcontext policy is recorded first.

    Returns False when nothing was requested or ``chcon`` failed, otherwise
    the resulting context string.  Raises CommandExecutionError when the
    fcontext policy cannot be added.
    """
    if not any((user, role, type, range)):
        return False

    if persist:
        fcontext_result = __salt__["selinux.fcontext_add_policy"](
            path, sel_type=type, sel_user=user, sel_level=range
        )
        if fcontext_result.get("retcode", None) != 0:
            # Problem setting fcontext policy
            raise CommandExecutionError(
                "Problem setting fcontext: {}".format(fcontext_result)
            )

    cmd = ["chcon"]
    for flag, value in (("-u", user), ("-r", role), ("-t", type), ("-l", range)):
        if value:
            cmd.extend([flag, value])
    cmd.append(path)

    if not __salt__["cmd.retcode"](cmd, python_shell=False):
        return get_selinux_context(path)
    return False
def source_list(source, source_hash, saltenv):
    """
    Check the source list and return the first source (and its hash) that
    can be verified to exist.

    ``source`` may be a single URL/path or a list whose items are strings or
    single-key dicts of ``{source: hash}``.  Candidates are checked in
    order: ``salt://`` paths against the master's file/dir listings,
    http/ftp URLs with a HEAD request, ``file://`` URLs and absolute paths
    against the local filesystem.  A non-list ``source`` is returned as-is.

    Results are memoized in ``__context__`` keyed on
    (source, source_hash, saltenv).

    Raises CommandExecutionError when a list was given and no candidate
    could be found.
    """
    contextkey = "{}_|-{}_|-{}".format(source, source_hash, saltenv)
    if contextkey in __context__:
        return __context__[contextkey]
    if isinstance(source, list):
        # Pre-fetch the master's file and directory listings once so each
        # salt:// candidate can be checked without another round-trip.
        mfiles = [(f, saltenv) for f in __salt__["cp.list_master"](saltenv)]
        mdirs = [(d, saltenv) for d in __salt__["cp.list_master_dirs"](saltenv)]
        for single in source:
            if isinstance(single, dict):
                single = next(iter(single))
            path, senv = salt.utils.url.parse(single)
            # Candidates may pin their own environment (salt://p?saltenv=x);
            # extend the listings with that environment too.
            if senv:
                mfiles += [(f, senv) for f in __salt__["cp.list_master"](senv)]
                mdirs += [(d, senv) for d in __salt__["cp.list_master_dirs"](senv)]

        ret = None
        for single in source:
            if isinstance(single, dict):
                # Dict candidate: a single {source_url: source_hash} pair.
                # {'http://t.est.com/linux/p.zip': 'abcd544kol'}, or
                # {'http://t.est.com/linux/p.zip': ''}
                if len(single) != 1:
                    continue
                single_src = next(iter(single))
                single_hash = single[single_src] if single[single_src] else source_hash
                urlparsed_single_src = urllib.parse.urlparse(single_src)
                # Fix this for Windows
                if salt.utils.platform.is_windows():
                    # urlparse mistakes a drive letter (C:\...) for a URL
                    # scheme, so there is no protocol indicator (file://).
                    # Re-parse with an explicit file:// prefix in that case.
                    if urlparsed_single_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_single_src = urllib.parse.urlparse(
                            "file://" + single_src
                        )
                proto = urlparsed_single_src.scheme
                if proto == "salt":
                    path, senv = salt.utils.url.parse(single_src)
                    if not senv:
                        senv = saltenv
                    # NOTE(review): this membership test uses (path, saltenv)
                    # although the candidate's own environment was just
                    # resolved into ``senv`` (the str branch below uses
                    # senv) — looks like it should be senv; confirm.
                    if (path, saltenv) in mfiles or (path, saltenv) in mdirs:
                        ret = (single_src, single_hash)
                        break
                elif proto.startswith("http") or proto == "ftp":
                    # Remote URL: consider it present when a HEAD request
                    # succeeds.
                    query_res = salt.utils.http.query(
                        single_src, method="HEAD", decode_body=False
                    )
                    if "error" not in query_res:
                        ret = (single_src, single_hash)
                        break
                elif proto == "file" and (
                    os.path.exists(urlparsed_single_src.netloc)
                    or os.path.exists(urlparsed_single_src.path)
                    or os.path.exists(
                        os.path.join(
                            urlparsed_single_src.netloc, urlparsed_single_src.path
                        )
                    )
                ):
                    ret = (single_src, single_hash)
                    break
                elif single_src.startswith(os.sep) and os.path.exists(single_src):
                    ret = (single_src, single_hash)
                    break
            elif isinstance(single, str):
                path, senv = salt.utils.url.parse(single)
                if not senv:
                    senv = saltenv
                if (path, senv) in mfiles or (path, senv) in mdirs:
                    ret = (single, source_hash)
                    break
                urlparsed_src = urllib.parse.urlparse(single)
                if salt.utils.platform.is_windows():
                    # Same Windows drive-letter workaround as above: re-parse
                    # with an explicit file:// prefix when urlparse mistook
                    # the drive letter for a URL scheme.
                    if urlparsed_src.scheme.lower() in string.ascii_lowercase:
                        urlparsed_src = urllib.parse.urlparse("file://" + single)
                proto = urlparsed_src.scheme
                if proto == "file" and (
                    os.path.exists(urlparsed_src.netloc)
                    or os.path.exists(urlparsed_src.path)
                    or os.path.exists(
                        os.path.join(urlparsed_src.netloc, urlparsed_src.path)
                    )
                ):
                    ret = (single, source_hash)
                    break
                elif proto.startswith("http") or proto == "ftp":
                    query_res = salt.utils.http.query(
                        single, method="HEAD", decode_body=False
                    )
                    if "error" not in query_res:
                        ret = (single, source_hash)
                        break
                elif single.startswith(os.sep) and os.path.exists(single):
                    ret = (single, source_hash)
                    break
        if ret is None:
            # None of the list items matched
            raise CommandExecutionError("none of the specified sources were found")
    else:
        ret = (source, source_hash)

    # Cache the result for the remainder of this run.
    __context__[contextkey] = ret
    return ret
def apply_template_on_contents(contents, template, context, defaults, saltenv):
    """
    Render ``contents`` with the named ``template`` engine (jinja, mako,
    ...) using ``defaults`` overlaid with ``context`` as template context.

    Returns the rendered string, or an ``{'result': False, 'comment': ...}``
    dict when the template engine is not registered.
    """
    if template not in salt.utils.templates.TEMPLATE_REGISTRY:
        return {
            "result": False,
            "comment": "Specified template format {} is not supported".format(
                template
            ),
        }

    # ``context`` entries win over ``defaults``.
    context_dict = defaults if defaults else {}
    if context:
        context_dict.update(context)
    # Render with the registered engine, string-in / string-out.
    contents = salt.utils.templates.TEMPLATE_REGISTRY[template](
        contents,
        from_str=True,
        to_str=True,
        context=context_dict,
        saltenv=saltenv,
        grains=__opts__["grains"],
        pillar=__pillar__,
        salt=__salt__,
        opts=__opts__,
    )["data"]
    if isinstance(contents, bytes):
        # bytes -> str
        contents = contents.decode("utf-8")
    return contents
def get_managed(
    name,
    template,
    source,
    source_hash,
    source_hash_name,
    user,
    group,
    mode,
    attrs,
    saltenv,
    context,
    defaults,
    skip_verify=False,
    verify_ssl=True,
    **kwargs
):
    """
    Resolve, cache and (optionally) render the source file for a managed
    file.

    name
        Destination path on the minion.
    template
        Templating engine name, or falsy for no rendering.
    source
        Source URL/path (``salt://``, ``file://``, ``http(s)://`` etc., or
        a bare local path).
    source_hash / source_hash_name
        Expected hash (or URL of a hash file, with ``source_hash_name``
        selecting the entry) for non-salt sources.
    user, group, mode, attrs
        Metadata forwarded to the template renderer.
    saltenv
        Fileserver environment for ``salt://`` sources.
    context / defaults
        Template context overrides and defaults.
    skip_verify
        If True, do not require/verify an upstream hash.
    verify_ssl
        Passed through to hash retrieval and caching for https sources.

    Returns a 3-tuple ``(sfn, source_sum, comment)``: ``sfn`` is the local
    cached/rendered copy ("" if none was produced), ``source_sum`` is a
    dict with ``hsum``/``hash_type`` keys ({} if not computed), and
    ``comment`` is an error message ("" on success).
    """
    sfn = ""
    source_sum = {}

    def _get_local_file_source_sum(path):
        # Local files are always hashed with sha256, regardless of the
        # minion's configured hash_type.
        return {"hsum": get_hash(path, form="sha256"), "hash_type": "sha256"}

    if source:
        urlparsed_source = urllib.parse.urlparse(source)
        if urlparsed_source.scheme in salt.utils.files.VALID_PROTOS:
            parsed_scheme = urlparsed_source.scheme
        else:
            parsed_scheme = ""
        parsed_path = os.path.join(
            urlparsed_source.netloc, urlparsed_source.path
        ).rstrip(os.sep)
        # Bare paths and file:// URLs both refer to files already on disk.
        unix_local_source = parsed_scheme in ("file", "")

        if parsed_scheme == "":
            # Bare local path: must already exist on the minion.
            parsed_path = sfn = source
            if not os.path.exists(sfn):
                msg = "Local file source {} does not exist".format(sfn)
                return "", {}, msg
        elif parsed_scheme == "file":
            sfn = parsed_path
            if not os.path.exists(sfn):
                msg = "Local file source {} does not exist".format(sfn)
                return "", {}, msg

        # A single-letter "scheme" is really a Windows drive letter that
        # urlparse misinterpreted; re-join it and treat the source as a
        # local file.
        if parsed_scheme and parsed_scheme.lower() in string.ascii_lowercase:
            parsed_path = ":".join([parsed_scheme, parsed_path])
            parsed_scheme = "file"

        if parsed_scheme == "salt":
            # The master can tell us the hash directly.
            source_sum = __salt__["cp.hash_file"](source, saltenv)
            if not source_sum:
                return (
                    "",
                    {},
                    "Source file {} not found in saltenv '{}'".format(source, saltenv),
                )
        elif not source_hash and unix_local_source:
            source_sum = _get_local_file_source_sum(parsed_path)
        elif not source_hash and source.startswith(os.sep):
            # This should happen on Windows
            source_sum = _get_local_file_source_sum(source)
        else:
            # Remote (non-salt) source: a hash is mandatory unless
            # skip_verify was requested.
            if not skip_verify:
                if source_hash:
                    try:
                        source_sum = get_source_sum(
                            name,
                            source,
                            source_hash,
                            source_hash_name,
                            saltenv,
                            verify_ssl=verify_ssl,
                        )
                    except CommandExecutionError as exc:
                        return "", {}, exc.strerror
                else:
                    msg = (
                        "Unable to verify upstream hash of source file {}, "
                        "please set source_hash or set skip_verify to True".format(
                            salt.utils.url.redact_http_basic_auth(source)
                        )
                    )
                    return "", {}, msg

    if source and (template or parsed_scheme in salt.utils.files.REMOTE_PROTOS):
        # Check if we have the template or remote file cached
        cache_refetch = False
        cached_dest = __salt__["cp.is_cached"](source, saltenv)
        if cached_dest and (source_hash or skip_verify):
            htype = source_sum.get("hash_type", "sha256")
            cached_sum = get_hash(cached_dest, form=htype)
            if skip_verify:
                # prev: if skip_verify or cached_sum == source_sum['hsum']:
                # but `cached_sum == source_sum['hsum']` is elliptical as prev if
                sfn = cached_dest
                source_sum = {"hsum": cached_sum, "hash_type": htype}
            elif cached_sum != source_sum.get("hsum", __opts__["hash_type"]):
                # NOTE(review): the .get() default here is the hash *type*
                # name, which can never equal a real checksum, so a missing
                # hsum forces a refetch — looks deliberate; confirm.
                cache_refetch = True
            else:
                sfn = cached_dest

        # If we didn't have the template or remote file, or the file has been
        if not sfn or cache_refetch:
            try:
                sfn = __salt__["cp.cache_file"](
                    source,
                    saltenv,
                    source_hash=source_sum.get("hsum"),
                    verify_ssl=verify_ssl,
                )
            except Exception as exc:
                # Redact any http basic-auth credentials before reporting.
                _source = salt.utils.url.redact_http_basic_auth(source)
                return "", {}, "Failed to cache {}: {}".format(_source, exc)

        # cp.cache_file may return a falsy value on failure; guard before
        # the os.path.exists() call.
        if not sfn or not os.path.exists(sfn):
            _source = salt.utils.url.redact_http_basic_auth(source)
            return sfn, {}, "Source file '{}' not found".format(_source)
        if sfn == name:
            raise SaltInvocationError("Source file cannot be the same as destination")

        if template:
            if template in salt.utils.templates.TEMPLATE_REGISTRY:
                context_dict = defaults if defaults else {}
                if context:
                    context_dict.update(context)
                data = salt.utils.templates.TEMPLATE_REGISTRY[template](
                    sfn,
                    name=name,
                    source=source,
                    user=user,
                    group=group,
                    mode=mode,
                    attrs=attrs,
                    saltenv=saltenv,
                    context=context_dict,
                    salt=__salt__,
                    pillar=__pillar__,
                    grains=__opts__["grains"],
                    opts=__opts__,
                    **kwargs
                )
            else:
                return (
                    sfn,
                    {},
                    "Specified template format {} is not supported".format(template),
                )

            if data["result"]:
                # Rendering succeeded: the rendered temp file replaces the
                # cached source, so recompute its hash.
                sfn = data["data"]
                hsum = get_hash(sfn, form="sha256")
                source_sum = {"hash_type": "sha256", "hsum": hsum}
            else:
                __clean_tmp(sfn)
                return sfn, {}, data["data"]

    return sfn, source_sum, ""
def extract_hash(
    hash_fn, hash_type="sha256", file_name="", source="", source_hash_name=None
):
    """
    Parse a hash file and return the hash matching the desired file.

    hash_fn
        Path to a local hash file (e.g. the output of ``sha256sum``).
    hash_type
        Hash algorithm to look for; an unsupported value falls back to
        matching any supported hash length.
    file_name / source
        Candidate filenames: lines ending in their basename are preferred
        matches.
    source_hash_name
        Explicit filename to look for in the hash file; takes precedence
        over ``file_name`` and ``source`` matches.

    Returns a dict with ``hsum`` and ``hash_type`` keys for the best
    match, falling back to the first hash found on any line (``partial``),
    or ``None`` if no hash was found at all.
    """
    hash_len = HASHES.get(hash_type)
    if hash_len is None:
        if hash_type:
            log.warning(
                "file.extract_hash: Unsupported hash_type '%s', falling "
                "back to matching any supported hash_type",
                hash_type,
            )
            hash_type = ""
        # Accept any supported hash length (regex {min,max} quantifier).
        hash_len_expr = "{},{}".format(min(HASHES_REVMAP), max(HASHES_REVMAP))
    else:
        hash_len_expr = str(hash_len)

    # Characters that may legitimately precede a filename on a hash line.
    filename_separators = string.whitespace + r"\/*"

    if source_hash_name:
        if not isinstance(source_hash_name, str):
            source_hash_name = str(source_hash_name)
        # Negative index of the char just before the filename, used to
        # check that the name is preceded by whitespace.
        source_hash_name_idx = (len(source_hash_name) + 1) * -1
        log.debug(
            "file.extract_hash: Extracting %s hash for file matching "
            "source_hash_name '%s'",
            "any supported" if not hash_type else hash_type,
            source_hash_name,
        )
    if file_name:
        if not isinstance(file_name, str):
            file_name = str(file_name)
        file_name_basename = os.path.basename(file_name)
        file_name_idx = (len(file_name_basename) + 1) * -1
    if source:
        if not isinstance(source, str):
            source = str(source)
        urlparsed_source = urllib.parse.urlparse(source)
        source_basename = os.path.basename(
            urlparsed_source.path or urlparsed_source.netloc
        )
        source_idx = (len(source_basename) + 1) * -1

    basename_searches = [x for x in (file_name, source) if x]
    if basename_searches:
        log.debug(
            "file.extract_hash: %s %s hash for file matching%s: %s",
            "If no source_hash_name match found, will extract"
            if source_hash_name
            else "Extracting",
            "any supported" if not hash_type else hash_type,
            "" if len(basename_searches) == 1 else " either of the following",
            ", ".join(basename_searches),
        )

    # partial: first hash seen on any line, used as a last-resort result.
    partial = None
    # found: match-type -> list of {"hsum", "hash_type"} dicts.
    found = {}

    with salt.utils.files.fopen(hash_fn, "r") as fp_:
        for line in fp_:
            line = salt.utils.stringutils.to_unicode(line.strip())
            # Hex run of the expected length, not embedded in a longer
            # alphanumeric token.
            hash_re = r"(?i)(?<![a-z0-9])([a-f0-9]{" + hash_len_expr + "})(?![a-z0-9])"
            hash_match = re.search(hash_re, line)
            matched = None
            if hash_match:
                matched_hsum = hash_match.group(1)
                if matched_hsum is not None:
                    matched_type = HASHES_REVMAP.get(len(matched_hsum))
                    if matched_type is None:
                        # to match one of the supported hash types.
                        matched = None
                    else:
                        matched = {"hsum": matched_hsum, "hash_type": matched_type}

            if matched is None:
                log.debug(
                    "file.extract_hash: In line '%s', no %shash found",
                    line,
                    "" if not hash_type else hash_type + " ",
                )
                continue

            if partial is None:
                partial = matched

            def _add_to_matches(found, line, match_type, value, matched):
                # Record a match under its category (source_hash_name /
                # file_name / source).
                log.debug(
                    "file.extract_hash: Line '%s' matches %s '%s'",
                    line,
                    match_type,
                    value,
                )
                found.setdefault(match_type, []).append(matched)

            hash_matched = False
            if source_hash_name:
                if line.endswith(source_hash_name):
                    # Checking the character before where the basename
                    # should start for either whitespace or a path
                    # separator. We can't just rsplit on spaces/whitespace,
                    try:
                        if line[source_hash_name_idx] in string.whitespace:
                            _add_to_matches(
                                found,
                                line,
                                "source_hash_name",
                                source_hash_name,
                                matched,
                            )
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source_hash_name) + r"\s+", line):
                    # Name at the start of the line (BSD-style listing).
                    _add_to_matches(
                        found, line, "source_hash_name", source_hash_name, matched
                    )
                    hash_matched = True
            if file_name:
                if line.endswith(file_name_basename):
                    # because the filename may contain spaces.
                    try:
                        if line[file_name_idx] in filename_separators:
                            _add_to_matches(
                                found, line, "file_name", file_name, matched
                            )
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(file_name) + r"\s+", line):
                    _add_to_matches(found, line, "file_name", file_name, matched)
                    hash_matched = True
            if source:
                if line.endswith(source_basename):
                    # Same as above, we can't just do an rsplit here.
                    try:
                        if line[source_idx] in filename_separators:
                            _add_to_matches(found, line, "source", source, matched)
                            hash_matched = True
                    except IndexError:
                        pass
                elif re.match(re.escape(source) + r"\s+", line):
                    _add_to_matches(found, line, "source", source, matched)
                    hash_matched = True

            if not hash_matched:
                log.debug(
                    "file.extract_hash: Line '%s' contains %s hash "
                    "'%s', but line did not meet the search criteria",
                    line,
                    matched["hash_type"],
                    matched["hsum"],
                )

    # Prefer matches in this order: explicit source_hash_name, then
    # file_name, then source; within a category the first match wins.
    for found_type, found_str in (
        ("source_hash_name", source_hash_name),
        ("file_name", file_name),
        ("source", source),
    ):
        if found_type in found:
            if len(found[found_type]) > 1:
                log.debug(
                    "file.extract_hash: Multiple %s matches for %s: %s",
                    found_type,
                    found_str,
                    ", ".join(
                        [
                            "{} ({})".format(x["hsum"], x["hash_type"])
                            for x in found[found_type]
                        ]
                    ),
                )
            ret = found[found_type][0]
            log.debug(
                "file.extract_hash: Returning %s hash '%s' as a match of %s",
                ret["hash_type"],
                ret["hsum"],
                found_str,
            )
            return ret

    if partial:
        log.debug(
            "file.extract_hash: Returning the partially identified %s hash '%s'",
            partial["hash_type"],
            partial["hsum"],
        )
        return partial

    log.debug("file.extract_hash: No matches, returning None")
    return None
def check_perms(
    name,
    ret,
    user,
    group,
    mode,
    attrs=None,
    follow_symlinks=False,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
):
    """
    Check and, if not in test mode, set the ownership, mode, attributes
    and SELinux context of ``name``.

    name
        Path to check (``~`` is expanded).
    ret
        A state-style return dict to update; a fresh one is created if
        falsy. Its ``comment`` is collected as a list and joined with
        "; " at the end.
    user / group
        Desired owner and group (names or numeric ids).
    mode
        Desired file mode (normalized before comparison); ignored for
        symlinks unless ``follow_symlinks`` is True.
    attrs
        Desired chattr-style attributes (non-directories only).
    follow_symlinks
        Operate on a symlink's target instead of the link itself.
    seuser / serole / setype / serange
        Desired SELinux context parts (Linux only).

    Returns ``(ret, perms)`` where ``perms`` records the pre-existing
    ("l"-prefixed) and requested-to-change ("c"-prefixed) values.
    """
    name = os.path.expanduser(name)

    if not ret:
        ret = {"name": name, "changes": {}, "comment": [], "result": True}
        orig_comment = ""
    else:
        orig_comment = ret["comment"]
        ret["comment"] = []

    # Snapshot the current ownership/mode before making any changes.
    perms = {}
    cur = stats(name, follow_symlinks=follow_symlinks)
    perms["luser"] = cur["user"]
    perms["lgroup"] = cur["group"]
    perms["lmode"] = salt.utils.files.normalize_mode(cur["mode"])
    is_dir = os.path.isdir(name)
    is_link = os.path.islink(name)

    # Record a pending user change; on Windows compare by uid since
    # several account names can map to the same SID.
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (
            salt.utils.platform.is_windows()
            and user_to_uid(user) != user_to_uid(perms["luser"])
        ) or (not salt.utils.platform.is_windows() and user != perms["luser"]):
            perms["cuser"] = user

    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (
            salt.utils.platform.is_windows()
            and group_to_gid(group) != group_to_gid(perms["lgroup"])
        ) or (not salt.utils.platform.is_windows() and group != perms["lgroup"]):
            perms["cgroup"] = group

    if "cuser" in perms or "cgroup" in perms:
        if not __opts__["test"]:
            if os.path.islink(name) and not follow_symlinks:
                chown_func = lchown
            else:
                chown_func = chown
            if user is None:
                user = perms["luser"]
            if group is None:
                group = perms["lgroup"]
            try:
                chown_func(name, user, group)
                # NOTE(review): set_mode is also applied further below when
                # mode differs; calling it here as well looks redundant —
                # confirm before changing.
                set_mode(name, mode)
            except OSError:
                ret["result"] = False

    # Verify the user change took effect (or record it in test mode).
    if user:
        if isinstance(user, int):
            user = uid_to_user(user)
        if (
            salt.utils.platform.is_windows()
            and user_to_uid(user)
            != user_to_uid(get_user(name, follow_symlinks=follow_symlinks))
            and user != ""
        ) or (
            not salt.utils.platform.is_windows()
            and user != get_user(name, follow_symlinks=follow_symlinks)
            and user != ""
        ):
            if __opts__["test"] is True:
                ret["changes"]["user"] = user
            else:
                ret["result"] = False
                ret["comment"].append("Failed to change user to {}".format(user))
        elif "cuser" in perms and user != "":
            ret["changes"]["user"] = user

    # Verify the group change took effect (or record it in test mode).
    if group:
        if isinstance(group, int):
            group = gid_to_group(group)
        if (
            salt.utils.platform.is_windows()
            and group_to_gid(group)
            != group_to_gid(get_group(name, follow_symlinks=follow_symlinks))
            and user != ""
        ) or (
            not salt.utils.platform.is_windows()
            and group != get_group(name, follow_symlinks=follow_symlinks)
            and user != ""
        ):
            # NOTE(review): the ``user != ""`` guards in this group section
            # reference ``user`` rather than ``group`` — possibly a
            # copy/paste slip; confirm intent before changing.
            if __opts__["test"] is True:
                ret["changes"]["group"] = group
            else:
                ret["result"] = False
                ret["comment"].append("Failed to change group to {}".format(group))
        elif "cgroup" in perms and user != "":
            ret["changes"]["group"] = group

    # Mode changes if needed
    if mode is not None:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            mode = salt.utils.files.normalize_mode(mode)
            if mode != perms["lmode"]:
                if __opts__["test"] is True:
                    ret["changes"]["mode"] = mode
                else:
                    set_mode(name, mode)
                    if mode != salt.utils.files.normalize_mode(get_mode(name)):
                        ret["result"] = False
                        ret["comment"].append(
                            "Failed to change mode to {}".format(mode)
                        )
                    else:
                        ret["changes"]["mode"] = mode

    # Modify attributes of file if needed
    if attrs is not None and not is_dir:
        # File is a symlink, ignore the mode setting
        # if follow_symlinks is False
        if os.path.islink(name) and not follow_symlinks:
            pass
        else:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs and any(attr for attr in diff_attrs):
                changes = {
                    "old": "".join(lsattr(name)[name]),
                    "new": None,
                }
                if __opts__["test"] is True:
                    changes["new"] = attrs
                else:
                    if diff_attrs.added:
                        chattr(
                            name,
                            operator="add",
                            attributes=diff_attrs.added,
                        )
                    if diff_attrs.removed:
                        chattr(
                            name,
                            operator="remove",
                            attributes=diff_attrs.removed,
                        )
                    # Re-compare after chattr to confirm the change stuck.
                    cmp_attrs = _cmp_attrs(name, attrs)
                    if any(attr for attr in cmp_attrs):
                        ret["result"] = False
                        ret["comment"].append(
                            "Failed to change attributes to {}".format(attrs)
                        )
                        changes["new"] = "".join(lsattr(name)[name])
                    else:
                        changes["new"] = attrs
                if changes["old"] != changes["new"]:
                    ret["changes"]["attrs"] = changes

    # Set selinux attributes if needed
    if salt.utils.platform.is_linux() and (seuser or serole or setype or serange):
        selinux_error = False
        try:
            (
                current_seuser,
                current_serole,
                current_setype,
                current_serange,
            ) = get_selinux_context(name).split(":")
            log.debug(
                "Current selinux context user:%s role:%s type:%s range:%s",
                current_seuser,
                current_serole,
                current_setype,
                current_serange,
            )
        except ValueError:
            log.error("Unable to get current selinux attributes")
            ret["result"] = False
            ret["comment"].append("Failed to get selinux attributes")
            selinux_error = True

        if not selinux_error:
            requested_seuser = None
            requested_serole = None
            requested_setype = None
            requested_serange = None
            # Only set new selinux variables if updates are needed
            if seuser and seuser != current_seuser:
                requested_seuser = seuser
            if serole and serole != current_serole:
                requested_serole = serole
            if setype and setype != current_setype:
                requested_setype = setype
            if serange and serange != current_serange:
                requested_serange = serange

            if (
                requested_seuser
                or requested_serole
                or requested_setype
                or requested_serange
            ):
                # selinux updates needed, prep changes output
                selinux_change_new = ""
                selinux_change_orig = ""
                if requested_seuser:
                    selinux_change_new += "User: {} ".format(requested_seuser)
                    selinux_change_orig += "User: {} ".format(current_seuser)
                if requested_serole:
                    selinux_change_new += "Role: {} ".format(requested_serole)
                    selinux_change_orig += "Role: {} ".format(current_serole)
                if requested_setype:
                    selinux_change_new += "Type: {} ".format(requested_setype)
                    selinux_change_orig += "Type: {} ".format(current_setype)
                if requested_serange:
                    selinux_change_new += "Range: {} ".format(requested_serange)
                    selinux_change_orig += "Range: {} ".format(current_serange)

                if __opts__["test"]:
                    ret["comment"] = "File {} selinux context to be updated".format(
                        name
                    )
                    ret["result"] = None
                    ret["changes"]["selinux"] = {
                        "Old": selinux_change_orig.strip(),
                        "New": selinux_change_new.strip(),
                    }
                else:
                    try:
                        # set_selinux_context requires type to be set on any other change
                        if (
                            requested_seuser or requested_serole or requested_serange
                        ) and not requested_setype:
                            requested_setype = current_setype
                        result = set_selinux_context(
                            name,
                            user=requested_seuser,
                            role=requested_serole,
                            type=requested_setype,
                            range=requested_serange,
                            persist=True,
                        )
                        log.debug("selinux set result: %s", result)
                        (
                            current_seuser,
                            current_serole,
                            current_setype,
                            current_serange,
                        ) = result.split(":")
                    except ValueError:
                        log.error("Unable to set current selinux attributes")
                        ret["result"] = False
                        ret["comment"].append("Failed to set selinux attributes")
                        selinux_error = True

                    if not selinux_error:
                        ret["comment"].append(
                            "The file {} is set to be changed".format(name)
                        )

                        if requested_seuser:
                            if current_seuser != requested_seuser:
                                ret["comment"].append("Unable to update seuser context")
                                ret["result"] = False
                        if requested_serole:
                            if current_serole != requested_serole:
                                ret["comment"].append("Unable to update serole context")
                                ret["result"] = False
                        if requested_setype:
                            if current_setype != requested_setype:
                                ret["comment"].append("Unable to update setype context")
                                ret["result"] = False
                        if requested_serange:
                            if current_serange != requested_serange:
                                ret["comment"].append(
                                    "Unable to update serange context"
                                )
                                ret["result"] = False

                        ret["changes"]["selinux"] = {
                            "Old": selinux_change_orig.strip(),
                            "New": selinux_change_new.strip(),
                        }

    # Only combine the comment list into a string
    # after all comments are added above
    if isinstance(orig_comment, str):
        if orig_comment:
            ret["comment"].insert(0, orig_comment)
        ret["comment"] = "; ".join(ret["comment"])

    # Set result to None at the very end of the function,
    # after all changes have been recorded above
    if __opts__["test"] is True and ret["changes"]:
        ret["result"] = None

    return ret, perms
def check_managed(
    name,
    source,
    source_hash,
    source_hash_name,
    user,
    group,
    mode,
    attrs,
    template,
    context,
    defaults,
    saltenv,
    contents=None,
    skip_verify=False,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    **kwargs
):
    """
    Check what changes need to be made for a managed file.

    Returns a ``(result, comment)`` pair: ``True`` when the file is
    already in the desired state, ``None`` when changes are pending, and
    ``False`` when the source could not be retrieved.
    """
    # A list source is resolved to the first entry that actually exists.
    source, source_hash = source_list(
        source, source_hash, saltenv  # pylint: disable=W0633
    )

    cached_src = ""
    src_sum = None
    if contents is None:
        # No inline contents: fetch (and possibly render) the source file.
        cached_src, src_sum, msgs = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            **kwargs
        )
        if msgs:
            __clean_tmp(cached_src)
            return False, msgs

    pending = check_file_meta(
        name,
        cached_src,
        source,
        src_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents,
        seuser=seuser,
        serole=serole,
        setype=setype,
        serange=serange,
    )

    # Ownership/mode is not reported for paths under the system temp dir;
    # the actual management call will still apply them.
    if name.startswith(tempfile.gettempdir()):
        for meta_key in ("user", "group", "mode"):
            pending.pop(meta_key, None)

    __clean_tmp(cached_src)

    if not pending:
        return True, "The file {} is in the correct state".format(name)

    log.info(pending)
    summary = ["The following values are set to be changed:\n"]
    summary.extend(
        "{}: {}\n".format(key, val) for key, val in pending.items()
    )
    return None, "".join(summary)
def check_managed_changes(
    name,
    source,
    source_hash,
    source_hash_name,
    user,
    group,
    mode,
    attrs,
    template,
    context,
    defaults,
    saltenv,
    contents=None,
    skip_verify=False,
    keep_mode=False,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    verify_ssl=True,
    **kwargs
):
    """
    Return a dict of the changes required for a managed file (an empty
    dict means the file is already in the correct state).

    name
        Destination path on the minion.
    source / source_hash / source_hash_name
        Source specification; a list source is resolved to the first
        existing entry.
    user, group, mode, attrs
        Desired ownership/permission metadata.
    template / context / defaults
        Templating engine and its context.
    saltenv
        Fileserver environment.
    contents
        Inline contents; when given, no source file is fetched.
    skip_verify
        Skip source hash verification.
    keep_mode
        For ``salt://``, ``file://`` or absolute-path sources, take the
        mode from the source file instead of the ``mode`` argument.
    seuser / serole / setype / serange
        Desired SELinux context parts.
    verify_ssl
        Passed through to caching/hash retrieval for https sources.

    Returns the changes dict from ``check_file_meta``; on a source
    retrieval error returns ``(False, comments)`` instead.
    """
    # If the source is a list then find which file exists
    source, source_hash = source_list(
        source, source_hash, saltenv  # pylint: disable=W0633
    )
    sfn = ""
    source_sum = None

    if contents is None:
        # Gather the source file from the server
        sfn, source_sum, comments = get_managed(
            name,
            template,
            source,
            source_hash,
            source_hash_name,
            user,
            group,
            mode,
            attrs,
            saltenv,
            context,
            defaults,
            skip_verify,
            verify_ssl=verify_ssl,
            **kwargs
        )

        # Ensure that user-provided hash string is lowercase
        if source_sum and ("hsum" in source_sum):
            source_sum["hsum"] = source_sum["hsum"].lower()

        if comments:
            __clean_tmp(sfn)
            return False, comments
        if sfn and source and keep_mode:
            if (
                urllib.parse.urlparse(source).scheme
                in (
                    "salt",
                    "file",
                )
                or source.startswith("/")
            ):
                try:
                    mode = __salt__["cp.stat_file"](source, saltenv=saltenv, octal=True)
                except Exception as exc:  # pylint: disable=broad-except
                    # Bug fix: the failing stat is on ``source``; the old
                    # warning named ``sfn`` (the local cached copy) instead.
                    log.warning("Unable to stat %s: %s", source, exc)
    changes = check_file_meta(
        name,
        sfn,
        source,
        source_sum,
        user,
        group,
        mode,
        attrs,
        saltenv,
        contents,
        seuser=seuser,
        serole=serole,
        setype=setype,
        serange=serange,
    )
    __clean_tmp(sfn)
    return changes
def check_file_meta(
    name,
    sfn,
    source,
    source_sum,
    user,
    group,
    mode,
    attrs,
    saltenv,
    contents=None,
    seuser=None,
    serole=None,
    setype=None,
    serange=None,
    verify_ssl=True,
):
    """
    Compare the metadata and contents of ``name`` against the desired
    state and return a dict describing everything that differs.

    name
        Path of the file on the minion.
    sfn
        Local path of a cached copy of the source (may be "", in which
        case it is fetched on demand when a content diff is needed).
    source / source_sum
        Source URL and its hash dict (``hsum``/``hash_type``).
    user, group, mode, attrs
        Desired ownership/permission metadata (POSIX checks skipped on
        Windows).
    saltenv
        Fileserver environment used when caching ``source``.
    contents
        Inline desired contents; diffed via a temp file when given.
    seuser / serole / setype / serange
        Desired SELinux context parts.
    verify_ssl
        Passed to ``cp.cache_file`` for https sources.

    Possible keys in the returned dict: ``newfile``, ``diff``, ``sum``,
    ``user``, ``group``, ``mode``, ``attrs``, ``selinux``.
    """
    changes = {}
    if not source_sum:
        source_sum = dict()

    try:
        lstats = stats(
            name, hash_type=source_sum.get("hash_type", None), follow_symlinks=False
        )
    except CommandExecutionError:
        lstats = {}

    if not lstats:
        # Destination does not exist: nothing else can be compared.
        changes["newfile"] = name
        return changes

    if "hsum" in source_sum:
        if source_sum["hsum"] != lstats["sum"]:
            # Content differs; cache the source (if needed) so an actual
            # diff can be produced rather than just "checksum differs".
            if not sfn and source:
                sfn = __salt__["cp.cache_file"](
                    source,
                    saltenv,
                    source_hash=source_sum["hsum"],
                    verify_ssl=verify_ssl,
                )
            if sfn:
                try:
                    changes["diff"] = get_diff(
                        name, sfn, template=True, show_filenames=False
                    )
                except CommandExecutionError as exc:
                    changes["diff"] = exc.strerror
            else:
                changes["sum"] = "Checksum differs"

    if contents is not None:
        # Write a tempfile with the static contents
        if isinstance(contents, bytes):
            tmp = salt.utils.files.mkstemp(
                prefix=salt.utils.files.TEMPFILE_PREFIX, text=False
            )
            with salt.utils.files.fopen(tmp, "wb") as tmp_:
                tmp_.write(contents)
        else:
            tmp = salt.utils.files.mkstemp(
                prefix=salt.utils.files.TEMPFILE_PREFIX, text=True
            )
            if salt.utils.platform.is_windows():
                # Normalize line endings to the platform convention.
                contents = os.linesep.join(
                    _splitlines_preserving_trailing_newline(contents)
                )
            with salt.utils.files.fopen(tmp, "w") as tmp_:
                tmp_.write(salt.utils.stringutils.to_str(contents))
        # Compare the static contents with the named file
        try:
            differences = get_diff(name, tmp, show_filenames=False)
        except CommandExecutionError as exc:
            log.error("Failed to diff files: %s", exc)
            differences = exc.strerror
        __clean_tmp(tmp)
        if differences:
            if __salt__["config.option"]("obfuscate_templates"):
                changes["diff"] = "<Obfuscated Template>"
            else:
                changes["diff"] = differences

    if not salt.utils.platform.is_windows():
        # Check owner
        if user is not None and user != lstats["user"] and user != lstats["uid"]:
            changes["user"] = user

        # Check group
        if group is not None and group != lstats["group"] and group != lstats["gid"]:
            changes["group"] = group

        # Normalize the file mode
        smode = salt.utils.files.normalize_mode(lstats["mode"])
        mode = salt.utils.files.normalize_mode(mode)
        if mode is not None and mode != smode:
            changes["mode"] = mode

        if attrs:
            diff_attrs = _cmp_attrs(name, attrs)
            if diff_attrs is not None:
                # diff_attrs[0]/[1] are the added/removed attribute sets.
                if attrs is not None and (
                    diff_attrs[0] is not None or diff_attrs[1] is not None
                ):
                    changes["attrs"] = attrs

        # Check selinux
        if seuser or serole or setype or serange:
            try:
                (
                    current_seuser,
                    current_serole,
                    current_setype,
                    current_serange,
                ) = get_selinux_context(name).split(":")
                log.debug(
                    "Current selinux context user:%s role:%s type:%s range:%s",
                    current_seuser,
                    current_serole,
                    current_setype,
                    current_serange,
                )
            except ValueError as exc:
                # NOTE(review): if this except path is taken, the
                # current_* names below are unbound and the comparisons
                # would raise NameError — confirm whether split() can
                # actually fail here.
                log.error("Unable to get current selinux attributes")
                changes["selinux"] = exc.strerror

            if seuser and seuser != current_seuser:
                changes["selinux"] = {"user": seuser}
            if serole and serole != current_serole:
                changes["selinux"] = {"role": serole}
            if setype and setype != current_setype:
                changes["selinux"] = {"type": setype}
            if serange and serange != current_serange:
                changes["selinux"] = {"range": serange}

    return changes
def get_diff(
    file1,
    file2,
    saltenv="base",
    show_filenames=True,
    show_changes=True,
    template=False,
    source_hash_file1=None,
    source_hash_file2=None,
):
    """
    Return a diff of two files, caching each from the fileserver first
    when necessary.

    file1 / file2
        Paths or fileserver URLs of the files to compare.
    saltenv
        Fileserver environment used for caching.
    show_filenames
        Include the file paths in the diff header.
    show_changes
        When False, return the ``<show_changes=False>`` placeholder
        instead of the actual diff.
    template
        When True and ``obfuscate_templates`` is configured, return the
        ``<Obfuscated Template>`` placeholder.
    source_hash_file1 / source_hash_file2
        Optional source hashes passed to ``cp.cache_file``.

    Returns "" when the files are identical. Raises
    CommandExecutionError when a file cannot be cached or read.
    """
    cache_errors = []
    local_paths = []
    for fname, fhash in (
        (file1, source_hash_file1),
        (file2, source_hash_file2),
    ):
        try:
            # Local file paths pass through cp.cache_file unchanged.
            cached = __salt__["cp.cache_file"](fname, saltenv, source_hash=fhash)
        except MinionError as exc:
            cache_errors.append(salt.utils.stringutils.to_unicode(exc.__str__()))
            continue
        if cached is False:
            cache_errors.append(
                "File {} not found".format(
                    salt.utils.stringutils.to_unicode(fname)
                )
            )
        else:
            local_paths.append(cached)

    if cache_errors:
        raise CommandExecutionError(
            "Failed to cache one or more files", info=cache_errors
        )

    contents = []
    for path in local_paths:
        try:
            with salt.utils.files.fopen(path, "rb") as fh_:
                contents.append(fh_.readlines())
        except OSError as exc:
            raise CommandExecutionError(
                "Failed to read {}: {}".format(
                    salt.utils.stringutils.to_unicode(path), exc.strerror
                )
            )

    if contents[0] == contents[1]:
        # Identical files diff to an empty string.
        return ""

    if template and __salt__["config.option"]("obfuscate_templates"):
        return "<Obfuscated Template>"
    if not show_changes:
        return "<show_changes=False>"

    # Binary content cannot be diffed line-by-line; _binary_replace
    # returns a placeholder message in that case.
    bdiff = _binary_replace(*local_paths)  # pylint: disable=no-value-for-parameter
    if bdiff:
        return bdiff

    if show_filenames:
        contents.extend(local_paths)
    return __utils__["stringutils.get_diff"](*contents)
def manage_file(
name,
sfn,
ret,
source,
source_sum,
user,
group,
mode,
attrs,
saltenv,
backup,
makedirs=False,
template=None, # pylint: disable=W0613
show_changes=True,
contents=None,
dir_mode=None,
follow_symlinks=True,
skip_verify=False,
keep_mode=False,
encoding=None,
encoding_errors="strict",
seuser=None,
serole=None,
setype=None,
serange=None,
verify_ssl=True,
**kwargs
):
name = os.path.expanduser(name)
if not ret:
ret = {"name": name, "changes": {}, "comment": "", "result": True}
# Ensure that user-provided hash string is lowercase
if source_sum and ("hsum" in source_sum):
source_sum["hsum"] = source_sum["hsum"].lower()
if source:
if not sfn:
# File is not present, cache it
sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
if not sfn:
return _error(ret, "Source file '{}' not found".format(source))
htype = source_sum.get("hash_type", __opts__["hash_type"])
# Recalculate source sum now that file has been cached
source_sum = {"hash_type": htype, "hsum": get_hash(sfn, form=htype)}
if keep_mode:
if urllib.parse.urlparse(source).scheme in ("salt", "file", ""):
try:
mode = __salt__["cp.stat_file"](source, saltenv=saltenv, octal=True)
except Exception as exc: # pylint: disable=broad-except
log.warning("Unable to stat %s: %s", sfn, exc)
# Check changes if the target file exists
if os.path.isfile(name) or os.path.islink(name):
if os.path.islink(name) and follow_symlinks:
real_name = os.path.realpath(name)
else:
real_name = name
# Only test the checksums on files with managed contents
if source and not (not follow_symlinks and os.path.islink(real_name)):
name_sum = get_hash(
real_name, source_sum.get("hash_type", __opts__["hash_type"])
)
else:
name_sum = None
# Check if file needs to be replaced
if source and (
name_sum is None
or source_sum.get("hsum", __opts__["hash_type"]) != name_sum
):
if not sfn:
sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
if not sfn:
return _error(ret, "Source file '{}' not found".format(source))
# If the downloaded file came from a non salt server or local
# source, and we are not skipping checksum verification, then
# verify that it matches the specified checksum.
if not skip_verify and urllib.parse.urlparse(source).scheme != "salt":
dl_sum = get_hash(sfn, source_sum["hash_type"])
if dl_sum != source_sum["hsum"]:
ret["comment"] = (
"Specified {} checksum for {} ({}) does not match "
"actual checksum ({}). If the 'source_hash' value "
"refers to a remote file with multiple possible "
"matches, then it may be necessary to set "
"'source_hash_name'.".format(
source_sum["hash_type"], source, source_sum["hsum"], dl_sum
)
)
ret["result"] = False
return ret
# Print a diff equivalent to diff -u old new
if __salt__["config.option"]("obfuscate_templates"):
ret["changes"]["diff"] = "<Obfuscated Template>"
elif not show_changes:
ret["changes"]["diff"] = "<show_changes=False>"
else:
try:
ret["changes"]["diff"] = get_diff(
real_name, sfn, show_filenames=False
)
except CommandExecutionError as exc:
ret["changes"]["diff"] = exc.strerror
# Pre requisites are met, and the file needs to be replaced, do it
try:
salt.utils.files.copyfile(
sfn,
real_name,
__salt__["config.backup_mode"](backup),
__opts__["cachedir"],
)
except OSError as io_error:
__clean_tmp(sfn)
return _error(ret, "Failed to commit change: {}".format(io_error))
if contents is not None:
# Write the static contents to a temporary file
tmp = salt.utils.files.mkstemp(
prefix=salt.utils.files.TEMPFILE_PREFIX, text=True
)
with salt.utils.files.fopen(tmp, "wb") as tmp_:
if encoding:
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents)
)
log.debug("File will be encoded with %s", encoding)
tmp_.write(
contents.encode(encoding=encoding, errors=encoding_errors)
)
else:
tmp_.write(salt.utils.stringutils.to_bytes(contents))
try:
differences = get_diff(
real_name,
tmp,
show_filenames=False,
show_changes=show_changes,
template=True,
)
except CommandExecutionError as exc:
ret.setdefault("warnings", []).append(
"Failed to detect changes to file: {}".format(exc.strerror)
)
differences = ""
if differences:
ret["changes"]["diff"] = differences
# Pre requisites are met, the file needs to be replaced, do it
try:
salt.utils.files.copyfile(
tmp,
real_name,
__salt__["config.backup_mode"](backup),
__opts__["cachedir"],
)
except OSError as io_error:
__clean_tmp(tmp)
return _error(ret, "Failed to commit change: {}".format(io_error))
__clean_tmp(tmp)
# Check for changing symlink to regular file here
if os.path.islink(name) and not follow_symlinks:
if not sfn:
sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
if not sfn:
return _error(ret, "Source file '{}' not found".format(source))
# If the downloaded file came from a non salt server source verify
# that it matches the intended sum value
if not skip_verify and urllib.parse.urlparse(source).scheme != "salt":
dl_sum = get_hash(sfn, source_sum["hash_type"])
if dl_sum != source_sum["hsum"]:
ret["comment"] = (
"Specified {} checksum for {} ({}) does not match "
"actual checksum ({})".format(
source_sum["hash_type"], name, source_sum["hsum"], dl_sum
)
)
ret["result"] = False
return ret
try:
salt.utils.files.copyfile(
sfn,
name,
__salt__["config.backup_mode"](backup),
__opts__["cachedir"],
)
except OSError as io_error:
__clean_tmp(sfn)
return _error(ret, "Failed to commit change: {}".format(io_error))
ret["changes"]["diff"] = "Replace symbolic link with regular file"
if salt.utils.platform.is_windows():
# This function resides in win_file.py and will be available
# on Windows. The local function will be overridden
# pylint: disable=E1120,E1121,E1123
ret = check_perms(
path=name,
ret=ret,
owner=kwargs.get("win_owner"),
grant_perms=kwargs.get("win_perms"),
deny_perms=kwargs.get("win_deny_perms"),
inheritance=kwargs.get("win_inheritance", True),
reset=kwargs.get("win_perms_reset", False),
)
# pylint: enable=E1120,E1121,E1123
else:
ret, _ = check_perms(
name,
ret,
user,
group,
mode,
attrs,
follow_symlinks,
seuser=seuser,
serole=serole,
setype=setype,
serange=serange,
)
if ret["changes"]:
ret["comment"] = "File {} updated".format(salt.utils.data.decode(name))
elif not ret["changes"] and ret["result"]:
ret["comment"] = "File {} is in the correct state".format(
salt.utils.data.decode(name)
)
if sfn:
__clean_tmp(sfn)
return ret
else: # target file does not exist
contain_dir = os.path.dirname(name)
def _set_mode_and_make_dirs(name, dir_mode, mode, user, group):
# check for existence of windows drive letter
if salt.utils.platform.is_windows():
drive, _ = os.path.splitdrive(name)
if drive and not os.path.exists(drive):
__clean_tmp(sfn)
return _error(ret, "{} drive not present".format(drive))
if dir_mode is None and mode is not None:
# Add execute bit to each nonzero digit in the mode, if
# dir_mode was not specified. Otherwise, any
# directories created with makedirs_() below can't be
mode_list = [x for x in str(mode)][-3:]
for idx, part in enumerate(mode_list):
if part != "0":
mode_list[idx] = str(int(part) | 1)
dir_mode = "".join(mode_list)
if salt.utils.platform.is_windows():
makedirs_(
path=name,
owner=kwargs.get("win_owner"),
grant_perms=kwargs.get("win_perms"),
deny_perms=kwargs.get("win_deny_perms"),
inheritance=kwargs.get("win_inheritance", True),
reset=kwargs.get("win_perms_reset", False),
)
else:
makedirs_(name, user=user, group=group, mode=dir_mode)
if source:
if not sfn:
sfn = __salt__["cp.cache_file"](source, saltenv, verify_ssl=verify_ssl)
if not sfn:
return _error(ret, "Source file '{}' not found".format(source))
if not skip_verify and urllib.parse.urlparse(source).scheme != "salt":
dl_sum = get_hash(sfn, source_sum["hash_type"])
if dl_sum != source_sum["hsum"]:
ret["comment"] = (
"Specified {} checksum for {} ({}) does not match "
"actual checksum ({})".format(
source_sum["hash_type"], name, source_sum["hsum"], dl_sum
)
)
ret["result"] = False
return ret
ret["changes"]["diff"] = "New file"
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
ret["changes"].pop("diff", None)
return _error(ret, "Parent directory not present")
else:
if not os.path.isdir(contain_dir):
if makedirs:
_set_mode_and_make_dirs(name, dir_mode, mode, user, group)
else:
__clean_tmp(sfn)
ret["changes"].pop("diff", None)
return _error(ret, "Parent directory not present")
with salt.utils.files.set_umask(0o077 if mode else None):
if contents is None:
if not __opts__["test"]:
if touch(name):
ret["changes"]["new"] = "file {} created".format(name)
ret["comment"] = "Empty file"
else:
return _error(ret, "Empty file {} not created".format(name))
else:
if not __opts__["test"]:
if touch(name):
ret["changes"]["diff"] = "New file"
else:
return _error(ret, "File {} not created".format(name))
if contents is not None:
tmp = salt.utils.files.mkstemp(
prefix=salt.utils.files.TEMPFILE_PREFIX, text=True
)
with salt.utils.files.fopen(tmp, "wb") as tmp_:
if encoding:
if salt.utils.platform.is_windows():
contents = os.linesep.join(
_splitlines_preserving_trailing_newline(contents)
)
log.debug("File will be encoded with %s", encoding)
tmp_.write(
contents.encode(encoding=encoding, errors=encoding_errors)
)
else:
tmp_.write(salt.utils.stringutils.to_bytes(contents))
salt.utils.files.copyfile(
tmp, name, __salt__["config.backup_mode"](backup), __opts__["cachedir"]
)
__clean_tmp(tmp)
elif sfn:
salt.utils.files.copyfile(
sfn, name, __salt__["config.backup_mode"](backup), __opts__["cachedir"]
)
__clean_tmp(sfn)
if mode is None and not salt.utils.platform.is_windows():
mask = salt.utils.files.get_umask()
mode = oct((0o777 ^ mask) & 0o666)
if salt.utils.platform.is_windows():
ret = check_perms(
path=name,
ret=ret,
owner=kwargs.get("win_owner"),
grant_perms=kwargs.get("win_perms"),
deny_perms=kwargs.get("win_deny_perms"),
inheritance=kwargs.get("win_inheritance", True),
reset=kwargs.get("win_perms_reset", False),
)
else:
ret, _ = check_perms(
name,
ret,
user,
group,
mode,
attrs,
seuser=seuser,
serole=serole,
setype=setype,
serange=serange,
)
if not ret["comment"]:
ret["comment"] = "File " + name + " updated"
if __opts__["test"]:
ret["comment"] = "File " + name + " not updated"
elif not ret["changes"] and ret["result"]:
ret["comment"] = "File " + name + " is in the correct state"
if sfn:
__clean_tmp(sfn)
return ret
def mkdir(dir_path, user=None, group=None, mode=None):
    """Ensure that a directory is available, creating it (with the requested
    ownership and mode) via ``makedirs_perms`` when it does not yet exist.

    Always returns ``True``.
    """
    target = os.path.normpath(os.path.expanduser(dir_path))
    if os.path.isdir(target):
        return True
    makedirs_perms(target, user, group, mode)
    return True
def makedirs_(path, user=None, group=None, mode=None):
    """Ensure that the *parent* directory of ``path`` exists, creating every
    missing ancestor with the given user/group/mode.

    Returns a descriptive message when nothing needed to be created,
    otherwise ``None``. Raises ``SaltInvocationError`` when walking up the
    path never reaches an existing directory (e.g. a bad relative path).
    """
    path = os.path.expanduser(path)

    if mode:
        mode = salt.utils.files.normalize_mode(mode)

    dirname = os.path.normpath(os.path.dirname(path))
    if os.path.isdir(dirname):
        msg = "Directory '{}' already exists".format(dirname)
        log.debug(msg)
        return msg
    if os.path.exists(dirname):
        msg = "The path '{}' already exists and is not a directory".format(dirname)
        log.debug(msg)
        return msg

    # Walk upwards, collecting every missing ancestor directory.
    missing = []
    while not os.path.isdir(dirname):
        missing.append(dirname)
        parent = os.path.dirname(dirname)
        if parent == dirname:
            # os.path.dirname() stopped making progress -> we would loop forever.
            raise SaltInvocationError(
                "Recursive creation for path '{}' would result in an "
                "infinite loop. Please use an absolute path.".format(dirname)
            )
        dirname = parent

    # Create parent directories from the topmost to the most deeply nested
    # one; each gets the requested user, group and mode.
    for directory_to_create in reversed(missing):
        log.debug("Creating directory: %s", directory_to_create)
        mkdir(directory_to_create, user=user, group=group, mode=mode)
def makedirs_perms(name, user=None, group=None, mode="0755"):
    """Recursively create ``name`` (mirroring ``os.makedirs``) and apply the
    given ownership and mode to every directory created along the way via
    ``check_perms``.
    """
    name = os.path.expanduser(name)
    head, tail = os.path.split(name)
    if not tail:
        # ``name`` ended in a separator; split once more to get a real tail.
        head, tail = os.path.split(head)
    if head and tail and not os.path.exists(head):
        try:
            # Create the ancestors first.
            makedirs_perms(head, user, group, mode)
        except OSError as exc:
            # Be happy if someone else already created the path meanwhile.
            if exc.errno != errno.EEXIST:
                raise
        if tail == os.curdir:
            # xxx/newdir/. exists if xxx/newdir exists
            return
    os.mkdir(name)
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
def get_devmm(name):
    """Return the ``(major, minor)`` device numbers of ``name``; ``(0, 0)``
    when the path is not a character or block device.
    """
    name = os.path.expanduser(name)
    if not (is_chrdev(name) or is_blkdev(name)):
        return (0, 0)
    rdev = os.stat(name).st_rdev
    return (os.major(rdev), os.minor(rdev))
def is_chrdev(name):
    """Return ``True`` if ``name`` exists and is a character device."""
    name = os.path.expanduser(name)
    try:
        st_mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # A path that does not exist cannot be a character device.
            return False
        raise
    return stat.S_ISCHR(st_mode)
def mknod_chrdev(name, major, minor, user=None, group=None, mode="0660"):
    """Create a character device node at ``name`` with the given major/minor
    numbers, then verify its ownership and permissions.

    Returns a state-style result dict; in test mode nothing is created and
    ``result`` is ``None``.
    """
    name = os.path.expanduser(name)
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    log.debug(
        "Creating character device name:%s major:%s minor:%s mode:%s",
        name,
        major,
        minor,
        mode,
    )
    try:
        if __opts__["test"]:
            ret["changes"] = {"new": "Character device {} created.".format(name)}
            ret["result"] = None
        else:
            device = os.makedev(major, minor)
            # Interpret the octal mode string and tag it as a char device.
            perms = int(str(mode).lstrip("0Oo"), 8) | stat.S_IFCHR
            if os.mknod(name, perms, device) is None:
                ret["changes"] = {"new": "Character device {} created.".format(name)}
                ret["result"] = True
    except OSError as exc:
        # An existing node is tolerated; to change the major/minor the node
        # must be unlinked first, as os.mknod will not overwrite.
        if exc.errno != errno.EEXIST:
            raise
        ret["comment"] = "File {} exists and cannot be overwritten".format(name)
    # Quick pass at verifying the permissions of the newly created device.
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
    return ret
def is_blkdev(name):
    """Return ``True`` if ``name`` exists and is a block device."""
    name = os.path.expanduser(name)
    try:
        st_mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # A missing path cannot be a block device.
            return False
        raise
    return stat.S_ISBLK(st_mode)
def mknod_blkdev(name, major, minor, user=None, group=None, mode="0660"):
    """Create a block device node at ``name`` with the given major/minor
    numbers, then verify its ownership and permissions.

    Returns a state-style result dict; in test mode nothing is created and
    ``result`` is ``None``.
    """
    name = os.path.expanduser(name)
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    log.debug(
        "Creating block device name:%s major:%s minor:%s mode:%s",
        name,
        major,
        minor,
        mode,
    )
    try:
        if __opts__["test"]:
            ret["changes"] = {"new": "Block device {} created.".format(name)}
            ret["result"] = None
        else:
            device = os.makedev(major, minor)
            # Interpret the octal mode string and tag it as a block device.
            perms = int(str(mode).lstrip("0Oo"), 8) | stat.S_IFBLK
            if os.mknod(name, perms, device) is None:
                ret["changes"] = {"new": "Block device {} created.".format(name)}
                ret["result"] = True
    except OSError as exc:
        # An existing node is tolerated; to change the major/minor the node
        # must be unlinked first, as os.mknod will not overwrite.
        if exc.errno != errno.EEXIST:
            raise
        ret["comment"] = "File {} exists and cannot be overwritten".format(name)
    # Quick pass at verifying the permissions of the newly created device.
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
    return ret
def is_fifo(name):
    """Return ``True`` if ``name`` exists and is a FIFO (named pipe)."""
    name = os.path.expanduser(name)
    try:
        st_mode = os.stat(name).st_mode
    except OSError as exc:
        if exc.errno == errno.ENOENT:
            # A missing path cannot be a FIFO.
            return False
        raise
    return stat.S_ISFIFO(st_mode)
def mknod_fifo(name, user=None, group=None, mode="0660"):
    """Create a FIFO (named pipe) at ``name`` and verify its permissions.

    Returns a state-style result dict; in test mode nothing is created and
    ``result`` is ``None``.
    """
    name = os.path.expanduser(name)
    ret = {"name": name, "changes": {}, "comment": "", "result": False}
    log.debug("Creating FIFO name: %s", name)
    try:
        if __opts__["test"]:
            ret["changes"] = {"new": "Fifo pipe {} created.".format(name)}
            ret["result"] = None
        elif os.mkfifo(name, int(str(mode).lstrip("0Oo"), 8)) is None:
            ret["changes"] = {"new": "Fifo pipe {} created.".format(name)}
            ret["result"] = True
    except OSError as exc:
        # An already-existing FIFO is acceptable; anything else is re-raised.
        if exc.errno != errno.EEXIST:
            raise
        ret["comment"] = "File {} exists and cannot be overwritten".format(name)
    # Quick pass at verifying the permissions of the newly created fifo.
    check_perms(name, None, user, group, int("{}".format(mode)) if mode else None)
    return ret
def mknod(name, ntype, major=0, minor=0, user=None, group=None, mode="0600"):
    """Create a special file node of type ``ntype``: ``'c'`` for a character
    device, ``'b'`` for a block device, or ``'p'`` for a FIFO pipe.

    Parent directories are created first. Returns the result dict of the
    type-specific helper; raises ``SaltInvocationError`` for any other type.
    """
    makedirs_(name, user, group)
    if ntype == "c":
        return mknod_chrdev(name, major, minor, user, group, mode)
    if ntype == "b":
        return mknod_blkdev(name, major, minor, user, group, mode)
    if ntype == "p":
        return mknod_fifo(name, user, group, mode)
    raise SaltInvocationError(
        "Node type unavailable: '{}'. Available node types are "
        "character ('c'), block ('b'), and pipe ('p').".format(ntype)
    )
def list_backups(path, limit=None):
    """List the previous versions of ``path`` stored in the minion backup
    cache, keyed 0..N with the newest backup first; optionally capped at
    ``limit`` entries.
    """
    path = os.path.expanduser(path)
    try:
        limit = int(limit)
    except TypeError:
        # limit was None -> unbounded
        pass
    except ValueError:
        log.error("file.list_backups: 'limit' value must be numeric")
        limit = None

    parent_dir, basename = os.path.split(path)
    if salt.utils.platform.is_windows():
        # ':' is an illegal filesystem path character on Windows
        src_dir = parent_dir.replace(":", "_")
    else:
        src_dir = parent_dir[1:]
    # Full path of the backup location inside the minion cache.
    bkdir = os.path.join(_get_bkroot(), src_dir)

    if not os.path.isdir(bkdir):
        return {}

    # Cached names embed a timestamp; Windows cannot use ':' in filenames.
    if salt.utils.platform.is_windows():
        strpfmt = "{}_%a_%b_%d_%H-%M-%S_%f_%Y".format(basename)
        str_format = "%a %b %d %Y %H-%M-%S.%f"
    else:
        strpfmt = "{}_%a_%b_%d_%H:%M:%S_%f_%Y".format(basename)
        str_format = "%a %b %d %Y %H:%M:%S.%f"

    files = {}
    for fname in os.listdir(bkdir):
        location = os.path.join(bkdir, fname)
        if not os.path.isfile(location):
            continue
        try:
            timestamp = datetime.datetime.strptime(fname, strpfmt)
        except ValueError:
            # Name didn't match the strptime pattern, so it is not a backup
            # of this file. Move on to the next one.
            continue
        files[timestamp] = {
            "Backup Time": timestamp.strftime(str_format),
            "Size": os.stat(location).st_size,
            "Location": location,
        }

    newest_first = [files[ts] for ts in sorted(files, reverse=True)[:limit]]
    return dict(enumerate(newest_first))
# Keep the singular ``list_backup`` name available as an alias.
list_backup = salt.utils.functools.alias_function(list_backups, "list_backup")
def list_backups_dir(path, limit=None):
    """List the previous versions of a directory stored in the minion backup
    cache, grouped by backed-up name and keyed newest-first.

    path
        The directory whose cached backups should be listed.
    limit
        Maximum number of backups reported per backed-up name.
    """
    path = os.path.expanduser(path)
    try:
        limit = int(limit)
    except TypeError:
        # limit was None -> unbounded
        pass
    except ValueError:
        log.error("file.list_backups_dir: 'limit' value must be numeric")
        limit = None
    bkroot = _get_bkroot()
    parent_dir, basename = os.path.split(path)
    # Figure out full path of location of backup folder in minion cache
    bkdir = os.path.join(bkroot, parent_dir[1:])
    if not os.path.isdir(bkdir):
        return {}
    files = {}
    # Cached names look like "<name>_<timestamp>": count how many backups
    # exist per distinct name (groupby works because the listing is sorted).
    f = {
        i: len(list(n))
        for i, n in itertools.groupby(
            [x.split("_")[0] for x in sorted(os.listdir(bkdir))]
        )
    }
    ff = os.listdir(bkdir)
    for i, n in f.items():
        ssfile = {}
        for x in sorted(ff):
            # NOTE: intentionally shadows the outer ``basename`` from the
            # os.path.split above; only the per-entry name is used below.
            basename = x.split("_")[0]
            if i == basename:
                strpfmt = "{}_%a_%b_%d_%H:%M:%S_%f_%Y".format(basename)
                try:
                    timestamp = datetime.datetime.strptime(x, strpfmt)
                except ValueError:
                    # Folder didn't match the strp format string, so it's not a backup
                    # for this folder. Move on to the next one.
                    continue
                ssfile.setdefault(timestamp, {})["Backup Time"] = timestamp.strftime(
                    "%a %b %d %Y %H:%M:%S.%f"
                )
                location = os.path.join(bkdir, x)
                ssfile[timestamp]["Size"] = os.stat(location).st_size
                ssfile[timestamp]["Location"] = location
        # Re-key this name's backups 0..n-1, newest first, capped at ``limit``.
        sfiles = dict(
            list(
                zip(
                    list(range(n)),
                    [ssfile[x] for x in sorted(ssfile, reverse=True)[:limit]],
                )
            )
        )
        sefiles = {i: sfiles}
        files.update(sefiles)
    return files
def restore_backup(path, backup_id):
    """Restore backup number ``backup_id`` of ``path`` from the minion cache,
    backing up the current file first.

    Returns a dict with ``result`` and ``comment`` keys.
    """
    path = os.path.expanduser(path)

    # Note: This only supports minion backups, so this function will need to
    # be modified if/when master backups are implemented.
    ret = {"result": False, "comment": "Invalid backup_id '{}'".format(backup_id)}
    try:
        # Reject ids that are not plain integers (e.g. '07', True, floats).
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        backup = list_backups(path)[int(backup_id)]
    except ValueError:
        return ret
    except KeyError:
        ret["comment"] = "backup_id '{}' does not exist for {}".format(backup_id, path)
        return ret

    # Preserve the file we are about to overwrite.
    salt.utils.files.backup_minion(path, _get_bkroot())
    try:
        shutil.copyfile(backup["Location"], path)
    except OSError as exc:
        ret["comment"] = "Unable to restore {} to {}: {}".format(
            backup["Location"], path, exc
        )
        return ret

    ret["result"] = True
    ret["comment"] = "Successfully restored {} to {}".format(backup["Location"], path)

    # Try to set proper ownership
    if not salt.utils.platform.is_windows():
        try:
            fstat = os.stat(path)
        except OSError:
            ret["comment"] += ", but was unable to set ownership"
        else:
            os.chown(path, fstat.st_uid, fstat.st_gid)
    return ret
def delete_backup(path, backup_id):
    """Delete backup number ``backup_id`` of ``path`` from the minion cache.

    Returns a dict with ``result`` and ``comment`` keys.
    """
    path = os.path.expanduser(path)
    ret = {"result": False, "comment": "Invalid backup_id '{}'".format(backup_id)}
    try:
        # Reject ids that are not plain integers (e.g. '07', True, floats).
        if len(str(backup_id)) != len(str(int(backup_id))):
            return ret
        backup = list_backups(path)[int(backup_id)]
    except ValueError:
        return ret
    except KeyError:
        ret["comment"] = "backup_id '{}' does not exist for {}".format(backup_id, path)
        return ret

    try:
        os.remove(backup["Location"])
    except OSError as exc:
        ret["comment"] = "Unable to remove {}: {}".format(backup["Location"], exc)
    else:
        ret["result"] = True
        ret["comment"] = "Successfully removed {}".format(backup["Location"])
    return ret
# Keep ``remove_backup`` available as an alias of ``delete_backup``.
remove_backup = salt.utils.functools.alias_function(delete_backup, "remove_backup")
def grep(path, pattern, *opts):
    """Run ``grep`` for ``pattern`` over ``path`` (shell globs allowed) and
    return the ``cmd.run_all`` result dict.

    Each extra option must be passed as its own argument; a single string
    containing several options raises ``SaltInvocationError``.
    """
    path = os.path.expanduser(path)

    # Keep the raw path around: if the glob matches nothing we fall back to
    # the original string so grep itself reports the missing file.
    unexpanded = path
    path = glob.glob(path) or unexpanded

    split_opts = []
    for opt in opts:
        try:
            split = salt.utils.args.shlex_split(opt)
        except AttributeError:
            # Non-string option (e.g. an int such as -1); stringify first.
            split = salt.utils.args.shlex_split(str(opt))
        if len(split) > 1:
            raise SaltInvocationError(
                "Passing multiple command line arguments in a single string "
                "is not supported, please pass the following arguments "
                "separately: {}".format(opt)
            )
        split_opts.extend(split)

    cmd = ["grep"] + split_opts + [pattern]
    if isinstance(path, list):
        cmd += path
    else:
        cmd.append(path)

    try:
        return __salt__["cmd.run_all"](cmd, python_shell=False)
    except OSError as exc:
        raise CommandExecutionError(exc.strerror)
def open_files(by_pid=False):
    """Report files currently held open, built by walking ``/proc`` (so this
    only works on systems with a Linux-style procfs).

    by_pid
        When ``False`` (default) return ``{filename: [pids, ...]}``; when
        ``True`` return ``{pid: [filenames, ...]}``.
    """
    # First we collect valid PIDs
    pids = {}
    procfs = os.listdir("/proc/")
    for pfile in procfs:
        try:
            # Only numeric entries under /proc are process directories.
            pids[int(pfile)] = []
        except ValueError:
            # Not a valid PID, move on
            pass
    # Then we look at the open files for each PID
    files = {}
    for pid in pids:
        ppath = "/proc/{}".format(pid)
        try:
            tids = os.listdir("{}/task".format(ppath))
        except OSError:
            # The process exited while we were iterating; skip it.
            continue
        # Collect the names of all of the file descriptors
        fd_ = []
        # try:
        #     fd_.append(os.path.realpath('{0}/task/{1}exe'.format(ppath, tid)))
        # except Exception:  # pylint: disable=broad-except
        #     pass
        for fpath in os.listdir("{}/fd".format(ppath)):
            fd_.append("{}/fd/{}".format(ppath, fpath))
        for tid in tids:
            try:
                # Include each thread's executable path as well.
                fd_.append(os.path.realpath("{}/task/{}/exe".format(ppath, tid)))
            except OSError:
                continue
            for tpath in os.listdir("{}/task/{}/fd".format(ppath, tid)):
                fd_.append("{}/task/{}/fd/{}".format(ppath, tid, tpath))
        fd_ = sorted(set(fd_))
        # Loop through file descriptors and return useful data for each file
        for fdpath in fd_:
            # Sometimes PIDs and TIDs disappear before we can query them
            try:
                name = os.path.realpath(fdpath)
                # Running stat on the file cuts out all of the sockets and
                # deleted files from the list
                os.stat(name)
            except OSError:
                continue
            if name not in files:
                files[name] = [pid]
            else:
                # We still want to know which PIDs are using each file
                files[name].append(pid)
                files[name] = sorted(set(files[name]))
            pids[pid].append(name)
            pids[pid] = sorted(set(pids[pid]))
    if by_pid:
        return pids
    return files
def pardir():
    """Return the platform's parent-directory string (``..`` on POSIX)."""
    parent = os.path.pardir
    return parent
def normpath(path):
    """Collapse redundant separators and up-level references in ``path``."""
    normalized = os.path.normpath(path)
    return normalized
def basename(path):
    """Return the final component of ``path``."""
    tail = os.path.basename(path)
    return tail
def dirname(path):
    """Return the directory component of ``path``."""
    head = os.path.dirname(path)
    return head
def join(*args):
    """Join path components with the platform's separator."""
    joined = os.path.join(*args)
    return joined
def move(src, dst):
    """Move ``src`` to ``dst``; both must be absolute paths.

    Returns ``{'result': True, 'comment': ...}`` on success; raises
    ``SaltInvocationError`` for relative paths and ``CommandExecutionError``
    when the move itself fails.
    """
    src = os.path.expanduser(src)
    dst = os.path.expanduser(dst)

    if not os.path.isabs(src):
        raise SaltInvocationError("Source path must be absolute.")
    if not os.path.isabs(dst):
        raise SaltInvocationError("Destination path must be absolute.")

    try:
        shutil.move(src, dst)
    except OSError as exc:
        raise CommandExecutionError(
            "Unable to move '{}' to '{}': {}".format(src, dst, exc)
        )
    return {
        "result": True,
        "comment": "'{}' moved to '{}'".format(src, dst),
    }
def diskusage(path):
    """Return the total size in bytes of ``path``: the file's own size for a
    regular file, otherwise the recursive size of the tree, counting each
    inode at most once so hard links are not double-counted.
    """
    if os.path.isfile(path):
        return os.stat(path).st_size

    seen_inodes = set()
    total_size = 0
    for dirpath, dirnames, filenames in salt.utils.path.os_walk(path):
        for fname in filenames:
            fpath = os.path.join(dirpath, fname)
            try:
                st = os.stat(fpath)
            except OSError:
                # The file vanished between listing and stat; skip it.
                continue
            if st.st_ino in seen_inodes:
                # Hard link to something already counted.
                continue
            seen_inodes.add(st.st_ino)
            total_size += st.st_size
    return total_size
| true
| true
|
1c425a40a9b397cb0dd09cd767264aa4a99f84b9
| 1,467
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 8
|
2016-08-15T07:02:27.000Z
|
2016-08-24T09:34:00.000Z
|
python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 2
|
2019-07-26T04:06:05.000Z
|
2019-07-29T04:25:24.000Z
|
python/paddle/fluid/tests/unittests/test_parallel_dygraph_sparse_embedding_diff_length_gloo.py
|
zmxdream/Paddle
|
04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c
|
[
"Apache-2.0"
] | 5
|
2021-12-10T11:20:06.000Z
|
2022-02-18T05:18:12.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import sys
import unittest
import paddle.fluid as fluid
from test_dist_base import TestDistBase
from spawn_runner_base import TestDistSpawnRunner
from parallel_dygraph_sparse_embedding import TestSparseEmbedding
from parallel_dygraph_sparse_embedding_fp64 import TestSparseEmbeddingFP64
flag_name = os.path.splitext(__file__)[0]
class TestParallelDygraphSparseEmdedding_GLOO(TestDistBase):
    """Parallel dygraph sparse-embedding test run over the GLOO (CPU)
    backend, with workers receiving batches of differing lengths."""

    def _setup_config(self):
        # Flags consumed by the TestDistBase harness before launching workers.
        self._sync_mode = False
        self._gloo_mode = True
        self._dygraph = True
        self._diff_batch = True

    def test_sparse_embedding(self):
        # Run the distributed script and compare results within 1e-5.
        self.check_with_place(
            "parallel_dygraph_sparse_embedding.py",
            delta=1e-5,
            check_error_log=True,
            log_name=flag_name)
if __name__ == "__main__":
unittest.main()
| 31.212766
| 74
| 0.754601
|
from __future__ import print_function
import os
import sys
import unittest
import paddle.fluid as fluid
from test_dist_base import TestDistBase
from spawn_runner_base import TestDistSpawnRunner
from parallel_dygraph_sparse_embedding import TestSparseEmbedding
from parallel_dygraph_sparse_embedding_fp64 import TestSparseEmbeddingFP64
flag_name = os.path.splitext(__file__)[0]
class TestParallelDygraphSparseEmdedding_GLOO(TestDistBase):
def _setup_config(self):
self._sync_mode = False
self._gloo_mode = True
self._dygraph = True
self._diff_batch = True
def test_sparse_embedding(self):
self.check_with_place(
"parallel_dygraph_sparse_embedding.py",
delta=1e-5,
check_error_log=True,
log_name=flag_name)
if __name__ == "__main__":
unittest.main()
| true
| true
|
1c425c1bd579d9f50c7d9c87a59f22ab4798c088
| 21,137
|
py
|
Python
|
vispy/visuals/image.py
|
asnt/vispy
|
e515b00de7086527070b3e51e1133f8c1c5ca165
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/visuals/image.py
|
asnt/vispy
|
e515b00de7086527070b3e51e1133f8c1c5ca165
|
[
"BSD-3-Clause"
] | null | null | null |
vispy/visuals/image.py
|
asnt/vispy
|
e515b00de7086527070b3e51e1133f8c1c5ca165
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
"""Primitive 2D image visual class."""
from __future__ import division
import numpy as np
from ..gloo import Texture2D, VertexBuffer
from ..gloo.texture import should_cast_to_f32
from ..color import get_colormap
from .shaders import Function, FunctionChain
from .transforms import NullTransform
from .visual import Visual
from ..io import load_spatial_filters
from ._scalable_textures import CPUScaledTexture2D, GPUScaledTexture2D
VERT_SHADER = """
uniform int method; // 0=subdivide, 1=impostor
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = $transform(vec4(a_position, 0., 1.));
}
"""
FRAG_SHADER = """
uniform vec2 image_size;
uniform int method; // 0=subdivide, 1=impostor
uniform sampler2D u_texture;
varying vec2 v_texcoord;
vec4 map_local_to_tex(vec4 x) {
// Cast ray from 3D viewport to surface of image
// (if $transform does not affect z values, then this
// can be optimized as simply $transform.map(x) )
vec4 p1 = $transform(x);
vec4 p2 = $transform(x + vec4(0, 0, 0.5, 0));
p1 /= p1.w;
p2 /= p2.w;
vec4 d = p2 - p1;
float f = p2.z / d.z;
vec4 p3 = p2 - d * f;
// finally map local to texture coords
return vec4(p3.xy / image_size, 0, 1);
}
void main()
{
vec2 texcoord;
if( method == 0 ) {
texcoord = v_texcoord;
}
else {
// vertex shader outputs clip coordinates;
// fragment shader maps to texture coordinates
texcoord = map_local_to_tex(vec4(v_texcoord, 0, 1)).xy;
}
gl_FragColor = $color_transform($get_data(texcoord));
}
""" # noqa
_interpolation_template = """
#include "misc/spatial-filters.frag"
vec4 texture_lookup_filtered(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return %s($texture, $shape, texcoord);
}"""
_texture_lookup = """
vec4 texture_lookup(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return texture2D($texture, texcoord);
}"""
_apply_clim_float = """
float apply_clim(float data) {
data = clamp(data, min($clim.x, $clim.y), max($clim.x, $clim.y));
data = (data - $clim.x) / ($clim.y - $clim.x);
return data;
}"""
_apply_clim = """
vec4 apply_clim(vec4 color) {
if ($clim.x < $clim.y) {{
color.rgb = clamp(color.rgb, $clim.x, $clim.y);
}} else if ($clim.x > $clim.y) {{
color.rgb = clamp(color.rgb, $clim.y, $clim.x);
}} else {{
// clims are the same, show minimum colormap value
return vec4(0.0, 0.0, 0.0, 1.0);
}}
color.rgb = color.rgb - $clim.x;
color.rgb = color.rgb / ($clim.y - $clim.x);
return max(color, 0);
}
"""
_apply_gamma_float = """
float apply_gamma(float data) {
return pow(data, $gamma);
}"""
_apply_gamma = """
vec4 apply_gamma(vec4 color) {
color.rgb = pow(color.rgb, vec3($gamma));
return color;
}
"""
_null_color_transform = 'vec4 pass(vec4 color) { return color; }'
_c2l_red = 'float cmap(vec4 color) { return color.r; }'
class ImageVisual(Visual):
"""Visual subclass displaying an image.
Parameters
----------
data : ndarray
ImageVisual data. Can be shape (M, N), (M, N, 3), or (M, N, 4).
method : str
Selects method of rendering image in case of non-linear transforms.
Each method produces similar results, but may trade efficiency
and accuracy. If the transform is linear, this parameter is ignored
and a single quad is drawn around the area of the image.
* 'auto': Automatically select 'impostor' if the image is drawn
with a nonlinear transform; otherwise select 'subdivide'.
* 'subdivide': ImageVisual is represented as a grid of triangles
with texture coordinates linearly mapped.
* 'impostor': ImageVisual is represented as a quad covering the
entire view, with texture coordinates determined by the
transform. This produces the best transformation results, but may
be slow.
grid: tuple (rows, cols)
If method='subdivide', this tuple determines the number of rows and
columns in the image grid.
cmap : str | ColorMap
Colormap to use for luminance images.
clim : str | tuple
Limits to use for the colormap. I.e. the values that map to black and white
in a gray colormap. Can be 'auto' to auto-set bounds to
the min and max of the data. If not given or None, 'auto' is used.
gamma : float
Gamma to use during colormap lookup. Final color will be cmap(val**gamma).
by default: 1.
interpolation : str
Selects method of image interpolation. Makes use of the two Texture2D
interpolation methods and the available interpolation methods defined
in vispy/gloo/glsl/misc/spatial_filters.frag
* 'nearest': Default, uses 'nearest' with Texture2D interpolation.
* 'bilinear': uses 'linear' with Texture2D interpolation.
* 'hanning', 'hamming', 'hermite', 'kaiser', 'quadric', 'bicubic',
'catrom', 'mitchell', 'spline16', 'spline36', 'gaussian',
'bessel', 'sinc', 'lanczos', 'blackman'
texture_format: numpy.dtype | str | None
How to store data on the GPU. OpenGL allows for many different storage
formats and schemes for the low-level texture data stored in the GPU.
Most common is unsigned integers or floating point numbers.
Unsigned integers are the most widely supported while other formats
may not be supported on older versions of OpenGL, WebGL
(without enabling some extensions), or with older GPUs.
Default value is ``None`` which means data will be scaled on the
CPU and the result stored in the GPU as an unsigned integer. If a
numpy dtype object, an internal texture format will be chosen to
support that dtype and data will *not* be scaled on the CPU. Not all
dtypes are supported. If a string, then
it must be one of the OpenGL internalformat strings described in the
table on this page: https://www.khronos.org/registry/OpenGL-Refpages/gl4/html/glTexImage2D.xhtml
The name should have `GL_` removed and be lowercase (ex.
`GL_R32F` becomes ``'r32f'``). Lastly, this can also be the string
``'auto'`` which will use the data type of the provided image data
to determine the internalformat of the texture.
When this is specified (not ``None``) data is scaled on the
GPU which allows for faster color limit changes. Additionally, when
32-bit float data is provided it won't be copied before being
transferred to the GPU.
**kwargs : dict
Keyword arguments to pass to `Visual`.
Notes
-----
The colormap functionality through ``cmap`` and ``clim`` are only used
if the data are 2D.
"""
VERTEX_SHADER = VERT_SHADER
FRAGMENT_SHADER = FRAG_SHADER
def __init__(self, data=None, method='auto', grid=(1, 1),
cmap='viridis', clim='auto', gamma=1.0,
interpolation='nearest', texture_format=None, **kwargs):
"""Initialize image properties, texture storage, and interpolation methods."""
self._data = None
self._gamma = gamma
# load 'float packed rgba8' interpolation kernel
# to load float interpolation kernel use
# `load_spatial_filters(packed=False)`
kernel, interpolation_names = load_spatial_filters()
self._kerneltex = Texture2D(kernel, interpolation='nearest')
# The unpacking can be debugged by changing "spatial-filters.frag"
# to have the "unpack" function just return the .r component. That
# combined with using the below as the _kerneltex allows debugging
# of the pipeline
# self._kerneltex = Texture2D(kernel, interpolation='linear',
# internalformat='r32f')
interpolation_names, interpolation_fun = self._init_interpolation(
interpolation_names)
self._interpolation_names = interpolation_names
self._interpolation_fun = interpolation_fun
self._interpolation = interpolation
if self._interpolation not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
# check texture interpolation
if self._interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
texture_interpolation = 'nearest'
self._method = method
self._grid = grid
self._need_texture_upload = True
self._need_vertex_update = True
self._need_colortransform_update = True
self._need_interpolation_update = True
if texture_format is None:
self._texture = CPUScaledTexture2D(
data, interpolation=texture_interpolation)
else:
self._texture = GPUScaledTexture2D(
data, internalformat=texture_format,
interpolation=texture_interpolation)
self._subdiv_position = VertexBuffer()
self._subdiv_texcoord = VertexBuffer()
# impostor quad covers entire viewport
vertices = np.array([[-1, -1], [1, -1], [1, 1],
[-1, -1], [1, 1], [-1, 1]],
dtype=np.float32)
self._impostor_coords = VertexBuffer(vertices)
self._null_tr = NullTransform()
self._init_view(self)
super(ImageVisual, self).__init__(vcode=self.VERTEX_SHADER, fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent', cull_face=False)
self._draw_mode = 'triangles'
# define _data_lookup_fn as None, will be setup in
# self._build_interpolation()
self._data_lookup_fn = None
self.clim = clim or "auto" # None -> "auto"
self.cmap = cmap
if data is not None:
self.set_data(data)
self.freeze()
@staticmethod
def _init_interpolation(interpolation_names):
# create interpolation shader functions for available
# interpolations
fun = [Function(_interpolation_template % n)
for n in interpolation_names]
interpolation_names = [n.lower() for n in interpolation_names]
interpolation_fun = dict(zip(interpolation_names, fun))
interpolation_names = tuple(sorted(interpolation_names))
# overwrite "nearest" and "bilinear" spatial-filters
# with "hardware" interpolation _data_lookup_fn
interpolation_fun['nearest'] = Function(_texture_lookup)
interpolation_fun['bilinear'] = Function(_texture_lookup)
return interpolation_names, interpolation_fun
def set_data(self, image):
"""Set the image data.
Parameters
----------
image : array-like
The image data.
texture_format : str or None
"""
data = np.asarray(image)
if should_cast_to_f32(data.dtype):
data = data.astype(np.float32)
# can the texture handle this data?
self._texture.check_data_format(data)
if self._data is None or self._data.shape[:2] != data.shape[:2]:
# Only rebuild if the size of the image changed
self._need_vertex_update = True
self._data = data
self._need_texture_upload = True
def view(self):
"""Get the :class:`vispy.visuals.visual.VisualView` for this visual."""
v = Visual.view(self)
self._init_view(v)
return v
def _init_view(self, view):
# Store some extra variables per-view
view._need_method_update = True
view._method_used = None
    @property
    def clim(self):
        """Get color limits used when rendering the image (cmin, cmax)."""
        # The limits are owned by the scalable texture wrapper.
        return self._texture.clim
@clim.setter
def clim(self, clim):
if self._texture.set_clim(clim):
self._need_texture_upload = True
# shortcut so we don't have to rebuild the whole color transform
if not self._need_colortransform_update:
self.shared_program.frag['color_transform'][1]['clim'] = self._texture.clim_normalized
self.update()
    @property
    def cmap(self):
        """Get the colormap object applied to luminance (single band) data."""
        # Stored by the setter via get_colormap().
        return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self._need_colortransform_update = True
self.update()
    @property
    def gamma(self):
        """Get the gamma used when rendering the image."""
        # Plain float; validated (> 0) by the setter.
        return self._gamma
@gamma.setter
def gamma(self, value):
"""Set gamma used when rendering the image."""
if value <= 0:
raise ValueError("gamma must be > 0")
self._gamma = float(value)
# shortcut so we don't have to rebuild the color transform
if not self._need_colortransform_update:
self.shared_program.frag['color_transform'][2]['gamma'] = self._gamma
self.update()
    @property
    def method(self):
        """Get rendering method name."""
        # One of 'auto', 'subdivide', 'impostor' (resolved in _update_method).
        return self._method
@method.setter
def method(self, m):
if self._method != m:
self._method = m
self._need_vertex_update = True
self.update()
@property
def size(self):
"""Get size of the image (width, height)."""
return self._data.shape[:2][::-1]
    @property
    def interpolation(self):
        """Get interpolation algorithm name."""
        # Name is validated against self._interpolation_names by the setter.
        return self._interpolation
@interpolation.setter
def interpolation(self, i):
if i not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
if self._interpolation != i:
self._interpolation = i
self._need_interpolation_update = True
self.update()
    @property
    def interpolation_functions(self):
        """Get names of possible interpolation methods.

        NOTE(review): despite the name, this returns the method *names*
        (a tuple of str), not the shader functions themselves.
        """
        return self._interpolation_names
# The interpolation code could be transferred to a dedicated filter
# function in visuals/filters as discussed in #1051
    def _build_interpolation(self):
        """Rebuild the _data_lookup_fn for different interpolations.

        Wires the GLSL lookup function for the current interpolation name,
        the filter-kernel texture and the image texture into the shared
        shader program.
        """
        interpolation = self._interpolation
        self._data_lookup_fn = self._interpolation_fun[interpolation]
        self.shared_program.frag['get_data'] = self._data_lookup_fn
        # only 'bilinear' uses 'linear' texture interpolation
        if interpolation == 'bilinear':
            texture_interpolation = 'linear'
        else:
            # 'nearest' (and also 'bilinear') doesn't use spatial_filters.frag
            # so u_kernel and shape setting is skipped
            texture_interpolation = 'nearest'
        if interpolation != 'nearest':
            # spatial-filter shaders sample the kernel texture and need the
            # image shape (width, height) to compute texel offsets
            self.shared_program['u_kernel'] = self._kerneltex
            self._data_lookup_fn['shape'] = self._data.shape[:2][::-1]
        if self._texture.interpolation != texture_interpolation:
            self._texture.interpolation = texture_interpolation
        self._data_lookup_fn['texture'] = self._texture
        self._need_interpolation_update = False
    def _build_vertex_data(self):
        """Rebuild the vertex buffers for the subdivide method.

        Tiles the unit square into ``grid[0] x grid[1]`` cells of two
        triangles each, producing matching position (pixel-space) and
        texture (0..1) coordinate buffers.
        """
        grid = self._grid
        w = 1.0 / grid[1]
        h = 1.0 / grid[0]
        # one cell: two triangles covering a w x h rectangle at the origin
        quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
                         [0, 0, 0], [w, h, 0], [0, h, 0]],
                        dtype=np.float32)
        quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
        quads[:] = quad
        # per-cell (x, y) offsets, broadcast onto all 6 vertices of each cell
        mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
        mgrid = mgrid[:, :, np.newaxis, :]
        mgrid[..., 0] *= w
        mgrid[..., 1] *= h
        quads[..., :2] += mgrid
        tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
        tex_coords = np.ascontiguousarray(tex_coords[:, :2])
        # positions are the texture coords scaled to pixel size (width, height)
        vertices = tex_coords * self.size
        self._subdiv_position.set_data(vertices.astype('float32'))
        self._subdiv_texcoord.set_data(tex_coords.astype('float32'))
        self._need_vertex_update = False
    def _update_method(self, view):
        """Decide which method to use for *view* and configure it accordingly.

        'auto' resolves to 'subdivide' when the view's full transform is
        linear and to 'impostor' otherwise; the chosen method selects
        which vertex buffers feed the shader program.
        """
        method = self._method
        if method == 'auto':
            # subdivide is only correct when the mapping to clip space is linear
            if view.transforms.get_transform().Linear:
                method = 'subdivide'
            else:
                method = 'impostor'
        view._method_used = method
        if method == 'subdivide':
            view.view_program['method'] = 0
            view.view_program['a_position'] = self._subdiv_position
            view.view_program['a_texcoord'] = self._subdiv_texcoord
        elif method == 'impostor':
            # single full-viewport quad; the fragment shader maps positions
            # back onto the texture (see FRAG_SHADER's map_local_to_tex)
            view.view_program['method'] = 1
            view.view_program['a_position'] = self._impostor_coords
            view.view_program['a_texcoord'] = self._impostor_coords
        else:
            raise ValueError("Unknown image draw method '%s'" % method)
        self.shared_program['image_size'] = self.size
        view._need_method_update = False
        self._prepare_transforms(view)
    def _build_texture(self):
        """Upload ``self._data`` to the texture and sync color-limit state."""
        pre_clims = self._texture.clim
        pre_internalformat = self._texture.internalformat
        self._texture.scale_and_set_data(self._data)
        post_clims = self._texture.clim
        post_internalformat = self._texture.internalformat
        # color transform needs rebuilding if the internalformat was changed
        # new color limits need to be assigned if the normalized clims changed
        # otherwise, the original color transform should be fine
        # Note that this assumes that if clim changed, clim_normalized changed
        new_if = post_internalformat != pre_internalformat
        new_cl = post_clims != pre_clims
        if not new_if and new_cl and not self._need_colortransform_update:
            # shortcut so we don't have to rebuild the whole color transform
            self.shared_program.frag['color_transform'][1]['clim'] = self._texture.clim_normalized
        elif new_if:
            self._need_colortransform_update = True
        self._need_texture_upload = False
def _compute_bounds(self, axis, view):
if axis > 1:
return 0, 0
else:
return 0, self.size[axis]
    def _build_color_transform(self):
        """Build the GLSL chain mapping raw texture values to final color."""
        if self._data.ndim == 2 or self._data.shape[2] == 1:
            # luminance data: red component -> clim -> gamma -> colormap
            fclim = Function(_apply_clim_float)
            fgamma = Function(_apply_gamma_float)
            # NOTE: _c2l_red only uses the red component, fancy internalformats
            # may need to use the other components or a different function chain
            fun = FunctionChain(
                None, [Function(_c2l_red), fclim, fgamma, Function(self.cmap.glsl_map)]
            )
        else:
            # RGB/A image data (no colormap)
            fclim = Function(_apply_clim)
            fgamma = Function(_apply_gamma)
            fun = FunctionChain(None, [Function(_null_color_transform), fclim, fgamma])
        fclim['clim'] = self._texture.clim_normalized
        fgamma['gamma'] = self.gamma
        return fun
def _prepare_transforms(self, view):
trs = view.transforms
prg = view.view_program
method = view._method_used
if method == 'subdivide':
prg.vert['transform'] = trs.get_transform()
prg.frag['transform'] = self._null_tr
else:
prg.vert['transform'] = self._null_tr
prg.frag['transform'] = trs.get_transform().inverse
    def _prepare_draw(self, view):
        """Lazily rebuild any out-of-date GPU state before drawing *view*.

        Returns False (aborting the draw) when no data has been set yet.
        The order matters: the texture upload (_build_texture) can flag the
        color transform dirty, so that check comes afterwards.
        """
        if self._data is None:
            return False
        if self._need_interpolation_update:
            self._build_interpolation()
        if self._need_texture_upload:
            self._build_texture()
        if self._need_colortransform_update:
            prg = view.view_program
            self.shared_program.frag['color_transform'] = self._build_color_transform()
            self._need_colortransform_update = False
            # colormaps may expose a LUT texture; fall back to None otherwise
            prg['texture2D_LUT'] = self.cmap.texture_lut() \
                if (hasattr(self.cmap, 'texture_lut')) else None
        if self._need_vertex_update:
            self._build_vertex_data()
        if view._need_method_update:
            self._update_method(view)
| 37.812165
| 104
| 0.623646
|
from __future__ import division
import numpy as np
from ..gloo import Texture2D, VertexBuffer
from ..gloo.texture import should_cast_to_f32
from ..color import get_colormap
from .shaders import Function, FunctionChain
from .transforms import NullTransform
from .visual import Visual
from ..io import load_spatial_filters
from ._scalable_textures import CPUScaledTexture2D, GPUScaledTexture2D
VERT_SHADER = """
uniform int method; // 0=subdivide, 1=impostor
attribute vec2 a_position;
attribute vec2 a_texcoord;
varying vec2 v_texcoord;
void main() {
v_texcoord = a_texcoord;
gl_Position = $transform(vec4(a_position, 0., 1.));
}
"""
FRAG_SHADER = """
uniform vec2 image_size;
uniform int method; // 0=subdivide, 1=impostor
uniform sampler2D u_texture;
varying vec2 v_texcoord;
vec4 map_local_to_tex(vec4 x) {
// Cast ray from 3D viewport to surface of image
// (if $transform does not affect z values, then this
// can be optimized as simply $transform.map(x) )
vec4 p1 = $transform(x);
vec4 p2 = $transform(x + vec4(0, 0, 0.5, 0));
p1 /= p1.w;
p2 /= p2.w;
vec4 d = p2 - p1;
float f = p2.z / d.z;
vec4 p3 = p2 - d * f;
// finally map local to texture coords
return vec4(p3.xy / image_size, 0, 1);
}
void main()
{
vec2 texcoord;
if( method == 0 ) {
texcoord = v_texcoord;
}
else {
// vertex shader outputs clip coordinates;
// fragment shader maps to texture coordinates
texcoord = map_local_to_tex(vec4(v_texcoord, 0, 1)).xy;
}
gl_FragColor = $color_transform($get_data(texcoord));
}
"""
_interpolation_template = """
#include "misc/spatial-filters.frag"
vec4 texture_lookup_filtered(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return %s($texture, $shape, texcoord);
}"""
_texture_lookup = """
vec4 texture_lookup(vec2 texcoord) {
if(texcoord.x < 0.0 || texcoord.x > 1.0 ||
texcoord.y < 0.0 || texcoord.y > 1.0) {
discard;
}
return texture2D($texture, texcoord);
}"""
_apply_clim_float = """
float apply_clim(float data) {
data = clamp(data, min($clim.x, $clim.y), max($clim.x, $clim.y));
data = (data - $clim.x) / ($clim.y - $clim.x);
return data;
}"""
_apply_clim = """
vec4 apply_clim(vec4 color) {
if ($clim.x < $clim.y) {{
color.rgb = clamp(color.rgb, $clim.x, $clim.y);
}} else if ($clim.x > $clim.y) {{
color.rgb = clamp(color.rgb, $clim.y, $clim.x);
}} else {{
// clims are the same, show minimum colormap value
return vec4(0.0, 0.0, 0.0, 1.0);
}}
color.rgb = color.rgb - $clim.x;
color.rgb = color.rgb / ($clim.y - $clim.x);
return max(color, 0);
}
"""
_apply_gamma_float = """
float apply_gamma(float data) {
return pow(data, $gamma);
}"""
_apply_gamma = """
vec4 apply_gamma(vec4 color) {
color.rgb = pow(color.rgb, vec3($gamma));
return color;
}
"""
_null_color_transform = 'vec4 pass(vec4 color) { return color; }'
_c2l_red = 'float cmap(vec4 color) { return color.r; }'
class ImageVisual(Visual):
VERTEX_SHADER = VERT_SHADER
FRAGMENT_SHADER = FRAG_SHADER
def __init__(self, data=None, method='auto', grid=(1, 1),
cmap='viridis', clim='auto', gamma=1.0,
interpolation='nearest', texture_format=None, **kwargs):
self._data = None
self._gamma = gamma
kernel, interpolation_names = load_spatial_filters()
self._kerneltex = Texture2D(kernel, interpolation='nearest')
interpolation_names, interpolation_fun = self._init_interpolation(
interpolation_names)
self._interpolation_names = interpolation_names
self._interpolation_fun = interpolation_fun
self._interpolation = interpolation
if self._interpolation not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
if self._interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
texture_interpolation = 'nearest'
self._method = method
self._grid = grid
self._need_texture_upload = True
self._need_vertex_update = True
self._need_colortransform_update = True
self._need_interpolation_update = True
if texture_format is None:
self._texture = CPUScaledTexture2D(
data, interpolation=texture_interpolation)
else:
self._texture = GPUScaledTexture2D(
data, internalformat=texture_format,
interpolation=texture_interpolation)
self._subdiv_position = VertexBuffer()
self._subdiv_texcoord = VertexBuffer()
vertices = np.array([[-1, -1], [1, -1], [1, 1],
[-1, -1], [1, 1], [-1, 1]],
dtype=np.float32)
self._impostor_coords = VertexBuffer(vertices)
self._null_tr = NullTransform()
self._init_view(self)
super(ImageVisual, self).__init__(vcode=self.VERTEX_SHADER, fcode=self.FRAGMENT_SHADER)
self.set_gl_state('translucent', cull_face=False)
self._draw_mode = 'triangles'
self._data_lookup_fn = None
self.clim = clim or "auto"
self.cmap = cmap
if data is not None:
self.set_data(data)
self.freeze()
@staticmethod
def _init_interpolation(interpolation_names):
fun = [Function(_interpolation_template % n)
for n in interpolation_names]
interpolation_names = [n.lower() for n in interpolation_names]
interpolation_fun = dict(zip(interpolation_names, fun))
interpolation_names = tuple(sorted(interpolation_names))
interpolation_fun['nearest'] = Function(_texture_lookup)
interpolation_fun['bilinear'] = Function(_texture_lookup)
return interpolation_names, interpolation_fun
def set_data(self, image):
data = np.asarray(image)
if should_cast_to_f32(data.dtype):
data = data.astype(np.float32)
self._texture.check_data_format(data)
if self._data is None or self._data.shape[:2] != data.shape[:2]:
self._need_vertex_update = True
self._data = data
self._need_texture_upload = True
def view(self):
v = Visual.view(self)
self._init_view(v)
return v
def _init_view(self, view):
view._need_method_update = True
view._method_used = None
@property
def clim(self):
return self._texture.clim
@clim.setter
def clim(self, clim):
if self._texture.set_clim(clim):
self._need_texture_upload = True
if not self._need_colortransform_update:
self.shared_program.frag['color_transform'][1]['clim'] = self._texture.clim_normalized
self.update()
@property
def cmap(self):
return self._cmap
@cmap.setter
def cmap(self, cmap):
self._cmap = get_colormap(cmap)
self._need_colortransform_update = True
self.update()
@property
def gamma(self):
return self._gamma
@gamma.setter
def gamma(self, value):
if value <= 0:
raise ValueError("gamma must be > 0")
self._gamma = float(value)
# shortcut so we don't have to rebuild the color transform
if not self._need_colortransform_update:
self.shared_program.frag['color_transform'][2]['gamma'] = self._gamma
self.update()
@property
def method(self):
return self._method
@method.setter
def method(self, m):
if self._method != m:
self._method = m
self._need_vertex_update = True
self.update()
@property
def size(self):
return self._data.shape[:2][::-1]
@property
def interpolation(self):
return self._interpolation
@interpolation.setter
def interpolation(self, i):
if i not in self._interpolation_names:
raise ValueError("interpolation must be one of %s" %
', '.join(self._interpolation_names))
if self._interpolation != i:
self._interpolation = i
self._need_interpolation_update = True
self.update()
@property
def interpolation_functions(self):
return self._interpolation_names
def _build_interpolation(self):
interpolation = self._interpolation
self._data_lookup_fn = self._interpolation_fun[interpolation]
self.shared_program.frag['get_data'] = self._data_lookup_fn
if interpolation == 'bilinear':
texture_interpolation = 'linear'
else:
# so u_kernel and shape setting is skipped
texture_interpolation = 'nearest'
if interpolation != 'nearest':
self.shared_program['u_kernel'] = self._kerneltex
self._data_lookup_fn['shape'] = self._data.shape[:2][::-1]
if self._texture.interpolation != texture_interpolation:
self._texture.interpolation = texture_interpolation
self._data_lookup_fn['texture'] = self._texture
self._need_interpolation_update = False
def _build_vertex_data(self):
grid = self._grid
w = 1.0 / grid[1]
h = 1.0 / grid[0]
quad = np.array([[0, 0, 0], [w, 0, 0], [w, h, 0],
[0, 0, 0], [w, h, 0], [0, h, 0]],
dtype=np.float32)
quads = np.empty((grid[1], grid[0], 6, 3), dtype=np.float32)
quads[:] = quad
mgrid = np.mgrid[0.:grid[1], 0.:grid[0]].transpose(1, 2, 0)
mgrid = mgrid[:, :, np.newaxis, :]
mgrid[..., 0] *= w
mgrid[..., 1] *= h
quads[..., :2] += mgrid
tex_coords = quads.reshape(grid[1]*grid[0]*6, 3)
tex_coords = np.ascontiguousarray(tex_coords[:, :2])
vertices = tex_coords * self.size
self._subdiv_position.set_data(vertices.astype('float32'))
self._subdiv_texcoord.set_data(tex_coords.astype('float32'))
self._need_vertex_update = False
def _update_method(self, view):
method = self._method
if method == 'auto':
if view.transforms.get_transform().Linear:
method = 'subdivide'
else:
method = 'impostor'
view._method_used = method
if method == 'subdivide':
view.view_program['method'] = 0
view.view_program['a_position'] = self._subdiv_position
view.view_program['a_texcoord'] = self._subdiv_texcoord
elif method == 'impostor':
view.view_program['method'] = 1
view.view_program['a_position'] = self._impostor_coords
view.view_program['a_texcoord'] = self._impostor_coords
else:
raise ValueError("Unknown image draw method '%s'" % method)
self.shared_program['image_size'] = self.size
view._need_method_update = False
self._prepare_transforms(view)
def _build_texture(self):
pre_clims = self._texture.clim
pre_internalformat = self._texture.internalformat
self._texture.scale_and_set_data(self._data)
post_clims = self._texture.clim
post_internalformat = self._texture.internalformat
# color transform needs rebuilding if the internalformat was changed
# new color limits need to be assigned if the normalized clims changed
# otherwise, the original color transform should be fine
# Note that this assumes that if clim changed, clim_normalized changed
new_if = post_internalformat != pre_internalformat
new_cl = post_clims != pre_clims
if not new_if and new_cl and not self._need_colortransform_update:
# shortcut so we don't have to rebuild the whole color transform
self.shared_program.frag['color_transform'][1]['clim'] = self._texture.clim_normalized
elif new_if:
self._need_colortransform_update = True
self._need_texture_upload = False
def _compute_bounds(self, axis, view):
if axis > 1:
return 0, 0
else:
return 0, self.size[axis]
def _build_color_transform(self):
if self._data.ndim == 2 or self._data.shape[2] == 1:
fclim = Function(_apply_clim_float)
fgamma = Function(_apply_gamma_float)
fun = FunctionChain(
None, [Function(_c2l_red), fclim, fgamma, Function(self.cmap.glsl_map)]
)
else:
fclim = Function(_apply_clim)
fgamma = Function(_apply_gamma)
fun = FunctionChain(None, [Function(_null_color_transform), fclim, fgamma])
fclim['clim'] = self._texture.clim_normalized
fgamma['gamma'] = self.gamma
return fun
def _prepare_transforms(self, view):
trs = view.transforms
prg = view.view_program
method = view._method_used
if method == 'subdivide':
prg.vert['transform'] = trs.get_transform()
prg.frag['transform'] = self._null_tr
else:
prg.vert['transform'] = self._null_tr
prg.frag['transform'] = trs.get_transform().inverse
def _prepare_draw(self, view):
if self._data is None:
return False
if self._need_interpolation_update:
self._build_interpolation()
if self._need_texture_upload:
self._build_texture()
if self._need_colortransform_update:
prg = view.view_program
self.shared_program.frag['color_transform'] = self._build_color_transform()
self._need_colortransform_update = False
prg['texture2D_LUT'] = self.cmap.texture_lut() \
if (hasattr(self.cmap, 'texture_lut')) else None
if self._need_vertex_update:
self._build_vertex_data()
if view._need_method_update:
self._update_method(view)
| true
| true
|
1c425c3ae9103d0ee23378f3a819544555a2ecab
| 415
|
py
|
Python
|
orders/migrations/0002_alter_order_shipping_total.py
|
IvanLezcano/TrabajoPracticoFinalPoloticDjango
|
1a866e18c97e8836e96006a42be298ce201b052c
|
[
"CC0-1.0"
] | 1
|
2021-06-06T20:27:51.000Z
|
2021-06-06T20:27:51.000Z
|
orders/migrations/0002_alter_order_shipping_total.py
|
IvanLezcano/TrabajoPracticoFinalPoloticDjango
|
1a866e18c97e8836e96006a42be298ce201b052c
|
[
"CC0-1.0"
] | null | null | null |
orders/migrations/0002_alter_order_shipping_total.py
|
IvanLezcano/TrabajoPracticoFinalPoloticDjango
|
1a866e18c97e8836e96006a42be298ce201b052c
|
[
"CC0-1.0"
] | 1
|
2021-07-06T01:42:45.000Z
|
2021-07-06T01:42:45.000Z
|
# Generated by Django 3.2.4 on 2021-06-11 16:22
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Order.shipping_total to DecimalField(max_digits=16, decimal_places=2, default=500)."""
    dependencies = [
        ('orders', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='order',
            name='shipping_total',
            field=models.DecimalField(decimal_places=2, default=500, max_digits=16),
        ),
    ]
| 21.842105
| 84
| 0.612048
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('orders', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='order',
name='shipping_total',
field=models.DecimalField(decimal_places=2, default=500, max_digits=16),
),
]
| true
| true
|
1c425c62bcad3f53bf5d215a797467396549f96b
| 2,070
|
py
|
Python
|
cryptocurrency/pump_and_dump/spike_checking/spike_checker.py
|
yzgastk/machine-learning-experiments
|
63bf3af40f0273cd5b782deb7f5718c575ba5adf
|
[
"Apache-2.0"
] | null | null | null |
cryptocurrency/pump_and_dump/spike_checking/spike_checker.py
|
yzgastk/machine-learning-experiments
|
63bf3af40f0273cd5b782deb7f5718c575ba5adf
|
[
"Apache-2.0"
] | null | null | null |
cryptocurrency/pump_and_dump/spike_checking/spike_checker.py
|
yzgastk/machine-learning-experiments
|
63bf3af40f0273cd5b782deb7f5718c575ba5adf
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3.8
# -*- coding: utf-8 -*-
import sys
import pickle as rick
import pandas as pd
import sqlite3 as s3
from numpy import mean
from numpy import std
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
if __name__ == '__main__':
    # Load stored evaluation data plus a previously trained spike-detection
    # model, then print a confusion matrix for one trading symbol.
    conn_data = s3.connect("./data/spike_data.sqlite")
    # Close the pickle file deterministically (handle was leaked before).
    with open("./data/model_spike_temp.pkl", "rb") as file_model:
        model = rick.load(file_model)
    symbol = sys.argv[1]  # trading pair taken from the command line
    split_amount = 100000
    # Parameterized query: the symbol comes from argv, so never splice it
    # into the SQL string directly (SQL-injection hazard).
    query = ("SELECT * FROM aggregated_data WHERE symbol = ? "
             "ORDER BY close_time ASC LIMIT ?;")
    feature_input = pd.read_sql(query, conn_data,
                                params=(symbol, split_amount))
    columns = ["open", "high", "low", "close", "volume", "close_time", "quote_asset_volume", "trade_number", "tb_base_av",
               "tb_quote_av", "label", "listing_date", "existence_time", "ret1", "amplitude_intra", "pump_count", "btc_price",
               "market_cap", "volf1", "volbtc1", "ret3", "volf3", "volbtc3", "ret12", "volf12", "volbtc12", "ret24", "volf24",
               "volbtc24", "ret36", "volf36", "volbtc36", "ret48", "volf48", "volbtc48", "ret60", "volf60", "volbtc60", "ret72",
               "volf72", "volbtc72", "vola3", "volavol3", "rtvol3", "vola12", "volavol12", "rtvol12", "vola24", "volavol24",
               "rtvol24", "vola36", "volavol36", "rtvol36", "vola48", "volavol48", "rtvol48", "vola60", "volavol60", "rtvol60",
               "vola72", "volavol72", "rtvol72", "last_open_price", "symbol"]
    feature_input.columns = columns
    # Raw price/volume columns are dropped; only engineered features remain.
    cols_to_drop = ["open", "high", "low", "listing_date", "close", "volume", "close_time", "tb_base_av", "tb_quote_av", "symbol"]
    feature_input = feature_input.drop(cols_to_drop, axis=1)
    print(feature_input[feature_input.label == 1].shape)
    x_test = feature_input.dropna()
    y_test = x_test.label
    x_test.drop("label", inplace=True, axis=1)
    predictions = model.predict(x_test)
    conf_mat = confusion_matrix(y_test.to_list(), predictions)
    print(conf_mat)
| 46
| 156
| 0.665217
|
import sys
import pickle as rick
import pandas as pd
import sqlite3 as s3
from numpy import mean
from numpy import std
from sklearn.model_selection import RepeatedStratifiedKFold, cross_val_score
from sklearn.metrics import roc_auc_score
from sklearn.metrics import confusion_matrix
if __name__ == '__main__':
conn_data = s3.connect("./data/spike_data.sqlite")
file_model = open("./data/model_spike_temp.pkl", "rb")
model = rick.load(file_model)
symbol = sys.argv[1]
split_amount = 100000
feature_input = pd.read_sql("SELECT * FROM aggregated_data WHERE symbol = '"+symbol+"' ORDER BY close_time ASC LIMIT "+str(split_amount)+";", conn_data)
columns = ["open", "high", "low", "close", "volume", "close_time", "quote_asset_volume", "trade_number", "tb_base_av",
"tb_quote_av", "label", "listing_date", "existence_time", "ret1", "amplitude_intra", "pump_count", "btc_price",
"market_cap", "volf1", "volbtc1", "ret3", "volf3", "volbtc3", "ret12", "volf12", "volbtc12", "ret24", "volf24",
"volbtc24", "ret36", "volf36", "volbtc36", "ret48", "volf48", "volbtc48", "ret60", "volf60", "volbtc60", "ret72",
"volf72", "volbtc72", "vola3", "volavol3", "rtvol3", "vola12", "volavol12", "rtvol12", "vola24", "volavol24",
"rtvol24", "vola36", "volavol36", "rtvol36", "vola48", "volavol48", "rtvol48", "vola60", "volavol60", "rtvol60",
"vola72", "volavol72", "rtvol72", "last_open_price", "symbol"]
feature_input.columns = columns
cols_to_drop = ["open","high","low", "listing_date", "close","volume","close_time", "tb_base_av", "tb_quote_av", "symbol"]
feature_input = feature_input.drop(cols_to_drop, axis=1)
print(feature_input[feature_input.label == 1].shape)
x_test = feature_input.dropna()
y_test = x_test.label
x_test.drop("label", inplace=True, axis=1)
predictions = model.predict(x_test)
conf_mat = confusion_matrix(y_test.to_list(), predictions)
print(conf_mat)
| true
| true
|
1c425ccfcb659d3ab8f0cabfca4c81485097e1f3
| 588
|
py
|
Python
|
PythonExercicios/ex050 - Soma dos Pares.py
|
caique-santana/CursoEmVideo-Curso_Python3
|
86bb67bbbf348544e1135d8657672d4e33fa70e2
|
[
"MIT"
] | 1
|
2020-04-15T00:49:02.000Z
|
2020-04-15T00:49:02.000Z
|
PythonExercicios/ex050 - Soma dos Pares.py
|
caique-santana/CursoEmVideo-Curso_Python3
|
86bb67bbbf348544e1135d8657672d4e33fa70e2
|
[
"MIT"
] | null | null | null |
PythonExercicios/ex050 - Soma dos Pares.py
|
caique-santana/CursoEmVideo-Curso_Python3
|
86bb67bbbf348544e1135d8657672d4e33fa70e2
|
[
"MIT"
] | null | null | null |
"""Desenvolva um programa que leia seis números inteiros e mostre a soma apenas daqueles que forem pares.
Se o valor digitado for ímpar, desconsidere-o."""
# First approach: accumulate only the even values.
total_pares = 0
for _ in range(6):
    valor = int(input('Digite um número inteiro: '))
    if valor % 2 == 0:
        total_pares += valor
print('A soma dos números pares é {}.'.format(total_pares))
# Reference approach (course instructor): also count how many were even.
soma = 0
cont = 0
for c in range(1, 7):
    num = int(input('Digite o {}ª valor: '.format(c)))
    if num % 2 != 0:
        continue
    soma += num
    cont += 1
print('Você informou {} números PARES e a soma foi {}'.format(cont, soma))
| 29.4
| 105
| 0.622449
|
soma = 0
for c in range(1, 7):
num = int(input('Digite um número inteiro: '))
if num % 2 == 0:
soma += num
print('A soma dos números pares é {}.'.format(soma))
soma = 0
cont = 0
for c in range(1, 7):
num = int(input('Digite o {}ª valor: '.format(c)))
if num % 2 == 0:
soma += num
cont += 1
print('Você informou {} números PARES e a soma foi {}'.format(cont, soma))
| true
| true
|
1c425ce86751fdbcf4a521c0e7c71abb898bcc3e
| 6,939
|
py
|
Python
|
ask-smapi-model/ask_smapi_model/v1/skill/asr/evaluations/get_asr_evaluation_status_response_object.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/asr/evaluations/get_asr_evaluation_status_response_object.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
ask-smapi-model/ask_smapi_model/v1/skill/asr/evaluations/get_asr_evaluation_status_response_object.py
|
Signal-Kinetics/alexa-apis-for-python
|
abb8d3dce18a5510c48b215406ed36c024f01495
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file
# except in compliance with the License. A copy of the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for
# the specific language governing permissions and limitations under the License.
#
import pprint
import re # noqa: F401
import six
import typing
from enum import Enum
from ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata import EvaluationMetadata
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.asr.evaluations.error_object import ErrorObjectV1
from ask_smapi_model.v1.skill.asr.evaluations.evaluation_status import EvaluationStatusV1
from ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata_result import EvaluationMetadataResultV1
from ask_smapi_model.v1.skill.asr.evaluations.post_asr_evaluations_request_object import PostAsrEvaluationsRequestObjectV1
class GetAsrEvaluationStatusResponseObject(EvaluationMetadata):
    """Response model for the "get ASR evaluation status" SMAPI call.

    Generated SDK model: adds no fields of its own beyond EvaluationMetadata;
    it exists as a distinct response type with its own (de)serialization maps.

    :param status:
    :type status: (optional) ask_smapi_model.v1.skill.asr.evaluations.evaluation_status.EvaluationStatus
    :param total_evaluation_count: indicate the total number of evaluations that are supposed to be run in the evaluation request
    :type total_evaluation_count: (optional) float
    :param completed_evaluation_count: indicate the number of completed evaluations
    :type completed_evaluation_count: (optional) float
    :param start_timestamp: indicate the start time stamp of the ASR evaluation job. ISO-8601 Format.
    :type start_timestamp: (optional) datetime
    :param request:
    :type request: (optional) ask_smapi_model.v1.skill.asr.evaluations.post_asr_evaluations_request_object.PostAsrEvaluationsRequestObject
    :param error:
    :type error: (optional) ask_smapi_model.v1.skill.asr.evaluations.error_object.ErrorObject
    :param result:
    :type result: (optional) ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata_result.EvaluationMetadataResult
    """
    # attribute name -> fully-qualified type used by the SDK deserializer
    deserialized_types = {
        'status': 'ask_smapi_model.v1.skill.asr.evaluations.evaluation_status.EvaluationStatus',
        'total_evaluation_count': 'float',
        'completed_evaluation_count': 'float',
        'start_timestamp': 'datetime',
        'request': 'ask_smapi_model.v1.skill.asr.evaluations.post_asr_evaluations_request_object.PostAsrEvaluationsRequestObject',
        'error': 'ask_smapi_model.v1.skill.asr.evaluations.error_object.ErrorObject',
        'result': 'ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata_result.EvaluationMetadataResult'
    }  # type: Dict
    # python attribute name -> JSON key in the wire format
    attribute_map = {
        'status': 'status',
        'total_evaluation_count': 'totalEvaluationCount',
        'completed_evaluation_count': 'completedEvaluationCount',
        'start_timestamp': 'startTimestamp',
        'request': 'request',
        'error': 'error',
        'result': 'result'
    }  # type: Dict
    supports_multiple_types = False

    def __init__(self, status=None, total_evaluation_count=None, completed_evaluation_count=None, start_timestamp=None, request=None, error=None, result=None):
        # type: (Optional[EvaluationStatusV1], Optional[float], Optional[float], Optional[datetime], Optional[PostAsrEvaluationsRequestObjectV1], Optional[ErrorObjectV1], Optional[EvaluationMetadataResultV1]) -> None
        """
        :param status:
        :type status: (optional) ask_smapi_model.v1.skill.asr.evaluations.evaluation_status.EvaluationStatus
        :param total_evaluation_count: indicate the total number of evaluations that are supposed to be run in the evaluation request
        :type total_evaluation_count: (optional) float
        :param completed_evaluation_count: indicate the number of completed evaluations
        :type completed_evaluation_count: (optional) float
        :param start_timestamp: indicate the start time stamp of the ASR evaluation job. ISO-8601 Format.
        :type start_timestamp: (optional) datetime
        :param request:
        :type request: (optional) ask_smapi_model.v1.skill.asr.evaluations.post_asr_evaluations_request_object.PostAsrEvaluationsRequestObject
        :param error:
        :type error: (optional) ask_smapi_model.v1.skill.asr.evaluations.error_object.ErrorObject
        :param result:
        :type result: (optional) ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata_result.EvaluationMetadataResult
        """
        self.__discriminator_value = None  # type: str
        # all state lives on the parent; this subclass only forwards it
        super(GetAsrEvaluationStatusResponseObject, self).__init__(status=status, total_evaluation_count=total_evaluation_count, completed_evaluation_count=completed_evaluation_count, start_timestamp=start_timestamp, request=request, error=error, result=result)

    def to_dict(self):
        # type: () -> Dict[str, object]
        """Returns the model properties as a dict"""
        result = {}  # type: Dict
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            # recursively convert nested models, Enums, lists and dicts
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result

    def to_str(self):
        # type: () -> str
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())

    def __repr__(self):
        # type: () -> str
        """For `print` and `pprint`"""
        return self.to_str()

    def __eq__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are equal"""
        if not isinstance(other, GetAsrEvaluationStatusResponseObject):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        # type: (object) -> bool
        """Returns true if both objects are not equal"""
        return not self == other
| 46.885135
| 261
| 0.696786
|
import pprint
import re
import six
import typing
from enum import Enum
from ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata import EvaluationMetadata
if typing.TYPE_CHECKING:
from typing import Dict, List, Optional, Union
from datetime import datetime
from ask_smapi_model.v1.skill.asr.evaluations.error_object import ErrorObjectV1
from ask_smapi_model.v1.skill.asr.evaluations.evaluation_status import EvaluationStatusV1
from ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata_result import EvaluationMetadataResultV1
from ask_smapi_model.v1.skill.asr.evaluations.post_asr_evaluations_request_object import PostAsrEvaluationsRequestObjectV1
class GetAsrEvaluationStatusResponseObject(EvaluationMetadata):
    """Response body of the "get ASR evaluation status" SMAPI operation.

    All data attributes are inherited from ``EvaluationMetadata``; this
    subclass defines only the (de)serialization metadata and generic helpers.
    """
    # Attribute name -> fully qualified type string used during deserialization.
    deserialized_types = {
        'status': 'ask_smapi_model.v1.skill.asr.evaluations.evaluation_status.EvaluationStatus',
        'total_evaluation_count': 'float',
        'completed_evaluation_count': 'float',
        'start_timestamp': 'datetime',
        'request': 'ask_smapi_model.v1.skill.asr.evaluations.post_asr_evaluations_request_object.PostAsrEvaluationsRequestObject',
        'error': 'ask_smapi_model.v1.skill.asr.evaluations.error_object.ErrorObject',
        'result': 'ask_smapi_model.v1.skill.asr.evaluations.evaluation_metadata_result.EvaluationMetadataResult'
    }
    # snake_case attribute name -> camelCase JSON key used on the wire.
    attribute_map = {
        'status': 'status',
        'total_evaluation_count': 'totalEvaluationCount',
        'completed_evaluation_count': 'completedEvaluationCount',
        'start_timestamp': 'startTimestamp',
        'request': 'request',
        'error': 'error',
        'result': 'result'
    }
    supports_multiple_types = False
    def __init__(self, status=None, total_evaluation_count=None, completed_evaluation_count=None, start_timestamp=None, request=None, error=None, result=None):
        """Forward all optional fields to the ``EvaluationMetadata`` initializer."""
        self.__discriminator_value = None
        super(GetAsrEvaluationStatusResponseObject, self).__init__(status=status, total_evaluation_count=total_evaluation_count, completed_evaluation_count=completed_evaluation_count, start_timestamp=start_timestamp, request=request, error=error, result=result)
    def to_dict(self):
        """Return the model as a dict, recursively converting nested models and enums."""
        result = {}
        for attr, _ in six.iteritems(self.deserialized_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else
                    x.value if isinstance(x, Enum) else x,
                    value
                ))
            elif isinstance(value, Enum):
                result[attr] = value.value
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else
                    (item[0], item[1].value)
                    if isinstance(item[1], Enum) else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Return the ``pprint``-formatted string of ``to_dict()``."""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """Representation used by `print` and `pprint`."""
        return self.to_str()
    def __eq__(self, other):
        """True iff ``other`` has the same type and identical attributes."""
        if not isinstance(other, GetAsrEvaluationStatusResponseObject):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Inverse of ``__eq__``."""
        return not self == other
| true
| true
|
1c425f9604c3c2b74d9702ddc73276507f561f0f
| 28,746
|
py
|
Python
|
pegasus/tools/visualization.py
|
hoondy/pegasus
|
ca6e8dc3b39402deab21d6db80ad4ce8d41631e9
|
[
"BSD-3-Clause"
] | null | null | null |
pegasus/tools/visualization.py
|
hoondy/pegasus
|
ca6e8dc3b39402deab21d6db80ad4ce8d41631e9
|
[
"BSD-3-Clause"
] | null | null | null |
pegasus/tools/visualization.py
|
hoondy/pegasus
|
ca6e8dc3b39402deab21d6db80ad4ce8d41631e9
|
[
"BSD-3-Clause"
] | null | null | null |
import time
import numpy as np
import scipy
import umap as umap_module
import forceatlas2 as fa2
import uuid
from threadpoolctl import threadpool_limits
from pegasusio import MultimodalData
from pegasus.tools import (
eff_n_jobs,
update_rep,
X_from_rep,
W_from_rep,
get_neighbors,
neighbors,
net_train_and_predict,
calculate_nearest_neighbors,
calculate_affinity_matrix,
construct_graph,
)
import logging
logger = logging.getLogger(__name__)
from pegasusio import timer
def calc_tsne(
    X,
    nthreads,
    no_dims,
    perplexity,
    early_exag_coeff,
    learning_rate,
    rand_seed,
    initialization=None,
    max_iter=750,
    stop_early_exag_iter=250,
    mom_switch_iter=250,
):
    """
    Compute a t-SNE embedding of ``X`` with the FIt-SNE package.

    Parameters mirror ``fitsne.FItSNE`` and are forwarded unchanged; the
    return value is the embedding produced by ``FItSNE`` (one row per row
    of ``X``, ``no_dims`` columns).

    Raises ``Exception`` when the native ``fftw3`` shared library cannot be
    located, since FIt-SNE requires it at run time.
    """
    # FItSNE will change X content
    # Check if fftw3 is installed.
    import ctypes.util
    fftw3_loc = ctypes.util.find_library("fftw3")
    if fftw3_loc is None:
        raise Exception("Please install 'fftw3' first to use the FIt-SNE feature!")
    # Imported lazily so pegasus works without fitsne installed.
    from fitsne import FItSNE
    return FItSNE(
        X,
        nthreads=nthreads,
        no_dims=no_dims,
        perplexity=perplexity,
        early_exag_coeff=early_exag_coeff,
        learning_rate=learning_rate,
        rand_seed=rand_seed,
        initialization=initialization,
        max_iter=max_iter,
        stop_early_exag_iter=stop_early_exag_iter,
        mom_switch_iter=mom_switch_iter,
    )
# Running umap using our own kNN indices
def calc_umap(
    X,
    n_components,
    n_neighbors,
    min_dist,
    spread,
    random_state,
    init="spectral",
    n_epochs=None,
    learning_rate=1.0,
    knn_indices=None,
    knn_dists=None,
):
    """
    Compute a UMAP embedding, optionally reusing precomputed kNN graphs.

    For fewer than 4096 cells, or when ``knn_indices`` is not given, this
    simply delegates to ``umap.UMAP.fit_transform``. Otherwise it replays
    the preprocessing steps of UMAP's ``fit`` so that the caller-supplied
    ``knn_indices`` / ``knn_dists`` (self-inclusive neighbor lists, as
    built by the callers in this module) are used for the fuzzy simplicial
    set, then calls ``simplicial_set_embedding`` directly. Returns the
    embedding array.
    """
    umap_obj = umap_module.UMAP(
        n_components=n_components,
        n_neighbors=n_neighbors,
        min_dist=min_dist,
        spread=spread,
        random_state=random_state,
        init=init,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        verbose=True,
    )
    embedding = None
    if X.shape[0] < 4096 or knn_indices is None:
        embedding = umap_obj.fit_transform(X)
        logger.info(f"Using umap kNN graph because number of cells {X.shape[0]} is smaller than 4096 or knn_indices is not provided.")
    else:
        assert knn_dists is not None
        # preprocessing codes adopted from UMAP's umap_.py fit function in order to use our own kNN graphs
        from sklearn.utils import check_random_state, check_array
        X = check_array(X, dtype=np.float32, accept_sparse="csr")
        umap_obj._raw_data = X
        if umap_obj.a is None or umap_obj.b is None:
            umap_obj._a, umap_obj._b = umap_module.umap_.find_ab_params(
                umap_obj.spread, umap_obj.min_dist
            )
        else:
            umap_obj._a = umap_obj.a
            umap_obj._b = umap_obj.b
        umap_obj._metric_kwds = (
            umap_obj.metric_kwds if umap_obj.metric_kwds is not None else {}
        )
        umap_obj._target_metric_kwds = {}
        _init = (
            check_array(umap_obj.init, dtype=np.float32, accept_sparse=False)
            if isinstance(umap_obj.init, np.ndarray)
            else umap_obj.init
        )
        umap_obj._initial_alpha = umap_obj.learning_rate
        umap_obj._validate_parameters()
        if umap_obj.verbose:
            logger.info(str(umap_obj))
        if scipy.sparse.isspmatrix_csr(X):
            if not X.has_sorted_indices:
                X.sort_indices()
            umap_obj._sparse_data = True
        else:
            umap_obj._sparse_data = False
        _random_state = check_random_state(umap_obj.random_state)
        if umap_obj.verbose:
            logger.info("Construct fuzzy simplicial set")
        umap_obj._small_data = False
        umap_obj.graph_, umap_obj._sigmas, umap_obj._rhos = umap_module.umap_.fuzzy_simplicial_set(
            X=X,
            n_neighbors=umap_obj.n_neighbors,
            random_state=_random_state,
            metric=umap_obj.metric,
            metric_kwds=umap_obj._metric_kwds,
            knn_indices=knn_indices,
            knn_dists=knn_dists,
            angular=umap_obj.angular_rp_forest,
            set_op_mix_ratio=umap_obj.set_op_mix_ratio,
            local_connectivity=umap_obj.local_connectivity,
            verbose=umap_obj.verbose,
        )
        _n_epochs = umap_obj.n_epochs if umap_obj.n_epochs is not None else 0
        if umap_obj.verbose:
            logger.info("Construct embedding")
        # Shim over the signature change between umap-learn <0.5 and >=0.5
        # (the densMAP keyword arguments and the tuple return value).
        def simplicial_set_embedding(*args, **kwargs):
            from packaging import version
            if version.parse(umap_module.__version__) >= version.parse('0.5.0'): # For umap-learn v0.5+
                kwargs.update({'densmap': False, 'densmap_kwds': {}, 'output_dens': False})
            embedding = umap_module.umap_.simplicial_set_embedding(*args, **kwargs)
            return (embedding[0] if isinstance(embedding, tuple) else embedding)
        embedding = simplicial_set_embedding(
            data=X,
            graph=umap_obj.graph_,
            n_components=umap_obj.n_components,
            initial_alpha=umap_obj._initial_alpha,
            a=umap_obj._a,
            b=umap_obj._b,
            gamma=umap_obj.repulsion_strength,
            negative_sample_rate=umap_obj.negative_sample_rate,
            n_epochs=_n_epochs,
            init=_init,
            random_state=_random_state,
            metric=umap_obj.metric,
            metric_kwds=umap_obj._metric_kwds,
            verbose=umap_obj.verbose,
        )
    return embedding
def calc_force_directed_layout(
    W,
    file_name,
    n_jobs,
    target_change_per_node,
    target_steps,
    is3d,
    memory,
    random_state,
    init=None,
):
    """
    Run ForceAtlas2 on the graph induced by the affinity matrix ``W``.

    ``W`` is turned into a graph via ``construct_graph`` and handed to the
    ``forceatlas2`` wrapper together with ``file_name``, which the wrapper
    uses as a scratch/coordinate file. ``init`` optionally seeds the layout
    coordinates. Returns the layout produced by ``fa2.forceatlas2``.
    """
    G = construct_graph(W)
    return fa2.forceatlas2(
        file_name,
        graph=G,
        n_jobs=n_jobs,
        target_change_per_node=target_change_per_node,
        target_steps=target_steps,
        is3d=is3d,
        memory=memory,
        random_state=random_state,
        init=init,
    )
@timer(logger=logger)
def tsne(
    data: MultimodalData,
    rep: str = "pca",
    n_jobs: int = -1,
    n_components: int = 2,
    perplexity: float = 30,
    early_exaggeration: int = 12,
    learning_rate: float = "auto",
    initialization: str = "pca",
    random_state: int = 0,
    out_basis: str = "tsne",
) -> None:
    """Calculate t-SNE embedding of cells using the FIt-SNE package.
    This function uses fitsne_ package. See [Linderman19]_ for details on FIt-SNE algorithm.
    .. _fitsne: https://github.com/KlugerLab/FIt-SNE
    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.
    rep: ``str``, optional, default: ``"pca"``
        Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.
    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use. If ``-1``, use all physical CPU cores.
    n_components: ``int``, optional, default: ``2``
        Dimension of calculated FI-tSNE coordinates. By default, generate 2-dimensional data for 2D visualization.
    perplexity: ``float``, optional, default: ``30``
        The perplexity is related to the number of nearest neighbors used in other manifold learning algorithms. Larger datasets usually require a larger perplexity.
    early_exaggeration: ``int``, optional, default: ``12``
        Controls how tight natural clusters in the original space are in the embedded space, and how much space will be between them.
    learning_rate: ``float``, optional, default: ``auto``
        By default, the learning rate is determined automatically as max(data.shape[0] / early_exaggeration, 200). See [Belkina19]_ and [Kobak19]_ for details.
    initialization: ``str``, optional, default: ``pca``
        Initialization can be either ``pca`` or ``random`` or np.ndarray. By default, we use ``pca`` initialization according to [Kobak19]_.
    random_state: ``int``, optional, default: ``0``
        Random seed set for reproducing results.
    out_basis: ``str``, optional, default: ``"tsne"``
        Key name for calculated FI-tSNE coordinates to store.
    Returns
    -------
    ``None``
    Update ``data.obsm``:
        * ``data.obsm['X_' + out_basis]``: FI-tSNE coordinates of the data.
    Examples
    --------
    >>> pg.tsne(data)
    """
    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)
    # astype(float64) also copies, so FItSNE's in-place mutation of X cannot
    # corrupt the representation stored in `data`.
    X = X_from_rep(data, rep).astype(np.float64)
    if learning_rate == "auto":
        learning_rate = max(X.shape[0] / early_exaggeration, 200.0)
    if initialization == "random":
        initialization = None
    elif initialization == "pca":
        if rep == "pca":
            # Representation is already PCA: reuse its leading components.
            initialization = X[:, 0:n_components].copy()
        else:
            from sklearn.decomposition import PCA
            pca = PCA(n_components=n_components, random_state=random_state)
            with threadpool_limits(limits = n_jobs):
                initialization = np.ascontiguousarray(pca.fit_transform(X))
        # Rescale so the first axis has std 0.0001 (Kobak19-style PCA init).
        initialization = initialization / np.std(initialization[:, 0]) * 0.0001
    else:
        # Caller supplied explicit coordinates; must be (n_cells, n_components).
        assert isinstance(initialization, np.ndarray) and initialization.ndim == 2 and initialization.shape[0] == X.shape[0] and initialization.shape[1] == n_components
        if initialization.dtype != np.float64:
            initialization = initialization.astype(np.float64)
    data.obsm["X_" + out_basis] = calc_tsne(
        X,
        n_jobs,
        n_components,
        perplexity,
        early_exaggeration,
        learning_rate,
        random_state,
        initialization,
    )
@timer(logger=logger)
def umap(
    data: MultimodalData,
    rep: str = "pca",
    n_components: int = 2,
    n_neighbors: int = 15,
    min_dist: float = 0.5,
    spread: float = 1.0,
    n_jobs: int = -1,
    full_speed: bool = False,
    random_state: int = 0,
    out_basis: str = "umap",
) -> None:
    """Calculate UMAP embedding of cells.
    This function uses umap-learn_ package. See [McInnes18]_ for details on UMAP.
    .. _umap-learn: https://github.com/lmcinnes/umap
    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.
    rep: ``str``, optional, default: ``"pca"``
        Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.
    n_components: ``int``, optional, default: ``2``
        Dimension of calculated UMAP coordinates. By default, generate 2-dimensional data for 2D visualization.
    n_neighbors: ``int``, optional, default: ``15``
        Number of nearest neighbors considered during the computation.
    min_dist: ``float``, optional, default: ``0.5``
        The effective minimum distance between embedded data points.
    spread: ``float``, optional, default: ``1.0``
        The effective scale of embedded data points.
    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use for computing kNN graphs. If ``-1``, use all physical CPU cores.
    full_speed: ``bool``, optional, default: ``False``
        * If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.
        * Otherwise, use only one thread to make sure results are reproducible.
    random_state: ``int``, optional, default: ``0``
        Random seed set for reproducing results.
    out_basis: ``str``, optional, default: ``"umap"``
        Key name for calculated UMAP coordinates to store.
    Returns
    -------
    ``None``
    Update ``data.obsm``:
        * ``data.obsm['X_' + out_basis]``: UMAP coordinates of the data.
    Examples
    --------
    >>> pg.umap(data)
    """
    rep = update_rep(rep)
    X = X_from_rep(data, rep)
    if data.shape[0] < n_neighbors:
        logger.warning(f"Warning: Number of samples = {data.shape[0]} < K = {n_neighbors}!\n Set K to {data.shape[0]}.")
        n_neighbors = data.shape[0]
    knn_indices, knn_dists = get_neighbors(data, K = n_neighbors, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    # Prepend each cell itself (index column, distance 0) so the arrays become
    # self-inclusive neighbor lists of width n_neighbors, as calc_umap expects.
    knn_indices = np.insert(knn_indices[:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis=1)
    knn_dists = np.insert(knn_dists[:, 0 : n_neighbors - 1], 0, 0.0, axis=1)
    data.obsm["X_" + out_basis] = calc_umap(
        X,
        n_components,
        n_neighbors,
        min_dist,
        spread,
        random_state,
        knn_indices=knn_indices,
        knn_dists=knn_dists,
    )
@timer(logger=logger)
def fle(
    data: MultimodalData,
    file_name: str = None,
    n_jobs: int = -1,
    rep: str = "diffmap",
    K: int = 50,
    full_speed: bool = False,
    target_change_per_node: float = 2.0,
    target_steps: int = 5000,
    is3d: bool = False,
    memory: int = 8,
    random_state: int = 0,
    out_basis: str = "fle",
) -> None:
    """Construct the Force-directed (FLE) graph.
    This implementation uses forceatlas2-python_ package, which is a Python wrapper of ForceAtlas2_.
    See [Jacomy14]_ for details on FLE.
    .. _forceatlas2-python: https://github.com/klarman-cell-observatory/forceatlas2-python
    .. _ForceAtlas2: https://github.com/klarman-cell-observatory/forceatlas2
    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.
    file_name: ``str``, optional, default: ``None``
        Temporary file to store the coordinates as the input to forceatlas2. If ``None``, use ``tempfile.mkstemp`` to generate file name.
    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use. If ``-1``, use all physical CPU cores.
    rep: ``str``, optional, default: ``"diffmap"``
        Representation of data used for the calculation. By default, use Diffusion Map coordinates. If ``None``, use the count matrix ``data.X``.
    K: ``int``, optional, default: ``50``
        Number of nearest neighbors to be considered during the computation.
    full_speed: ``bool``, optional, default: ``False``
        * If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.
        * Otherwise, use only one thread to make sure results are reproducible.
    target_change_per_node: ``float``, optional, default: ``2.0``
        Target change per node to stop ForceAtlas2.
    target_steps: ``int``, optional, default: ``5000``
        Maximum number of iterations before stopping the ForceAtlas2 algorithm.
    is3d: ``bool``, optional, default: ``False``
        If ``True``, calculate 3D force-directed layout.
    memory: ``int``, optional, default: ``8``
        Memory size in GB for the Java FA2 component. By default, use 8GB memory.
    random_state: ``int``, optional, default: ``0``
        Random seed set for reproducing results.
    out_basis: ``str``, optional, default: ``"fle"``
        Key name for calculated FLE coordinates to store.
    Returns
    -------
    ``None``
    Update ``data.obsm``:
        * ``data.obsm['X_' + out_basis]``: FLE coordinates of the data.
    Examples
    --------
    >>> pg.fle(data)
    """
    if file_name is None:
        import tempfile
        _, file_name = tempfile.mkstemp()
    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)
    # Compute the kNN affinity matrix only if it is not already cached in uns.
    if ("W_" + rep) not in data.uns:
        neighbors(
            data,
            K=K,
            rep=rep,
            n_jobs=n_jobs,
            random_state=random_state,
            full_speed=full_speed,
        )
    data.obsm["X_" + out_basis] = calc_force_directed_layout(
        W_from_rep(data, rep),
        file_name,
        n_jobs,
        target_change_per_node,
        target_steps,
        is3d,
        memory,
        random_state,
    )
@timer(logger=logger)
def select_cells(distances, frac, K=25, alpha=1.0, random_state=0):
    """Randomly down-sample cells weighted by local kNN radius (not user API).

    Sampling probability per cell is proportional to
    ``distances[:, K - 2] ** alpha``; ``alpha == 0`` gives uniform sampling.
    Returns a boolean mask with ``int(n * frac)`` True entries.
    """
    n_obs = distances.shape[0]
    assert K >= 2
    max_K = distances.shape[1] + 1
    if K > max_K:
        logger.info(f"Warning: in select_cells, K = {K} > the number of calculated nearest neighbors {distances.shape[1] + 1}!\nSet K to {distances.shape[1] + 1}")
        K = max_K
    # Build sampling weights; alpha 0/1 are special-cased to skip the pow.
    probs = np.ones(n_obs)
    if alpha != 0.0:
        probs[:] = distances[:, K - 2]
        if alpha != 1.0:
            probs **= alpha
    probs /= probs.sum()
    np.random.seed(random_state)
    mask = np.zeros(n_obs, dtype=bool)
    chosen = np.random.choice(n_obs, size=int(n_obs * frac), replace=False, p=probs)
    mask[chosen] = True
    return mask
@timer(logger=logger)
def net_umap(
    data: MultimodalData,
    rep: str = "pca",
    n_jobs: int = -1,
    n_components: int = 2,
    n_neighbors: int = 15,
    min_dist: float = 0.5,
    spread: float = 1.0,
    random_state: int = 0,
    select_frac: float = 0.1,
    select_K: int = 25,
    select_alpha: float = 1.0,
    full_speed: bool = False,
    net_alpha: float = 0.1,
    polish_learning_rate: float = 10.0,
    polish_n_epochs: int = 30,
    out_basis: str = "net_umap",
) -> None:
    """Calculate Net-UMAP embedding of cells.
    Net-UMAP is an approximated UMAP embedding using Deep Learning model to improve the speed.
    In specific, the deep model used is MLPRegressor_, the *scikit-learn* implementation of Multi-layer Perceptron regressor.
    See [Li20]_ for details.
    .. _MLPRegressor: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html
    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.
    rep: ``str``, optional, default: ``"pca"``
        Representation of data used for the calculation. By default, use PCA coordinates. If ``None``, use the count matrix ``data.X``.
    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use. If ``-1``, use all physical CPU cores.
    n_components: ``int``, optional, default: ``2``
        Dimension of calculated UMAP coordinates. By default, generate 2-dimensional data for 2D visualization.
    n_neighbors: ``int``, optional, default: ``15``
        Number of nearest neighbors considered during the computation.
    min_dist: ``float``, optional, default: ``0.5``
        The effective minimum distance between embedded data points.
    spread: ``float``, optional, default: ``1.0``
        The effective scale of embedded data points.
    random_state: ``int``, optional, default: ``0``
        Random seed set for reproducing results.
    select_frac: ``float``, optional, default: ``0.1``
        Down sampling fraction on the cells.
    select_K: ``int``, optional, default: ``25``
        Number of neighbors to be used to estimate local density for each data point for down sampling.
    select_alpha: ``float``, optional, default: ``1.0``
        Weight the down sample to be proportional to ``radius ** select_alpha``.
    full_speed: ``bool``, optional, default: ``False``
        * If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.
        * Otherwise, use only one thread to make sure results are reproducible.
    net_alpha: ``float``, optional, default: ``0.1``
        L2 penalty (regularization term) parameter of the deep regressor.
    polish_learning_rate: ``float``, optional, default: ``10.0``
        After running the deep regressor to predict new coordinates, use ``polish_learning_rate`` as the learning rate to polish the coordinates.
    polish_n_epochs: ``int``, optional, default: ``30``
        Number of epochs for polishing UMAP run.
    out_basis: ``str``, optional, default: ``"net_umap"``
        Key name for calculated UMAP coordinates to store.
    Returns
    -------
    ``None``
    Update ``data.obsm``:
        * ``data.obsm['X_' + out_basis]``: Net UMAP coordinates of the data.
    Update ``data.obs``:
        * ``data.obs['ds_selected']``: Boolean array to indicate which cells are selected during the down sampling phase.
    Examples
    --------
    >>> pg.net_umap(data)
    """
    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)
    # Phase 1: density-weighted down-sampling of cells.
    knn_indices, knn_dists = get_neighbors(data, K = select_K, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    selected = select_cells(
        knn_dists,
        select_frac,
        K=select_K,
        alpha=select_alpha,
        random_state=random_state,
    )
    X_full = X_from_rep(data, rep)
    X = X_full[selected, :]
    if data.shape[0] < n_neighbors:
        logger.warning(f"Warning: Number of samples = {data.shape[0]} < K = {n_neighbors}!\n Set K to {data.shape[0]}.")
        n_neighbors = data.shape[0]
    ds_indices_key = "ds_" + rep + "_knn_indices" # ds refers to down-sampling
    ds_distances_key = "ds_" + rep + "_knn_distances"
    indices, distances = calculate_nearest_neighbors(
        X,
        K=n_neighbors,
        n_jobs=n_jobs,
        random_state=random_state,
        full_speed=full_speed,
    )
    data.uns[ds_indices_key] = indices
    data.uns[ds_distances_key] = distances
    # Phase 2: exact UMAP on the down-sampled subset (self-inclusive kNN lists).
    knn_indices = np.insert(
        data.uns[ds_indices_key][:, 0 : n_neighbors - 1], 0, range(X.shape[0]), axis=1
    )
    knn_dists = np.insert(
        data.uns[ds_distances_key][:, 0 : n_neighbors - 1], 0, 0.0, axis=1
    )
    X_umap = calc_umap(
        X,
        n_components,
        n_neighbors,
        min_dist,
        spread,
        random_state,
        knn_indices=knn_indices,
        knn_dists=knn_dists,
    )
    data.uns["X_" + out_basis + "_small"] = X_umap
    data.obs["ds_selected"] = selected
    # Phase 3: train the MLP regressor on the subset and predict coordinates
    # for the remaining cells.
    Y_init = np.zeros((data.shape[0], n_components), dtype=np.float64)
    Y_init[selected, :] = X_umap
    Y_init[~selected, :] = net_train_and_predict(
        X, X_umap, X_full[~selected, :], net_alpha, n_jobs, random_state, verbose=True
    )
    data.obsm["X_" + out_basis + "_pred"] = Y_init
    # Phase 4: short polishing UMAP run on all cells, initialized at Y_init.
    knn_indices, knn_dists = get_neighbors(data, K = n_neighbors, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    knn_indices = np.insert(knn_indices[:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis=1)
    knn_dists = np.insert(knn_dists[:, 0 : n_neighbors - 1], 0, 0.0, axis=1)
    data.obsm["X_" + out_basis] = calc_umap(
        X_full,
        n_components,
        n_neighbors,
        min_dist,
        spread,
        random_state,
        init=Y_init,
        n_epochs=polish_n_epochs,
        learning_rate=polish_learning_rate,
        knn_indices=knn_indices,
        knn_dists=knn_dists,
    )
@timer(logger=logger)
def net_fle(
    data: MultimodalData,
    file_name: str = None,
    n_jobs: int = -1,
    rep: str = "diffmap",
    K: int = 50,
    full_speed: bool = False,
    target_change_per_node: float = 2.0,
    target_steps: int = 5000,
    is3d: bool = False,
    memory: int = 8,
    random_state: int = 0,
    select_frac: float = 0.1,
    select_K: int = 25,
    select_alpha: float = 1.0,
    net_alpha: float = 0.1,
    polish_target_steps: int = 1500,
    out_basis: str = "net_fle",
) -> None:
    """Construct Net-Force-directed (FLE) graph.
    Net-FLE is an approximated FLE graph using Deep Learning model to improve the speed.
    In specific, the deep model used is MLPRegressor_, the *scikit-learn* implementation of Multi-layer Perceptron regressor.
    See [Li20]_ for details.
    .. _MLPRegressor: https://scikit-learn.org/stable/modules/generated/sklearn.neural_network.MLPRegressor.html
    Parameters
    ----------
    data: ``pegasusio.MultimodalData``
        Annotated data matrix with rows for cells and columns for genes.
    file_name: ``str``, optional, default: ``None``
        Temporary file to store the coordinates as the input to forceatlas2. If ``None``, use ``tempfile.mkstemp`` to generate file name.
    n_jobs: ``int``, optional, default: ``-1``
        Number of threads to use. If ``-1``, use all physical CPU cores.
    rep: ``str``, optional, default: ``"diffmap"``
        Representation of data used for the calculation. By default, use Diffusion Map coordinates. If ``None``, use the count matrix ``data.X``.
    K: ``int``, optional, default: ``50``
        Number of nearest neighbors to be considered during the computation.
    full_speed: ``bool``, optional, default: ``False``
        * If ``True``, use multiple threads in constructing ``hnsw`` index. However, the kNN results are not reproducible.
        * Otherwise, use only one thread to make sure results are reproducible.
    target_change_per_node: ``float``, optional, default: ``2.0``
        Target change per node to stop ForceAtlas2.
    target_steps: ``int``, optional, default: ``5000``
        Maximum number of iterations before stopping the ForceAtlas2 algorithm.
    is3d: ``bool``, optional, default: ``False``
        If ``True``, calculate 3D force-directed layout.
    memory: ``int``, optional, default: ``8``
        Memory size in GB for the Java FA2 component. By default, use 8GB memory.
    random_state: ``int``, optional, default: ``0``
        Random seed set for reproducing results.
    select_frac: ``float``, optional, default: ``0.1``
        Down sampling fraction on the cells.
    select_K: ``int``, optional, default: ``25``
        Number of neighbors to be used to estimate local density for each data point for down sampling.
    select_alpha: ``float``, optional, default: ``1.0``
        Weight the down sample to be proportional to ``radius ** select_alpha``.
    net_alpha: ``float``, optional, default: ``0.1``
        L2 penalty (regularization term) parameter of the deep regressor.
    polish_target_steps: ``int``, optional, default: ``1500``
        After running the deep regressor to predict new coordinate, Number of ForceAtlas2 iterations.
    out_basis: ``str``, optional, default: ``"net_fle"``
        Key name for calculated FLE coordinates to store.
    Returns
    -------
    ``None``
    Update ``data.obsm``:
        * ``data.obsm['X_' + out_basis]``: Net FLE coordinates of the data.
    Update ``data.obs``:
        * ``data.obs['ds_diffmap_selected']``: Boolean array to indicate which cells are selected during the down sampling phase.
    Examples
    --------
    >>> pg.net_fle(data)
    """
    # Bug fix: the original had this `if` duplicated (nested inside itself);
    # a single check is sufficient.
    if file_name is None:
        import tempfile
        _, file_name = tempfile.mkstemp()
    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)
    # Compute the kNN affinity matrix only if it is not already cached in uns.
    if ("W_" + rep) not in data.uns:
        neighbors(
            data,
            K=K,
            rep=rep,
            n_jobs=n_jobs,
            random_state=random_state,
            full_speed=full_speed,
        )
    # Phase 1: density-weighted down-sampling of cells.
    knn_indices, knn_dists = get_neighbors(data, K = select_K, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    selected = select_cells(
        knn_dists,
        select_frac,
        K=select_K,
        alpha=select_alpha,
        random_state=random_state,
    )
    X_full = X_from_rep(data, rep)
    X = X_full[selected, :]
    ds_indices_key = "ds_" + rep + "_knn_indices"
    ds_distances_key = "ds_" + rep + "_knn_distances"
    indices, distances = calculate_nearest_neighbors(
        X, K=K, n_jobs=n_jobs, random_state=random_state, full_speed=full_speed
    )
    data.uns[ds_indices_key] = indices
    data.uns[ds_distances_key] = distances
    # Phase 2: exact FLE on the down-sampled subset.
    W = calculate_affinity_matrix(indices, distances)
    X_fle = calc_force_directed_layout(
        W,
        file_name + ".small",
        n_jobs,
        target_change_per_node,
        target_steps,
        is3d,
        memory,
        random_state,
    )
    data.uns["X_" + out_basis + "_small"] = X_fle
    data.obs["ds_diffmap_selected"] = selected
    # Phase 3: train the MLP regressor on the subset and predict coordinates
    # for the remaining cells.
    n_components = 2 if not is3d else 3
    Y_init = np.zeros((data.shape[0], n_components), dtype=np.float64)
    Y_init[selected, :] = X_fle
    Y_init[~selected, :] = net_train_and_predict(
        X, X_fle, X_full[~selected, :], net_alpha, n_jobs, random_state, verbose=True
    )
    data.obsm["X_" + out_basis + "_pred"] = Y_init
    # Phase 4: short polishing FLE run on all cells, initialized at Y_init.
    data.obsm["X_" + out_basis] = calc_force_directed_layout(
        W_from_rep(data, rep),
        file_name,
        n_jobs,
        target_change_per_node,
        polish_target_steps,
        is3d,
        memory,
        random_state,
        init=Y_init,
    )
| 32.408117
| 168
| 0.634245
|
import time
import numpy as np
import scipy
import umap as umap_module
import forceatlas2 as fa2
import uuid
from threadpoolctl import threadpool_limits
from pegasusio import MultimodalData
from pegasus.tools import (
eff_n_jobs,
update_rep,
X_from_rep,
W_from_rep,
get_neighbors,
neighbors,
net_train_and_predict,
calculate_nearest_neighbors,
calculate_affinity_matrix,
construct_graph,
)
import logging
logger = logging.getLogger(__name__)
from pegasusio import timer
def calc_tsne(
    X,
    nthreads,
    no_dims,
    perplexity,
    early_exag_coeff,
    learning_rate,
    rand_seed,
    initialization=None,
    max_iter=750,
    stop_early_exag_iter=250,
    mom_switch_iter=250,
):
    """Compute a t-SNE embedding of ``X`` with the FIt-SNE package.

    Parameters are forwarded unchanged to ``fitsne.FItSNE``. Raises
    ``Exception`` when the native ``fftw3`` shared library is missing.
    """
    import ctypes.util
    # FIt-SNE requires fftw3 at run time; fail early with a clear message.
    fftw3_loc = ctypes.util.find_library("fftw3")
    if fftw3_loc is None:
        raise Exception("Please install 'fftw3' first to use the FIt-SNE feature!")
    # Imported lazily so the module works without fitsne installed.
    from fitsne import FItSNE
    return FItSNE(
        X,
        nthreads=nthreads,
        no_dims=no_dims,
        perplexity=perplexity,
        early_exag_coeff=early_exag_coeff,
        learning_rate=learning_rate,
        rand_seed=rand_seed,
        initialization=initialization,
        max_iter=max_iter,
        stop_early_exag_iter=stop_early_exag_iter,
        mom_switch_iter=mom_switch_iter,
    )
def calc_umap(
    X,
    n_components,
    n_neighbors,
    min_dist,
    spread,
    random_state,
    init="spectral",
    n_epochs=None,
    learning_rate=1.0,
    knn_indices=None,
    knn_dists=None,
):
    """Compute a UMAP embedding, optionally reusing precomputed kNN graphs.

    For fewer than 4096 cells, or when ``knn_indices`` is not given, this
    delegates to ``umap.UMAP.fit_transform``. Otherwise it replays the
    preprocessing steps of UMAP's ``fit`` so that the caller-supplied
    ``knn_indices``/``knn_dists`` are used to build the fuzzy simplicial
    set, then calls ``simplicial_set_embedding`` directly. Returns the
    embedding array.
    """
    umap_obj = umap_module.UMAP(
        n_components=n_components,
        n_neighbors=n_neighbors,
        min_dist=min_dist,
        spread=spread,
        random_state=random_state,
        init=init,
        n_epochs=n_epochs,
        learning_rate=learning_rate,
        verbose=True,
    )
    embedding = None
    if X.shape[0] < 4096 or knn_indices is None:
        embedding = umap_obj.fit_transform(X)
        logger.info(f"Using umap kNN graph because number of cells {X.shape[0]} is smaller than 4096 or knn_indices is not provided.")
    else:
        assert knn_dists is not None
        # Preprocessing below mirrors UMAP's own fit() so our kNN graph can
        # be injected into fuzzy_simplicial_set.
        from sklearn.utils import check_random_state, check_array
        X = check_array(X, dtype=np.float32, accept_sparse="csr")
        umap_obj._raw_data = X
        if umap_obj.a is None or umap_obj.b is None:
            umap_obj._a, umap_obj._b = umap_module.umap_.find_ab_params(
                umap_obj.spread, umap_obj.min_dist
            )
        else:
            umap_obj._a = umap_obj.a
            umap_obj._b = umap_obj.b
        umap_obj._metric_kwds = (
            umap_obj.metric_kwds if umap_obj.metric_kwds is not None else {}
        )
        umap_obj._target_metric_kwds = {}
        _init = (
            check_array(umap_obj.init, dtype=np.float32, accept_sparse=False)
            if isinstance(umap_obj.init, np.ndarray)
            else umap_obj.init
        )
        umap_obj._initial_alpha = umap_obj.learning_rate
        umap_obj._validate_parameters()
        if umap_obj.verbose:
            logger.info(str(umap_obj))
        if scipy.sparse.isspmatrix_csr(X):
            if not X.has_sorted_indices:
                X.sort_indices()
            umap_obj._sparse_data = True
        else:
            umap_obj._sparse_data = False
        _random_state = check_random_state(umap_obj.random_state)
        if umap_obj.verbose:
            logger.info("Construct fuzzy simplicial set")
        umap_obj._small_data = False
        umap_obj.graph_, umap_obj._sigmas, umap_obj._rhos = umap_module.umap_.fuzzy_simplicial_set(
            X=X,
            n_neighbors=umap_obj.n_neighbors,
            random_state=_random_state,
            metric=umap_obj.metric,
            metric_kwds=umap_obj._metric_kwds,
            knn_indices=knn_indices,
            knn_dists=knn_dists,
            angular=umap_obj.angular_rp_forest,
            set_op_mix_ratio=umap_obj.set_op_mix_ratio,
            local_connectivity=umap_obj.local_connectivity,
            verbose=umap_obj.verbose,
        )
        _n_epochs = umap_obj.n_epochs if umap_obj.n_epochs is not None else 0
        if umap_obj.verbose:
            logger.info("Construct embedding")
        # Shim over the umap-learn >=0.5 signature change (densMAP kwargs,
        # tuple return value).
        def simplicial_set_embedding(*args, **kwargs):
            from packaging import version
            if version.parse(umap_module.__version__) >= version.parse('0.5.0'): # For umap-learn v0.5+
                kwargs.update({'densmap': False, 'densmap_kwds': {}, 'output_dens': False})
            embedding = umap_module.umap_.simplicial_set_embedding(*args, **kwargs)
            return (embedding[0] if isinstance(embedding, tuple) else embedding)
        embedding = simplicial_set_embedding(
            data=X,
            graph=umap_obj.graph_,
            n_components=umap_obj.n_components,
            initial_alpha=umap_obj._initial_alpha,
            a=umap_obj._a,
            b=umap_obj._b,
            gamma=umap_obj.repulsion_strength,
            negative_sample_rate=umap_obj.negative_sample_rate,
            n_epochs=_n_epochs,
            init=_init,
            random_state=_random_state,
            metric=umap_obj.metric,
            metric_kwds=umap_obj._metric_kwds,
            verbose=umap_obj.verbose,
        )
    return embedding
def calc_force_directed_layout(
    W,
    file_name,
    n_jobs,
    target_change_per_node,
    target_steps,
    is3d,
    memory,
    random_state,
    init=None,
):
    """Run ForceAtlas2 on the affinity graph built from ``W`` and return the layout."""
    layout_kwargs = dict(
        graph=construct_graph(W),
        n_jobs=n_jobs,
        target_change_per_node=target_change_per_node,
        target_steps=target_steps,
        is3d=is3d,
        memory=memory,
        random_state=random_state,
        init=init,
    )
    return fa2.forceatlas2(file_name, **layout_kwargs)
@timer(logger=logger)
def tsne(
    data: MultimodalData,
    rep: str = "pca",
    n_jobs: int = -1,
    n_components: int = 2,
    perplexity: float = 30,
    early_exaggeration: int = 12,
    learning_rate: float = "auto",
    initialization: str = "pca",
    random_state: int = 0,
    out_basis: str = "tsne",
) -> None:
    """Compute the tSNE embedding and store it as ``data.obsm["X_" + out_basis]``.

    ``initialization`` may be "random", "pca", or a pre-computed
    (n_cells, n_components) array.
    """
    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)
    X = X_from_rep(data, rep).astype(np.float64)

    # "auto" scales the learning rate with the number of cells.
    if learning_rate == "auto":
        learning_rate = max(X.shape[0] / early_exaggeration, 200.0)

    if initialization == "random":
        initialization = None
    elif initialization == "pca":
        if rep == "pca":
            # The representation is already a PCA; reuse its leading components.
            initialization = X[:, 0:n_components].copy()
        else:
            from sklearn.decomposition import PCA

            reducer = PCA(n_components=n_components, random_state=random_state)
            with threadpool_limits(limits=n_jobs):
                initialization = np.ascontiguousarray(reducer.fit_transform(X))
        # Rescale so the first axis has standard deviation 1e-4.
        initialization = initialization / np.std(initialization[:, 0]) * 0.0001
    else:
        # Caller supplied explicit coordinates; validate shape and dtype.
        assert (
            isinstance(initialization, np.ndarray)
            and initialization.ndim == 2
            and initialization.shape[0] == X.shape[0]
            and initialization.shape[1] == n_components
        )
        if initialization.dtype != np.float64:
            initialization = initialization.astype(np.float64)

    data.obsm["X_" + out_basis] = calc_tsne(
        X,
        n_jobs,
        n_components,
        perplexity,
        early_exaggeration,
        learning_rate,
        random_state,
        initialization,
    )
@timer(logger=logger)
def umap(
    data: MultimodalData,
    rep: str = "pca",
    n_components: int = 2,
    n_neighbors: int = 15,
    min_dist: float = 0.5,
    spread: float = 1.0,
    n_jobs: int = -1,
    full_speed: bool = False,
    random_state: int = 0,
    out_basis: str = "umap",
) -> None:
    """Compute the UMAP embedding and store it as ``data.obsm["X_" + out_basis]``."""
    rep = update_rep(rep)
    X = X_from_rep(data, rep)

    # K cannot exceed the number of samples.
    n_cells = data.shape[0]
    if n_cells < n_neighbors:
        logger.warning(f"Warning: Number of samples = {data.shape[0]} < K = {n_neighbors}!\n Set K to {data.shape[0]}.")
        n_neighbors = n_cells

    knn_indices, knn_dists = get_neighbors(data, K = n_neighbors, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    # Prepend each cell itself (index i, distance 0) as its own first neighbor.
    knn_indices = np.insert(knn_indices[:, 0 : n_neighbors - 1], 0, range(n_cells), axis=1)
    knn_dists = np.insert(knn_dists[:, 0 : n_neighbors - 1], 0, 0.0, axis=1)

    embedding = calc_umap(
        X,
        n_components,
        n_neighbors,
        min_dist,
        spread,
        random_state,
        knn_indices=knn_indices,
        knn_dists=knn_dists,
    )
    data.obsm["X_" + out_basis] = embedding
@timer(logger=logger)
def fle(
    data: MultimodalData,
    file_name: str = None,
    n_jobs: int = -1,
    rep: str = "diffmap",
    K: int = 50,
    full_speed: bool = False,
    target_change_per_node: float = 2.0,
    target_steps: int = 5000,
    is3d: bool = False,
    memory: int = 8,
    random_state: int = 0,
    out_basis: str = "fle",
) -> None:
    """Compute a force-directed layout embedding and store it as ``data.obsm["X_" + out_basis]``."""
    if file_name is None:
        import tempfile

        _, file_name = tempfile.mkstemp()

    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)

    # Make sure the affinity graph for this representation has been computed.
    if ("W_" + rep) not in data.uns:
        neighbors(data, K=K, rep=rep, n_jobs=n_jobs,
                  random_state=random_state, full_speed=full_speed)

    layout = calc_force_directed_layout(
        W_from_rep(data, rep), file_name, n_jobs,
        target_change_per_node, target_steps, is3d, memory, random_state,
    )
    data.obsm["X_" + out_basis] = layout
@timer(logger=logger)
def select_cells(distances, frac, K=25, alpha=1.0, random_state=0):
    """Randomly select a fraction of cells, weighted by kNN distance.

    Parameters
    ----------
    distances: ndarray of shape (nsample, n_neighbors)
        Sorted distances to each cell's nearest neighbors.
    frac: float
        Fraction of cells to select.
    K: int
        Use the distance to the (K-1)-th neighbor as the sampling weight; must be >= 2.
    alpha: float
        Weight exponent: 0.0 gives uniform sampling, 1.0 uses the raw distance,
        otherwise the distance raised to ``alpha``.
    random_state: int
        Seed for reproducible sampling.

    Returns
    -------
    selected: ndarray of bool, shape (nsample,)
        Boolean mask of the selected cells.
    """
    nsample = distances.shape[0]

    assert K >= 2
    if K > distances.shape[1] + 1:
        logger.info(f"Warning: in select_cells, K = {K} > the number of calculated nearest neighbors {distances.shape[1] + 1}!\nSet K to {distances.shape[1] + 1}")
        K = distances.shape[1] + 1

    probs = np.zeros(nsample)
    if alpha == 0.0:
        probs[:] = 1.0  # uniform
    elif alpha == 1.0:
        probs[:] = distances[:, K - 2]
    else:
        probs[:] = distances[:, K - 2] ** alpha
    probs /= probs.sum()

    # Fix: use a local RandomState instead of seeding the global numpy RNG, so this
    # call no longer perturbs random state elsewhere. RandomState(seed) produces the
    # same MT19937 sequence as np.random.seed(seed) + np.random.choice, so the
    # selected cells are identical to the previous behavior.
    rng = np.random.RandomState(random_state)
    selected = np.zeros(nsample, dtype=bool)
    selected[
        rng.choice(nsample, size=int(nsample * frac), replace=False, p=probs)
    ] = True

    return selected
@timer(logger=logger)
def net_umap(
    data: MultimodalData,
    rep: str = "pca",
    n_jobs: int = -1,
    n_components: int = 2,
    n_neighbors: int = 15,
    min_dist: float = 0.5,
    spread: float = 1.0,
    random_state: int = 0,
    select_frac: float = 0.1,
    select_K: int = 25,
    select_alpha: float = 1.0,
    full_speed: bool = False,
    net_alpha: float = 0.1,
    polish_learning_rate: float = 10.0,
    polish_n_epochs: int = 30,
    out_basis: str = "net_umap",
) -> None:
    """Compute a Net-UMAP embedding and store it as ``data.obsm["X_" + out_basis]``.

    Down-samples ``select_frac`` of the cells (weighted by kNN distance),
    embeds them with UMAP, trains a regressor to predict coordinates for the
    remaining cells, then polishes the combined layout with a short UMAP run
    over all cells initialized from the prediction.
    """
    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)
    # Density-weighted down-sampling based on kNN distances.
    knn_indices, knn_dists = get_neighbors(data, K = select_K, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    selected = select_cells(
        knn_dists,
        select_frac,
        K=select_K,
        alpha=select_alpha,
        random_state=random_state,
    )
    X_full = X_from_rep(data, rep)
    X = X_full[selected, :]
    if data.shape[0] < n_neighbors:
        logger.warning(f"Warning: Number of samples = {data.shape[0]} < K = {n_neighbors}!\n Set K to {data.shape[0]}.")
        n_neighbors = data.shape[0]
    ds_indices_key = "ds_" + rep + "_knn_indices" # ds refers to down-sampling
    ds_distances_key = "ds_" + rep + "_knn_distances"
    # kNN within the down-sampled subset only; cached in data.uns.
    indices, distances = calculate_nearest_neighbors(
        X,
        K=n_neighbors,
        n_jobs=n_jobs,
        random_state=random_state,
        full_speed=full_speed,
    )
    data.uns[ds_indices_key] = indices
    data.uns[ds_distances_key] = distances
    # Prepend each cell itself (index i, distance 0) as its own first neighbor.
    knn_indices = np.insert(
        data.uns[ds_indices_key][:, 0 : n_neighbors - 1], 0, range(X.shape[0]), axis=1
    )
    knn_dists = np.insert(
        data.uns[ds_distances_key][:, 0 : n_neighbors - 1], 0, 0.0, axis=1
    )
    # UMAP on the down-sampled subset.
    X_umap = calc_umap(
        X,
        n_components,
        n_neighbors,
        min_dist,
        spread,
        random_state,
        knn_indices=knn_indices,
        knn_dists=knn_dists,
    )
    data.uns["X_" + out_basis + "_small"] = X_umap
    data.obs["ds_selected"] = selected
    # Predict coordinates of the unselected cells from the subset embedding.
    Y_init = np.zeros((data.shape[0], n_components), dtype=np.float64)
    Y_init[selected, :] = X_umap
    Y_init[~selected, :] = net_train_and_predict(
        X, X_umap, X_full[~selected, :], net_alpha, n_jobs, random_state, verbose=True
    )
    data.obsm["X_" + out_basis + "_pred"] = Y_init
    # Polish: a short UMAP run over all cells, initialized at the predicted layout.
    knn_indices, knn_dists = get_neighbors(data, K = n_neighbors, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    knn_indices = np.insert(knn_indices[:, 0 : n_neighbors - 1], 0, range(data.shape[0]), axis=1)
    knn_dists = np.insert(knn_dists[:, 0 : n_neighbors - 1], 0, 0.0, axis=1)
    data.obsm["X_" + out_basis] = calc_umap(
        X_full,
        n_components,
        n_neighbors,
        min_dist,
        spread,
        random_state,
        init=Y_init,
        n_epochs=polish_n_epochs,
        learning_rate=polish_learning_rate,
        knn_indices=knn_indices,
        knn_dists=knn_dists,
    )
@timer(logger=logger)
def net_fle(
    data: MultimodalData,
    file_name: str = None,
    n_jobs: int = -1,
    rep: str = "diffmap",
    K: int = 50,
    full_speed: bool = False,
    target_change_per_node: float = 2.0,
    target_steps: int = 5000,
    is3d: bool = False,
    memory: int = 8,
    random_state: int = 0,
    select_frac: float = 0.1,
    select_K: int = 25,
    select_alpha: float = 1.0,
    net_alpha: float = 0.1,
    polish_target_steps: int = 1500,
    out_basis: str = "net_fle",
) -> None:
    """Compute a Net-FLE (approximate force-directed layout) embedding.

    Down-samples ``select_frac`` of the cells (weighted by kNN distance), runs
    the full force-directed layout on the subset, predicts coordinates for the
    remaining cells with a regressor, then polishes the combined layout with a
    shorter run over all cells. The result is stored as
    ``data.obsm["X_" + out_basis]``.
    """
    # Fix: the original nested two identical `if file_name is None:` checks;
    # the redundant inner one is removed.
    if file_name is None:
        import tempfile

        _, file_name = tempfile.mkstemp()

    rep = update_rep(rep)
    n_jobs = eff_n_jobs(n_jobs)

    # Ensure the affinity graph for this representation exists.
    if ("W_" + rep) not in data.uns:
        neighbors(
            data,
            K=K,
            rep=rep,
            n_jobs=n_jobs,
            random_state=random_state,
            full_speed=full_speed,
        )

    # Density-weighted down-sampling based on kNN distances.
    knn_indices, knn_dists = get_neighbors(data, K = select_K, rep = rep, n_jobs = n_jobs, random_state = random_state, full_speed = full_speed)
    selected = select_cells(
        knn_dists,
        select_frac,
        K=select_K,
        alpha=select_alpha,
        random_state=random_state,
    )
    X_full = X_from_rep(data, rep)
    X = X_full[selected, :]

    # kNN and affinities within the down-sampled subset; cached in data.uns.
    ds_indices_key = "ds_" + rep + "_knn_indices"
    ds_distances_key = "ds_" + rep + "_knn_distances"
    indices, distances = calculate_nearest_neighbors(
        X, K=K, n_jobs=n_jobs, random_state=random_state, full_speed=full_speed
    )
    data.uns[ds_indices_key] = indices
    data.uns[ds_distances_key] = distances

    W = calculate_affinity_matrix(indices, distances)

    # Layout of the down-sampled subset.
    X_fle = calc_force_directed_layout(
        W,
        file_name + ".small",
        n_jobs,
        target_change_per_node,
        target_steps,
        is3d,
        memory,
        random_state,
    )

    data.uns["X_" + out_basis + "_small"] = X_fle
    data.obs["ds_diffmap_selected"] = selected

    # Predict coordinates of the unselected cells from the subset layout.
    n_components = 2 if not is3d else 3
    Y_init = np.zeros((data.shape[0], n_components), dtype=np.float64)
    Y_init[selected, :] = X_fle
    Y_init[~selected, :] = net_train_and_predict(
        X, X_fle, X_full[~selected, :], net_alpha, n_jobs, random_state, verbose=True
    )

    data.obsm["X_" + out_basis + "_pred"] = Y_init

    # Polish: a shorter run over all cells, starting from the predicted layout.
    data.obsm["X_" + out_basis] = calc_force_directed_layout(
        W_from_rep(data, rep),
        file_name,
        n_jobs,
        target_change_per_node,
        polish_target_steps,
        is3d,
        memory,
        random_state,
        init=Y_init,
    )
| true
| true
|
1c4260852b0f621da5efbc981c92c14d38f9bbe8
| 1,469
|
py
|
Python
|
src/UQpy/distributions/collection/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/distributions/collection/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
src/UQpy/distributions/collection/__init__.py
|
SURGroup/UncertaintyQuantification
|
a94c8db47d07134ea2b3b0a3ca53ca818532c3e6
|
[
"MIT"
] | null | null | null |
"""distributions module."""
from UQpy.distributions.collection.Beta import Beta
from UQpy.distributions.collection.Binomial import Binomial
from UQpy.distributions.collection.Cauchy import Cauchy
from UQpy.distributions.collection.ChiSquare import ChiSquare
from UQpy.distributions.collection.Exponential import Exponential
from UQpy.distributions.collection.Gamma import Gamma
from UQpy.distributions.collection.GeneralizedExtreme import GeneralizedExtreme
from UQpy.distributions.collection.InverseGaussian import InverseGauss
from UQpy.distributions.collection.Laplace import Laplace
from UQpy.distributions.collection.Levy import Levy
from UQpy.distributions.collection.Logistic import Logistic
from UQpy.distributions.collection.Lognormal import Lognormal
from UQpy.distributions.collection.Maxwell import Maxwell
from UQpy.distributions.collection.Multinomial import Multinomial
from UQpy.distributions.collection.MultivariateNormal import MultivariateNormal
from UQpy.distributions.collection.Normal import Normal
from UQpy.distributions.collection.Pareto import Pareto
from UQpy.distributions.collection.Poisson import Poisson
from UQpy.distributions.collection.Rayleigh import Rayleigh
from UQpy.distributions.collection.TruncatedNormal import TruncatedNormal
from UQpy.distributions.collection.Uniform import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.distributions.collection.JointCopula import JointCopula
| 58.76
| 79
| 0.884275
|
from UQpy.distributions.collection.Beta import Beta
from UQpy.distributions.collection.Binomial import Binomial
from UQpy.distributions.collection.Cauchy import Cauchy
from UQpy.distributions.collection.ChiSquare import ChiSquare
from UQpy.distributions.collection.Exponential import Exponential
from UQpy.distributions.collection.Gamma import Gamma
from UQpy.distributions.collection.GeneralizedExtreme import GeneralizedExtreme
from UQpy.distributions.collection.InverseGaussian import InverseGauss
from UQpy.distributions.collection.Laplace import Laplace
from UQpy.distributions.collection.Levy import Levy
from UQpy.distributions.collection.Logistic import Logistic
from UQpy.distributions.collection.Lognormal import Lognormal
from UQpy.distributions.collection.Maxwell import Maxwell
from UQpy.distributions.collection.Multinomial import Multinomial
from UQpy.distributions.collection.MultivariateNormal import MultivariateNormal
from UQpy.distributions.collection.Normal import Normal
from UQpy.distributions.collection.Pareto import Pareto
from UQpy.distributions.collection.Poisson import Poisson
from UQpy.distributions.collection.Rayleigh import Rayleigh
from UQpy.distributions.collection.TruncatedNormal import TruncatedNormal
from UQpy.distributions.collection.Uniform import Uniform
from UQpy.distributions.collection.JointIndependent import JointIndependent
from UQpy.distributions.collection.JointCopula import JointCopula
| true
| true
|
1c4261909a6125c4b24deccef4cdfb6b92d5f694
| 6,127
|
py
|
Python
|
torchgeo/trainers/regression.py
|
remtav/torchgeo
|
d06b103f81edec4f4e0d13ccd621d318364679a2
|
[
"MIT"
] | null | null | null |
torchgeo/trainers/regression.py
|
remtav/torchgeo
|
d06b103f81edec4f4e0d13ccd621d318364679a2
|
[
"MIT"
] | null | null | null |
torchgeo/trainers/regression.py
|
remtav/torchgeo
|
d06b103f81edec4f4e0d13ccd621d318364679a2
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
"""Regression tasks."""
from typing import Any, Dict, cast
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules import Conv2d, Linear
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics import MeanAbsoluteError, MeanSquaredError, MetricCollection
from torchvision import models
from ..datasets.utils import unbind_samples
# https://github.com/pytorch/pytorch/issues/60979
# https://github.com/pytorch/pytorch/pull/61045
Conv2d.__module__ = "nn.Conv2d"
Linear.__module__ = "nn.Linear"
class RegressionTask(pl.LightningModule):
    """LightningModule for training models on regression datasets."""

    def config_task(self) -> None:
        """Configures the task based on kwargs parameters."""
        if self.hyperparams["model"] == "resnet18":
            self.model = models.resnet18(pretrained=self.hyperparams["pretrained"])
            in_features = self.model.fc.in_features
            # Replace the classifier head with a single-output regression head.
            self.model.fc = nn.Linear(in_features, out_features=1)
        else:
            raise ValueError(f"Model type '{self.hyperparams['model']}' is not valid.")

    def __init__(self, **kwargs: Any) -> None:
        """Initialize a new LightningModule for training simple regression models.

        Keyword Args:
            model: Name of the model to use (currently only "resnet18")
            pretrained: Whether to load ImageNet-pretrained backbone weights
            learning_rate: Initial learning rate to use in the optimizer
            learning_rate_schedule_patience: Patience parameter for the LR scheduler
        """
        super().__init__()

        # Creates `self.hparams` from kwargs
        self.save_hyperparameters()  # type: ignore[operator]
        self.hyperparams = cast(Dict[str, Any], self.hparams)
        self.config_task()

        # RMSE/MAE pairs, cloned per split so metric state is not shared.
        self.train_metrics = MetricCollection(
            {"RMSE": MeanSquaredError(squared=False), "MAE": MeanAbsoluteError()},
            prefix="train_",
        )
        self.val_metrics = self.train_metrics.clone(prefix="val_")
        self.test_metrics = self.train_metrics.clone(prefix="test_")

    def forward(self, *args: Any, **kwargs: Any) -> Any:
        """Forward pass of the model.

        Args:
            x: tensor of data to run through the model

        Returns:
            output from the model
        """
        return self.model(*args, **kwargs)

    def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
        """Compute and return the training loss (MSE).

        Args:
            batch: the output of your DataLoader

        Returns:
            training loss
        """
        batch = args[0]
        x = batch["image"]
        # Labels arrive as a flat tensor; reshape to (batch, 1) to match predictions.
        y = batch["label"].view(-1, 1)
        y_hat = self.forward(x)

        loss = F.mse_loss(y_hat, y)
        self.log("train_loss", loss)  # logging to TensorBoard
        self.train_metrics(y_hat, y)

        return loss

    def training_epoch_end(self, outputs: Any) -> None:
        """Logs epoch-level training metrics.

        Args:
            outputs: list of items returned by training_step
        """
        self.log_dict(self.train_metrics.compute())
        self.train_metrics.reset()

    def validation_step(self, *args: Any, **kwargs: Any) -> None:
        """Compute validation loss and log example predictions.

        Plots predictions for the first 10 batches when the datamodule
        provides a ``plot`` method; otherwise plotting is silently skipped.

        Args:
            batch: the output of your DataLoader
            batch_idx: the index of this batch
        """
        batch = args[0]
        batch_idx = args[1]
        x = batch["image"]
        y = batch["label"].view(-1, 1)
        y_hat = self.forward(x)

        loss = F.mse_loss(y_hat, y)
        self.log("val_loss", loss)
        self.val_metrics(y_hat, y)

        if batch_idx < 10:
            try:
                datamodule = self.trainer.datamodule  # type: ignore[union-attr]
                batch["prediction"] = y_hat
                # Move tensors to CPU for matplotlib-based plotting.
                for key in ["image", "label", "prediction"]:
                    batch[key] = batch[key].cpu()
                sample = unbind_samples(batch)[0]
                fig = datamodule.plot(sample)
                summary_writer = self.logger.experiment  # type: ignore[union-attr]
                summary_writer.add_figure(
                    f"image/{batch_idx}", fig, global_step=self.global_step
                )
            except AttributeError:
                # Datamodule or logger does not support plotting; best-effort only.
                pass

    def validation_epoch_end(self, outputs: Any) -> None:
        """Logs epoch level validation metrics.

        Args:
            outputs: list of items returned by validation_step
        """
        self.log_dict(self.val_metrics.compute())
        self.val_metrics.reset()

    def test_step(self, *args: Any, **kwargs: Any) -> None:
        """Compute test loss.

        Args:
            batch: the output of your DataLoader
        """
        batch = args[0]
        x = batch["image"]
        y = batch["label"].view(-1, 1)
        y_hat = self.forward(x)

        loss = F.mse_loss(y_hat, y)
        self.log("test_loss", loss)
        self.test_metrics(y_hat, y)

    def test_epoch_end(self, outputs: Any) -> None:
        """Logs epoch level test metrics.

        Args:
            outputs: list of items returned by test_step
        """
        self.log_dict(self.test_metrics.compute())
        self.test_metrics.reset()

    def configure_optimizers(self) -> Dict[str, Any]:
        """Initialize the optimizer and learning rate scheduler.

        Returns:
            a "lr dict" according to the pytorch lightning documentation --
            https://pytorch-lightning.readthedocs.io/en/latest/common/lightning_module.html#configure-optimizers
        """
        optimizer = torch.optim.AdamW(
            self.model.parameters(), lr=self.hyperparams["learning_rate"]
        )
        return {
            "optimizer": optimizer,
            "lr_scheduler": {
                # Reduce LR when validation loss plateaus.
                "scheduler": ReduceLROnPlateau(
                    optimizer,
                    patience=self.hyperparams["learning_rate_schedule_patience"],
                ),
                "monitor": "val_loss",
            },
        }
| 32.94086
| 112
| 0.598498
|
from typing import Any, Dict, cast
import pytorch_lightning as pl
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import Tensor
from torch.nn.modules import Conv2d, Linear
from torch.optim.lr_scheduler import ReduceLROnPlateau
from torchmetrics import MeanAbsoluteError, MeanSquaredError, MetricCollection
from torchvision import models
from ..datasets.utils import unbind_samples
Conv2d.__module__ = "nn.Conv2d"
Linear.__module__ = "nn.Linear"
class RegressionTask(pl.LightningModule):
def config_task(self) -> None:
if self.hyperparams["model"] == "resnet18":
self.model = models.resnet18(pretrained=self.hyperparams["pretrained"])
in_features = self.model.fc.in_features
self.model.fc = nn.Linear(in_features, out_features=1)
else:
raise ValueError(f"Model type '{self.hyperparams['model']}' is not valid.")
def __init__(self, **kwargs: Any) -> None:
super().__init__()
self.save_hyperparameters()
self.hyperparams = cast(Dict[str, Any], self.hparams)
self.config_task()
self.train_metrics = MetricCollection(
{"RMSE": MeanSquaredError(squared=False), "MAE": MeanAbsoluteError()},
prefix="train_",
)
self.val_metrics = self.train_metrics.clone(prefix="val_")
self.test_metrics = self.train_metrics.clone(prefix="test_")
def forward(self, *args: Any, **kwargs: Any) -> Any:
return self.model(*args, **kwargs)
def training_step(self, *args: Any, **kwargs: Any) -> Tensor:
batch = args[0]
x = batch["image"]
y = batch["label"].view(-1, 1)
y_hat = self.forward(x)
loss = F.mse_loss(y_hat, y)
self.log("train_loss", loss)
self.train_metrics(y_hat, y)
return loss
def training_epoch_end(self, outputs: Any) -> None:
self.log_dict(self.train_metrics.compute())
self.train_metrics.reset()
def validation_step(self, *args: Any, **kwargs: Any) -> None:
batch = args[0]
batch_idx = args[1]
x = batch["image"]
y = batch["label"].view(-1, 1)
y_hat = self.forward(x)
loss = F.mse_loss(y_hat, y)
self.log("val_loss", loss)
self.val_metrics(y_hat, y)
if batch_idx < 10:
try:
datamodule = self.trainer.datamodule
batch["prediction"] = y_hat
for key in ["image", "label", "prediction"]:
batch[key] = batch[key].cpu()
sample = unbind_samples(batch)[0]
fig = datamodule.plot(sample)
summary_writer = self.logger.experiment
summary_writer.add_figure(
f"image/{batch_idx}", fig, global_step=self.global_step
)
except AttributeError:
pass
def validation_epoch_end(self, outputs: Any) -> None:
self.log_dict(self.val_metrics.compute())
self.val_metrics.reset()
def test_step(self, *args: Any, **kwargs: Any) -> None:
batch = args[0]
x = batch["image"]
y = batch["label"].view(-1, 1)
y_hat = self.forward(x)
loss = F.mse_loss(y_hat, y)
self.log("test_loss", loss)
self.test_metrics(y_hat, y)
def test_epoch_end(self, outputs: Any) -> None:
self.log_dict(self.test_metrics.compute())
self.test_metrics.reset()
def configure_optimizers(self) -> Dict[str, Any]:
optimizer = torch.optim.AdamW(
self.model.parameters(), lr=self.hyperparams["learning_rate"]
)
return {
"optimizer": optimizer,
"lr_scheduler": {
"scheduler": ReduceLROnPlateau(
optimizer,
patience=self.hyperparams["learning_rate_schedule_patience"],
),
"monitor": "val_loss",
},
}
| true
| true
|
1c42620eeadd924a4985cdcf2ca0e88b6096d381
| 20,007
|
py
|
Python
|
cudaDistanceMatrix/cudaDistanceMatrix.py
|
greenmonn/distanceMatrixGPU
|
b06d9309ff6bf5e950a1f9384e58b47665a9b22e
|
[
"MIT"
] | 2
|
2021-05-25T14:51:42.000Z
|
2021-12-20T11:11:40.000Z
|
cudaDistanceMatrix/cudaDistanceMatrix.py
|
greenmonn/distanceMatrixGPU
|
b06d9309ff6bf5e950a1f9384e58b47665a9b22e
|
[
"MIT"
] | null | null | null |
cudaDistanceMatrix/cudaDistanceMatrix.py
|
greenmonn/distanceMatrixGPU
|
b06d9309ff6bf5e950a1f9384e58b47665a9b22e
|
[
"MIT"
] | null | null | null |
import h5py
import numpy as np
from skcuda import linalg, misc
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
linalg.init()
from numpy.linalg import norm
import os.path
cuda_driver = pycuda.driver.init()
pycuda.driver.Device(0).retain_primary_context()
from .fillArrayNumbaFloat32 import cudaFillFlattenArray32, cudaFillFullArray32
class DistanceMatrix:
"""GPU accelerated Distannce matrix caluclations.
Parameters
----------
file_name: string
Name of h5py file to save the distance matrix (don't need to have whole array in working memory).
dtype_out: string
What precision is needed? 32 or 64?
gpu: bool
Use gpu accelerations
numba: bool
Use additional gpu acceleration
"""
def __init__(self,
file_name="cuda_dist",
gpu=True,
numba=True,
dictinoray=None):
if os.path.isfile(str(file_name)):
os.remove(str(file_name))
self.filename_dist = file_name
self.file_dist = h5py.File(file_name, 'w')
self.out_type = np.float32
self.float = "float32"
self.load_full = False
self.gpu = gpu
self.numba = numba
if dictinoray:
self.dictinoray = dictinoray
else:
self.dictinoray = None
    def _cuda_norm(self, X):
        """L2-normalize each row of ``X`` on the GPU.

        Parameters
        ----------
        X: gpuarray
            2-D device array whose rows are normalized.

        Returns
        -------
        normX: gpuarray
            Row-normalized device array.
        """
        return misc.divide(X, misc.sum(X ** 2, axis=1, keepdims=True) ** 0.5)
def _norm(self, X):
"""Caluclate L2-norm on cpu.
Parameters
----------
X: array
Array to normalize
Returns
-------
normX: array
Normalized array
"""
return X / norm(X,axis=1, keepdims=True)
    def _get_XTX_cuda(self, X, x_1, x_2, y_1, y_2):
        """Normalized dot product of two row slices of ``X``, computed on the GPU.

        Rows ``x_1:x_2`` and ``y_1:y_2`` are copied to the device, L2-normalized,
        and multiplied (second operand transposed), yielding cosine similarities
        between the two row ranges.

        Parameters
        ----------
        X: array
            Host array the slices are taken from.
        x_1: int
            Lower bound on slice on x-axis
        x_2: int
            Upper bound on slice x-axis
        y_1: int
            Lower bound on slice y-axis
        y_2: int
            Upper bound on slice y-axis

        Returns
        -------
        XX.T: array
            (x_2 - x_1) x (y_2 - y_1) similarity block, copied back to the host.
        """
        X_f, X_b = gpuarray.to_gpu(X[x_1:x_2, :]), gpuarray.to_gpu(X[y_1:y_2, :])
        X_f_norm, X_b_norm = self._cuda_norm(X_f), self._cuda_norm(X_b)
        return linalg.dot(X_f_norm, X_b_norm, transb="T").get()
def _get_XTX(self, X, x_1, x_2, y_1, y_2):
"""Caluclate dot product between two array on cpu.
Parameters
----------
X: array
Array to normalize
x_1: int
Lower bound on slice on x-axis
x_2: int
Upper bound on slice x-axis
y_1: int
Lower bound on slice y-axis
y_2: int
Upper bound on slice y-axis
Returns
-------
XX.T: array
X X.T array
"""
Xnorm1, Xnorm2 = self._norm(X[x_1:x_2,:]), self._norm(X[y_1:y_2,:])
return np.dot(Xnorm1, Xnorm2.T)
def _get_array_extensions(self, i, j):
"""Get how much sub-array have to be exteneded to completely fill the whole array.
Parameters
----------
i: int
Index to keep track on size of and position on sub-array
j: int
Index to keep track on size of and position on sub-array
Returns
-------
extendX: int
How much to extend sub-array on x-axis
extendY: int
How much to extend sub-array on y-axis
"""
extendX, extendY = 0, 0
if i == self.nr_square - 1 and j == self.nr_square - 1:
extendX = self.N % self.nr_square
extendY = self.N % self.nr_square
elif i == self.nr_square - 1:
extendX = self.N % self.nr_square
elif j == self.nr_square - 1:
extendY = self.N % self.nr_square
return extendX, extendY
def _get_extensions_coef(self, i, j, dx, dy):
"""Get position of where sub-array will be spliced from the complete array.
Parameters
----------
i: int
Index to keep track on size of and position on sub-array
j: int
Index to keep track on size of and position on sub-array
dx: int
How much to extend on x-axis
dy: int
How much to extend on y-axis
Returns
-------
x_1: int
Lower bound on slice on x-axis
x_2: int
Upper bound on slice on x-axis
y_1: int
Lower bound on slice on y-axis
y_2: int
Upper bound on slice on y-axis
"""
x_1, x_2 = self.l * i, self.l * (i + 1) + dx
y_1, y_2 = self.l * j, self.l * (j + 1) + dy
return x_1, x_2, y_1, y_2
def _get_flatten_distance_matrix(self, f_dist):
"""Get the complete flatten distance matrix.
Parameters
----------
f_dist: array
Contrains all sub-arrays with all distance data.
Returns
-------
flattenDistancematrix: array
The whole distance matrix
"""
entries = int(self.N * (self.N - 1) / 2)
distance_matrix = np.zeros([entries],dtype=self.out_type)
start_splice = 0
for k in list(f_dist.keys()):
data = np.asarray(f_dist[k])
end_splice = start_splice + data.shape[0]
distance_matrix[start_splice:end_splice] = data
start_splice = end_splice
return distance_matrix
    def _fill_fullDistanceMatrix(self, i, j, dist_data):
        """Write one sub-array of distances into ``self.fullDistMatrix`` in place.

        Diagonal blocks (``i == j``) arrive as a flattened triangle and are
        expanded to a full square on the GPU; off-diagonal blocks are reshaped
        and mirrored so the matrix stays symmetric.

        Parameters
        ----------
        i: int
            Block row index.
        j: int
            Block column index.
        dist_data: array
            Flattened sub-array with the distance data.

        Returns
        -------
        Void
        """
        extendX, extendY = self._get_array_extensions(i, j)
        x_1, x_2, y_1, y_2 = self._get_extensions_coef(i, j, extendX, extendY)
        if i == j:
            subArr = np.ones([self.l + extendX, self.l + extendX], dtype=self.out_type)
            if self.float == "float32":
                A = cudaFillFullArray32(dist_data, subArr, self.l + extendX)
            elif self.float == "float64":
                # NOTE(review): cudaFillFullArray64 is not imported at the top of
                # this file; __init__ always sets self.float = "float32", so this
                # branch appears unreachable — verify before enabling float64.
                A = cudaFillFullArray64(dist_data, subArr, self.l + extendX)
            self.fullDistMatrix[x_1: x_2, y_1 : y_2] = A
        else:
            self.fullDistMatrix[x_1: x_2, y_1 : y_2] = dist_data.reshape(self.l + extendX, self.l+ extendY)
            self.fullDistMatrix[y_1 : y_2, x_1: x_2] = dist_data.reshape(self.l + extendX, self.l+ extendY).T
    def _get_full_distance_matrix(self, f_dist):
        """Assemble the complete dense distance matrix from stored sub-arrays.

        Parameters
        ----------
        f_dist: h5py.File
            Open file holding all flattened sub-arrays.

        Returns
        -------
        fullDistanceMatrix: array
            N x N matrix; also cached on ``self.fullDistMatrix``.
        """
        self.fullDistMatrix = np.zeros([self.N, self.N])
        sorted_filenames = self._sort_files(f_dist)
        for fileArr in sorted_filenames:
            dist_data = np.asarray(f_dist[fileArr])
            # Block indices (i, j) are encoded in the dataset name after the colon.
            i, j = int(fileArr.split(":")[1].split("_")[1]), int(fileArr.split(":")[1].split("_")[2])
            self._fill_fullDistanceMatrix(i, j, dist_data)
        return self.fullDistMatrix
def _sort_files(self, f_dist):
file_names = list(f_dist.keys())
arr_number = [int(ss.split(":")[1].split("_")[3]) for ss in file_names]
_, sorted_filenames = zip(*sorted(zip(arr_number, file_names)))
return sorted_filenames
def get_distance_matrix(self, fullMatrix=False):
"""Get either the complete flatten or full distance matrix.
Parameters
----------
fullMatrix: bool
Get full (True) take N**2 memory, else flatten (False) take N(N-1)/2 memory. Both O(N**2)
Returns
-------
DistanceMatrix: array
Matrix with all the distance.
"""
f_dist = h5py.File(self.filename_dist, 'r')
if fullMatrix:
self.load_full = fullMatrix
return self._get_full_distance_matrix(f_dist)
else:
return self._get_flatten_distance_matrix(f_dist)
def _get_flatten_entries(self, i, j, X_shape):
"""Get entries for the flatten array. It's a bi-jection (i,j) -> k, k -> (i,j)
Parameters
----------
i : int
Index for the x-axis i.e., the ith row.
j: int
Index for the y-axis i.e., the jth column.
X_shape: tuple
Shape of sub-array
Returns
-------
l_x: int
length of sub-array on x-axis
entries: int
Number of entries in flatten array
square: bool
Is the matrix a square(True) or a triangle(False).
"""
if i == j:
l_x = X_shape[0]
entries = l_x*(l_x - 1)/2
square = False
else:
l_x, l_y = X_shape[0], X_shape[1]
entries = l_x * l_y
square = True
return l_x, entries, square
    def _fill_flatten_distMatrix(self, entries, X, dx):
        """Flatten one sub-array of distances on the GPU.

        Parameters
        ----------
        entries : int
            Number of entries in the flattened output.
        X: array
            Sub-array with distance data.
        dx: int
            Side length of the sub-array.

        Returns
        -------
        subdistMatrix_flatten: array
            Flattened distances for this sub-array.
        """
        empty_subdistMatrix_flatten = np.zeros((int(entries)), dtype=self.out_type)
        if self.float == "float32":
            subdistMatrix_flatten = cudaFillFlattenArray32(empty_subdistMatrix_flatten, X, dx)
        elif self.float == "float64":
            # NOTE(review): cudaFillFlattenArray64 is not imported at the top of
            # this file; __init__ always sets self.float = "float32", so this
            # branch appears unreachable — verify before enabling float64.
            subdistMatrix_flatten = cudaFillFlattenArray64(empty_subdistMatrix_flatten, X, dx)
        return subdistMatrix_flatten
    def get_similarity(self, i, j,load_full=False):
        """Return the similarity between items ``i`` and ``j``.

        (Docstring fixed: the previous one was copy-pasted from
        ``_fill_flatten_distMatrix``.)

        Parameters
        ----------
        i : int or str
            First index, or a key of ``self.dictinoray`` mapping names to indices.
        j : int or str
            Second index, or a name key.
        load_full: bool
            If True, materialize the full dense matrix in memory first and
            answer all subsequent queries from it.

        Returns
        -------
        val: number
            Similarity value; 1 when ``i == j``.
        """
        if isinstance(i, str) or isinstance(j, str):
            if self.dictinoray:
                try:
                    i = int(self.dictinoray[i])
                    j = int(self.dictinoray[j])
                except:
                    print("The values dont exists in dict.")
        if not self.load_full:
            if load_full:
                # Build and cache the dense matrix once, then index it directly.
                self.load_full = load_full
                f_dist = h5py.File(self.filename_dist, 'r')
                self._get_full_distance_matrix(f_dist)
                return self.fullDistMatrix[i, j]
            else:
                if i == j:
                    return 1
                f_dist = h5py.File(self.filename_dist, 'r')
                sorted_filenames = self._sort_files(f_dist)
                # Only one triangle is stored: query with the larger index first.
                if i < j:
                    f_n = self._get_val(sorted_filenames, j, i)
                    val = self._get_ix(f_dist,f_n,j, i)
                else:
                    f_n = self._get_val(sorted_filenames, i, j)
                    val = self._get_ix(f_dist,f_n,i,j)
                return val
        else:
            return self.fullDistMatrix[i, j]
def most_similar(self, i, load_full=False):
"""Fill the flatten distance matrix with data from sub-arrays.
Parameters
----------
entries : int
Number of entries in flatten array
X: array
Sub-array with distance data.
dx: int
Length of the sub-array of the complete array to get filled.
Returns
-------
subdistMatrix_flatten: array
Part of the complete flatten array that have been given distance values.
"""
if isinstance(i, str) or isinstance(j, str):
if self.dictinoray:
try:
i = int(self.dictinoray[i])
except:
print("The values dont exists in dict.")
if not self.load_full:
if load_full:
self.load_full = load_full
f_dist = h5py.File(self.filename_dist, 'r')
self._get_full_distance_matrix(f_dist)
sims = self.fullDistMatrix[i, :]
else:
sims = list()
f_dist = h5py.File(self.filename_dist, 'r')
sorted_filenames = self._sort_files(f_dist)
for j in range(self.N):
if i == j:
val = 1
elif i < j:
f_n = self._get_val(sorted_filenames, j, i)
val = self._get_ix(f_dist,f_n,j, i)
else:
f_n = self._get_val(sorted_filenames, i, j)
val = self._get_ix(f_dist,f_n,i,j)
sims.append(val)
sims = np.asarray(sims)
else:
sims = self.fullDistMatrix[i, :]
if self.dictinoray:
sorted_val, sorted_name = zip(*sorted(zip(sims, self.dictinoray.keys())))
return dict(zip(sorted_name[::-1], sorted_val[::-1]))
else:
return self.fullDistMatrix[i, :]
def _get_bounderies(self, f_n):
"""Get bounderies of sub-array in the array.
Parameters
----------
f_n : string
File name of sub-array
Returns
-------
x_range[0]: int
Start cordinate in x-axis
x_range[1]: int
Stop cordinate in x-axis
y_range[0]: int
Start cordinate in y-axis
y_range[1]: int
Stop cordinate in y-axis
"""
x, y = f_n.split(":")[0].split("_")
x_range = x.split("-")
y_range = y.split("-")
return x_range[0], x_range[1], y_range[0], y_range[1]
    def _get_val(self, x, i, j):
        """Binary-search the sorted sub-array names for the one whose
        coordinate range covers cell (i, j) of the lower triangle.

        Parameters
        ----------
        x : sequence of str
            Sub-array dataset names, sorted by creation order.
        i : int
            Row index.
        j : int
            Column index.

        Returns
        -------
        str or None
            The covering dataset name, or None if no range matches.
        """
        if len(x) == 1:
            # Base case: a single candidate — accept it only if its parsed
            # bounds actually contain (i, j).
            f_n = x[0]
            x1, x2, y1, y2 = self._get_bounderies(f_n)
            if int(x1) <= i <= int(x2) and int(y1) <= j <= int(y2):
                return f_n
            else:
                return None
        else:
            # Probe the middle element and recurse into one half.
            p = int(np.ceil(len(x) / 2))
            x1, x2, y1, y2 = self._get_bounderies(x[p])
            if int(x1) <= i:
                if i <= int(x2):
                    if int(y1) <= j:
                        if j >= int(y2):
                            # NOTE(review): this recurses with x[p] (a single
                            # string, not a one-element list), so the base
                            # case above would index into the string; also
                            # `j >= int(y2)` looks inverted relative to the
                            # `int(y1) <= j` guard — confirm against the
                            # intended range check.
                            val = self._get_val(x[p], i, j)
                        else:
                            val = self._get_val(x[p:], i, j)
                    else:
                        val = self._get_val(x[:p], i, j)
                else:
                    val = self._get_val(x[p:], i, j)
            else:
                val = self._get_val(x[:p], i, j)
            return val
    def _get_ix(self, f_dist, file_name, i, j):
        """Read the (i, j) value out of one stored sub-array.

        Parameters
        ----------
        f_dist : h5py.File
            Open HDF5 file holding all sub-arrays.
        file_name : str
            Dataset name, formatted ``"x1-x2_y1-y2:subArray_si_sj_n"``.
        i : int
            Global row index.
        j : int
            Global column index.

        Returns
        -------
        The scalar at local position (i - x1, j - y1) of the sub-array.
        """
        # Decode the global bounds and the (si, sj) grid position from the name.
        f1, f2 = file_name.split(":")
        val = f2.split("_")
        dx, dy = f1.split("_")
        x1, x2 = dx.split("-")
        y1, y2 = dy.split("-")
        si, sj = int(val[1]), int(val[2])
        X = np.array(f_dist[file_name])
        # Translate global indices to sub-array-local indices.
        di, dj = int(i) - int(x1), int(j) - int(y1)
        extendX, extendY = self._get_array_extensions(si, sj)
        if si == sj:
            # Diagonal tiles are stored as a flattened lower triangle and
            # must be re-expanded into a full square first.
            if self.float == "float32":
                subArr = np.ones([self.l + extendX, self.l + extendX], dtype=self.out_type)
                A = cudaFillFullArray32(X, subArr, self.l + extendX)
            # NOTE(review): for a diagonal tile with self.float != "float32"
            # no `A` is assigned on this path, so the return below raises
            # NameError (the float64 filler is never called here) — confirm.
        else:
            A = X.reshape(self.l + extendX, self.l+ extendY)
        return A[di, dj]
    def calculate_distmatrix(self, X, nr_square=4):
        """Compute the full cosine-similarity matrix of X tile by tile.

        The matrix is split into an ``nr_square`` x ``nr_square`` grid; only
        lower-triangle tiles (j <= i) are computed, flattened, and written as
        individual datasets into the HDF5 file ``self.file_dist``.

        Parameters
        ----------
        X : numpy.ndarray
            Row-wise data matrix; converted to float32 if needed.
        nr_square : int
            Number of tiles per axis (each tile must span >= 2 rows).

        Returns
        -------
        None
            Results are persisted to ``self.file_dist``, which is closed
            at the end (so this method can only be called once per object).
        """
        if np.float32 != X.dtype:
            print("Warning: Array is not float32. The array will be convered from ", X.dtype, " to float32 for speed up.")
            X = X.astype(np.float32)
        l = X.shape[0] / nr_square
        assert l >= 2, "Please pick fewer number of sub-arrays each has a length longer than 2 elements. Currently " + str(round(l, 2))
        if self.float=="float64":
            assert X.shape[0] >= 10000, "If using float64 ensure that that X.shape[0] > 10 000. Otherwise use float32 or cpu-version."
        self.l = int(np.floor(l))
        self.N = X.shape[0]
        self.nr_square = nr_square
        arr_nr = 0
        for i in range(self.nr_square):
            for j in range(self.nr_square):
                # Only the lower triangle of the tile grid is materialized;
                # the matrix is symmetric.
                if j <= i:
                    extendX, extendY = self._get_array_extensions(i, j)
                    x_1, x_2, y_1, y_2 = self._get_extensions_coef(i, j, extendX, extendY)
                    if self.gpu:
                        XTX = self._get_XTX_cuda(X,*[x_1,x_2,y_1,y_2])
                    else:
                        XTX = self._get_XTX(X, *[x_1, x_2, y_1, y_2])
                    l_x, entries, square = self._get_flatten_entries(i, j, XTX.shape)
                    if self.numba:
                        if square:
                            subdistMatrix_flatten = XTX.flatten()
                        else:
                            # Diagonal tile: keep only the strict lower
                            # triangle, flattened via the CUDA helper.
                            subdistMatrix_flatten = self._fill_flatten_distMatrix(entries, XTX, l_x)
                    else:
                        if square:
                            subdistMatrix_flatten = XTX.flatten()
                        else:
                            subdistMatrix_flatten = XTX[np.tril_indices(l_x, k=-1)]
                    # Dataset name encodes global bounds, grid position and
                    # creation order: "x1-x2_y1-y2:subArray_i_j_n".
                    self.file_dist.create_dataset(str(x_1) + "-" + str(x_2 - 1) + "_" + str(y_1)+"-"+ str(y_2 - 1) + ':subArray_' + str(i) + "_" + str(j) + "_" + str(arr_nr), data=subdistMatrix_flatten, dtype=subdistMatrix_flatten.dtype)
                    arr_nr +=1
        self.file_dist.close()
| 33.124172
| 237
| 0.503924
|
import h5py
import numpy as np
from skcuda import linalg, misc
import pycuda.autoinit
from pycuda.compiler import SourceModule
import pycuda.gpuarray as gpuarray
linalg.init()
from numpy.linalg import norm
import os.path
cuda_driver = pycuda.driver.init()
pycuda.driver.Device(0).retain_primary_context()
from .fillArrayNumbaFloat32 import cudaFillFlattenArray32, cudaFillFullArray32
class DistanceMatrix:
def __init__(self,
file_name="cuda_dist",
gpu=True,
numba=True,
dictinoray=None):
if os.path.isfile(str(file_name)):
os.remove(str(file_name))
self.filename_dist = file_name
self.file_dist = h5py.File(file_name, 'w')
self.out_type = np.float32
self.float = "float32"
self.load_full = False
self.gpu = gpu
self.numba = numba
if dictinoray:
self.dictinoray = dictinoray
else:
self.dictinoray = None
def _cuda_norm(self, X):
return misc.divide(X, misc.sum(X ** 2, axis=1, keepdims=True) ** 0.5)
def _norm(self, X):
return X / norm(X,axis=1, keepdims=True)
def _get_XTX_cuda(self, X, x_1, x_2, y_1, y_2):
X_f, X_b = gpuarray.to_gpu(X[x_1:x_2, :]), gpuarray.to_gpu(X[y_1:y_2, :])
X_f_norm, X_b_norm = self._cuda_norm(X_f), self._cuda_norm(X_b)
return linalg.dot(X_f_norm, X_b_norm, transb="T").get()
def _get_XTX(self, X, x_1, x_2, y_1, y_2):
Xnorm1, Xnorm2 = self._norm(X[x_1:x_2,:]), self._norm(X[y_1:y_2,:])
return np.dot(Xnorm1, Xnorm2.T)
def _get_array_extensions(self, i, j):
extendX, extendY = 0, 0
if i == self.nr_square - 1 and j == self.nr_square - 1:
extendX = self.N % self.nr_square
extendY = self.N % self.nr_square
elif i == self.nr_square - 1:
extendX = self.N % self.nr_square
elif j == self.nr_square - 1:
extendY = self.N % self.nr_square
return extendX, extendY
def _get_extensions_coef(self, i, j, dx, dy):
x_1, x_2 = self.l * i, self.l * (i + 1) + dx
y_1, y_2 = self.l * j, self.l * (j + 1) + dy
return x_1, x_2, y_1, y_2
def _get_flatten_distance_matrix(self, f_dist):
entries = int(self.N * (self.N - 1) / 2)
distance_matrix = np.zeros([entries],dtype=self.out_type)
start_splice = 0
for k in list(f_dist.keys()):
data = np.asarray(f_dist[k])
end_splice = start_splice + data.shape[0]
distance_matrix[start_splice:end_splice] = data
start_splice = end_splice
return distance_matrix
def _fill_fullDistanceMatrix(self, i, j, dist_data):
extendX, extendY = self._get_array_extensions(i, j)
x_1, x_2, y_1, y_2 = self._get_extensions_coef(i, j, extendX, extendY)
if i == j:
subArr = np.ones([self.l + extendX, self.l + extendX], dtype=self.out_type)
if self.float == "float32":
A = cudaFillFullArray32(dist_data, subArr, self.l + extendX)
elif self.float == "float64":
A = cudaFillFullArray64(dist_data, subArr, self.l + extendX)
self.fullDistMatrix[x_1: x_2, y_1 : y_2] = A
else:
self.fullDistMatrix[x_1: x_2, y_1 : y_2] = dist_data.reshape(self.l + extendX, self.l+ extendY)
self.fullDistMatrix[y_1 : y_2, x_1: x_2] = dist_data.reshape(self.l + extendX, self.l+ extendY).T
def _get_full_distance_matrix(self, f_dist):
self.fullDistMatrix = np.zeros([self.N, self.N])
sorted_filenames = self._sort_files(f_dist)
for fileArr in sorted_filenames:
dist_data = np.asarray(f_dist[fileArr])
i, j = int(fileArr.split(":")[1].split("_")[1]), int(fileArr.split(":")[1].split("_")[2])
self._fill_fullDistanceMatrix(i, j, dist_data)
return self.fullDistMatrix
def _sort_files(self, f_dist):
file_names = list(f_dist.keys())
arr_number = [int(ss.split(":")[1].split("_")[3]) for ss in file_names]
_, sorted_filenames = zip(*sorted(zip(arr_number, file_names)))
return sorted_filenames
def get_distance_matrix(self, fullMatrix=False):
f_dist = h5py.File(self.filename_dist, 'r')
if fullMatrix:
self.load_full = fullMatrix
return self._get_full_distance_matrix(f_dist)
else:
return self._get_flatten_distance_matrix(f_dist)
def _get_flatten_entries(self, i, j, X_shape):
if i == j:
l_x = X_shape[0]
entries = l_x*(l_x - 1)/2
square = False
else:
l_x, l_y = X_shape[0], X_shape[1]
entries = l_x * l_y
square = True
return l_x, entries, square
def _fill_flatten_distMatrix(self, entries, X, dx):
empty_subdistMatrix_flatten = np.zeros((int(entries)), dtype=self.out_type)
if self.float == "float32":
subdistMatrix_flatten = cudaFillFlattenArray32(empty_subdistMatrix_flatten, X, dx)
elif self.float == "float64":
subdistMatrix_flatten = cudaFillFlattenArray64(empty_subdistMatrix_flatten, X, dx)
return subdistMatrix_flatten
def get_similarity(self, i, j,load_full=False):
if isinstance(i, str) or isinstance(j, str):
if self.dictinoray:
try:
i = int(self.dictinoray[i])
j = int(self.dictinoray[j])
except:
print("The values dont exists in dict.")
if not self.load_full:
if load_full:
self.load_full = load_full
f_dist = h5py.File(self.filename_dist, 'r')
self._get_full_distance_matrix(f_dist)
return self.fullDistMatrix[i, j]
else:
if i == j:
return 1
f_dist = h5py.File(self.filename_dist, 'r')
sorted_filenames = self._sort_files(f_dist)
if i < j:
f_n = self._get_val(sorted_filenames, j, i)
val = self._get_ix(f_dist,f_n,j, i)
else:
f_n = self._get_val(sorted_filenames, i, j)
val = self._get_ix(f_dist,f_n,i,j)
return val
else:
return self.fullDistMatrix[i, j]
def most_similar(self, i, load_full=False):
if isinstance(i, str) or isinstance(j, str):
if self.dictinoray:
try:
i = int(self.dictinoray[i])
except:
print("The values dont exists in dict.")
if not self.load_full:
if load_full:
self.load_full = load_full
f_dist = h5py.File(self.filename_dist, 'r')
self._get_full_distance_matrix(f_dist)
sims = self.fullDistMatrix[i, :]
else:
sims = list()
f_dist = h5py.File(self.filename_dist, 'r')
sorted_filenames = self._sort_files(f_dist)
for j in range(self.N):
if i == j:
val = 1
elif i < j:
f_n = self._get_val(sorted_filenames, j, i)
val = self._get_ix(f_dist,f_n,j, i)
else:
f_n = self._get_val(sorted_filenames, i, j)
val = self._get_ix(f_dist,f_n,i,j)
sims.append(val)
sims = np.asarray(sims)
else:
sims = self.fullDistMatrix[i, :]
if self.dictinoray:
sorted_val, sorted_name = zip(*sorted(zip(sims, self.dictinoray.keys())))
return dict(zip(sorted_name[::-1], sorted_val[::-1]))
else:
return self.fullDistMatrix[i, :]
def _get_bounderies(self, f_n):
x, y = f_n.split(":")[0].split("_")
x_range = x.split("-")
y_range = y.split("-")
return x_range[0], x_range[1], y_range[0], y_range[1]
def _get_val(self, x, i, j):
if len(x) == 1:
f_n = x[0]
x1, x2, y1, y2 = self._get_bounderies(f_n)
if int(x1) <= i <= int(x2) and int(y1) <= j <= int(y2):
return f_n
else:
return None
else:
p = int(np.ceil(len(x) / 2))
x1, x2, y1, y2 = self._get_bounderies(x[p])
if int(x1) <= i:
if i <= int(x2):
if int(y1) <= j:
if j >= int(y2):
val = self._get_val(x[p], i, j)
else:
val = self._get_val(x[p:], i, j)
else:
val = self._get_val(x[:p], i, j)
else:
val = self._get_val(x[p:], i, j)
else:
val = self._get_val(x[:p], i, j)
return val
def _get_ix(self, f_dist, file_name, i, j):
f1, f2 = file_name.split(":")
val = f2.split("_")
dx, dy = f1.split("_")
x1, x2 = dx.split("-")
y1, y2 = dy.split("-")
si, sj = int(val[1]), int(val[2])
X = np.array(f_dist[file_name])
di, dj = int(i) - int(x1), int(j) - int(y1)
extendX, extendY = self._get_array_extensions(si, sj)
if si == sj:
if self.float == "float32":
subArr = np.ones([self.l + extendX, self.l + extendX], dtype=self.out_type)
A = cudaFillFullArray32(X, subArr, self.l + extendX)
else:
A = X.reshape(self.l + extendX, self.l+ extendY)
return A[di, dj]
def calculate_distmatrix(self, X, nr_square=4):
if np.float32 != X.dtype:
print("Warning: Array is not float32. The array will be convered from ", X.dtype, " to float32 for speed up.")
X = X.astype(np.float32)
l = X.shape[0] / nr_square
assert l >= 2, "Please pick fewer number of sub-arrays each has a length longer than 2 elements. Currently " + str(round(l, 2))
if self.float=="float64":
assert X.shape[0] >= 10000, "If using float64 ensure that that X.shape[0] > 10 000. Otherwise use float32 or cpu-version."
self.l = int(np.floor(l))
self.N = X.shape[0]
self.nr_square = nr_square
arr_nr = 0
for i in range(self.nr_square):
for j in range(self.nr_square):
if j <= i:
extendX, extendY = self._get_array_extensions(i, j)
x_1, x_2, y_1, y_2 = self._get_extensions_coef(i, j, extendX, extendY)
if self.gpu:
XTX = self._get_XTX_cuda(X,*[x_1,x_2,y_1,y_2])
else:
XTX = self._get_XTX(X, *[x_1, x_2, y_1, y_2])
l_x, entries, square = self._get_flatten_entries(i, j, XTX.shape)
if self.numba:
if square:
subdistMatrix_flatten = XTX.flatten()
else:
subdistMatrix_flatten = self._fill_flatten_distMatrix(entries, XTX, l_x)
else:
if square:
subdistMatrix_flatten = XTX.flatten()
else:
subdistMatrix_flatten = XTX[np.tril_indices(l_x, k=-1)]
self.file_dist.create_dataset(str(x_1) + "-" + str(x_2 - 1) + "_" + str(y_1)+"-"+ str(y_2 - 1) + ':subArray_' + str(i) + "_" + str(j) + "_" + str(arr_nr), data=subdistMatrix_flatten, dtype=subdistMatrix_flatten.dtype)
arr_nr +=1
self.file_dist.close()
| true
| true
|
1c4262cdeb92ebd6c335d957cdc8fd8bfca03129
| 190
|
py
|
Python
|
Learning Python/Exercise Files/Ch2/helloworld_my.py
|
RomanShevtsiv/linkedin-learning
|
d7ec85953b7e88905f87928ede067d32344b984f
|
[
"MIT"
] | null | null | null |
Learning Python/Exercise Files/Ch2/helloworld_my.py
|
RomanShevtsiv/linkedin-learning
|
d7ec85953b7e88905f87928ede067d32344b984f
|
[
"MIT"
] | null | null | null |
Learning Python/Exercise Files/Ch2/helloworld_my.py
|
RomanShevtsiv/linkedin-learning
|
d7ec85953b7e88905f87928ede067d32344b984f
|
[
"MIT"
] | null | null | null |
#
# Example file for HelloWorld
#
def main():
    """Print a greeting, then ask for the user's name and greet them."""
    print("Hello World")
    user_name = input("What is your name? ")
    print("Nice to meet you,", user_name)


if __name__ == "__main__":
    main()
| 13.571429
| 39
| 0.594737
|
def main():
print("Hello World")
name = input("What is your name? ")
print("Nice to meet you,", name)
if __name__ == "__main__":
main()
| true
| true
|
1c426315e61989e059c3795bd0347b919c2187d5
| 2,365
|
py
|
Python
|
openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Authentication_Case0006.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Authentication_Case0006.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
openGaussBase/testcase/SECURITY/PERMISSIONS/Opengauss_Function_Security_Authentication_Case0006.py
|
opengauss-mirror/Yat
|
aef107a8304b94e5d99b4f1f36eb46755eb8919e
|
[
"MulanPSL-1.0"
] | null | null | null |
"""
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : security
Case Name : 修改密码有效时间为1000,-1
Description :
1.登录数据库,执行gs_guc set -N all -I all -c "password_effect_time=1000"
2.gs_guc set -N all -I all -c "password_effect_time=-1"
Expect :
1.设置失败,参数超范围
2.设置失败,参数超范围
History :
"""
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Policy(unittest.TestCase):
    """Security case 0006: password_effect_time must stay within 0..999."""
    def setUp(self):
        """Log case start and build the DB helper objects used by the test."""
        logger.info(
            '---Opengauss_Function_Security_Authentication_Case0006 start---')
        self.common = Common()
        self.sh_primy = CommonSH('PrimaryDbUser')
        self.userNode = Node('PrimaryDbUser')
        self.DB_ENV_PATH = macro.DB_ENV_PATH
    def test_policy(self):
        """gs_guc must reject password_effect_time=1000 and =-1."""
        # Step 1: a value above the upper bound (999) must fail.
        excute_cmd1 = f'source {self.DB_ENV_PATH};' \
                      f'gs_guc set -N all -I all ' \
                      f'-c "password_effect_time=1000"'
        msg1 = self.userNode.sh(excute_cmd1).result()
        logger.info(msg1)
        self.assertTrue(msg1.find(
            'ERROR: The value 1000 is outside the valid range for parameter '
            '"password_effect_time" (0 .. 999)') > -1)
        # Step 2: a negative value must fail as well.
        excute_cmd1 = f'source {self.DB_ENV_PATH};' \
                      f'gs_guc set -N all -I all -c "password_effect_time=-1"'
        msg1 = self.userNode.sh(excute_cmd1).result()
        logger.info(msg1)
        self.assertTrue(msg1.find('ERROR: The value -1 is outside the valid '
                                  'range for parameter '
                                  '"password_effect_time" (0 .. 999)') > -1)
    def tearDown(self):
        """Log case finish (no DB cleanup needed: both commands must fail)."""
        logger.info(
            '---Opengauss_Function_Security_Authentication_Case0006 finish---')
| 34.779412
| 84
| 0.649894
|
import unittest
from yat.test import Node
from yat.test import macro
from testcase.utils.Common import Common
from testcase.utils.CommonSH import CommonSH
from testcase.utils.Constant import Constant
from testcase.utils.Logger import Logger
logger = Logger()
class Policy(unittest.TestCase):
def setUp(self):
logger.info(
'---Opengauss_Function_Security_Authentication_Case0006 start---')
self.common = Common()
self.sh_primy = CommonSH('PrimaryDbUser')
self.userNode = Node('PrimaryDbUser')
self.DB_ENV_PATH = macro.DB_ENV_PATH
def test_policy(self):
excute_cmd1 = f'source {self.DB_ENV_PATH};' \
f'gs_guc set -N all -I all ' \
f'-c "password_effect_time=1000"'
msg1 = self.userNode.sh(excute_cmd1).result()
logger.info(msg1)
self.assertTrue(msg1.find(
'ERROR: The value 1000 is outside the valid range for parameter '
'"password_effect_time" (0 .. 999)') > -1)
excute_cmd1 = f'source {self.DB_ENV_PATH};' \
f'gs_guc set -N all -I all -c "password_effect_time=-1"'
msg1 = self.userNode.sh(excute_cmd1).result()
logger.info(msg1)
self.assertTrue(msg1.find('ERROR: The value -1 is outside the valid '
'range for parameter '
'"password_effect_time" (0 .. 999)') > -1)
def tearDown(self):
logger.info(
'---Opengauss_Function_Security_Authentication_Case0006 finish---')
| true
| true
|
1c4263adea94853f60b5eafdb904b7159d01f84a
| 365
|
py
|
Python
|
app/decorators.py
|
mildock/KakaotalkHost
|
cf110e5f4675852d032972c0da57c21e6f3b56b6
|
[
"MIT"
] | null | null | null |
app/decorators.py
|
mildock/KakaotalkHost
|
cf110e5f4675852d032972c0da57c21e6f3b56b6
|
[
"MIT"
] | null | null | null |
app/decorators.py
|
mildock/KakaotalkHost
|
cf110e5f4675852d032972c0da57c21e6f3b56b6
|
[
"MIT"
] | null | null | null |
from time import time
from functools import wraps
def processtime(func):
    """Decorator that prints a function's wall-clock run time to the console.

    (Translated from the original Korean docstring: intended for debugging
    a function's execution time on the console.)
    """
    @wraps(func)
    def timed(*args, **kwargs):
        begin = time()
        value = func(*args, **kwargs)
        elapsed = time() - begin
        print(">>", func.__name__, ">> %.5fs" % elapsed)
        return value
    return timed
| 21.470588
| 55
| 0.564384
|
from time import time
from functools import wraps
def processtime(func):
@wraps(func)
def wrapper(*args, **kwargs):
start = time()
result = func(*args, **kwargs)
during = time() - start
print(">>", func.__name__, ">> %.5fs" % during)
return result
return wrapper
| true
| true
|
1c426483c131a2f228d4fb9f7f0f8a73a3ca5d0d
| 420
|
py
|
Python
|
Text.py
|
nvakhilnair/Image-Encryption-and-Decryption-using-RSA
|
6a033aac703acd4d9f3f2f32faa2c79ad3c31600
|
[
"MIT"
] | null | null | null |
Text.py
|
nvakhilnair/Image-Encryption-and-Decryption-using-RSA
|
6a033aac703acd4d9f3f2f32faa2c79ad3c31600
|
[
"MIT"
] | null | null | null |
Text.py
|
nvakhilnair/Image-Encryption-and-Decryption-using-RSA
|
6a033aac703acd4d9f3f2f32faa2c79ad3c31600
|
[
"MIT"
] | null | null | null |
from decrypt import decrpytion
def text_process(input_Dir, d, N):
    """Read a ciphered integer list from a file and decrypt each entry.

    The file's first line is expected to look like ``"[c1,c2,...]"``.

    Parameters
    ----------
    input_Dir : str
        Path of the file holding the ciphered data.
    d, N : int
        RSA private exponent and modulus, forwarded to ``decrpytion``.

    Returns
    -------
    list
        The deciphered values, in file order.
    """
    # The context manager closes the file on exit; the original additionally
    # called f.close() inside the `with` block, which was redundant.
    with open(input_Dir) as f:
        line = f.readline()
    # Strip the surrounding brackets: "[1,2,3]" -> "1,2,3".
    line = line[1:-1]
    data_ciphered = [int(token) for token in line.split(",")]
    return [decrpytion(cipher, d, N) for cipher in data_ciphered]
| 28
| 49
| 0.645238
|
from decrypt import decrpytion
def text_process(input_Dir,d,N):
with open(input_Dir) as f:
line = f.readline()
f.close()
line = line[1:len(line)-1]
line_list = list(line.split(","))
data_ciphered = []
for i in line_list:
data_ciphered.append(int(i))
data_deciphered = []
for i in data_ciphered:
data_deciphered.append(decrpytion(i,d,N))
return data_deciphered
| true
| true
|
1c4264913c5b765b683a7fbcf3168ec8a3de824a
| 809
|
py
|
Python
|
src/league_py/api/decorators.py
|
cguethle/league_py
|
58e0ad35e62260bdf019a21707531132557567ba
|
[
"MIT"
] | null | null | null |
src/league_py/api/decorators.py
|
cguethle/league_py
|
58e0ad35e62260bdf019a21707531132557567ba
|
[
"MIT"
] | null | null | null |
src/league_py/api/decorators.py
|
cguethle/league_py
|
58e0ad35e62260bdf019a21707531132557567ba
|
[
"MIT"
] | null | null | null |
""" Helper decorators for LOL API calls.
"""
import functools
from league_py.api import get_region
class LolApi(object):
    """Decorator that builds the LOL REST API url for the wrapped call.

    Subclasses set ``version`` and ``name``; ``url`` supplies the trailing
    endpoint details. On each call the assembled url is published into the
    wrapped function's module globals as ``api_url``.
    """
    version = None
    name = None

    def __init__(self, url):
        self.url = url

    def __call__(self, fnc):
        @functools.wraps(fnc)
        def decorated(*args, **kwargs):
            url_pattern = "/api/lol/{region}/v{version}/{api_name}/{url_details}"
            api_url = url_pattern.format(
                region=get_region(),
                version=self.version,
                api_name=self.name,
                url_details=self.url
            )
            # BUG FIX: `func_globals` is Python 2 only and was removed in
            # Python 3; the portable attribute is `__globals__`.
            fnc.__globals__['api_url'] = api_url
            return fnc(*args, **kwargs)
        return decorated
| 26.966667
| 87
| 0.577256
|
import functools
from league_py.api import get_region
class LolApi(object):
version = None
name = None
def __init__(self, url):
self.url = url
def __call__(self, fnc):
@functools.wraps(fnc)
def decorated(*args, **kwargs):
url_pattern = "/api/lol/{region}/v{version}/{api_name}/{url_details}"
api_url = url_pattern.format(
region=get_region(),
version=self.version,
api_name=self.name,
url_details=self.url
)
fnc.func_globals['api_url'] = api_url
return fnc(*args, **kwargs)
return decorated
| true
| true
|
1c4264f1b535bdcffc4958e6c8c5f249ac98dd37
| 1,741
|
py
|
Python
|
tests/components/demo/test_lock.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 3
|
2020-10-23T14:39:11.000Z
|
2021-02-17T14:40:17.000Z
|
tests/components/demo/test_lock.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 39
|
2016-12-16T12:40:34.000Z
|
2017-02-13T17:53:42.000Z
|
tests/components/demo/test_lock.py
|
petewill/home-assistant
|
5859dba4344f05fb8774aa1207e47ac28f627a67
|
[
"Apache-2.0"
] | 6
|
2020-04-10T06:21:11.000Z
|
2021-07-01T08:53:38.000Z
|
"""The tests for the Demo lock platform."""
import unittest
from homeassistant.setup import setup_component
from homeassistant.components import lock
from tests.common import get_test_home_assistant, mock_service
from tests.components.lock import common
FRONT = "lock.front_door"
KITCHEN = "lock.kitchen_door"
OPENABLE_LOCK = "lock.openable_lock"
class TestLockDemo(unittest.TestCase):
    """Test the demo lock."""
    def setUp(self):  # pylint: disable=invalid-name
        """Set up things to be run when tests are started."""
        self.hass = get_test_home_assistant()
        assert setup_component(self.hass, lock.DOMAIN, {"lock": {"platform": "demo"}})
    def tearDown(self):  # pylint: disable=invalid-name
        """Stop everything that was started."""
        self.hass.stop()
    def test_is_locked(self):
        """Test if lock is locked."""
        assert lock.is_locked(self.hass, FRONT)
        # NOTE(review): the boolean returned by is_state() is discarded here
        # and below — these lines look like they were meant to be asserted;
        # confirm intent before tightening.
        self.hass.states.is_state(FRONT, "locked")
        assert not lock.is_locked(self.hass, KITCHEN)
        self.hass.states.is_state(KITCHEN, "unlocked")
    def test_locking(self):
        """Test the locking of a lock."""
        common.lock(self.hass, KITCHEN)
        self.hass.block_till_done()
        assert lock.is_locked(self.hass, KITCHEN)
    def test_unlocking(self):
        """Test the unlocking of a lock."""
        common.unlock(self.hass, FRONT)
        self.hass.block_till_done()
        assert not lock.is_locked(self.hass, FRONT)
    def test_opening(self):
        """Test the opening of a lock."""
        calls = mock_service(self.hass, lock.DOMAIN, lock.SERVICE_OPEN)
        common.open_lock(self.hass, OPENABLE_LOCK)
        self.hass.block_till_done()
        assert 1 == len(calls)
| 31.654545
| 86
| 0.669156
|
import unittest
from homeassistant.setup import setup_component
from homeassistant.components import lock
from tests.common import get_test_home_assistant, mock_service
from tests.components.lock import common
FRONT = "lock.front_door"
KITCHEN = "lock.kitchen_door"
OPENABLE_LOCK = "lock.openable_lock"
class TestLockDemo(unittest.TestCase):
def setUp(self):
self.hass = get_test_home_assistant()
assert setup_component(self.hass, lock.DOMAIN, {"lock": {"platform": "demo"}})
def tearDown(self):
self.hass.stop()
def test_is_locked(self):
assert lock.is_locked(self.hass, FRONT)
self.hass.states.is_state(FRONT, "locked")
assert not lock.is_locked(self.hass, KITCHEN)
self.hass.states.is_state(KITCHEN, "unlocked")
def test_locking(self):
common.lock(self.hass, KITCHEN)
self.hass.block_till_done()
assert lock.is_locked(self.hass, KITCHEN)
def test_unlocking(self):
common.unlock(self.hass, FRONT)
self.hass.block_till_done()
assert not lock.is_locked(self.hass, FRONT)
def test_opening(self):
calls = mock_service(self.hass, lock.DOMAIN, lock.SERVICE_OPEN)
common.open_lock(self.hass, OPENABLE_LOCK)
self.hass.block_till_done()
assert 1 == len(calls)
| true
| true
|
1c4265b4bcea0102a286800f0e08177217b2a2ef
| 1,432
|
py
|
Python
|
clocq/knowledge_base/creation/csv_to_clocq/transform_json_to_pickle.py
|
PhilippChr/CLOCQ
|
b74eea30a30e20b4e9c46908b5385b1eafb2ba53
|
[
"MIT"
] | 8
|
2021-12-06T13:35:27.000Z
|
2021-12-27T09:46:29.000Z
|
clocq/knowledge_base/creation/csv_to_clocq/transform_json_to_pickle.py
|
PhilippChr/CLOCQ
|
b74eea30a30e20b4e9c46908b5385b1eafb2ba53
|
[
"MIT"
] | 3
|
2022-01-22T14:06:51.000Z
|
2022-03-16T01:36:29.000Z
|
clocq/knowledge_base/creation/csv_to_clocq/transform_json_to_pickle.py
|
PhilippChr/CLOCQ
|
b74eea30a30e20b4e9c46908b5385b1eafb2ba53
|
[
"MIT"
] | 2
|
2021-12-06T13:35:31.000Z
|
2021-12-21T06:36:26.000Z
|
import json
import pickle
import re
HIGHEST_ID = 92114576
ENT_PATTERN = re.compile('^Q[0-9]+$')
PRE_PATTERN = re.compile('^P[0-9]+$')
with open("dicts/entity_nodes.pickle", "rb") as infile:
entities_dict = pickle.load(infile)
with open("dicts/pred_nodes.pickle", "rb") as infile:
predicates_dict = pickle.load(infile)
def item_to_integer(item):
    """Map a Wikidata id to its integer encoding.

    Entities ("Q123") and predicates ("P42") map to positive codes from the
    pre-loaded dictionaries; short non-id strings map to negated literal
    codes. Returns None when the item cannot be resolved.
    """
    try:
        if item[0] == "Q" and re.match(ENT_PATTERN, item):
            return int(entities_dict[item])
        elif item[0] == "P" and re.match(PRE_PATTERN, item):
            return int(predicates_dict[item])
        elif len(item) < 40:
            # NOTE(review): `literals_dict` is not defined anywhere in the
            # visible module; the resulting NameError was silently swallowed
            # by the original bare `except:` — confirm where it is loaded.
            return int(-literals_dict[item])
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; unresolvable items still yield None.
        return None
    # Items of 40+ characters fall through unresolved (same as original).
    return None
def json_to_encoded_pickle(json_path, pickle_path):
    """Re-key a JSON dictionary by integer encoding and pickle it.

    Builds a dense list of HIGHEST_ID slots (None = unused), places each
    JSON entry at the index given by item_to_integer(), and dumps the list.

    Parameters
    ----------
    json_path : str
        Path of the input JSON dictionary (Wikidata id -> entry).
    pickle_path : str
        Path of the output pickle file.
    """
    with open(json_path, "r") as fp:
        json_dict = json.load(fp)
    # Pre-sized in one allocation instead of 92M append() calls.
    pickle_dict = [None] * HIGHEST_ID
    for item in json_dict:
        entry = json_dict[item]
        integer_encoded_item = item_to_integer(item)
        # BUG FIX: the original tested `if not integer_encoded_item`, which
        # also skipped the valid encoding 0; only skip unresolved items.
        if integer_encoded_item is None:
            continue
        try:
            # NOTE(review): literal codes are negative and index from the
            # end of the list — confirm that wrap-around is intended.
            pickle_dict[integer_encoded_item] = entry
        except IndexError:
            # Narrowed from a bare `except:`; out-of-range codes are logged.
            print(integer_encoded_item)
    with open(pickle_path, 'wb') as output:
        pickle.dump(pickle_dict, output, protocol=pickle.HIGHEST_PROTOCOL)
"""
MAIN
"""
if __name__ == "__main__":
    # Encode each JSON dictionary into a dense, integer-indexed pickle table.
    json_to_encoded_pickle("dicts/aliases_dict.json", "dicts/aliases.pickle")
    json_to_encoded_pickle("dicts/labels_dict.json", "dicts/labels.pickle")
    json_to_encoded_pickle("dicts/descriptions_dict.json", "dicts/descriptions.pickle")
| 26.518519
| 84
| 0.73743
|
import json
import pickle
import re
HIGHEST_ID = 92114576
ENT_PATTERN = re.compile('^Q[0-9]+$')
PRE_PATTERN = re.compile('^P[0-9]+$')
with open("dicts/entity_nodes.pickle", "rb") as infile:
entities_dict = pickle.load(infile)
with open("dicts/pred_nodes.pickle", "rb") as infile:
predicates_dict = pickle.load(infile)
def item_to_integer(item):
try:
if item[0] == "Q" and re.match(ENT_PATTERN, item):
return int(entities_dict[item])
elif item[0] == "P" and re.match(PRE_PATTERN, item):
return int(predicates_dict[item])
elif len(item) < 40:
return int(-literals_dict[item])
except:
return None
def json_to_encoded_pickle(json_path, pickle_path):
pickle_dict = list()
with open(json_path, "r") as fp:
json_dict = json.load(fp)
for i in range(HIGHEST_ID):
pickle_dict.append(None)
for item in json_dict:
entry = json_dict[item]
integer_encoded_item = item_to_integer(item)
if not integer_encoded_item:
continue
try:
pickle_dict[integer_encoded_item] = entry
except:
print(integer_encoded_item)
with open(pickle_path, 'wb') as output:
pickle.dump(pickle_dict, output, protocol=pickle.HIGHEST_PROTOCOL)
if __name__ == "__main__":
json_to_encoded_pickle("dicts/aliases_dict.json", "dicts/aliases.pickle")
json_to_encoded_pickle("dicts/labels_dict.json", "dicts/labels.pickle")
json_to_encoded_pickle("dicts/descriptions_dict.json", "dicts/descriptions.pickle")
| true
| true
|
1c4266aff4dac39120705746b9a9fdfcd5e41f20
| 1,305
|
py
|
Python
|
src/config.py
|
R6M9/b
|
12f1a17b8a053d87ad342c53639f6a832ed7a165
|
[
"MIT"
] | 1
|
2022-03-05T13:34:59.000Z
|
2022-03-05T13:34:59.000Z
|
src/config.py
|
melek-saidani/bypass_utility
|
3320fb9fb34ae5cff64a51ab4358663baa72a425
|
[
"MIT"
] | null | null | null |
src/config.py
|
melek-saidani/bypass_utility
|
3320fb9fb34ae5cff64a51ab4358663baa72a425
|
[
"MIT"
] | 1
|
2021-08-29T00:53:40.000Z
|
2021-08-29T00:53:40.000Z
|
import json5
class Config:
    """Per-chipset bypass configuration loaded from a JSON5 config file.

    Class attributes hold the default SoC addresses; an entry keyed by the
    hex hw_code in the config file overrides them via from_dict().
    """
    watchdog_address: int = 0x10007000
    uart_base: int = 0x11002000
    payload_address: int = 0x100A00
    var_0: int = None  # optional, chip-specific; None when absent
    var_1: int = 0xA
    payload: str  # payload file name — mandatory, must come from the config
    crash_method: int = 0

    def default(self, hw_code):
        """Load the entry for ``hw_code`` from default_config.json5."""
        # BUG FIX (resource handling): the original opened and closed the
        # file manually, leaking the handle if from_file() raised; the
        # context manager closes it on every path.
        with open("default_config.json5") as config:
            self.from_file(config, hw_code)
        return self

    def from_file(self, config, hw_code):
        """Parse ``config`` (a JSON5 file object) and apply hw_code's entry.

        Raises
        ------
        NotImplementedError
            If the hw_code has no entry in the config file.
        """
        hw_code = hex(hw_code)
        config = json5.load(config)
        if hw_code in config:
            self.from_dict(config[hw_code])
        else:
            raise NotImplementedError("Can't find {} hw_code in config".format(hw_code))
        return self

    def from_dict(self, entry):
        """Apply the recognized keys of ``entry``; "payload" is mandatory."""
        if "watchdog_address" in entry:
            self.watchdog_address = entry["watchdog_address"]
        if "uart_base" in entry:
            self.uart_base = entry["uart_base"]
        if "payload_address" in entry:
            self.payload_address = entry["payload_address"]
        if "var_0" in entry:
            self.var_0 = entry["var_0"]
        if "var_1" in entry:
            self.var_1 = entry["var_1"]
        if "crash_method" in entry:
            self.crash_method = entry["crash_method"]
        self.payload = entry["payload"]
        return self
| 24.166667
| 88
| 0.595402
|
import json5
class Config:
watchdog_address: int = 0x10007000
uart_base: int = 0x11002000
payload_address: int = 0x100A00
var_0: int = None
var_1: int = 0xA
payload: str
crash_method: int = 0
def default(self, hw_code):
config = open("default_config.json5")
self.from_file(config, hw_code)
config.close()
return self
def from_file(self, config, hw_code):
hw_code = hex(hw_code)
config = json5.load(config)
if hw_code in config:
self.from_dict(config[hw_code])
else:
raise NotImplementedError("Can't find {} hw_code in config".format(hw_code))
return self
def from_dict(self, entry):
if "watchdog_address" in entry:
self.watchdog_address = entry["watchdog_address"]
if "uart_base" in entry:
self.uart_base = entry["uart_base"]
if "payload_address" in entry:
self.payload_address = entry["payload_address"]
if "var_0" in entry:
self.var_0 = entry["var_0"]
if "var_1" in entry:
self.var_1 = entry["var_1"]
if "crash_method" in entry:
self.crash_method = entry["crash_method"]
self.payload = entry["payload"]
return self
| true
| true
|
1c42675f32f5b19737a76c62da34641547830ea1
| 3,951
|
py
|
Python
|
pythonCodes/linearRegression.py
|
cansuyildiz/DataMiningLibraryForSparklingWaterPlatform
|
0fda4ab40c7613eead313f36374b53923e2357f7
|
[
"MIT"
] | 1
|
2020-07-28T12:12:53.000Z
|
2020-07-28T12:12:53.000Z
|
pythonCodes/linearRegression.py
|
cansuyildiz/DataMiningLibraryForSparklingWaterPlatform
|
0fda4ab40c7613eead313f36374b53923e2357f7
|
[
"MIT"
] | null | null | null |
pythonCodes/linearRegression.py
|
cansuyildiz/DataMiningLibraryForSparklingWaterPlatform
|
0fda4ab40c7613eead313f36374b53923e2357f7
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri May 04 10:40:37 2018
@author: cansu.yildiz
"""
from __future__ import print_function
import argparse
import h2o
import findspark
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from pyspark import SparkConf, SparkContext
from pysparkling import *
from pyspark.sql import SparkSession
from math import sqrt
import time
from functools import reduce
def parseData(train_filepath, response):
    """Load a CSV into H2O and split it ~80/20 into train and test frames.

    Parameters
    ----------
    train_filepath : str
        HDFS path of the input CSV.
    response : str
        Name of the response column.

    Returns
    -------
    tuple
        (train frame, test frame, list of all column names as predictors).
    """
    data=h2o.import_file(path=train_filepath)
    #test=h2o.import_file(path=test_filepath)
    data.describe()
    #data.drop("musteri id")
    # Random uniform column with a fixed seed gives a reproducible split.
    r=data.runif(1234)
    train=data[r<0.8]
    test=data[r>=0.8]
    #predictors= data.columns
    #predictors=['Appliances', 'lights', 'T1', 'RH_1', 'T2', 'RH_2', 'T3', 'RH_3', 'T4', 'RH_4', 'T5', 'RH_5', 'T6', 'RH_6', 'T7', 'RH_7', 'T8', 'RH_8', 'T9', 'RH_9', 'T_out', 'Press_mm_hg', 'RH_out', 'Windspeed', 'Visibility', 'Tdewpoint', 'rv1']
    # NOTE(review): predictors includes the response column itself — confirm
    # that H2O's GLM excludes y from x, or drop it here explicitly.
    predictors=data.columns
    print("predictors= ", predictors)
    #response='rv2'
    #response = "quality"
    #response = 'ADRES1_SEHIR_KODU'
    #test=test.drop("date")
    # NOTE(review): H2O's drop() returns a new frame; the result is discarded
    # here, so the test frame still contains the response column — confirm
    # whether `test = test.drop(response)` was intended.
    test.drop(response)
    #print(test.columns)
    return train,test,predictors
def evaluate(train, test, predictors, response):
    """Fit an H2O GLM (L-BFGS solver) on ``train`` and return the fitted model.

    ``test`` is accepted for signature compatibility but is not used here.
    """
    model = H2OGeneralizedLinearEstimator(
        score_each_iteration=True,
        solver="l_bfgs",
        early_stopping=False,
    )
    model.train(x=predictors, y=response, training_frame=train)
    return model
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Linear Regression on Sparkling Water')
    parser.add_argument('--dataset', type=str, default="winequality-red2.csv")
    parser.add_argument('--response', type=str)
    args = parser.parse_args()
    train_filepath = "hdfs://localhost:9000/user/cansu/" + args.dataset
    response = args.response

    # Initiate Spark / Sparkling Water ----------------------------------
    findspark.init()
    conf = SparkConf().setAppName("LinearRegression").setMaster("local[*]").set("spark.ui.port", "7077").set("spark.executor.memory", "5g").set("spark.driver.memory", "5g")
    sc = SparkContext(conf=conf)
    spark = SparkSession.builder.getOrCreate()
    h2oContext = H2OContext.getOrCreate(spark)
    h2oContext.show()
    # -------------------------------------------------------------------

    train, test, predictors = parseData(train_filepath, response)

    # Time two training runs to estimate mean duration and its spread.
    duration = []
    for _ in range(2):
        t1 = time.time()
        evaluate(train, test, predictors, response)
        t2 = time.time()
        duration.append(t2 - t1)
    sizeOfDuration = len(duration)
    # sum() replaces the reduce(lambda x, y: x + y, ...) idiom; same result.
    mean = sum(duration) / sizeOfDuration
    # Sample standard deviation (n - 1 denominator).
    sq_differences = [(x - mean) ** 2 for x in duration]
    std = sqrt(sum(sq_differences) / (sizeOfDuration - 1))

    linearEstimator = evaluate(train, test, predictors, response)
    prediction = linearEstimator.predict(test)
    print("prediction for response column= ", prediction)
    # NOTE(review): this prints the bound method object, not a metric, and
    # AUC is undefined for a regression model -- confirm what was intended.
    print(linearEstimator.auc)
    print("mean duration of linear regression= ", mean, "\nStandart Deviation= ", std)
    h2oContext.stop()
    sc.stop()
    # Context manager guarantees the results file is closed even on error;
    # also stops shadowing the builtin name ``file``.
    with open("linearRegression.txt", "w") as out:
        out.write("mean = " + str(mean))
        out.write("\nstandart deviation = " + str(std))
| 32.652893
| 248
| 0.639079
|
from __future__ import print_function
import argparse
import h2o
import findspark
from h2o.estimators.glm import H2OGeneralizedLinearEstimator
from pyspark import SparkConf, SparkContext
from pysparkling import *
from pyspark.sql import SparkSession
from math import sqrt
import time
from functools import reduce
def parseData(train_filepath, response):
data=h2o.import_file(path=train_filepath)
data.describe()
r=data.runif(1234)
train=data[r<0.8]
test=data[r>=0.8]
predictors=data.columns
print("predictors= ", predictors)
test.drop(response)
return train,test,predictors
def evaluate(train,test,predictors,response):
linearEstimator=H2OGeneralizedLinearEstimator(score_each_iteration=True, solver="l_bfgs", early_stopping=False)
linearEstimator.train(x=predictors, y=response, training_frame=train)
return linearEstimator
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Linear Regression on Sparkling Water')
parser.add_argument('--dataset', type=str, default="winequality-red2.csv")
parser.add_argument('--response', type=str)
args = parser.parse_args()
train_filepath="hdfs://localhost:9000/user/cansu/" + args.dataset
response = args.response
#test_filepath="hdfs://192.168.34.252:9000/ozge/energy_test.csv"
#Initiate System----------------------------------------------
findspark.init()
#conf = SparkConf().setAppName("LinearRegression").setMaster("spark://192.168.34.225:7077").set("spark.ui.port", "7077").set("spark.executor.memory", "5g").set("spark.driver.memory", "5g")
conf = SparkConf().setAppName("LinearRegression").setMaster("local[*]").set("spark.ui.port", "7077").set("spark.executor.memory", "5g").set("spark.driver.memory", "5g")
sc = SparkContext(conf=conf)
#Initiate Spark Session
spark = SparkSession.builder.getOrCreate()
#Initiate Spark Context
h2oContext = H2OContext.getOrCreate(spark)
h2oContext.show()
#------------------------------------------------------------
train,test,predictors = parseData(train_filepath,response)
duration=[]
for i in range(2):
t1=time.time()
evaluate(train,test,predictors,response)
t2=time.time()
duration.append(t2-t1)
sizeOfDuration=len(duration)
#calculates avarage duration
mean = reduce(lambda x, y: x + y, duration) / sizeOfDuration
#calculates standart devaiation of durations
differences= [x-mean for x in duration]
sq_differences = [d**2 for d in differences]
std= sqrt(sum(sq_differences)/(sizeOfDuration-1))
linearEstimator = evaluate(train,test,predictors,response)
prediction=linearEstimator.predict(test)
print("prediction for response column= ", prediction)
print(linearEstimator.auc)
print("mean duration of linear regression= ",mean,"\nStandart Deviation= ",std)
h2oContext.stop()
sc.stop()
file = open("linearRegression.txt","w")
file.write("mean = "+ str(mean))
file.write("\nstandart deviation = " + str(std))
file.close()
| true
| true
|
1c4269d1ac5c3a06e4a302c3ed7f1afbf85326c2
| 4,528
|
py
|
Python
|
TPCh11.py
|
MrOrioleCashback/snippets
|
2dc7e4539009ab57b0f069e2d68908bc943d687a
|
[
"MIT"
] | null | null | null |
TPCh11.py
|
MrOrioleCashback/snippets
|
2dc7e4539009ab57b0f069e2d68908bc943d687a
|
[
"MIT"
] | null | null | null |
TPCh11.py
|
MrOrioleCashback/snippets
|
2dc7e4539009ab57b0f069e2d68908bc943d687a
|
[
"MIT"
] | null | null | null |
# Fixture data for the dictionary exercises below.
list1 = ['a', 'b', 'c', 'd', 'e', 'f']
list2 = [1, 2, 3, 4, 5, 6]
list3 = 'bananna'
# dict(zip(...)) replaces the redundant list-comprehension wrapper
# dict([(x, y) for x, y in zip(...)]); the resulting dict is identical.
my_dict = dict(zip(list1, list2))
my_dict['g'] = 7
#print(my_dict) #unordered
#print('a' in my_dict) #indexed by key
#print(1 in my_dict) #only keys
#print(1 in my_dict.values()) #use .values() to search values
def histogram(string):
    """Map each character of ``string`` to the number of times it occurs."""
    counts = {}
    for ch in string:
        # EAFP: increment, seeding the entry on the first occurrence.
        try:
            counts[ch] += 1
        except KeyError:
            counts[ch] = 1
    return counts
#print(histogram(list3))
#print(my_dict.get('x', 5))
def histogram_with_get(string):
    """Character-frequency histogram using dict.get with a default of 0."""
    tally = {}
    for symbol in string:
        tally[symbol] = tally.get(symbol, 0) + 1
    return tally
#print(histogram_with_get(list3))
def print_hist(dic):
    """Print each key and its value, one pair per line, in insertion order."""
    for key, value in dic.items():
        print(key, value)
#print_hist(my_dict)
#print('----------------')
def print_hist_sorted(dic):
    """Print each key and its value, one pair per line, in sorted key order."""
    for key in sorted(dic.keys()):
        print(key, dic[key])
#print_hist_sorted(my_dict)
#print('----------------')
def reverse_lookup(dic, value):
    """Return the first key mapping to ``value``; raise LookupError if absent."""
    for key, mapped in dic.items():
        if mapped == value:
            return key
    raise LookupError('Value not in dictonary')
#print(reverse_lookup(my_dict, 8))
def invert_dict(dic):
    """Invert ``dic``: each value maps to the list of keys that carried it.

    Bug fix: the original stored the first key as a bare scalar and then
    called ``.append`` on it when a second key shared the same value,
    raising AttributeError.  Every value now maps to a list, so duplicate
    values accumulate their keys correctly.
    """
    inverse = dict()
    for key in dic:
        value = dic[key]
        if value not in inverse:
            inverse[value] = [key]
        else:
            inverse[value].append(key)
    return inverse
#print(invert_dict(histogram_with_get('parrot')))
def fabonacci(n):
    """Return the n'th Fibonacci number via plain (exponential-time) recursion."""
    # Guard clauses replace the if/elif/else chain; same base cases.
    if n == 0:
        return 0
    if n == 1:
        return 1
    return fabonacci(n - 1) + fabonacci(n - 2)
#print(fabonacci(40)) #[Finished in 64.7s]
# Module-level memo cache, pre-seeded with the two Fibonacci base cases.
known = {0: 0, 1: 1}
def fabonacci_with_memo(n):
    """Fibonacci with memoisation in the module-level ``known`` cache."""
    # EAFP cache hit replaces the explicit membership test.
    try:
        return known[n]
    except KeyError:
        pass
    value = fabonacci_with_memo(n - 1) + fabonacci_with_memo(n - 2)
    known[n] = value
    return value
#print(fabonacci_with_memo(40)) #[Finished in 0.1s]
# Shared counter used by the two closure/scoping examples below.
count = 0
def counter():
    # Deliberately broken teaching example: the assignment makes ``count``
    # local to this function, so ``count += 1`` reads an unbound local and
    # raises UnboundLocalError (see counter_global below for the fix).
    count += 1
    return(count)
#print(counter()) #UnboundLocalError: local variable 'count' referenced before assignment
def counter_global():
    """Increment the module-level ``count`` and return its new value."""
    global count
    count += 1
    return count
#print(counter_global())
#----------------------------------------------------------
# NOTE(review): assumes a ``words.txt`` word list next to the script; the
# handle is consumed by the first search call below and is never closed.
fin = open('words.txt')
def word_dict_search(file, search_word):
    """Load every line of ``file`` into a dict, then test ``search_word`` membership.

    Dict membership is O(1), which is the point of this variant (contrast
    with list_search below).
    """
    seen = {line.strip(): 0 for line in file}
    return search_word in seen
#print(word_dict_search(fin, 'zamindars')) #[Finished in 0.1s]
def list_search(file, search_word):
    """Linear-scan variant of word_dict_search: list membership is O(n)."""
    words = [line.strip() for line in file]
    return search_word in words
#print(list_search(fin, 'zamindars')) #[Finished in 0.1s]
def inverse_with_setdefault(dic):
    """Invert ``dic``, keeping only the FIRST key seen for each value."""
    flipped = dict()
    for key, value in dic.items():
        # setdefault ignores later keys once a value already has an entry.
        flipped.setdefault(value, key)
    return flipped
#print(invert_dict(my_dict))
#print(inverse_with_setdefault(my_dict))
def ackermann(m, n):
    """Evaluate the two-argument Ackermann function A(m, n) recursively.

    See http://en.wikipedia.org/wiki/Ackermann_function
    """
    # Single conditional expression replaces the if-chain; same recursion.
    return (n + 1 if m == 0
            else ackermann(m - 1, 1) if n == 0
            else ackermann(m - 1, ackermann(m, n - 1)))
#print(ackermann(3, 4))
# Sample list containing a duplicate ('b') for the duplicate-detection demos.
e = ['a', 'b', 'c', 'd', 'e', 'f', 'b', 'h']
def has_duplicates(t):
    """Return True when any element of ``t`` appears more than once.

    Sorted-copy scan: duplicates in a sorted sequence are adjacent.
    ``t`` is not modified.
    """
    ordered = sorted(t)
    return any(a == b for a, b in zip(ordered, ordered[1:]))
#print(has_duplicates(e))
def has_duplicates_dict(t):
    """Return True when ``t`` has a repeated element, counting in a dict."""
    seen = {}
    for item in t:
        seen[item] = seen.get(item, 0) + 1
        # Second occurrence found: stop early, exactly like the original.
        if seen[item] > 1:
            return True
    return False
#print(has_duplicates_dict(e))
def wordlist_dict(file):
    """Return a dict keyed by the stripped lines of ``file`` (all values 0)."""
    return {line.strip(): 0 for line in file}
def rotate_pairs(word, file):
    """Return ``(word, reversed_word)`` when the reversal is in the word list.

    ``file`` is any iterable of lines, one word per line.  Returns the
    string 'not found' when the reversed word is absent.

    Bug fix: the original tested ``word`` itself against the word list,
    so the reversal it computed was never actually looked up.
    """
    rotated = word[::-1].lower()
    if rotated in wordlist_dict(file):
        return word, rotated
    return 'not found'
#print(rotate_pairs('pots', fin))
| 21.258216
| 90
| 0.563163
|
list1 = ['a', 'b', 'c', 'd', 'e', 'f']
list2 = [1, 2, 3, 4, 5, 6]
list3 = 'bananna'
my_dict = dict([(x, y) for x, y in zip(list1, list2)])
my_dict['g'] = 7
ter in string:
if character not in temp_dict:
temp_dict[character] = 1
else:
temp_dict[character] += 1
return temp_dict
def histogram_with_get(string):
temp_dict = dict()
for character in string:
temp_dict[character] = temp_dict.get(character, 0) + 1
return temp_dict
def print_hist(dic):
for character in dic:
print(character, dic[character])
def print_hist_sorted(dic):
for character in sorted(dic):
print(character, dic[character])
def reverse_lookup(dic, value):
for key in dic:
if dic[key] == value:
return key
raise LookupError('Value not in dictonary')
def invert_dict(dic):
inverse = dict()
for key in dic:
value = dic[key]
if value not in inverse:
inverse[value] = key
else:
inverse[value].append(key)
return inverse
def fabonacci(n):
if n == 0:
return 0
elif n == 1:
return 1
else:
return fabonacci(n-1) + fabonacci(n-2)
1}
def fabonacci_with_memo(n):
if n in known:
return known[n]
result = fabonacci_with_memo(n-1) + fabonacci_with_memo(n-2)
known[n] = result
return result
f counter():
count += 1
return(count)
ount)
fin = open('words.txt')
def word_dict_search(file, search_word):
word_keys = dict()
for line in file:
word = line.strip()
word_keys[word] = 0
if search_word in word_keys:
return True
return False
ch(file, search_word):
word_list = []
for line in file:
word = line.strip()
word_list.append(word)
if search_word in word_list:
return True
return False
h_setdefault(dic):
inverse = dict()
for key in dic:
value = dic[key]
inverse.setdefault(value, key)
return inverse
def ackermann(m, n):
if m == 0:
return n+1
if n == 0:
return ackermann(m-1, 1)
return ackermann(m-1, ackermann(m, n-1))
e = ['a', 'b', 'c', 'd', 'e', 'f', 'b', 'h']
def has_duplicates(t):
temp = t[:]
temp.sort()
for i in range(len(temp)-1):
if temp[i] == temp[i+1]:
return True
return False
def has_duplicates_dict(t):
temp_dict = dict()
for item in t:
if item in temp_dict:
return True
temp_dict[item] = 1
return False
def wordlist_dict(file):
word_keys = dict()
for line in file:
word = line.strip()
word_keys[word] = 0
return word_keys
def rotate_pairs(word, file):
rotated = word[::-1].lower()
if word in wordlist_dict(file):
return word, rotated
return 'not found'
| true
| true
|
1c4269d277bffd065a07c442e015bebcb400301c
| 4,100
|
py
|
Python
|
samples/openapi3/client/petstore/python-experimental/petstore_api/api/store_api_endpoints/delete_order.py
|
sensorario/openapi-generator
|
bf68e9b7d2d9a27fab481fe6bab3f57bc135b94c
|
[
"Apache-2.0"
] | 1
|
2022-01-24T08:22:21.000Z
|
2022-01-24T08:22:21.000Z
|
samples/openapi3/client/petstore/python-experimental/petstore_api/api/store_api_endpoints/delete_order.py
|
sensorario/openapi-generator
|
bf68e9b7d2d9a27fab481fe6bab3f57bc135b94c
|
[
"Apache-2.0"
] | 4
|
2021-09-29T08:46:32.000Z
|
2021-12-08T09:07:04.000Z
|
samples/openapi3/client/petstore/python-experimental/petstore_api/api/store_api_endpoints/delete_order.py
|
sensorario/openapi-generator
|
bf68e9b7d2d9a27fab481fe6bab3f57bc135b94c
|
[
"Apache-2.0"
] | 1
|
2022-02-24T15:54:44.000Z
|
2022-02-24T15:54:44.000Z
|
# coding: utf-8
"""
Generated by: https://openapi-generator.tech
"""
from dataclasses import dataclass
import re # noqa: F401
import sys # noqa: F401
import typing
import urllib3
from petstore_api import api_client, exceptions
import decimal # noqa: F401
from datetime import date, datetime # noqa: F401
from frozendict import frozendict # noqa: F401
from petstore_api.schemas import ( # noqa: F401
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
# path params
# Schema alias for the {order_id} path parameter (a plain string).
OrderIdSchema = StrSchema
# TypedDicts declaring which path parameters are required vs optional.
RequestRequiredPathParams = typing.TypedDict(
    'RequestRequiredPathParams',
    {
        'order_id': OrderIdSchema,
    }
)
RequestOptionalPathParams = typing.TypedDict(
    'RequestOptionalPathParams',
    {
    },
    total=False
)
# Combined view: the required keys plus the (empty) optional set.
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
    pass
# Serialization metadata for order_id: RFC 6570 "simple" style, required.
request_path_order_id = api_client.PathParameter(
    name="order_id",
    style=api_client.ParameterStyle.SIMPLE,
    schema=OrderIdSchema,
    required=True,
)
# Endpoint template and HTTP verb for this generated operation.
_path = '/store/order/{order_id}'
_method = 'DELETE'
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
    # 400 (invalid order id): the spec declares no body or headers, so
    # both deserialize to the ``unset`` sentinel.
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset
_response_for_400 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor400,
)
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
    # 404 (order not found): likewise no declared body or headers.
    response: urllib3.HTTPResponse
    body: Unset = unset
    headers: Unset = unset
_response_for_404 = api_client.OpenApiResponse(
    response_cls=ApiResponseFor404,
)
# Documented HTTP status codes mapped to their response deserializers;
# any other status falls through to ApiResponseWithoutDeserialization.
_status_code_to_response = {
    '400': _response_for_400,
    '404': _response_for_404,
}
class DeleteOrder(api_client.Api):
    # Generated operation mixin for DELETE /store/order/{order_id}.
    def delete_order(
        self: api_client.Api,
        path_params: RequestPathParams = frozendict(),
        stream: bool = False,
        timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
        skip_deserialization: bool = False,
    ) -> typing.Union[
        api_client.ApiResponseWithoutDeserialization
    ]:
        """
        Delete purchase order by ID

        :param path_params: typed dict carrying the required ``order_id``
        :param stream: passed through to the HTTP client
        :param timeout: per-request timeout (seconds or (connect, read))
        :param skip_deserialization: If true then api_response.response will be set but
            api_response.body and api_response.headers will not be deserialized into schema
            class instances
        :raises exceptions.ApiException: on any non-2xx HTTP status
        """
        self._verify_typed_dict_inputs(RequestPathParams, path_params)
        # Serialize each declared path parameter into the URL template.
        _path_params = {}
        for parameter in (
            request_path_order_id,
        ):
            parameter_data = path_params.get(parameter.name, unset)
            if parameter_data is unset:
                continue
            serialized_data = parameter.serialize(parameter_data)
            _path_params.update(serialized_data)
        # TODO add cookie handling
        response = self.api_client.call_api(
            resource_path=_path,
            method=_method,
            path_params=_path_params,
            stream=stream,
            timeout=timeout,
        )
        if skip_deserialization:
            api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        else:
            # Only statuses declared in the spec (400/404) have typed
            # deserializers; anything else stays undeserialized.
            response_for_status = _status_code_to_response.get(str(response.status))
            if response_for_status:
                api_response = response_for_status.deserialize(response, self.api_client.configuration)
            else:
                api_response = api_client.ApiResponseWithoutDeserialization(response=response)
        # Non-2xx statuses surface as exceptions carrying the response.
        if not 200 <= response.status <= 299:
            raise exceptions.ApiException(api_response=api_response)
        return api_response
| 24.404762
| 103
| 0.680488
|
from dataclasses import dataclass
import re
import sys
import typing
import urllib3
from petstore_api import api_client, exceptions
import decimal
from datetime import date, datetime
from frozendict import frozendict
from petstore_api.schemas import (
AnyTypeSchema,
ComposedSchema,
DictSchema,
ListSchema,
StrSchema,
IntSchema,
Int32Schema,
Int64Schema,
Float32Schema,
Float64Schema,
NumberSchema,
DateSchema,
DateTimeSchema,
DecimalSchema,
BoolSchema,
BinarySchema,
NoneSchema,
none_type,
InstantiationMetadata,
Unset,
unset,
ComposedBase,
ListBase,
DictBase,
NoneBase,
StrBase,
IntBase,
NumberBase,
DateBase,
DateTimeBase,
BoolBase,
BinaryBase,
Schema,
_SchemaValidator,
_SchemaTypeChecker,
_SchemaEnumMaker
)
OrderIdSchema = StrSchema
RequestRequiredPathParams = typing.TypedDict(
'RequestRequiredPathParams',
{
'order_id': OrderIdSchema,
}
)
RequestOptionalPathParams = typing.TypedDict(
'RequestOptionalPathParams',
{
},
total=False
)
class RequestPathParams(RequestRequiredPathParams, RequestOptionalPathParams):
pass
request_path_order_id = api_client.PathParameter(
name="order_id",
style=api_client.ParameterStyle.SIMPLE,
schema=OrderIdSchema,
required=True,
)
_path = '/store/order/{order_id}'
_method = 'DELETE'
@dataclass
class ApiResponseFor400(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: Unset = unset
headers: Unset = unset
_response_for_400 = api_client.OpenApiResponse(
response_cls=ApiResponseFor400,
)
@dataclass
class ApiResponseFor404(api_client.ApiResponse):
response: urllib3.HTTPResponse
body: Unset = unset
headers: Unset = unset
_response_for_404 = api_client.OpenApiResponse(
response_cls=ApiResponseFor404,
)
_status_code_to_response = {
'400': _response_for_400,
'404': _response_for_404,
}
class DeleteOrder(api_client.Api):
def delete_order(
self: api_client.Api,
path_params: RequestPathParams = frozendict(),
stream: bool = False,
timeout: typing.Optional[typing.Union[int, typing.Tuple]] = None,
skip_deserialization: bool = False,
) -> typing.Union[
api_client.ApiResponseWithoutDeserialization
]:
self._verify_typed_dict_inputs(RequestPathParams, path_params)
_path_params = {}
for parameter in (
request_path_order_id,
):
parameter_data = path_params.get(parameter.name, unset)
if parameter_data is unset:
continue
serialized_data = parameter.serialize(parameter_data)
_path_params.update(serialized_data)
response = self.api_client.call_api(
resource_path=_path,
method=_method,
path_params=_path_params,
stream=stream,
timeout=timeout,
)
if skip_deserialization:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
else:
response_for_status = _status_code_to_response.get(str(response.status))
if response_for_status:
api_response = response_for_status.deserialize(response, self.api_client.configuration)
else:
api_response = api_client.ApiResponseWithoutDeserialization(response=response)
if not 200 <= response.status <= 299:
raise exceptions.ApiException(api_response=api_response)
return api_response
| true
| true
|
1c426a1bd83479239c465bb66b269e18f73bcea5
| 6,421
|
py
|
Python
|
emat/database/sqlite/callback.py
|
jinsanity07git/tmip-emat
|
ff816cf50f141825078bb276d6da46d92c5028a9
|
[
"BSD-3-Clause"
] | 13
|
2019-03-26T13:27:43.000Z
|
2022-02-02T18:30:36.000Z
|
emat/database/sqlite/callback.py
|
jinsanity07git/tmip-emat
|
ff816cf50f141825078bb276d6da46d92c5028a9
|
[
"BSD-3-Clause"
] | 19
|
2019-04-24T20:58:10.000Z
|
2020-09-11T22:31:06.000Z
|
emat/database/sqlite/callback.py
|
jinsanity07git/tmip-emat
|
ff816cf50f141825078bb276d6da46d92c5028a9
|
[
"BSD-3-Clause"
] | 17
|
2019-02-19T16:13:52.000Z
|
2022-02-14T20:50:36.000Z
|
import pandas as pd
import numpy as np
from ...workbench import RealParameter, IntegerParameter, BooleanParameter, CategoricalParameter
from ...workbench.em_framework.callbacks import AbstractCallback
from ...workbench.util.ema_exceptions import EMAError
from ...util.loggers import get_module_logger
from ..._pkg_constants import *
_logger = get_module_logger(__name__)
class SQLiteCallback(AbstractCallback):
    """
    Callback that mirrors experiment results into the EMAT database.

    A callback can be passed to perform_experiments as a means of
    specifying how results should be handled.  This implementation
    accumulates experiment parameters in a DataFrame and writes each
    experiment's parameters and outcomes to the database as they arrive.
    It can be overwritten or replaced with a callback of your own design,
    for example one that stores results in a different database or writes
    them to a text file.
    """
    # Class-level defaults; each is re-assigned per instance in __init__.
    i = 0
    cases = None
    results = {}
    shape_error_msg = "can only save up to 2d arrays, this array is {}d"
    constraint_error_msg = ('can only save 1d arrays for constraint, '
                            'this array is {}d')
    def __init__(self, uncs, levers, outcomes, nr_experiments,
                 reporting_interval=100, reporting_frequency=10,
                 scope_name=None, design_name=None, db=None,
                 using_metamodel=False, metamodel_id=12345,
                 ):
        '''
        Parameters
        ----------
        uncs : list
            the uncertainty parameters over which the experiments
            are being run.
        levers : list
            the policy lever parameters.
        outcomes : list
            a list of outcomes
        nr_experiments : int
            the total number of experiments to be executed
        reporting_interval : int, optional
            the interval between progress logs
        reporting_frequency: int, optional
            the total number of progress logs
        scope_name : str, optional
            scope under which results are written to the database
        design_name : str, optional
            experiment design name used when writing parameters
        db : optional
            database interface with write_experiment_parameters and
            write_ex_m_1 methods
        using_metamodel : bool, optional
            when True, outcomes are tagged with ``metamodel_id`` as their
            source instead of the core model
        metamodel_id : int, optional
            source identifier recorded for metamodel results
        '''
        super().__init__(uncs, levers, outcomes,
                         nr_experiments, reporting_interval,
                         reporting_frequency)
        self.i = 0
        self.cases = None
        self.results = {}
        self.outcomes = [outcome.name for outcome in outcomes]
        # determine data types of parameters
        columns = []
        dtypes = []
        self.parameters = []
        for parameter in uncs + levers:
            name = parameter.name
            self.parameters.append(name)
            dataType = 'float'
            if isinstance(parameter, CategoricalParameter):
                dataType = 'object'
            elif isinstance(parameter, BooleanParameter):
                dataType = 'bool'
            elif isinstance(parameter, IntegerParameter):
                dataType = 'int'
            columns.append(name)
            dtypes.append(dataType)
        # Bookkeeping columns appended after the parameter columns; note
        # _store_case relies on these being the LAST three columns.
        for name in ['scenario', 'policy', 'model']:
            columns.append(name)
            dtypes.append('object')
        # Pre-allocate one row per experiment with typed empty columns.
        df = pd.DataFrame(index=np.arange(nr_experiments))
        for name, dtype in zip(columns, dtypes):
            df[name] = pd.Series(dtype=dtype)
        self.cases = df
        self.nr_experiments = nr_experiments
        self.scope_name = scope_name
        self.design_name = design_name
        self.db = db
        self.using_metamodel = using_metamodel
        self.metamodel_id = metamodel_id
    def _store_case(self, experiment):
        # Record the experiment's parameters in self.cases and write that
        # single row to the database; returns the database experiment id.
        scenario = experiment.scenario
        policy = experiment.policy
        index = experiment.experiment_id
        self.cases.at[index, 'scenario'] = scenario.name
        self.cases.at[index, 'policy'] = policy.name
        self.cases.at[index, 'model'] = experiment.model_name
        for k, v in scenario.items():
            self.cases.at[index, k] = v
        for k, v in policy.items():
            self.cases.at[index, k] = v
        # ``:-3`` drops the scenario/policy/model bookkeeping columns so
        # only actual parameters are written to the database.
        ex_ids = self.db.write_experiment_parameters(self.scope_name, self.design_name, self.cases.iloc[index:index+1, :-3])
        return ex_ids[0]
    def _store_outcomes(self, case_id, outcomes, ex_id):
        # Store each declared outcome in self.results and persist it.
        for outcome in self.outcomes:
            try:
                outcome_res = outcomes[outcome]
            except KeyError:
                message = "%s not specified as outcome in msi" % outcome
                _logger.debug(message)
            else:
                # outcome is found, store it
                try:
                    self.results[outcome][case_id, ] = outcome_res
                except KeyError:
                    # first time this outcome is seen: allocate a NaN-filled
                    # array of (nr_experiments, *outcome_shape)
                    shape = np.asarray(outcome_res).shape
                    if len(shape) > 2:
                        message = self.shape_error_msg.format(len(shape))
                        raise EMAError(message)
                    shape = list(shape)
                    shape.insert(0, self.nr_experiments)
                    self.results[outcome] = np.empty(shape)
                    self.results[outcome][:] = np.NAN
                    self.results[outcome][case_id, ] = outcome_res
                _logger.debug("stored {} = {}".format(outcome, outcome_res))
                # Tag the stored value with its source: core model or a
                # particular metamodel id.
                self.db.write_ex_m_1(self.scope_name,
                                     SOURCE_IS_CORE_MODEL if not self.using_metamodel else self.metamodel_id,
                                     ex_id,
                                     outcome,
                                     outcome_res,)
    def __call__(self, experiment, outcomes):
        '''
        Method responsible for storing results. This method calls
        :meth:`super` first, thus utilizing the logging provided there.

        Parameters
        ----------
        experiment: Experiment instance
        outcomes: dict
                the outcomes dict
        '''
        super().__call__(experiment, outcomes)
        # store the case
        ex_id = self._store_case(experiment)
        # store outcomes
        self._store_outcomes(experiment.experiment_id, outcomes, ex_id)
    def get_results(self):
        # Returns the accumulated (cases DataFrame, results dict) pair.
        return self.cases, self.results
def SQLiteCallbackFactory(scope_name=None, design_name=None, db=None, using_metamodel=False):
    """Return a callable that builds a SQLiteCallback with these settings baked in."""
    def make_callback(*args, **kwargs):
        # Forward whatever the workbench passes, adding the captured
        # scope/design/database configuration.
        return SQLiteCallback(*args, **kwargs,
                              scope_name=scope_name,
                              design_name=design_name,
                              db=db,
                              using_metamodel=using_metamodel)
    return make_callback
| 34.896739
| 124
| 0.566267
|
import pandas as pd
import numpy as np
from ...workbench import RealParameter, IntegerParameter, BooleanParameter, CategoricalParameter
from ...workbench.em_framework.callbacks import AbstractCallback
from ...workbench.util.ema_exceptions import EMAError
from ...util.loggers import get_module_logger
from ..._pkg_constants import *
_logger = get_module_logger(__name__)
class SQLiteCallback(AbstractCallback):
i = 0
cases = None
results = {}
shape_error_msg = "can only save up to 2d arrays, this array is {}d"
constraint_error_msg = ('can only save 1d arrays for constraint, '
'this array is {}d')
def __init__(self, uncs, levers, outcomes, nr_experiments,
reporting_interval=100, reporting_frequency=10,
scope_name=None, design_name=None, db=None,
using_metamodel=False, metamodel_id=12345,
):
super().__init__(uncs, levers, outcomes,
nr_experiments, reporting_interval,
reporting_frequency)
self.i = 0
self.cases = None
self.results = {}
self.outcomes = [outcome.name for outcome in outcomes]
columns = []
dtypes = []
self.parameters = []
for parameter in uncs + levers:
name = parameter.name
self.parameters.append(name)
dataType = 'float'
if isinstance(parameter, CategoricalParameter):
dataType = 'object'
elif isinstance(parameter, BooleanParameter):
dataType = 'bool'
elif isinstance(parameter, IntegerParameter):
dataType = 'int'
columns.append(name)
dtypes.append(dataType)
for name in ['scenario', 'policy', 'model']:
columns.append(name)
dtypes.append('object')
df = pd.DataFrame(index=np.arange(nr_experiments))
for name, dtype in zip(columns, dtypes):
df[name] = pd.Series(dtype=dtype)
self.cases = df
self.nr_experiments = nr_experiments
self.scope_name = scope_name
self.design_name = design_name
self.db = db
self.using_metamodel = using_metamodel
self.metamodel_id = metamodel_id
def _store_case(self, experiment):
scenario = experiment.scenario
policy = experiment.policy
index = experiment.experiment_id
self.cases.at[index, 'scenario'] = scenario.name
self.cases.at[index, 'policy'] = policy.name
self.cases.at[index, 'model'] = experiment.model_name
for k, v in scenario.items():
self.cases.at[index, k] = v
for k, v in policy.items():
self.cases.at[index, k] = v
ex_ids = self.db.write_experiment_parameters(self.scope_name, self.design_name, self.cases.iloc[index:index+1, :-3])
return ex_ids[0]
def _store_outcomes(self, case_id, outcomes, ex_id):
for outcome in self.outcomes:
try:
outcome_res = outcomes[outcome]
except KeyError:
message = "%s not specified as outcome in msi" % outcome
_logger.debug(message)
else:
try:
self.results[outcome][case_id, ] = outcome_res
except KeyError:
shape = np.asarray(outcome_res).shape
if len(shape) > 2:
message = self.shape_error_msg.format(len(shape))
raise EMAError(message)
shape = list(shape)
shape.insert(0, self.nr_experiments)
self.results[outcome] = np.empty(shape)
self.results[outcome][:] = np.NAN
self.results[outcome][case_id, ] = outcome_res
_logger.debug("stored {} = {}".format(outcome, outcome_res))
self.db.write_ex_m_1(self.scope_name,
SOURCE_IS_CORE_MODEL if not self.using_metamodel else self.metamodel_id,
ex_id,
outcome,
outcome_res,)
def __call__(self, experiment, outcomes):
super().__call__(experiment, outcomes)
ex_id = self._store_case(experiment)
self._store_outcomes(experiment.experiment_id, outcomes, ex_id)
def get_results(self):
return self.cases, self.results
def SQLiteCallbackFactory(scope_name=None, design_name=None, db=None, using_metamodel=False):
return lambda *a, **k: SQLiteCallback(*a,**k,
scope_name=scope_name,
design_name=design_name,
db=db,
using_metamodel=using_metamodel)
| true
| true
|
1c426b551914d9e9e32db31584539aaa94f1c500
| 1,119
|
py
|
Python
|
python/phonenumbers/data/region_KM.py
|
vishnuku/python-phonenumbers
|
6ac2cdd06b7ccf709a8efb21629cf2c5f030e627
|
[
"Apache-2.0"
] | 3
|
2018-12-02T23:09:00.000Z
|
2018-12-02T23:16:59.000Z
|
python/phonenumbers/data/region_KM.py
|
carljm/python-phonenumbers
|
494044aaf75443dbfd62b8d1352b441af6a458ae
|
[
"Apache-2.0"
] | null | null | null |
python/phonenumbers/data/region_KM.py
|
carljm/python-phonenumbers
|
494044aaf75443dbfd62b8d1352b441af6a458ae
|
[
"Apache-2.0"
] | null | null | null |
"""Auto-generated file, do not edit by hand. KM metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_KM = PhoneMetadata(id='KM', country_code=269, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[3478]\\d{6}', possible_number_pattern='\\d{7}', possible_length=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='7[4-7]\\d{5}', example_number='7712345', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='[34]\\d{6}', example_number='3212345', possible_length=(7,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(national_number_pattern='(?:39[01]|8\\d{2})\\d{4}', possible_number_pattern='\\d{7}', example_number='8001234', possible_length=(7,)),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3')])
| 62.166667
| 167
| 0.730116
|
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_KM = PhoneMetadata(id='KM', country_code=269, international_prefix='00',
general_desc=PhoneNumberDesc(national_number_pattern='[3478]\\d{6}', possible_number_pattern='\\d{7}', possible_length=(7,)),
fixed_line=PhoneNumberDesc(national_number_pattern='7[4-7]\\d{5}', example_number='7712345', possible_length=(7,)),
mobile=PhoneNumberDesc(national_number_pattern='[34]\\d{6}', example_number='3212345', possible_length=(7,)),
toll_free=PhoneNumberDesc(),
premium_rate=PhoneNumberDesc(national_number_pattern='(?:39[01]|8\\d{2})\\d{4}', possible_number_pattern='\\d{7}', example_number='8001234', possible_length=(7,)),
shared_cost=PhoneNumberDesc(),
personal_number=PhoneNumberDesc(),
voip=PhoneNumberDesc(),
pager=PhoneNumberDesc(),
uan=PhoneNumberDesc(),
voicemail=PhoneNumberDesc(),
no_international_dialling=PhoneNumberDesc(),
number_format=[NumberFormat(pattern='(\\d{3})(\\d{2})(\\d{2})', format='\\1 \\2 \\3')])
| true
| true
|
1c426d4046389b146362310306a9cc103065da7a
| 399
|
py
|
Python
|
96. Unique Binary Search Trees.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
96. Unique Binary Search Trees.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
96. Unique Binary Search Trees.py
|
XinchaoGou/MyLeetCode
|
bba0ab077374f7da2cb1a990266bc59fa7ddf23c
|
[
"MIT"
] | null | null | null |
class Solution:
    def numTrees(self, n: int) -> int:
        """Count structurally unique BSTs on n nodes via the Catalan DP.

        counts[k] is the number of unique BSTs on k nodes; each root choice
        splits the remaining nodes into independent left/right subtrees.
        Requires n >= 1, like the original.
        """
        counts = [0] * (n + 1)
        counts[0] = counts[1] = 1
        for total in range(2, n + 1):
            counts[total] = sum(
                counts[root - 1] * counts[total - root]
                for root in range(1, total + 1)
            )
        return counts[n]
class Solution:
    def numTrees(self, n: int) -> int:
        """Count unique BSTs on n nodes via the closed-form Catalan recurrence.

        C(0) = 1 and C(i+1) = C(i) * 2*(2i+1) / (i+2).  Uses exact integer
        arithmetic: the original float version loses precision once the
        Catalan numbers exceed 2**53 (around n = 35).
        """
        catalan = 1
        for i in range(0, n):
            # Floor division is exact here because each Catalan number is
            # an integer, so the numerator is always divisible by (i + 2).
            catalan = catalan * 2 * (2 * i + 1) // (i + 2)
        return catalan
| 24.9375
| 43
| 0.411028
|
class Solution:
def numTrees(self, n: int) -> int:
dp = [0] * (n+1)
dp[0], dp[1] = 1, 1
for i in range(2, n+1):
for j in range(1, i+1):
dp [i] += dp[j-1] * dp[i-j]
return dp[n]
class Solution:
    def numTrees(self, n: int) -> int:
        # Closed-form Catalan recurrence: C_{i+1} = C_i * 2*(2i+1)/(i+2).
        # NOTE(review): float division — exact only while values fit a double,
        # which holds for the LeetCode constraint range (n <= 19).
        C = 1
        for i in range(0, n):
            C = C * 2*(2*i+1)/(i+2)
        return int(C)
| true
| true
|
1c426e79980c532222fc25ad8d44e96babb574fc
| 3,126
|
py
|
Python
|
CALLC/trainl2.py
|
RobbinBouwmeester/CALLC_evaluation
|
0125ed88b767c305261cf5731c671f890bfacadd
|
[
"Apache-2.0"
] | 2
|
2020-01-23T09:51:37.000Z
|
2020-04-23T00:28:16.000Z
|
CALLC/trainl2.py
|
RobbinBouwmeester/CALLC_evaluation
|
0125ed88b767c305261cf5731c671f890bfacadd
|
[
"Apache-2.0"
] | null | null | null |
CALLC/trainl2.py
|
RobbinBouwmeester/CALLC_evaluation
|
0125ed88b767c305261cf5731c671f890bfacadd
|
[
"Apache-2.0"
] | null | null | null |
"""
Robbin Bouwmeester
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
This code is used to train retention time predictors and store
predictions from a CV procedure for further analysis.
This project was made possible by MASSTRPLAN. MASSTRPLAN received funding
from the Marie Sklodowska-Curie EU Framework for Research and Innovation
Horizon 2020, under Grant Agreement No. 675132.
"""
import subprocess
import pandas as pd
def call_ghostbusters(infile_known="temp/tempKnownsl2.csv",infile_unknown="temp/tempUnknownsl2.csv",fold_list="temp/tempFolds.txt"):
	"""Run the Layer 2 GAM model (makeGAM.R) and collect its predictions.

	Parameters
	----------
	infile_known : str
		location of a file with known retention time, for Layer 2
	infile_unknown : str
		location of a file with unknown retention time, for Layer 2
	fold_list : str
		location of a file listing the folds to be used in Layer 2

	Returns
	-------
	pd.DataFrame
		test predictions (read from GAMpredTemp.csv)
	pd.DataFrame
		train predictions (read from GAMtrainTemp.csv)
	"""
	# Pass the arguments as a list with shell=False: this avoids shell
	# injection and quoting problems when a path contains spaces or
	# metacharacters (the old version interpolated them into a single
	# string executed with shell=True).
	cmd = ["Rscript", "makeGAM.R", infile_known, infile_unknown, fold_list]
	print("Going to execute this command: ", " ".join(cmd))
	p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
	out, err = p.communicate()
	# The R script writes its outputs to these fixed temporary files.
	preds = pd.read_csv("GAMpredTemp.csv")
	train_preds = pd.read_csv("GAMtrainTemp.csv")
	return(preds,train_preds)
def apply_l2(known_all,unknown_all,ignore_cols=["IDENTIFIER","time"],cv_list=None):
	"""Write the Layer 2 input files and run the GAM model on them.

	Parameters
	----------
	known_all : pd.DataFrame
		dataframe with known retention time, for Layer 2
	unknown_all : pd.DataFrame
		dataframe with unknown retention time, for Layer 2
	ignore_cols : list
		unused; kept for backward compatibility with existing callers
	cv_list : list
		fold assignment per row, written to temp/tempFolds.txt

	Returns
	-------
	pd.DataFrame
		test predictions
	pd.DataFrame
		train predictions
	"""
	known_all.index = known_all["IDENTIFIER"]
	unknown_all.index = unknown_all["IDENTIFIER"]
	# `with` guarantees the temp files are flushed and closed before the R
	# subprocess reads them; the unused accumulator lists of the previous
	# version (ret_preds, ret_preds_train, cnames) were removed.
	with open("temp/tempKnownsl2.csv","w") as infile_known_handle:
		known_all.to_csv(infile_known_handle,index=False)
	with open("temp/tempUnknownsl2.csv","w") as infile_unknown_handle:
		unknown_all.to_csv(infile_unknown_handle,index=False)
	with open("temp/tempFolds.txt","w") as infile_fold_handle:
		infile_fold_handle.write("\n".join(map(str,cv_list)))
	preds,train_preds = call_ghostbusters()
	return(preds,train_preds)
| 31.26
| 132
| 0.713692
|
import subprocess
import pandas as pd
def call_ghostbusters(infile_known="temp/tempKnownsl2.csv",infile_unknown="temp/tempUnknownsl2.csv",fold_list="temp/tempFolds.txt"):
	"""Run the Layer 2 GAM model (makeGAM.R) in R and return (test, train) predictions."""
	# NOTE(review): shell=True with %-interpolated paths — paths containing
	# spaces or shell metacharacters will break; consider a list + shell=False.
	cmd = "Rscript makeGAM.R %s %s %s" % (infile_known,infile_unknown,fold_list)
	print("Going to execute this command: ",cmd)
	p = subprocess.Popen(cmd, stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)
	out, err = p.communicate()
	# The R script writes its outputs to these fixed temporary files.
	preds = pd.read_csv("GAMpredTemp.csv")
	train_preds = pd.read_csv("GAMtrainTemp.csv")
	return(preds,train_preds)
def apply_l2(known_all,unknown_all,ignore_cols=["IDENTIFIER","time"],cv_list=None):
	"""Write Layer 2 input files (knowns, unknowns, folds) and run the GAM model.

	Returns (test predictions, train predictions) from call_ghostbusters().
	ignore_cols is unused; ret_preds/ret_preds_train/cnames are dead locals.
	"""
	ret_preds = []
	ret_preds_train = []
	cnames = []
	known_all.index = known_all["IDENTIFIER"]
	unknown_all.index = unknown_all["IDENTIFIER"]
	infile_known_handle = open("temp/tempKnownsl2.csv","w")
	infile_unknown_handle = open("temp/tempUnknownsl2.csv","w")
	infile_fold_handle = open("temp/tempFolds.txt","w")
	known_all.to_csv(infile_known_handle,index=False)
	unknown_all.to_csv(infile_unknown_handle,index=False)
	infile_fold_handle.write("\n".join(map(str,cv_list)))
	# Handles are closed before the R subprocess reads the files.
	infile_known_handle.close()
	infile_unknown_handle.close()
	infile_fold_handle.close()
	preds,train_preds = call_ghostbusters()
	return(preds,train_preds)
| true
| true
|
1c4271422b7611b9a21c7152230960baf2a01abd
| 4,371
|
py
|
Python
|
homeassistant/components/logbook/queries/entities_and_devices.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | null | null | null |
homeassistant/components/logbook/queries/entities_and_devices.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 2
|
2021-09-18T03:56:30.000Z
|
2022-03-17T13:07:51.000Z
|
homeassistant/components/logbook/queries/entities_and_devices.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | null | null | null |
"""Entities and Devices queries for logbook."""
from __future__ import annotations
from collections.abc import Iterable
from datetime import datetime as dt
import sqlalchemy
from sqlalchemy import lambda_stmt, select, union_all
from sqlalchemy.orm import Query
from sqlalchemy.sql.lambdas import StatementLambdaElement
from sqlalchemy.sql.selectable import CTE, CompoundSelect
from homeassistant.components.recorder.models import EventData, Events, States
from .common import (
apply_events_context_hints,
apply_states_context_hints,
select_events_context_id_subquery,
select_events_context_only,
select_events_without_states,
select_states_context_only,
)
from .devices import apply_event_device_id_matchers
from .entities import (
apply_entities_hints,
apply_event_entity_id_matchers,
states_query_for_entity_ids,
)
def _select_entities_device_id_context_ids_sub_query(
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    entity_ids: list[str],
    json_quotable_entity_ids: list[str],
    json_quotable_device_ids: list[str],
) -> CompoundSelect:
    """Generate a subquery to find context ids for multiple entities and multiple devices.

    Unions event rows whose event_data matches either the entity ids or the
    device ids with state rows matched directly by entity_id in the window,
    then de-duplicates the context ids via GROUP BY.
    """
    union = union_all(
        select_events_context_id_subquery(start_day, end_day, event_types).where(
            _apply_event_entity_id_device_id_matchers(
                json_quotable_entity_ids, json_quotable_device_ids
            )
        ),
        # State rows for the entities themselves within the time window.
        apply_entities_hints(select(States.context_id))
        .filter((States.last_updated > start_day) & (States.last_updated < end_day))
        .where(States.entity_id.in_(entity_ids)),
    )
    return select(union.c.context_id).group_by(union.c.context_id)
def _apply_entities_devices_context_union(
    query: Query,
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    entity_ids: list[str],
    json_quotable_entity_ids: list[str],
    json_quotable_device_ids: list[str],
) -> CompoundSelect:
    """Union *query* with all rows linked by context to the entities/devices.

    Three legs are appended: direct state rows for the entity ids,
    context-only event rows, and context-only state rows, each joined
    against the CTE of matching context ids.
    """
    devices_entities_cte: CTE = _select_entities_device_id_context_ids_sub_query(
        start_day,
        end_day,
        event_types,
        entity_ids,
        json_quotable_entity_ids,
        json_quotable_device_ids,
    ).cte()
    # We used to optimize this to exclude rows we already in the union with
    # a States.entity_id.not_in(entity_ids) but that made the
    # query much slower on MySQL, and since we already filter them away
    # in the python code anyways since they will have context_only
    # set on them the impact is minimal.
    return query.union_all(
        states_query_for_entity_ids(start_day, end_day, entity_ids),
        apply_events_context_hints(
            select_events_context_only()
            .select_from(devices_entities_cte)
            .outerjoin(Events, devices_entities_cte.c.context_id == Events.context_id)
        ).outerjoin(EventData, (Events.data_id == EventData.data_id)),
        apply_states_context_hints(
            select_states_context_only()
            .select_from(devices_entities_cte)
            .outerjoin(States, devices_entities_cte.c.context_id == States.context_id)
        ),
    )
def entities_devices_stmt(
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    entity_ids: list[str],
    json_quotable_entity_ids: list[str],
    json_quotable_device_ids: list[str],
) -> StatementLambdaElement:
    """Generate a logbook query for multiple entities.

    Wrapped in lambda_stmt so SQLAlchemy can cache the compiled statement;
    the lambda's exact shape is part of the cache key, so keep it intact.
    """
    stmt = lambda_stmt(
        lambda: _apply_entities_devices_context_union(
            select_events_without_states(start_day, end_day, event_types).where(
                _apply_event_entity_id_device_id_matchers(
                    json_quotable_entity_ids, json_quotable_device_ids
                )
            ),
            start_day,
            end_day,
            event_types,
            entity_ids,
            json_quotable_entity_ids,
            json_quotable_device_ids,
        ).order_by(Events.time_fired)
    )
    return stmt
def _apply_event_entity_id_device_id_matchers(
    json_quotable_entity_ids: Iterable[str], json_quotable_device_ids: Iterable[str]
) -> sqlalchemy.or_:
    """Create matchers for the device_id and entity_id in the event_data."""
    entity_matchers = apply_event_entity_id_matchers(json_quotable_entity_ids)
    device_matchers = apply_event_device_id_matchers(json_quotable_device_ids)
    return entity_matchers | device_matchers
| 35.25
| 93
| 0.715626
|
from __future__ import annotations
from collections.abc import Iterable
from datetime import datetime as dt
import sqlalchemy
from sqlalchemy import lambda_stmt, select, union_all
from sqlalchemy.orm import Query
from sqlalchemy.sql.lambdas import StatementLambdaElement
from sqlalchemy.sql.selectable import CTE, CompoundSelect
from homeassistant.components.recorder.models import EventData, Events, States
from .common import (
apply_events_context_hints,
apply_states_context_hints,
select_events_context_id_subquery,
select_events_context_only,
select_events_without_states,
select_states_context_only,
)
from .devices import apply_event_device_id_matchers
from .entities import (
apply_entities_hints,
apply_event_entity_id_matchers,
states_query_for_entity_ids,
)
def _select_entities_device_id_context_ids_sub_query(
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    entity_ids: list[str],
    json_quotable_entity_ids: list[str],
    json_quotable_device_ids: list[str],
) -> CompoundSelect:
    """Build the de-duplicated context-id subquery for the given entities and devices."""
    union = union_all(
        select_events_context_id_subquery(start_day, end_day, event_types).where(
            _apply_event_entity_id_device_id_matchers(
                json_quotable_entity_ids, json_quotable_device_ids
            )
        ),
        # State rows for the entities themselves within the time window.
        apply_entities_hints(select(States.context_id))
        .filter((States.last_updated > start_day) & (States.last_updated < end_day))
        .where(States.entity_id.in_(entity_ids)),
    )
    return select(union.c.context_id).group_by(union.c.context_id)
def _apply_entities_devices_context_union(
    query: Query,
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    entity_ids: list[str],
    json_quotable_entity_ids: list[str],
    json_quotable_device_ids: list[str],
) -> CompoundSelect:
    """Union *query* with context-linked event and state rows for the entities/devices."""
    devices_entities_cte: CTE = _select_entities_device_id_context_ids_sub_query(
        start_day,
        end_day,
        event_types,
        entity_ids,
        json_quotable_entity_ids,
        json_quotable_device_ids,
    ).cte()
    # Rows for the entities themselves, plus context-only events and states
    # joined against the context-id CTE.
    return query.union_all(
        states_query_for_entity_ids(start_day, end_day, entity_ids),
        apply_events_context_hints(
            select_events_context_only()
            .select_from(devices_entities_cte)
            .outerjoin(Events, devices_entities_cte.c.context_id == Events.context_id)
        ).outerjoin(EventData, (Events.data_id == EventData.data_id)),
        apply_states_context_hints(
            select_states_context_only()
            .select_from(devices_entities_cte)
            .outerjoin(States, devices_entities_cte.c.context_id == States.context_id)
        ),
    )
def entities_devices_stmt(
    start_day: dt,
    end_day: dt,
    event_types: tuple[str, ...],
    entity_ids: list[str],
    json_quotable_entity_ids: list[str],
    json_quotable_device_ids: list[str],
) -> StatementLambdaElement:
    """Generate the cached logbook statement for multiple entities and devices."""
    # lambda_stmt caches on the lambda's exact closure/shape — keep intact.
    stmt = lambda_stmt(
        lambda: _apply_entities_devices_context_union(
            select_events_without_states(start_day, end_day, event_types).where(
                _apply_event_entity_id_device_id_matchers(
                    json_quotable_entity_ids, json_quotable_device_ids
                )
            ),
            start_day,
            end_day,
            event_types,
            entity_ids,
            json_quotable_entity_ids,
            json_quotable_device_ids,
        ).order_by(Events.time_fired)
    )
    return stmt
def _apply_event_entity_id_device_id_matchers(
    json_quotable_entity_ids: Iterable[str], json_quotable_device_ids: Iterable[str]
) -> sqlalchemy.or_:
    """OR together the entity-id and device-id event_data matchers."""
    return apply_event_entity_id_matchers(
        json_quotable_entity_ids
    ) | apply_event_device_id_matchers(json_quotable_device_ids)
| true
| true
|
1c4271641131b9b76624ace443ad9d870b03b26b
| 17,129
|
py
|
Python
|
worktime.py
|
knilch0r/worktime
|
f9b8c733f868a59deb94b60d8aea917b58fb4cb9
|
[
"MIT"
] | null | null | null |
worktime.py
|
knilch0r/worktime
|
f9b8c733f868a59deb94b60d8aea917b58fb4cb9
|
[
"MIT"
] | null | null | null |
worktime.py
|
knilch0r/worktime
|
f9b8c733f868a59deb94b60d8aea917b58fb4cb9
|
[
"MIT"
] | 1
|
2020-06-15T12:36:01.000Z
|
2020-06-15T12:36:01.000Z
|
#!/usr/bin/python
import sys
import json
import csv
import os
import copy
import json
import argparse
from datetime import datetime
from datetime import timedelta
from dateutil import parser as date_parser
try:
# Mac OS only
import Quartz
except ImportError:
# Windows
import ctypes
# Directory containing this script; anchors the default config/log paths.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
class Config:
    """Default settings for the work-time tracker; persisted to config.json."""

    def __init__(self):
        # Daily targets, in minutes.
        self.daily_work_minutes = 450
        self.daily_break_minutes = 30
        # Timestamp rendering used throughout the CSV log.
        self.date_format = '%Y-%m-%d'
        self.time_format = '%H:%M:%S'
        # Behaviour toggles.
        self.notifications = True
        self.lock_break = False
        # Default log file lives next to this script.
        self.filename = '%s/work_hours.csv' % SCRIPT_PATH
CONFIG_FILE = '{}/config.json'.format(SCRIPT_PATH)  # where settings are persisted
CONFIG = Config()  # module-wide configuration singleton (mutated by load_configuration)
START_OF_THE_DAY = '00:00:00'
# Indexes for CSV data
IDX_DATE = 0
IDX_START = 1
IDX_END = 2
IDX_OT = 3
IDX_TYPE = 4
IDX_BREAK = 5
IDX_WORKTIME = 6
IDX_STATUS = 7
# Parse command line arguments
def parse_args():
    """Build and parse the CLI: manual date edits, flex days, recalculation, config.

    Argument registration order is the order shown in --help output.
    """
    parser = argparse.ArgumentParser(description='Work hour automator')
    parser.add_argument('-d', '--date', required=False, default=datetime.today(), type=lambda s: date_parser.parse(s),
                        help='Date for manual commands, default: today')
    parser.add_argument('-f', '--flex', required=False, action='store_true',
                        help='Add flex day for given date (-d or --date)')
    parser.add_argument('-s', '--start', nargs='?', const='now', type=lambda s: parse_time(s),
                        help='Modify start time for given date (-d or --date). Default: now')
    parser.add_argument('-e', '--end', nargs='?', const='now', type=lambda s: parse_time(s),
                        help='Modify end time for given date (-d or --date). Default: now')
    parser.add_argument('-ab', '--add-break', dest='addbreakmin', required=False, type=int,
                        help='Add break time in minutes for given date (-d or --date)')
    parser.add_argument('-rb', '--remove-break', dest='removebreakmin', required=False, type=int,
                        help='Remove break time in minutes from given date (-d or --date)')
    parser.add_argument('-wt', '--worktime', required=False, type=int,
                        help='Set work time in minutes for given date (-d or --date)')
    parser.add_argument('-rc', '--recalculate', action='store_true',
                        help='Recalculate work time in given file')
    parser.add_argument('--config', required=False, action='store_true',
                        help='Configure the tool')
    parser.add_argument('filename', nargs='?', default=CONFIG.filename,
                        help='File to save work hours to (default: {})'.format(CONFIG.filename))
    return parser.parse_args()
def write_configuration():
    """Persist the global CONFIG object to CONFIG_FILE as pretty-printed JSON."""
    # `with` closes the handle even if serialization or the write raises
    # (the previous version leaked the handle on error).
    with open(CONFIG_FILE, 'w') as f:
        f.write(json.dumps(CONFIG.__dict__, indent=4))
def load_configuration():
    """Load CONFIG from CONFIG_FILE, creating the file with defaults if absent.

    Best-effort: any failure leaves the in-memory defaults untouched.
    """
    global CONFIG
    try:
        if not os.path.exists(CONFIG_FILE):
            write_configuration()
        with open(CONFIG_FILE, 'r') as f:
            CONFIG.__dict__ = json.loads(f.read())
    except Exception:
        # Deliberately best-effort, but narrowed from a bare `except:` so
        # KeyboardInterrupt/SystemExit still propagate.
        pass
def ask_question(question, rettype, default):
    """Prompt until the answer parses as *rettype*; empty input returns *default*.

    rettype is one of "int", "boolean" (y/n), "directory" (path whose parent
    directory must exist); any other rettype returns the raw string.
    NOTE(review): uses raw_input, so this module targets Python 2 — confirm
    before porting to Python 3 (where it would be input()).
    """
    while True:
        try:
            ret = raw_input(question)
            if len(ret) == 0:
                print('Empty answer given, using default value of {}'.format(default))
                return default
            if rettype == "int":
                return int(ret)
            elif rettype == "boolean":
                if ret == 'y':
                    return True
                elif ret == 'n':
                    return False
                print('Please respond y or n..')
                continue
            elif rettype == "directory":
                dirname = os.path.dirname(os.path.realpath(ret))
                if os.path.isdir(dirname):
                    return ret
                print("Not existing directory. Please try again...")
                continue
            else:
                return ret
        except ValueError:
            # Only int() can raise ValueError here; re-prompt.
            print("Not a valid " + rettype + ". Please try again..")
            continue
        # The original carried an unreachable `else: break` on this try —
        # every successful path returns or continues — removed as dead code.
def configure():
    """Interactive configuration wizard; persists answers via write_configuration().

    Empty answers keep the current value of each setting.
    """
    print('Worktime automator configuration')
    print('--------------------------------')
    print('Empty answers will default to current configuration setting')
    global CONFIG
    CONFIG.filename = ask_question("Filename to save work hour log: ", "directory", CONFIG.filename)
    CONFIG.daily_work_minutes = ask_question("Daily work time in minutes: ", "int", CONFIG.daily_work_minutes)
    CONFIG.daily_break_minutes = ask_question("Daily break time in minutes: ", "int", CONFIG.daily_break_minutes)
    CONFIG.notifications = ask_question("Do you want notifications (y/n): ", "boolean", CONFIG.notifications)
    CONFIG.lock_break = ask_question("Is locking screen considered to be break time (y/n): ", "boolean", CONFIG.lock_break)
    write_configuration()
    print('Configuration saved to {}!'.format(CONFIG_FILE))
def is_windows():
    """Return True when running on native Windows (os.name is 'nt')."""
    return 'nt' == os.name
# Display notification in UI
def notify(title, text, subtitle = None):
    """Show a desktop notification via macOS osascript; no-op on Windows or when disabled."""
    if CONFIG.notifications is False:
        return
    if is_windows():
        # TODO: Windows notifications
        return
    # MAC notifications
    if subtitle is None:
        os.system("""
        osascript -e 'display notification "{}" with title "{}" sound name "Submarine"'
        """.format(text, title))
    else:
        os.system("""
        osascript -e 'display notification "{}" with title "{}" subtitle "{}" sound name "Submarine"'
        """.format(text, title, subtitle))
# Get csv data as list from file
def get_data(filename):
    """Read ';'-delimited CSV rows from *filename* as a list of lists.

    Creates an empty file (and returns []) when *filename* does not exist,
    so later writes always have a target.
    """
    if not os.path.exists(filename):
        open(filename, 'w').close()
        return []
    # `with` guarantees the handle is closed even if csv parsing raises.
    with open(filename, 'r') as f:
        return list(csv.reader(f, delimiter=';'))
# Check if screen is locked
def is_screen_locked():
    """Best-effort check whether the session screen is currently locked.

    macOS: reads CGSSessionScreenIsLocked from the Quartz session dictionary.
    Windows: heuristic on the foreground window handle — author marked it
    "Needs testing"; reliability unconfirmed.
    """
    if is_windows():
        # TODO: Needs testing..
        user32 = ctypes.windll.User32
        return user32.GetForegroundWindow() % 10 == 0
    else:
        data = Quartz.CGSessionCopyCurrentDictionary()
        return data.get('CGSSessionScreenIsLocked', 0) == 1
# Remove headers and footer from csv data
def remove_headers_and_footer(row_data):
    """Strip the 'Date' header row and 'Total' footer row from *row_data*, if present.

    Mutates and returns the same list.
    """
    if row_data and row_data[0][IDX_DATE] == 'Date':
        del row_data[0]
    # Re-check emptiness: a file holding only the header row would otherwise
    # make the footer check index row_data[-1] on an empty list (IndexError
    # in the previous version).
    if row_data and row_data[-1][IDX_DATE] == 'Total':
        del row_data[-1]
    return row_data
# Get current date as string
def get_current_date():
    """Today's date rendered with CONFIG.date_format."""
    today = datetime.today()
    return today.strftime(CONFIG.date_format)
# Get current time as string
def get_current_time():
    """The current wall-clock time rendered with CONFIG.time_format."""
    now = datetime.now()
    return now.strftime(CONFIG.time_format)
# Check if work time is started today
def is_started_today(row_data):
    """True when some row's date column equals today's date string."""
    today = get_current_date()
    return any(row[0] == today for row in row_data)
def get_index(row_data, date):
    """Index of the first row whose date column equals *date*, or -1 if absent."""
    for pos, row in enumerate(row_data):
        if row[IDX_DATE] == date:
            return pos
    return -1
# Get index of today in the csv data
def get_today_index(row_data):
    """Index of today's row in *row_data*, or -1 when today has no row yet."""
    return get_index(row_data, get_current_date())
# Add header for explanation
def add_header(row_data):
    """Prepend the column-name header row; mutates and returns *row_data*."""
    header = ['Date', 'Start', 'End', 'Overtime', 'Type', 'Break time', 'Work time', 'Status']
    row_data.insert(0, header)
    return row_data
# Add footer with total overtime
def add_footer(row_data):
    """Append a 'Total' row summing the overtime column over all data rows.

    Rows whose first cell is 'Date' (a header) are skipped. Mutates and
    returns *row_data*.
    """
    minutes = sum(int(row[IDX_OT]) for row in row_data if row[0] != 'Date')
    hours = minutes / float(60)
    row_data.append(['Total', '', '', str(minutes) + 'min', "{0:.2f}".format(hours) + 'h'])
    return row_data
# Write data to csv file
def write_data(filename, row_data):
    """Write *row_data* to *filename* as ';'-delimited CSV, overwriting it."""
    # `with` flushes and closes the file even if the writer raises
    # (the previous version leaked the handle on error).
    with open(filename, 'w') as f:
        csv.writer(f, delimiter=';').writerows(row_data)
# Start work time
def start_work_time(row_data):
    """Append a fresh 'Ongoing' row for today and announce the start."""
    now = get_current_time()
    if is_weekend():
        # Weekends carry no expected work or break time.
        breaks, worktime = '0', '0'
    else:
        breaks = str(CONFIG.daily_break_minutes)
        worktime = str(CONFIG.daily_work_minutes)
    row_data.append([get_current_date(), now, now, '00', 'A', breaks, worktime, 'Ongoing'])
    notify("Work time", "Good morning! Work time started at " + now)
    return row_data
# Check if work time has changed manually
def is_changed_manually(row_data):
    """Return True when today's row exists and is flagged manual ('M')."""
    idx = get_today_index(row_data)
    if idx < 0:
        return False
    row = row_data[idx]
    if len(row) < 5:
        # Pad missing trailing columns with the automatic marker. The old
        # code assigned row[IDX_TYPE] = 'A', which raises IndexError on a
        # row shorter than 5 columns.
        row.extend(['A'] * (5 - len(row)))
    return row[IDX_TYPE] == 'M'
# Calculate flex for single date
def calculate_flex(row_data, date):
    """Recompute the overtime column (minutes) for the row matching *date*.

    Overtime = elapsed(start..end) - expected work time - break time.
    No-op when *date* has no row.
    """
    idx = get_index(row_data, date)
    if idx < 0:
        return row_data
    row = row_data[idx]
    break_seconds = int(row[IDX_BREAK]) * 60
    work_seconds = int(row[IDX_WORKTIME]) * 60
    started = date_parser.parse(date + " " + row[IDX_START])
    ended = date_parser.parse(date + " " + row[IDX_END])
    overtime_seconds = (ended - started).total_seconds() - work_seconds - break_seconds
    # divmod floors, so negative overtime rounds toward minus infinity.
    row[IDX_OT] = str(int(divmod(overtime_seconds, 60)[0]))
    return row_data
# Set end time for current date
def set_endtime(row_data):
    """Stamp the current time as today's end time, then refresh overtime."""
    today = get_current_date()
    row_data[get_today_index(row_data)][IDX_END] = get_current_time()
    return calculate_flex(row_data, today)
# End work time
def end_work_time(row_data):
    """Close today's row unless it is already ended or not automatic ('A')."""
    idx = get_today_index(row_data)
    if row_data[idx][IDX_STATUS] == 'Ended' or row_data[idx][IDX_TYPE] != 'A':
        return row_data
    updated = set_endtime(row_data)
    updated[idx][IDX_STATUS] = 'Ended'
    return updated
def add_lock_break(row_data):
    """Credit the just-finished locked-screen period as break time for today.

    The gap between the stored end time and now becomes extra break minutes;
    overtime is recomputed and a notification summarises the remaining time.
    """
    idx = get_today_index(row_data)
    today = get_current_date()
    locked_at = date_parser.parse(today + " " + row_data[idx][IDX_END])
    unlocked_at = date_parser.parse(today + " " + get_current_time())
    minutes = int(divmod((unlocked_at - locked_at).total_seconds(), 60)[0])
    row_data[idx][IDX_BREAK] = str(int(row_data[idx][IDX_BREAK]) + minutes)
    row_data = calculate_flex(row_data, today)
    flex = float(row_data[idx][IDX_OT]) / float(60)
    if flex > 0:
        left = 'You are done today! Over time for today is {0:.2f}h'.format(flex)
    else:
        left = 'You still have {0:.2f}h to work today'.format(-flex)
    notify('Work time', 'Added automatic break of {} minutes'.format(minutes), left)
    return row_data
# Resume work time
def resume_work_time(row_data):
    """Re-open today's row after an unlock, crediting break time if configured."""
    idx = get_today_index(row_data)
    row_data = set_endtime(row_data)
    if row_data[idx][IDX_STATUS] != 'Ongoing':
        # Coming back from an ended state: either the lock counts as break
        # time, or just remind the user how much work is left.
        if CONFIG.lock_break is True:
            row_data = add_lock_break(row_data)
        else:
            notify_left_worktime(row_data)
    row_data[idx][IDX_STATUS] = 'Ongoing'
    return row_data
def notify_left_worktime(row_data):
    """Notify how much work time remains today (or how much overtime accrued)."""
    flex = float(row_data[get_today_index(row_data)][IDX_OT]) / float(60)
    if flex > 0:
        notify('Work time', 'You are done for today! Over time for today is {0:.2f}h'.format(flex))
    else:
        notify('Work time', 'You still have {0:.2f}h to work today'.format(-flex))
# Notify time left for today hourly
def notify_hourly(row_data):
    """Fire the remaining-time notification once per hour.

    Triggers only when the current minute matches the start time's minute
    and today's row is still 'Ongoing'.
    """
    row = row_data[get_today_index(row_data)]
    started = parse_time(row[IDX_START])
    same_minute = started.strftime('%M') == datetime.now().strftime('%M')
    if same_minute and row[IDX_STATUS] == 'Ongoing':
        notify_left_worktime(row_data)
# Parse time from str
def parse_time(dur_str):
    """Parse a 'HH:MM:SS' string as a datetime on today's date.

    An empty string or 'now' yields the current moment.
    """
    if dur_str in ('', 'now'):
        return datetime.now()
    return date_parser.parse(get_current_date() + " " + dur_str)
# Check if data has changed
def is_changed(original, new):
    """True when the two row lists differ in length or in any element."""
    # Sequence inequality already compares lengths and elements pairwise,
    # which is exactly what the explicit loop did.
    return original != new
# Handle automatic lock/unlock work time
def handle_automatic(row_data):
    """Automatic tracking driven by the screen-lock state.

    Manually-edited days are left alone. Locked screen ends today's row;
    unlocked screen starts or resumes it (with an hourly reminder).
    """
    if is_changed_manually(row_data):
        return row_data
    started = is_started_today(row_data)
    if is_screen_locked():
        if started:
            row_data = end_work_time(row_data)
        return row_data
    if not started:
        return start_work_time(row_data)
    notify_hourly(row_data)
    return resume_work_time(row_data)
# Add flex for specific date
def add_flex(row_data, flex_date):
    """Mark *flex_date* as a flex day: a full negative day of work time ('F')."""
    day = flex_date.strftime(CONFIG.date_format)
    now = get_current_time()
    minutes = -CONFIG.daily_work_minutes
    print('Flexing {} with {} minutes'.format(day, minutes))
    idx = get_index(row_data, day)
    if idx < 0:
        row_data.append([day, now, now, str(minutes), 'F', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended'])
    else:
        row = row_data[idx]
        row[IDX_START] = now
        row[IDX_END] = now
        row[IDX_OT] = str(minutes)
        row[IDX_TYPE] = 'F'
    return row_data
# Modify start time of the day
def modify_start(row_data, date, start_time):
    """Set the start time for *date*, creating an 'A' row if none exists."""
    day = date.strftime(CONFIG.date_format)
    stamp = start_time.strftime(CONFIG.time_format)
    print('Modifying start time of {} to {}'.format(day, stamp))
    idx = get_index(row_data, day)
    if idx >= 0:
        # NOTE(review): unlike modify_end, this branch does not flag the row
        # 'M', so automatic handling may still adjust the day — confirm intent.
        row_data[idx][IDX_START] = stamp
    else:
        row_data.append([day, stamp, stamp, '00', 'A', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended'])
    return calculate_flex(row_data, day)
# Modify end of the day
def modify_end(row_data, date, end_time):
    """Set the end time for *date* and flag the row manual ('M')."""
    day = date.strftime(CONFIG.date_format)
    stamp = end_time.strftime(CONFIG.time_format)
    print('Modifying end time of {} to {}'.format(day, stamp))
    idx = get_index(row_data, day)
    if idx >= 0:
        row_data[idx][IDX_END] = stamp
        # Manual flag stops handle_automatic() from touching this day.
        row_data[idx][IDX_TYPE] = 'M'
    else:
        row_data.append([day, stamp, stamp, '00', 'M', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended'])
    return calculate_flex(row_data, day)
def add_break(row_data, date, time):
    """Add (or, with negative *time*, remove) break minutes for *date*."""
    day = date.strftime(CONFIG.date_format)
    if time > 0:
        print('Adding {} minute break to {}'.format(time, day))
    else:
        print('Removing {} minutes from break time of {}'.format(-time, day))
    idx = get_index(row_data, day)
    if idx >= 0:
        row_data[idx][IDX_BREAK] = str(int(row_data[idx][IDX_BREAK]) + time)
    else:
        now = get_current_time()
        row_data.append([day, now, now, '00', 'M', str(CONFIG.daily_break_minutes + time), str(CONFIG.daily_work_minutes), 'Ended'])
    # When idx is -1 this indexes the row just appended, which is intended.
    print('Total brakes {} is {} minutes'.format(day, row_data[idx][IDX_BREAK]))
    return calculate_flex(row_data, day)
def set_worktime(row_data, date, time):
    """Override the expected work time (minutes) for *date*, then refresh overtime."""
    day = date.strftime(CONFIG.date_format)
    print('Setting work time to {} minutes for {}'.format(time, day))
    idx = get_index(row_data, day)
    if idx >= 0:
        row_data[idx][IDX_WORKTIME] = str(time)
    else:
        now = get_current_time()
        # Include the 8th ('Ended') column: every other writer appends the
        # full 8-column layout; the previous version appended only 7 columns
        # and relied on ensure_columns() to repair the row on the next run.
        row_data.append([day, now, now, '00', 'M', str(CONFIG.daily_break_minutes), str(time), 'Ended'])
    return calculate_flex(row_data, day)
def is_weekend():
    """True on Saturday or Sunday (weekday() of 5 or 6)."""
    return datetime.today().weekday() in (5, 6)
def recalculate(row_data):
    """Recompute the overtime column for every row in *row_data*."""
    for entry in row_data:
        print('Recalculating {}'.format(entry[IDX_DATE]))
        row_data = calculate_flex(row_data, entry[IDX_DATE])
    return row_data
def end_previous(row_data):
    """Force every row except today's into the 'Ended' status."""
    today = get_current_date()
    for row in row_data:
        if row[IDX_DATE] != today:
            row[IDX_STATUS] = 'Ended'
    return row_data
def ensure_columns(row_data):
    """Pad legacy rows up to the full 8-column layout with sensible defaults."""
    defaults = ['A', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended']
    for row in row_data:
        # Columns 5..8 get type/break/worktime/status defaults when missing.
        for width, value in enumerate(defaults, start=5):
            if len(row) < width:
                row.append(value)
    return row_data
# Main
def main():
    """Entry point: apply manual CLI edits, then automatic lock/unlock tracking.

    Order matters: manual edits run before the automatic handling, rows are
    sorted by date, and the file is rewritten only when something changed.
    """
    load_configuration()
    args = parse_args()
    if args.config is True:
        configure()
        return
    filename = args.filename
    row_data = get_data(filename)
    row_data = remove_headers_and_footer(row_data)
    row_data = ensure_columns(row_data)
    # Deep copy so we can detect whether anything below actually changed.
    original_data = copy.deepcopy(row_data)
    if args.recalculate is True:
        row_data = recalculate(row_data)
    if args.flex is True:
        row_data = add_flex(row_data, args.date)
    if args.start is not None:
        row_data = modify_start(row_data, args.date, args.start)
    if args.end is not None:
        row_data = modify_end(row_data, args.date, args.end)
    if args.addbreakmin is not None and args.addbreakmin > 0:
        row_data = add_break(row_data, args.date, args.addbreakmin)
    if args.removebreakmin is not None and args.removebreakmin > 0:
        row_data = add_break(row_data, args.date, -args.removebreakmin)
    if args.worktime is not None and args.worktime >= 0:
        row_data = set_worktime(row_data, args.date, args.worktime)
    row_data = handle_automatic(row_data)
    row_data = end_previous(row_data)
    row_data = sorted(row_data, key=lambda l:l[0])
    if is_changed(original_data, row_data):
        row_data = add_header(row_data)
        row_data = add_footer(row_data)
        write_data(filename, row_data)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Ctrl-C mid-run is expected (manual invocation); exit quietly.
        print('Interrupted by user!')
| 31.257299
| 161
| 0.693619
|
import sys
import json
import csv
import os
import copy
import json
import argparse
from datetime import datetime
from datetime import timedelta
from dateutil import parser as date_parser
try:
import Quartz
except ImportError:
import ctypes
# Directory containing this script; anchors the default config/log paths.
SCRIPT_PATH = os.path.dirname(os.path.realpath(__file__))
class Config:
    """Runtime settings for the work-time tracker; persisted as JSON."""
    def __init__(self):
        self.daily_work_minutes = 450   # expected daily work time
        self.daily_break_minutes = 30   # default daily break allowance
        self.notifications = True       # desktop notifications on/off
        self.date_format = '%Y-%m-%d'
        self.time_format = '%H:%M:%S'
        self.lock_break = False         # count locked-screen time as break
        self.filename = '{}/work_hours.csv'.format(SCRIPT_PATH)
CONFIG_FILE = '{}/config.json'.format(SCRIPT_PATH)  # where settings are persisted
CONFIG = Config()  # module-wide configuration singleton
START_OF_THE_DAY = '00:00:00'
# Column indexes within a CSV row.
IDX_DATE = 0
IDX_START = 1
IDX_END = 2
IDX_OT = 3
IDX_TYPE = 4
IDX_BREAK = 5
IDX_WORKTIME = 6
IDX_STATUS = 7
def parse_args():
    """Build and parse the CLI: manual date edits, flex days, recalculation, config."""
    parser = argparse.ArgumentParser(description='Work hour automator')
    parser.add_argument('-d', '--date', required=False, default=datetime.today(), type=lambda s: date_parser.parse(s),
                        help='Date for manual commands, default: today')
    parser.add_argument('-f', '--flex', required=False, action='store_true',
                        help='Add flex day for given date (-d or --date)')
    parser.add_argument('-s', '--start', nargs='?', const='now', type=lambda s: parse_time(s),
                        help='Modify start time for given date (-d or --date). Default: now')
    parser.add_argument('-e', '--end', nargs='?', const='now', type=lambda s: parse_time(s),
                        help='Modify end time for given date (-d or --date). Default: now')
    parser.add_argument('-ab', '--add-break', dest='addbreakmin', required=False, type=int,
                        help='Add break time in minutes for given date (-d or --date)')
    parser.add_argument('-rb', '--remove-break', dest='removebreakmin', required=False, type=int,
                        help='Remove break time in minutes from given date (-d or --date)')
    parser.add_argument('-wt', '--worktime', required=False, type=int,
                        help='Set work time in minutes for given date (-d or --date)')
    parser.add_argument('-rc', '--recalculate', action='store_true',
                        help='Recalculate work time in given file')
    parser.add_argument('--config', required=False, action='store_true',
                        help='Configure the tool')
    parser.add_argument('filename', nargs='?', default=CONFIG.filename,
                        help='File to save work hours to (default: {})'.format(CONFIG.filename))
    return parser.parse_args()
def write_configuration():
    """Persist the global CONFIG object to CONFIG_FILE as pretty-printed JSON."""
    s = json.dumps(CONFIG.__dict__, indent=4)
    f = open(CONFIG_FILE, 'w')
    f.write(s)
    f.close()
def load_configuration():
    """Load CONFIG from CONFIG_FILE, creating it with defaults if absent.

    Best-effort: any failure (including the bare except below, which also
    swallows SystemExit/KeyboardInterrupt) leaves the defaults untouched.
    """
    try:
        global CONFIG
        if not os.path.exists(CONFIG_FILE):
            write_configuration()
        f = open(CONFIG_FILE, 'r')
        s = f.read()
        CONFIG.__dict__ = json.loads(s)
        f.close()
    except:
        pass
def ask_question(question, rettype, default):
    """Prompt until the answer parses as *rettype*; empty input returns *default*.

    rettype: "int", "boolean" (y/n), "directory" (parent must exist), or raw
    string otherwise. NOTE(review): raw_input implies Python 2 — confirm.
    """
    while True:
        try:
            ret = raw_input(question)
            if len(ret) == 0:
                print('Empty answer given, using default value of {}'.format(default))
                return default
            if rettype == "int":
                intret = int(ret)
                return intret
            elif rettype == "boolean":
                if ret == 'y':
                    return True
                elif ret == 'n':
                    return False
                print('Please respond y or n..')
                continue
            elif rettype == "directory":
                dirname = os.path.dirname(os.path.realpath(ret))
                if os.path.isdir(dirname):
                    return ret
                print("Not existing directory. Please try again...")
                continue
            else:
                return ret
        except ValueError:
            print("Not a valid " + rettype + ". Please try again..")
            continue
        else:
            # Unreachable: every successful try path returns or continues.
            break
def configure():
    """Interactive wizard: ask for every setting and persist the answers
    to the configuration file via write_configuration()."""
    print('Worktime automator configuration')
    print('--------------------------------')
    print('Empty answers will default to current configuration setting')
    global CONFIG
    CONFIG.filename = ask_question("Filename to save work hour log: ", "directory", CONFIG.filename)
    CONFIG.daily_work_minutes = ask_question("Daily work time in minutes: ", "int", CONFIG.daily_work_minutes)
    CONFIG.daily_break_minutes = ask_question("Daily break time in minutes: ", "int", CONFIG.daily_break_minutes)
    CONFIG.notifications = ask_question("Do you want notifications (y/n): ", "boolean", CONFIG.notifications)
    CONFIG.lock_break = ask_question("Is locking screen considered to be break time (y/n): ", "boolean", CONFIG.lock_break)
    write_configuration()
    print('Configuration saved to {}!'.format(CONFIG_FILE))
def is_windows():
    """True when running on a Windows host (os.name reports 'nt')."""
    return 'nt' == os.name
def notify(title, text, subtitle = None):
    """Show a desktop notification via macOS osascript.

    No-ops when notifications are disabled in CONFIG or when running on
    Windows (no osascript there).
    """
    if CONFIG.notifications is False:
        return
    if is_windows():
        return
    # NOTE(review): title/text/subtitle are interpolated straight into a
    # shell command -- a double quote in the message would break out of the
    # AppleScript string (shell/AppleScript injection).  Callers in this
    # file only pass program-generated text, but confirm before ever
    # passing user-controlled input here.
    if subtitle is None:
        os.system("""
        osascript -e 'display notification "{}" with title "{}" sound name "Submarine"'
        """.format(text, title))
    else:
        os.system("""
        osascript -e 'display notification "{}" with title "{}" subtitle "{}" sound name "Submarine"'
        """.format(text, title, subtitle))
def get_data(filename):
    """Read the semicolon-separated work log and return it as a list of rows.

    Creates an empty log file (and returns []) when `filename` does not
    exist yet.
    """
    if not os.path.exists(filename):
        # Touch the file so later writes have something to replace.
        with open(filename, 'w'):
            pass
        return []
    # Context manager replaces the original manual open/close pair, which
    # leaked the handle if the read raised.
    with open(filename, 'r') as f:
        return [line for line in csv.reader(f, delimiter=';')]
def is_screen_locked():
    """Best-effort check whether the desktop session is currently locked."""
    if is_windows():
        # NOTE(review): heuristic -- GetForegroundWindow() returns 0 when no
        # window has focus (e.g. on the lock screen), but "% 10 == 0" also
        # matches many other handle values; confirm reliability on Windows.
        user32 = ctypes.windll.User32
        return user32.GetForegroundWindow() % 10 == 0
    else:
        # macOS: the Quartz (pyobjc) session dictionary exposes the lock flag.
        data = Quartz.CGSessionCopyCurrentDictionary()
        return data.get('CGSSessionScreenIsLocked', 0) == 1
def remove_headers_and_footer(row_data):
    """Strip the decorative 'Date' header row and trailing 'Total' footer
    row (added by add_header/add_footer), leaving only real entries."""
    if len(row_data) == 0:
        return row_data
    if row_data[0][IDX_DATE] == 'Date':
        del row_data[0]
    # Guard against a file that contained only the header row: the original
    # indexed row_data[len-1] unconditionally and raised IndexError on an
    # empty list.
    if row_data and row_data[-1][IDX_DATE] == 'Total':
        del row_data[-1]
    return row_data
def get_current_date():
    """Today's date rendered with the configured date format."""
    fmt = CONFIG.date_format
    return datetime.today().strftime(fmt)
def get_current_time():
    """Current wall-clock time rendered with the configured time format."""
    fmt = CONFIG.time_format
    return datetime.now().strftime(fmt)
def is_started_today(row_data):
    """True when the log already contains an entry for today's date."""
    todaystr = get_current_date()
    # any() replaces the manual flag-and-break loop; IDX_DATE replaces the
    # hard-coded 0 for consistency with the other row-accessing helpers.
    return any(row[IDX_DATE] == todaystr for row in row_data)
def get_index(row_data, date):
    """Index of the row whose date column equals `date`, or -1 when absent."""
    for pos, row in enumerate(row_data):
        if row[IDX_DATE] == date:
            return pos
    return -1
def get_today_index(row_data):
    """Index of today's row in `row_data`, or -1 when today has no entry."""
    return get_index(row_data, get_current_date())
def add_header(row_data):
    """Prepend the human-readable column header row (in place)."""
    header = ['Date', 'Start', 'End', 'Overtime', 'Type', 'Break time', 'Work time', 'Status']
    row_data.insert(0, header)
    return row_data
def add_footer(row_data):
    """Append a 'Total' row summarising accumulated overtime (minutes and hours)."""
    total_seconds = sum(int(row[IDX_OT]) * 60 for row in row_data if row[0] != 'Date')
    minutes = total_seconds // 60
    hours = minutes / 60.0
    row_data.append(['Total', '', '', str(minutes) + 'min', "{0:.2f}".format(hours) + 'h'])
    return row_data
def write_data(filename, row_data):
    """Overwrite `filename` with `row_data` as semicolon-separated CSV."""
    # Context manager ensures the file is flushed and closed even when the
    # write raises (the original leaked the handle in that case).
    with open(filename, 'w') as f:
        csv.writer(f, delimiter=';').writerows(row_data)
def start_work_time(row_data):
    """Append a fresh 'Ongoing' entry for today and notify the user.

    Weekend days start with zero expected work/break time so any time
    logged counts directly as overtime.
    """
    now = get_current_time()
    if is_weekend():
        breaks, worktime = '0', '0'
    else:
        breaks = str(CONFIG.daily_break_minutes)
        worktime = str(CONFIG.daily_work_minutes)
    row_data.append([get_current_date(), now, now, '00', 'A', breaks, worktime, 'Ongoing'])
    notify("Work time", "Good morning! Work time started at " + now)
    return row_data
def is_changed_manually(row_data):
    """True when today's row was edited manually (type column == 'M').

    Side effect: a short row (fewer than 5 columns, from an old file
    version) is patched in place with the default type 'A'.
    """
    i = get_today_index(row_data)
    if i < 0:
        return False
    row = row_data[i]
    if len(row) < 5:
        # NOTE(review): if IDX_TYPE points past the end of this short row,
        # the assignment raises IndexError -- presumably ensure_columns()
        # has already padded rows by the time this runs; confirm ordering.
        row[IDX_TYPE] = 'A'
    return row[IDX_TYPE] == 'M'
def calculate_flex(row_data, date):
    """Recompute the overtime ("flex") column for `date`.

    Overtime = (end - start) - expected work time - break time, stored as
    whole minutes (negative while the day's quota is not yet met).  Rows
    for unknown dates are left untouched.
    """
    i = get_index(row_data, date)
    if i < 0:
        return row_data
    # Break/work columns are stored as minutes; convert to seconds to match
    # timedelta.total_seconds() below.
    break_time = int(row_data[i][IDX_BREAK]) * 60
    work_time = int(row_data[i][IDX_WORKTIME]) * 60
    ending = date_parser.parse(date + " " + row_data[i][IDX_END])
    starting = date_parser.parse(date + " " + row_data[i][IDX_START])
    elapsed = ending - starting
    seconds = elapsed.total_seconds() - work_time - break_time
    # divmod floors, so negative balances round toward minus infinity.
    minutes = int(divmod(seconds, 60)[0])
    row_data[i][IDX_OT] = str(minutes)
    return row_data
def set_endtime(row_data):
    """Stamp the current time as today's end time and refresh the flex column."""
    today = get_current_date()
    idx = get_today_index(row_data)
    row_data[idx][IDX_END] = get_current_time()
    return calculate_flex(row_data, today)
def end_work_time(row_data):
    """Close today's entry with status 'Ended', unless it was closed
    already or is not an automatic ('A') entry."""
    idx = get_today_index(row_data)
    entry = row_data[idx]
    if entry[IDX_STATUS] == 'Ended' or entry[IDX_TYPE] != 'A':
        return row_data
    row_data = set_endtime(row_data)
    row_data[idx][IDX_STATUS] = 'Ended'
    return row_data
def add_lock_break(row_data):
    """Book the time the screen was locked as break time for today.

    Called on unlock when the entry was not 'Ongoing': the gap between the
    recorded end time and now is added to the break column, the flex is
    recomputed, and the user is notified of the remaining/over time.
    """
    i = get_today_index(row_data)
    currenttime = get_current_time()
    date = get_current_date()
    ending = date_parser.parse(date + " " + row_data[i][IDX_END])
    now = date_parser.parse(date + " " + currenttime)
    elapsed = now - ending
    minutes = int(divmod(elapsed.total_seconds(), 60)[0])
    row_data[i][IDX_BREAK] = str(int(row_data[i][IDX_BREAK]) + minutes)
    row_data = calculate_flex(row_data, date)
    left = ''
    # Positive flex means the daily quota is already met.
    flex = float(row_data[i][IDX_OT]) / float(60)
    if flex > 0:
        left = 'You are done today! Over time for today is {0:.2f}h'.format(flex)
    else:
        left = 'You still have {0:.2f}h to work today'.format(-flex)
    notify('Work time', 'Added automatic break of {} minutes'.format(minutes), left)
    return row_data
def resume_work_time(row_data):
    """Re-open today's entry after the screen was unlocked.

    When the entry had already been closed, the locked period is booked as
    break time (if configured) or the remaining time is merely reported.
    """
    idx = get_today_index(row_data)
    row_data = set_endtime(row_data)
    was_ongoing = row_data[idx][IDX_STATUS] == 'Ongoing'
    if not was_ongoing:
        if CONFIG.lock_break is True:
            row_data = add_lock_break(row_data)
        else:
            notify_left_worktime(row_data)
    row_data[idx][IDX_STATUS] = 'Ongoing'
    return row_data
def notify_left_worktime(row_data):
    """Tell the user how much work time remains today, or the overtime earned."""
    idx = get_today_index(row_data)
    flex = float(row_data[idx][IDX_OT]) / 60.0
    if flex > 0:
        message = 'You are done for today! Over time for today is {0:.2f}h'.format(flex)
    else:
        message = 'You still have {0:.2f}h to work today'.format(-flex)
    notify('Work time', message)
def notify_hourly(row_data):
    """Emit the remaining-time notification once per full hour of work.

    Fires when the current minute-of-hour equals the start time's minute.
    NOTE(review): this assumes the script runs at most once per minute
    (e.g. from a per-minute scheduler); confirm the intended scheduling.
    """
    i = get_today_index(row_data)
    started = parse_time(row_data[i][IDX_START])
    if started.strftime('%M') == datetime.now().strftime('%M') and row_data[i][IDX_STATUS] == 'Ongoing':
        notify_left_worktime(row_data)
def parse_time(dur_str):
    """Parse a clock-time string into a datetime on today's date; an empty
    string or 'now' yields the current moment."""
    if dur_str == 'now' or len(dur_str) == 0:
        return datetime.now()
    return date_parser.parse(get_current_date() + " " + dur_str)
def is_changed(original, new):
    """True when the two row lists differ (used to skip pointless rewrites).

    Python compares lists element-wise, which covers both the length check
    and the per-row comparison of the original hand-rolled loop.
    """
    return original != new
def handle_automatic(row_data):
    """Core automatic state machine, driven by the screen-lock state.

    Manually edited days are left untouched.  A locked screen ends the
    current day; an unlocked screen starts or resumes it.
    """
    if is_changed_manually(row_data):
        return row_data
    started = is_started_today(row_data)
    if is_screen_locked():
        if started:
            row_data = end_work_time(row_data)
        return row_data
    if started:
        notify_hourly(row_data)
        row_data = resume_work_time(row_data)
    else:
        row_data = start_work_time(row_data)
    return row_data
def add_flex(row_data, flex_date):
    """Book a full flex day (type 'F') for `flex_date`: the whole daily
    work time is withdrawn from the overtime balance."""
    day = flex_date.strftime(CONFIG.date_format)
    now = get_current_time()
    minutes = -CONFIG.daily_work_minutes
    print('Flexing {} with {} minutes'.format(day, minutes))
    idx = get_index(row_data, day)
    if idx < 0:
        row_data.append([day, now, now, str(minutes), 'F', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended'])
    else:
        row_data[idx][IDX_START] = now
        row_data[idx][IDX_END] = now
        row_data[idx][IDX_OT] = str(minutes)
        row_data[idx][IDX_TYPE] = 'F'
    return row_data
def modify_start(row_data, date, start_time):
    """Manually set the start time for `date` and recompute its flex.

    Creates the row when the date has no entry yet.  NOTE(review): unlike
    modify_end() this does not mark the row as manual ('M'), so the
    automatic handler may still overwrite it -- confirm this is intended.
    """
    formatted_date = date.strftime(CONFIG.date_format)
    formatted_time = start_time.strftime(CONFIG.time_format)
    print('Modifying start time of {} to {}'.format(formatted_date, formatted_time))
    i = get_index(row_data, formatted_date)
    if i >= 0:
        row_data[i][IDX_START] = formatted_time
    else:
        row_data.append([formatted_date, formatted_time, formatted_time, '00', 'A', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended'])
    row_data = calculate_flex(row_data, formatted_date)
    return row_data
def modify_end(row_data, date, end_time):
    """Manually set the end time for `date`, marking the row type 'M' so
    the automatic handler leaves it alone, then recompute its flex."""
    day = date.strftime(CONFIG.date_format)
    stamp = end_time.strftime(CONFIG.time_format)
    print('Modifying end time of {} to {}'.format(day, stamp))
    idx = get_index(row_data, day)
    if idx < 0:
        row_data.append([day, stamp, stamp, '00', 'M', str(CONFIG.daily_break_minutes), str(CONFIG.daily_work_minutes), 'Ended'])
    else:
        row_data[idx][IDX_END] = stamp
        row_data[idx][IDX_TYPE] = 'M'
    return calculate_flex(row_data, day)
def add_break(row_data, date, time):
    """Add (or, with a negative `time`, remove) break minutes for `date`.

    Creates a new manually-typed row when the date has no entry yet, then
    recomputes the day's flex.
    """
    formatted_date = date.strftime(CONFIG.date_format)
    if time > 0:
        print('Adding {} minute break to {}'.format(time, formatted_date))
    else:
        print('Removing {} minutes from break time of {}'.format(-time, formatted_date))
    i = get_index(row_data, formatted_date)
    if i >= 0:
        row_data[i][IDX_BREAK] = str(int(row_data[i][IDX_BREAK]) + time)
    else:
        formatted_time = get_current_time()
        row_data.append([formatted_date, formatted_time, formatted_time, '00', 'M', str(CONFIG.daily_break_minutes + time), str(CONFIG.daily_work_minutes), 'Ended'])
        # Point at the row just appended: the original fell through with
        # i == -1 and only worked by accident of negative indexing.
        i = len(row_data) - 1
    # "breaks" was misspelled "brakes" in the original user-facing message.
    print('Total breaks {} is {} minutes'.format(formatted_date, row_data[i][IDX_BREAK]))
    row_data = calculate_flex(row_data, formatted_date)
    return row_data
def set_worktime(row_data, date, time):
    """Override the expected work time (minutes) for `date` and recompute flex.

    Creates a manually-typed row when the date has no entry yet.
    """
    formatted_date = date.strftime(CONFIG.date_format)
    print('Setting work time to {} minutes for {}'.format(time, formatted_date))
    i = get_index(row_data, formatted_date)
    if i >= 0:
        row_data[i][IDX_WORKTIME] = str(time)
    else:
        formatted_time = get_current_time()
        # Include the trailing status column: every other writer appends 8
        # columns, and end_previous() assigns row[IDX_STATUS], which raised
        # IndexError on the original 7-column row for a past date.
        row_data.append([formatted_date, formatted_time, formatted_time, '00', 'M', str(CONFIG.daily_break_minutes), str(time), 'Ended'])
    row_data = calculate_flex(row_data, formatted_date)
    return row_data
def is_weekend():
    """True on Saturday (weekday 5) or Sunday (weekday 6)."""
    return datetime.today().weekday() in (5, 6)
def recalculate(row_data):
    """Recompute the flex/overtime column for every row in the log."""
    for entry in row_data:
        print('Recalculating {}'.format(entry[IDX_DATE]))
        row_data = calculate_flex(row_data, entry[IDX_DATE])
    return row_data
def end_previous(row_data):
    """Force every entry except today's to status 'Ended'."""
    today = get_current_date()
    for entry in row_data:
        if entry[IDX_DATE] != today:
            entry[IDX_STATUS] = 'Ended'
    return row_data
def ensure_columns(row_data):
    """Pad rows written by older file-format versions up to the current
    8-column layout (adds defaults for type, break, work time, status)."""
    for entry in row_data:
        if len(entry) < 5:
            entry.append('A')
        if len(entry) < 6:
            entry.append(str(CONFIG.daily_break_minutes))
        if len(entry) < 7:
            entry.append(str(CONFIG.daily_work_minutes))
        if len(entry) < 8:
            entry.append('Ended')
    return row_data
def main():
    """Entry point: load config, apply any CLI-requested edits, run the
    automatic lock-state handling, and persist the log when it changed."""
    load_configuration()
    args = parse_args()
    if args.config is True:
        configure()
        return
    filename = args.filename
    row_data = get_data(filename)
    # Work on bare entry rows; header/footer are re-added before saving.
    row_data = remove_headers_and_footer(row_data)
    row_data = ensure_columns(row_data)
    # Snapshot for the final changed-check so unchanged logs are not rewritten.
    original_data = copy.deepcopy(row_data)
    if args.recalculate is True:
        row_data = recalculate(row_data)
    if args.flex is True:
        row_data = add_flex(row_data, args.date)
    if args.start is not None:
        row_data = modify_start(row_data, args.date, args.start)
    if args.end is not None:
        row_data = modify_end(row_data, args.date, args.end)
    if args.addbreakmin is not None and args.addbreakmin > 0:
        row_data = add_break(row_data, args.date, args.addbreakmin)
    if args.removebreakmin is not None and args.removebreakmin > 0:
        row_data = add_break(row_data, args.date, -args.removebreakmin)
    if args.worktime is not None and args.worktime >= 0:
        row_data = set_worktime(row_data, args.date, args.worktime)
    row_data = handle_automatic(row_data)
    row_data = end_previous(row_data)
    # Keep the log sorted chronologically by the date column.
    row_data = sorted(row_data, key=lambda l:l[0])
    if is_changed(original_data, row_data):
        row_data = add_header(row_data)
        row_data = add_footer(row_data)
        write_data(filename, row_data)
if __name__ == '__main__':
    # Exit cleanly on Ctrl-C instead of printing a traceback.
    try:
        main()
    except KeyboardInterrupt:
        print('Interrupted by user!')
| true
| true
|
1c427247b13e3398f7417f8893567113ea48d937
| 550
|
py
|
Python
|
wikivir/app/migrations/0002_auto_20190507_0126.py
|
ALivingVendingMachine/wikivir
|
f642a91523653de54ac94735f9cf6e9160696c56
|
[
"MIT"
] | null | null | null |
wikivir/app/migrations/0002_auto_20190507_0126.py
|
ALivingVendingMachine/wikivir
|
f642a91523653de54ac94735f9cf6e9160696c56
|
[
"MIT"
] | null | null | null |
wikivir/app/migrations/0002_auto_20190507_0126.py
|
ALivingVendingMachine/wikivir
|
f642a91523653de54ac94735f9cf6e9160696c56
|
[
"MIT"
] | null | null | null |
# Generated by Django 2.2.1 on 2019-05-07 01:26
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: widen the objdump/readelf columns on
    MalwareSample to hold full tool output.

    NOTE(review): max_length=2560000 on a CharField is unusually large --
    a TextField is the conventional choice for unbounded text; confirm the
    database backend tolerates this length.
    """
    dependencies = [
        ('app', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='malwaresample',
            name='objdump',
            field=models.CharField(max_length=2560000),
        ),
        migrations.AlterField(
            model_name='malwaresample',
            name='readelf',
            field=models.CharField(max_length=2560000),
        ),
    ]
| 22.916667
| 55
| 0.581818
|
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('app', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='malwaresample',
name='objdump',
field=models.CharField(max_length=2560000),
),
migrations.AlterField(
model_name='malwaresample',
name='readelf',
field=models.CharField(max_length=2560000),
),
]
| true
| true
|
1c4272abf69356a8b1677f56657114f8b8c9d484
| 11,241
|
py
|
Python
|
huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/op_extend_info_replication.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 64
|
2020-06-12T07:05:07.000Z
|
2022-03-30T03:32:50.000Z
|
huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/op_extend_info_replication.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 11
|
2020-07-06T07:56:54.000Z
|
2022-01-11T11:14:40.000Z
|
huaweicloud-sdk-cbr/huaweicloudsdkcbr/v1/model/op_extend_info_replication.py
|
huaweicloud/huaweicloud-sdk-python-v3
|
7a6270390fcbf192b3882bf763e7016e6026ef78
|
[
"Apache-2.0"
] | 24
|
2020-06-08T11:42:13.000Z
|
2022-03-04T06:44:08.000Z
|
# coding: utf-8
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class OpExtendInfoReplication:
    """Extended operation info for a cross-region backup replication.

    Generated SDK model: plain string attributes with property accessors.

    Attributes:
        openapi_types (dict): The key is attribute name
                              and the value is attribute type.
        attribute_map (dict): The key is attribute name
                              and the value is json key in definition.
    """
    # Attribute names whose values are masked as "****" by to_dict().
    sensitive_list = []

    openapi_types = {
        'destination_backup_id': 'str',
        'destination_checkpoint_id': 'str',
        'destination_project_id': 'str',
        'destination_region': 'str',
        'source_backup_id': 'str',
        'source_checkpoint_id': 'str',
        'source_project_id': 'str',
        'source_region': 'str',
        'source_backup_name': 'str',
        'destination_backup_name': 'str'
    }

    attribute_map = {
        'destination_backup_id': 'destination_backup_id',
        'destination_checkpoint_id': 'destination_checkpoint_id',
        'destination_project_id': 'destination_project_id',
        'destination_region': 'destination_region',
        'source_backup_id': 'source_backup_id',
        'source_checkpoint_id': 'source_checkpoint_id',
        'source_project_id': 'source_project_id',
        'source_region': 'source_region',
        'source_backup_name': 'source_backup_name',
        'destination_backup_name': 'destination_backup_name'
    }

    def __init__(self, destination_backup_id=None, destination_checkpoint_id=None, destination_project_id=None, destination_region=None, source_backup_id=None, source_checkpoint_id=None, source_project_id=None, source_region=None, source_backup_name=None, destination_backup_name=None):
        """OpExtendInfoReplication - a model defined in huaweicloud sdk"""
        self._destination_backup_id = None
        self._destination_checkpoint_id = None
        self._destination_project_id = None
        self._destination_region = None
        self._source_backup_id = None
        self._source_checkpoint_id = None
        self._source_project_id = None
        self._source_region = None
        self._source_backup_name = None
        self._destination_backup_name = None
        self.discriminator = None
        if destination_backup_id is not None:
            self.destination_backup_id = destination_backup_id
        if destination_checkpoint_id is not None:
            self.destination_checkpoint_id = destination_checkpoint_id
        self.destination_project_id = destination_project_id
        self.destination_region = destination_region
        self.source_backup_id = source_backup_id
        if source_checkpoint_id is not None:
            self.source_checkpoint_id = source_checkpoint_id
        self.source_project_id = source_project_id
        self.source_region = source_region
        if source_backup_name is not None:
            self.source_backup_name = source_backup_name
        if destination_backup_name is not None:
            self.destination_backup_name = destination_backup_name

    @property
    def destination_backup_id(self):
        """Gets the destination_backup_id of this OpExtendInfoReplication.

        Destination backup ID.

        :return: The destination_backup_id of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._destination_backup_id

    @destination_backup_id.setter
    def destination_backup_id(self, destination_backup_id):
        """Sets the destination_backup_id of this OpExtendInfoReplication.

        Destination backup ID.

        :param destination_backup_id: The destination_backup_id of this OpExtendInfoReplication.
        :type: str
        """
        self._destination_backup_id = destination_backup_id

    @property
    def destination_checkpoint_id(self):
        """Gets the destination_checkpoint_id of this OpExtendInfoReplication.

        Destination restore point (checkpoint) ID.

        :return: The destination_checkpoint_id of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._destination_checkpoint_id

    @destination_checkpoint_id.setter
    def destination_checkpoint_id(self, destination_checkpoint_id):
        """Sets the destination_checkpoint_id of this OpExtendInfoReplication.

        Destination restore point (checkpoint) ID.

        :param destination_checkpoint_id: The destination_checkpoint_id of this OpExtendInfoReplication.
        :type: str
        """
        self._destination_checkpoint_id = destination_checkpoint_id

    @property
    def destination_project_id(self):
        """Gets the destination_project_id of this OpExtendInfoReplication.

        Destination project ID.

        :return: The destination_project_id of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._destination_project_id

    @destination_project_id.setter
    def destination_project_id(self, destination_project_id):
        """Sets the destination_project_id of this OpExtendInfoReplication.

        Destination project ID.

        :param destination_project_id: The destination_project_id of this OpExtendInfoReplication.
        :type: str
        """
        self._destination_project_id = destination_project_id

    @property
    def destination_region(self):
        """Gets the destination_region of this OpExtendInfoReplication.

        Destination region.

        :return: The destination_region of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._destination_region

    @destination_region.setter
    def destination_region(self, destination_region):
        """Sets the destination_region of this OpExtendInfoReplication.

        Destination region.

        :param destination_region: The destination_region of this OpExtendInfoReplication.
        :type: str
        """
        self._destination_region = destination_region

    @property
    def source_backup_id(self):
        """Gets the source_backup_id of this OpExtendInfoReplication.

        Source backup ID.

        :return: The source_backup_id of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._source_backup_id

    @source_backup_id.setter
    def source_backup_id(self, source_backup_id):
        """Sets the source_backup_id of this OpExtendInfoReplication.

        Source backup ID.

        :param source_backup_id: The source_backup_id of this OpExtendInfoReplication.
        :type: str
        """
        self._source_backup_id = source_backup_id

    @property
    def source_checkpoint_id(self):
        """Gets the source_checkpoint_id of this OpExtendInfoReplication.

        Source restore point (checkpoint) ID.

        :return: The source_checkpoint_id of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._source_checkpoint_id

    @source_checkpoint_id.setter
    def source_checkpoint_id(self, source_checkpoint_id):
        """Sets the source_checkpoint_id of this OpExtendInfoReplication.

        Source restore point (checkpoint) ID.

        :param source_checkpoint_id: The source_checkpoint_id of this OpExtendInfoReplication.
        :type: str
        """
        self._source_checkpoint_id = source_checkpoint_id

    @property
    def source_project_id(self):
        """Gets the source_project_id of this OpExtendInfoReplication.

        Source project ID.

        :return: The source_project_id of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._source_project_id

    @source_project_id.setter
    def source_project_id(self, source_project_id):
        """Sets the source_project_id of this OpExtendInfoReplication.

        Source project ID.

        :param source_project_id: The source_project_id of this OpExtendInfoReplication.
        :type: str
        """
        self._source_project_id = source_project_id

    @property
    def source_region(self):
        """Gets the source_region of this OpExtendInfoReplication.

        Source region.

        :return: The source_region of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._source_region

    @source_region.setter
    def source_region(self, source_region):
        """Sets the source_region of this OpExtendInfoReplication.

        Source region.

        :param source_region: The source_region of this OpExtendInfoReplication.
        :type: str
        """
        self._source_region = source_region

    @property
    def source_backup_name(self):
        """Gets the source_backup_name of this OpExtendInfoReplication.

        Source backup name.

        :return: The source_backup_name of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._source_backup_name

    @source_backup_name.setter
    def source_backup_name(self, source_backup_name):
        """Sets the source_backup_name of this OpExtendInfoReplication.

        Source backup name.

        :param source_backup_name: The source_backup_name of this OpExtendInfoReplication.
        :type: str
        """
        self._source_backup_name = source_backup_name

    @property
    def destination_backup_name(self):
        """Gets the destination_backup_name of this OpExtendInfoReplication.

        Destination backup name.

        :return: The destination_backup_name of this OpExtendInfoReplication.
        :rtype: str
        """
        return self._destination_backup_name

    @destination_backup_name.setter
    def destination_backup_name(self, destination_backup_name):
        """Sets the destination_backup_name of this OpExtendInfoReplication.

        Destination backup name.

        :param destination_backup_name: The destination_backup_name of this OpExtendInfoReplication.
        :type: str
        """
        self._destination_backup_name = destination_backup_name

    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                # Values of attributes listed in sensitive_list are masked.
                if attr in self.sensitive_list:
                    result[attr] = "****"
                else:
                    result[attr] = value
        return result

    def to_str(self):
        """Returns the string representation of the model"""
        import simplejson as json
        if six.PY2:
            import sys
            # Python 2: force the default encoding to UTF-8 so non-ASCII
            # field values (e.g. Chinese names) serialize without errors.
            reload(sys)
            sys.setdefaultencoding("utf-8")
        return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)

    def __repr__(self):
        """For `print`"""
        return self.to_str()

    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, OpExtendInfoReplication):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 31.844193
| 286
| 0.663731
|
import re
import six
from huaweicloudsdkcore.utils.http_utils import sanitize_for_serialization
class OpExtendInfoReplication:
sensitive_list = []
openapi_types = {
'destination_backup_id': 'str',
'destination_checkpoint_id': 'str',
'destination_project_id': 'str',
'destination_region': 'str',
'source_backup_id': 'str',
'source_checkpoint_id': 'str',
'source_project_id': 'str',
'source_region': 'str',
'source_backup_name': 'str',
'destination_backup_name': 'str'
}
attribute_map = {
'destination_backup_id': 'destination_backup_id',
'destination_checkpoint_id': 'destination_checkpoint_id',
'destination_project_id': 'destination_project_id',
'destination_region': 'destination_region',
'source_backup_id': 'source_backup_id',
'source_checkpoint_id': 'source_checkpoint_id',
'source_project_id': 'source_project_id',
'source_region': 'source_region',
'source_backup_name': 'source_backup_name',
'destination_backup_name': 'destination_backup_name'
}
def __init__(self, destination_backup_id=None, destination_checkpoint_id=None, destination_project_id=None, destination_region=None, source_backup_id=None, source_checkpoint_id=None, source_project_id=None, source_region=None, source_backup_name=None, destination_backup_name=None):
self._destination_backup_id = None
self._destination_checkpoint_id = None
self._destination_project_id = None
self._destination_region = None
self._source_backup_id = None
self._source_checkpoint_id = None
self._source_project_id = None
self._source_region = None
self._source_backup_name = None
self._destination_backup_name = None
self.discriminator = None
if destination_backup_id is not None:
self.destination_backup_id = destination_backup_id
if destination_checkpoint_id is not None:
self.destination_checkpoint_id = destination_checkpoint_id
self.destination_project_id = destination_project_id
self.destination_region = destination_region
self.source_backup_id = source_backup_id
if source_checkpoint_id is not None:
self.source_checkpoint_id = source_checkpoint_id
self.source_project_id = source_project_id
self.source_region = source_region
if source_backup_name is not None:
self.source_backup_name = source_backup_name
if destination_backup_name is not None:
self.destination_backup_name = destination_backup_name
@property
def destination_backup_id(self):
return self._destination_backup_id
@destination_backup_id.setter
def destination_backup_id(self, destination_backup_id):
self._destination_backup_id = destination_backup_id
@property
def destination_checkpoint_id(self):
return self._destination_checkpoint_id
@destination_checkpoint_id.setter
def destination_checkpoint_id(self, destination_checkpoint_id):
self._destination_checkpoint_id = destination_checkpoint_id
@property
def destination_project_id(self):
return self._destination_project_id
@destination_project_id.setter
def destination_project_id(self, destination_project_id):
self._destination_project_id = destination_project_id
@property
def destination_region(self):
return self._destination_region
@destination_region.setter
def destination_region(self, destination_region):
self._destination_region = destination_region
@property
def source_backup_id(self):
return self._source_backup_id
@source_backup_id.setter
def source_backup_id(self, source_backup_id):
self._source_backup_id = source_backup_id
@property
def source_checkpoint_id(self):
return self._source_checkpoint_id
@source_checkpoint_id.setter
def source_checkpoint_id(self, source_checkpoint_id):
self._source_checkpoint_id = source_checkpoint_id
@property
def source_project_id(self):
return self._source_project_id
@source_project_id.setter
def source_project_id(self, source_project_id):
self._source_project_id = source_project_id
@property
def source_region(self):
return self._source_region
@source_region.setter
def source_region(self, source_region):
self._source_region = source_region
@property
def source_backup_name(self):
return self._source_backup_name
@source_backup_name.setter
def source_backup_name(self, source_backup_name):
self._source_backup_name = source_backup_name
@property
def destination_backup_name(self):
return self._destination_backup_name
@destination_backup_name.setter
def destination_backup_name(self, destination_backup_name):
self._destination_backup_name = destination_backup_name
def to_dict(self):
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
if attr in self.sensitive_list:
result[attr] = "****"
else:
result[attr] = value
return result
def to_str(self):
import simplejson as json
if six.PY2:
import sys
reload(sys)
sys.setdefaultencoding("utf-8")
return json.dumps(sanitize_for_serialization(self), ensure_ascii=False)
def __repr__(self):
return self.to_str()
def __eq__(self, other):
if not isinstance(other, OpExtendInfoReplication):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
| true
| true
|
1c427478f97891dc6bc4fc30bf1182bc4ef3b923
| 1,895
|
py
|
Python
|
examples/vpc/v1/port.py
|
allenLew1991/huaweicloud-sdk-python
|
13d1931372ee096ac3065251c48db16ef6c045c9
|
[
"Apache-2.0"
] | 1
|
2019-11-12T07:44:16.000Z
|
2019-11-12T07:44:16.000Z
|
examples/vpc/v1/port.py
|
ArvinSong/huaweicloud-sdk-python
|
227cd7f68a07974b16794c371f110d1927571fd0
|
[
"Apache-2.0"
] | null | null | null |
examples/vpc/v1/port.py
|
ArvinSong/huaweicloud-sdk-python
|
227cd7f68a07974b16794c371f110d1927571fd0
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
# Copyright 2019 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import os
from openstack import connection
# Endpoint and credential placeholders -- replace before running.
auth_url = '******'
userDomainId = '******'
projectId = '******'
username = '******'
# The password is read from the environment to keep it out of the source.
password = os.getenv('get_secret_code')
# Shared OpenStack-style connection used by every example below.
conn = connection.Connection(
    auth_url=auth_url,
    user_domain_id=userDomainId,
    project_id=projectId,
    username=username,
    password=password
)
# Port used by the single-resource examples below.
_PORT_ID = '0a684452-6e1c-4f07-b53a-f3be419efe9c'

def test_ports(_conn):
    """List at most two ports and print them."""
    for port in _conn.vpcv1.ports(limit=2):
        print(port)

def test_get_port(_conn):
    """Fetch the example port by ID."""
    print(_conn.vpcv1.get_port(_PORT_ID))

def test_create_port(_conn):
    """Create a port on a fixed example network."""
    print(_conn.vpcv1.create_port(network_id="6df498a2-3480-4faf-b6e7-ac25a053bbbc"))

def test_update_port(_conn):
    """Rename the example port."""
    print(_conn.vpcv1.update_port(_PORT_ID, name="port_20190103"))

def test_delete_port(_conn):
    """Delete the example port."""
    print(_conn.vpcv1.delete_port(_PORT_ID))

def test_find_port(_conn):
    """Look up the example port."""
    print(_conn.vpcv1.find_port(_PORT_ID))
if __name__ == '__main__':
    # Smoke-test every port operation in sequence against the live endpoint.
    test_ports(conn)
    test_get_port(conn)
    test_create_port(conn)
    test_update_port(conn)
    test_delete_port(conn)
    test_find_port(conn)
| 24.934211
| 82
| 0.700264
|
import os
from openstack import connection
auth_url = '******'
userDomainId = '******'
projectId = '******'
username = '******'
password = os.getenv('get_secret_code')
conn = connection.Connection(
auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password
)
def test_ports(_conn):
query = {
"limit": 2
}
objs = _conn.vpcv1.ports(**query)
for obj in objs:
print(obj)
def test_get_port(_conn):
print(_conn.vpcv1.get_port('0a684452-6e1c-4f07-b53a-f3be419efe9c'))
def test_create_port(_conn):
data = {
"network_id": "6df498a2-3480-4faf-b6e7-ac25a053bbbc"
}
print(_conn.vpcv1.create_port(**data))
def test_update_port(_conn):
data = {
"name": "port_20190103"
}
print(_conn.vpcv1.update_port('0a684452-6e1c-4f07-b53a-f3be419efe9c', **data))
def test_delete_port(_conn):
print(_conn.vpcv1.delete_port('0a684452-6e1c-4f07-b53a-f3be419efe9c'))
def test_find_port(_conn):
print(_conn.vpcv1.find_port('0a684452-6e1c-4f07-b53a-f3be419efe9c'))
if __name__ == '__main__':
test_ports(conn)
test_get_port(conn)
test_create_port(conn)
test_update_port(conn)
test_delete_port(conn)
test_find_port(conn)
| true
| true
|
1c42756ae3f5204c2d9f241ef62016ce02c92a72
| 1,908
|
py
|
Python
|
tone.py
|
Gredelston/johann
|
2c85238ad603987686d2d6c7be6abe806b963e02
|
[
"MIT"
] | 2
|
2019-06-19T14:09:09.000Z
|
2019-06-19T14:21:05.000Z
|
tone.py
|
Gredelston/johann
|
2c85238ad603987686d2d6c7be6abe806b963e02
|
[
"MIT"
] | 1
|
2019-06-19T02:18:34.000Z
|
2019-06-19T02:18:34.000Z
|
tone.py
|
Gredelston/johann
|
2c85238ad603987686d2d6c7be6abe806b963e02
|
[
"MIT"
] | null | null | null |
# MIDI numbers for octave 2 only; other octaves are derived by adding or
# subtracting 12 semitones per octave (see name_to_midi_number).
# Enharmonic spellings (e.g. C#2/Db2) map to the same number.
MIDI_NUMBERS = {
    'C2': 36,
    'B#2': 36,
    'C#2': 37,
    'Db2': 37,
    'D2': 38,
    'D#2': 39,
    'Eb2': 39,
    'E2': 40,
    'Fb2': 40,
    'F2': 41,
    'E#2': 41,
    'F#2': 42,
    'Gb2': 42,
    'G2': 43,
    'G#2': 44,
    'Ab2': 44,
    'A2': 45,
    'A#2': 46,
    'Bb2': 46,
    'B2': 47,
    'Cb2': 47,
}
# Frequency ratio between adjacent semitones in 12-tone equal temperament.
SEMITONE_RATIO = 2**(1/12.)
# Concert-pitch reference: A4 = 440 Hz, MIDI note number 69.
A4_FREQUENCY = 440
A4_MIDI_NUMBER = 69
class Tone(object):
    """Abstract representation of a pitch, identified by its MIDI number.

    Tones compare by MIDI number and subtracting two tones gives their
    distance in semitones.
    """

    def __init__(self, midi_number):
        self.midi_number = midi_number
        # Frequency in Hz, derived once from the MIDI number.
        self.freq = midi_number_to_freq(self.midi_number)

    def __repr__(self):
        return 'Tone(%r)' % (self.midi_number,)

    def __eq__(self, other):
        # Guard so comparing against a non-Tone does not raise
        # AttributeError; Python then falls back to identity/other side.
        if not isinstance(other, Tone):
            return NotImplemented
        return self.midi_number == other.midi_number

    def __hash__(self):
        # Defining __eq__ alone would make instances unhashable; equal
        # tones must hash equally so they can live in sets/dict keys.
        return hash(self.midi_number)

    def __lt__(self, other):
        return self.midi_number < other.midi_number

    def __gt__(self, other):
        return self.midi_number > other.midi_number

    def __sub__(self, other):
        """Subtract to determine the semitone distance between two tones."""
        return self.midi_number - other.midi_number
def midi_number_to_freq(midi_number):
    """Return the equal-tempered frequency in Hz for a MIDI note number."""
    # Anchor on A4 (440 Hz) and scale by one semitone ratio per step.
    semitone_offset = midi_number - A4_MIDI_NUMBER
    return A4_FREQUENCY * SEMITONE_RATIO ** semitone_offset
def name_to_midi_number(name):
    """Given a pitch name (e.g. 'A4'), calculate its midi number.

    Generalizes the original single-digit handling: any trailing run of
    digits is treated as the octave, so names like 'A10' also work. The
    octave-2 table is the anchor; each octave shifts by 12 semitones.

    Raises KeyError if the pitch class is not in MIDI_NUMBERS and
    ValueError if no octave digits are present.
    """
    # Split the trailing octave digits off the pitch-class name.
    split = len(name)
    while split > 0 and name[split - 1].isdigit():
        split -= 1
    pitch = name[:split]
    octave = int(name[split:])
    # Direct arithmetic replaces the original one-octave-at-a-time
    # recursion: 12 semitones per octave away from the reference octave 2.
    return MIDI_NUMBERS[pitch + '2'] + (octave - 2) * 12
def name_to_tone(name):
    """Build a Tone from a pitch name such as 'A4'."""
    return Tone(name_to_midi_number(name))
| 23.268293
| 76
| 0.602725
|
MIDI_NUMBERS = {
'C2': 36,
'B#2': 36,
'C#2': 37,
'Db2': 37,
'D2': 38,
'D#2': 39,
'Eb2': 39,
'E2': 40,
'Fb2': 40,
'F2': 41,
'E#2': 41,
'F#2': 42,
'Gb2': 42,
'G2': 43,
'G#2': 44,
'Ab2': 44,
'A2': 45,
'A#2': 46,
'Bb2': 46,
'B2': 47,
'Cb2': 47,
}
SEMITONE_RATIO = 2**(1/12.)
A4_FREQUENCY = 440
A4_MIDI_NUMBER = 69
class Tone(object):
def __init__(self, midi_number):
self.midi_number = midi_number
self.freq = midi_number_to_freq(self.midi_number)
def __eq__(self, other):
return self.midi_number == other.midi_number
def __lt__(self, other):
return self.midi_number < other.midi_number
def __gt__(self, other):
return self.midi_number > other.midi_number
def __sub__(self, other):
return self.midi_number - other.midi_number
def midi_number_to_freq(midi_number):
semitones_above_a4 = midi_number - A4_MIDI_NUMBER
return A4_FREQUENCY * (SEMITONE_RATIO ** semitones_above_a4)
def name_to_midi_number(name):
pitch = name[:-1]
octave = int(name[-1])
if octave == 2:
return MIDI_NUMBERS[name]
elif octave < 2:
one_octave_higher_name = pitch + str(octave + 1)
return name_to_midi_number(one_octave_higher_name) - 12
elif octave > 2:
one_octave_lower_name = pitch + str(octave - 1)
return name_to_midi_number(one_octave_lower_name) + 12
def name_to_tone(name):
midi_number = name_to_midi_number(name)
return Tone(midi_number)
| true
| true
|
1c42760a4b46ae1882ca453c7be6b790b10b0c88
| 1,023
|
py
|
Python
|
python3.4Smartforest/lib/python3.4/site-packages/django/http/utils.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
python3.4Smartforest/lib/python3.4/site-packages/django/http/utils.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
python3.4Smartforest/lib/python3.4/site-packages/django/http/utils.py
|
letouriste001/SmartForest_2.0
|
109b78bf1e8c8404800f377ab969395ccbb617be
|
[
"MIT"
] | null | null | null |
"""
Functions that modify an HTTP request or response in some way.
"""
# This group of functions are run as part of the response handling, after
# everything else, including all response middleware. Think of them as
# "compulsory response middleware". Be careful about what goes here, because
# it's a little fiddly to override this behavior, so they should be truly
# universally applicable.
def conditional_content_removal(request, response):
    """
    Remove the body of any response that must not carry one.

    Per RFC 7230, section 3.3.3: 1xx, 204 and 304 responses, and any
    response to a HEAD request, have no message body.
    """
    def _strip_body():
        # Streaming responses carry an iterable instead of raw bytes.
        if response.streaming:
            response.streaming_content = []
        else:
            response.content = b''

    if 100 <= response.status_code < 200 or response.status_code in (204, 304):
        _strip_body()
        response['Content-Length'] = '0'
    if request.method == 'HEAD':
        _strip_body()
    return response
| 1,023
| 1,023
| 0.675464
| true
| true
|
|
1c4276d0c4945e82034c9eb915df6333caeacd86
| 1,042
|
py
|
Python
|
docs/examples/cors_rest_server.py
|
rob-blackbourn/bareasgi
|
c83e3020edcb5e1650b6137c21d7f3659467e687
|
[
"Apache-2.0"
] | 15
|
2019-02-23T11:09:06.000Z
|
2020-01-01T09:25:33.000Z
|
docs/examples/cors_rest_server.py
|
rob-blackbourn/bareASGI
|
5966ab6b54bc99420b06fb90cff05d15447e98b7
|
[
"Apache-2.0"
] | 1
|
2021-09-20T10:55:49.000Z
|
2021-09-20T10:55:49.000Z
|
docs/examples/cors_rest_server.py
|
rob-blackbourn/bareasgi
|
c83e3020edcb5e1650b6137c21d7f3659467e687
|
[
"Apache-2.0"
] | 1
|
2021-01-11T09:09:29.000Z
|
2021-01-11T09:09:29.000Z
|
"""CORS REST server"""
import json
import logging
from bareasgi import (
Application,
HttpRequest,
HttpResponse,
text_reader,
text_writer
)
from bareasgi_cors import CORSMiddleware
logging.basicConfig(level=logging.DEBUG)
async def get_info(request: HttpRequest) -> HttpResponse:
    """GET handler: return the application's info dict as a JSON body."""
    text = json.dumps(request.info)
    return HttpResponse(
        200,
        [(b'content-type', b'application/json')],
        text_writer(text)
    )
async def set_info(request: HttpRequest) -> HttpResponse:
    """POST handler: merge the JSON request body into the app info dict."""
    text = await text_reader(request.body)
    data = json.loads(text)
    request.info.update(data)
    # 204 No Content: update accepted, nothing to return.
    return HttpResponse(204)
if __name__ == "__main__":
    import uvicorn
    # CORS is enabled for all routes via middleware; OPTIONS is routed to
    # the POST handler so preflight requests succeed.
    cors_middleware = CORSMiddleware()
    app = Application(
        info={'name': 'Michael Caine'},
        middlewares=[cors_middleware]
    )
    app.http_router.add({'GET'}, '/info', get_info)
    app.http_router.add({'POST', 'OPTIONS'}, '/info', set_info)
    uvicorn.run(app, port=9010)
| 20.84
| 63
| 0.659309
|
import json
import logging
from bareasgi import (
Application,
HttpRequest,
HttpResponse,
text_reader,
text_writer
)
from bareasgi_cors import CORSMiddleware
logging.basicConfig(level=logging.DEBUG)
async def get_info(request: HttpRequest) -> HttpResponse:
text = json.dumps(request.info)
return HttpResponse(
200,
[(b'content-type', b'application/json')],
text_writer(text)
)
async def set_info(request: HttpRequest) -> HttpResponse:
text = await text_reader(request.body)
data = json.loads(text)
request.info.update(data)
return HttpResponse(204)
if __name__ == "__main__":
import uvicorn
cors_middleware = CORSMiddleware()
app = Application(
info={'name': 'Michael Caine'},
middlewares=[cors_middleware]
)
app.http_router.add({'GET'}, '/info', get_info)
app.http_router.add({'POST', 'OPTIONS'}, '/info', set_info)
uvicorn.run(app, port=9010)
| true
| true
|
1c4277779a0006e73024bd536ca755198ff017a2
| 2,434
|
py
|
Python
|
tests/test_cifti_vis_recon_all.py
|
lgrennan/ciftify
|
8488423bd081370614b676a2e1d1a8dbfd9aba1c
|
[
"MIT"
] | null | null | null |
tests/test_cifti_vis_recon_all.py
|
lgrennan/ciftify
|
8488423bd081370614b676a2e1d1a8dbfd9aba1c
|
[
"MIT"
] | null | null | null |
tests/test_cifti_vis_recon_all.py
|
lgrennan/ciftify
|
8488423bd081370614b676a2e1d1a8dbfd9aba1c
|
[
"MIT"
] | null | null | null |
import unittest
import logging
import importlib
import random
from mock import patch, MagicMock, mock_open
from nose.tools import raises
recon = importlib.import_module('ciftify.bin.cifti_vis_recon_all')
logging.disable(logging.CRITICAL)
class TestUserSettings(unittest.TestCase):
    """UserSettings should reject QC modes it does not know about."""

    @raises(SystemExit)
    def test_exits_gracefully_when_user_supplies_undefined_qc_mode(self):
        args = {'<subject>': 'some_subject', '<QCmode>': 'new_mode'}
        recon.UserSettings(args)
        # Only reachable if UserSettings failed to exit.
        assert False
class TestWriteSingleQCPage(unittest.TestCase):
    """write_single_qc_page should be a no-op when the page already exists."""

    @patch('ciftify.bin.cifti_vis_recon_all.generate_qc_page')
    @patch('os.path.exists')
    def test_exits_without_doing_work_if_page_exists(
            self, mock_exists, mock_generate):
        # Pretend the QC page is already on disk.
        mock_exists.return_value = True

        class SettingsStub(object):
            def __init__(self):
                self.qc_dir = '/some/path/qc'
                self.subject = 'subject_1'
                self.hcp_dir = '/some/other/path/hcp'

        recon.write_single_qc_page(SettingsStub(), None)
        # The generator must not have been invoked at all.
        assert mock_generate.call_count == 0
class TestModifyTemplateContents(unittest.TestCase):
    """modify_template_contents should substitute every placeholder."""

    original_vals = ['HCP_DATA_PATH', 'SUBJID']

    def test_expected_strings_are_replaced(self):
        settings = self.get_settings()
        contents = get_template_contents(self.original_vals)
        modified = recon.modify_template_contents(contents, settings)
        for placeholder in self.original_vals:
            assert placeholder not in modified

    def get_settings(self):
        # Minimal stand-in exposing only the attributes the code reads.
        class SettingsStub(object):
            def __init__(self):
                self.hcp_dir = '/path/num1'
                self.subject = 'subject_id'
        return SettingsStub()
def get_template_contents(keys):
    """Return shuffled filler text guaranteed to contain every string in *keys*."""
    # Not a stroke — just randomly generated filler sentences.
    filler = ['Behind sooner dining so window excuse he summer.',
              ' Breakfast met certainty and fulfilled propriety led. ',
              ' Waited get either are wooded little her. Contrasted ',
              'unreserved as mr particular collecting it everything as ',
              'indulgence. Seems ask meant merry could put. Age old begin ',
              'had boy noisy table front whole given.']
    filler.extend(keys)
    random.shuffle(filler)
    return ' '.join(filler)
| 34.771429
| 74
| 0.67954
|
import unittest
import logging
import importlib
import random
from mock import patch, MagicMock, mock_open
from nose.tools import raises
recon = importlib.import_module('ciftify.bin.cifti_vis_recon_all')
logging.disable(logging.CRITICAL)
class TestUserSettings(unittest.TestCase):
@raises(SystemExit)
def test_exits_gracefully_when_user_supplies_undefined_qc_mode(self):
arguments = {'<subject>': 'some_subject',
'<QCmode>': 'new_mode'}
recon.UserSettings(arguments)
assert False
class TestWriteSingleQCPage(unittest.TestCase):
@patch('ciftify.bin.cifti_vis_recon_all.generate_qc_page')
@patch('os.path.exists')
def test_exits_without_doing_work_if_page_exists(self, mock_exists,
mock_generate):
mock_exists.return_value = True
class SettingsStub(object):
def __init__(self):
self.qc_dir = '/some/path/qc'
self.subject = 'subject_1'
self.hcp_dir = '/some/other/path/hcp'
recon.write_single_qc_page(SettingsStub(), None)
assert mock_generate.call_count == 0
class TestModifyTemplateContents(unittest.TestCase):
original_vals = ['HCP_DATA_PATH', 'SUBJID']
def test_expected_strings_are_replaced(self):
settings = self.get_settings()
template_contents = get_template_contents(self.original_vals)
modified_text = recon.modify_template_contents(template_contents,
settings)
for val in self.original_vals:
assert val not in modified_text
def get_settings(self):
class SettingsStub(object):
def __init__(self):
self.hcp_dir = '/path/num1'
self.subject = 'subject_id'
return SettingsStub()
def get_template_contents(keys):
mock_contents = ['Behind sooner dining so window excuse he summer.',
' Breakfast met certainty and fulfilled propriety led. ',
' Waited get either are wooded little her. Contrasted ',
'unreserved as mr particular collecting it everything as ',
'indulgence. Seems ask meant merry could put. Age old begin ',
'had boy noisy table front whole given.']
mock_contents.extend(keys)
random.shuffle(mock_contents)
template_contents = ' '.join(mock_contents)
return template_contents
| true
| true
|
1c4278254d38a54507753c51c7f075a29ccdb76d
| 761
|
py
|
Python
|
textattack/constraints/overlap/levenshtein_edit_distance.py
|
fighting41love/TextAttack
|
24e48f0022dc3a7bdcd5cbb3430f1c72cfcb522d
|
[
"MIT"
] | 2
|
2020-07-08T08:55:37.000Z
|
2020-09-03T00:57:38.000Z
|
textattack/constraints/overlap/levenshtein_edit_distance.py
|
fighting41love/TextAttack
|
24e48f0022dc3a7bdcd5cbb3430f1c72cfcb522d
|
[
"MIT"
] | null | null | null |
textattack/constraints/overlap/levenshtein_edit_distance.py
|
fighting41love/TextAttack
|
24e48f0022dc3a7bdcd5cbb3430f1c72cfcb522d
|
[
"MIT"
] | null | null | null |
import editdistance
from textattack.constraints import Constraint
class LevenshteinEditDistance(Constraint):
    """A constraint capping the Levenshtein (edit) distance from the original."""

    def __init__(self, max_edit_distance):
        # Fail fast on a non-integer threshold.
        if not isinstance(max_edit_distance, int):
            raise TypeError('max_edit_distance must be an int')
        self.max_edit_distance = max_edit_distance

    def _check_constraint(self, transformed_text, current_text, original_text=None):
        # With no reference text the constraint is trivially satisfied.
        if not original_text:
            return True
        distance = editdistance.eval(original_text.text, transformed_text.text)
        return distance <= self.max_edit_distance

    def extra_repr_keys(self):
        return ['max_edit_distance']
| 33.086957
| 84
| 0.704336
|
import editdistance
from textattack.constraints import Constraint
class LevenshteinEditDistance(Constraint):
def __init__(self, max_edit_distance):
if not isinstance(max_edit_distance, int):
raise TypeError('max_edit_distance must be an int')
self.max_edit_distance = max_edit_distance
def _check_constraint(self, transformed_text, current_text, original_text=None):
if not original_text:
return True
edit_distance = editdistance.eval(original_text.text, transformed_text.text)
return edit_distance <= self.max_edit_distance
def extra_repr_keys(self):
return ['max_edit_distance']
| true
| true
|
1c427830e6cb82adabfc919a96e8b7c6985a9f0c
| 2,943
|
py
|
Python
|
pystiche/image/transforms/functional/_crop.py
|
sourcery-ai-bot/pystiche
|
57bc6cfd074d47e8ad98a4f6e570b92ce37131bc
|
[
"BSD-3-Clause"
] | null | null | null |
pystiche/image/transforms/functional/_crop.py
|
sourcery-ai-bot/pystiche
|
57bc6cfd074d47e8ad98a4f6e570b92ce37131bc
|
[
"BSD-3-Clause"
] | null | null | null |
pystiche/image/transforms/functional/_crop.py
|
sourcery-ai-bot/pystiche
|
57bc6cfd074d47e8ad98a4f6e570b92ce37131bc
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Tuple, Union, cast
import torch
from pystiche.image.utils import (
extract_image_size,
force_batched_image,
is_edge_size,
is_image_size,
)
from pystiche.misc import verify_str_arg
__all__ = [
"crop",
"top_left_crop",
"bottom_left_crop",
"top_right_crop",
"bottom_right_crop",
"center_crop",
]
def _parse_size(size: Union[Tuple[int, int], int]) -> Tuple[int, int]:
    """Normalize *size* to a (height, width) tuple.

    Accepts either a full image size or a single edge size, which is then
    used for both dimensions.

    Raises:
        RuntimeError: if *size* is neither an image size nor an edge size.
    """
    if is_image_size(size):
        return cast(Tuple[int, int], size)
    elif is_edge_size(size):
        edge_size = cast(int, size)
        return edge_size, edge_size
    else:
        # Fail with context instead of a bare, message-less RuntimeError.
        raise RuntimeError(
            f"size should be a valid image size or edge size, but got {size}"
        )
@force_batched_image
def crop(
    x: torch.Tensor,
    origin: Tuple[int, int],
    size: Union[Tuple[int, int], int],
    vert_anchor: str = "top",
    horz_anchor: str = "left",
) -> torch.Tensor:
    """Crop a window out of an image tensor.

    Args:
        x: image tensor; indexed as (batch, channels, height, width) —
            presumably guaranteed by ``force_batched_image`` (confirm there).
        origin: (vertical, horizontal) anchor coordinates of the crop.
        size: (height, width) tuple, or a single edge size used for both.
        vert_anchor: "top" if ``origin`` marks the crop's top edge,
            "bottom" if it marks the bottom edge.
        horz_anchor: "left" or "right", analogous to ``vert_anchor``.

    Raises:
        ValueError: for an unrecognized anchor string (via verify_str_arg).
    """
    verify_str_arg(vert_anchor, "vert_anchor", ("top", "bottom"))
    verify_str_arg(horz_anchor, "horz_anchor", ("left", "right"))
    vert_origin, horz_origin = origin
    height, width = _parse_size(size)
    def create_vert_slice() -> slice:
        # The anchor decides whether the crop extends downward or upward
        # from the origin row.
        if vert_anchor == "top":
            return slice(vert_origin, vert_origin + height)
        else:  # vert_anchor == "bottom"
            return slice(vert_origin - height, vert_origin)
    def create_horz_slice() -> slice:
        if horz_anchor == "left":
            return slice(horz_origin, horz_origin + width)
        else:  # horz_anchor == "right"
            return slice(horz_origin - width, horz_origin)
    vert_slice = create_vert_slice()
    horz_slice = create_horz_slice()
    return x[:, :, vert_slice, horz_slice]
def top_left_crop(x: torch.Tensor, size: Union[Tuple[int, int], int]) -> torch.Tensor:
    """Crop *size* pixels starting from the image's top-left corner."""
    return cast(
        torch.Tensor, crop(x, (0, 0), size, vert_anchor="top", horz_anchor="left")
    )
def bottom_left_crop(
    x: torch.Tensor, size: Union[Tuple[int, int], int]
) -> torch.Tensor:
    """Crop *size* pixels anchored at the image's bottom-left corner."""
    image_height = extract_image_size(x)[0]
    return cast(
        torch.Tensor,
        crop(x, (image_height, 0), size, vert_anchor="bottom", horz_anchor="left"),
    )
def top_right_crop(x: torch.Tensor, size: Union[Tuple[int, int], int]) -> torch.Tensor:
    """Crop *size* pixels anchored at the image's top-right corner."""
    image_width = extract_image_size(x)[1]
    return cast(
        torch.Tensor,
        crop(x, (0, image_width), size, vert_anchor="top", horz_anchor="right"),
    )
def bottom_right_crop(
    x: torch.Tensor, size: Union[Tuple[int, int], int]
) -> torch.Tensor:
    """Crop *size* pixels anchored at the image's bottom-right corner."""
    return cast(
        torch.Tensor,
        crop(x, extract_image_size(x), size, vert_anchor="bottom", horz_anchor="right"),
    )
def center_crop(x: torch.Tensor, size: Union[Tuple[int, int], int]) -> torch.Tensor:
    """Crop a centered window of *size* pixels from the image."""
    full_height, full_width = extract_image_size(x)
    crop_height, crop_width = _parse_size(size)
    # Center the window by splitting the leftover margin evenly.
    origin = ((full_height - crop_height) // 2, (full_width - crop_width) // 2)
    return cast(torch.Tensor, crop(x, origin, size))
| 28.298077
| 87
| 0.646279
|
from typing import Tuple, Union, cast
import torch
from pystiche.image.utils import (
extract_image_size,
force_batched_image,
is_edge_size,
is_image_size,
)
from pystiche.misc import verify_str_arg
__all__ = [
"crop",
"top_left_crop",
"bottom_left_crop",
"top_right_crop",
"bottom_right_crop",
"center_crop",
]
def _parse_size(size: Union[Tuple[int, int], int]) -> Tuple[int, int]:
if is_image_size(size):
return cast(Tuple[int, int], size)
elif is_edge_size(size):
edge_size = cast(int, size)
return edge_size, edge_size
else:
raise RuntimeError
@force_batched_image
def crop(
x: torch.Tensor,
origin: Tuple[int, int],
size: Union[Tuple[int, int], int],
vert_anchor: str = "top",
horz_anchor: str = "left",
) -> torch.Tensor:
verify_str_arg(vert_anchor, "vert_anchor", ("top", "bottom"))
verify_str_arg(horz_anchor, "horz_anchor", ("left", "right"))
vert_origin, horz_origin = origin
height, width = _parse_size(size)
def create_vert_slice() -> slice:
if vert_anchor == "top":
return slice(vert_origin, vert_origin + height)
else:
return slice(vert_origin - height, vert_origin)
def create_horz_slice() -> slice:
if horz_anchor == "left":
return slice(horz_origin, horz_origin + width)
else:
return slice(horz_origin - width, horz_origin)
vert_slice = create_vert_slice()
horz_slice = create_horz_slice()
return x[:, :, vert_slice, horz_slice]
def top_left_crop(x: torch.Tensor, size: Union[Tuple[int, int], int]) -> torch.Tensor:
origin = (0, 0)
return cast(
torch.Tensor, crop(x, origin, size, vert_anchor="top", horz_anchor="left")
)
def bottom_left_crop(
x: torch.Tensor, size: Union[Tuple[int, int], int]
) -> torch.Tensor:
height, _ = extract_image_size(x)
origin = (height, 0)
return cast(
torch.Tensor, crop(x, origin, size, vert_anchor="bottom", horz_anchor="left")
)
def top_right_crop(x: torch.Tensor, size: Union[Tuple[int, int], int]) -> torch.Tensor:
_, width = extract_image_size(x)
origin = (0, width)
return cast(
torch.Tensor, crop(x, origin, size, vert_anchor="top", horz_anchor="right")
)
def bottom_right_crop(
x: torch.Tensor, size: Union[Tuple[int, int], int]
) -> torch.Tensor:
origin = extract_image_size(x)
return cast(
torch.Tensor, crop(x, origin, size, vert_anchor="bottom", horz_anchor="right")
)
def center_crop(x: torch.Tensor, size: Union[Tuple[int, int], int]) -> torch.Tensor:
image_size = extract_image_size(x)
size = _parse_size(size)
vert_origin = (image_size[0] - size[0]) // 2
horz_origin = (image_size[1] - size[1]) // 2
return cast(torch.Tensor, crop(x, (vert_origin, horz_origin), size))
| true
| true
|
1c4279bd3f0aba85457a2b68efce65dc3e1e88d8
| 248
|
py
|
Python
|
text/combilex.py
|
dan-wells/FastSpeech2
|
5a3b18a5abced0909b6192cbf75aa90e1c2b3141
|
[
"MIT"
] | null | null | null |
text/combilex.py
|
dan-wells/FastSpeech2
|
5a3b18a5abced0909b6192cbf75aa90e1c2b3141
|
[
"MIT"
] | null | null | null |
text/combilex.py
|
dan-wells/FastSpeech2
|
5a3b18a5abced0909b6192cbf75aa90e1c2b3141
|
[
"MIT"
] | null | null | null |
# Combilex phone set accepted as valid phoneme symbols. Note the
# multi-character affricates ('dZ', 'tS'), nasalized vowels ('e~', 'o~')
# and syllabic consonants ('l=', 'm=', 'n=').
valid_symbols = [
    '3', '5', '@', 'A', 'D', 'E', 'I', 'N', 'O', 'S', 'T', 'U', 'V', 'Z',
    'a', 'b', 'd', 'dZ', 'e', 'e~', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'l=',
    'm', 'm=', 'n', 'n=', 'o~', 'p', 'r', 's', 't', 'tS', 'u', 'v', 'w', 'z'
]
| 41.333333
| 76
| 0.225806
|
valid_symbols = [
'3', '5', '@', 'A', 'D', 'E', 'I', 'N', 'O', 'S', 'T', 'U', 'V', 'Z',
'a', 'b', 'd', 'dZ', 'e', 'e~', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'l=',
'm', 'm=', 'n', 'n=', 'o~', 'p', 'r', 's', 't', 'tS', 'u', 'v', 'w', 'z'
]
| true
| true
|
1c4279f080c7b2a4d74637ea2ab486b9984af5d2
| 2,808
|
py
|
Python
|
vitrage-4.3.1/vitrage/tests/unit/common/test_utils.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 89
|
2015-09-30T21:42:17.000Z
|
2022-03-28T16:31:19.000Z
|
vitrage-4.3.1/vitrage/tests/unit/common/test_utils.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 5
|
2019-08-14T06:46:03.000Z
|
2021-12-13T20:01:25.000Z
|
vitrage-4.3.1/vitrage/tests/unit/common/test_utils.py
|
scottwedge/OpenStack-Stein
|
7077d1f602031dace92916f14e36b124f474de15
|
[
"Apache-2.0"
] | 43
|
2015-11-04T15:54:27.000Z
|
2021-12-10T14:24:03.000Z
|
# Copyright 2016 - Alcatel-Lucent
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import itertools
from vitrage.common import utils
from vitrage.tests import base
from vitrage.tests.base import IsEmpty
class UtilsTest(base.BaseTest):
    """Unit tests for vitrage.common.utils.get_portion."""

    def _assert_set_equal(self, first, second, message):
        # Compare as dicts so the base class's dict assertion can be reused.
        self.assert_dict_equal(
            dict.fromkeys(first, 0), dict.fromkeys(second, 0), message)

    def test_get_portion(self):
        # Chunk counts below, equal to and above the number of items.
        items = list(range(14))
        for count in (4, 3, 2, 1, 51, 100):
            self._check_portions(items, count)
        self._check_portions([0], 10)
        self._check_portions([], 2)
        # Invalid parameter combinations must raise.
        self._check_portions_bad_params([], 0, 0)
        self._check_portions_bad_params([], -1, 0)
        self._check_portions_bad_params([], 10, 10)

    def _check_portions_bad_params(self, all_items, num, ind):
        caught = None
        try:
            utils.get_portion(all_items, num, ind)
        except Exception as err:
            caught = err
        self.assertIsNotNone(caught, 'get_portion incorrect params')

    def _check_portions(self, all_items, chunks_count):
        chunks = [set(utils.get_portion(all_items, chunks_count, idx))
                  for idx in range(chunks_count)]
        # All chunks together must cover exactly the original items.
        merged = (item for item in itertools.chain(*chunks))
        self._assert_set_equal(merged, set(all_items), 'chunks union differs')
        # No item may appear in two different chunks.
        for i, j in itertools.combinations(range(len(chunks)), 2):
            self.assertThat(chunks[i].intersection(chunks[j]), IsEmpty(),
                            "Each two chunks should not have "
                            "intersecting items")
        # Chunks must be balanced to within a single item.
        largest = max(len(chunk) for chunk in chunks)
        smallest = min(len(chunk) for chunk in chunks)
        expected_max_difference = 1 if len(all_items) % len(chunks) else 0
        self.assertEqual(expected_max_difference, largest - smallest,
                         'chunks sizes should not differ by more than 1')
| 39
| 77
| 0.656695
|
import itertools
from vitrage.common import utils
from vitrage.tests import base
from vitrage.tests.base import IsEmpty
class UtilsTest(base.BaseTest):
def _assert_set_equal(self, s1, s2, message):
self.assert_dict_equal(dict.fromkeys(s1, 0),
dict.fromkeys(s2, 0),
message)
def test_get_portion(self):
all_items = list(range(14))
self._check_portions(all_items, 4)
self._check_portions(all_items, 3)
self._check_portions(all_items, 2)
self._check_portions(all_items, 1)
self._check_portions(all_items, 51)
self._check_portions(all_items, 100)
all_items = [0]
self._check_portions(all_items, 10)
all_items = []
self._check_portions(all_items, 2)
self._check_portions_bad_params(all_items, 0, 0)
self._check_portions_bad_params(all_items, -1, 0)
self._check_portions_bad_params(all_items, 10, 10)
def _check_portions_bad_params(self, all_items, num, ind):
exception = None
try:
utils.get_portion(all_items, num, ind)
except Exception as e:
exception = e
self.assertIsNotNone(exception, 'get_portion incorrect params')
def _check_portions(self, all_items, chunks_count):
chunks = []
for i in range(chunks_count):
chunks.append(set(utils.get_portion(all_items, chunks_count, i)))
union = (a for a in itertools.chain(*chunks))
self._assert_set_equal(union, set(all_items), 'chunks union differs')
combinations = itertools.combinations(range(len(chunks)), 2)
for i, j in combinations:
self.assertThat(chunks[i].intersection(chunks[j]), IsEmpty(),
"Each two chunks should not have "
"intersecting items")
max_size = len(max(chunks, key=lambda x: len(x)))
min_size = len(min(chunks, key=lambda x: len(x)))
expected_max_difference = 1 if len(all_items) % len(chunks) else 0
self.assertEqual(expected_max_difference, max_size - min_size,
'chunks sizes should not differ by more than 1')
| true
| true
|
1c427c528ea8c95efada7e9ee342ac634c701320
| 5,867
|
py
|
Python
|
pcapparser/packet_parser.py
|
sangyf/pcap-parser
|
933b792d6588c7eb5c6a270f989748dc8b3c1d61
|
[
"BSD-3-Clause"
] | 1
|
2021-02-23T03:35:34.000Z
|
2021-02-23T03:35:34.000Z
|
pcapparser/packet_parser.py
|
sangyf/pcap-parser
|
933b792d6588c7eb5c6a270f989748dc8b3c1d61
|
[
"BSD-3-Clause"
] | null | null | null |
pcapparser/packet_parser.py
|
sangyf/pcap-parser
|
933b792d6588c7eb5c6a270f989748dc8b3c1d61
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import unicode_literals, print_function, division
__author__ = 'dongliu'
import struct
import socket
from pcapparser.constant import *
class TcpPack:
    """A single TCP segment: endpoint addressing, header fields and payload."""

    def __init__(self, source, source_port, dest, dest_port, flags, seq, ack_seq, body):
        # Endpoint addressing.
        self.source = source
        self.source_port = source_port
        self.dest = dest
        self.dest_port = dest_port
        # Raw header fields.
        self.flags = flags
        self.seq = seq
        self.ack_seq = ack_seq
        self.body = body
        # Lazily-computed connection key and capture timestamp.
        self.key = None
        self.micro_second = None
        # Decode the flag bits this parser cares about (FIN/SYN/ACK).
        self.fin = flags & 1
        self.syn = (flags >> 1) & 1
        self.ack = (flags >> 4) & 1

    def __str__(self):
        return "%s:%d --> %s:%d, seq:%d, ack_seq:%s size:%d fin:%d syn:%d ack:%d" % \
               (self.source, self.source_port, self.dest, self.dest_port, self.seq,
                self.ack_seq, len(self.body), self.fin, self.syn, self.ack)

    def gen_key(self):
        """Return a direction-independent connection key, caching it."""
        if self.key:
            return self.key
        endpoints = sorted(['%s:%d' % (self.source, self.source_port),
                            '%s:%d' % (self.dest, self.dest_port)])
        self.key = endpoints[0] + '-' + endpoints[1]
        return self.key

    def source_key(self):
        """Key identifying just the sending endpoint."""
        return '%s:%d' % (self.source, self.source_port)
# http://standards.ieee.org/about/get/802/802.3.html
def dl_parse_ethernet(link_packet):
    """Parse an Ethernet frame; return (network_protocol, payload).

    Handles optional 802.1Q VLAN tagging and PPPoE session encapsulation
    by skipping their extra header bytes before reading the EtherType.
    """
    eth_header_len = 14
    # ethernet header
    ethernet_header = link_packet[0:eth_header_len]
    (network_protocol, ) = struct.unpack(b'!12xH', ethernet_header)
    if network_protocol == NetworkProtocol.P802_1Q:
        # 802.1q, we need to skip two bytes and read another two bytes to get protocol/len
        type_or_len = link_packet[eth_header_len:eth_header_len + 4]
        eth_header_len += 4
        network_protocol, = struct.unpack(b'!2xH', type_or_len)
    if network_protocol == NetworkProtocol.PPPOE_SESSION:
        # skip PPPOE SESSION Header
        eth_header_len += 8
        type_or_len = link_packet[eth_header_len - 2:eth_header_len]
        network_protocol, = struct.unpack(b'!H', type_or_len)
    if network_protocol < 1536:
        # Values below 1536 are IEEE 802.3 length fields, not EtherTypes.
        # TODO n_protocol means package len
        pass
    return network_protocol, link_packet[eth_header_len:]
# http://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL.html
def dl_parse_linux_sll(link_packet):
    """Parse a Linux cooked-capture (SLL) header.

    Returns (network_protocol, payload), where payload is everything
    after the fixed 16-byte cooked header.
    See http://www.tcpdump.org/linktypes/LINKTYPE_LINUX_SLL.html
    """
    sll_header_len = 16
    fields = struct.unpack(b'!HHHQH', link_packet[:sll_header_len])
    # The protocol type is the final field of the cooked header.
    return fields[-1], link_packet[sll_header_len:]
# see http://en.wikipedia.org/wiki/Ethertype
def parse_ip_packet(network_protocol, ip_packet):
    """Parse an IPv4 packet; return (transport_protocol, source, dest, body).

    Non-IPv4 traffic — including IPv6, which is not yet handled — yields
    (None, None, None, None). EtherType numbers: see
    http://en.wikipedia.org/wiki/Ethertype
    """
    # ip header
    if network_protocol == NetworkProtocol.IP or network_protocol == NetworkProtocol.PPP_IP:
        ip_base_header_len = 20
        ip_header = ip_packet[0:ip_base_header_len]
        (ip_info, ip_length, transport_protocol) = struct.unpack(b'!BxH5xB10x', ip_header)
        # real ip header len. (IHL field counts 32-bit words)
        ip_header_len = (ip_info & 0xF) * 4
        ip_version = (ip_info >> 4) & 0xF
        # skip all extra header fields.
        if ip_header_len > ip_base_header_len:
            pass
        source = socket.inet_ntoa(ip_header[12:16])
        dest = socket.inet_ntoa(ip_header[16:])
        # Slice up to ip_length so trailing link-layer padding is dropped.
        return transport_protocol, source, dest, ip_packet[ip_header_len:ip_length]
    elif network_protocol == NetworkProtocol.IPV6:
        # TODO: deal with ipv6 package
        return None, None, None, None
    else:
        # skip
        return None, None, None, None
def parse_tcp_packet(tcp_packet):
    """Unpack a TCP header.

    Returns (source_port, dest_port, flags, seq, ack_seq, body). HTTP only
    builds on TCP, so no other transport needs full parsing here.
    """
    tcp_base_header_len = 20
    source_port, dest_port, seq, ack_seq, offset_byte, flags = struct.unpack(
        b'!HHIIBB6x', tcp_packet[:tcp_base_header_len])
    # The data offset lives in the high nibble, counted in 32-bit words.
    tcp_header_len = ((offset_byte >> 4) & 0xF) * 4
    # TCP options (header longer than 20 bytes) are intentionally skipped.
    return source_port, dest_port, flags, seq, ack_seq, tcp_packet[tcp_header_len:]
def get_link_layer_parser(link_type):
    """Map a pcap link-layer type to its parser function, or None."""
    parsers = {
        LinkLayerType.ETHERNET: dl_parse_ethernet,
        LinkLayerType.LINUX_SLL: dl_parse_linux_sll,
    }
    return parsers.get(link_type)
def parse_udp_packet(ip_body):
    """Unpack a UDP header; return (source_port, dest_port, payload)."""
    source_port, dest_port, length, _checksum = struct.unpack(b'!HHHH', ip_body[:8])
    # The UDP length field covers header + payload, so slice up to it.
    return source_port, dest_port, ip_body[8:length]
def read_tcp_packet(read_packet):
    """Generator yielding one TcpPack per *TCP* packet read.

    ``read_packet`` is a generator factory producing
    (link_type, micro_second, link_packet) tuples. Packets with an unknown
    link layer or a non-IPv4 / non-TCP payload are skipped; UDP is
    recognized but deliberately ignored.
    """
    for link_type, micro_second, link_packet in read_packet():
        parse_link_layer = get_link_layer_parser(link_type)
        if parse_link_layer is None:
            # skip unknown link layer packet
            continue
        network_protocol, link_layer_body = parse_link_layer(link_packet)
        transport_protocol, source, dest, ip_body = parse_ip_packet(network_protocol, link_layer_body)
        if transport_protocol is None:
            continue
        # tcp
        if transport_protocol == TransferProtocol.TCP:
            source_port, dest_port, flags, seq, ack_seq, body = parse_tcp_packet(ip_body)
            yield TcpPack(source, source_port, dest, dest_port, flags, seq, ack_seq, body)
        elif transport_protocol == TransferProtocol.UDP:
            # UDP parsing exists (parse_udp_packet) but is intentionally unused here.
            # source_port, dest_port, udp_body = parse_udp_packet(ip_body)
            continue
| 34.110465
| 102
| 0.654508
|
from __future__ import unicode_literals, print_function, division
__author__ = 'dongliu'
import struct
import socket
from pcapparser.constant import *
class TcpPack:
def __init__(self, source, source_port, dest, dest_port, flags, seq, ack_seq, body):
self.source = source
self.source_port = source_port
self.dest = dest
self.dest_port = dest_port
self.flags = flags
self.seq = seq
self.ack_seq = ack_seq
self.body = body
self.key = None
self.micro_second = None
self.fin = flags & 1
self.syn = (flags >> 1) & 1
self.ack = (flags >> 4) & 1
def __str__(self):
return "%s:%d --> %s:%d, seq:%d, ack_seq:%s size:%d fin:%d syn:%d ack:%d" % \
(self.source, self.source_port, self.dest, self.dest_port, self.seq,
self.ack_seq, len(self.body), self.fin, self.syn, self.ack)
def gen_key(self):
if self.key:
return self.key
skey = '%s:%d' % (self.source, self.source_port)
dkey = '%s:%d' % (self.dest, self.dest_port)
if skey < dkey:
self.key = skey + '-' + dkey
else:
self.key = dkey + '-' + skey
return self.key
def source_key(self):
return '%s:%d' % (self.source, self.source_port)
def dl_parse_ethernet(link_packet):
eth_header_len = 14
ethernet_header = link_packet[0:eth_header_len]
(network_protocol, ) = struct.unpack(b'!12xH', ethernet_header)
if network_protocol == NetworkProtocol.P802_1Q:
type_or_len = link_packet[eth_header_len:eth_header_len + 4]
eth_header_len += 4
network_protocol, = struct.unpack(b'!2xH', type_or_len)
if network_protocol == NetworkProtocol.PPPOE_SESSION:
eth_header_len += 8
type_or_len = link_packet[eth_header_len - 2:eth_header_len]
network_protocol, = struct.unpack(b'!H', type_or_len)
if network_protocol < 1536:
pass
return network_protocol, link_packet[eth_header_len:]
def dl_parse_linux_sll(link_packet):
sll_header_len = 16
linux_cooked = link_packet[0:sll_header_len]
packet_type, link_type_address_type, link_type_address_len, link_type_address, n_protocol \
= struct.unpack(b'!HHHQH', linux_cooked)
return n_protocol, link_packet[sll_header_len:]
def parse_ip_packet(network_protocol, ip_packet):
    """Parse an IPv4 packet.

    Returns ``(transport_protocol, source, dest, payload)``; all four are
    ``None`` for IPv6 or any other unsupported network protocol.
    """
    if network_protocol not in (NetworkProtocol.IP, NetworkProtocol.PPP_IP):
        # IPv6 (and anything else) is not handled.
        return None, None, None, None
    base_len = 20
    header = ip_packet[0:base_len]
    ip_info, total_len, transport_protocol = struct.unpack(b'!BxH5xB10x', header)
    # IHL is in 32-bit words, stored in the low nibble of the first byte;
    # options (header_len > 20) are simply skipped.
    header_len = (ip_info & 0xF) * 4
    source = socket.inet_ntoa(header[12:16])
    dest = socket.inet_ntoa(header[16:])
    return transport_protocol, source, dest, ip_packet[header_len:total_len]
def parse_tcp_packet(tcp_packet):
    """Parse a TCP segment.

    Returns ``(source_port, dest_port, flags, seq, ack_seq, body)``; any TCP
    options are skipped via the data-offset field.
    """
    base_len = 20
    source_port, dest_port, seq, ack_seq, offset_byte, flags = \
        struct.unpack(b'!HHIIBB6x', tcp_packet[0:base_len])
    # Data offset lives in the high nibble, counted in 32-bit words.
    header_len = ((offset_byte >> 4) & 0xF) * 4
    return source_port, dest_port, flags, seq, ack_seq, tcp_packet[header_len:]
def get_link_layer_parser(link_type):
    """Map a pcap link-layer type to its frame parser; None if unsupported."""
    parsers = {
        LinkLayerType.ETHERNET: dl_parse_ethernet,
        LinkLayerType.LINUX_SLL: dl_parse_linux_sll,
    }
    return parsers.get(link_type)
def parse_udp_packet(ip_body):
    """Parse a UDP datagram; returns ``(source_port, dest_port, payload)``.

    The UDP length field bounds the payload, so trailing padding is dropped.
    """
    source_port, dest_port, total_len, _checksum = struct.unpack(b'!HHHH', ip_body[0:8])
    return source_port, dest_port, ip_body[8:total_len]
def read_tcp_packet(read_packet):
    """Iterate captured frames and yield a TcpPack for every TCP segment.

    Args:
        read_packet: callable yielding ``(link_type, micro_second, link_packet)``
            tuples (e.g. a pcap reader).

    Yields:
        TcpPack instances with their capture timestamp attached.
    """
    for link_type, micro_second, link_packet in read_packet():
        parse_link_layer = get_link_layer_parser(link_type)
        if parse_link_layer is None:
            # Unsupported link layer (neither Ethernet nor Linux SLL).
            continue
        network_protocol, link_layer_body = parse_link_layer(link_packet)
        transport_protocol, source, dest, ip_body = parse_ip_packet(network_protocol, link_layer_body)
        if transport_protocol is None:
            continue
        if transport_protocol == TransferProtocol.TCP:
            source_port, dest_port, flags, seq, ack_seq, body = parse_tcp_packet(ip_body)
            pack = TcpPack(source, source_port, dest, dest_port, flags, seq, ack_seq, body)
            # Bug fix: attach the capture timestamp. It was read from the
            # reader but never stored, leaving TcpPack.micro_second None.
            pack.micro_second = micro_second
            yield pack
        elif transport_protocol == TransferProtocol.UDP:
            # UDP is recognised but deliberately not emitted.
            continue
| true
| true
|
1c427e093c9dfbef2750930fa240588794cddfc7
| 6,603
|
py
|
Python
|
tensorflow/vocab.py
|
swyjay/MRC
|
47b0baeaa1544dbf4d763471692c508cb32ec93d
|
[
"Apache-2.0"
] | 1
|
2021-04-19T18:02:40.000Z
|
2021-04-19T18:02:40.000Z
|
tensorflow/vocab.py
|
swyjay/MRC
|
47b0baeaa1544dbf4d763471692c508cb32ec93d
|
[
"Apache-2.0"
] | null | null | null |
tensorflow/vocab.py
|
swyjay/MRC
|
47b0baeaa1544dbf4d763471692c508cb32ec93d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf8 -*-
# ==============================================================================
# Copyright 2017 Baidu.com, Inc. All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
This module implements the Vocab class for converting string to id and back
"""
import numpy as np
class Vocab(object):
    """
    Implements a vocabulary to store the tokens in the data, with their corresponding embeddings.
    """
    def __init__(self, filename=None, initial_tokens=None, lower=False):
        """Build the vocabulary.

        Args:
            filename: optional path to a file with one token per line to load.
            initial_tokens: optional tokens registered before pad/unk.
            lower: if True, tokens are lowercased on every insert and lookup.
        """
        self.id2token = {}
        self.token2id = {}
        self.token_cnt = {}
        self.lower = lower

        self.embed_dim = None
        self.embeddings = None

        self.pad_token = '<blank>'
        self.unk_token = '<unk>'

        # Fix: copy the caller's list. The original extended `initial_tokens`
        # in place, mutating the argument passed by the caller.
        self.initial_tokens = list(initial_tokens) if initial_tokens is not None else []
        self.initial_tokens.extend([self.pad_token, self.unk_token])
        for token in self.initial_tokens:
            self.add(token)

        if filename is not None:
            self.load_from_file(filename)

    def size(self):
        """
        get the size of vocabulary
        Returns:
            an integer indicating the size
        """
        return len(self.id2token)

    def load_from_file(self, file_path):
        """
        loads the vocab from file_path
        Args:
            file_path: a file with a word in each line
        """
        # Fix: use a context manager so the file handle is always closed
        # (the original left it open).
        with open(file_path, 'r') as fin:
            for line in fin:
                token = line.rstrip('\n')
                self.add(token)

    def get_id(self, token):
        """
        gets the id of a token, returns the id of unk token if token is not in vocab
        Args:
            token: a string indicating the word
        Returns:
            an integer
        """
        token = token.lower() if self.lower else token
        try:
            return self.token2id[token]
        except KeyError:
            return self.token2id[self.unk_token]

    def get_token(self, idx):
        """
        gets the token corresponding to idx, returns unk token if idx is not in vocab
        Args:
            idx: an integer
        Returns:
            a token string
        """
        try:
            return self.id2token[idx]
        except KeyError:
            return self.unk_token

    def add(self, token, cnt=1):
        """
        adds the token to vocab
        Args:
            token: a string
            cnt: a num indicating the count of the token to add, default is 1
        Returns:
            the id assigned to the token
        """
        token = token.lower() if self.lower else token
        if token in self.token2id:
            idx = self.token2id[token]
        else:
            idx = len(self.id2token)
            self.id2token[idx] = token
            self.token2id[token] = idx
        if cnt > 0:
            if token in self.token_cnt:
                self.token_cnt[token] += cnt
            else:
                self.token_cnt[token] = cnt
        return idx

    def filter_tokens_by_cnt(self, min_cnt):
        """
        filter the tokens in vocab by their count
        Args:
            min_cnt: tokens with frequency less than min_cnt is filtered
        """
        filtered_tokens = [token for token in self.token2id if self.token_cnt[token] >= min_cnt]
        # rebuild the token x id map; initial tokens (incl. pad/unk) always survive
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in filtered_tokens:
            self.add(token, cnt=0)

    def randomly_init_embeddings(self, embed_dim):
        """
        randomly initializes the embeddings for each token
        Args:
            embed_dim: the size of the embedding for each token
        """
        self.embed_dim = embed_dim
        self.embeddings = np.random.rand(self.size(), embed_dim)
        # pad/unk are kept as zero vectors
        for token in [self.pad_token, self.unk_token]:
            self.embeddings[self.get_id(token)] = np.zeros([self.embed_dim])

    def load_pretrained_embeddings(self, embedding_path):
        """
        loads the pretrained embeddings from embedding_path,
        tokens not in pretrained embeddings will be filtered
        Args:
            embedding_path: the path of the pretrained embedding file
                (one `token v1 v2 ...` entry per line)
        """
        trained_embeddings = {}
        with open(embedding_path, 'r', encoding='utf-8') as fin:
            for line in fin:
                contents = line.strip().split()
                token = contents[0]
                if token not in self.token2id:
                    continue
                trained_embeddings[token] = list(map(float, contents[1:]))
                if self.embed_dim is None:
                    self.embed_dim = len(contents) - 1
        filtered_tokens = trained_embeddings.keys()
        # rebuild the token x id map
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in filtered_tokens:
            self.add(token, cnt=0)
        # load embeddings; tokens without a pretrained vector (pad/unk) stay zero
        self.embeddings = np.zeros([self.size(), self.embed_dim])
        for token in self.token2id.keys():
            if token in trained_embeddings:
                self.embeddings[self.get_id(token)] = trained_embeddings[token]

    def convert_to_ids(self, tokens):
        """
        Convert a list of tokens to ids, use unk_token if the token is not in vocab.
        Args:
            tokens: a list of token
        Returns:
            a list of ids
        """
        return [self.get_id(label) for label in tokens]

    def recover_from_ids(self, ids, stop_id=None):
        """
        Convert a list of ids to tokens, stop converting if the stop_id is encountered
        Args:
            ids: a list of ids to convert
            stop_id: the stop id, default is None
        Returns:
            a list of tokens
        """
        tokens = []
        for i in ids:
            tokens += [self.get_token(i)]
            if stop_id is not None and i == stop_id:
                break
        return tokens
| 33.517766
| 97
| 0.566712
|
import numpy as np
class Vocab(object):
    """Vocabulary mapping tokens <-> integer ids, with optional embeddings.

    Comment-stripped duplicate of the documented Vocab class earlier in this
    file; behavior is identical.
    """
    def __init__(self, filename=None, initial_tokens=None, lower=False):
        # id <-> token maps plus per-token occurrence counts.
        self.id2token = {}
        self.token2id = {}
        self.token_cnt = {}
        self.lower = lower
        self.embed_dim = None
        self.embeddings = None
        self.pad_token = '<blank>'
        self.unk_token = '<unk>'
        # NOTE(review): extend() mutates a caller-supplied initial_tokens list.
        self.initial_tokens = initial_tokens if initial_tokens is not None else []
        self.initial_tokens.extend([self.pad_token, self.unk_token])
        for token in self.initial_tokens:
            self.add(token)
        if filename is not None:
            self.load_from_file(filename)
    def size(self):
        """Return the number of distinct tokens."""
        return len(self.id2token)
    def load_from_file(self, file_path):
        """Add one token per line from file_path.

        NOTE(review): the file handle is never closed explicitly.
        """
        for line in open(file_path, 'r'):
            token = line.rstrip('\n')
            self.add(token)
    def get_id(self, token):
        """Return the id of token, falling back to the unk token's id."""
        token = token.lower() if self.lower else token
        try:
            return self.token2id[token]
        except KeyError:
            return self.token2id[self.unk_token]
    def get_token(self, idx):
        """Return the token for idx, or the unk token if idx is unknown."""
        try:
            return self.id2token[idx]
        except KeyError:
            return self.unk_token
    def add(self, token, cnt=1):
        """Register token (adding cnt to its count) and return its id."""
        token = token.lower() if self.lower else token
        if token in self.token2id:
            idx = self.token2id[token]
        else:
            idx = len(self.id2token)
            self.id2token[idx] = token
            self.token2id[token] = idx
        if cnt > 0:
            if token in self.token_cnt:
                self.token_cnt[token] += cnt
            else:
                self.token_cnt[token] = cnt
        return idx
    def filter_tokens_by_cnt(self, min_cnt):
        """Drop tokens seen fewer than min_cnt times; ids are reassigned."""
        filtered_tokens = [token for token in self.token2id if self.token_cnt[token] >= min_cnt]
        # Rebuild the maps; initial tokens (incl. pad/unk) always survive.
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in filtered_tokens:
            self.add(token, cnt=0)
    def randomly_init_embeddings(self, embed_dim):
        """Randomly initialize embeddings; pad/unk become zero vectors."""
        self.embed_dim = embed_dim
        self.embeddings = np.random.rand(self.size(), embed_dim)
        for token in [self.pad_token, self.unk_token]:
            self.embeddings[self.get_id(token)] = np.zeros([self.embed_dim])
    def load_pretrained_embeddings(self, embedding_path):
        """Load pretrained vectors; tokens without one are filtered out."""
        trained_embeddings = {}
        with open(embedding_path, 'r',encoding='utf-8') as fin:
            for line in fin:
                contents = line.strip().split()
                token = contents[0]
                if token not in self.token2id:
                    continue
                trained_embeddings[token] = list(map(float, contents[1:]))
                if self.embed_dim is None:
                    self.embed_dim = len(contents) - 1
        filtered_tokens = trained_embeddings.keys()
        # Rebuild the token <-> id maps around the surviving tokens.
        self.token2id = {}
        self.id2token = {}
        for token in self.initial_tokens:
            self.add(token, cnt=0)
        for token in filtered_tokens:
            self.add(token, cnt=0)
        # Tokens without a pretrained vector (pad/unk) keep zero embeddings.
        self.embeddings = np.zeros([self.size(), self.embed_dim])
        for token in self.token2id.keys():
            if token in trained_embeddings:
                self.embeddings[self.get_id(token)] = trained_embeddings[token]
    def convert_to_ids(self, tokens):
        """Map tokens to ids (unk id for unknown tokens)."""
        vec = [self.get_id(label) for label in tokens]
        return vec
    def recover_from_ids(self, ids, stop_id=None):
        """Map ids back to tokens, stopping after stop_id if given."""
        tokens = []
        for i in ids:
            tokens += [self.get_token(i)]
            if stop_id is not None and i == stop_id:
                break
        return tokens
| true
| true
|
1c427ea8b340360aec22c298b306b66233fa109d
| 1,103
|
py
|
Python
|
tests/test_frame.py
|
himbeles/geo3d
|
b9a01868207e9f2c0364eb3a6c130c9304cde0b6
|
[
"MIT"
] | 4
|
2020-05-07T18:57:07.000Z
|
2021-06-07T02:34:16.000Z
|
tests/test_frame.py
|
himbeles/geo3d
|
b9a01868207e9f2c0364eb3a6c130c9304cde0b6
|
[
"MIT"
] | 2
|
2021-02-10T07:37:40.000Z
|
2021-02-10T09:31:37.000Z
|
tests/test_frame.py
|
himbeles/geo3d
|
b9a01868207e9f2c0364eb3a6c130c9304cde0b6
|
[
"MIT"
] | null | null | null |
import pytest
from numpy import sqrt
from geo3d import (
Frame,
frame_wizard,
Point,
Vector,
UnitFrame,
RotationMatrix,
transformation_between_frames,
)
def test_frame_wizard():
    """Canonical z/y axes with a zero origin must yield the unit frame."""
    frame = frame_wizard([0, 0, 1], [0, 1, 0], "z", "y", [0, 0, 0])
    assert frame == UnitFrame
def test_manual_frame_creation():
    """A Frame built from explicit rotation/translation keeps both parts."""
    rotation = RotationMatrix.from_euler_angles("xyz", [90, -45, 45], degrees=True)
    translation = Vector([3, 4, 6])
    frame = Frame(rotation_matrix=rotation, translation_vector=translation)
    assert frame.rotation == rotation
    assert frame.translation == translation
def test_express_frame_in_frame(example_frames):
    """Expressing frame b in frame a gives the expected pose."""
    frame_a, frame_b, _ = example_frames
    expressed = frame_b.express_in_frame(frame_a)
    assert expressed.translation.as_array() == pytest.approx([sqrt(2), 0, -4])
    assert expressed.euler_angles("XYZ", degrees=True) == pytest.approx([180, 0, -45])
def test_transformation_between_frames(example_frames):
    """The transformation a->b has the expected rotation and translation."""
    frame_a, frame_b, _ = example_frames
    transform = transformation_between_frames(frame_a, frame_b)
    assert transform.translation.as_array() == pytest.approx([1, 1, 4])
    assert transform.euler_angles("XYZ", degrees=True) == pytest.approx([180, 0, -45])
| 26.902439
| 78
| 0.673617
|
import pytest
from numpy import sqrt
from geo3d import (
Frame,
frame_wizard,
Point,
Vector,
UnitFrame,
RotationMatrix,
transformation_between_frames,
)
def test_frame_wizard():
    """Canonical z/y axes with a zero origin must yield the unit frame."""
    t = frame_wizard([0, 0, 1], [0, 1, 0], "z", "y", [0, 0, 0])
    assert t == UnitFrame
def test_manual_frame_creation():
    """A Frame built from explicit rotation/translation keeps both parts."""
    rot = RotationMatrix.from_euler_angles("xyz", [90, -45, 45], degrees=True)
    vec = Vector([3, 4, 6])
    f = Frame(rotation_matrix=rot, translation_vector=vec)
    assert f.translation == vec
    assert f.rotation == rot
def test_express_frame_in_frame(example_frames):
    """Expressing frame b in frame a gives the expected pose."""
    fa,fb,fc = example_frames
    t = fb.express_in_frame(fa)
    assert t.euler_angles("XYZ", degrees=True) == pytest.approx([180, 0, -45])
    assert t.translation.as_array() == pytest.approx([sqrt(2), 0, -4])
def test_transformation_between_frames(example_frames):
    """The transformation a->b has the expected rotation and translation."""
    fa,fb,fc = example_frames
    t = transformation_between_frames(fa, fb)
    assert t.euler_angles("XYZ", degrees=True) == pytest.approx([180, 0, -45])
    assert t.translation.as_array() == pytest.approx([1, 1, 4])
| true
| true
|
1c4283124f120be686eb14d6792903d15ec1e39e
| 5,637
|
py
|
Python
|
pyglet-hg/pyglet/gl/glu_info.py
|
sangh/LaserShow
|
abc95e465e3455dc220cc602dd58358c84666f29
|
[
"BSD-3-Clause"
] | 2
|
2017-05-10T08:27:22.000Z
|
2019-10-05T14:55:00.000Z
|
pyglet/GL/glu_info.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | null | null | null |
pyglet/GL/glu_info.py
|
seeminglee/pyglet64
|
3dd167b5b0d3ad132a157e404586e53c2bb21736
|
[
"BSD-3-Clause"
] | null | null | null |
# ----------------------------------------------------------------------------
# pyglet
# Copyright (c) 2006-2008 Alex Holkner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of pyglet nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------------
'''Information about version and extensions of current GLU implementation.
Usage::
from pyglet.gl import glu_info
if glu_info.have_extension('GLU_EXT_nurbs_tessellator'):
# ...
If multiple contexts are in use you can use a separate GLUInfo object for each
context. Call `set_active_context` after switching to the desired context for
each GLUInfo::
from pyglet.gl.glu_info import GLUInfo
info = GLUInfo()
info.set_active_context()
if info.have_version(1, 3):
# ...
Note that GLUInfo only returns meaningful information if a context has been
created.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import warnings
from pyglet.gl.glu import *
class GLUInfo(object):
    '''Information interface for the GLU library.

    A default instance is created automatically when the first OpenGL context
    is created; the module-level functions are conveniences for it.  When
    using more than one context, call `set_active_context` while the context
    for this `GLUInfo` instance is active.
    '''
    have_context = False
    version = '0.0.0'
    extensions = []

    _have_info = False

    def set_active_context(self):
        '''Record version/extension info for the currently active context.

        Called automatically for the default context; the GLU queries run
        only once per instance.
        '''
        self.have_context = True
        if self._have_info:
            return
        self.extensions = \
            cast(gluGetString(GLU_EXTENSIONS), c_char_p).value.split()
        self.version = cast(gluGetString(GLU_VERSION), c_char_p).value
        self._have_info = True

    def have_version(self, major, minor=0, release=0):
        '''Determine if a version of GLU is supported.

        :Parameters:
            `major` : int
                The major revision number (typically 1).
            `minor` : int
                The minor revision number.
            `release` : int
                The release number.

        :rtype: bool
        :return: True if the requested or a later version is supported.
        '''
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        # Pad the reported version so it always has three numeric fields,
        # e.g. '1.3' -> '1.3.0.0' -> (1, 3, 0).
        padded = '%s.0.0' % self.version.split(' ', 1)[0]
        found = tuple(int(part) for part in padded.split('.', 3)[:3])
        # Lexicographic tuple comparison is equivalent to the component-wise
        # major/minor/release test.
        return found >= (major, minor, release)

    def get_version(self):
        '''Return the GLU version string reported by the driver.

        :rtype: str
        '''
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return self.version

    def have_extension(self, extension):
        '''Return True if `extension` (with its ``GLU_`` prefix) is available.

        :rtype: bool
        '''
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return extension in self.extensions

    def get_extensions(self):
        '''Return the list of available GLU extensions.

        :rtype: list of str
        '''
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return self.extensions
# Single instance useful for apps with only a single context (or all contexts
# have same GLU driver, common case).
# The module-level names below are bound methods of this shared instance, so
# callers can simply use e.g. `glu_info.have_version(1, 3)`.
_glu_info = GLUInfo()
set_active_context = _glu_info.set_active_context
have_version = _glu_info.have_version
get_version = _glu_info.get_version
have_extension = _glu_info.have_extension
get_extensions = _glu_info.get_extensions
| 35.012422
| 78
| 0.659038
|
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
from ctypes import *
import warnings
from pyglet.gl.glu import *
class GLUInfo(object):
    """Information interface for the GLU library.

    Comment-stripped duplicate of the documented GLUInfo class earlier in
    this file; behavior is identical.
    """
    have_context = False
    version = '0.0.0'
    extensions = []
    _have_info = False
    def set_active_context(self):
        """Record version/extension info for the currently active context."""
        self.have_context = True
        if not self._have_info:
            self.extensions = \
                cast(gluGetString(GLU_EXTENSIONS), c_char_p).value.split()
            self.version = cast(gluGetString(GLU_VERSION), c_char_p).value
            self._have_info = True
    def have_version(self, major, minor=0, release=0):
        """Return True if GLU version major.minor.release (or later) is present."""
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        # Pad the reported version so it always has three numeric fields.
        ver = '%s.0.0' % self.version.split(' ', 1)[0]
        imajor, iminor, irelease = [int(v) for v in ver.split('.', 3)[:3]]
        return imajor > major or \
            (imajor == major and iminor > minor) or \
            (imajor == major and iminor == minor and irelease >= release)
    def get_version(self):
        """Return the GLU version string reported by the driver."""
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return self.version
    def have_extension(self, extension):
        """Return True if `extension` (with its GLU_ prefix) is available."""
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return extension in self.extensions
    def get_extensions(self):
        """Return the list of available GLU extensions."""
        if not self.have_context:
            warnings.warn('No GL context created yet.')
        return self.extensions
# Shared default instance; the module-level names are its bound methods.
_glu_info = GLUInfo()
set_active_context = _glu_info.set_active_context
have_version = _glu_info.have_version
get_version = _glu_info.get_version
have_extension = _glu_info.have_extension
get_extensions = _glu_info.get_extensions
| true
| true
|
1c4283e833a86ce40a8d18bb9afcf660fc39bd7c
| 36,879
|
py
|
Python
|
electrum/address_synchronizer.py
|
bitspill/flo-electrum-1
|
5975c105d682719d5d8aad50b80e795fc7cfbf9d
|
[
"MIT"
] | null | null | null |
electrum/address_synchronizer.py
|
bitspill/flo-electrum-1
|
5975c105d682719d5d8aad50b80e795fc7cfbf9d
|
[
"MIT"
] | 1
|
2018-09-10T18:45:13.000Z
|
2018-09-10T18:45:13.000Z
|
electrum/address_synchronizer.py
|
bitspill/flo-electrum-1
|
5975c105d682719d5d8aad50b80e795fc7cfbf9d
|
[
"MIT"
] | 1
|
2018-07-14T08:17:34.000Z
|
2018-07-14T08:17:34.000Z
|
# Electrum - lightweight Bitcoin client
# Copyright (C) 2018 The Electrum Developers
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import threading
import asyncio
import itertools
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Optional
from . import bitcoin
from .bitcoin import COINBASE_MATURITY, TYPE_ADDRESS, TYPE_PUBKEY
from .util import PrintError, profiler, bfh, TxMinedInfo
from .transaction import Transaction, TxOutput
from .synchronizer import Synchronizer
from .verifier import SPV
from .blockchain import hash_header
from .i18n import _
if TYPE_CHECKING:
from .storage import WalletStorage
from .network import Network
# Sentinel "heights" for transactions not mined in a block. Conflict
# resolution in add_transaction treats them as: confirmed > mempool > local.
TX_HEIGHT_LOCAL = -2           # known only to this wallet (not seen on the network)
TX_HEIGHT_UNCONF_PARENT = -1   # in mempool; presumably spends an unconfirmed parent
TX_HEIGHT_UNCONFIRMED = 0      # in mempool
class AddTransactionException(Exception):
    """Base error raised when a transaction cannot be added to the wallet history."""
    pass
class UnrelatedTransactionException(AddTransactionException):
    """Raised when neither the inputs nor the outputs of a tx touch this wallet."""
    def __str__(self):
        return _("Transaction is unrelated to this wallet.")
class AddressSynchronizer(PrintError):
"""
inherited by wallet
"""
    def __init__(self, storage: 'WalletStorage'):
        """Load the in-memory transaction/history state from `storage`."""
        self.storage = storage
        self.network = None  # type: Network
        # verifier (SPV) and synchronizer are started in start_network
        self.synchronizer = None  # type: Synchronizer
        self.verifier = None  # type: SPV
        # locks: if you need to take multiple ones, acquire them in the order they are defined here!
        self.lock = threading.RLock()
        self.transaction_lock = threading.RLock()
        # address -> list(txid, height)
        self.history = storage.get('addr_history',{})
        # Verified transactions. txid -> TxMinedInfo. Access with self.lock.
        # Stored as (height, timestamp, txpos, header_hash, flodata) tuples.
        verified_tx = storage.get('verified_tx3', {})
        self.verified_tx = {} # type: Dict[str, TxMinedInfo]
        for txid, (height, timestamp, txpos, header_hash, flodata) in verified_tx.items():
            self.verified_tx[txid] = TxMinedInfo(height=height,
                                                 conf=None,
                                                 timestamp=timestamp,
                                                 txpos=txpos,
                                                 header_hash=header_hash,
                                                 flodata=flodata)
        # Transactions pending verification. txid -> tx_height. Access with self.lock.
        self.unverified_tx = defaultdict(int)
        # true when synchronized
        self.up_to_date = False
        # thread local storage for caching stuff
        self.threadlocal_cache = threading.local()
        self.load_and_cleanup()
    def with_transaction_lock(func):
        """Decorator: run the wrapped method while holding self.transaction_lock.

        Defined as a plain function at class scope; only usable to decorate
        methods of this class.
        """
        def func_wrapper(self, *args, **kwargs):
            with self.transaction_lock:
                return func(self, *args, **kwargs)
        return func_wrapper
    def load_and_cleanup(self):
        """Load persisted transaction data and run consistency cleanup.

        Order matters: transactions and local history must be loaded before
        the history check and the pruning passes.
        """
        self.load_transactions()
        self.load_local_history()
        self.check_history()
        self.load_unverified_transactions()
        self.remove_local_transactions_we_dont_have()
    def is_mine(self, address):
        """Return True if `address` belongs to this wallet, i.e. it has an
        entry in self.history."""
        return address in self.history
def get_addresses(self):
return sorted(self.history.keys())
    def get_address_history(self, addr):
        """Return a list of (tx_hash, height) pairs for txs involving `addr`."""
        h = []
        # we need self.transaction_lock but get_tx_height will take self.lock
        # so we need to take that too here, to enforce order of locks
        with self.lock, self.transaction_lock:
            related_txns = self._history_local.get(addr, set())
            for tx_hash in related_txns:
                tx_height = self.get_tx_height(tx_hash).height
                h.append((tx_hash, tx_height))
        return h
def get_address_history_len(self, addr: str) -> int:
"""Return number of transactions where address is involved."""
return len(self._history_local.get(addr, ()))
def get_txin_address(self, txi):
addr = txi.get('address')
if addr and addr != "(pubkey)":
return addr
prevout_hash = txi.get('prevout_hash')
prevout_n = txi.get('prevout_n')
dd = self.txo.get(prevout_hash, {})
for addr, l in dd.items():
for n, v, is_cb in l:
if n == prevout_n:
return addr
return None
def get_txout_address(self, txo: TxOutput):
if txo.type == TYPE_ADDRESS:
addr = txo.address
elif txo.type == TYPE_PUBKEY:
addr = bitcoin.public_key_to_p2pkh(bfh(txo.address))
else:
addr = None
return addr
def load_unverified_transactions(self):
# review transactions that are in the history
for addr, hist in self.history.items():
for tx_hash, tx_height in hist:
# add it in case it was previously unconfirmed
self.add_unverified_tx(tx_hash, tx_height)
    def start_network(self, network):
        """Attach to `network` and start the Synchronizer and SPV verifier."""
        self.network = network
        if self.network is not None:
            self.synchronizer = Synchronizer(self)
            self.verifier = SPV(self.network, self)
    def stop_threads(self, write_to_disk=True):
        """Stop synchronizer/verifier and optionally persist state to disk."""
        if self.network:
            if self.synchronizer:
                # Stop coroutines on the network's event loop, not this thread.
                asyncio.run_coroutine_threadsafe(self.synchronizer.stop(), self.network.asyncio_loop)
                self.synchronizer = None
            if self.verifier:
                asyncio.run_coroutine_threadsafe(self.verifier.stop(), self.network.asyncio_loop)
                self.verifier = None
            self.storage.put('stored_height', self.get_local_height())
        if write_to_disk:
            self.save_transactions()
            self.save_verified_tx()
            self.storage.write()
    def add_address(self, address):
        """Start tracking `address`: give it an empty history and subscribe
        the synchronizer to it."""
        if address not in self.history:
            self.history[address] = []
            self.set_up_to_date(False)
        if self.synchronizer:
            self.synchronizer.add(address)
    def get_conflicting_transactions(self, tx_hash, tx):
        """Returns a set of transaction hashes from the wallet history that are
        directly conflicting with tx, i.e. they have common outpoints being
        spent with tx. If the tx is already in wallet history, that will not be
        reported as a conflict.
        """
        conflicting_txns = set()
        with self.transaction_lock:
            for txin in tx.inputs():
                if txin['type'] == 'coinbase':
                    continue
                prevout_hash = txin['prevout_hash']
                prevout_n = txin['prevout_n']
                spending_tx_hash = self.spent_outpoints[prevout_hash].get(prevout_n)
                if spending_tx_hash is None:
                    continue
                # this outpoint has already been spent, by spending_tx
                assert spending_tx_hash in self.transactions
                conflicting_txns |= {spending_tx_hash}
            if tx_hash in conflicting_txns:
                # this tx is already in history, so it conflicts with itself
                if len(conflicting_txns) > 1:
                    raise Exception('Found conflicting transactions already in wallet history.')
                conflicting_txns -= {tx_hash}
            return conflicting_txns
    def add_transaction(self, tx_hash, tx, allow_unrelated=False):
        """Add `tx` to the wallet history; return True if it was kept.

        Resolves double-spend conflicts (confirmed > mempool > local, and a
        new tx wins over existing ones), then updates the txi/txo bookkeeping
        and the local per-address history.

        Raises:
            UnrelatedTransactionException: if `allow_unrelated` is False and
                no input or output of `tx` belongs to this wallet.
        """
        assert tx_hash, tx_hash
        assert tx, tx
        assert tx.is_complete()
        # assert tx_hash == tx.txid()  # disabled as expensive; test done by Synchronizer.
        # we need self.transaction_lock but get_tx_height will take self.lock
        # so we need to take that too here, to enforce order of locks
        with self.lock, self.transaction_lock:
            # NOTE: returning if tx in self.transactions might seem like a good idea
            # BUT we track is_mine inputs in a txn, and during subsequent calls
            # of add_transaction tx, we might learn of more-and-more inputs of
            # being is_mine, as we roll the gap_limit forward
            is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
            tx_height = self.get_tx_height(tx_hash).height
            if not allow_unrelated:
                # note that during sync, if the transactions are not properly sorted,
                # it could happen that we think tx is unrelated but actually one of the inputs is is_mine.
                # this is the main motivation for allow_unrelated
                is_mine = any([self.is_mine(self.get_txin_address(txin)) for txin in tx.inputs()])
                is_for_me = any([self.is_mine(self.get_txout_address(txo)) for txo in tx.outputs()])
                if not is_mine and not is_for_me:
                    raise UnrelatedTransactionException()
            # Find all conflicting transactions.
            # In case of a conflict,
            #     1. confirmed > mempool > local
            #     2. this new txn has priority over existing ones
            # When this method exits, there must NOT be any conflict, so
            # either keep this txn and remove all conflicting (along with dependencies)
            #     or drop this txn
            conflicting_txns = self.get_conflicting_transactions(tx_hash, tx)
            if conflicting_txns:
                existing_mempool_txn = any(
                    self.get_tx_height(tx_hash2).height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT)
                    for tx_hash2 in conflicting_txns)
                existing_confirmed_txn = any(
                    self.get_tx_height(tx_hash2).height > 0
                    for tx_hash2 in conflicting_txns)
                if existing_confirmed_txn and tx_height <= 0:
                    # this is a non-confirmed tx that conflicts with confirmed txns; drop.
                    return False
                if existing_mempool_txn and tx_height == TX_HEIGHT_LOCAL:
                    # this is a local tx that conflicts with non-local txns; drop.
                    return False
                # keep this txn and remove all conflicting
                to_remove = set()
                to_remove |= conflicting_txns
                for conflicting_tx_hash in conflicting_txns:
                    to_remove |= self.get_depending_transactions(conflicting_tx_hash)
                for tx_hash2 in to_remove:
                    self.remove_transaction(tx_hash2)
            # add inputs
            def add_value_from_prev_output():
                dd = self.txo.get(prevout_hash, {})
                # note: this nested loop takes linear time in num is_mine outputs of prev_tx
                for addr, outputs in dd.items():
                    # note: instead of [(n, v, is_cb), ...]; we could store: {n -> (v, is_cb)}
                    for n, v, is_cb in outputs:
                        if n == prevout_n:
                            if addr and self.is_mine(addr):
                                if d.get(addr) is None:
                                    d[addr] = set()
                                d[addr].add((ser, v))
                            return
            self.txi[tx_hash] = d = {}
            for txi in tx.inputs():
                if txi['type'] == 'coinbase':
                    continue
                prevout_hash = txi['prevout_hash']
                prevout_n = txi['prevout_n']
                ser = prevout_hash + ':%d' % prevout_n
                self.spent_outpoints[prevout_hash][prevout_n] = tx_hash
                add_value_from_prev_output()
            # add outputs
            self.txo[tx_hash] = d = {}
            for n, txo in enumerate(tx.outputs()):
                v = txo[2]
                ser = tx_hash + ':%d'%n
                addr = self.get_txout_address(txo)
                if addr and self.is_mine(addr):
                    if d.get(addr) is None:
                        d[addr] = []
                    d[addr].append((n, v, is_coinbase))
                # give v to txi that spends me
                next_tx = self.spent_outpoints[tx_hash].get(n)
                if next_tx is not None:
                    dd = self.txi.get(next_tx, {})
                    if dd.get(addr) is None:
                        dd[addr] = set()
                    if (ser, v) not in dd[addr]:
                        dd[addr].add((ser, v))
                        self._add_tx_to_local_history(next_tx)
            # add to local history
            self._add_tx_to_local_history(tx_hash)
            # save
            self.transactions[tx_hash] = tx
            return True
    def remove_transaction(self, tx_hash):
        """Remove `tx_hash` from the wallet history and undo its bookkeeping."""
        def remove_from_spent_outpoints():
            # undo spends in spent_outpoints
            if tx is not None:  # if we have the tx, this branch is faster
                for txin in tx.inputs():
                    if txin['type'] == 'coinbase':
                        continue
                    prevout_hash = txin['prevout_hash']
                    prevout_n = txin['prevout_n']
                    self.spent_outpoints[prevout_hash].pop(prevout_n, None)
                    if not self.spent_outpoints[prevout_hash]:
                        self.spent_outpoints.pop(prevout_hash)
            else:  # expensive but always works
                for prevout_hash, d in list(self.spent_outpoints.items()):
                    for prevout_n, spending_txid in d.items():
                        if spending_txid == tx_hash:
                            self.spent_outpoints[prevout_hash].pop(prevout_n, None)
                            if not self.spent_outpoints[prevout_hash]:
                                self.spent_outpoints.pop(prevout_hash)
            # Remove this tx itself; if nothing spends from it.
            # It is not so clear what to do if other txns spend from it, but it will be
            # removed when those other txns are removed.
            if not self.spent_outpoints[tx_hash]:
                self.spent_outpoints.pop(tx_hash)
        with self.transaction_lock:
            self.print_error("removing tx from history", tx_hash)
            tx = self.transactions.pop(tx_hash, None)
            remove_from_spent_outpoints()
            self._remove_tx_from_local_history(tx_hash)
            self.txi.pop(tx_hash, None)
            self.txo.pop(tx_hash, None)
    def get_depending_transactions(self, tx_hash):
        """Returns all (grand-)children of tx_hash in this wallet."""
        # Each direct spender of one of tx_hash's outputs is a child;
        # recurse to collect their spenders transitively.
        children = set()
        for other_hash in self.spent_outpoints[tx_hash].values():
            children.add(other_hash)
            children |= self.get_depending_transactions(other_hash)
        return children

    def receive_tx_callback(self, tx_hash, tx, tx_height):
        # Network callback: record the reported height, then index the tx.
        self.add_unverified_tx(tx_hash, tx_height)
        self.add_transaction(tx_hash, tx, allow_unrelated=True)
    def receive_history_callback(self, addr, hist, tx_fees):
        """Network callback: replace the stored history for addr with the
        server-reported hist, demoting dropped txns to local, and record
        any tx fees the server reported."""
        with self.lock:
            old_hist = self.get_address_history(addr)
            for tx_hash, height in old_hist:
                if (tx_hash, height) not in hist:
                    # make tx local
                    self.unverified_tx.pop(tx_hash, None)
                    self.verified_tx.pop(tx_hash, None)
                    if self.verifier:
                        self.verifier.remove_spv_proof_for_tx(tx_hash)
            self.history[addr] = hist

        for tx_hash, tx_height in hist:
            # add it in case it was previously unconfirmed
            self.add_unverified_tx(tx_hash, tx_height)
            # if addr is new, we have to recompute txi and txo
            tx = self.transactions.get(tx_hash)
            if tx is None:
                continue
            self.add_transaction(tx_hash, tx, allow_unrelated=True)

        # Store fees
        self.tx_fees.update(tx_fees)
    @profiler
    def load_transactions(self):
        """Load txi/txo/fees, raw transactions, and the spent-outpoints
        index from storage, dropping unreferenced entries."""
        # load txi, txo, tx_fees
        # bookkeeping data of is_mine inputs of transactions
        self.txi = self.storage.get('txi', {})  # txid -> address -> (prev_outpoint, value)
        for txid, d in list(self.txi.items()):
            for addr, lst in d.items():
                # storage round-trip turns tuples into lists; restore set-of-tuples
                self.txi[txid][addr] = set([tuple(x) for x in lst])
        # bookkeeping data of is_mine outputs of transactions
        self.txo = self.storage.get('txo', {})  # txid -> address -> (output_index, value, is_coinbase)
        self.tx_fees = self.storage.get('tx_fees', {})
        tx_list = self.storage.get('transactions', {})
        # load transactions
        self.transactions = {}
        for tx_hash, raw in tx_list.items():
            tx = Transaction(raw)
            self.transactions[tx_hash] = tx
            if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None:
                # not referenced by any index: drop it
                self.print_error("removing unreferenced tx", tx_hash)
                self.transactions.pop(tx_hash)
        # load spent_outpoints
        _spent_outpoints = self.storage.get('spent_outpoints', {})
        self.spent_outpoints = defaultdict(dict)
        for prevout_hash, d in _spent_outpoints.items():
            for prevout_n_str, spending_txid in d.items():
                prevout_n = int(prevout_n_str)  # stored keys are strings
                if spending_txid not in self.transactions:
                    continue  # only care about txns we have
                self.spent_outpoints[prevout_hash][prevout_n] = spending_txid
    @profiler
    def load_local_history(self):
        """Build the per-address txid index from the txi/txo indexes."""
        self._history_local = {}  # address -> set(txid)
        self._address_history_changed_events = defaultdict(asyncio.Event)  # address -> Event
        for txid in itertools.chain(self.txi, self.txo):
            self._add_tx_to_local_history(txid)
@profiler
def check_history(self):
save = False
hist_addrs_mine = list(filter(lambda k: self.is_mine(k), self.history.keys()))
hist_addrs_not_mine = list(filter(lambda k: not self.is_mine(k), self.history.keys()))
for addr in hist_addrs_not_mine:
self.history.pop(addr)
save = True
for addr in hist_addrs_mine:
hist = self.history[addr]
for tx_hash, tx_height in hist:
if self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx, allow_unrelated=True)
save = True
if save:
self.save_transactions()
def remove_local_transactions_we_dont_have(self):
txid_set = set(self.txi) | set(self.txo)
for txid in txid_set:
tx_height = self.get_tx_height(txid).height
if tx_height == TX_HEIGHT_LOCAL and txid not in self.transactions:
self.remove_transaction(txid)
@profiler
def save_transactions(self, write=False):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
self.storage.put('txi', self.txi)
self.storage.put('txo', self.txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('addr_history', self.history)
self.storage.put('spent_outpoints', self.spent_outpoints)
if write:
self.storage.write()
    def save_verified_tx(self, write=False):
        """Persist SPV-verified tx info under the 'verified_tx3' key."""
        with self.lock:
            verified_tx_to_save = {}
            for txid, tx_info in self.verified_tx.items():
                # stored as a plain tuple; TxMinedInfo.conf is recomputed on load
                verified_tx_to_save[txid] = (tx_info.height, tx_info.timestamp,
                                             tx_info.txpos, tx_info.header_hash, tx_info.flodata)
            self.storage.put('verified_tx3', verified_tx_to_save)
            if write:
                self.storage.write()

    def clear_history(self):
        """Reset all tx/history state in memory and persist the empty state."""
        with self.lock:
            with self.transaction_lock:
                self.txi = {}
                self.txo = {}
                self.tx_fees = {}
                self.spent_outpoints = defaultdict(dict)
                self.history = {}
                self.verified_tx = {}
                self.transactions = {}  # type: Dict[str, Transaction]
                self.save_transactions()
    def get_txpos(self, tx_hash):
        """Returns (height, txpos) tuple, even if the tx is unverified."""
        with self.lock:
            if tx_hash in self.verified_tx:
                info = self.verified_tx[tx_hash]
                return info.height, info.txpos
            elif tx_hash in self.unverified_tx:
                height = self.unverified_tx[tx_hash]
                # pseudo-heights <= 0 map to large 1e9-based sort keys so
                # unconfirmed txns sort after all mined ones
                return (height, 0) if height > 0 else ((1e9 - height), 0)
            else:
                # unknown/local tx: sorts after everything else
                return (1e9+1, 0)
def with_local_height_cached(func):
# get local height only once, as it's relatively expensive.
# take care that nested calls work as expected
def f(self, *args, **kwargs):
orig_val = getattr(self.threadlocal_cache, 'local_height', None)
self.threadlocal_cache.local_height = orig_val or self.get_local_height()
try:
return func(self, *args, **kwargs)
finally:
self.threadlocal_cache.local_height = orig_val
return f
    @with_local_height_cached
    def get_history(self, domain=None):
        """Return wallet history over the domain addresses as a list of
        (tx_hash, tx_mined_status, delta, running_balance), oldest first.
        Returns [] if the running balance does not reconcile to zero."""
        # get domain
        if domain is None:
            domain = self.history.keys()
        domain = set(domain)
        # 1. Get the history of each address in the domain, maintain the
        #    delta of a tx as the sum of its deltas on domain addresses
        tx_deltas = defaultdict(int)
        for addr in domain:
            h = self.get_address_history(addr)
            for tx_hash, height in h:
                delta = self.get_tx_delta(tx_hash, addr)
                if delta is None or tx_deltas[tx_hash] is None:
                    # None is contagious: an unknown delta poisons the tx total
                    tx_deltas[tx_hash] = None
                else:
                    tx_deltas[tx_hash] += delta
        # 2. create sorted history
        history = []
        for tx_hash in tx_deltas:
            delta = tx_deltas[tx_hash]
            tx_mined_status = self.get_tx_height(tx_hash)
            history.append((tx_hash, tx_mined_status, delta))
        history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
        # 3. add balance
        c, u, x = self.get_balance(domain)
        balance = c + u + x
        h2 = []
        for tx_hash, tx_mined_status, delta in history:
            h2.append((tx_hash, tx_mined_status, delta, balance))
            if balance is None or delta is None:
                balance = None
            else:
                balance -= delta
        h2.reverse()
        # fixme: this may happen if history is incomplete
        if balance not in [None, 0]:
            self.print_error("Error: history not synchronized")
            return []
        return h2
def _add_tx_to_local_history(self, txid):
with self.transaction_lock:
for addr in itertools.chain(self.txi.get(txid, []), self.txo.get(txid, [])):
cur_hist = self._history_local.get(addr, set())
cur_hist.add(txid)
self._history_local[addr] = cur_hist
self._mark_address_history_changed(addr)
    def _remove_tx_from_local_history(self, txid):
        # Inverse of _add_tx_to_local_history; tolerates txids that were
        # never indexed for an address (KeyError is swallowed).
        with self.transaction_lock:
            for addr in itertools.chain(self.txi.get(txid, []), self.txo.get(txid, [])):
                cur_hist = self._history_local.get(addr, set())
                try:
                    cur_hist.remove(txid)
                except KeyError:
                    pass
                else:
                    self._history_local[addr] = cur_hist

    def _mark_address_history_changed(self, addr: str) -> None:
        # history for this address changed, wake up coroutines:
        self._address_history_changed_events[addr].set()
        # clear event immediately so that coroutines can wait() for the next change:
        self._address_history_changed_events[addr].clear()

    async def wait_for_address_history_to_change(self, addr: str) -> None:
        """Wait until the server tells us about a new transaction related to addr.
        Unconfirmed and confirmed transactions are not distinguished, and so e.g. SPV
        is not taken into account.
        """
        assert self.is_mine(addr), "address needs to be is_mine to be watched"
        await self._address_history_changed_events[addr].wait()
    def add_unverified_tx(self, tx_hash, tx_height):
        """Record tx_hash at tx_height pending SPV verification; demote an
        already-verified tx back to unverified if it re-entered the mempool."""
        if tx_hash in self.verified_tx:
            if tx_height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT):
                with self.lock:
                    self.verified_tx.pop(tx_hash)
                if self.verifier:
                    self.verifier.remove_spv_proof_for_tx(tx_hash)
        else:
            with self.lock:
                # tx will be verified only if height > 0
                self.unverified_tx[tx_hash] = tx_height

    def remove_unverified_tx(self, tx_hash, tx_height):
        # Only drop the entry if the recorded height still matches the
        # caller's view (avoids racing a newer status update).
        with self.lock:
            new_height = self.unverified_tx.get(tx_hash)
            if new_height == tx_height:
                self.unverified_tx.pop(tx_hash, None)

    def add_verified_tx(self, tx_hash: str, info: TxMinedInfo):
        # Remove from the unverified map and add to the verified map
        with self.lock:
            self.unverified_tx.pop(tx_hash, None)
            self.verified_tx[tx_hash] = info
        tx_mined_status = self.get_tx_height(tx_hash)
        self.network.trigger_callback('verified', self, tx_hash, tx_mined_status)

    def get_unverified_txs(self):
        '''Returns a map from tx hash to transaction height'''
        with self.lock:
            return dict(self.unverified_tx)  # copy
    def undo_verifications(self, blockchain, height):
        '''Used by the verifier when a reorg has happened'''
        # Returns the set of txids whose verification was rolled back.
        txs = set()
        with self.lock:
            for tx_hash, info in list(self.verified_tx.items()):
                tx_height = info.height
                if tx_height >= height:
                    header = blockchain.read_header(tx_height)
                    # drop the proof if the new chain's header no longer matches
                    if not header or hash_header(header) != info.header_hash:
                        self.verified_tx.pop(tx_hash, None)
                        # NOTE: we should add these txns to self.unverified_tx,
                        # but with what height?
                        # If on the new fork after the reorg, the txn is at the
                        # same height, we will not get a status update for the
                        # address. If the txn is not mined or at a diff height,
                        # we should get a status update. Unless we put tx into
                        # unverified_tx, it will turn into local. So we put it
                        # into unverified_tx with the old height, and if we get
                        # a status update, that will overwrite it.
                        self.unverified_tx[tx_hash] = tx_height
                        txs.add(tx_hash)
        return txs
    def get_local_height(self):
        """ return last known height if we are offline """
        # threadlocal cache is populated by @with_local_height_cached
        cached_local_height = getattr(self.threadlocal_cache, 'local_height', None)
        if cached_local_height is not None:
            return cached_local_height
        return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)

    def get_tx_height(self, tx_hash: str) -> TxMinedInfo:
        """Return mined-status info for tx_hash; conf is recomputed fresh
        from the current local height."""
        with self.lock:
            if tx_hash in self.verified_tx:
                info = self.verified_tx[tx_hash]
                conf = max(self.get_local_height() - info.height + 1, 0)
                return info._replace(conf=conf)
            elif tx_hash in self.unverified_tx:
                height = self.unverified_tx[tx_hash]
                return TxMinedInfo(height=height, conf=0)
            else:
                # local transaction
                return TxMinedInfo(height=TX_HEIGHT_LOCAL, conf=0)
def get_flodata(self, tx_hash: str):
""" Given a transaction, returns flodata """
with self.lock:
if tx_hash in self.verified_tx:
info = self.verified_tx[tx_hash]
flodata = info[5]
return flodata
elif tx_hash in self.unverified_tx:
tx = self.transactions.get(tx_hash)
flodata = tx.flodata[5:]
return flodata
else:
# local transaction
tx = self.transactions.get(tx_hash)
flodata = tx.flodata[5:]
return flodata
    def set_up_to_date(self, up_to_date):
        """Record the sync state; when we become up-to-date, persist
        everything to disk."""
        with self.lock:
            self.up_to_date = up_to_date
        if self.network:
            self.network.notify('status')
        if up_to_date:
            self.save_transactions(write=True)
            # if the verifier is also up to date, persist that too;
            # otherwise it will persist its results when it finishes
            if self.verifier and self.verifier.is_up_to_date():
                self.save_verified_tx(write=True)

    def is_up_to_date(self):
        with self.lock: return self.up_to_date
    @with_transaction_lock
    def get_tx_delta(self, tx_hash, address):
        """effect of tx on address"""
        delta = 0
        # subtract the value of coins sent from address
        d = self.txi.get(tx_hash, {}).get(address, [])
        for n, v in d:
            delta -= v
        # add the value of the coins received at address
        d = self.txo.get(tx_hash, {}).get(address, [])
        for n, v, cb in d:
            delta += v
        return delta
@with_transaction_lock
def get_tx_value(self, txid):
"""effect of tx on the entire domain"""
delta = 0
for addr, d in self.txi.get(txid, {}).items():
for n, v in d:
delta -= v
for addr, d in self.txo.get(txid, {}).items():
for n, v, cb in d:
delta += v
return delta
    def get_wallet_delta(self, tx: Transaction):
        """ effect of tx on wallet """
        # Returns (is_relevant, is_mine, v, fee):
        #   is_relevant: tx touches at least one wallet address
        #   is_mine:     at least one input is ours
        #   v:           net value change for the wallet
        #   fee:         tx fee when computable from our data, else None
        is_relevant = False  # "related to wallet?"
        is_mine = False
        is_pruned = False    # some of our inputs' values are unknown
        is_partial = False   # inputs are a mix of ours and foreign
        v_in = v_out = v_out_mine = 0
        for txin in tx.inputs():
            addr = self.get_txin_address(txin)
            if self.is_mine(addr):
                is_mine = True
                is_relevant = True
                # look the spent value up among the prev tx's indexed outputs
                d = self.txo.get(txin['prevout_hash'], {}).get(addr, [])
                for n, v, cb in d:
                    if n == txin['prevout_n']:
                        value = v
                        break
                else:
                    value = None
                if value is None:
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for o in tx.outputs():
            v_out += o.value
            if self.is_mine(o.address):
                v_out_mine += o.value
                is_relevant = True
        if is_pruned:
            # some inputs are mine:
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                # no input is mine
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # some inputs are mine, but not all
                fee = None
            else:
                # all inputs are mine
                fee = v_in - v_out
        if not is_mine:
            fee = None
        return is_relevant, is_mine, v, fee
    def get_tx_fee(self, tx: Transaction) -> Optional[int]:
        """Fee for tx: computed from our own indexes when possible, else
        the server-reported fee; cached on the tx object once known."""
        if not tx:
            return None
        if hasattr(tx, '_cached_fee'):
            return tx._cached_fee
        with self.lock, self.transaction_lock:
            is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
            if fee is None:
                # fall back to the fee the server told us, if any
                txid = tx.txid()
                fee = self.tx_fees.get(txid)
        # only cache non-None, as None can still change while syncing
        if fee is not None:
            tx._cached_fee = fee
        return fee
    def get_addr_io(self, address):
        """Return (received, sent) for address:
        received: outpoint_str -> (height, value, is_coinbase)
        sent:     outpoint_str -> spending tx height
        """
        with self.lock, self.transaction_lock:
            h = self.get_address_history(address)
            received = {}
            sent = {}
            for tx_hash, height in h:
                l = self.txo.get(tx_hash, {}).get(address, [])
                for n, v, is_cb in l:
                    received[tx_hash + ':%d'%n] = (height, v, is_cb)
            for tx_hash, height in h:
                l = self.txi.get(tx_hash, {}).get(address, [])
                for txi, v in l:
                    sent[txi] = height
        return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
    @with_local_height_cached
    def get_addr_balance(self, address):
        """Return the balance of a FLO address:
        confirmed and matured, unconfirmed, unmatured
        """
        received, sent = self.get_addr_io(address)
        c = u = x = 0
        local_height = self.get_local_height()
        for txo, (tx_height, v, is_cb) in received.items():
            if is_cb and tx_height + COINBASE_MATURITY > local_height:
                # immature coinbase output
                x += v
            elif tx_height > 0:
                c += v
            else:
                u += v
            if txo in sent:
                # subtract spent coins from the bucket matching the
                # spending tx's height (mined vs mempool)
                if sent[txo] > 0:
                    c -= v
                else:
                    u -= v
        return c, u, x
@with_local_height_cached
def get_utxos(self, domain=None, excluded=None, mature=False, confirmed_only=False, nonlocal_only=False):
coins = []
if domain is None:
domain = self.get_addresses()
domain = set(domain)
if excluded:
domain = set(domain) - excluded
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos.values():
if confirmed_only and x['height'] <= 0:
continue
if nonlocal_only and x['height'] == TX_HEIGHT_LOCAL:
continue
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
domain = set(domain)
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
def is_used(self, address):
h = self.history.get(address,[])
return len(h) != 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
    def synchronize(self):
        # Intentionally a no-op at this layer; presumably overridden by
        # address-generating subclasses — confirm against wallet classes.
        pass
| 42.003417
| 109
| 0.569809
|
import threading
import asyncio
import itertools
from collections import defaultdict
from typing import TYPE_CHECKING, Dict, Optional
from . import bitcoin
from .bitcoin import COINBASE_MATURITY, TYPE_ADDRESS, TYPE_PUBKEY
from .util import PrintError, profiler, bfh, TxMinedInfo
from .transaction import Transaction, TxOutput
from .synchronizer import Synchronizer
from .verifier import SPV
from .blockchain import hash_header
from .i18n import _
if TYPE_CHECKING:
from .storage import WalletStorage
from .network import Network
# Sentinel "heights" for transactions not in a block; real block heights are
# positive, so code can test `height > 0` for "mined".
TX_HEIGHT_LOCAL = -2           # tx only exists locally; server does not know it
TX_HEIGHT_UNCONF_PARENT = -1   # in mempool, with an unconfirmed parent
TX_HEIGHT_UNCONFIRMED = 0      # in mempool
class AddTransactionException(Exception):
    """Base class for errors raised while adding a tx to the history."""
    pass


class UnrelatedTransactionException(AddTransactionException):
    """Raised by add_transaction when allow_unrelated is false and the tx
    touches no wallet address."""
    def __str__(self):
        return _("Transaction is unrelated to this wallet.")
class AddressSynchronizer(PrintError):
    def __init__(self, storage: 'WalletStorage'):
        """Rebuild the in-memory tx/history indexes from persisted storage."""
        self.storage = storage
        # network/synchronizer/verifier are attached later by start_network()
        self.network = None
        self.synchronizer = None
        self.verifier = None
        # self.lock guards verification state; self.transaction_lock guards
        # the tx indexes
        self.lock = threading.RLock()
        self.transaction_lock = threading.RLock()
        # address -> list of (tx_hash, height), as reported by the server
        self.history = storage.get('addr_history',{})
        # restore SPV-verified tx info from the 'verified_tx3' storage key
        verified_tx = storage.get('verified_tx3', {})
        self.verified_tx = {}
        for txid, (height, timestamp, txpos, header_hash, flodata) in verified_tx.items():
            # conf is left None here; it is recomputed by get_tx_height()
            self.verified_tx[txid] = TxMinedInfo(height=height,
                                                 conf=None,
                                                 timestamp=timestamp,
                                                 txpos=txpos,
                                                 header_hash=header_hash,
                                                 flodata=flodata)
        # tx_hash -> height, for txns seen but not yet SPV-verified
        self.unverified_tx = defaultdict(int)
        self.up_to_date = False
        # per-thread cache used by with_local_height_cached
        self.threadlocal_cache = threading.local()
        self.load_and_cleanup()
def with_transaction_lock(func):
def func_wrapper(self, *args, **kwargs):
with self.transaction_lock:
return func(self, *args, **kwargs)
return func_wrapper
    def load_and_cleanup(self):
        """Load persisted state, then repair any inconsistencies."""
        self.load_transactions()
        self.load_local_history()
        self.check_history()
        self.load_unverified_transactions()
        self.remove_local_transactions_we_dont_have()

    def is_mine(self, address):
        # An address is "ours" iff we track history for it.
        return address in self.history

    def get_addresses(self):
        return sorted(self.history.keys())

    def get_address_history(self, addr):
        """Return [(tx_hash, height)] for txns touching addr (local view)."""
        h = []
        with self.lock, self.transaction_lock:
            related_txns = self._history_local.get(addr, set())
            for tx_hash in related_txns:
                tx_height = self.get_tx_height(tx_hash).height
                h.append((tx_hash, tx_height))
        return h

    def get_address_history_len(self, addr: str) -> int:
        # cheap length-only variant of get_address_history
        return len(self._history_local.get(addr, ()))
def get_txin_address(self, txi):
addr = txi.get('address')
if addr and addr != "(pubkey)":
return addr
prevout_hash = txi.get('prevout_hash')
prevout_n = txi.get('prevout_n')
dd = self.txo.get(prevout_hash, {})
for addr, l in dd.items():
for n, v, is_cb in l:
if n == prevout_n:
return addr
return None
    def get_txout_address(self, txo: TxOutput):
        # TYPE_ADDRESS outputs carry the address directly; TYPE_PUBKEY ones
        # carry a pubkey, which is converted to its P2PKH address.
        if txo.type == TYPE_ADDRESS:
            addr = txo.address
        elif txo.type == TYPE_PUBKEY:
            addr = bitcoin.public_key_to_p2pkh(bfh(txo.address))
        else:
            addr = None  # unrecognized output type
        return addr
    def load_unverified_transactions(self):
        # seed the unverified map from the stored per-address history
        for addr, hist in self.history.items():
            for tx_hash, tx_height in hist:
                self.add_unverified_tx(tx_hash, tx_height)

    def start_network(self, network):
        """Attach to a network: start the synchronizer and SPV verifier."""
        self.network = network
        if self.network is not None:
            self.synchronizer = Synchronizer(self)
            self.verifier = SPV(self.network, self)

    def stop_threads(self, write_to_disk=True):
        """Stop synchronizer/verifier and (optionally) persist all state."""
        if self.network:
            if self.synchronizer:
                asyncio.run_coroutine_threadsafe(self.synchronizer.stop(), self.network.asyncio_loop)
                self.synchronizer = None
            if self.verifier:
                asyncio.run_coroutine_threadsafe(self.verifier.stop(), self.network.asyncio_loop)
                self.verifier = None
            self.storage.put('stored_height', self.get_local_height())
        if write_to_disk:
            self.save_transactions()
            self.save_verified_tx()
            self.storage.write()

    def add_address(self, address):
        """Start tracking address; subscribe the synchronizer to it."""
        if address not in self.history:
            self.history[address] = []
            self.set_up_to_date(False)
        if self.synchronizer:
            self.synchronizer.add(address)
    def get_conflicting_transactions(self, tx_hash, tx):
        """Return the set of txids in our history that spend an outpoint
        also spent by tx (tx itself excluded)."""
        conflicting_txns = set()
        with self.transaction_lock:
            for txin in tx.inputs():
                if txin['type'] == 'coinbase':
                    continue
                prevout_hash = txin['prevout_hash']
                prevout_n = txin['prevout_n']
                spending_tx_hash = self.spent_outpoints[prevout_hash].get(prevout_n)
                if spending_tx_hash is None:
                    continue
                # this outpoint has already been spent by spending_tx_hash
                assert spending_tx_hash in self.transactions
                conflicting_txns |= {spending_tx_hash}
            if tx_hash in conflicting_txns:
                # tx is already in history; it cannot also conflict with others
                if len(conflicting_txns) > 1:
                    raise Exception('Found conflicting transactions already in wallet history.')
                conflicting_txns -= {tx_hash}
            return conflicting_txns
def add_transaction(self, tx_hash, tx, allow_unrelated=False):
assert tx_hash, tx_hash
assert tx, tx
assert tx.is_complete()
nsaction_lock:
is_coinbase = tx.inputs()[0]['type'] == 'coinbase'
tx_height = self.get_tx_height(tx_hash).height
if not allow_unrelated:
is_mine = any([self.is_mine(self.get_txin_address(txin)) for txin in tx.inputs()])
is_for_me = any([self.is_mine(self.get_txout_address(txo)) for txo in tx.outputs()])
if not is_mine and not is_for_me:
raise UnrelatedTransactionException()
conflicting_txns = self.get_conflicting_transactions(tx_hash, tx)
if conflicting_txns:
existing_mempool_txn = any(
self.get_tx_height(tx_hash2).height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT)
for tx_hash2 in conflicting_txns)
existing_confirmed_txn = any(
self.get_tx_height(tx_hash2).height > 0
for tx_hash2 in conflicting_txns)
if existing_confirmed_txn and tx_height <= 0:
return False
if existing_mempool_txn and tx_height == TX_HEIGHT_LOCAL:
return False
to_remove = set()
to_remove |= conflicting_txns
for conflicting_tx_hash in conflicting_txns:
to_remove |= self.get_depending_transactions(conflicting_tx_hash)
for tx_hash2 in to_remove:
self.remove_transaction(tx_hash2)
def add_value_from_prev_output():
dd = self.txo.get(prevout_hash, {})
for addr, outputs in dd.items():
for n, v, is_cb in outputs:
if n == prevout_n:
if addr and self.is_mine(addr):
if d.get(addr) is None:
d[addr] = set()
d[addr].add((ser, v))
return
self.txi[tx_hash] = d = {}
for txi in tx.inputs():
if txi['type'] == 'coinbase':
continue
prevout_hash = txi['prevout_hash']
prevout_n = txi['prevout_n']
ser = prevout_hash + ':%d' % prevout_n
self.spent_outpoints[prevout_hash][prevout_n] = tx_hash
add_value_from_prev_output()
self.txo[tx_hash] = d = {}
for n, txo in enumerate(tx.outputs()):
v = txo[2]
ser = tx_hash + ':%d'%n
addr = self.get_txout_address(txo)
if addr and self.is_mine(addr):
if d.get(addr) is None:
d[addr] = []
d[addr].append((n, v, is_coinbase))
next_tx = self.spent_outpoints[tx_hash].get(n)
if next_tx is not None:
dd = self.txi.get(next_tx, {})
if dd.get(addr) is None:
dd[addr] = set()
if (ser, v) not in dd[addr]:
dd[addr].add((ser, v))
self._add_tx_to_local_history(next_tx)
self._add_tx_to_local_history(tx_hash)
self.transactions[tx_hash] = tx
return True
def remove_transaction(self, tx_hash):
def remove_from_spent_outpoints():
if tx is not None:
for txin in tx.inputs():
if txin['type'] == 'coinbase':
continue
prevout_hash = txin['prevout_hash']
prevout_n = txin['prevout_n']
self.spent_outpoints[prevout_hash].pop(prevout_n, None)
if not self.spent_outpoints[prevout_hash]:
self.spent_outpoints.pop(prevout_hash)
else:
for prevout_hash, d in list(self.spent_outpoints.items()):
for prevout_n, spending_txid in d.items():
if spending_txid == tx_hash:
self.spent_outpoints[prevout_hash].pop(prevout_n, None)
if not self.spent_outpoints[prevout_hash]:
self.spent_outpoints.pop(prevout_hash)
if not self.spent_outpoints[tx_hash]:
self.spent_outpoints.pop(tx_hash)
with self.transaction_lock:
self.print_error("removing tx from history", tx_hash)
tx = self.transactions.pop(tx_hash, None)
remove_from_spent_outpoints()
self._remove_tx_from_local_history(tx_hash)
self.txi.pop(tx_hash, None)
self.txo.pop(tx_hash, None)
def get_depending_transactions(self, tx_hash):
children = set()
for other_hash in self.spent_outpoints[tx_hash].values():
children.add(other_hash)
children |= self.get_depending_transactions(other_hash)
return children
def receive_tx_callback(self, tx_hash, tx, tx_height):
self.add_unverified_tx(tx_hash, tx_height)
self.add_transaction(tx_hash, tx, allow_unrelated=True)
def receive_history_callback(self, addr, hist, tx_fees):
with self.lock:
old_hist = self.get_address_history(addr)
for tx_hash, height in old_hist:
if (tx_hash, height) not in hist:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx.pop(tx_hash, None)
if self.verifier:
self.verifier.remove_spv_proof_for_tx(tx_hash)
self.history[addr] = hist
for tx_hash, tx_height in hist:
self.add_unverified_tx(tx_hash, tx_height)
tx = self.transactions.get(tx_hash)
if tx is None:
continue
self.add_transaction(tx_hash, tx, allow_unrelated=True)
self.tx_fees.update(tx_fees)
@profiler
def load_transactions(self):
self.txi = self.storage.get('txi', {})
for txid, d in list(self.txi.items()):
for addr, lst in d.items():
self.txi[txid][addr] = set([tuple(x) for x in lst])
self.txo = self.storage.get('txo', {})
self.tx_fees = self.storage.get('tx_fees', {})
tx_list = self.storage.get('transactions', {})
self.transactions = {}
for tx_hash, raw in tx_list.items():
tx = Transaction(raw)
self.transactions[tx_hash] = tx
if self.txi.get(tx_hash) is None and self.txo.get(tx_hash) is None:
self.print_error("removing unreferenced tx", tx_hash)
self.transactions.pop(tx_hash)
_spent_outpoints = self.storage.get('spent_outpoints', {})
self.spent_outpoints = defaultdict(dict)
for prevout_hash, d in _spent_outpoints.items():
for prevout_n_str, spending_txid in d.items():
prevout_n = int(prevout_n_str)
if spending_txid not in self.transactions:
continue
self.spent_outpoints[prevout_hash][prevout_n] = spending_txid
@profiler
def load_local_history(self):
self._history_local = {}
self._address_history_changed_events = defaultdict(asyncio.Event)
for txid in itertools.chain(self.txi, self.txo):
self._add_tx_to_local_history(txid)
@profiler
def check_history(self):
save = False
hist_addrs_mine = list(filter(lambda k: self.is_mine(k), self.history.keys()))
hist_addrs_not_mine = list(filter(lambda k: not self.is_mine(k), self.history.keys()))
for addr in hist_addrs_not_mine:
self.history.pop(addr)
save = True
for addr in hist_addrs_mine:
hist = self.history[addr]
for tx_hash, tx_height in hist:
if self.txi.get(tx_hash) or self.txo.get(tx_hash):
continue
tx = self.transactions.get(tx_hash)
if tx is not None:
self.add_transaction(tx_hash, tx, allow_unrelated=True)
save = True
if save:
self.save_transactions()
def remove_local_transactions_we_dont_have(self):
txid_set = set(self.txi) | set(self.txo)
for txid in txid_set:
tx_height = self.get_tx_height(txid).height
if tx_height == TX_HEIGHT_LOCAL and txid not in self.transactions:
self.remove_transaction(txid)
@profiler
def save_transactions(self, write=False):
with self.transaction_lock:
tx = {}
for k,v in self.transactions.items():
tx[k] = str(v)
self.storage.put('transactions', tx)
self.storage.put('txi', self.txi)
self.storage.put('txo', self.txo)
self.storage.put('tx_fees', self.tx_fees)
self.storage.put('addr_history', self.history)
self.storage.put('spent_outpoints', self.spent_outpoints)
if write:
self.storage.write()
def save_verified_tx(self, write=False):
with self.lock:
verified_tx_to_save = {}
for txid, tx_info in self.verified_tx.items():
verified_tx_to_save[txid] = (tx_info.height, tx_info.timestamp,
tx_info.txpos, tx_info.header_hash, tx_info.flodata)
self.storage.put('verified_tx3', verified_tx_to_save)
if write:
self.storage.write()
def clear_history(self):
with self.lock:
with self.transaction_lock:
self.txi = {}
self.txo = {}
self.tx_fees = {}
self.spent_outpoints = defaultdict(dict)
self.history = {}
self.verified_tx = {}
self.transactions = {}
self.save_transactions()
def get_txpos(self, tx_hash):
with self.lock:
if tx_hash in self.verified_tx:
info = self.verified_tx[tx_hash]
return info.height, info.txpos
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return (height, 0) if height > 0 else ((1e9 - height), 0)
else:
return (1e9+1, 0)
def with_local_height_cached(func):
# take care that nested calls work as expected
def f(self, *args, **kwargs):
orig_val = getattr(self.threadlocal_cache, 'local_height', None)
self.threadlocal_cache.local_height = orig_val or self.get_local_height()
try:
return func(self, *args, **kwargs)
finally:
self.threadlocal_cache.local_height = orig_val
return f
@with_local_height_cached
def get_history(self, domain=None):
# get domain
if domain is None:
domain = self.history.keys()
domain = set(domain)
# 1. Get the history of each address in the domain, maintain the
# delta of a tx as the sum of its deltas on domain addresses
tx_deltas = defaultdict(int)
for addr in domain:
h = self.get_address_history(addr)
for tx_hash, height in h:
delta = self.get_tx_delta(tx_hash, addr)
if delta is None or tx_deltas[tx_hash] is None:
tx_deltas[tx_hash] = None
else:
tx_deltas[tx_hash] += delta
# 2. create sorted history
history = []
for tx_hash in tx_deltas:
delta = tx_deltas[tx_hash]
tx_mined_status = self.get_tx_height(tx_hash)
history.append((tx_hash, tx_mined_status, delta))
history.sort(key = lambda x: self.get_txpos(x[0]), reverse=True)
# 3. add balance
c, u, x = self.get_balance(domain)
balance = c + u + x
h2 = []
for tx_hash, tx_mined_status, delta in history:
h2.append((tx_hash, tx_mined_status, delta, balance))
if balance is None or delta is None:
balance = None
else:
balance -= delta
h2.reverse()
# fixme: this may happen if history is incomplete
if balance not in [None, 0]:
self.print_error("Error: history not synchronized")
return []
return h2
def _add_tx_to_local_history(self, txid):
with self.transaction_lock:
for addr in itertools.chain(self.txi.get(txid, []), self.txo.get(txid, [])):
cur_hist = self._history_local.get(addr, set())
cur_hist.add(txid)
self._history_local[addr] = cur_hist
self._mark_address_history_changed(addr)
def _remove_tx_from_local_history(self, txid):
with self.transaction_lock:
for addr in itertools.chain(self.txi.get(txid, []), self.txo.get(txid, [])):
cur_hist = self._history_local.get(addr, set())
try:
cur_hist.remove(txid)
except KeyError:
pass
else:
self._history_local[addr] = cur_hist
def _mark_address_history_changed(self, addr: str) -> None:
# history for this address changed, wake up coroutines:
self._address_history_changed_events[addr].set()
# clear event immediately so that coroutines can wait() for the next change:
self._address_history_changed_events[addr].clear()
async def wait_for_address_history_to_change(self, addr: str) -> None:
assert self.is_mine(addr), "address needs to be is_mine to be watched"
await self._address_history_changed_events[addr].wait()
def add_unverified_tx(self, tx_hash, tx_height):
if tx_hash in self.verified_tx:
if tx_height in (TX_HEIGHT_UNCONFIRMED, TX_HEIGHT_UNCONF_PARENT):
with self.lock:
self.verified_tx.pop(tx_hash)
if self.verifier:
self.verifier.remove_spv_proof_for_tx(tx_hash)
else:
with self.lock:
# tx will be verified only if height > 0
self.unverified_tx[tx_hash] = tx_height
def remove_unverified_tx(self, tx_hash, tx_height):
with self.lock:
new_height = self.unverified_tx.get(tx_hash)
if new_height == tx_height:
self.unverified_tx.pop(tx_hash, None)
def add_verified_tx(self, tx_hash: str, info: TxMinedInfo):
# Remove from the unverified map and add to the verified map
with self.lock:
self.unverified_tx.pop(tx_hash, None)
self.verified_tx[tx_hash] = info
tx_mined_status = self.get_tx_height(tx_hash)
self.network.trigger_callback('verified', self, tx_hash, tx_mined_status)
def get_unverified_txs(self):
with self.lock:
return dict(self.unverified_tx) # copy
def undo_verifications(self, blockchain, height):
txs = set()
with self.lock:
for tx_hash, info in list(self.verified_tx.items()):
tx_height = info.height
if tx_height >= height:
header = blockchain.read_header(tx_height)
if not header or hash_header(header) != info.header_hash:
self.verified_tx.pop(tx_hash, None)
# NOTE: we should add these txns to self.unverified_tx,
# but with what height?
# If on the new fork after the reorg, the txn is at the
# same height, we will not get a status update for the
# address. If the txn is not mined or at a diff height,
# we should get a status update. Unless we put tx into
# unverified_tx, it will turn into local. So we put it
# into unverified_tx with the old height, and if we get
# a status update, that will overwrite it.
self.unverified_tx[tx_hash] = tx_height
txs.add(tx_hash)
return txs
def get_local_height(self):
cached_local_height = getattr(self.threadlocal_cache, 'local_height', None)
if cached_local_height is not None:
return cached_local_height
return self.network.get_local_height() if self.network else self.storage.get('stored_height', 0)
def get_tx_height(self, tx_hash: str) -> TxMinedInfo:
with self.lock:
if tx_hash in self.verified_tx:
info = self.verified_tx[tx_hash]
conf = max(self.get_local_height() - info.height + 1, 0)
return info._replace(conf=conf)
elif tx_hash in self.unverified_tx:
height = self.unverified_tx[tx_hash]
return TxMinedInfo(height=height, conf=0)
else:
# local transaction
return TxMinedInfo(height=TX_HEIGHT_LOCAL, conf=0)
def get_flodata(self, tx_hash: str):
with self.lock:
if tx_hash in self.verified_tx:
info = self.verified_tx[tx_hash]
flodata = info[5]
return flodata
elif tx_hash in self.unverified_tx:
tx = self.transactions.get(tx_hash)
flodata = tx.flodata[5:]
return flodata
else:
# local transaction
tx = self.transactions.get(tx_hash)
flodata = tx.flodata[5:]
return flodata
def set_up_to_date(self, up_to_date):
with self.lock:
self.up_to_date = up_to_date
if self.network:
self.network.notify('status')
if up_to_date:
self.save_transactions(write=True)
# if the verifier is also up to date, persist that too;
# otherwise it will persist its results when it finishes
if self.verifier and self.verifier.is_up_to_date():
self.save_verified_tx(write=True)
def is_up_to_date(self):
with self.lock: return self.up_to_date
@with_transaction_lock
def get_tx_delta(self, tx_hash, address):
delta = 0
# substract the value of coins sent from address
d = self.txi.get(tx_hash, {}).get(address, [])
for n, v in d:
delta -= v
# add the value of the coins received at address
d = self.txo.get(tx_hash, {}).get(address, [])
for n, v, cb in d:
delta += v
return delta
@with_transaction_lock
def get_tx_value(self, txid):
delta = 0
for addr, d in self.txi.get(txid, {}).items():
for n, v in d:
delta -= v
for addr, d in self.txo.get(txid, {}).items():
for n, v, cb in d:
delta += v
return delta
    def get_wallet_delta(self, tx: Transaction):
        """Analyse *tx* from this wallet's point of view.

        Returns ``(is_relevant, is_mine, v, fee)`` where ``is_relevant``
        means the tx touches the wallet, ``is_mine`` means at least one
        input is ours, ``v`` is the net value change for the wallet, and
        ``fee`` is the tx fee when it can be determined (else ``None``).
        """
        is_relevant = False  # "related to wallet?"
        is_mine = False      # at least one input is ours
        is_pruned = False    # we own an input but no longer have its funding output
        is_partial = False   # inputs are a mix of ours and someone else's
        v_in = v_out = v_out_mine = 0
        for txin in tx.inputs():
            addr = self.get_txin_address(txin)
            if self.is_mine(addr):
                is_mine = True
                is_relevant = True
                # look up the output this input spends to learn its value
                d = self.txo.get(txin['prevout_hash'], {}).get(addr, [])
                for n, v, cb in d:
                    if n == txin['prevout_n']:
                        value = v
                        break
                else:
                    value = None
                if value is None:
                    is_pruned = True
                else:
                    v_in += value
            else:
                is_partial = True
        if not is_mine:
            is_partial = False
        for o in tx.outputs():
            v_out += o.value
            if self.is_mine(o.address):
                v_out_mine += o.value
                is_relevant = True
        if is_pruned:
            # some inputs are mine:
            fee = None
            if is_mine:
                v = v_out_mine - v_out
            else:
                # no input is mine
                v = v_out_mine
        else:
            v = v_out_mine - v_in
            if is_partial:
                # some inputs are mine, but not all
                fee = None
            else:
                # all inputs are mine
                fee = v_in - v_out
        if not is_mine:
            fee = None
        return is_relevant, is_mine, v, fee
def get_tx_fee(self, tx: Transaction) -> Optional[int]:
if not tx:
return None
if hasattr(tx, '_cached_fee'):
return tx._cached_fee
with self.lock, self.transaction_lock:
is_relevant, is_mine, v, fee = self.get_wallet_delta(tx)
if fee is None:
txid = tx.txid()
fee = self.tx_fees.get(txid)
# only cache non-None, as None can still change while syncing
if fee is not None:
tx._cached_fee = fee
return fee
def get_addr_io(self, address):
with self.lock, self.transaction_lock:
h = self.get_address_history(address)
received = {}
sent = {}
for tx_hash, height in h:
l = self.txo.get(tx_hash, {}).get(address, [])
for n, v, is_cb in l:
received[tx_hash + ':%d'%n] = (height, v, is_cb)
for tx_hash, height in h:
l = self.txi.get(tx_hash, {}).get(address, [])
for txi, v in l:
sent[txi] = height
return received, sent
def get_addr_utxo(self, address):
coins, spent = self.get_addr_io(address)
for txi in spent:
coins.pop(txi)
out = {}
for txo, v in coins.items():
tx_height, value, is_cb = v
prevout_hash, prevout_n = txo.split(':')
x = {
'address':address,
'value':value,
'prevout_n':int(prevout_n),
'prevout_hash':prevout_hash,
'height':tx_height,
'coinbase':is_cb
}
out[txo] = x
return out
# return the total amount ever received by an address
def get_addr_received(self, address):
received, sent = self.get_addr_io(address)
return sum([v for height, v, is_cb in received.values()])
@with_local_height_cached
def get_addr_balance(self, address):
received, sent = self.get_addr_io(address)
c = u = x = 0
local_height = self.get_local_height()
for txo, (tx_height, v, is_cb) in received.items():
if is_cb and tx_height + COINBASE_MATURITY > local_height:
x += v
elif tx_height > 0:
c += v
else:
u += v
if txo in sent:
if sent[txo] > 0:
c -= v
else:
u -= v
return c, u, x
@with_local_height_cached
def get_utxos(self, domain=None, excluded=None, mature=False, confirmed_only=False, nonlocal_only=False):
coins = []
if domain is None:
domain = self.get_addresses()
domain = set(domain)
if excluded:
domain = set(domain) - excluded
for addr in domain:
utxos = self.get_addr_utxo(addr)
for x in utxos.values():
if confirmed_only and x['height'] <= 0:
continue
if nonlocal_only and x['height'] == TX_HEIGHT_LOCAL:
continue
if mature and x['coinbase'] and x['height'] + COINBASE_MATURITY > self.get_local_height():
continue
coins.append(x)
continue
return coins
def get_balance(self, domain=None):
if domain is None:
domain = self.get_addresses()
domain = set(domain)
cc = uu = xx = 0
for addr in domain:
c, u, x = self.get_addr_balance(addr)
cc += c
uu += u
xx += x
return cc, uu, xx
def is_used(self, address):
h = self.history.get(address,[])
return len(h) != 0
def is_empty(self, address):
c, u, x = self.get_addr_balance(address)
return c+u+x == 0
    def synchronize(self):
        # No-op in this base implementation; presumably overridden by
        # deterministic-wallet subclasses to derive new addresses — confirm
        # against the subclasses before relying on this.
        pass
| true
| true
|
1c4283ea17546442c7e578c2a51d2bd4b6ed76e0
| 2,080
|
bzl
|
Python
|
rules/autogen.bzl
|
GregAC/opentitan
|
40b607b776d7b10cfc2899cc0d724d00dc0c91a2
|
[
"Apache-2.0"
] | 1
|
2021-11-29T14:56:50.000Z
|
2021-11-29T14:56:50.000Z
|
rules/autogen.bzl
|
GregAC/opentitan
|
40b607b776d7b10cfc2899cc0d724d00dc0c91a2
|
[
"Apache-2.0"
] | null | null | null |
rules/autogen.bzl
|
GregAC/opentitan
|
40b607b776d7b10cfc2899cc0d724d00dc0c91a2
|
[
"Apache-2.0"
] | null | null | null |
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""Autogeneration rules for OpenTitan.
The rules in this file are for autogenerating various file resources
used by the OpenTitan build, such as register definition files generated
from hjson register descriptions.
"""
def _hjson_header(ctx):
    """Implementation: run the regtool script over the hjson sources to
    emit a single C register header named after the target."""
    out = ctx.actions.declare_file("{}.h".format(ctx.label.name))
    args = ["-D", "-o", out.path] + [src.path for src in ctx.files.srcs]
    ctx.actions.run(
        outputs = [out],
        inputs = ctx.files.srcs + ctx.files._tool,
        arguments = args,
        executable = ctx.files._tool[0],
    )
    # Expose the header both as a compilation context (for cc_* deps)
    # and as the default output of the target.
    compilation = cc_common.create_compilation_context(
        includes = depset([out.dirname]),
        headers = depset([out]),
    )
    return [
        CcInfo(compilation_context = compilation),
        DefaultInfo(files = depset([out])),
    ]
# Public rule: generate a C register-definition header from .hjson register
# descriptions using //util:regtool.py (the hidden "_tool" attribute).
autogen_hjson_header = rule(
    implementation = _hjson_header,
    attrs = {
        "srcs": attr.label_list(allow_files = True),
        "_tool": attr.label(default = "//util:regtool.py", allow_files = True),
    },
)
def _chip_info(ctx):
    """Implementation: generate chip_info.h, embedding the OpenTitan
    version read from the version file."""
    out = ctx.actions.declare_file("chip_info.h")
    ctx.actions.run(
        outputs = [out],
        inputs = ctx.files.version + ctx.files._tool,
        arguments = [
            "-o",
            out.dirname,
            "--ot_version_file",
            ctx.files.version[0].path,
        ],
        executable = ctx.files._tool[0],
    )
    # Same dual exposure as _hjson_header: usable as a cc dep and as a file.
    compilation = cc_common.create_compilation_context(
        includes = depset([out.dirname]),
        headers = depset([out]),
    )
    return [
        CcInfo(compilation_context = compilation),
        DefaultInfo(files = depset([out])),
    ]
# Public rule: generate chip_info.h from the OpenTitan version file via
# //util:rom_chip_info.py (hidden "_tool" attribute).
autogen_chip_info = rule(
    implementation = _chip_info,
    attrs = {
        "version": attr.label(default = "//util:ot_version_file", allow_files = True),
        "_tool": attr.label(default = "//util:rom_chip_info.py", allow_files = True),
    },
)
| 30.588235
| 86
| 0.608173
|
def _hjson_header(ctx):
header = ctx.actions.declare_file("{}.h".format(ctx.label.name))
ctx.actions.run(
outputs = [header],
inputs = ctx.files.srcs + ctx.files._tool,
arguments = [
"-D",
"-o",
header.path,
] + [src.path for src in ctx.files.srcs],
executable = ctx.files._tool[0],
)
return [
CcInfo(compilation_context = cc_common.create_compilation_context(
includes = depset([header.dirname]),
headers = depset([header]),
)),
DefaultInfo(files = depset([header])),
]
autogen_hjson_header = rule(
implementation = _hjson_header,
attrs = {
"srcs": attr.label_list(allow_files = True),
"_tool": attr.label(default = "//util:regtool.py", allow_files = True),
},
)
def _chip_info(ctx):
header = ctx.actions.declare_file("chip_info.h")
ctx.actions.run(
outputs = [header],
inputs = ctx.files.version + ctx.files._tool,
arguments = [
"-o",
header.dirname,
"--ot_version_file",
ctx.files.version[0].path,
],
executable = ctx.files._tool[0],
)
return [
CcInfo(compilation_context = cc_common.create_compilation_context(
includes = depset([header.dirname]),
headers = depset([header]),
)),
DefaultInfo(files = depset([header])),
]
autogen_chip_info = rule(
implementation = _chip_info,
attrs = {
"version": attr.label(default = "//util:ot_version_file", allow_files = True),
"_tool": attr.label(default = "//util:rom_chip_info.py", allow_files = True),
},
)
| true
| true
|
1c4285d039fa87dc0c7b7b34e87c167d6c330c37
| 39,015
|
py
|
Python
|
src/sage/rings/fraction_field.py
|
fchapoton/sage
|
765c5cb3e24dd134708eca97e4c52e0221cd94ba
|
[
"BSL-1.0"
] | 1
|
2020-08-30T04:27:27.000Z
|
2020-08-30T04:27:27.000Z
|
src/sage/rings/fraction_field.py
|
fchapoton/sage
|
765c5cb3e24dd134708eca97e4c52e0221cd94ba
|
[
"BSL-1.0"
] | null | null | null |
src/sage/rings/fraction_field.py
|
fchapoton/sage
|
765c5cb3e24dd134708eca97e4c52e0221cd94ba
|
[
"BSL-1.0"
] | 1
|
2020-07-23T10:40:14.000Z
|
2020-07-23T10:40:14.000Z
|
# -*- coding: utf-8 -*-
r"""
Fraction Field of Integral Domains
AUTHORS:
- William Stein (with input from David Joyner, David Kohel, and Joe
Wetherell)
- Burcin Erocal
- Julian Rüth (2017-06-27): embedding into the field of fractions and its
section
EXAMPLES:
Quotienting is a constructor for an element of the fraction field::
sage: R.<x> = QQ[]
sage: (x^2-1)/(x+1)
x - 1
sage: parent((x^2-1)/(x+1))
Fraction Field of Univariate Polynomial Ring in x over Rational Field
The GCD is not taken (since it doesn't converge sometimes) in the
inexact case::
sage: Z.<z> = CC[]
sage: I = CC.gen()
sage: (1+I+z)/(z+0.1*I)
(z + 1.00000000000000 + I)/(z + 0.100000000000000*I)
sage: (1+I*z)/(z+1.1)
(I*z + 1.00000000000000)/(z + 1.10000000000000)
TESTS::
sage: F = FractionField(IntegerRing())
sage: F == loads(dumps(F))
True
::
sage: F = FractionField(PolynomialRing(RationalField(),'x'))
sage: F == loads(dumps(F))
True
::
sage: F = FractionField(PolynomialRing(IntegerRing(),'x'))
sage: F == loads(dumps(F))
True
::
sage: F = FractionField(PolynomialRing(RationalField(),2,'x'))
sage: F == loads(dumps(F))
True
"""
# ****************************************************************************
#
# Sage: System for Algebra and Geometry Experimentation
#
# Copyright (C) 2005 William Stein <wstein@gmail.com>
# 2017 Julian Rüth <julian.rueth@fsfe.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# http://www.gnu.org/licenses/
# ****************************************************************************
from __future__ import absolute_import
from six.moves import range
import six
from . import ring
from . import fraction_field_element
import sage.misc.latex as latex
from sage.misc.cachefunc import cached_method
from sage.rings.integer_ring import ZZ
from sage.structure.richcmp import richcmp
from sage.structure.parent import Parent
from sage.structure.coerce import py_scalar_to_element
from sage.structure.coerce_maps import CallableConvertMap, DefaultConvertMap_unique
from sage.categories.basic import QuotientFields, Rings
from sage.categories.map import Section
def FractionField(R, names=None):
    """
    Create the fraction field of the integral domain ``R``.

    INPUT:

    - ``R`` -- an integral domain

    - ``names`` -- ignored

    EXAMPLES:

    We create some example fraction fields::

        sage: FractionField(IntegerRing())
        Rational Field
        sage: FractionField(PolynomialRing(RationalField(),'x'))
        Fraction Field of Univariate Polynomial Ring in x over Rational Field
        sage: FractionField(PolynomialRing(IntegerRing(),'x'))
        Fraction Field of Univariate Polynomial Ring in x over Integer Ring
        sage: FractionField(PolynomialRing(RationalField(),2,'x'))
        Fraction Field of Multivariate Polynomial Ring in x0, x1 over Rational Field

    Dividing elements often implicitly creates elements of the fraction
    field::

        sage: x = PolynomialRing(RationalField(), 'x').gen()
        sage: f = x/(x+1)
        sage: g = x**3/(x+1)
        sage: f/g
        1/x^2
        sage: g/f
        x^2

    The input must be an integral domain::

        sage: Frac(Integers(4))
        Traceback (most recent call last):
        ...
        TypeError: R must be an integral domain.
    """
    if not ring.is_Ring(R):
        raise TypeError("R must be a ring")
    if not R.is_integral_domain():
        raise TypeError("R must be an integral domain.")
    # delegate to the ring so rings with a special fraction field
    # implementation (e.g. ZZ -> QQ) can return it directly
    return R.fraction_field()
def is_FractionField(x):
    """
    Test whether or not ``x`` inherits from :class:`FractionField_generic`.

    Note that ``QQ`` is *not* a :class:`FractionField_generic`, so this
    returns ``False`` for it even though it is a fraction field.

    EXAMPLES::

        sage: from sage.rings.fraction_field import is_FractionField
        sage: is_FractionField(Frac(ZZ['x']))
        True
        sage: is_FractionField(QQ)
        False
    """
    return isinstance(x, FractionField_generic)
class FractionField_generic(ring.Field):
"""
The fraction field of an integral domain.
"""
def __init__(self, R,
element_class=fraction_field_element.FractionFieldElement,
category=QuotientFields()):
"""
Create the fraction field of the integral domain ``R``.
INPUT:
- ``R`` -- an integral domain
EXAMPLES::
sage: Frac(QQ['x'])
Fraction Field of Univariate Polynomial Ring in x over Rational Field
sage: Frac(QQ['x,y']).variable_names()
('x', 'y')
sage: category(Frac(QQ['x']))
Category of quotient fields
"""
self._R = R
self._element_class = element_class
cat = category
if self in Rings().Infinite():
cat = cat.Infinite()
elif self in Rings().Finite():
cat = cat.Finite()
Parent.__init__(self, base=R, names=R._names, category=cat)
def __reduce__(self):
"""
For pickling.
TESTS::
sage: K = Frac(QQ['x'])
sage: loads(dumps(K)) is K
True
"""
return FractionField, (self._R,)
def _coerce_map_from_(self, S):
"""
Return ``True`` if elements of ``S`` can be coerced into this
fraction field.
This fraction field has coercions from:
- itself
- any fraction field where the base ring coerces to the base
ring of this fraction field
- any ring that coerces to the base ring of this fraction field
EXAMPLES::
sage: F = QQ['x,y'].fraction_field()
sage: F.has_coerce_map_from(F) # indirect doctest
True
::
sage: F.has_coerce_map_from(ZZ['x,y'].fraction_field())
True
::
sage: F.has_coerce_map_from(ZZ['x,y,z'].fraction_field())
False
::
sage: F.has_coerce_map_from(ZZ)
True
Test coercions::
sage: F.coerce(1)
1
sage: F.coerce(int(1))
1
sage: F.coerce(1/2)
1/2
::
sage: K = ZZ['x,y'].fraction_field()
sage: x,y = K.gens()
sage: F.coerce(F.gen())
x
sage: F.coerce(x)
x
sage: F.coerce(x/y)
x/y
sage: L = ZZ['x'].fraction_field()
sage: K.coerce(L.gen())
x
We demonstrate that :trac:`7958` is resolved in the case of
number fields::
sage: _.<x> = ZZ[]
sage: K.<a> = NumberField(x^5-3*x^4+2424*x^3+2*x-232)
sage: R = K.ring_of_integers()
sage: S.<y> = R[]
sage: F = FractionField(S)
sage: F(1/a)
(a^4 - 3*a^3 + 2424*a^2 + 2)/232
Some corner cases have been known to fail in the past (:trac:`5917`)::
sage: F1 = FractionField( QQ['a'] )
sage: R12 = F1['x','y']
sage: R12('a')
a
sage: F1(R12(F1('a')))
a
sage: F2 = FractionField( QQ['a','b'] )
sage: R22 = F2['x','y']
sage: R22('a')
a
sage: F2(R22(F2('a')))
a
Coercion from Laurent polynomials now works (:trac:`15345`)::
sage: R = LaurentPolynomialRing(ZZ, 'x')
sage: T = PolynomialRing(ZZ, 'x')
sage: R.gen() + FractionField(T).gen()
2*x
sage: 1/(R.gen() + 1)
1/(x + 1)
sage: R = LaurentPolynomialRing(ZZ, 'x,y')
sage: FF = FractionField(PolynomialRing(ZZ, 'x,y'))
sage: prod(R.gens()) + prod(FF.gens())
2*x*y
sage: 1/(R.gen(0) + R.gen(1))
1/(x + y)
Coercion from a localization::
sage: R.<x> = ZZ[]
sage: L = Localization(R, (x**2 + 1,7))
sage: F = L.fraction_field()
sage: f = F.coerce_map_from(L); f
Coercion map:
From: Univariate Polynomial Ring in x over Integer Ring localized at (7, x^2 + 1)
To: Fraction Field of Univariate Polynomial Ring in x over Integer Ring
sage: f(L(1/7)) == 1/7
True
"""
from sage.rings.rational_field import QQ
from sage.rings.number_field.number_field_base import NumberField
from sage.rings.polynomial.laurent_polynomial_ring import \
LaurentPolynomialRing_generic
if S is self._R:
parent = self._R.Hom(self)
return parent.__make_element_class__(FractionFieldEmbedding)(self._R, self, category=parent.homset_category())
def wrapper(x):
return self._element_class(self, x.numerator(), x.denominator())
# The case ``S`` being `\QQ` requires special handling since `\QQ` is
# not implemented as a ``FractionField_generic``.
if S is QQ and self._R.has_coerce_map_from(ZZ):
return CallableConvertMap(S, self, wrapper, parent_as_first_arg=False)
# special treatment for localizations
from sage.rings.localization import Localization
if isinstance(S, Localization):
parent = S.Hom(self)
return parent.__make_element_class__(FractionFieldEmbedding)(S, self, category=parent.homset_category())
# Number fields also need to be handled separately.
if isinstance(S, NumberField):
return CallableConvertMap(S, self,
self._number_field_to_frac_of_ring_of_integers,
parent_as_first_arg=False)
# special treatment for LaurentPolynomialRings
if isinstance(S, LaurentPolynomialRing_generic):
def converter(x, y=None):
if y is None:
return self._element_class(self, *x._fraction_pair())
xnum, xden = x._fraction_pair()
ynum, yden = y._fraction_pair()
return self._element_class(self, xnum * yden, xden * ynum)
return CallableConvertMap(S, self, converter, parent_as_first_arg=False)
if (isinstance(S, FractionField_generic) and
self._R.has_coerce_map_from(S.ring())):
return CallableConvertMap(S, self, wrapper, parent_as_first_arg=False)
if self._R.has_coerce_map_from(S):
return CallableConvertMap(S, self, self._element_class,
parent_as_first_arg=True)
return None
def _number_field_to_frac_of_ring_of_integers(self, x):
r"""
Return the number field element ``x`` as an element of ``self``,
explicitly treating the numerator of ``x`` as an element of the ring
of integers and the denominator as an integer.
INPUT:
- ``x`` -- Number field element
OUTPUT:
- Element of ``self``
TESTS:
We demonstrate that :trac:`7958` is resolved in the case of
number fields::
sage: _.<x> = ZZ[]
sage: K.<a> = NumberField(x^5-3*x^4+2424*x^3+2*x-232)
sage: R = K.ring_of_integers()
sage: S.<y> = R[]
sage: F = FractionField(S) # indirect doctest
sage: F(1/a)
(a^4 - 3*a^3 + 2424*a^2 + 2)/232
"""
f = x.polynomial() # Polynomial over QQ
d = f.denominator() # Integer
return self._element_class(self, numerator=d * x, denominator=d)
def is_field(self, proof=True):
"""
Return ``True``, since the fraction field is a field.
EXAMPLES::
sage: Frac(ZZ).is_field()
True
"""
return True
def is_finite(self):
"""
Tells whether this fraction field is finite.
.. NOTE::
A fraction field is finite if and only if the associated
integral domain is finite.
EXAMPLES::
sage: Frac(QQ['a','b','c']).is_finite()
False
"""
return self._R.is_finite()
def base_ring(self):
"""
Return the base ring of ``self``.
This is the base ring of the ring
which this fraction field is the fraction field of.
EXAMPLES::
sage: R = Frac(ZZ['t'])
sage: R.base_ring()
Integer Ring
"""
return self._R.base_ring()
def characteristic(self):
"""
Return the characteristic of this fraction field.
EXAMPLES::
sage: R = Frac(ZZ['t'])
sage: R.base_ring()
Integer Ring
sage: R = Frac(ZZ['t']); R.characteristic()
0
sage: R = Frac(GF(5)['w']); R.characteristic()
5
"""
return self._R.characteristic()
def _repr_(self):
"""
Return a string representation of ``self``.
EXAMPLES::
sage: Frac(ZZ['x']) # indirect doctest
Fraction Field of Univariate Polynomial Ring in x over Integer Ring
"""
return "Fraction Field of %s" % self._R
def _latex_(self):
r"""
Return a latex representation of ``self``.
EXAMPLES::
sage: latex(Frac(GF(7)['x,y,z'])) # indirect doctest
\mathrm{Frac}(\Bold{F}_{7}[x, y, z])
"""
return "\\mathrm{Frac}(%s)" % latex.latex(self._R)
def _magma_init_(self, magma):
"""
Return a string representation of ``self`` in the given magma instance.
EXAMPLES::
sage: QQ['x'].fraction_field()._magma_init_(magma) # optional - magma
'SageCreateWithNames(FieldOfFractions(SageCreateWithNames(PolynomialRing(_sage_ref...),["x"])),["x"])'
sage: GF(9,'a')['x,y,z'].fraction_field()._magma_init_(magma) # optional - magma
'SageCreateWithNames(FieldOfFractions(SageCreateWithNames(PolynomialRing(_sage_ref...,3,"grevlex"),["x","y","z"])),["x","y","z"])'
``_magma_init_`` gets called implicitly below::
sage: magma(QQ['x,y'].fraction_field()) # optional - magma
Multivariate rational function field of rank 2 over Rational Field
Variables: x, y
sage: magma(ZZ['x'].fraction_field()) # optional - magma
Univariate rational function field over Integer Ring
Variables: x
Verify that conversion is being properly cached::
sage: k = Frac(QQ['x,z']) # optional - magma
sage: magma(k) is magma(k) # optional - magma
True
"""
s = 'FieldOfFractions(%s)' % self.ring()._magma_init_(magma)
return magma._with_names(s, self.variable_names())
def ring(self):
"""
Return the ring that this is the fraction field of.
EXAMPLES::
sage: R = Frac(QQ['x,y'])
sage: R
Fraction Field of Multivariate Polynomial Ring in x, y over Rational Field
sage: R.ring()
Multivariate Polynomial Ring in x, y over Rational Field
"""
return self._R
@cached_method
def is_exact(self):
"""
Return if ``self`` is exact which is if the underlying ring is exact.
EXAMPLES::
sage: Frac(ZZ['x']).is_exact()
True
sage: Frac(CDF['x']).is_exact()
False
"""
return self.ring().is_exact()
def _element_constructor_(self, x, y=None, coerce=True):
"""
Construct an element of this fraction field.
EXAMPLES::
sage: F = QQ['x,y'].fraction_field()
sage: F._element_constructor_(1)
1
sage: F._element_constructor_(F.gen(0)/F.gen(1))
x/y
sage: F._element_constructor_('1 + x/y')
(x + y)/y
::
sage: K = ZZ['x,y'].fraction_field()
sage: x,y = K.gens()
::
sage: F._element_constructor_(x/y)
x/y
TESTS:
The next example failed before :trac:`4376`::
sage: K(pari((x + 1)/(x^2 + x + 1)))
(x + 1)/(x^2 + x + 1)
These examples failed before :trac:`11368`::
sage: R.<x, y, z> = PolynomialRing(QQ)
sage: S = R.fraction_field()
sage: S(pari((x + y)/y))
(x + y)/y
sage: S(pari(x + y + 1/z))
(x*z + y*z + 1)/z
This example failed before :trac:`23664`::
sage: P0.<x> = ZZ[]
sage: P1.<y> = Frac(P0)[]
sage: frac = (x/(x^2 + 1))*y + 1/(x^3 + 1)
sage: Frac(ZZ['x,y'])(frac)
(x^4*y + x^2 + x*y + 1)/(x^5 + x^3 + x^2 + 1)
Test conversions where `y` is a string but `x` not::
sage: K = ZZ['x,y'].fraction_field()
sage: K._element_constructor_(2, 'x+y')
2/(x + y)
sage: K._element_constructor_(1, 'z')
Traceback (most recent call last):
...
TypeError: unable to evaluate 'z' in Fraction Field of Multivariate Polynomial Ring in x, y over Integer Ring
Check that :trac:`17971` is fixed::
sage: A.<a,c> = Frac(PolynomialRing(QQ,'a,c'))
sage: B.<d,e> = PolynomialRing(A,'d,e')
sage: R.<x> = PolynomialRing(B,'x')
sage: (a*d*x^2+a+e+1).resultant(-4*c^2*x+1)
a*d + 16*c^4*e + 16*a*c^4 + 16*c^4
Check that :trac:`24539` is fixed::
sage: tau = polygen(QQ, 'tau')
sage: PolynomialRing(CyclotomicField(2), 'z').fraction_field()(tau/(1+tau))
z/(z + 1)
Check that :trac:`26150` is fixed::
sage: z = SR.var('z')
sage: CyclotomicField(2)['z'].fraction_field()(2*(4*z + 5)/((z + 1)*(z - 1)^4))
(8*z + 10)/(z^5 - 3*z^4 + 2*z^3 + 2*z^2 - 3*z + 1)
::
sage: T.<t> = ZZ[]
sage: S.<s> = ZZ[]
sage: S.fraction_field()(s/(s+1), (t-1)/(t+2))
(s^2 + 2*s)/(s^2 - 1)
"""
if y is None:
ring_one = self.ring().one()
try:
return self._element_class(self, x, ring_one, coerce=coerce)
except (TypeError, ValueError):
pass
y = self._element_class(self, ring_one, ring_one,
coerce=False, reduce=False)
else:
try:
return self._element_class(self, x, y, coerce=coerce)
except (TypeError, ValueError):
pass
if isinstance(x, six.string_types):
from sage.misc.sage_eval import sage_eval
try:
x = sage_eval(x, self.gens_dict_recursive())
except NameError:
raise TypeError("unable to evaluate {!r} in {}".format(x, self))
if isinstance(y, six.string_types):
from sage.misc.sage_eval import sage_eval
try:
y = sage_eval(y, self.gens_dict_recursive())
except NameError:
raise TypeError("unable to evaluate {!r} in {}".format(y, self))
x = py_scalar_to_element(x)
y = py_scalar_to_element(y)
from sage.libs.pari.all import pari_gen
if isinstance(x, pari_gen) and x.type() == 't_POL':
# This recursive approach is needed because PARI
# represents multivariate polynomials as iterated
# univariate polynomials (see the above examples).
# Below, v is the variable with highest priority,
# and the x[i] are rational functions in the
# remaining variables.
v = self._element_class(self, x.variable(), 1)
x = sum(self(x[i]) * v**i for i in range(x.poldegree() + 1))
def resolve_fractions(x, y):
xn = x.numerator()
xd = x.denominator()
yn = y.numerator()
yd = y.denominator()
try:
return (xn * yd, yn * xd)
except (AttributeError, TypeError, ValueError):
pass
try:
P = yd.parent()
return (P(xn) * yd, yn * P(xd))
except (AttributeError, TypeError, ValueError):
pass
try:
P = xd.parent()
return (xn * P(yd), P(yn) * xd)
except (AttributeError, TypeError, ValueError):
pass
raise TypeError
while True:
x0, y0 = x, y
try:
x, y = resolve_fractions(x0, y0)
except (AttributeError, TypeError):
raise TypeError("cannot convert {!r}/{!r} to an element of {}".format(
x0, y0, self))
try:
return self._element_class(self, x, y, coerce=coerce)
except TypeError:
if not x != x0:
raise
def construction(self):
"""
EXAMPLES::
sage: Frac(ZZ['x']).construction()
(FractionField, Univariate Polynomial Ring in x over Integer Ring)
sage: K = Frac(GF(3)['t'])
sage: f, R = K.construction()
sage: f(R)
Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 3
sage: f(R) == K
True
"""
from sage.categories.pushout import FractionField
return FractionField(), self.ring()
def __eq__(self, other):
"""
Check whether ``self`` is equal to ``other``.
EXAMPLES::
sage: Frac(ZZ['x']) == Frac(ZZ['x'])
True
sage: Frac(ZZ['x']) == Frac(QQ['x'])
False
sage: Frac(ZZ['x']) == Frac(ZZ['y'])
False
sage: Frac(ZZ['x']) == QQ['x']
False
"""
if not isinstance(other, FractionField_generic):
return False
return self._R == other._R
def __ne__(self, other):
"""
Check whether ``self`` is not equal to ``other``.
EXAMPLES::
sage: Frac(ZZ['x']) != Frac(ZZ['x'])
False
sage: Frac(ZZ['x']) != Frac(QQ['x'])
True
sage: Frac(ZZ['x']) != Frac(ZZ['y'])
True
sage: Frac(ZZ['x']) != QQ['x']
True
"""
return not (self == other)
def __hash__(self):
"""
Compute the hash of ``self``.
EXAMPLES::
sage: h0 = hash(Frac(ZZ['x']))
sage: h1 = hash(Frac(ZZ['x']))
sage: h2 = hash(Frac(QQ['x']))
sage: h3 = hash(ZZ['x'])
sage: h0 == h1 and h1 != h2 and h1 != h3
True
"""
# to avoid having exactly the same hash as the base ring,
# we change this hash using a random number
return hash(self._R) ^ 147068341996611
def ngens(self):
"""
This is the same as for the parent object.
EXAMPLES::
sage: R = Frac(PolynomialRing(QQ,'z',10)); R
Fraction Field of Multivariate Polynomial Ring in z0, z1, z2, z3, z4, z5, z6, z7, z8, z9 over Rational Field
sage: R.ngens()
10
"""
return self._R.ngens()
def gen(self, i=0):
"""
Return the ``i``-th generator of ``self``.
EXAMPLES::
sage: R = Frac(PolynomialRing(QQ,'z',10)); R
Fraction Field of Multivariate Polynomial Ring in z0, z1, z2, z3, z4, z5, z6, z7, z8, z9 over Rational Field
sage: R.0
z0
sage: R.gen(3)
z3
sage: R.3
z3
"""
x = self._R.gen(i)
one = self._R.one()
r = self._element_class(self, x, one, coerce=False, reduce=False)
return r
def _is_valid_homomorphism_(self, codomain, im_gens, base_map=None):
"""
Check if the homomorphism defined by sending generators of this
fraction field to ``im_gens`` in ``codomain`` is valid.
EXAMPLES::
sage: F = QQ['x,y'].fraction_field()
sage: x,y = F.gens()
sage: F._is_valid_homomorphism_(F, [y,x])
True
sage: R = ZZ['x']; x = R.gen()
sage: F._is_valid_homomorphism_(R, [x, x])
False
TESTS::
sage: F._is_valid_homomorphism_(ZZ, [])
False
Test homomorphisms::
sage: phi = F.hom([2*y, x])
sage: phi(x+y)
x + 2*y
sage: phi(x/y)
2*y/x
"""
if len(im_gens) != self.ngens():
return False
# It is very difficult to check that the image of any element
# is invertible. Checking that the image of each generator
# is a unit is not sufficient. So we just give up and check
# that elements of the base ring coerce to the codomain
if base_map is None and not codomain.has_coerce_map_from(self.base_ring()):
return False
return True
def random_element(self, *args, **kwds):
"""
Return a random element in this fraction field.
The arguments are passed to the random generator of the underlying ring.
EXAMPLES::
sage: F = ZZ['x'].fraction_field()
sage: F.random_element() # random
(2*x - 8)/(-x^2 + x)
::
sage: f = F.random_element(degree=5)
sage: f.numerator().degree()
5
sage: f.denominator().degree()
5
"""
return self._element_class(self, self._R.random_element(*args, **kwds),
self._R._random_nonzero_element(*args, **kwds),
coerce=False, reduce=True)
def some_elements(self):
r"""
Return some elements in this field.
EXAMPLES::
sage: R.<x> = QQ[]
sage: R.fraction_field().some_elements()
[0,
1,
x,
2*x,
x/(x^2 + 2*x + 1),
1/x^2,
...
(2*x^2 + 2)/(x^2 + 2*x + 1),
(2*x^2 + 2)/x^3,
(2*x^2 + 2)/(x^2 - 1),
2]
"""
ret = [self.zero(), self.one()]
for a in self._R.some_elements():
for b in self._R.some_elements():
if a != b and self(a) and self(b):
ret.append(self(a)/self(b))
return ret
def _gcd_univariate_polynomial(self, f, g):
r"""
Helper method used to compute polynomial gcds over this field.
See :meth:`sage.rings.polynomial.polynomial_element.Polynomial.gcd`.
TESTS::
sage: A.<x,y> = ZZ[]
sage: C.<z> = Frac(A)[]
sage: c = (2*y^2 - 11*x - 2*y + 1)/(-x^2 + x*y - 2*y^2)
sage: p = (c*z^2 + x^10*z + 1)^6
sage: q = (z^2 + c*x^10*z + 1)^6
sage: g = p.gcd(q)
sage: g
1
sage: g.parent() is p.parent()
True
sage: (p*(z-x)).gcd(q*(z-x))
z - x
sage: C.zero().gcd(2*z)
z
sage: (x*z).gcd(0)
z
sage: C.zero().gcd(0)
0
"""
if g.is_zero():
if f.is_zero():
return f
else:
return f.monic()
Pol = f.parent()
Num = Pol.change_ring(self.base())
f1 = Num(f.numerator())
g1 = Num(g.numerator())
return Pol(f1.gcd(g1)).monic()
class FractionField_1poly_field(FractionField_generic):
"""
The fraction field of a univariate polynomial ring over a field.
Many of the functions here are included for coherence with number fields.
"""
def __init__(self, R,
element_class=fraction_field_element.FractionFieldElement_1poly_field):
"""
Just change the default for ``element_class``.
EXAMPLES::
sage: R.<t> = QQ[]; K = R.fraction_field()
sage: K._element_class
<type 'sage.rings.fraction_field_element.FractionFieldElement_1poly_field'>
"""
FractionField_generic.__init__(self, R, element_class)
def ring_of_integers(self):
"""
Return the ring of integers in this fraction field.
EXAMPLES::
sage: K = FractionField(GF(5)['t'])
sage: K.ring_of_integers()
Univariate Polynomial Ring in t over Finite Field of size 5
"""
return self._R
def maximal_order(self):
"""
Return the maximal order in this fraction field.
EXAMPLES::
sage: K = FractionField(GF(5)['t'])
sage: K.maximal_order()
Univariate Polynomial Ring in t over Finite Field of size 5
"""
return self._R
def class_number(self):
"""
Here for compatibility with number fields and function fields.
EXAMPLES::
sage: R.<t> = GF(5)[]; K = R.fraction_field()
sage: K.class_number()
1
"""
return 1
def _factor_univariate_polynomial(self, f):
r"""
Return the factorization of ``f`` over this field.
EXAMPLES::
sage: k.<a> = GF(9)
sage: K = k['t'].fraction_field()
sage: R.<x> = K[]
sage: f = x^3 + a
sage: f.factor()
(x + 2*a + 1)^3
"""
# The default implementation would try to convert this element to singular and factor there.
# This fails silently over some base fields, see #23642, so we convert
# to the function field and factor there.
return f.change_ring(self.function_field()).factor().base_change(f.parent())
def function_field(self):
r"""
Return the isomorphic function field.
EXAMPLES::
sage: R.<t> = GF(5)[]
sage: K = R.fraction_field()
sage: K.function_field()
Rational function field in t over Finite Field of size 5
.. SEEALSO::
:meth:`sage.rings.function_field.RationalFunctionField.field`
"""
from sage.rings.all import FunctionField
return FunctionField(self.base_ring(), names=self.variable_name())
def _coerce_map_from_(self, R):
r"""
Return a coerce map from ``R`` to this field.
EXAMPLES::
sage: R.<t> = GF(5)[]
sage: K = R.fraction_field()
sage: L = K.function_field()
sage: f = K.coerce_map_from(L); f # indirect doctest
Isomorphism:
From: Rational function field in t over Finite Field of size 5
To: Fraction Field of Univariate Polynomial Ring in t over Finite Field of size 5
sage: f(~L.gen())
1/t
"""
from sage.rings.function_field.function_field import RationalFunctionField
if isinstance(R, RationalFunctionField) and self.variable_name() == R.variable_name() and self.base_ring() is R.constant_base_field():
from sage.categories.all import Hom
parent = Hom(R, self)
from sage.rings.function_field.maps import FunctionFieldToFractionField
return parent.__make_element_class__(FunctionFieldToFractionField)(parent)
return super(FractionField_1poly_field, self)._coerce_map_from_(R)
class FractionFieldEmbedding(DefaultConvertMap_unique):
r"""
The embedding of an integral domain into its field of fractions.
EXAMPLES::
sage: R.<x> = QQ[]
sage: f = R.fraction_field().coerce_map_from(R); f
Coercion map:
From: Univariate Polynomial Ring in x over Rational Field
To: Fraction Field of Univariate Polynomial Ring in x over Rational Field
TESTS::
sage: from sage.rings.fraction_field import FractionFieldEmbedding
sage: isinstance(f, FractionFieldEmbedding)
True
sage: TestSuite(f).run()
Check that :trac:`23185` has been resolved::
sage: R.<x> = QQ[]
sage: K.<x> = FunctionField(QQ)
sage: R.is_subring(K)
True
sage: R.is_subring(R.fraction_field())
True
"""
def is_surjective(self):
r"""
Return whether this map is surjective.
EXAMPLES::
sage: R.<x> = QQ[]
sage: R.fraction_field().coerce_map_from(R).is_surjective()
False
"""
return self.domain().is_field()
def is_injective(self):
r"""
Return whether this map is injective.
EXAMPLES:
The map from an integral domain to its fraction field is always
injective::
sage: R.<x> = QQ[]
sage: R.fraction_field().coerce_map_from(R).is_injective()
True
"""
return True
def section(self):
r"""
Return a section of this map.
EXAMPLES::
sage: R.<x> = QQ[]
sage: R.fraction_field().coerce_map_from(R).section()
Section map:
From: Fraction Field of Univariate Polynomial Ring in x over Rational Field
To: Univariate Polynomial Ring in x over Rational Field
"""
from sage.categories.sets_with_partial_maps import SetsWithPartialMaps
from sage.all import Hom
parent = Hom(self.codomain(), self.domain(), SetsWithPartialMaps())
return parent.__make_element_class__(FractionFieldEmbeddingSection)(self)
def _richcmp_(self, other, op):
r"""
Compare this element to ``other`` with respect to ``op``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: f = R.fraction_field().coerce_map_from(R)
sage: S.<y> = GF(2)[]
sage: g = S.fraction_field().coerce_map_from(S)
sage: f == g # indirect doctest
False
sage: f == f
True
"""
if type(self) != type(other):
return NotImplemented
return richcmp((self.domain(), self.codomain()), (other.domain(), other.codomain()), op)
def __hash__(self):
r"""
Return a hash value for this embedding.
EXAMPLES::
sage: R.<x> = QQ[]
sage: hash(R.fraction_field().coerce_map_from(R)) == hash(R.fraction_field().coerce_map_from(R))
True
"""
return hash((type(self), self.domain()))
class FractionFieldEmbeddingSection(Section):
r"""
The section of the embedding of an integral domain into its field of
fractions.
EXAMPLES::
sage: R.<x> = QQ[]
sage: f = R.fraction_field().coerce_map_from(R).section(); f
Section map:
From: Fraction Field of Univariate Polynomial Ring in x over Rational Field
To: Univariate Polynomial Ring in x over Rational Field
TESTS::
sage: from sage.rings.fraction_field import FractionFieldEmbeddingSection
sage: isinstance(f, FractionFieldEmbeddingSection)
True
sage: TestSuite(f).run()
"""
def _call_(self, x, check=True):
r"""
Evaluate this map at ``x``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: K = R.fraction_field()
sage: x = K.gen()
sage: f = K.coerce_map_from(R).section()
sage: f(x)
x
sage: f(1/x)
Traceback (most recent call last):
...
TypeError: fraction must have unit denominator
TESTS:
Over inexact rings, we have to take the precision of the denominators
into account::
sage: R=ZpCR(2)
sage: S.<x> = R[]
sage: f = x/S(R(3,absprec=2))
sage: S(f)
(1 + 2 + O(2^2))*x
Test for Localization::
sage: R.<x> = ZZ[]
sage: L = Localization(R, x**2+2*x+ 1)
sage: 1/(x+1) in L # indirect doctest
True
sage: 1/(x+2) in L # indirect doctest
False
"""
codom = self.codomain()
if self.domain()._R is codom:
num = x.numerator()
den = x.denominator()
else:
# codomain may different from the fraction fields base ring
# for example for localizations
num = codom(x.numerator())
den = codom(x.denominator())
if codom.is_exact() and den.is_one():
return num
if check and not den.is_unit():
# This should probably be a ValueError.
# However, too much existing code is expecting this to throw a
# TypeError, so we decided to keep it for the time being.
raise TypeError("fraction must have unit denominator")
return num * den.inverse_of_unit()
def _call_with_args(self, x, args=(), kwds={}):
r"""
Evaluation this map at ``x``.
INPUT:
- ``check`` -- whether or not to check
EXAMPLES::
sage: R.<x> = QQ[]
sage: K = R.fraction_field()
sage: R(K.gen(), check=True)
x
"""
check = kwds.pop('check', True)
if args or kwds:
raise NotImplementedError("__call__ can not be called with additional arguments other than check=True/False")
return self._call_(x, check=check)
def _richcmp_(self, other, op):
r"""
Compare this element to ``other`` with respect to ``op``.
EXAMPLES::
sage: R.<x> = QQ[]
sage: f = R.fraction_field().coerce_map_from(R).section()
sage: S.<y> = GF(2)[]
sage: g = S.fraction_field().coerce_map_from(S).section()
sage: f == g # indirect doctest
False
sage: f == f
True
"""
if type(self) != type(other):
return NotImplemented
return richcmp((self.domain(), self.codomain()), (other.domain(), other.codomain()), op)
def __hash__(self):
r"""
Return a hash value for this section.
EXAMPLES::
sage: R.<x> = QQ[]
sage: hash(R.fraction_field().coerce_map_from(R).section()) == hash(R.fraction_field().coerce_map_from(R).section())
True
"""
return hash((type(self), self.codomain()))
| 30.480469
| 142
| 0.528976
|
from __future__ import absolute_import
from six.moves import range
import six
from . import ring
from . import fraction_field_element
import sage.misc.latex as latex
from sage.misc.cachefunc import cached_method
from sage.rings.integer_ring import ZZ
from sage.structure.richcmp import richcmp
from sage.structure.parent import Parent
from sage.structure.coerce import py_scalar_to_element
from sage.structure.coerce_maps import CallableConvertMap, DefaultConvertMap_unique
from sage.categories.basic import QuotientFields, Rings
from sage.categories.map import Section
def FractionField(R, names=None):
if not ring.is_Ring(R):
raise TypeError("R must be a ring")
if not R.is_integral_domain():
raise TypeError("R must be an integral domain.")
return R.fraction_field()
def is_FractionField(x):
return isinstance(x, FractionField_generic)
class FractionField_generic(ring.Field):
def __init__(self, R,
element_class=fraction_field_element.FractionFieldElement,
category=QuotientFields()):
self._R = R
self._element_class = element_class
cat = category
if self in Rings().Infinite():
cat = cat.Infinite()
elif self in Rings().Finite():
cat = cat.Finite()
Parent.__init__(self, base=R, names=R._names, category=cat)
def __reduce__(self):
return FractionField, (self._R,)
def _coerce_map_from_(self, S):
from sage.rings.rational_field import QQ
from sage.rings.number_field.number_field_base import NumberField
from sage.rings.polynomial.laurent_polynomial_ring import \
LaurentPolynomialRing_generic
if S is self._R:
parent = self._R.Hom(self)
return parent.__make_element_class__(FractionFieldEmbedding)(self._R, self, category=parent.homset_category())
def wrapper(x):
return self._element_class(self, x.numerator(), x.denominator())
if S is QQ and self._R.has_coerce_map_from(ZZ):
return CallableConvertMap(S, self, wrapper, parent_as_first_arg=False)
from sage.rings.localization import Localization
if isinstance(S, Localization):
parent = S.Hom(self)
return parent.__make_element_class__(FractionFieldEmbedding)(S, self, category=parent.homset_category())
if isinstance(S, NumberField):
return CallableConvertMap(S, self,
self._number_field_to_frac_of_ring_of_integers,
parent_as_first_arg=False)
if isinstance(S, LaurentPolynomialRing_generic):
def converter(x, y=None):
if y is None:
return self._element_class(self, *x._fraction_pair())
xnum, xden = x._fraction_pair()
ynum, yden = y._fraction_pair()
return self._element_class(self, xnum * yden, xden * ynum)
return CallableConvertMap(S, self, converter, parent_as_first_arg=False)
if (isinstance(S, FractionField_generic) and
self._R.has_coerce_map_from(S.ring())):
return CallableConvertMap(S, self, wrapper, parent_as_first_arg=False)
if self._R.has_coerce_map_from(S):
return CallableConvertMap(S, self, self._element_class,
parent_as_first_arg=True)
return None
def _number_field_to_frac_of_ring_of_integers(self, x):
f = x.polynomial()
d = f.denominator()
return self._element_class(self, numerator=d * x, denominator=d)
def is_field(self, proof=True):
return True
def is_finite(self):
return self._R.is_finite()
def base_ring(self):
return self._R.base_ring()
def characteristic(self):
return self._R.characteristic()
def _repr_(self):
return "Fraction Field of %s" % self._R
def _latex_(self):
return "\\mathrm{Frac}(%s)" % latex.latex(self._R)
def _magma_init_(self, magma):
s = 'FieldOfFractions(%s)' % self.ring()._magma_init_(magma)
return magma._with_names(s, self.variable_names())
def ring(self):
return self._R
@cached_method
def is_exact(self):
return self.ring().is_exact()
def _element_constructor_(self, x, y=None, coerce=True):
if y is None:
ring_one = self.ring().one()
try:
return self._element_class(self, x, ring_one, coerce=coerce)
except (TypeError, ValueError):
pass
y = self._element_class(self, ring_one, ring_one,
coerce=False, reduce=False)
else:
try:
return self._element_class(self, x, y, coerce=coerce)
except (TypeError, ValueError):
pass
if isinstance(x, six.string_types):
from sage.misc.sage_eval import sage_eval
try:
x = sage_eval(x, self.gens_dict_recursive())
except NameError:
raise TypeError("unable to evaluate {!r} in {}".format(x, self))
if isinstance(y, six.string_types):
from sage.misc.sage_eval import sage_eval
try:
y = sage_eval(y, self.gens_dict_recursive())
except NameError:
raise TypeError("unable to evaluate {!r} in {}".format(y, self))
x = py_scalar_to_element(x)
y = py_scalar_to_element(y)
from sage.libs.pari.all import pari_gen
if isinstance(x, pari_gen) and x.type() == 't_POL':
v = self._element_class(self, x.variable(), 1)
x = sum(self(x[i]) * v**i for i in range(x.poldegree() + 1))
def resolve_fractions(x, y):
xn = x.numerator()
xd = x.denominator()
yn = y.numerator()
yd = y.denominator()
try:
return (xn * yd, yn * xd)
except (AttributeError, TypeError, ValueError):
pass
try:
P = yd.parent()
return (P(xn) * yd, yn * P(xd))
except (AttributeError, TypeError, ValueError):
pass
try:
P = xd.parent()
return (xn * P(yd), P(yn) * xd)
except (AttributeError, TypeError, ValueError):
pass
raise TypeError
while True:
x0, y0 = x, y
try:
x, y = resolve_fractions(x0, y0)
except (AttributeError, TypeError):
raise TypeError("cannot convert {!r}/{!r} to an element of {}".format(
x0, y0, self))
try:
return self._element_class(self, x, y, coerce=coerce)
except TypeError:
if not x != x0:
raise
def construction(self):
from sage.categories.pushout import FractionField
return FractionField(), self.ring()
def __eq__(self, other):
if not isinstance(other, FractionField_generic):
return False
return self._R == other._R
def __ne__(self, other):
return not (self == other)
def __hash__(self):
return hash(self._R) ^ 147068341996611
def ngens(self):
return self._R.ngens()
def gen(self, i=0):
x = self._R.gen(i)
one = self._R.one()
r = self._element_class(self, x, one, coerce=False, reduce=False)
return r
def _is_valid_homomorphism_(self, codomain, im_gens, base_map=None):
if len(im_gens) != self.ngens():
return False
if base_map is None and not codomain.has_coerce_map_from(self.base_ring()):
return False
return True
def random_element(self, *args, **kwds):
return self._element_class(self, self._R.random_element(*args, **kwds),
self._R._random_nonzero_element(*args, **kwds),
coerce=False, reduce=True)
def some_elements(self):
ret = [self.zero(), self.one()]
for a in self._R.some_elements():
for b in self._R.some_elements():
if a != b and self(a) and self(b):
ret.append(self(a)/self(b))
return ret
def _gcd_univariate_polynomial(self, f, g):
if g.is_zero():
if f.is_zero():
return f
else:
return f.monic()
Pol = f.parent()
Num = Pol.change_ring(self.base())
f1 = Num(f.numerator())
g1 = Num(g.numerator())
return Pol(f1.gcd(g1)).monic()
class FractionField_1poly_field(FractionField_generic):
def __init__(self, R,
element_class=fraction_field_element.FractionFieldElement_1poly_field):
FractionField_generic.__init__(self, R, element_class)
def ring_of_integers(self):
return self._R
def maximal_order(self):
return self._R
def class_number(self):
return 1
def _factor_univariate_polynomial(self, f):
urn f.change_ring(self.function_field()).factor().base_change(f.parent())
def function_field(self):
from sage.rings.all import FunctionField
return FunctionField(self.base_ring(), names=self.variable_name())
def _coerce_map_from_(self, R):
from sage.rings.function_field.function_field import RationalFunctionField
if isinstance(R, RationalFunctionField) and self.variable_name() == R.variable_name() and self.base_ring() is R.constant_base_field():
from sage.categories.all import Hom
parent = Hom(R, self)
from sage.rings.function_field.maps import FunctionFieldToFractionField
return parent.__make_element_class__(FunctionFieldToFractionField)(parent)
return super(FractionField_1poly_field, self)._coerce_map_from_(R)
class FractionFieldEmbedding(DefaultConvertMap_unique):
def is_surjective(self):
return self.domain().is_field()
def is_injective(self):
return True
def section(self):
from sage.categories.sets_with_partial_maps import SetsWithPartialMaps
from sage.all import Hom
parent = Hom(self.codomain(), self.domain(), SetsWithPartialMaps())
return parent.__make_element_class__(FractionFieldEmbeddingSection)(self)
def _richcmp_(self, other, op):
if type(self) != type(other):
return NotImplemented
return richcmp((self.domain(), self.codomain()), (other.domain(), other.codomain()), op)
def __hash__(self):
return hash((type(self), self.domain()))
class FractionFieldEmbeddingSection(Section):
def _call_(self, x, check=True):
codom = self.codomain()
if self.domain()._R is codom:
num = x.numerator()
den = x.denominator()
else:
num = codom(x.numerator())
den = codom(x.denominator())
if codom.is_exact() and den.is_one():
return num
if check and not den.is_unit():
raise TypeError("fraction must have unit denominator")
return num * den.inverse_of_unit()
def _call_with_args(self, x, args=(), kwds={}):
check = kwds.pop('check', True)
if args or kwds:
raise NotImplementedError("__call__ can not be called with additional arguments other than check=True/False")
return self._call_(x, check=check)
def _richcmp_(self, other, op):
if type(self) != type(other):
return NotImplemented
return richcmp((self.domain(), self.codomain()), (other.domain(), other.codomain()), op)
def __hash__(self):
return hash((type(self), self.codomain()))
| true
| true
|
1c4286d1bec411fe939dc034241b702c6929d4c7
| 333
|
py
|
Python
|
examples/ipython_extensions/my_system/extension.py
|
nixocio/manage
|
ced0ce7fe6aff05cb0f0d8d1a37d5fdfaace1d55
|
[
"ISC"
] | 111
|
2016-06-14T03:21:36.000Z
|
2019-07-19T16:48:26.000Z
|
examples/ipython_extensions/my_system/extension.py
|
nixocio/manage
|
ced0ce7fe6aff05cb0f0d8d1a37d5fdfaace1d55
|
[
"ISC"
] | 14
|
2016-06-23T03:09:32.000Z
|
2017-10-11T00:38:15.000Z
|
examples/ipython_extensions/my_system/extension.py
|
nixocio/manage
|
ced0ce7fe6aff05cb0f0d8d1a37d5fdfaace1d55
|
[
"ISC"
] | 12
|
2016-06-26T18:56:35.000Z
|
2019-04-28T08:19:56.000Z
|
# coding: utf-8
import IPython.core.error
def shutdown_hook(ipython):
print('\nThis is an extension exit hook')
raise IPython.core.error.TryNext
def load_ipython_extension(ipython):
print('\nExtension is being loaded!')
print(ipython)
ipython.set_hook('shutdown_hook', shutdown_hook, _warn_deprecated=False)
| 22.2
| 76
| 0.744745
|
import IPython.core.error
def shutdown_hook(ipython):
print('\nThis is an extension exit hook')
raise IPython.core.error.TryNext
def load_ipython_extension(ipython):
print('\nExtension is being loaded!')
print(ipython)
ipython.set_hook('shutdown_hook', shutdown_hook, _warn_deprecated=False)
| true
| true
|
1c4286dc73c971e2e33bea18a4ca19da4a639cd8
| 16
|
py
|
Python
|
btd6_memory_info/generated/System/Enum/enum.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/System/Enum/enum.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
btd6_memory_info/generated/System/Enum/enum.py
|
56kyle/bloons_auto
|
419d55b51d1cddc49099593970adf1c67985b389
|
[
"MIT"
] | null | null | null |
class Enum: pass
| 16
| 16
| 0.8125
|
class Enum: pass
| true
| true
|
1c42876ca9960da64105a1ab05d91e42a1d00a17
| 921
|
py
|
Python
|
spider_learn/scrapy_learn/mySpider/mySpider/spiders/cast.py
|
Fly365/py-learn
|
a7c747af70ca2d6dcf244dc89344fbd26b125987
|
[
"Apache-2.0"
] | null | null | null |
spider_learn/scrapy_learn/mySpider/mySpider/spiders/cast.py
|
Fly365/py-learn
|
a7c747af70ca2d6dcf244dc89344fbd26b125987
|
[
"Apache-2.0"
] | null | null | null |
spider_learn/scrapy_learn/mySpider/mySpider/spiders/cast.py
|
Fly365/py-learn
|
a7c747af70ca2d6dcf244dc89344fbd26b125987
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
import scrapy
from scrapy_learn.mySpider.mySpider.items import CastItem
class CastSpider(scrapy.Spider):
name = 'cast'
allowed_domains = ['cast.cn']
start_urls = ['http://cast.cn/']
# 1. 负责解析返回的网页数据,提取结构化数据(生成item)
# 2. 生成下一页的URL请求
def parse(self, response):
#filename = "tea.html"
#open(filename,"w").write(response.body)
items = []
for each in response.xpath("//div[@class='li_txt']"):
# 将获取数据封装到对象
item = CastItem()
# extract() 方法返回的都是Unicode字符串
name = each.xpath("h3/text()").extract()
title = each.xpath("h4/text()").extract()
info = each.xpath("p/text()").extract()
# xpath返回的是包含一个元素的列表
item["name"] = name[0]
item["title"] = title[0]
item["info"] = info[0]
items.append(item)
return items
| 28.78125
| 61
| 0.543974
|
import scrapy
from scrapy_learn.mySpider.mySpider.items import CastItem
class CastSpider(scrapy.Spider):
name = 'cast'
allowed_domains = ['cast.cn']
start_urls = ['http://cast.cn/']
def parse(self, response):
items = []
for each in response.xpath("//div[@class='li_txt']"):
item = CastItem()
name = each.xpath("h3/text()").extract()
title = each.xpath("h4/text()").extract()
info = each.xpath("p/text()").extract()
item["name"] = name[0]
item["title"] = title[0]
item["info"] = info[0]
items.append(item)
return items
| true
| true
|
1c42881d1d279e5163eb221403b46e7740fa6127
| 275
|
py
|
Python
|
supportbot/supportbot/doctype/conversation_log/test_conversation_log.py
|
deepshig/supportbot
|
252fa5d56d5ebbff296a84585bbde05c3b549999
|
[
"MIT"
] | 1
|
2021-04-18T22:08:37.000Z
|
2021-04-18T22:08:37.000Z
|
supportbot/supportbot/doctype/conversation_log/test_conversation_log.py
|
deepshig/supportbot
|
252fa5d56d5ebbff296a84585bbde05c3b549999
|
[
"MIT"
] | null | null | null |
supportbot/supportbot/doctype/conversation_log/test_conversation_log.py
|
deepshig/supportbot
|
252fa5d56d5ebbff296a84585bbde05c3b549999
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe and Contributors
# See license.txt
from __future__ import unicode_literals
import frappe
import unittest
# test_records = frappe.get_test_records('conversation_log')
class Testconversation_log(unittest.TestCase):
pass
| 21.153846
| 60
| 0.781818
|
from __future__ import unicode_literals
import frappe
import unittest
class Testconversation_log(unittest.TestCase):
pass
| true
| true
|
1c42886a320fe961c8578ac295c288cb156c5e72
| 18,244
|
py
|
Python
|
pandaharvester/harvesterzipper/base_zipper.py
|
PanDAWMS/panda-havester
|
90aefac49bb85dc815791105e65c602979079aaa
|
[
"Apache-2.0"
] | 11
|
2017-06-01T10:16:58.000Z
|
2019-11-22T08:41:36.000Z
|
pandaharvester/harvesterzipper/base_zipper.py
|
PanDAWMS/panda-havester
|
90aefac49bb85dc815791105e65c602979079aaa
|
[
"Apache-2.0"
] | 34
|
2016-10-25T19:15:24.000Z
|
2021-03-05T12:59:04.000Z
|
pandaharvester/harvesterzipper/base_zipper.py
|
PanDAWMS/panda-havester
|
90aefac49bb85dc815791105e65c602979079aaa
|
[
"Apache-2.0"
] | 17
|
2016-10-24T13:29:45.000Z
|
2021-03-23T17:35:27.000Z
|
import os
import uuid
import time
import multiprocessing
import tempfile
import gc
from concurrent.futures import ThreadPoolExecutor as Pool
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
# base class for zipper plugin
class BaseZipper(PluginBase):
# constructor
def __init__(self, **kwarg):
self.zipDir = "${SRCDIR}"
self.zip_tmp_log = None
self.zip_jobSpec = None
PluginBase.__init__(self, **kwarg)
# zip output files
def simple_zip_output(self, jobspec, tmp_log):
tmp_log.debug('start')
self.zip_tmp_log = tmp_log
self.zip_jobSpec = jobspec
argDictList = []
try:
for fileSpec in jobspec.outFiles:
if self.zipDir == "${SRCDIR}":
# the same directory as src
zipDir = os.path.dirname(next(iter(fileSpec.associatedFiles)).path)
elif self.zipDir == "${WORKDIR}":
# work dir
workSpec = jobspec.get_workspec_list()[0]
zipDir = workSpec.get_access_point()
else:
zipDir = self.zipDir
zipPath = os.path.join(zipDir, fileSpec.lfn)
argDict = dict()
argDict['zipPath'] = zipPath
argDict['associatedFiles'] = []
for assFileSpec in fileSpec.associatedFiles:
if os.path.exists(assFileSpec.path):
argDict['associatedFiles'].append(assFileSpec.path)
else:
assFileSpec.status = 'failed'
argDictList.append(argDict)
# parallel execution
try:
nThreadsForZip = harvester_config.stager.nThreadsForZip
except Exception:
nThreadsForZip = multiprocessing.cpu_count()
with Pool(max_workers=nThreadsForZip) as pool:
retValList = pool.map(self.make_one_zip, argDictList)
# check returns
for fileSpec, retVal in zip(jobspec.outFiles, retValList):
tmpRet, errMsg, fileInfo = retVal
if tmpRet is True:
# set path
fileSpec.path = fileInfo['path']
fileSpec.fsize = fileInfo['fsize']
fileSpec.chksum = fileInfo['chksum']
msgStr = 'fileSpec.path - {0}, fileSpec.fsize - {1}, fileSpec.chksum(adler32) - {2}' \
.format(fileSpec.path, fileSpec.fsize, fileSpec.chksum)
tmp_log.debug(msgStr)
else:
tmp_log.error('got {0} with {1} when zipping {2}'.format(tmpRet, errMsg, fileSpec.lfn))
return tmpRet, 'failed to zip with {0}'.format(errMsg)
except Exception:
errMsg = core_utils.dump_error_message(tmp_log)
return False, 'failed to zip with {0}'.format(errMsg)
tmp_log.debug('done')
return True, ''
# make one zip file
def make_one_zip(self, arg_dict):
try:
zipPath = arg_dict['zipPath']
lfn = os.path.basename(zipPath)
self.zip_tmp_log.debug('{0} start zipPath={1} with {2} files'.format(lfn, zipPath,
len(arg_dict['associatedFiles'])))
# make zip if doesn't exist
if not os.path.exists(zipPath):
# tmp file names
tmpZipPath = zipPath + '.' + str(uuid.uuid4())
tmpZipPathIn = tmpZipPath + '.in'
with open(tmpZipPathIn, "w") as f:
for associatedFile in arg_dict['associatedFiles']:
f.write("{0}\n".format(associatedFile))
# make command
com = 'tar -c -f {0} -T {1} '.format(tmpZipPath, tmpZipPathIn)
com += "--transform 's/.*\///' "
# execute
p = subprocess.Popen(com,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p.communicate()
retCode = p.returncode
if retCode != 0:
msgStr = 'failed to make zip for {0} with {1}:{2}'.format(lfn, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
# avoid overwriting
lockName = 'zip.lock.{0}'.format(lfn)
lockInterval = 60
tmpStat = False
# get lock
for i in range(lockInterval):
tmpStat = self.dbInterface.get_object_lock(lockName, lock_interval=lockInterval)
if tmpStat:
break
time.sleep(1)
# failed to lock
if not tmpStat:
msgStr = 'failed to lock for {0}'.format(lfn)
self.zip_tmp_log.error(msgStr)
return None, msgStr
if not os.path.exists(zipPath):
os.rename(tmpZipPath, zipPath)
# release lock
self.dbInterface.release_object_lock(lockName)
# make return
fileInfo = dict()
fileInfo['path'] = zipPath
# get size
statInfo = os.stat(zipPath)
fileInfo['fsize'] = statInfo.st_size
fileInfo['chksum'] = core_utils.calc_adler32(zipPath)
except Exception:
errMsg = core_utils.dump_error_message(self.zip_tmp_log)
return False, 'failed to zip with {0}'.format(errMsg)
self.zip_tmp_log.debug('{0} done'.format(lfn))
return True, '', fileInfo
# zip output files; file operations are done on remote side with ssh
def ssh_zip_output(self, jobspec, tmp_log):
tmp_log.debug('start')
self.zip_tmp_log = tmp_log
self.zip_jobSpec = jobspec
argDictList = []
outFiles_list = list(jobspec.outFiles)
try:
try:
nThreadsForZip = harvester_config.stager.nThreadsForZip
except Exception:
nThreadsForZip = multiprocessing.cpu_count()
# check associate file existence
def _check_assfile_existence(fileSpec):
# ass_file_paths_str = ' '.join([ assFileSpec.path for assFileSpec in fileSpec.associatedFiles ])
# tmpfile over shared fs
tmpArgFile = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_check-exist.tmp',
dir=os.path.dirname(next(iter(fileSpec.associatedFiles)).path))
for assFileSpec in fileSpec.associatedFiles:
tmpArgFile.write('{0}\n'.format(assFileSpec.path))
tmpArgFile.close()
# record set
existence_set = set()
# make command
# '"for i in $(cat {arg_file}); do test -f $i && echo \'T\' || echo \'F\'; done" '
com = ( 'ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"cat {arg_file} | xargs -I%% sh -c \' test -f %% && echo T || echo F \' " '
).format(
sshkey=self.sshkey,
userhost=self.userhost,
arg_file=tmpArgFile.name,
)
# execute
p = subprocess.Popen(com,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p.communicate()
retCode = p.returncode
if retCode != 0:
msgStr = 'failed to existence of associate files with {0}:{1}'.format(stdOut, stdErr)
tmp_log.error(msgStr)
else:
try:
ret_list = stdOut.strip('\n').split('\n')
if len(fileSpec.associatedFiles) == len(ret_list):
for (assFileSpec, retVal) in zip(fileSpec.associatedFiles, ret_list):
if retVal == 'T':
existence_set.add(assFileSpec.path)
else:
msgStr = 'returned number of files inconsistent! Skipped...'
tmp_log.error(msgStr)
except Exception:
core_utils.dump_error_message(tmp_log)
os.remove(tmpArgFile.name)
del p, stdOut, stdErr
gc.collect()
return existence_set
# parallel execution of check existence
with Pool(max_workers=nThreadsForZip) as pool:
existence_set_list = pool.map(_check_assfile_existence, outFiles_list)
# loop
for fileSpec, existence_set in zip(outFiles_list, existence_set_list):
if self.zipDir == "${SRCDIR}":
# the same directory as src
zipDir = os.path.dirname(next(iter(fileSpec.associatedFiles)).path)
elif self.zipDir == "${WORKDIR}":
# work dir
workSpec = jobspec.get_workspec_list()[0]
zipDir = workSpec.get_access_point()
else:
zipDir = self.zipDir
zipPath = os.path.join(zipDir, fileSpec.lfn)
argDict = dict()
argDict['zipPath'] = zipPath
argDict['associatedFiles'] = []
# check existence of files
for assFileSpec in fileSpec.associatedFiles:
if assFileSpec.path in existence_set:
argDict['associatedFiles'].append(assFileSpec.path)
else:
assFileSpec.status = 'failed'
# append
argDictList.append(argDict)
# parallel execution of zip
with Pool(max_workers=nThreadsForZip) as pool:
retValList = pool.map(self.ssh_make_one_zip, argDictList)
# check returns
for fileSpec, retVal in zip(jobspec.outFiles, retValList):
tmpRet, errMsg, fileInfo = retVal
if tmpRet is True:
# set path
fileSpec.path = fileInfo['path']
fileSpec.fsize = fileInfo['fsize']
fileSpec.chksum = fileInfo['chksum']
msgStr = 'fileSpec.path - {0}, fileSpec.fsize - {1}, fileSpec.chksum(adler32) - {2}' \
.format(fileSpec.path, fileSpec.fsize, fileSpec.chksum)
tmp_log.debug(msgStr)
else:
tmp_log.error('got {0} with {1} when zipping {2}'.format(tmpRet, errMsg, fileSpec.lfn))
return tmpRet, 'failed to zip with {0}'.format(errMsg)
except Exception:
errMsg = core_utils.dump_error_message(tmp_log)
return False, 'failed to zip with {0}'.format(errMsg)
tmp_log.debug('done')
return True, ''
# make one zip file; file operations are done on remote side with ssh
def ssh_make_one_zip(self, arg_dict):
try:
zipPath = arg_dict['zipPath']
lfn = os.path.basename(zipPath)
self.zip_tmp_log.debug('{0} start zipPath={1} with {2} files'.format(lfn, zipPath,
len(arg_dict['associatedFiles'])))
# tmp arg file
tmpArgFile = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_tar-name.tmp',
dir=os.path.dirname(zipPath))
for path in arg_dict['associatedFiles']:
tmpArgFile.write('{0}\n'.format(path))
tmpArgFile.close()
# tmp zip file names
tmpZipPath = zipPath + '.' + str(uuid.uuid4())
com1 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"test -f {tmpZipPath} || tar -cf {tmpZipPath} -T {arg_file} --transform \'s;.*/;;\' "'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
tmpZipPath=tmpZipPath,
arg_file=tmpArgFile.name,
)
# execute
p1 = subprocess.Popen(com1,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p1.communicate()
retCode = p1.returncode
if retCode != 0:
msgStr = 'failed to make zip for {0} with {1}:{2}'.format(lfn, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
del p1, stdOut, stdErr
gc.collect()
os.remove(tmpArgFile.name)
# avoid overwriting
lockName = 'zip.lock.{0}'.format(lfn)
lockInterval = 60
tmpStat = False
# get lock
for i in range(lockInterval):
tmpStat = self.dbInterface.get_object_lock(lockName, lock_interval=lockInterval)
if tmpStat:
break
time.sleep(1)
# failed to lock
if not tmpStat:
msgStr = 'failed to lock for {0}'.format(lfn)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
# rename to be zipPath
com2 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"test -f {zipPath} || mv {tmpZipPath} {zipPath}"'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
zipPath=zipPath,
tmpZipPath=tmpZipPath,
)
p2 = subprocess.Popen(com2,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2.communicate()
del p2
gc.collect()
# release lock
self.dbInterface.release_object_lock(lockName)
# make return
fileInfo = dict()
fileInfo['path'] = zipPath
# get size
# statInfo = os.stat(zipPath)
# fileInfo['fsize'] = statInfo.st_size
com3 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"stat -c %s {zipPath}"'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
zipPath=zipPath,
)
p3 = subprocess.Popen(com3,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p3.communicate()
retCode = p3.returncode
if retCode != 0:
msgStr = 'failed to get file size of {0} with {1}:{2}'.format(zipPath, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
else:
file_size = int(stdOut.strip('\n'))
fileInfo['fsize'] = file_size
del p3, stdOut, stdErr
gc.collect()
# get checksum
# fileInfo['chksum'] = core_utils.calc_adler32(zipPath)
com4 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"{fileop_script} adler32 {zipPath}"'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
fileop_script=self.fileop_script,
zipPath=zipPath,
)
p4 = subprocess.Popen(com4,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p4.communicate()
retCode = p4.returncode
if retCode != 0:
msgStr = 'failed to get file adler32 of {0} with {1}:{2}'.format(zipPath, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
else:
file_chksum = stdOut.strip('\n')
fileInfo['chksum'] = file_chksum
del p4, stdOut, stdErr
gc.collect()
except Exception:
errMsg = core_utils.dump_error_message(self.zip_tmp_log)
return False, 'failed to zip with {0}'.format(errMsg)
self.zip_tmp_log.debug('{0} done'.format(lfn))
return True, '', fileInfo
| 45.839196
| 119
| 0.478294
|
import os
import uuid
import time
import multiprocessing
import tempfile
import gc
from concurrent.futures import ThreadPoolExecutor as Pool
try:
import subprocess32 as subprocess
except ImportError:
import subprocess
from pandaharvester.harvesterconfig import harvester_config
from pandaharvester.harvestercore import core_utils
from pandaharvester.harvestercore.plugin_base import PluginBase
class BaseZipper(PluginBase):
    """Base plugin that zips job output files before stage-out.

    Zipping can be done either on the local filesystem (simple_zip_output)
    or on a remote host over ssh (ssh_zip_output).
    """

    def __init__(self, **kwarg):
        # Directory in which zip files are made.  "${SRCDIR}" means the same
        # directory as the source files; "${WORKDIR}" means the worker's
        # access point; anything else is taken as a literal path.
        self.zipDir = "${SRCDIR}"
        # Per-invocation logger and job spec; set by the zip entry points.
        self.zip_tmp_log = None
        self.zip_jobSpec = None
        PluginBase.__init__(self, **kwarg)
    def simple_zip_output(self, jobspec, tmp_log):
        """Zip output files of a job on the local filesystem.

        One tar archive is made per output FileSpec from its associated
        files, in parallel worker threads.  On success each FileSpec gets
        its path/fsize/chksum attributes filled in from the archive.

        :param jobspec: JobSpec whose outFiles are to be zipped
        :param tmp_log: logger for this invocation
        :return: (bool status, str error message)
        """
        tmp_log.debug('start')
        self.zip_tmp_log = tmp_log
        self.zip_jobSpec = jobspec
        argDictList = []
        try:
            for fileSpec in jobspec.outFiles:
                if self.zipDir == "${SRCDIR}":
                    # same directory as the source files
                    zipDir = os.path.dirname(next(iter(fileSpec.associatedFiles)).path)
                elif self.zipDir == "${WORKDIR}":
                    # the worker's access point
                    workSpec = jobspec.get_workspec_list()[0]
                    zipDir = workSpec.get_access_point()
                else:
                    zipDir = self.zipDir
                zipPath = os.path.join(zipDir, fileSpec.lfn)
                argDict = dict()
                argDict['zipPath'] = zipPath
                argDict['associatedFiles'] = []
                # collect only files that exist; mark the rest as failed
                for assFileSpec in fileSpec.associatedFiles:
                    if os.path.exists(assFileSpec.path):
                        argDict['associatedFiles'].append(assFileSpec.path)
                    else:
                        assFileSpec.status = 'failed'
                argDictList.append(argDict)
            # number of zip workers; fall back to the CPU count
            try:
                nThreadsForZip = harvester_config.stager.nThreadsForZip
            except Exception:
                nThreadsForZip = multiprocessing.cpu_count()
            with Pool(max_workers=nThreadsForZip) as pool:
                retValList = pool.map(self.make_one_zip, argDictList)
                # check per-file results and propagate archive attributes
                for fileSpec, retVal in zip(jobspec.outFiles, retValList):
                    tmpRet, errMsg, fileInfo = retVal
                    if tmpRet is True:
                        fileSpec.path = fileInfo['path']
                        fileSpec.fsize = fileInfo['fsize']
                        fileSpec.chksum = fileInfo['chksum']
                        msgStr = 'fileSpec.path - {0}, fileSpec.fsize - {1}, fileSpec.chksum(adler32) - {2}' \
                                 .format(fileSpec.path, fileSpec.fsize, fileSpec.chksum)
                        tmp_log.debug(msgStr)
                    else:
                        tmp_log.error('got {0} with {1} when zipping {2}'.format(tmpRet, errMsg, fileSpec.lfn))
                        return tmpRet, 'failed to zip with {0}'.format(errMsg)
        except Exception:
            errMsg = core_utils.dump_error_message(tmp_log)
            return False, 'failed to zip with {0}'.format(errMsg)
        tmp_log.debug('done')
        return True, ''
def make_one_zip(self, arg_dict):
try:
zipPath = arg_dict['zipPath']
lfn = os.path.basename(zipPath)
self.zip_tmp_log.debug('{0} start zipPath={1} with {2} files'.format(lfn, zipPath,
len(arg_dict['associatedFiles'])))
if not os.path.exists(zipPath):
# tmp file names
tmpZipPath = zipPath + '.' + str(uuid.uuid4())
tmpZipPathIn = tmpZipPath + '.in'
with open(tmpZipPathIn, "w") as f:
for associatedFile in arg_dict['associatedFiles']:
f.write("{0}\n".format(associatedFile))
# make command
com = 'tar -c -f {0} -T {1} '.format(tmpZipPath, tmpZipPathIn)
com += "--transform 's/.*\///' "
# execute
p = subprocess.Popen(com,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p.communicate()
retCode = p.returncode
if retCode != 0:
msgStr = 'failed to make zip for {0} with {1}:{2}'.format(lfn, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
# avoid overwriting
lockName = 'zip.lock.{0}'.format(lfn)
lockInterval = 60
tmpStat = False
# get lock
for i in range(lockInterval):
tmpStat = self.dbInterface.get_object_lock(lockName, lock_interval=lockInterval)
if tmpStat:
break
time.sleep(1)
# failed to lock
if not tmpStat:
msgStr = 'failed to lock for {0}'.format(lfn)
self.zip_tmp_log.error(msgStr)
return None, msgStr
if not os.path.exists(zipPath):
os.rename(tmpZipPath, zipPath)
# release lock
self.dbInterface.release_object_lock(lockName)
# make return
fileInfo = dict()
fileInfo['path'] = zipPath
# get size
statInfo = os.stat(zipPath)
fileInfo['fsize'] = statInfo.st_size
fileInfo['chksum'] = core_utils.calc_adler32(zipPath)
except Exception:
errMsg = core_utils.dump_error_message(self.zip_tmp_log)
return False, 'failed to zip with {0}'.format(errMsg)
self.zip_tmp_log.debug('{0} done'.format(lfn))
return True, '', fileInfo
    # zip output files; file operations are done on remote side with ssh
    def ssh_zip_output(self, jobspec, tmp_log):
        """Zip output files of a job on a remote host via ssh.

        Mirrors simple_zip_output, but every filesystem operation
        (existence check, tar, stat, checksum) runs remotely through ssh
        using self.sshkey / self.userhost.

        :param jobspec: JobSpec whose outFiles are to be zipped
        :param tmp_log: logger for this invocation
        :return: (bool status, str error message)
        """
        tmp_log.debug('start')
        self.zip_tmp_log = tmp_log
        self.zip_jobSpec = jobspec
        argDictList = []
        outFiles_list = list(jobspec.outFiles)
        try:
            try:
                nThreadsForZip = harvester_config.stager.nThreadsForZip
            except Exception:
                nThreadsForZip = multiprocessing.cpu_count()
            # check associate file existence
            def _check_assfile_existence(fileSpec):
                # ass_file_paths_str = ' '.join([ assFileSpec.path for assFileSpec in fileSpec.associatedFiles ])
                # tmpfile over shared fs; the remote host reads the same path
                tmpArgFile = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_check-exist.tmp',
                                                         dir=os.path.dirname(next(iter(fileSpec.associatedFiles)).path))
                for assFileSpec in fileSpec.associatedFiles:
                    tmpArgFile.write('{0}\n'.format(assFileSpec.path))
                tmpArgFile.close()
                # record set of paths that exist remotely
                existence_set = set()
                # make command: remote prints one T/F line per listed path
                # '"for i in $(cat {arg_file}); do test -f $i && echo \'T\' || echo \'F\'; done" '
                com = ( 'ssh '
                        '-o StrictHostKeyChecking=no '
                        '-i {sshkey} '
                        '{userhost} '
                        '"cat {arg_file} | xargs -I%% sh -c \' test -f %% && echo T || echo F \' " '
                        ).format(
                            sshkey=self.sshkey,
                            userhost=self.userhost,
                            arg_file=tmpArgFile.name,
                        )
                # execute
                p = subprocess.Popen(com,
                                     shell=True,
                                     close_fds=True,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE)
                stdOut, stdErr = p.communicate()
                retCode = p.returncode
                if retCode != 0:
                    msgStr = 'failed to existence of associate files with {0}:{1}'.format(stdOut, stdErr)
                    tmp_log.error(msgStr)
                else:
                    try:
                        # NOTE(review): stdOut is bytes on python3; str-argument
                        # strip/split assumes python2 or text mode — confirm
                        ret_list = stdOut.strip('\n').split('\n')
                        if len(fileSpec.associatedFiles) == len(ret_list):
                            for (assFileSpec, retVal) in zip(fileSpec.associatedFiles, ret_list):
                                if retVal == 'T':
                                    existence_set.add(assFileSpec.path)
                        else:
                            msgStr = 'returned number of files inconsistent! Skipped...'
                            tmp_log.error(msgStr)
                    except Exception:
                        core_utils.dump_error_message(tmp_log)
                os.remove(tmpArgFile.name)
                del p, stdOut, stdErr
                gc.collect()
                return existence_set
            # parallel execution of check existence
            with Pool(max_workers=nThreadsForZip) as pool:
                existence_set_list = pool.map(_check_assfile_existence, outFiles_list)
            # loop over output files, building zip arguments
            for fileSpec, existence_set in zip(outFiles_list, existence_set_list):
                if self.zipDir == "${SRCDIR}":
                    # the same directory as src
                    zipDir = os.path.dirname(next(iter(fileSpec.associatedFiles)).path)
                elif self.zipDir == "${WORKDIR}":
                    # work dir
                    workSpec = jobspec.get_workspec_list()[0]
                    zipDir = workSpec.get_access_point()
                else:
                    zipDir = self.zipDir
                zipPath = os.path.join(zipDir, fileSpec.lfn)
                argDict = dict()
                argDict['zipPath'] = zipPath
                argDict['associatedFiles'] = []
                # check existence of files; mark missing ones as failed
                for assFileSpec in fileSpec.associatedFiles:
                    if assFileSpec.path in existence_set:
                        argDict['associatedFiles'].append(assFileSpec.path)
                    else:
                        assFileSpec.status = 'failed'
                # append
                argDictList.append(argDict)
            # parallel execution of zip
            with Pool(max_workers=nThreadsForZip) as pool:
                retValList = pool.map(self.ssh_make_one_zip, argDictList)
            # check returns
            for fileSpec, retVal in zip(jobspec.outFiles, retValList):
                tmpRet, errMsg, fileInfo = retVal
                if tmpRet is True:
                    # set path and archive attributes on the FileSpec
                    fileSpec.path = fileInfo['path']
                    fileSpec.fsize = fileInfo['fsize']
                    fileSpec.chksum = fileInfo['chksum']
                    msgStr = 'fileSpec.path - {0}, fileSpec.fsize - {1}, fileSpec.chksum(adler32) - {2}' \
                             .format(fileSpec.path, fileSpec.fsize, fileSpec.chksum)
                    tmp_log.debug(msgStr)
                else:
                    tmp_log.error('got {0} with {1} when zipping {2}'.format(tmpRet, errMsg, fileSpec.lfn))
                    return tmpRet, 'failed to zip with {0}'.format(errMsg)
        except Exception:
            errMsg = core_utils.dump_error_message(tmp_log)
            return False, 'failed to zip with {0}'.format(errMsg)
        tmp_log.debug('done')
        return True, ''
# make one zip file; file operations are done on remote side with ssh
def ssh_make_one_zip(self, arg_dict):
try:
zipPath = arg_dict['zipPath']
lfn = os.path.basename(zipPath)
self.zip_tmp_log.debug('{0} start zipPath={1} with {2} files'.format(lfn, zipPath,
len(arg_dict['associatedFiles'])))
# tmp arg file
tmpArgFile = tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='_tar-name.tmp',
dir=os.path.dirname(zipPath))
for path in arg_dict['associatedFiles']:
tmpArgFile.write('{0}\n'.format(path))
tmpArgFile.close()
# tmp zip file names
tmpZipPath = zipPath + '.' + str(uuid.uuid4())
com1 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"test -f {tmpZipPath} || tar -cf {tmpZipPath} -T {arg_file} --transform \'s;.*/;;\' "'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
tmpZipPath=tmpZipPath,
arg_file=tmpArgFile.name,
)
# execute
p1 = subprocess.Popen(com1,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p1.communicate()
retCode = p1.returncode
if retCode != 0:
msgStr = 'failed to make zip for {0} with {1}:{2}'.format(lfn, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
del p1, stdOut, stdErr
gc.collect()
os.remove(tmpArgFile.name)
# avoid overwriting
lockName = 'zip.lock.{0}'.format(lfn)
lockInterval = 60
tmpStat = False
# get lock
for i in range(lockInterval):
tmpStat = self.dbInterface.get_object_lock(lockName, lock_interval=lockInterval)
if tmpStat:
break
time.sleep(1)
# failed to lock
if not tmpStat:
msgStr = 'failed to lock for {0}'.format(lfn)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
# rename to be zipPath
com2 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"test -f {zipPath} || mv {tmpZipPath} {zipPath}"'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
zipPath=zipPath,
tmpZipPath=tmpZipPath,
)
p2 = subprocess.Popen(com2,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
p2.communicate()
del p2
gc.collect()
# release lock
self.dbInterface.release_object_lock(lockName)
# make return
fileInfo = dict()
fileInfo['path'] = zipPath
# get size
# statInfo = os.stat(zipPath)
# fileInfo['fsize'] = statInfo.st_size
com3 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"stat -c %s {zipPath}"'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
zipPath=zipPath,
)
p3 = subprocess.Popen(com3,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p3.communicate()
retCode = p3.returncode
if retCode != 0:
msgStr = 'failed to get file size of {0} with {1}:{2}'.format(zipPath, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
else:
file_size = int(stdOut.strip('\n'))
fileInfo['fsize'] = file_size
del p3, stdOut, stdErr
gc.collect()
# get checksum
# fileInfo['chksum'] = core_utils.calc_adler32(zipPath)
com4 = ('ssh '
'-o StrictHostKeyChecking=no '
'-i {sshkey} '
'{userhost} '
'"{fileop_script} adler32 {zipPath}"'
).format(
sshkey=self.sshkey,
userhost=self.userhost,
fileop_script=self.fileop_script,
zipPath=zipPath,
)
p4 = subprocess.Popen(com4,
shell=True,
close_fds=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
stdOut, stdErr = p4.communicate()
retCode = p4.returncode
if retCode != 0:
msgStr = 'failed to get file adler32 of {0} with {1}:{2}'.format(zipPath, stdOut, stdErr)
self.zip_tmp_log.error(msgStr)
return None, msgStr, {}
else:
file_chksum = stdOut.strip('\n')
fileInfo['chksum'] = file_chksum
del p4, stdOut, stdErr
gc.collect()
except Exception:
errMsg = core_utils.dump_error_message(self.zip_tmp_log)
return False, 'failed to zip with {0}'.format(errMsg)
self.zip_tmp_log.debug('{0} done'.format(lfn))
return True, '', fileInfo
| true
| true
|
1c4288efd617819e6d0091556a0abbfdca80a2e0
| 16,472
|
py
|
Python
|
python/ccxt/lbank.py
|
Kubulus1997/Trading
|
caa80b8fed76e0bdc0b6b8d99f9f27f89fa545e2
|
[
"MIT"
] | null | null | null |
python/ccxt/lbank.py
|
Kubulus1997/Trading
|
caa80b8fed76e0bdc0b6b8d99f9f27f89fa545e2
|
[
"MIT"
] | null | null | null |
python/ccxt/lbank.py
|
Kubulus1997/Trading
|
caa80b8fed76e0bdc0b6b8d99f9f27f89fa545e2
|
[
"MIT"
] | 1
|
2021-07-20T10:37:02.000Z
|
2021-07-20T10:37:02.000Z
|
# -*- coding: utf-8 -*-
# PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN:
# https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code
from ccxt.base.exchange import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
class lbank (Exchange):
    """ccxt implementation of the LBank exchange (REST API v1)."""

    def describe(self):
        """Return the static exchange description: capabilities, timeframes,
        endpoint URLs, API routes, and fee schedule."""
        return self.deep_extend(super(lbank, self).describe(), {
            'id': 'lbank',
            'name': 'LBank',
            'countries': 'CN',
            'version': 'v1',
            'has': {
                'fetchTickers': True,
                'fetchOHLCV': True,
                'fetchOrder': True,
                'fetchOrders': True,
                'fetchOpenOrders': True,
                'fetchClosedOrders': True,
            },
            'timeframes': {
                '1m': 'minute1',
                '5m': 'minute5',
                '15m': 'minute15',
                '30m': 'minute30',
                '1h': 'hour1',
                '2h': 'hour2',
                '4h': 'hour4',
                '6h': 'hour6',
                '8h': 'hour8',
                '12h': 'hour12',
                '1d': 'day1',
                '1w': 'week1',
            },
            'urls': {
                'logo': 'https://user-images.githubusercontent.com/1294454/38063602-9605e28a-3302-11e8-81be-64b1e53c4cfb.jpg',
                'api': 'https://api.lbank.info',
                'www': 'https://www.lbank.info',
                'doc': 'https://www.lbank.info/api/api-overview',
                'fees': 'https://lbankinfo.zendesk.com/hc/zh-cn/articles/115002295114--%E8%B4%B9%E7%8E%87%E8%AF%B4%E6%98%8E',
            },
            'api': {
                'public': {
                    'get': [
                        'currencyPairs',
                        'ticker',
                        'depth',
                        'trades',
                        'kline',
                    ],
                },
                'private': {
                    'post': [
                        'user_info',
                        'create_order',
                        'cancel_order',
                        'orders_info',
                        'orders_info_history',
                    ],
                },
            },
            'fees': {
                'trading': {
                    'maker': 0.1 / 100,
                    'taker': 0.1 / 100,
                },
                'funding': {
                    # flat withdrawal fees per currency; None means unknown
                    'withdraw': {
                        'BTC': None,
                        'ZEC': 0.01,
                        'ETH': 0.01,
                        'ETC': 0.01,
                        # 'QTUM': amount => max(0.01, amount * (0.1 / 100)),
                        'VEN': 10.0,
                        'BCH': 0.0002,
                        'SC': 50.0,
                        'BTM': 20.0,
                        'NAS': 1.0,
                        'EOS': 1.0,
                        'XWC': 5.0,
                        'BTS': 1.0,
                        'INK': 10.0,
                        'BOT': 3.0,
                        'YOYOW': 15.0,
                        'TGC': 10.0,
                        'NEO': 0.0,
                        'CMT': 20.0,
                        'SEER': 2000.0,
                        'FIL': None,
                        'BTG': None,
                    },
                },
            },
        })
def fetch_markets(self):
markets = self.publicGetCurrencyPairs()
result = []
for i in range(0, len(markets)):
id = markets[i]
baseId, quoteId = id.split('_')
base = self.common_currency_code(baseId.upper())
quote = self.common_currency_code(quoteId.upper())
symbol = '/'.join([base, quote])
precision = {
'amount': 8,
'price': 8,
}
lot = math.pow(10, -precision['amount'])
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'lot': lot,
'precision': precision,
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': id,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = market['symbol']
timestamp = ticker['timestamp']
info = ticker
ticker = info['ticker']
last = self.safe_float(ticker, 'latest')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': self.safe_float(ticker, 'change'),
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': info,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(self.extend({
'symbol': 'all',
}, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker['symbol']
market = self.marketsById[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_order_book(self, symbol, limit=60, params={}):
self.load_markets()
response = self.publicGetDepth(self.extend({
'symbol': self.market_id(symbol),
'size': min(limit, 60),
}, params))
return self.parse_order_book(response)
def parse_trade(self, trade, market=None):
symbol = market['symbol']
timestamp = int(trade['date_ms'])
price = float(trade['price'])
amount = float(trade['amount'])
cost = self.cost_to_precision(symbol, price * amount)
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(trade, 'tid'),
'order': None,
'type': None,
'side': trade['type'],
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': self.safe_value(trade, 'info', trade),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'size': 100,
}
if since:
request['time'] = int(since / 1000)
if limit:
request['size'] = limit
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
'size': 1000,
}
if since:
request['time'] = int(since / 1000)
if limit:
request['size'] = limit
response = self.publicGetKline(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserInfo(params)
result = {'info': response}
ids = list(self.extend(response['info']['free'], response['info']['freeze']).keys())
for i in range(0, len(ids)):
id = ids[i]
code = id
if id in self.currencies_by_id:
code = self.currencies_by_id[id]['code']
free = self.safe_float(response['info']['free'], id, 0.0)
used = self.safe_float(response['info']['freeze'], id, 0.0)
account = {
'free': free,
'used': used,
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
symbol = self.safe_value(self.marketsById, order['symbol'], {'symbol': None})
timestamp = self.safe_integer(order, 'create_time')
# Limit Order Request Returns: Order Price
# Market Order Returns: cny amount of market order
price = float(order['price'])
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
cost = filled * self.safe_float(order, 'avg_price')
status = self.safe_integer(order, 'status')
if status == -1 or status == 4:
status = 'canceled'
elif status == 2:
status = 'closed'
else:
status = 'open'
return {
'id': self.safe_string(order, 'order_id'),
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': self.safe_string(order, 'order_type'),
'side': order['type'],
'price': price,
'cost': cost,
'amount': amount,
'filled': None,
'remaining': None,
'trades': None,
'fee': None,
'info': self.safe_value(order, 'info', order),
}
    def create_order(self, symbol, type, side, amount, price=None, params={}):
        """Place an order and return a locally-built unified order.

        For market orders the API expects type 'buy_market'/'sell_market'
        and no price; for limit orders 'buy'/'sell' plus a price.  The API
        response only carries the order id, so the unified order is
        reconstructed from the request we sent.
        """
        self.load_markets()
        market = self.market(symbol)
        order = {
            'symbol': market['id'],
            'type': side,
            'amount': amount,
        }
        if type == 'market':
            order['type'] += '_market'
        else:
            order['price'] = price
        response = self.privatePostCreateOrder(self.extend(order, params))
        # rebuild a parseable order dict from the request + response
        order = self.omit(order, 'type')
        order['order_id'] = response['order_id']
        order['type'] = side
        order['order_type'] = type
        order['create_time'] = self.milliseconds()
        order['info'] = response
        order = self.parse_order(order, market)
        id = order['id']
        # cache the order locally so it can be looked up later
        self.orders[id] = order
        return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privatePostCancelOrder(self.extend({
'symbol': market['id'],
'order_id': id,
}, params))
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privatePostOrdersInfo(self.extend({
'symbol': market['id'],
'order_id': id,
}, params))
return self.parse_order(response['orders'][0], market)
    def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
        """Fetch up to 100 orders for *symbol* (first history page only).

        NOTE(review): *symbol* defaults to None but self.market(None) will
        fail — a symbol is effectively required; confirm against callers.
        """
        self.load_markets()
        market = self.market(symbol)
        response = self.privatePostOrdersInfoHistory(self.extend({
            'symbol': market['id'],
            'current_page': 1,
            'page_length': 100,
        }, params))
        return self.parse_orders(response['orders'], None, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.fetch_orders(self.extend({
'status': 0,
}, params))
return response
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.fetch_orders(self.extend({
'status': 1,
}, params))
return response
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Build the request URL, body, and headers; private calls are signed.

        The signature is the upper-cased hash of the raw-url-encoded,
        key-sorted query string with '&secret_key=<secret>' appended
        (self.hash's default algorithm; presumably MD5 — TODO confirm
        against the LBank API docs).
        """
        query = self.omit(params, self.extract_params(path))
        url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
        # Every endpoint ends with ".do"
        url += '.do'
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            query = self.keysort(self.extend({
                'api_key': self.apiKey,
            }, params))
            queryString = self.rawencode(query) + '&secret_key=' + self.secret
            query['sign'] = self.hash(self.encode(queryString)).upper()
            body = self.urlencode(query)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        """Issue the request and translate API error codes into exceptions.

        The API returns {'result': 'false', 'error_code': ...} on failure;
        the code is mapped to a human-readable message and an exception
        class (ExchangeError when the code is unknown).
        """
        response = self.fetch2(path, api, method, params, headers, body)
        success = self.safe_string(response, 'result')
        if success == 'false':
            errorCode = self.safe_string(response, 'error_code')
            # map of documented LBank error codes to messages; fall back to
            # the raw JSON response when the code is unknown
            message = self.safe_string({
                '10000': 'Internal error',
                '10001': 'The required parameters can not be empty',
                '10002': 'verification failed',
                '10003': 'Illegal parameters',
                '10004': 'User requests are too frequent',
                '10005': 'Key does not exist',
                '10006': 'user does not exist',
                '10007': 'Invalid signature',
                '10008': 'This currency pair is not supported',
                '10009': 'Limit orders can not be missing orders and the number of orders',
                '10010': 'Order price or order quantity must be greater than 0',
                '10011': 'Market orders can not be missing the amount of the order',
                '10012': 'market sell orders can not be missing orders',
                '10013': 'is less than the minimum trading position 0.001',
                '10014': 'Account number is not enough',
                '10015': 'The order type is wrong',
                '10016': 'Account balance is not enough',
                '10017': 'Abnormal server',
                '10018': 'order inquiry can not be more than 50 less than one',
                '10019': 'withdrawal orders can not be more than 3 less than one',
                '10020': 'less than the minimum amount of the transaction limit of 0.001',
            }, errorCode, self.json(response))
            # pick the most specific exception class for the code
            ErrorClass = self.safe_value({
                '10002': AuthenticationError,
                '10004': DDoSProtection,
                '10005': AuthenticationError,
                '10006': AuthenticationError,
                '10007': AuthenticationError,
                '10009': InvalidOrder,
                '10010': InvalidOrder,
                '10011': InvalidOrder,
                '10012': InvalidOrder,
                '10013': InvalidOrder,
                '10014': InvalidOrder,
                '10015': InvalidOrder,
                '10016': InvalidOrder,
            }, errorCode, ExchangeError)
            raise ErrorClass(message)
        return response
| 37.52164
| 126
| 0.477598
|
ge import Exchange
import math
from ccxt.base.errors import ExchangeError
from ccxt.base.errors import AuthenticationError
from ccxt.base.errors import InvalidOrder
from ccxt.base.errors import DDoSProtection
class lbank (Exchange):
def describe(self):
return self.deep_extend(super(lbank, self).describe(), {
'id': 'lbank',
'name': 'LBank',
'countries': 'CN',
'version': 'v1',
'has': {
'fetchTickers': True,
'fetchOHLCV': True,
'fetchOrder': True,
'fetchOrders': True,
'fetchOpenOrders': True,
'fetchClosedOrders': True,
},
'timeframes': {
'1m': 'minute1',
'5m': 'minute5',
'15m': 'minute15',
'30m': 'minute30',
'1h': 'hour1',
'2h': 'hour2',
'4h': 'hour4',
'6h': 'hour6',
'8h': 'hour8',
'12h': 'hour12',
'1d': 'day1',
'1w': 'week1',
},
'urls': {
'logo': 'https://user-images.githubusercontent.com/1294454/38063602-9605e28a-3302-11e8-81be-64b1e53c4cfb.jpg',
'api': 'https://api.lbank.info',
'www': 'https://www.lbank.info',
'doc': 'https://www.lbank.info/api/api-overview',
'fees': 'https://lbankinfo.zendesk.com/hc/zh-cn/articles/115002295114--%E8%B4%B9%E7%8E%87%E8%AF%B4%E6%98%8E',
},
'api': {
'public': {
'get': [
'currencyPairs',
'ticker',
'depth',
'trades',
'kline',
],
},
'private': {
'post': [
'user_info',
'create_order',
'cancel_order',
'orders_info',
'orders_info_history',
],
},
},
'fees': {
'trading': {
'maker': 0.1 / 100,
'taker': 0.1 / 100,
},
'funding': {
'withdraw': {
'BTC': None,
'ZEC': 0.01,
'ETH': 0.01,
'ETC': 0.01,
'VEN': 10.0,
'BCH': 0.0002,
'SC': 50.0,
'BTM': 20.0,
'NAS': 1.0,
'EOS': 1.0,
'XWC': 5.0,
'BTS': 1.0,
'INK': 10.0,
'BOT': 3.0,
'YOYOW': 15.0,
'TGC': 10.0,
'NEO': 0.0,
'CMT': 20.0,
'SEER': 2000.0,
'FIL': None,
'BTG': None,
},
},
},
})
def fetch_markets(self):
markets = self.publicGetCurrencyPairs()
result = []
for i in range(0, len(markets)):
id = markets[i]
baseId, quoteId = id.split('_')
base = self.common_currency_code(baseId.upper())
quote = self.common_currency_code(quoteId.upper())
symbol = '/'.join([base, quote])
precision = {
'amount': 8,
'price': 8,
}
lot = math.pow(10, -precision['amount'])
result.append({
'id': id,
'symbol': symbol,
'base': base,
'quote': quote,
'baseId': baseId,
'quoteId': quoteId,
'active': True,
'lot': lot,
'precision': precision,
'limits': {
'amount': {
'min': lot,
'max': None,
},
'price': {
'min': math.pow(10, -precision['price']),
'max': math.pow(10, precision['price']),
},
'cost': {
'min': None,
'max': None,
},
},
'info': id,
})
return result
def parse_ticker(self, ticker, market=None):
symbol = market['symbol']
timestamp = ticker['timestamp']
info = ticker
ticker = info['ticker']
last = self.safe_float(ticker, 'latest')
return {
'symbol': symbol,
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'high': self.safe_float(ticker, 'high'),
'low': self.safe_float(ticker, 'low'),
'bid': None,
'bidVolume': None,
'ask': None,
'askVolume': None,
'vwap': None,
'open': None,
'close': last,
'last': last,
'previousClose': None,
'change': self.safe_float(ticker, 'change'),
'percentage': None,
'average': None,
'baseVolume': self.safe_float(ticker, 'vol'),
'quoteVolume': None,
'info': info,
}
def fetch_ticker(self, symbol, params={}):
self.load_markets()
market = self.market(symbol)
response = self.publicGetTicker(self.extend({
'symbol': market['id'],
}, params))
return self.parse_ticker(response, market)
def fetch_tickers(self, symbols=None, params={}):
self.load_markets()
tickers = self.publicGetTicker(self.extend({
'symbol': 'all',
}, params))
result = {}
for i in range(0, len(tickers)):
ticker = tickers[i]
id = ticker['symbol']
market = self.marketsById[id]
symbol = market['symbol']
result[symbol] = self.parse_ticker(ticker, market)
return result
def fetch_order_book(self, symbol, limit=60, params={}):
self.load_markets()
response = self.publicGetDepth(self.extend({
'symbol': self.market_id(symbol),
'size': min(limit, 60),
}, params))
return self.parse_order_book(response)
def parse_trade(self, trade, market=None):
symbol = market['symbol']
timestamp = int(trade['date_ms'])
price = float(trade['price'])
amount = float(trade['amount'])
cost = self.cost_to_precision(symbol, price * amount)
return {
'timestamp': timestamp,
'datetime': self.iso8601(timestamp),
'symbol': symbol,
'id': self.safe_string(trade, 'tid'),
'order': None,
'type': None,
'side': trade['type'],
'price': price,
'amount': amount,
'cost': float(cost),
'fee': None,
'info': self.safe_value(trade, 'info', trade),
}
def fetch_trades(self, symbol, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'size': 100,
}
if since:
request['time'] = int(since / 1000)
if limit:
request['size'] = limit
response = self.publicGetTrades(self.extend(request, params))
return self.parse_trades(response, market, since, limit)
def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
request = {
'symbol': market['id'],
'type': self.timeframes[timeframe],
'size': 1000,
}
if since:
request['time'] = int(since / 1000)
if limit:
request['size'] = limit
response = self.publicGetKline(self.extend(request, params))
return self.parse_ohlcvs(response, market, timeframe, since, limit)
def fetch_balance(self, params={}):
self.load_markets()
response = self.privatePostUserInfo(params)
result = {'info': response}
ids = list(self.extend(response['info']['free'], response['info']['freeze']).keys())
for i in range(0, len(ids)):
id = ids[i]
code = id
if id in self.currencies_by_id:
code = self.currencies_by_id[id]['code']
free = self.safe_float(response['info']['free'], id, 0.0)
used = self.safe_float(response['info']['freeze'], id, 0.0)
account = {
'free': free,
'used': used,
'total': 0.0,
}
account['total'] = self.sum(account['free'], account['used'])
result[code] = account
return self.parse_balance(result)
def parse_order(self, order, market=None):
symbol = self.safe_value(self.marketsById, order['symbol'], {'symbol': None})
timestamp = self.safe_integer(order, 'create_time')
price = float(order['price'])
amount = self.safe_float(order, 'amount')
filled = self.safe_float(order, 'deal_amount')
cost = filled * self.safe_float(order, 'avg_price')
status = self.safe_integer(order, 'status')
if status == -1 or status == 4:
status = 'canceled'
elif status == 2:
status = 'closed'
else:
status = 'open'
return {
'id': self.safe_string(order, 'order_id'),
'datetime': self.iso8601(timestamp),
'timestamp': timestamp,
'status': status,
'symbol': symbol,
'type': self.safe_string(order, 'order_type'),
'side': order['type'],
'price': price,
'cost': cost,
'amount': amount,
'filled': None,
'remaining': None,
'trades': None,
'fee': None,
'info': self.safe_value(order, 'info', order),
}
def create_order(self, symbol, type, side, amount, price=None, params={}):
self.load_markets()
market = self.market(symbol)
order = {
'symbol': market['id'],
'type': side,
'amount': amount,
}
if type == 'market':
order['type'] += '_market'
else:
order['price'] = price
response = self.privatePostCreateOrder(self.extend(order, params))
order = self.omit(order, 'type')
order['order_id'] = response['order_id']
order['type'] = side
order['order_type'] = type
order['create_time'] = self.milliseconds()
order['info'] = response
order = self.parse_order(order, market)
id = order['id']
self.orders[id] = order
return order
def cancel_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privatePostCancelOrder(self.extend({
'symbol': market['id'],
'order_id': id,
}, params))
return response
def fetch_order(self, id, symbol=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privatePostOrdersInfo(self.extend({
'symbol': market['id'],
'order_id': id,
}, params))
return self.parse_order(response['orders'][0], market)
def fetch_orders(self, symbol=None, since=None, limit=None, params={}):
self.load_markets()
market = self.market(symbol)
response = self.privatePostOrdersInfoHistory(self.extend({
'symbol': market['id'],
'current_page': 1,
'page_length': 100,
}, params))
return self.parse_orders(response['orders'], None, since, limit)
def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.fetch_orders(self.extend({
'status': 0,
}, params))
return response
def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}):
response = self.fetch_orders(self.extend({
'status': 1,
}, params))
return response
    def sign(self, path, api='public', method='GET', params={}, headers=None, body=None):
        # Build a request descriptor. Public endpoints get their params in
        # the query string; private endpoints are signed, form-encoded POSTs.
        query = self.omit(params, self.extract_params(path))
        url = self.urls['api'] + '/' + self.version + '/' + self.implode_params(path, params)
        url += '.do'
        if api == 'public':
            if query:
                url += '?' + self.urlencode(query)
        else:
            self.check_required_credentials()
            # Keys must be sorted before signing; the api_key is always sent.
            query = self.keysort(self.extend({
                'api_key': self.apiKey,
            }, params))
            # Signature = uppercased hash of the raw-encoded sorted query
            # string with '&secret_key=<secret>' appended.
            # NOTE(review): self.hash is called without an algorithm, so the
            # base-class default applies — presumably MD5; confirm against
            # the Exchange.hash implementation.
            queryString = self.rawencode(query) + '&secret_key=' + self.secret
            query['sign'] = self.hash(self.encode(queryString)).upper()
            body = self.urlencode(query)
            headers = {'Content-Type': 'application/x-www-form-urlencoded'}
        return {'url': url, 'method': method, 'body': body, 'headers': headers}
    def request(self, path, api='public', method='GET', params={}, headers=None, body=None):
        # Perform the HTTP request, then translate the exchange's numeric
        # error codes into typed ccxt exceptions when result == 'false'.
        response = self.fetch2(path, api, method, params, headers, body)
        success = self.safe_string(response, 'result')
        if success == 'false':
            errorCode = self.safe_string(response, 'error_code')
            # Human-readable message for the error code; falls back to the
            # raw JSON response when the code is unknown.
            message = self.safe_string({
                '10000': 'Internal error',
                '10001': 'The required parameters can not be empty',
                '10002': 'verification failed',
                '10003': 'Illegal parameters',
                '10004': 'User requests are too frequent',
                '10005': 'Key does not exist',
                '10006': 'user does not exist',
                '10007': 'Invalid signature',
                '10008': 'This currency pair is not supported',
                '10009': 'Limit orders can not be missing orders and the number of orders',
                '10010': 'Order price or order quantity must be greater than 0',
                '10011': 'Market orders can not be missing the amount of the order',
                '10012': 'market sell orders can not be missing orders',
                '10013': 'is less than the minimum trading position 0.001',
                '10014': 'Account number is not enough',
                '10015': 'The order type is wrong',
                '10016': 'Account balance is not enough',
                '10017': 'Abnormal server',
                '10018': 'order inquiry can not be more than 50 less than one',
                '10019': 'withdrawal orders can not be more than 3 less than one',
                '10020': 'less than the minimum amount of the transaction limit of 0.001',
            }, errorCode, self.json(response))
            # Map the code to a specific exception class; anything unmapped
            # raises the generic ExchangeError.
            ErrorClass = self.safe_value({
                '10002': AuthenticationError,
                '10004': DDoSProtection,
                '10005': AuthenticationError,
                '10006': AuthenticationError,
                '10007': AuthenticationError,
                '10009': InvalidOrder,
                '10010': InvalidOrder,
                '10011': InvalidOrder,
                '10012': InvalidOrder,
                '10013': InvalidOrder,
                '10014': InvalidOrder,
                '10015': InvalidOrder,
                '10016': InvalidOrder,
            }, errorCode, ExchangeError)
            raise ErrorClass(message)
        return response
| true
| true
|
1c42892c331158d5d88c0c7c4c0962617cdcf447
| 485
|
py
|
Python
|
examples/scan/plot_gs_energy.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | 2
|
2018-12-21T19:41:10.000Z
|
2019-11-25T15:26:27.000Z
|
examples/scan/plot_gs_energy.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | 73
|
2017-12-22T13:30:16.000Z
|
2022-02-22T04:21:14.000Z
|
examples/scan/plot_gs_energy.py
|
f-koehler/mlxtk
|
373aed06ab23ab9b70cd99e160228c50b87e939a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Plot the ground-state energy against the scan parameter m."""
import os.path
import matplotlib.pyplot as plt
import numpy
import mlxtk
import mlxtk.inout.output
parameters = mlxtk.load_scan("harmonic_trap_scan")
def load_gs_energy(idx, path, parameter):
    # Read the relaxation output of this scan point; data row 2 is the
    # energy series — presumably the last entry is the converged value.
    output = mlxtk.inout.output.read_output(os.path.join(path, "gs_relax/output"))
    return parameter.m, output[2][-1]
points = numpy.array(parameters.foreach(load_gs_energy))
plt.plot(points[:, 0], points[:, 1], marker=".")
plt.xlabel("$m$")
plt.ylabel("$E$")
plt.show()
| 21.086957
| 80
| 0.719588
|
import os.path
import matplotlib.pyplot as plt
import numpy
import mlxtk
import mlxtk.inout.output
parameters = mlxtk.load_scan("harmonic_trap_scan")
def load_gs_energy(idx, path, parameter):
data = mlxtk.inout.output.read_output(os.path.join(path, "gs_relax/output"))
return parameter.m, data[2][-1]
data = numpy.array(parameters.foreach(load_gs_energy))
plt.plot(data[:, 0], data[:, 1], marker=".")
plt.xlabel("$m$")
plt.ylabel("$E$")
plt.show()
| true
| true
|
1c4289735bfd091d0116b5f9487f65562d2b9827
| 556
|
py
|
Python
|
Server/ChatBot/venv/Lib/site-packages/numpy/_distributor_init.py
|
sozuer53/BBC
|
31bb128cb1e1a19db955fd673d67cf0e92bac3a4
|
[
"Apache-2.0"
] | 1
|
2020-02-23T21:19:50.000Z
|
2020-02-23T21:19:50.000Z
|
Server/ChatBot/venv/Lib/site-packages/numpy/_distributor_init.py
|
sozuer53/BBC
|
31bb128cb1e1a19db955fd673d67cf0e92bac3a4
|
[
"Apache-2.0"
] | null | null | null |
Server/ChatBot/venv/Lib/site-packages/numpy/_distributor_init.py
|
sozuer53/BBC
|
31bb128cb1e1a19db955fd673d67cf0e92bac3a4
|
[
"Apache-2.0"
] | 6
|
2020-04-13T15:33:30.000Z
|
2020-06-21T19:26:55.000Z
|
""" Initialization specific to SSE2 builds of BLAS / LAPACK libs
"""

# Add check for SSE2 on Windows
try:
    from ctypes import windll, wintypes
except (ImportError, ValueError):
    # Not on Windows (or ctypes is unavailable): nothing to check.
    pass
else:
    # IsProcessorFeaturePresent(10) queries PF_XMMI64_INSTRUCTIONS_AVAILABLE,
    # i.e. whether the CPU supports SSE2.
    has_feature = windll.kernel32.IsProcessorFeaturePresent
    has_feature.argtypes = [wintypes.DWORD]
    if not has_feature(10):
        # Bug fix: a stray trailing comma after the first pair of string
        # literals made `msg` a 2-tuple, so RuntimeError carried a tuple
        # instead of a single readable message. Concatenate into one string.
        msg = ("This version of numpy needs a CPU capable of SSE2, "
               "but Windows says that is not so.\n"
               "Please reinstall numpy using a different distribution")
        raise RuntimeError(msg)
| 32.705882
| 72
| 0.679856
|
try:
from ctypes import windll, wintypes
except (ImportError, ValueError):
pass
else:
has_feature = windll.kernel32.IsProcessorFeaturePresent
has_feature.argtypes = [wintypes.DWORD]
if not has_feature(10):
msg = ("This version of numpy needs a CPU capable of SSE2, "
"but Windows says that is not so.\n",
"Please reinstall numpy using a different distribution")
raise RuntimeError(msg)
| true
| true
|
1c428a469d716395d9ececbaae305a7cce0e5772
| 126,893
|
py
|
Python
|
keystone/tests/test_backend_ldap.py
|
BMDan/keystone
|
39de8b0a0a34c1645b607449fc1247d5cc11d89d
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/test_backend_ldap.py
|
BMDan/keystone
|
39de8b0a0a34c1645b607449fc1247d5cc11d89d
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/test_backend_ldap.py
|
BMDan/keystone
|
39de8b0a0a34c1645b607449fc1247d5cc11d89d
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2012 OpenStack Foundation
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import uuid
import ldap
import mock
from testtools import matchers
from keystone import assignment
from keystone.common import cache
from keystone.common import ldap as common_ldap
from keystone.common.ldap import core as common_ldap_core
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone import identity
from keystone.identity.mapping_backends import mapping as map
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests import fakeldap
from keystone.tests import identity_mapping as mapping_sql
from keystone.tests.ksfixtures import database
from keystone.tests import test_backend
CONF = config.CONF
def create_group_container(identity_api):
    """Create the groups base entry (ou=Groups,cn=example,cn=com)."""
    connection = identity_api.driver.group.get_connection()
    attributes = [('objectclass', ['organizationalUnit']),
                  ('ou', ['Groups'])]
    connection.add_s('ou=Groups,cn=example,cn=com', attributes)
class BaseLDAPIdentity(test_backend.IdentityTests):
    """Shared identity tests for LDAP backends; subclasses pick the config."""
    def setUp(self):
        # Wipe the fake LDAP store first so every test starts empty, then
        # register the fake:// handler before loading backends/fixtures.
        super(BaseLDAPIdentity, self).setUp()
        self.clear_database()
        common_ldap.register_handler('fake://', fakeldap.FakeLdap)
        self.load_backends()
        self.load_fixtures(default_fixtures)
        self.addCleanup(common_ldap_core._HANDLERS.clear)
    def _get_domain_fixture(self):
        """Domains in LDAP are read-only, so just return the static one."""
        return self.assignment_api.get_domain(CONF.identity.default_domain_id)
    def clear_database(self):
        # Empty every fake LDAP shelf (the in-memory backing store).
        for shelf in fakeldap.FakeShelves:
            fakeldap.FakeShelves[shelf].clear()
    def reload_backends(self, domain_id):
        # Only one backend unless we are using separate domain backends
        self.load_backends()
    def get_config(self, domain_id):
        # Only one conf structure unless we are using separate domain backends
        return CONF
    def config_overrides(self):
        # Force the LDAP identity driver for this test class.
        super(BaseLDAPIdentity, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
    def config_files(self):
        # Layer the LDAP-specific config file on top of the defaults.
        config_files = super(BaseLDAPIdentity, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
        return config_files
    def get_user_enabled_vals(self, user):
        # Read the raw 'enabled' attribute straight out of the LDAP entry
        # (bypassing the driver's mapping); returns None when absent.
        user_dn = (
            self.identity_api.driver.user._id_to_dn_string(user['id']))
        enabled_attr_name = CONF.ldap.user_enabled_attribute
        ldap_ = self.identity_api.driver.user.get_connection()
        res = ldap_.search_s(user_dn,
                             ldap.SCOPE_BASE,
                             u'(sn=%s)' % user['name'])
        if enabled_attr_name in res[0][1]:
            return res[0][1][enabled_attr_name]
        else:
            return None
    def test_build_tree(self):
        """Regression test for building the tree names
        """
        user_api = identity.backends.ldap.UserApi(CONF)
        self.assertTrue(user_api)
        self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn)
    def test_configurable_allowed_user_actions(self):
        # Full create/read/update/delete cycle must work when all user
        # actions are allowed (the default).
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                'tenants': ['bar']}
        user = self.identity_api.create_user(user)
        self.identity_api.get_user(user['id'])
        user['password'] = u'fäképass2'
        self.identity_api.update_user(user['id'], user)
        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user['id'])
    def test_configurable_forbidden_user_actions(self):
        # With create/update/delete disabled in config, each operation
        # must raise ForbiddenAction.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_allow_create = False
        conf.ldap.user_allow_update = False
        conf.ldap.user_allow_delete = False
        self.reload_backends(CONF.identity.default_domain_id)
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                'tenants': ['bar']}
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.create_user,
                          user)
        self.user_foo['password'] = u'fäképass2'
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.update_user,
                          self.user_foo['id'],
                          self.user_foo)
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.delete_user,
                          self.user_foo['id'])
    def test_configurable_forbidden_create_existing_user(self):
        # user_allow_create=False forbids creation even of existing users.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_allow_create = False
        self.reload_backends(CONF.identity.default_domain_id)
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.create_user,
                          self.user_foo)
    def test_user_filter(self):
        # A user_filter that matches nothing must hide existing users.
        user_ref = self.identity_api.get_user(self.user_foo['id'])
        self.user_foo.pop('password')
        self.assertDictEqual(user_ref, self.user_foo)
        conf = self.get_config(user_ref['domain_id'])
        conf.ldap.user_filter = '(CN=DOES_NOT_MATCH)'
        self.reload_backends(user_ref['domain_id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          self.user_foo['id'])
    def test_remove_role_grant_from_user_and_project(self):
        # Grant then revoke 'member' on a project for a user; a second
        # delete must raise NotFound.
        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_baz['id'])
        self.assertDictEqual(roles_ref[0], self.role_member)
        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_baz['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.NotFound,
                          self.assignment_api.delete_grant,
                          user_id=self.user_foo['id'],
                          project_id=self.tenant_baz['id'],
                          role_id='member')
    def test_get_and_remove_role_grant_by_group_and_project(self):
        # Same grant/revoke cycle as above but through group membership.
        new_domain = self._get_domain_fixture()
        new_group = {'domain_id': new_domain['id'],
                     'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        new_user = {'name': 'new_user', 'enabled': True,
                    'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        self.identity_api.add_user_to_group(new_user['id'],
                                            new_group['id'])
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'])
        self.assertEqual([], roles_ref)
        self.assertEqual(0, len(roles_ref))
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'])
        self.assertNotEmpty(roles_ref)
        self.assertDictEqual(roles_ref[0], self.role_member)
        self.assignment_api.delete_grant(group_id=new_group['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            project_id=self.tenant_bar['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.NotFound,
                          self.assignment_api.delete_grant,
                          group_id=new_group['id'],
                          project_id=self.tenant_bar['id'],
                          role_id='member')
    # The inherited tests below exercise behavior the LDAP backend does not
    # support (multiple domains) or that is blocked by known bugs; each is
    # skipped with the reason recorded.
    def test_get_and_remove_role_grant_by_group_and_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_get_and_remove_role_grant_by_user_and_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_get_and_remove_correct_role_grant_from_a_mix(self):
        self.skipTest('Blocked by bug 1101287')
    def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_role_grant_by_group_and_cross_domain_project(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_role_grant_by_user_and_cross_domain_project(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_multi_role_grant_by_user_group_on_project_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_delete_role_with_user_and_group_grants(self):
        self.skipTest('Blocked by bug 1101287')
    def test_delete_user_with_group_project_domain_links(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_delete_group_with_user_project_domain_links(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_list_projects_for_user(self):
        # Projects reachable through direct user grants and through group
        # grants are both counted; a second role on an already-counted
        # project must not add another project.
        domain = self._get_domain_fixture()
        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                 'domain_id': domain['id'], 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(0))
        # new grant(user1, role_member, tenant_bar)
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        # new grant(user1, role_member, tenant_baz)
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id=self.role_member['id'])
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(2))
        # Now, check number of projects through groups
        user2 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                 'domain_id': domain['id'], 'enabled': True}
        user2 = self.identity_api.create_user(user2)
        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group1 = self.identity_api.create_group(group1)
        self.identity_api.add_user_to_group(user2['id'], group1['id'])
        # new grant(group1(user2), role_member, tenant_bar)
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        # new grant(group1(user2), role_member, tenant_baz)
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id=self.role_member['id'])
        user_projects = self.assignment_api.list_projects_for_user(user2['id'])
        self.assertThat(user_projects, matchers.HasLength(2))
        # new grant(group1(user2), role_other, tenant_bar)
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_other['id'])
        user_projects = self.assignment_api.list_projects_for_user(user2['id'])
        self.assertThat(user_projects, matchers.HasLength(2))
    def test_list_projects_for_user_and_groups(self):
        # A project granted both directly and via a group counts once, and
        # is still listed after the direct grant is removed.
        domain = self._get_domain_fixture()
        # Create user1
        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                 'domain_id': domain['id'], 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        # Create new group for user1
        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group1 = self.identity_api.create_group(group1)
        # Add user1 to group1
        self.identity_api.add_user_to_group(user1['id'], group1['id'])
        # Now, add grant to user1 and group1 in tenant_bar
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        # The result is user1 has only one project granted
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(1))
        # Now, delete user1 grant into tenant_bar and check
        self.assignment_api.delete_grant(user_id=user1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        # The result is user1 has only one project granted.
        # Granted through group1.
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(1))
    def test_list_projects_for_user_with_grants(self):
        # Union of projects from direct grants and from two group
        # memberships: expects 3 distinct projects.
        domain = self._get_domain_fixture()
        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': domain['id']}
        new_user = self.identity_api.create_user(new_user)
        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group1 = self.identity_api.create_group(group1)
        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group2 = self.identity_api.create_group(group2)
        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'domain_id': domain['id']}
        self.assignment_api.create_project(project1['id'], project1)
        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'domain_id': domain['id']}
        self.assignment_api.create_project(project2['id'], project2)
        self.identity_api.add_user_to_group(new_user['id'],
                                            group1['id'])
        self.identity_api.add_user_to_group(new_user['id'],
                                            group2['id'])
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=project1['id'],
                                         role_id=self.role_admin['id'])
        self.assignment_api.create_grant(group_id=group2['id'],
                                         project_id=project2['id'],
                                         role_id=self.role_admin['id'])
        user_projects = self.assignment_api.list_projects_for_user(
            new_user['id'])
        self.assertEqual(3, len(user_projects))
    # More inherited tests that do not apply to the LDAP backend: domains
    # are read-only and multi-domain support is absent; a few are blocked
    # by known bugs.
    def test_create_duplicate_user_name_in_different_domains(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_create_duplicate_project_name_in_different_domains(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_create_duplicate_group_name_in_different_domains(self):
        self.skipTest(
            'N/A: LDAP does not support multiple domains')
    def test_move_user_between_domains(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_move_user_between_domains_with_clashing_names_fails(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_move_group_between_domains(self):
        self.skipTest(
            'N/A: LDAP does not support multiple domains')
    def test_move_group_between_domains_with_clashing_names_fails(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_move_project_between_domains(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_move_project_between_domains_with_clashing_names_fails(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_get_roles_for_user_and_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_get_roles_for_groups_on_domain(self):
        self.skipTest('Blocked by bug: 1390125')
    def test_get_roles_for_groups_on_project(self):
        self.skipTest('Blocked by bug: 1390125')
    def test_list_domains_for_groups(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_list_projects_for_groups(self):
        self.skipTest('Blocked by bug: 1390125')
    def test_domain_delete_hierarchy(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_list_role_assignments_unfiltered(self):
        # Adding one user grant and one group grant must grow the
        # unfiltered assignment list by exactly 2.
        new_domain = self._get_domain_fixture()
        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        new_project = {'id': uuid.uuid4().hex,
                       'name': uuid.uuid4().hex,
                       'domain_id': new_domain['id']}
        self.assignment_api.create_project(new_project['id'], new_project)
        # First check how many role grant already exist
        existing_assignments = len(self.assignment_api.list_role_assignments())
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=new_project['id'],
                                         role_id='other')
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         project_id=new_project['id'],
                                         role_id='admin')
        # Read back the list of assignments - check it is gone up by 2
        after_assignments = len(self.assignment_api.list_role_assignments())
        self.assertEqual(existing_assignments + 2, after_assignments)
    def test_list_role_assignments_dumb_member(self):
        # With use_dumb_member enabled, the placeholder member must not
        # leak into the role assignment list.
        self.config_fixture.config(group='ldap', use_dumb_member=True)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        new_domain = self._get_domain_fixture()
        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        new_project = {'id': uuid.uuid4().hex,
                       'name': uuid.uuid4().hex,
                       'domain_id': new_domain['id']}
        self.assignment_api.create_project(new_project['id'], new_project)
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=new_project['id'],
                                         role_id='other')
        # Read back the list of assignments and ensure
        # that the LDAP dumb member isn't listed.
        assignment_ids = [a['user_id'] for a in
                          self.assignment_api.list_role_assignments()]
        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
        self.assertNotIn(dumb_id, assignment_ids)
    def test_list_user_ids_for_project_dumb_member(self):
        # Same dumb-member check for list_user_ids_for_project.
        self.config_fixture.config(group='ldap', use_dumb_member=True)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                'enabled': True, 'domain_id': test_backend.DEFAULT_DOMAIN_ID}
        user = self.identity_api.create_user(user)
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                user['id'])
        user_ids = self.assignment_api.list_user_ids_for_project(
            self.tenant_baz['id'])
        self.assertIn(user['id'], user_ids)
        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
        self.assertNotIn(dumb_id, user_ids)
    def test_multi_group_grants_on_project_domain(self):
        self.skipTest('Blocked by bug 1101287')
    def test_list_group_members_missing_entry(self):
        """List group members with deleted user.
        If a group has a deleted entry for a member, the non-deleted members
        are returned.
        """
        # Create a group
        group = dict(name=uuid.uuid4().hex,
                     domain_id=CONF.identity.default_domain_id)
        group_id = self.identity_api.create_group(group)['id']
        # Create a couple of users and add them to the group.
        user = dict(name=uuid.uuid4().hex,
                    domain_id=CONF.identity.default_domain_id)
        user_1_id = self.identity_api.create_user(user)['id']
        self.identity_api.add_user_to_group(user_1_id, group_id)
        user = dict(name=uuid.uuid4().hex,
                    domain_id=CONF.identity.default_domain_id)
        user_2_id = self.identity_api.create_user(user)['id']
        self.identity_api.add_user_to_group(user_2_id, group_id)
        # Delete user 2
        # NOTE(blk-u): need to go directly to user interface to keep from
        # updating the group.
        unused, driver, entity_id = (
            self.identity_api._get_domain_driver_and_entity_id(user_2_id))
        driver.user.delete(entity_id)
        # List group users and verify only user 1.
        res = self.identity_api.list_users_in_group(group_id)
        self.assertEqual(1, len(res), "Expected 1 entry (user_1)")
        self.assertEqual(user_1_id, res[0]['id'], "Expected user 1 id")
    def test_list_group_members_when_no_members(self):
        # List group members when there is no member in the group.
        # No exception should be raised.
        group = {
            'domain_id': CONF.identity.default_domain_id,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex}
        group = self.identity_api.create_group(group)
        # If this doesn't raise, then the test is successful.
        self.identity_api.list_users_in_group(group['id'])
    def test_list_group_members_dumb_member(self):
        # The dumb-member placeholder must not appear as a group member.
        self.config_fixture.config(group='ldap', use_dumb_member=True)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Create a group
        group = dict(name=uuid.uuid4().hex,
                     domain_id=CONF.identity.default_domain_id)
        group_id = self.identity_api.create_group(group)['id']
        # Create a user
        user = dict(name=uuid.uuid4().hex,
                    domain_id=CONF.identity.default_domain_id)
        user_id = self.identity_api.create_user(user)['id']
        # Add user to the group
        self.identity_api.add_user_to_group(user_id, group_id)
        user_ids = self.identity_api.list_users_in_group(group_id)
        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
        self.assertNotIn(dumb_id, user_ids)
    def test_list_domains(self):
        # The LDAP backend exposes exactly one (calculated default) domain.
        domains = self.assignment_api.list_domains()
        self.assertEqual(
            [assignment.calc_default_domain()],
            domains)
    def test_list_domains_non_default_domain_id(self):
        # If change the default_domain_id, the ID of the default domain
        # returned by list_domains changes is the new default_domain_id.
        new_domain_id = uuid.uuid4().hex
        self.config_fixture.config(group='identity',
                                   default_domain_id=new_domain_id)
        domains = self.assignment_api.list_domains()
        self.assertEqual(new_domain_id, domains[0]['id'])
    def test_authenticate_requires_simple_bind(self):
        # With the driver's service credentials cleared, authenticating
        # with a None password must raise AssertionError.
        user = {
            'name': 'NO_META',
            'domain_id': test_backend.DEFAULT_DOMAIN_ID,
            'password': 'no_meta2',
            'enabled': True,
        }
        user = self.identity_api.create_user(user)
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                user['id'])
        driver = self.identity_api._select_identity_driver(
            user['domain_id'])
        driver.user.LDAP_USER = None
        driver.user.LDAP_PASSWORD = None
        self.assertRaises(AssertionError,
                          self.identity_api.authenticate,
                          context={},
                          user_id=user['id'],
                          password=None)
    # (spzala)The group and domain crud tests below override the standard ones
    # in test_backend.py so that we can exclude the update name test, since we
    # do not yet support the update of either group or domain names with LDAP.
    # In the tests below, the update is demonstrated by updating description.
    # Refer to bug 1136403 for more detail.
    def test_group_crud(self):
        # CRUD cycle for a group; only the description is updated because
        # names are immutable under LDAP (see note above).
        group = {
            'domain_id': CONF.identity.default_domain_id,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex}
        group = self.identity_api.create_group(group)
        group_ref = self.identity_api.get_group(group['id'])
        self.assertDictEqual(group_ref, group)
        group['description'] = uuid.uuid4().hex
        self.identity_api.update_group(group['id'], group)
        group_ref = self.identity_api.get_group(group['id'])
        self.assertDictEqual(group_ref, group)
        self.identity_api.delete_group(group['id'])
        self.assertRaises(exception.GroupNotFound,
                          self.identity_api.get_group,
                          group['id'])
    def test_create_user_none_mapping(self):
        # When create a user where an attribute maps to None, the entry is
        # created without that attribute and it doesn't fail with a TypeError.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_attribute_ignore = ['enabled', 'email',
                                           'tenants', 'tenantId']
        self.reload_backends(CONF.identity.default_domain_id)
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                'default_project_id': 'maps_to_none',
                }
        # If this doesn't raise, then the test is successful.
        user = self.identity_api.create_user(user)
    def test_unignored_user_none_mapping(self):
        # Ensure that an attribute that maps to None that is not explicitly
        # ignored in configuration is implicitly ignored without triggering
        # an error.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_attribute_ignore = ['enabled', 'email',
                                           'tenants', 'tenantId']
        self.reload_backends(CONF.identity.default_domain_id)
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                }
        user_ref = self.identity_api.create_user(user)
        # If this doesn't raise, then the test is successful.
        self.identity_api.get_user(user_ref['id'])
    def test_update_user_name(self):
        """A user's name cannot be changed through the LDAP driver."""
        self.assertRaises(exception.Conflict,
                          super(BaseLDAPIdentity, self).test_update_user_name)
    def test_arbitrary_attributes_are_returned_from_get_user(self):
        self.skipTest("Using arbitrary attributes doesn't work under LDAP")
    def test_new_arbitrary_attributes_are_returned_from_update_user(self):
        self.skipTest("Using arbitrary attributes doesn't work under LDAP")
    def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
        self.skipTest("Using arbitrary attributes doesn't work under LDAP")
    def test_cache_layer_domain_crud(self):
        # TODO(morganfainberg): This also needs to be removed when full LDAP
        # implementation is submitted. No need to duplicate the above test,
        # just skip this time.
        self.skipTest('Domains are read-only against LDAP')
    def test_user_id_comma(self):
        """Even if the user has a , in their ID, groups can be listed."""
        # Create a user with a , in their ID
        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
        # Since we want to fake up this special ID, we'll squirt this
        # direct into the driver and bypass the manager layer.
        user_id = u'Doe, John'
        user = {
            'id': user_id,
            'name': self.getUniqueString(),
            'password': self.getUniqueString(),
            'domain_id': CONF.identity.default_domain_id,
        }
        user = self.identity_api.driver.create_user(user_id, user)
        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = self.identity_api.list_users()
        public_user_id = None
        for ref in ref_list:
            if ref['name'] == user['name']:
                public_user_id = ref['id']
                break
        # Create a group
        group_id = uuid.uuid4().hex
        group = {
            'id': group_id,
            'name': self.getUniqueString(prefix='tuidc'),
            'description': self.getUniqueString(),
            'domain_id': CONF.identity.default_domain_id,
        }
        group = self.identity_api.driver.create_group(group_id, group)
        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = self.identity_api.list_groups()
        public_group_id = None
        for ref in ref_list:
            if ref['name'] == group['name']:
                public_group_id = ref['id']
                break
        # Put the user in the group
        self.identity_api.add_user_to_group(public_user_id, public_group_id)
        # List groups for user.
        ref_list = self.identity_api.list_groups_for_user(public_user_id)
        group['id'] = public_group_id
        self.assertThat(ref_list, matchers.Equals([group]))
    def test_user_id_comma_grants(self):
        """Even if the user has a , in their ID, can get user and group grants.
        """
        # Create a user with a , in their ID
        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
        # Since we want to fake up this special ID, we'll squirt this
        # direct into the driver and bypass the manager layer
        user_id = u'Doe, John'
        user = {
            'id': user_id,
            'name': self.getUniqueString(),
            'password': self.getUniqueString(),
            'domain_id': CONF.identity.default_domain_id,
        }
        self.identity_api.driver.create_user(user_id, user)
        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = self.identity_api.list_users()
        public_user_id = None
        for ref in ref_list:
            if ref['name'] == user['name']:
                public_user_id = ref['id']
                break
        # Grant the user a role on a project.
        role_id = 'member'
        project_id = self.tenant_baz['id']
        self.assignment_api.create_grant(role_id, user_id=public_user_id,
                                         project_id=project_id)
        role_ref = self.assignment_api.get_grant(role_id,
                                                 user_id=public_user_id,
                                                 project_id=project_id)
        self.assertEqual(role_id, role_ref['id'])
    def test_user_enabled_ignored_disable_error(self):
        # When the server is configured so that the enabled attribute is
        # ignored for users, users cannot be disabled.
        self.config_fixture.config(group='ldap',
                                   user_attribute_ignore=['enabled'])
        # Need to re-load backends for the config change to take effect.
        self.load_backends()
        # Attempt to disable the user.
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.update_user, self.user_foo['id'],
                          {'enabled': False})
        user_info = self.identity_api.get_user(self.user_foo['id'])
        # If 'enabled' is ignored then 'enabled' isn't returned as part of the
        # ref.
        self.assertNotIn('enabled', user_info)
def test_group_enabled_ignored_disable_error(self):
# When the server is configured so that the enabled attribute is
# ignored for groups, groups cannot be disabled.
self.config_fixture.config(group='ldap',
group_attribute_ignore=['enabled'])
# Need to re-load backends for the config change to take effect.
self.load_backends()
# There's no group fixture so create a group.
new_domain = self._get_domain_fixture()
new_group = {'domain_id': new_domain['id'],
'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
# Attempt to disable the group.
self.assertRaises(exception.ForbiddenAction,
self.identity_api.update_group, new_group['id'],
{'enabled': False})
group_info = self.identity_api.get_group(new_group['id'])
# If 'enabled' is ignored then 'enabled' isn't returned as part of the
# ref.
self.assertNotIn('enabled', group_info)
def test_project_enabled_ignored_disable_error(self):
# When the server is configured so that the enabled attribute is
# ignored for projects, projects cannot be disabled.
self.config_fixture.config(group='ldap',
project_attribute_ignore=['enabled'])
# Need to re-load backends for the config change to take effect.
self.load_backends()
# Attempt to disable the project.
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.update_project,
self.tenant_baz['id'], {'enabled': False})
project_info = self.assignment_api.get_project(self.tenant_baz['id'])
# Unlike other entities, if 'enabled' is ignored then 'enabled' is
# returned as part of the ref.
self.assertIs(True, project_info['enabled'])
class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
    def setUp(self):
        """Prepare the test database, then run the shared LDAP setUp."""
        # NOTE(dstanek): The database must be setup prior to calling the
        # parent's setUp. The parent's setUp uses services (like
        # credentials) that require a database.
        self.useFixture(database.Database())
        super(LDAPIdentity, self).setUp()
    def load_fixtures(self, fixtures):
        # Override super impl since need to create group container.
        # The group container must exist before the default fixtures are
        # loaded on top of it.
        create_group_container(self.identity_api)
        super(LDAPIdentity, self).load_fixtures(fixtures)
def test_configurable_allowed_project_actions(self):
tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True}
self.assignment_api.create_project(u'fäké1', tenant)
tenant_ref = self.assignment_api.get_project(u'fäké1')
self.assertEqual(u'fäké1', tenant_ref['id'])
tenant['enabled'] = False
self.assignment_api.update_project(u'fäké1', tenant)
self.assignment_api.delete_project(u'fäké1')
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
u'fäké1')
def test_configurable_subtree_delete(self):
self.config_fixture.config(group='ldap', allow_subtree_delete=True)
self.load_backends()
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id}
self.assignment_api.create_project(project1['id'], project1)
role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role1['id'], role1)
user1 = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
user1 = self.identity_api.create_user(user1)
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role1['id'])
self.assignment_api.delete_project(project1['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project1['id'])
self.assignment_api.create_project(project1['id'], project1)
list = self.assignment_api.get_roles_for_user_and_project(
user1['id'],
project1['id'])
self.assertEqual(0, len(list))
def test_configurable_forbidden_project_actions(self):
self.config_fixture.config(
group='ldap', project_allow_create=False,
project_allow_update=False, project_allow_delete=False)
self.load_backends()
tenant = {'id': u'fäké1', 'name': u'fäké1'}
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.create_project,
u'fäké1',
tenant)
self.tenant_bar['enabled'] = False
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.update_project,
self.tenant_bar['id'],
self.tenant_bar)
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.delete_project,
self.tenant_bar['id'])
def test_configurable_allowed_role_actions(self):
role = {'id': u'fäké1', 'name': u'fäké1'}
self.assignment_api.create_role(u'fäké1', role)
role_ref = self.assignment_api.get_role(u'fäké1')
self.assertEqual(u'fäké1', role_ref['id'])
role['name'] = u'fäké2'
self.assignment_api.update_role(u'fäké1', role)
self.assignment_api.delete_role(u'fäké1')
self.assertRaises(exception.RoleNotFound,
self.assignment_api.get_role,
u'fäké1')
def test_configurable_forbidden_role_actions(self):
self.config_fixture.config(
group='ldap', role_allow_create=False, role_allow_update=False,
role_allow_delete=False)
self.load_backends()
role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.create_role,
role['id'],
role)
self.role_member['name'] = uuid.uuid4().hex
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.update_role,
self.role_member['id'],
self.role_member)
self.assertRaises(exception.ForbiddenAction,
self.assignment_api.delete_role,
self.role_member['id'])
    def test_project_filter(self):
        """A project_filter that matches nothing hides existing projects."""
        # Sanity check: the project is visible before the filter is set.
        tenant_ref = self.assignment_api.get_project(self.tenant_bar['id'])
        self.assertDictEqual(tenant_ref, self.tenant_bar)
        self.config_fixture.config(group='ldap',
                                   project_filter='(CN=DOES_NOT_MATCH)')
        self.load_backends()
        # NOTE(morganfainberg): CONF.ldap.project_filter will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        self.assignment_api.get_role(self.role_member['id'])
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_bar['id'])
        # With the cache dropped, the non-matching filter must make the
        # project unfindable.
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          self.tenant_bar['id'])
    def test_role_filter(self):
        """A role_filter that matches nothing hides existing roles."""
        # Sanity check: the role is visible before the filter is set.
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertDictEqual(role_ref, self.role_member)
        self.config_fixture.config(group='ldap',
                                   role_filter='(CN=DOES_NOT_MATCH)')
        self.load_backends()
        # NOTE(morganfainberg): CONF.ldap.role_filter will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        # With the cache dropped, the non-matching filter must make the
        # role unfindable.
        self.assertRaises(exception.RoleNotFound,
                          self.assignment_api.get_role,
                          self.role_member['id'])
def test_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
self.assertRaises(exception.UserNotFound,
self.identity_api.get_user,
dumb_id)
    def test_project_attribute_mapping(self):
        """Project fields follow the configured LDAP attribute mapping."""
        # First mapping: name->ou, description->description,
        # enabled->enabled; all fields should round-trip unchanged.
        self.config_fixture.config(
            group='ldap', project_name_attribute='ou',
            project_desc_attribute='description',
            project_enabled_attribute='enabled')
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
        # CONF.ldap.project_desc_attribute, and
        # CONF.ldap.project_enabled_attribute will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_baz['id'])
        tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
        self.assertEqual(self.tenant_baz['name'], tenant_ref['name'])
        self.assertEqual(
            self.tenant_baz['description'],
            tenant_ref['description'])
        self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
        # Second mapping swaps name and description, so the returned ref
        # should show the two fields exchanged.
        self.config_fixture.config(group='ldap',
                                   project_name_attribute='description',
                                   project_desc_attribute='ou')
        self.load_backends()
        # NOTE(morganfainberg): CONF.ldap.project_name_attribute,
        # CONF.ldap.project_desc_attribute, and
        # CONF.ldap.project_enabled_attribute will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_baz['id'])
        tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
        self.assertEqual(self.tenant_baz['description'], tenant_ref['name'])
        self.assertEqual(self.tenant_baz['name'], tenant_ref['description'])
        self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
    def test_project_attribute_ignore(self):
        """Attributes listed in project_attribute_ignore are not returned."""
        self.config_fixture.config(
            group='ldap',
            project_attribute_ignore=['name', 'description', 'enabled'])
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # NOTE(morganfainberg): CONF.ldap.project_attribute_ignore will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change configs values in tests
        # that could affect what the drivers would return up to the manager.
        # This solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_baz['id'])
        tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
        # Only the id survives; every ignored attribute is absent.
        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
        self.assertNotIn('name', tenant_ref)
        self.assertNotIn('description', tenant_ref)
        self.assertNotIn('enabled', tenant_ref)
    def test_role_attribute_mapping(self):
        """Role name follows the configured role_name_attribute mapping."""
        # With name mapped to 'ou' the role name round-trips unchanged.
        self.config_fixture.config(group='ldap', role_name_attribute='ou')
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertEqual(self.role_member['id'], role_ref['id'])
        self.assertEqual(self.role_member['name'], role_ref['name'])
        # Remapping name to 'sn' (which the fixture entry was not written
        # with) leaves the returned ref without a name at all.
        self.config_fixture.config(group='ldap', role_name_attribute='sn')
        self.load_backends()
        # NOTE(morganfainberg): CONF.ldap.role_name_attribute will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertEqual(self.role_member['id'], role_ref['id'])
        self.assertNotIn('name', role_ref)
    def test_role_attribute_ignore(self):
        """Attributes listed in role_attribute_ignore are not returned."""
        self.config_fixture.config(group='ldap',
                                   role_attribute_ignore=['name'])
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # NOTE(morganfainberg): CONF.ldap.role_attribute_ignore will not be
        # dynamically changed at runtime. This invalidate is a work-around for
        # the expectation that it is safe to change config values in tests that
        # could affect what the drivers would return up to the manager. This
        # solves this assumption when working with aggressive (on-create)
        # cache population.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        # The id survives; the ignored 'name' attribute is absent.
        self.assertEqual(self.role_member['id'], role_ref['id'])
        self.assertNotIn('name', role_ref)
    def test_user_enable_attribute_mask(self):
        """Enabled state is stored as a bit (mask 2) within a default of 512.

        The raw LDAP value should read 512 while enabled and 514 (512 | 2)
        while disabled, and the 'enabled_nomask' working attribute must
        never leak into returned refs.
        """
        self.config_fixture.config(group='ldap', user_enabled_mask=2,
                                   user_enabled_default='512')
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        user = {'name': u'fäké1', 'enabled': True,
                'domain_id': CONF.identity.default_domain_id}
        user_ref = self.identity_api.create_user(user)
        # Use assertIs rather than assertTrue because assertIs will assert the
        # value is a Boolean as expected.
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)
        # Enabled: the raw stored value is just the default, 512.
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([512], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)
        # Disable the user: the mask bit is set, 512 | 2 == 514.
        user['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user)
        self.assertIs(user_ref['enabled'], False)
        self.assertNotIn('enabled_nomask', user_ref)
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([514], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(user_ref['enabled'], False)
        self.assertNotIn('enabled_nomask', user_ref)
        # Re-enable the user: the mask bit is cleared again.
        user['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user)
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([512], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)
    def test_user_enabled_invert(self):
        """With user_enabled_invert, LDAP stores the inverse of 'enabled'.

        The API-level 'enabled' flag stays as callers set it, while the
        raw stored value (via get_user_enabled_vals) is always inverted.
        """
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default=False)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        user1 = {'name': u'fäké1', 'enabled': True,
                 'domain_id': CONF.identity.default_domain_id}
        user2 = {'name': u'fäké2', 'enabled': False,
                 'domain_id': CONF.identity.default_domain_id}
        # user3 omits 'enabled' so the configured default applies.
        user3 = {'name': u'fäké3',
                 'domain_id': CONF.identity.default_domain_id}
        # Ensure that the LDAP attribute is False for a newly created
        # enabled user.
        user_ref = self.identity_api.create_user(user1)
        self.assertIs(True, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([False], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])
        # Ensure that the LDAP attribute is True for a disabled user.
        user1['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(False, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([True], enabled_vals)
        # Enable the user and ensure that the LDAP attribute is True again.
        user1['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(True, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([False], enabled_vals)
        # Ensure that the LDAP attribute is True for a newly created
        # disabled user.
        user_ref = self.identity_api.create_user(user2)
        self.assertIs(False, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([True], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(False, user_ref['enabled'])
        # Ensure that the LDAP attribute is inverted for a newly created
        # user when the user_enabled_default setting is used.
        user_ref = self.identity_api.create_user(user3)
        self.assertIs(True, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([False], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_invert_no_enabled_value(self, mock_ldap_get):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default=False)
# Mock the search results to return an entry with
# no enabled value.
mock_ldap_get.return_value = (
'cn=junk,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'email': [uuid.uuid4().hex],
'cn': ['junk']
}
)
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('junk')
# Ensure that the model enabled attribute is inverted
# from the resource default.
self.assertIs(not CONF.ldap.user_enabled_default, user_ref['enabled'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_enabled_invert_default_str_value(self, mock_ldap_get):
self.config_fixture.config(group='ldap', user_enabled_invert=True,
user_enabled_default='False')
# Mock the search results to return an entry with
# no enabled value.
mock_ldap_get.return_value = (
'cn=junk,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'email': [uuid.uuid4().hex],
'cn': ['junk']
}
)
user_api = identity.backends.ldap.UserApi(CONF)
user_ref = user_api.get('junk')
# Ensure that the model enabled attribute is inverted
# from the resource default.
self.assertIs(True, user_ref['enabled'])
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'simple_bind_s')
def test_user_api_get_connection_no_user_password(self, mocked_method):
"""Don't bind in case the user and password are blank."""
# Ensure the username/password are in-fact blank
self.config_fixture.config(group='ldap', user=None, password=None)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
self.assertFalse(mocked_method.called,
msg='`simple_bind_s` method was unexpectedly called')
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
def test_chase_referrals_off(self, mocked_fakeldap):
self.config_fixture.config(
group='ldap',
url='fake://memory',
chase_referrals=False)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
# The last call_arg should be a dictionary and should contain
# chase_referrals. Check to make sure the value of chase_referrals
# is as expected.
self.assertFalse(mocked_fakeldap.call_args[-1]['chase_referrals'])
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
def test_chase_referrals_on(self, mocked_fakeldap):
self.config_fixture.config(
group='ldap',
url='fake://memory',
chase_referrals=True)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
# The last call_arg should be a dictionary and should contain
# chase_referrals. Check to make sure the value of chase_referrals
# is as expected.
self.assertTrue(mocked_fakeldap.call_args[-1]['chase_referrals'])
@mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
def test_debug_level_set(self, mocked_fakeldap):
level = 12345
self.config_fixture.config(
group='ldap',
url='fake://memory',
debug_level=level)
user_api = identity.backends.ldap.UserApi(CONF)
user_api.get_connection(user=None, password=None)
# The last call_arg should be a dictionary and should contain
# debug_level. Check to make sure the value of debug_level
# is as expected.
self.assertEqual(level, mocked_fakeldap.call_args[-1]['debug_level'])
def test_wrong_ldap_scope(self):
self.config_fixture.config(group='ldap', query_scope=uuid.uuid4().hex)
self.assertRaisesRegexp(
ValueError,
'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope,
identity.backends.ldap.Identity)
def test_wrong_alias_dereferencing(self):
self.config_fixture.config(group='ldap',
alias_dereferencing=uuid.uuid4().hex)
self.assertRaisesRegexp(
ValueError,
'Invalid LDAP deref option: %s\.' % CONF.ldap.alias_dereferencing,
identity.backends.ldap.Identity)
def test_is_dumb_member(self):
self.config_fixture.config(group='ldap',
use_dumb_member=True)
self.load_backends()
dn = 'cn=dumb,dc=nonexistent'
self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
def test_is_dumb_member_upper_case_keys(self):
self.config_fixture.config(group='ldap',
use_dumb_member=True)
self.load_backends()
dn = 'CN=dumb,DC=nonexistent'
self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
def test_is_dumb_member_with_false_use_dumb_member(self):
self.config_fixture.config(group='ldap',
use_dumb_member=False)
self.load_backends()
dn = 'cn=dumb,dc=nonexistent'
self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
def test_is_dumb_member_not_dumb(self):
self.config_fixture.config(group='ldap',
use_dumb_member=True)
self.load_backends()
dn = 'ou=some,dc=example.com'
self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
def test_user_extra_attribute_mapping(self):
self.config_fixture.config(
group='ldap',
user_additional_attribute_mapping=['description:name'])
self.load_backends()
user = {
'name': 'EXTRA_ATTRIBUTES',
'password': 'extra',
'domain_id': CONF.identity.default_domain_id
}
user = self.identity_api.create_user(user)
dn, attrs = self.identity_api.driver.user._ldap_get(user['id'])
self.assertThat([user['name']], matchers.Equals(attrs['description']))
def test_user_extra_attribute_mapping_description_is_returned(self):
# Given a mapping like description:description, the description is
# returned.
self.config_fixture.config(
group='ldap',
user_additional_attribute_mapping=['description:description'])
self.load_backends()
description = uuid.uuid4().hex
user = {
'name': uuid.uuid4().hex,
'description': description,
'password': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id
}
user = self.identity_api.create_user(user)
res = self.identity_api.driver.user.get_all()
new_user = [u for u in res if u['id'] == user['id']][0]
self.assertThat(new_user['description'], matchers.Equals(description))
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_mixed_case_attribute(self, mock_ldap_get):
# Mock the search results to return attribute names
# with unexpected case.
mock_ldap_get.return_value = (
'cn=junk,dc=example,dc=com',
{
'sN': [uuid.uuid4().hex],
'MaIl': [uuid.uuid4().hex],
'cn': ['junk']
}
)
user = self.identity_api.get_user('junk')
self.assertEqual(mock_ldap_get.return_value[1]['sN'][0],
user['name'])
self.assertEqual(mock_ldap_get.return_value[1]['MaIl'][0],
user['email'])
def test_parse_extra_attribute_mapping(self):
option_list = ['description:name', 'gecos:password',
'fake:invalid', 'invalid1', 'invalid2:',
'description:name:something']
mapping = self.identity_api.driver.user._parse_extra_attrs(option_list)
expected_dict = {'description': 'name', 'gecos': 'password',
'fake': 'invalid', 'invalid2': ''}
self.assertDictEqual(expected_dict, mapping)
# TODO(henry-nash): These need to be removed when the full LDAP implementation
# is submitted - see Bugs 1092187, 1101287, 1101276, 1101289
    def test_domain_crud(self):
        """Domains are read-only against LDAP: every mutation must fail.

        Creating, updating or deleting the default domain is Forbidden;
        the same operations on an unknown domain raise DomainNotFound
        (or Conflict when creating with the default domain's id).
        """
        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                  'enabled': True, 'description': uuid.uuid4().hex}
        # Create is forbidden outright; reusing the default domain id is
        # reported as a conflict instead.
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.create_domain,
                          domain['id'],
                          domain)
        self.assertRaises(exception.Conflict,
                          self.assignment_api.create_domain,
                          CONF.identity.default_domain_id,
                          domain)
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])
        # Update: unknown domain -> not found; default domain -> forbidden.
        domain['description'] = uuid.uuid4().hex
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.update_domain,
                          domain['id'],
                          domain)
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.update_domain,
                          CONF.identity.default_domain_id,
                          domain)
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])
        # Delete: unknown domain -> not found; default domain -> forbidden.
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.delete_domain,
                          domain['id'])
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.delete_domain,
                          CONF.identity.default_domain_id)
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])
@tests.skip_if_no_multiple_domains_support
def test_create_domain_case_sensitivity(self):
# domains are read-only, so case sensitivity isn't an issue
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex}
self.assertRaises(exception.Forbidden,
self.assignment_api.create_domain,
ref['id'],
ref)
    def test_cache_layer_domain_crud(self):
        # Domains are read-only in the LDAP assignment backend, so the
        # cache-layer domain CRUD test from the base class cannot run.
        # TODO(morganfainberg): This also needs to be removed when full LDAP
        # implementation is submitted. No need to duplicate the above test,
        # just skip this time.
        self.skipTest('Domains are read-only against LDAP')
def test_domain_rename_invalidates_get_domain_by_name_cache(self):
parent = super(LDAPIdentity, self)
self.assertRaises(
exception.Forbidden,
parent.test_domain_rename_invalidates_get_domain_by_name_cache)
def test_project_rename_invalidates_get_project_by_name_cache(self):
parent = super(LDAPIdentity, self)
self.assertRaises(
exception.Forbidden,
parent.test_project_rename_invalidates_get_project_by_name_cache)
def test_project_crud(self):
# NOTE(topol): LDAP implementation does not currently support the
# updating of a project name so this method override
# provides a different update test
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'description': uuid.uuid4().hex,
'enabled': True,
'parent_id': None}
self.assignment_api.create_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
project['description'] = uuid.uuid4().hex
self.assignment_api.update_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.assignment_api.delete_project(project['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project['id'])
    @tests.skip_if_cache_disabled('assignment')
    def test_cache_layer_project_crud(self):
        """Cache serves stale project refs until explicitly invalidated.

        Driver-level writes (bypassing the manager) are not seen through
        the cached manager API until the cache entry is invalidated.
        """
        # NOTE(morganfainberg): LDAP implementation does not currently support
        # updating project names. This method override provides a different
        # update test.
        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                   'domain_id': CONF.identity.default_domain_id,
                   'description': uuid.uuid4().hex}
        project_id = project['id']
        # Create a project
        self.assignment_api.create_project(project_id, project)
        self.assignment_api.get_project(project_id)
        updated_project = copy.deepcopy(project)
        updated_project['description'] = uuid.uuid4().hex
        # Update project, bypassing assignment_api manager
        self.assignment_api.driver.update_project(project_id,
                                                  updated_project)
        # Verify get_project still returns the original project_ref
        self.assertDictContainsSubset(
            project, self.assignment_api.get_project(project_id))
        # Invalidate cache
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   project_id)
        # Verify get_project now returns the new project
        self.assertDictContainsSubset(
            updated_project,
            self.assignment_api.get_project(project_id))
        # Update project using the assignment_api manager back to original
        self.assignment_api.update_project(project['id'], project)
        # Verify get_project returns the original project_ref
        self.assertDictContainsSubset(
            project, self.assignment_api.get_project(project_id))
        # Delete project bypassing assignment_api
        self.assignment_api.driver.delete_project(project_id)
        # Verify get_project still returns the project_ref
        self.assertDictContainsSubset(
            project, self.assignment_api.get_project(project_id))
        # Invalidate cache
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   project_id)
        # Verify ProjectNotFound now raised
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project_id)
        # recreate project
        self.assignment_api.create_project(project_id, project)
        self.assignment_api.get_project(project_id)
        # delete project
        self.assignment_api.delete_project(project_id)
        # Verify ProjectNotFound is raised
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project_id)
def _assert_create_hierarchy_not_allowed(self):
domain = self._get_domain_fixture()
project1 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': '',
'domain_id': domain['id'],
'enabled': True,
'parent_id': None}
self.assignment_api.create_project(project1['id'], project1)
# Creating project2 under project1. LDAP will not allow
# the creation of a project with parent_id being set
project2 = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': '',
'domain_id': domain['id'],
'enabled': True,
'parent_id': project1['id']}
self.assertRaises(exception.InvalidParentProject,
self.assignment_api.create_project,
project2['id'],
project2)
# Now, we'll create project 2 with no parent
project2['parent_id'] = None
self.assignment_api.create_project(project2['id'], project2)
# Returning projects to be used across the tests
return [project1, project2]
def test_check_leaf_projects(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
self.assertTrue(self.assignment_api.is_leaf_project(project))
def test_list_projects_in_subtree(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
subtree_list = self.assignment_api.list_projects_in_subtree(
project)
self.assertEqual(0, len(subtree_list))
def test_list_project_parents(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
parents_list = self.assignment_api.list_project_parents(project)
self.assertEqual(0, len(parents_list))
def test_hierarchical_projects_crud(self):
self._assert_create_hierarchy_not_allowed()
def test_create_project_under_disabled_one(self):
self._assert_create_hierarchy_not_allowed()
def test_create_project_with_invalid_parent(self):
self._assert_create_hierarchy_not_allowed()
def test_create_leaf_project_with_invalid_domain(self):
self._assert_create_hierarchy_not_allowed()
def test_update_project_parent(self):
self._assert_create_hierarchy_not_allowed()
    def test_enable_project_with_disabled_parent(self):
        # Overrides the base test: LDAP rejects any hierarchical project.
        self._assert_create_hierarchy_not_allowed()
    def test_disable_hierarchical_leaf_project(self):
        # Overrides the base test: LDAP rejects any hierarchical project.
        self._assert_create_hierarchy_not_allowed()
    def test_disable_hierarchical_not_leaf_project(self):
        # Overrides the base test: LDAP rejects any hierarchical project.
        self._assert_create_hierarchy_not_allowed()
    def test_delete_hierarchical_leaf_project(self):
        # Overrides the base test: LDAP rejects any hierarchical project.
        self._assert_create_hierarchy_not_allowed()
    def test_delete_hierarchical_not_leaf_project(self):
        # Overrides the base test: LDAP rejects any hierarchical project.
        self._assert_create_hierarchy_not_allowed()
def test_check_hierarchy_depth(self):
projects = self._assert_create_hierarchy_not_allowed()
for project in projects:
depth = self._get_hierarchy_depth(project['id'])
self.assertEqual(1, depth)
    def test_multi_role_grant_by_user_group_on_project_domain(self):
        # This is a partial implementation of the standard test that
        # is defined in test_backend.py. It omits both domain and
        # group grants, since neither of these are yet supported by
        # the LDAP backend.
        role_list = []
        for _ in range(2):
            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
            self.assignment_api.create_role(role['id'], role)
            role_list.append(role)
        user1 = {'name': uuid.uuid4().hex,
                 'domain_id': CONF.identity.default_domain_id,
                 'password': uuid.uuid4().hex,
                 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'domain_id': CONF.identity.default_domain_id}
        self.assignment_api.create_project(project1['id'], project1)
        # Assign both roles to the same user on the same project.
        self.assignment_api.add_role_to_user_and_project(
            user_id=user1['id'],
            tenant_id=project1['id'],
            role_id=role_list[0]['id'])
        self.assignment_api.add_role_to_user_and_project(
            user_id=user1['id'],
            tenant_id=project1['id'],
            role_id=role_list[1]['id'])
        # Although list_grants are not yet supported, we can test the
        # alternate way of getting back lists of grants, where user
        # and group roles are combined. Only directly assigned user
        # roles are available, since group grants are not yet supported.
        combined_list = self.assignment_api.get_roles_for_user_and_project(
            user1['id'],
            project1['id'])
        self.assertEqual(2, len(combined_list))
        self.assertIn(role_list[0]['id'], combined_list)
        self.assertIn(role_list[1]['id'], combined_list)
        # Finally, although domain roles are not implemented, check we can
        # issue the combined get roles call with benign results, since this
        # is used in token generation.
        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
            user1['id'], CONF.identity.default_domain_id)
        self.assertEqual(0, len(combined_role_list))
    def test_list_projects_for_alternate_domain(self):
        # The single-backend LDAP driver only serves the default domain.
        self.skipTest(
            'N/A: LDAP does not support multiple domains')
def test_get_default_domain_by_name(self):
domain = self._get_domain_fixture()
domain_ref = self.assignment_api.get_domain_by_name(domain['name'])
self.assertEqual(domain_ref, domain)
def test_base_ldap_connection_deref_option(self):
def get_conn(deref_name):
self.config_fixture.config(group='ldap',
alias_dereferencing=deref_name)
base_ldap = common_ldap.BaseLdap(CONF)
return base_ldap.get_connection()
conn = get_conn('default')
self.assertEqual(ldap.get_option(ldap.OPT_DEREF),
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('always')
self.assertEqual(ldap.DEREF_ALWAYS,
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('finding')
self.assertEqual(ldap.DEREF_FINDING,
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('never')
self.assertEqual(ldap.DEREF_NEVER,
conn.get_option(ldap.OPT_DEREF))
conn = get_conn('searching')
self.assertEqual(ldap.DEREF_SEARCHING,
conn.get_option(ldap.OPT_DEREF))
def test_list_users_no_dn(self):
users = self.identity_api.list_users()
self.assertEqual(len(default_fixtures.USERS), len(users))
user_ids = set(user['id'] for user in users)
expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
for user in default_fixtures.USERS)
for user_ref in users:
self.assertNotIn('dn', user_ref)
self.assertEqual(expected_user_ids, user_ids)
def test_list_groups_no_dn(self):
# Create some test groups.
domain = self._get_domain_fixture()
expected_group_ids = []
numgroups = 3
for _ in range(numgroups):
group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group = self.identity_api.create_group(group)
expected_group_ids.append(group['id'])
# Fetch the test groups and ensure that they don't contain a dn.
groups = self.identity_api.list_groups()
self.assertEqual(numgroups, len(groups))
group_ids = set(group['id'] for group in groups)
for group_ref in groups:
self.assertNotIn('dn', group_ref)
self.assertEqual(set(expected_group_ids), group_ids)
def test_list_groups_for_user_no_dn(self):
# Create a test user.
user = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex, 'enabled': True}
user = self.identity_api.create_user(user)
# Create some test groups and add the test user as a member.
domain = self._get_domain_fixture()
expected_group_ids = []
numgroups = 3
for _ in range(numgroups):
group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
group = self.identity_api.create_group(group)
expected_group_ids.append(group['id'])
self.identity_api.add_user_to_group(user['id'], group['id'])
# Fetch the groups for the test user
# and ensure they don't contain a dn.
groups = self.identity_api.list_groups_for_user(user['id'])
self.assertEqual(numgroups, len(groups))
group_ids = set(group['id'] for group in groups)
for group_ref in groups:
self.assertNotIn('dn', group_ref)
self.assertEqual(set(expected_group_ids), group_ids)
    def test_user_id_attribute_in_create(self):
        # Re-point the user ID attribute at the LDAP 'mail' attribute.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'mail'
        self.reload_backends(CONF.identity.default_domain_id)
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id}
        user = self.identity_api.create_user(user)
        user_ref = self.identity_api.get_user(user['id'])
        # The 'email' attribute should have been created because it is also
        # being used as the user ID.
        self.assertEqual(user_ref['id'], user_ref['email'])
    def test_user_id_attribute_map(self):
        # Re-point the user ID attribute at the LDAP 'mail' attribute.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'mail'
        self.reload_backends(CONF.identity.default_domain_id)
        user_ref = self.identity_api.get_user(self.user_foo['email'])
        # the user_id_attribute map should be honored, which means
        # user_ref['id'] should contain the email attribute
        self.assertEqual(self.user_foo['email'], user_ref['id'])
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_get_id_from_dn_for_multivalued_attribute_id(self, mock_ldap_get):
        # Re-point the user ID attribute at the LDAP 'mail' attribute.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'mail'
        self.reload_backends(CONF.identity.default_domain_id)
        # make 'email' multivalued so we can test the error condition
        email1 = uuid.uuid4().hex
        email2 = uuid.uuid4().hex
        # Simulate an LDAP entry whose 'mail' attribute holds two values.
        mock_ldap_get.return_value = (
            'cn=nobodycares,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'mail': [email1, email2],
                'cn': 'nobodycares'
            }
        )
        user_ref = self.identity_api.get_user(email1)
        # make sure we get the ID from DN (old behavior) if the ID attribute
        # has multiple values
        self.assertEqual('nobodycares', user_ref['id'])
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_id_attribute_not_found(self, mock_ldap_get):
        # The mocked entry carries no ID attribute, so the lookup must
        # raise NotFound.
        mock_ldap_get.return_value = (
            'cn=nobodycares,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
            }
        )
        user_api = identity.backends.ldap.UserApi(CONF)
        self.assertRaises(exception.NotFound,
                          user_api.get,
                          'nobodycares')
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_id_not_in_dn(self, mock_ldap_get):
        # ID ('uid') and name ('cn') both come from entry attributes that
        # are absent from the DN itself.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'uid'
        conf.ldap.user_name_attribute = 'cn'
        self.reload_backends(CONF.identity.default_domain_id)
        mock_ldap_get.return_value = (
            'foo=bar,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'foo': ['bar'],
                'cn': ['junk'],
                'uid': ['crap']
            }
        )
        user_ref = self.identity_api.get_user('crap')
        self.assertEqual('crap', user_ref['id'])
        self.assertEqual('junk', user_ref['name'])
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_name_in_dn(self, mock_ldap_get):
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'sAMAccountName'
        conf.ldap.user_name_attribute = 'cn'
        self.reload_backends(CONF.identity.default_domain_id)
        # NOTE(review): the mocked entry spells the key 'SAMAccountName'
        # while the config above uses 'sAMAccountName' -- presumably this
        # relies on case-insensitive LDAP attribute matching; confirm the
        # differing case is intentional.
        mock_ldap_get.return_value = (
            'cn=Foo Bar,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'cn': ['Foo Bar'],
                'SAMAccountName': ['crap']
            }
        )
        user_ref = self.identity_api.get_user('crap')
        self.assertEqual('crap', user_ref['id'])
        self.assertEqual('Foo Bar', user_ref['name'])
class LDAPIdentityEnabledEmulation(LDAPIdentity):
    """Re-run the LDAP identity tests with enabled-attribute emulation on."""

    def setUp(self):
        super(LDAPIdentityEnabledEmulation, self).setUp()
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # With emulation on, these fixture entities gain an implicit
        # 'enabled' value, so reflect that in the local references.
        for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
                    self.user_two, self.user_badguy]:
            obj.setdefault('enabled', True)

    def load_fixtures(self, fixtures):
        # Override super impl since need to create group container.
        create_group_container(self.identity_api)
        # NOTE(review): super() is deliberately invoked with LDAPIdentity
        # (not this class), skipping LDAPIdentity's own load_fixtures --
        # confirm this is the intended MRO shortcut.
        super(LDAPIdentity, self).load_fixtures(fixtures)

    def config_files(self):
        config_files = super(LDAPIdentityEnabledEmulation, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
        return config_files

    def config_overrides(self):
        super(LDAPIdentityEnabledEmulation, self).config_overrides()
        # Switch on enabled-emulation for both users and projects.
        self.config_fixture.config(group='ldap',
                                   user_enabled_emulation=True,
                                   project_enabled_emulation=True)

    def test_project_crud(self):
        # NOTE(topol): LDAPIdentityEnabledEmulation will create an
        #              enabled key in the project dictionary so this
        #              method override handles this side-effect
        project = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'domain_id': CONF.identity.default_domain_id,
            'description': uuid.uuid4().hex,
            'parent_id': None}
        self.assignment_api.create_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        # self.assignment_api.create_project adds an enabled
        # key with a value of True when LDAPIdentityEnabledEmulation
        # is used so we now add this expected key to the project dictionary
        project['enabled'] = True
        self.assertDictEqual(project_ref, project)
        project['description'] = uuid.uuid4().hex
        self.assignment_api.update_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        self.assertDictEqual(project_ref, project)
        self.assignment_api.delete_project(project['id'])
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project['id'])

    def test_user_crud(self):
        user_dict = {
            'domain_id': CONF.identity.default_domain_id,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex}
        user = self.identity_api.create_user(user_dict)
        # Emulation adds an implicit enabled=True to created users.
        user_dict['enabled'] = True
        user_ref = self.identity_api.get_user(user['id'])
        del user_dict['password']
        user_ref_dict = dict((x, user_ref[x]) for x in user_ref)
        self.assertDictContainsSubset(user_dict, user_ref_dict)
        user_dict['password'] = uuid.uuid4().hex
        # NOTE(review): update_user is passed ``user`` (the created ref),
        # not ``user_dict``, so the new password assigned just above is
        # never sent to the backend -- verify this is intentional.
        self.identity_api.update_user(user['id'], user)
        user_ref = self.identity_api.get_user(user['id'])
        del user_dict['password']
        user_ref_dict = dict((x, user_ref[x]) for x in user_ref)
        self.assertDictContainsSubset(user_dict, user_ref_dict)
        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user['id'])

    def test_user_auth_emulated(self):
        # Authentication still works when an explicit emulation DN is set.
        self.config_fixture.config(group='ldap',
                                   user_enabled_emulation_dn='cn=test,dc=test')
        self.reload_backends(CONF.identity.default_domain_id)
        self.identity_api.authenticate(
            context={},
            user_id=self.user_foo['id'],
            password=self.user_foo['password'])

    def test_user_enable_attribute_mask(self):
        self.skipTest(
            "Enabled emulation conflicts with enabled mask")

    def test_user_enabled_invert(self):
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default=False)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        user1 = {'name': u'fäké1', 'enabled': True,
                 'domain_id': CONF.identity.default_domain_id}
        user2 = {'name': u'fäké2', 'enabled': False,
                 'domain_id': CONF.identity.default_domain_id}
        user3 = {'name': u'fäké3',
                 'domain_id': CONF.identity.default_domain_id}
        # Ensure that the enabled LDAP attribute is not set for a
        # newly created enabled user.
        user_ref = self.identity_api.create_user(user1)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])
        # Ensure that an enabled LDAP attribute is not set for a disabled user.
        user1['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(False, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        # Enable the user and ensure that the LDAP enabled
        # attribute is not set.
        user1['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        # Ensure that the LDAP enabled attribute is not set for a
        # newly created disabled user.
        user_ref = self.identity_api.create_user(user2)
        self.assertIs(False, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(False, user_ref['enabled'])
        # Ensure that the LDAP enabled attribute is not set for a newly created
        # user when the user_enabled_default setting is used.
        user_ref = self.identity_api.create_user(user3)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])

    def test_user_enabled_invert_no_enabled_value(self):
        self.skipTest(
            "N/A: Covered by test_user_enabled_invert")

    def test_user_enabled_invert_default_str_value(self):
        self.skipTest(
            "N/A: Covered by test_user_enabled_invert")
class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides,
                                tests.TestCase):
    """LDAP identity backend combined with the SQL assignment backend."""

    def config_files(self):
        config_files = super(LdapIdentitySqlAssignment, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_ldap_sql.conf'))
        return config_files

    def setUp(self):
        self.useFixture(database.Database())
        super(LdapIdentitySqlAssignment, self).setUp()
        self.clear_database()
        self.load_backends()
        cache.configure_cache_region(cache.REGION)
        # Stand up the SQL schema for the assignment backend and make sure
        # it is torn down again at the end of each test.
        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
        self.load_fixtures(default_fixtures)
        # defaulted by the data load
        self.user_foo['enabled'] = True

    def config_overrides(self):
        super(LdapIdentitySqlAssignment, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')

    def test_domain_crud(self):
        # Overridden as a no-op; domain creation is forbidden in this
        # configuration (see test_create_domain below).
        pass

    def test_list_domains(self):
        domains = self.assignment_api.list_domains()
        self.assertEqual([assignment.calc_default_domain()], domains)

    def test_list_domains_non_default_domain_id(self):
        # If change the default_domain_id, the ID of the default domain
        # returned by list_domains doesn't change because the SQL identity
        # backend reads it from the database, which doesn't get updated by
        # config change.
        orig_default_domain_id = CONF.identity.default_domain_id
        new_domain_id = uuid.uuid4().hex
        self.config_fixture.config(group='identity',
                                   default_domain_id=new_domain_id)
        domains = self.assignment_api.list_domains()
        self.assertEqual(orig_default_domain_id, domains[0]['id'])

    def test_create_domain(self):
        # Creating additional domains is forbidden with this backend combo.
        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                  'enabled': True}
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.create_domain,
                          domain['id'],
                          domain)

    def test_get_and_remove_role_grant_by_group_and_domain(self):
        # TODO(henry-nash): We should really rewrite the tests in test_backend
        # to be more flexible as to where the domains are sourced from, so
        # that we would not need to override such tests here. This is raised
        # as bug 1373865.
        new_domain = self._get_domain_fixture()
        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        self.identity_api.add_user_to_group(new_user['id'],
                                            new_group['id'])
        # No grants exist yet for the group on the domain.
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        # Create a grant and check it can be listed back.
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertDictEqual(roles_ref[0], self.role_member)
        # Delete the grant; subsequent listing and deletion must reflect it.
        self.assignment_api.delete_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.NotFound,
                          self.assignment_api.delete_grant,
                          group_id=new_group['id'],
                          domain_id=new_domain['id'],
                          role_id='member')

    def test_project_enabled_ignored_disable_error(self):
        # Override
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")
class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
    """Class to test ID mapping of the default LDAP backend.

    The default configuration is not to enable mapping when using a single
    backend LDAP driver. However, a cloud provider might want to enable
    the mapping, hence hiding the LDAP IDs from any clients of keystone.
    Setting backward_compatible_ids to False will enable this mapping.

    """

    def config_overrides(self):
        super(LdapIdentitySqlAssignmentWithMapping, self).config_overrides()
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)

    def test_dynamic_mapping_build(self):
        """Test to ensure entities not created via controller are mapped.

        Many LDAP backends will, essentially, be read only. In these cases
        the mapping is not built by creating objects, but rather by
        enumerating the entries. We test this here by manually deleting the
        mapping and then trying to re-read the entries.

        """
        initial_mappings = len(mapping_sql.list_id_mappings())
        user1 = {'name': uuid.uuid4().hex,
                 'domain_id': CONF.identity.default_domain_id,
                 'password': uuid.uuid4().hex, 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        user2 = {'name': uuid.uuid4().hex,
                 'domain_id': CONF.identity.default_domain_id,
                 'password': uuid.uuid4().hex, 'enabled': True}
        user2 = self.identity_api.create_user(user2)
        mappings = mapping_sql.list_id_mappings()
        self.assertEqual(initial_mappings + 2, len(mappings))
        # Now delete the mappings for the two users above
        self.id_mapping_api.purge_mappings({'public_id': user1['id']})
        self.id_mapping_api.purge_mappings({'public_id': user2['id']})
        # We should no longer be able to get these users via their old IDs
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user1['id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user2['id'])
        # Now enumerate all users...this should re-build the mapping, and
        # we should be able to find the users via their original public IDs.
        self.identity_api.list_users()
        self.identity_api.get_user(user1['id'])
        self.identity_api.get_user(user2['id'])

    def test_get_roles_for_user_and_project_user_group_same_id(self):
        self.skipTest('N/A: We never generate the same ID for a user and '
                      'group in our mapping table')
class BaseMultiLDAPandSQLIdentity(object):
    """Mixin class with support methods for domain-specific config testing."""

    def create_user(self, domain_id):
        # Create a random enabled user in the given domain and return its
        # reference with the clear-text password restored.
        user = {'name': uuid.uuid4().hex,
                'domain_id': domain_id,
                'password': uuid.uuid4().hex,
                'enabled': True}
        user_ref = self.identity_api.create_user(user)
        # Put the password back in, since this is used later by tests to
        # authenticate.
        user_ref['password'] = user['password']
        return user_ref

    def create_users_across_domains(self):
        """Create a set of users, each with a role on their own domain."""
        # We also will check that the right number of id mappings get created
        initial_mappings = len(mapping_sql.list_id_mappings())
        self.users['user0'] = self.create_user(
            self.domains['domain_default']['id'])
        self.assignment_api.create_grant(
            user_id=self.users['user0']['id'],
            domain_id=self.domains['domain_default']['id'],
            role_id=self.role_member['id'])
        for x in range(1, self.domain_count):
            self.users['user%s' % x] = self.create_user(
                self.domains['domain%s' % x]['id'])
            self.assignment_api.create_grant(
                user_id=self.users['user%s' % x]['id'],
                domain_id=self.domains['domain%s' % x]['id'],
                role_id=self.role_member['id'])
        # So how many new id mappings should have been created? One for each
        # user created in a domain that is using the non default driver.
        self.assertEqual(initial_mappings + self.domain_specific_count,
                         len(mapping_sql.list_id_mappings()))

    def check_user(self, user, domain_id, expected_status):
        """Check user is in correct backend.

        As part of the tests, we want to force ourselves to manually
        select the driver for a given domain, to make sure the entity
        ended up in the correct backend.

        """
        driver = self.identity_api._select_identity_driver(domain_id)
        unused, unused, entity_id = (
            self.identity_api._get_domain_driver_and_entity_id(
                user['id']))
        if expected_status == 200:
            ref = driver.get_user(entity_id)
            # NOTE(review): ``map`` here appears to be a module alias for
            # the identity mapping module (shadowing the builtin) -- confirm
            # against the file's import block.
            ref = self.identity_api._set_domain_id_and_mapping(
                ref, domain_id, driver, map.EntityType.USER)
            user = user.copy()
            del user['password']
            self.assertDictEqual(ref, user)
        else:
            # TODO(henry-nash): Use AssertRaises here, although
            # there appears to be an issue with using driver.get_user
            # inside that construct
            try:
                driver.get_user(entity_id)
            except expected_status:
                pass

    def setup_initial_domains(self):

        def create_domain(domain):
            # Tolerate re-runs: fall back to a by-name lookup on conflict.
            try:
                ref = self.assignment_api.create_domain(
                    domain['id'], domain)
            except exception.Conflict:
                ref = (
                    self.assignment_api.get_domain_by_name(domain['name']))
            return ref

        self.domains = {}
        for x in range(1, self.domain_count):
            domain = 'domain%s' % x
            self.domains[domain] = create_domain(
                {'id': uuid.uuid4().hex, 'name': domain})
        self.domains['domain_default'] = create_domain(
            assignment.calc_default_domain())

    def test_authenticate_to_each_domain(self):
        """Test that a user in each domain can authenticate."""
        for user_num in range(self.domain_count):
            user = 'user%s' % user_num
            self.identity_api.authenticate(
                context={},
                user_id=self.users[user]['id'],
                password=self.users[user]['password'])
class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
tests.TestCase, BaseMultiLDAPandSQLIdentity):
"""Class to test common SQL plus individual LDAP backends.
We define a set of domains and domain-specific backends:
- A separate LDAP backend for the default domain
- A separate LDAP backend for domain1
- domain2 shares the same LDAP as domain1, but uses a different
tree attach point
- An SQL backend for all other domains (which will include domain3
and domain4)
Normally one would expect that the default domain would be handled as
part of the "other domains" - however the above provides better
test coverage since most of the existing backend tests use the default
domain.
"""
def setUp(self):
self.useFixture(database.Database())
super(MultiLDAPandSQLIdentity, self).setUp()
self.load_backends()
self.engine = sql.get_engine()
self.addCleanup(sql.cleanup)
sql.ModelBase.metadata.create_all(bind=self.engine)
self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
self.domain_count = 5
self.domain_specific_count = 3
self.setup_initial_domains()
self._setup_initial_users()
# All initial test data setup complete, time to switch on support
# for separate backends per domain.
self.config_fixture.config(
group='identity', domain_specific_drivers_enabled=True,
domain_config_dir=tests.TESTCONF + '/domain_configs_multi_ldap')
self.config_fixture.config(group='identity_mapping',
backward_compatible_ids=False)
self.clear_database()
self.load_fixtures(default_fixtures)
self.create_users_across_domains()
def config_overrides(self):
super(MultiLDAPandSQLIdentity, self).config_overrides()
# Make sure identity and assignment are actually SQL drivers,
# BaseLDAPIdentity sets these options to use LDAP.
self.config_fixture.config(
group='identity',
driver='keystone.identity.backends.sql.Identity')
self.config_fixture.config(
group='assignment',
driver='keystone.assignment.backends.sql.Assignment')
def _setup_initial_users(self):
# Create some identity entities BEFORE we switch to multi-backend, so
# we can test that these are still accessible
self.users = {}
self.users['userA'] = self.create_user(
self.domains['domain_default']['id'])
self.users['userB'] = self.create_user(
self.domains['domain1']['id'])
self.users['userC'] = self.create_user(
self.domains['domain3']['id'])
def reload_backends(self, domain_id):
# Just reload the driver for this domain - which will pickup
# any updated cfg
self.identity_api.domain_configs.reload_domain_driver(
self.identity_api.assignment_api, domain_id)
def get_config(self, domain_id):
# Get the config for this domain, will return CONF
# if no specific config defined for this domain
return self.identity_api.domain_configs.get_domain_conf(domain_id)
def test_list_domains(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_domains_non_default_domain_id(self):
self.skipTest(
'N/A: Not relevant for multi ldap testing')
def test_list_users(self):
# Override the standard list users, since we have added an extra user
# to the default domain, so the number of expected users is one more
# than in the standard test.
users = self.identity_api.list_users(
domain_scope=self._set_domain_scope(
CONF.identity.default_domain_id))
self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
user_ids = set(user['id'] for user in users)
expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
for user in default_fixtures.USERS)
expected_user_ids.add(self.users['user0']['id'])
for user_ref in users:
self.assertNotIn('password', user_ref)
self.assertEqual(expected_user_ids, user_ids)
def test_domain_segregation(self):
"""Test that separate configs have segregated the domain.
Test Plan:
- Users were created in each domain as part of setup, now make sure
you can only find a given user in its relevant domain/backend
- Make sure that for a backend that supports multiple domains
you can get the users via any of its domains
"""
# Check that I can read a user with the appropriate domain-selected
# driver, but won't find it via any other domain driver
check_user = self.check_user
check_user(self.users['user0'],
self.domains['domain_default']['id'], 200)
for domain in [self.domains['domain1']['id'],
self.domains['domain2']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user0'], domain, exception.UserNotFound)
check_user(self.users['user1'], self.domains['domain1']['id'], 200)
for domain in [self.domains['domain_default']['id'],
self.domains['domain2']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user1'], domain, exception.UserNotFound)
check_user(self.users['user2'], self.domains['domain2']['id'], 200)
for domain in [self.domains['domain_default']['id'],
self.domains['domain1']['id'],
self.domains['domain3']['id'],
self.domains['domain4']['id']]:
check_user(self.users['user2'], domain, exception.UserNotFound)
# domain3 and domain4 share the same backend, so you should be
# able to see user3 and user4 from either.
check_user(self.users['user3'], self.domains['domain3']['id'], 200)
check_user(self.users['user3'], self.domains['domain4']['id'], 200)
check_user(self.users['user4'], self.domains['domain3']['id'], 200)
check_user(self.users['user4'], self.domains['domain4']['id'], 200)
for domain in [self.domains['domain_default']['id'],
self.domains['domain1']['id'],
self.domains['domain2']['id']]:
check_user(self.users['user3'], domain, exception.UserNotFound)
check_user(self.users['user4'], domain, exception.UserNotFound)
# Finally, going through the regular manager layer, make sure we
# only see the right number of users in each of the non-default
# domains. One might have expected two users in domain1 (since we
# created one before we switched to multi-backend), however since
# that domain changed backends in the switch we don't find it anymore.
# This is as designed - we don't support moving domains between
# backends.
#
# The listing of the default domain is already handled in the
# test_lists_users() method.
for domain in [self.domains['domain1']['id'],
self.domains['domain2']['id'],
self.domains['domain4']['id']]:
self.assertThat(
self.identity_api.list_users(domain_scope=domain),
matchers.HasLength(1))
# domain3 had a user created before we switched on
# multiple backends, plus one created afterwards - and its
# backend has not changed - so we should find two.
self.assertThat(
self.identity_api.list_users(
domain_scope=self.domains['domain3']['id']),
matchers.HasLength(2))
def test_existing_uuids_work(self):
"""Test that 'uni-domain' created IDs still work.
Throwing the switch to domain-specific backends should not cause
existing identities to be inaccessible via ID.
"""
self.identity_api.get_user(self.users['userA']['id'])
self.identity_api.get_user(self.users['userB']['id'])
self.identity_api.get_user(self.users['userC']['id'])
def test_scanning_of_config_dir(self):
"""Test the Manager class scans the config directory.
The setup for the main tests above load the domain configs directly
so that the test overrides can be included. This test just makes sure
that the standard config directory scanning does pick up the relevant
domain config files.
"""
# Confirm that config has drivers_enabled as True, which we will
# check has been set to False later in this test
self.assertTrue(config.CONF.identity.domain_specific_drivers_enabled)
self.load_backends()
# Execute any command to trigger the lazy loading of domain configs
self.identity_api.list_users(
domain_scope=self.domains['domain1']['id'])
# ...and now check the domain configs have been set up
self.assertIn('default', self.identity_api.domain_configs)
self.assertIn(self.domains['domain1']['id'],
self.identity_api.domain_configs)
self.assertIn(self.domains['domain2']['id'],
self.identity_api.domain_configs)
self.assertNotIn(self.domains['domain3']['id'],
self.identity_api.domain_configs)
self.assertNotIn(self.domains['domain4']['id'],
self.identity_api.domain_configs)
# Finally check that a domain specific config contains items from both
# the primary config and the domain specific config
conf = self.identity_api.domain_configs.get_domain_conf(
self.domains['domain1']['id'])
# This should now be false, as is the default, since this is not
# set in the standard primary config file
self.assertFalse(conf.identity.domain_specific_drivers_enabled)
# ..and make sure a domain-specific options is also set
self.assertEqual('fake://memory1', conf.ldap.url)
def test_delete_domain_with_user_added(self):
domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'enabled': True}
project = {'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'domain_id': domain['id'],
'description': uuid.uuid4().hex,
'parent_id': None,
'enabled': True}
self.assignment_api.create_domain(domain['id'], domain)
self.assignment_api.create_project(project['id'], project)
project_ref = self.assignment_api.get_project(project['id'])
self.assertDictEqual(project_ref, project)
self.assignment_api.create_grant(user_id=self.user_foo['id'],
project_id=project['id'],
role_id=self.role_member['id'])
self.assignment_api.delete_grant(user_id=self.user_foo['id'],
project_id=project['id'],
role_id=self.role_member['id'])
domain['enabled'] = False
self.assignment_api.update_domain(domain['id'], domain)
self.assignment_api.delete_domain(domain['id'])
self.assertRaises(exception.DomainNotFound,
self.assignment_api.get_domain,
domain['id'])
    def test_user_enabled_ignored_disable_error(self):
        # Override the inherited LDAP test: the attribute-ignore option is
        # an LDAP-only concern and has no effect on this SQL-backed class.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")

    def test_group_enabled_ignored_disable_error(self):
        # Override - same reasoning as the user variant above.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")

    def test_project_enabled_ignored_disable_error(self):
        # Override - project enabled state is handled by the SQL assignment
        # backend here, so the LDAP ignore option is irrelevant.
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")
class DomainSpecificLDAPandSQLIdentity(
        BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase,
        BaseMultiLDAPandSQLIdentity):
    """Class to test when all domains use specific configs, including SQL.

    We define a set of domains and domain-specific backends:

    - A separate LDAP backend for the default domain
    - A separate SQL backend for domain1

    Although the default driver still exists, we don't use it.

    """
    def setUp(self):
        # Fresh database fixture per test; cleanup is automatic.
        self.useFixture(database.Database())
        super(DomainSpecificLDAPandSQLIdentity, self).setUp()
        self.initial_setup()

    def initial_setup(self):
        # We aren't setting up any initial data ahead of switching to
        # domain-specific operation, so make the switch straight away.
        self.config_fixture.config(
            group='identity', domain_specific_drivers_enabled=True,
            domain_config_dir=(
                tests.TESTCONF + '/domain_configs_one_sql_one_ldap'))
        # Use public (mapped) IDs so LDAP- and SQL-backed users look alike.
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)

        self.load_backends()

        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)

        # Two domains in total, each with its own domain-specific config.
        self.domain_count = 2
        self.domain_specific_count = 2
        self.setup_initial_domains()
        self.users = {}

        self.clear_database()
        self.load_fixtures(default_fixtures)
        self.create_users_across_domains()

    def config_overrides(self):
        super(DomainSpecificLDAPandSQLIdentity, self).config_overrides()
        # Make sure assignment is actually an SQL driver,
        # BaseLDAPIdentity causes this option to use LDAP.
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')

    def reload_backends(self, domain_id):
        # Just reload the driver for this domain - which will pickup
        # any updated cfg
        self.identity_api.domain_configs.reload_domain_driver(
            self.identity_api.assignment_api, domain_id)

    def get_config(self, domain_id):
        # Get the config for this domain, will return CONF
        # if no specific config defined for this domain
        return self.identity_api.domain_configs.get_domain_conf(domain_id)

    def test_list_domains(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')

    def test_list_domains_non_default_domain_id(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')

    def test_domain_crud(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')

    def test_list_users(self):
        # Override the standard list users, since we have added an extra user
        # to the default domain, so the number of expected users is one more
        # than in the standard test.
        users = self.identity_api.list_users(
            domain_scope=self._set_domain_scope(
                CONF.identity.default_domain_id))
        self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
        user_ids = set(user['id'] for user in users)
        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
                                for user in default_fixtures.USERS)
        # ...plus the extra user created for the default domain in setup.
        expected_user_ids.add(self.users['user0']['id'])
        # Passwords must never be leaked in list results.
        for user_ref in users:
            self.assertNotIn('password', user_ref)
        self.assertEqual(expected_user_ids, user_ids)

    def test_domain_segregation(self):
        """Test that separate configs have segregated the domain.

        Test Plan:

        - Users were created in each domain as part of setup, now make sure
          you can only find a given user in its relevant domain/backend
        - Make sure that for a backend that supports multiple domains
          you can get the users via any of its domains

        """
        # Check that I can read a user with the appropriate domain-selected
        # driver, but won't find it via any other domain driver
        self.check_user(self.users['user0'],
                        self.domains['domain_default']['id'], 200)
        self.check_user(self.users['user0'],
                        self.domains['domain1']['id'], exception.UserNotFound)

        self.check_user(self.users['user1'],
                        self.domains['domain1']['id'], 200)
        self.check_user(self.users['user1'],
                        self.domains['domain_default']['id'],
                        exception.UserNotFound)

        # Finally check that a domain specific config contains items from both
        # the primary config and the domain specific config: going through the
        # regular manager layer, we should only see the single user that was
        # created in the non-default domain.
        self.assertThat(
            self.identity_api.list_users(
                domain_scope=self.domains['domain1']['id']),
            matchers.HasLength(1))

    def test_add_role_grant_to_user_and_project_404(self):
        self.skipTest('Blocked by bug 1101287')

    def test_get_role_grants_for_user_and_project_404(self):
        self.skipTest('Blocked by bug 1101287')

    def test_list_projects_for_user_with_grants(self):
        self.skipTest('Blocked by bug 1221805')

    def test_get_roles_for_user_and_project_user_group_same_id(self):
        self.skipTest('N/A: We never generate the same ID for a user and '
                      'group in our mapping table')

    def test_user_id_comma(self):
        self.skipTest('Only valid if it is guaranteed to be talking to '
                      'the fakeldap backend')

    def test_user_id_comma_grants(self):
        self.skipTest('Only valid if it is guaranteed to be talking to '
                      'the fakeldap backend')

    def test_user_enabled_ignored_disable_error(self):
        # Override: the enabled-ignore option is an LDAP-only concern.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")

    def test_group_enabled_ignored_disable_error(self):
        # Override: the enabled-ignore option is an LDAP-only concern.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")

    def test_project_enabled_ignored_disable_error(self):
        # Override: project enabled state lives in the SQL assignment backend.
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")
class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
    """Class to test simplest use of domain-specific SQL driver.

    The simplest use of an SQL domain-specific backend is when it is used to
    augment the standard case when LDAP is the default driver defined in the
    main config file. This would allow, for example, service users to be
    stored in SQL while LDAP handles the rest. Hence we define:

    - The default driver uses the LDAP backend for the default domain
    - A separate SQL backend for domain1

    """
    def initial_setup(self):
        # We aren't setting up any initial data ahead of switching to
        # domain-specific operation, so make the switch straight away.
        self.config_fixture.config(
            group='identity', domain_specific_drivers_enabled=True,
            domain_config_dir=(
                tests.TESTCONF + '/domain_configs_default_ldap_one_sql'))
        # Part of the testing counts how many new mappings get created as
        # we create users, so ensure we are NOT using mapping for the default
        # LDAP domain so this doesn't confuse the calculation.
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=True)

        self.load_backends()

        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)

        # Two domains, but only domain1 has a domain-specific config file.
        self.domain_count = 2
        self.domain_specific_count = 1
        self.setup_initial_domains()
        self.users = {}

        self.load_fixtures(default_fixtures)
        self.create_users_across_domains()

    def config_overrides(self):
        super(DomainSpecificSQLIdentity, self).config_overrides()
        # LDAP is the default identity driver; assignment stays in SQL.
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')

    def get_config(self, domain_id):
        # The default domain uses the primary CONF; any other domain has
        # its own domain-specific configuration.
        if domain_id == CONF.identity.default_domain_id:
            return CONF
        else:
            return self.identity_api.domain_configs.get_domain_conf(domain_id)

    def reload_backends(self, domain_id):
        # The default domain is served by the primary drivers, so reload
        # everything for it; other domains only need their own driver.
        if domain_id == CONF.identity.default_domain_id:
            self.load_backends()
        else:
            # Just reload the driver for this domain - which will pickup
            # any updated cfg
            self.identity_api.domain_configs.reload_domain_driver(
                self.identity_api.assignment_api, domain_id)

    def test_default_sql_plus_sql_specific_driver_fails(self):
        # First confirm that if ldap is default driver, domain1 can be
        # loaded as sql
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
        self.load_backends()
        # Make any identity call to initiate the lazy loading of configs
        self.identity_api.list_users(
            domain_scope=CONF.identity.default_domain_id)
        self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))

        # Now re-initialize, but with sql as the default identity driver
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.sql.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
        self.load_backends()
        # Make any identity call to initiate the lazy loading of configs, which
        # should fail since we would now have two sql drivers.
        self.assertRaises(exception.MultipleSQLDriversInConfig,
                          self.identity_api.list_users,
                          domain_scope=CONF.identity.default_domain_id)

    def test_multiple_sql_specific_drivers_fails(self):
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
        self.load_backends()
        # Ensure default, domain1 and domain2 exist
        self.domain_count = 3
        self.setup_initial_domains()
        # Make any identity call to initiate the lazy loading of configs
        self.identity_api.list_users(
            domain_scope=CONF.identity.default_domain_id)
        # This will only load domain1, since the domain2 config file is
        # not stored in the same location
        self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))

        # Now try and manually load a 2nd sql specific driver, for domain2,
        # which should fail.
        self.assertRaises(exception.MultipleSQLDriversInConfig,
                          self.identity_api.domain_configs._load_config,
                          self.identity_api.assignment_api,
                          [tests.TESTCONF + '/domain_configs_one_extra_sql/' +
                           'keystone.domain2.conf'],
                          'domain2')
| 43.605842
| 79
| 0.625535
|
import copy
import uuid
import ldap
import mock
from testtools import matchers
from keystone import assignment
from keystone.common import cache
from keystone.common import ldap as common_ldap
from keystone.common.ldap import core as common_ldap_core
from keystone.common import sql
from keystone import config
from keystone import exception
from keystone import identity
from keystone.identity.mapping_backends import mapping as map
from keystone import tests
from keystone.tests import default_fixtures
from keystone.tests import fakeldap
from keystone.tests import identity_mapping as mapping_sql
from keystone.tests.ksfixtures import database
from keystone.tests import test_backend
CONF = config.CONF
def create_group_container(identity_api):
    """Create the ou=Groups container entry in the (fake) LDAP backend."""
    group_api = identity_api.driver.group
    connection = group_api.get_connection()
    container_dn = 'ou=Groups,cn=example,cn=com'
    attributes = [('objectclass', ['organizationalUnit']),
                  ('ou', ['Groups'])]
    connection.add_s(container_dn, attributes)
class BaseLDAPIdentity(test_backend.IdentityTests):
    def setUp(self):
        # Register the fake in-memory LDAP handler before the backends are
        # loaded so every 'fake://' URL in the test configs uses it.
        super(BaseLDAPIdentity, self).setUp()
        self.clear_database()

        common_ldap.register_handler('fake://', fakeldap.FakeLdap)
        self.load_backends()
        self.load_fixtures(default_fixtures)

        # Remove the fake handler again once the test is done.
        self.addCleanup(common_ldap_core._HANDLERS.clear)
    def _get_domain_fixture(self):
        # The LDAP identity backend only supports a single domain, so always
        # hand back the default one.
        return self.assignment_api.get_domain(CONF.identity.default_domain_id)

    def clear_database(self):
        # Wipe every fake-LDAP shelf so each test starts from empty data.
        for shelf in fakeldap.FakeShelves:
            fakeldap.FakeShelves[shelf].clear()

    def reload_backends(self, domain_id):
        # Only one backend exists, so a domain-specific reload is just a
        # full reload; subclasses with per-domain configs override this.
        self.load_backends()

    def get_config(self, domain_id):
        # Only one backend exists, so the global CONF applies to every
        # domain; subclasses with per-domain configs override this.
        return CONF
    def config_overrides(self):
        # Force the identity driver to the LDAP backend for these tests.
        super(BaseLDAPIdentity, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')

    def config_files(self):
        # Layer the LDAP-specific test configuration on top of the base set.
        config_files = super(BaseLDAPIdentity, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
        return config_files
def get_user_enabled_vals(self, user):
user_dn = (
self.identity_api.driver.user._id_to_dn_string(user['id']))
enabled_attr_name = CONF.ldap.user_enabled_attribute
ldap_ = self.identity_api.driver.user.get_connection()
res = ldap_.search_s(user_dn,
ldap.SCOPE_BASE,
u'(sn=%s)' % user['name'])
if enabled_attr_name in res[0][1]:
return res[0][1][enabled_attr_name]
else:
return None
def test_build_tree(self):
user_api = identity.backends.ldap.UserApi(CONF)
self.assertTrue(user_api)
self.assertEqual("ou=Users,%s" % CONF.ldap.suffix, user_api.tree_dn)
    def test_configurable_allowed_user_actions(self):
        # With the default configuration, full user CRUD is permitted.
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                'tenants': ['bar']}
        user = self.identity_api.create_user(user)
        self.identity_api.get_user(user['id'])

        user['password'] = u'fäképass2'
        self.identity_api.update_user(user['id'], user)

        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user['id'])
    def test_configurable_forbidden_user_actions(self):
        # Turn off every user write operation and verify that each one is
        # rejected with ForbiddenAction.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_allow_create = False
        conf.ldap.user_allow_update = False
        conf.ldap.user_allow_delete = False
        self.reload_backends(CONF.identity.default_domain_id)

        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                'tenants': ['bar']}
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.create_user,
                          user)

        self.user_foo['password'] = u'fäképass2'
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.update_user,
                          self.user_foo['id'],
                          self.user_foo)

        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.delete_user,
                          self.user_foo['id'])
def test_configurable_forbidden_create_existing_user(self):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_allow_create = False
self.reload_backends(CONF.identity.default_domain_id)
self.assertRaises(exception.ForbiddenAction,
self.identity_api.create_user,
self.user_foo)
    def test_user_filter(self):
        # The fixture user is visible under the default (empty) user filter.
        user_ref = self.identity_api.get_user(self.user_foo['id'])
        self.user_foo.pop('password')
        self.assertDictEqual(user_ref, self.user_foo)

        # But once a filter that matches nothing is applied, the same user
        # must no longer be found.
        conf = self.get_config(user_ref['domain_id'])
        conf.ldap.user_filter = '(CN=DOES_NOT_MATCH)'
        self.reload_backends(user_ref['domain_id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          self.user_foo['id'])
    def test_remove_role_grant_from_user_and_project(self):
        # Grant, verify, revoke, then verify the grant is gone and that a
        # second revoke raises NotFound.
        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_baz['id'])
        self.assertDictEqual(roles_ref[0], self.role_member)

        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            user_id=self.user_foo['id'],
            project_id=self.tenant_baz['id'])
        self.assertEqual(0, len(roles_ref))
        self.assertRaises(exception.NotFound,
                          self.assignment_api.delete_grant,
                          user_id=self.user_foo['id'],
                          project_id=self.tenant_baz['id'],
                          role_id='member')
def test_get_and_remove_role_grant_by_group_and_project(self):
new_domain = self._get_domain_fixture()
new_group = {'domain_id': new_domain['id'],
'name': uuid.uuid4().hex}
new_group = self.identity_api.create_group(new_group)
new_user = {'name': 'new_user', 'enabled': True,
'domain_id': new_domain['id']}
new_user = self.identity_api.create_user(new_user)
self.identity_api.add_user_to_group(new_user['id'],
new_group['id'])
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertEqual([], roles_ref)
self.assertEqual(0, len(roles_ref))
self.assignment_api.create_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertNotEmpty(roles_ref)
self.assertDictEqual(roles_ref[0], self.role_member)
self.assignment_api.delete_grant(group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
roles_ref = self.assignment_api.list_grants(
group_id=new_group['id'],
project_id=self.tenant_bar['id'])
self.assertEqual(0, len(roles_ref))
self.assertRaises(exception.NotFound,
self.assignment_api.delete_grant,
group_id=new_group['id'],
project_id=self.tenant_bar['id'],
role_id='member')
    # The inherited tests below need either multiple domains or mixed
    # user/group grant handling, which the single-domain LDAP backend does
    # not support (or which are blocked by the referenced bugs).
    def test_get_and_remove_role_grant_by_group_and_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_get_and_remove_role_grant_by_user_and_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_get_and_remove_correct_role_grant_from_a_mix(self):
        self.skipTest('Blocked by bug 1101287')

    def test_get_and_remove_role_grant_by_group_and_cross_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_get_and_remove_role_grant_by_user_and_cross_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_role_grant_by_group_and_cross_domain_project(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_role_grant_by_user_and_cross_domain_project(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_multi_role_grant_by_user_group_on_project_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_delete_role_with_user_and_group_grants(self):
        self.skipTest('Blocked by bug 1101287')

    def test_delete_user_with_group_project_domain_links(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_delete_group_with_user_project_domain_links(self):
        self.skipTest('N/A: LDAP does not support multiple domains')
    def test_list_projects_for_user(self):
        domain = self._get_domain_fixture()
        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                 'domain_id': domain['id'], 'enabled': True}
        user1 = self.identity_api.create_user(user1)

        # A brand-new user has no projects.
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(0))

        # Two direct grants -> two projects.
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id=self.role_member['id'])
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(2))

        # Membership of a group with grants on the same two projects also
        # yields two projects for a second user.
        user2 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                 'domain_id': domain['id'], 'enabled': True}
        user2 = self.identity_api.create_user(user2)
        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group1 = self.identity_api.create_group(group1)
        self.identity_api.add_user_to_group(user2['id'], group1['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_baz['id'],
                                         role_id=self.role_member['id'])
        user_projects = self.assignment_api.list_projects_for_user(user2['id'])
        self.assertThat(user_projects, matchers.HasLength(2))

        # A second role on an already-listed project must not produce a
        # duplicate entry.
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_other['id'])
        user_projects = self.assignment_api.list_projects_for_user(user2['id'])
        self.assertThat(user_projects, matchers.HasLength(2))
    def test_list_projects_for_user_and_groups(self):
        domain = self._get_domain_fixture()
        # Create user1 and make it a member of group1.
        user1 = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                 'domain_id': domain['id'], 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group1 = self.identity_api.create_group(group1)
        self.identity_api.add_user_to_group(user1['id'], group1['id'])

        # Grant both the user and the group access to the same project; the
        # project must still only be listed once.
        self.assignment_api.create_grant(user_id=user1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.create_grant(group_id=group1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(1))

        # Removing only the direct user grant leaves access via the group,
        # so the project remains listed.
        self.assignment_api.delete_grant(user_id=user1['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        user_projects = self.assignment_api.list_projects_for_user(user1['id'])
        self.assertThat(user_projects, matchers.HasLength(1))
    def test_list_projects_for_user_with_grants(self):
        domain = self._get_domain_fixture()
        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': domain['id']}
        new_user = self.identity_api.create_user(new_user)

        group1 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group1 = self.identity_api.create_group(group1)
        group2 = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
        group2 = self.identity_api.create_group(group2)

        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'domain_id': domain['id']}
        self.assignment_api.create_project(project1['id'], project1)
        project2 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'domain_id': domain['id']}
        self.assignment_api.create_project(project2['id'], project2)

        self.identity_api.add_user_to_group(new_user['id'],
                                            group1['id'])
        self.identity_api.add_user_to_group(new_user['id'],
                                            group2['id'])

        # Two direct user grants plus one grant via group2 membership give
        # three distinct projects (group1 has no grants).
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=self.tenant_bar['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=project1['id'],
                                         role_id=self.role_admin['id'])
        self.assignment_api.create_grant(group_id=group2['id'],
                                         project_id=project2['id'],
                                         role_id=self.role_admin['id'])

        user_projects = self.assignment_api.list_projects_for_user(
            new_user['id'])
        self.assertEqual(3, len(user_projects))
    # The inherited tests below all require writable or multiple domains;
    # domains are a read-only, single-domain concept in the LDAP backend
    # (or the test is blocked by the referenced bug).
    def test_create_duplicate_user_name_in_different_domains(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_create_duplicate_project_name_in_different_domains(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_create_duplicate_group_name_in_different_domains(self):
        self.skipTest(
            'N/A: LDAP does not support multiple domains')

    def test_move_user_between_domains(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_move_user_between_domains_with_clashing_names_fails(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_move_group_between_domains(self):
        self.skipTest(
            'N/A: LDAP does not support multiple domains')

    def test_move_group_between_domains_with_clashing_names_fails(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_move_project_between_domains(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_move_project_between_domains_with_clashing_names_fails(self):
        self.skipTest('Domains are read-only against LDAP')

    def test_get_roles_for_user_and_domain(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_get_roles_for_groups_on_domain(self):
        self.skipTest('Blocked by bug: 1390125')

    def test_get_roles_for_groups_on_project(self):
        self.skipTest('Blocked by bug: 1390125')

    def test_list_domains_for_groups(self):
        self.skipTest('N/A: LDAP does not support multiple domains')

    def test_list_projects_for_groups(self):
        self.skipTest('Blocked by bug: 1390125')

    def test_domain_delete_hierarchy(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_list_role_assignments_unfiltered(self):
        """Unfiltered listing reflects newly added user and group grants."""
        new_domain = self._get_domain_fixture()
        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        new_project = {'id': uuid.uuid4().hex,
                       'name': uuid.uuid4().hex,
                       'domain_id': new_domain['id']}
        self.assignment_api.create_project(new_project['id'], new_project)

        # Snapshot the current count (the fixtures created some assignments),
        # then add one user grant and one group grant.
        existing_assignments = len(self.assignment_api.list_role_assignments())

        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=new_project['id'],
                                         role_id='other')
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         project_id=new_project['id'],
                                         role_id='admin')

        after_assignments = len(self.assignment_api.list_role_assignments())
        self.assertEqual(existing_assignments + 2, after_assignments)
    def test_list_role_assignments_dumb_member(self):
        # Reload with the placeholder "dumb member" enabled and rebuild the
        # fixtures from scratch.
        self.config_fixture.config(group='ldap', use_dumb_member=True)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)

        new_domain = self._get_domain_fixture()
        new_user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        new_project = {'id': uuid.uuid4().hex,
                       'name': uuid.uuid4().hex,
                       'domain_id': new_domain['id']}
        self.assignment_api.create_project(new_project['id'], new_project)
        self.assignment_api.create_grant(user_id=new_user['id'],
                                         project_id=new_project['id'],
                                         role_id='other')

        # The dumb member must never surface as an assignment of its own.
        # NOTE(review): assumes every listed assignment carries 'user_id' -
        # true here because only user grants exist at this point; confirm if
        # group grants are ever added to this test.
        assignment_ids = [a['user_id'] for a in
                          self.assignment_api.list_role_assignments()]
        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
        self.assertNotIn(dumb_id, assignment_ids)
    def test_list_user_ids_for_project_dumb_member(self):
        # Reload with the placeholder "dumb member" enabled and rebuild the
        # fixtures from scratch.
        self.config_fixture.config(group='ldap', use_dumb_member=True)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)

        user = {'name': uuid.uuid4().hex, 'password': uuid.uuid4().hex,
                'enabled': True, 'domain_id': test_backend.DEFAULT_DOMAIN_ID}
        user = self.identity_api.create_user(user)
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                user['id'])
        user_ids = self.assignment_api.list_user_ids_for_project(
            self.tenant_baz['id'])

        self.assertIn(user['id'], user_ids)

        # The dumb member must be filtered out of the project's user list.
        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
        self.assertNotIn(dumb_id, user_ids)
    def test_multi_group_grants_on_project_domain(self):
        # Inherited test needs mixed user/group grant handling.
        self.skipTest('Blocked by bug 1101287')
    def test_list_group_members_missing_entry(self):
        """Non-deleted members are still listed after one member is removed
        behind the group's back."""
        # Create a group
        group = dict(name=uuid.uuid4().hex,
                     domain_id=CONF.identity.default_domain_id)
        group_id = self.identity_api.create_group(group)['id']

        # Create a couple of users and add them to the group.
        user = dict(name=uuid.uuid4().hex,
                    domain_id=CONF.identity.default_domain_id)
        user_1_id = self.identity_api.create_user(user)['id']
        self.identity_api.add_user_to_group(user_1_id, group_id)

        user = dict(name=uuid.uuid4().hex,
                    domain_id=CONF.identity.default_domain_id)
        user_2_id = self.identity_api.create_user(user)['id']
        self.identity_api.add_user_to_group(user_2_id, group_id)

        # Delete user 2
        # NOTE(blk-u): need to go directly to user interface to keep from
        # updating the group.
        unused, driver, entity_id = (
            self.identity_api._get_domain_driver_and_entity_id(user_2_id))
        driver.user.delete(entity_id)

        # List group users and verify only user 1.
        res = self.identity_api.list_users_in_group(group_id)
        self.assertEqual(1, len(res), "Expected 1 entry (user_1)")
        self.assertEqual(user_1_id, res[0]['id'], "Expected user 1 id")
def test_list_group_members_when_no_members(self):
# List group members when there is no member in the group.
# No exception should be raised.
group = {
'domain_id': CONF.identity.default_domain_id,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex}
group = self.identity_api.create_group(group)
# If this doesn't raise, then the test is successful.
self.identity_api.list_users_in_group(group['id'])
def test_list_group_members_dumb_member(self):
self.config_fixture.config(group='ldap', use_dumb_member=True)
self.clear_database()
self.load_backends()
self.load_fixtures(default_fixtures)
group = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
group_id = self.identity_api.create_group(group)['id']
user = dict(name=uuid.uuid4().hex,
domain_id=CONF.identity.default_domain_id)
user_id = self.identity_api.create_user(user)['id']
self.identity_api.add_user_to_group(user_id, group_id)
user_ids = self.identity_api.list_users_in_group(group_id)
dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
self.assertNotIn(dumb_id, user_ids)
def test_list_domains(self):
domains = self.assignment_api.list_domains()
self.assertEqual(
[assignment.calc_default_domain()],
domains)
def test_list_domains_non_default_domain_id(self):
new_domain_id = uuid.uuid4().hex
self.config_fixture.config(group='identity',
default_domain_id=new_domain_id)
domains = self.assignment_api.list_domains()
self.assertEqual(new_domain_id, domains[0]['id'])
    def test_authenticate_requires_simple_bind(self):
        user = {
            'name': 'NO_META',
            'domain_id': test_backend.DEFAULT_DOMAIN_ID,
            'password': 'no_meta2',
            'enabled': True,
        }
        user = self.identity_api.create_user(user)
        self.assignment_api.add_user_to_project(self.tenant_baz['id'],
                                                user['id'])
        # Strip the driver's bind credentials: with nothing to bind as,
        # authentication must fail rather than succeed anonymously.
        driver = self.identity_api._select_identity_driver(
            user['domain_id'])
        driver.user.LDAP_USER = None
        driver.user.LDAP_PASSWORD = None

        self.assertRaises(AssertionError,
                          self.identity_api.authenticate,
                          context={},
                          user_id=user['id'],
                          password=None)
    def test_group_crud(self):
        # Create / read.
        group = {
            'domain_id': CONF.identity.default_domain_id,
            'name': uuid.uuid4().hex,
            'description': uuid.uuid4().hex}
        group = self.identity_api.create_group(group)
        group_ref = self.identity_api.get_group(group['id'])
        self.assertDictEqual(group_ref, group)

        # Update.
        group['description'] = uuid.uuid4().hex
        self.identity_api.update_group(group['id'], group)
        group_ref = self.identity_api.get_group(group['id'])
        self.assertDictEqual(group_ref, group)

        # Delete.
        self.identity_api.delete_group(group['id'])
        self.assertRaises(exception.GroupNotFound,
                          self.identity_api.get_group,
                          group['id'])
    def test_create_user_none_mapping(self):
        # Attributes that are configured to be ignored must not prevent
        # creating a user whose mapped value would be None.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_attribute_ignore = ['enabled', 'email',
                                           'tenants', 'tenantId']
        self.reload_backends(CONF.identity.default_domain_id)
        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                'default_project_id': 'maps_to_none',
                }

        # If this doesn't raise, then the test is successful.
        user = self.identity_api.create_user(user)
    def test_unignored_user_none_mapping(self):
        # A user with no value for an attribute that is NOT in the ignore
        # list must still round-trip through create/get without error.
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_attribute_ignore = ['enabled', 'email',
                                           'tenants', 'tenantId']
        self.reload_backends(CONF.identity.default_domain_id)

        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id,
                }

        user_ref = self.identity_api.create_user(user)

        # If this doesn't raise, then the test is successful.
        self.identity_api.get_user(user_ref['id'])
    def test_update_user_name(self):
        # The LDAP driver refuses to rename a user, so the inherited test
        # is expected to raise Conflict rather than succeed.
        self.assertRaises(exception.Conflict,
                          super(BaseLDAPIdentity, self).test_update_user_name)

    def test_arbitrary_attributes_are_returned_from_get_user(self):
        self.skipTest("Using arbitrary attributes doesn't work under LDAP")

    def test_new_arbitrary_attributes_are_returned_from_update_user(self):
        self.skipTest("Using arbitrary attributes doesn't work under LDAP")

    def test_updated_arbitrary_attributes_are_returned_from_update_user(self):
        self.skipTest("Using arbitrary attributes doesn't work under LDAP")

    def test_cache_layer_domain_crud(self):
        self.skipTest('Domains are read-only against LDAP')
    def test_user_id_comma(self):
        """Check a user with a comma in its ID can be handled via groups."""
        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
        # Since we want to fake up this special ID, we'll squirt it
        # direct into the driver and bypass the manager layer.
        user_id = u'Doe, John'
        user = {
            'id': user_id,
            'name': self.getUniqueString(),
            'password': self.getUniqueString(),
            'domain_id': CONF.identity.default_domain_id,
        }
        user = self.identity_api.driver.create_user(user_id, user)

        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = self.identity_api.list_users()
        public_user_id = None
        for ref in ref_list:
            if ref['name'] == user['name']:
                public_user_id = ref['id']
                break

        # Create a group, also directly at the driver.
        group_id = uuid.uuid4().hex
        group = {
            'id': group_id,
            'name': self.getUniqueString(prefix='tuidc'),
            'description': self.getUniqueString(),
            'domain_id': CONF.identity.default_domain_id,
        }
        group = self.identity_api.driver.create_group(group_id, group)
        # Now we'll use the manager to discover it, which will create a
        # Public ID for it.
        ref_list = self.identity_api.list_groups()
        public_group_id = None
        for ref in ref_list:
            if ref['name'] == group['name']:
                public_group_id = ref['id']
                break

        # Put the user in the group
        self.identity_api.add_user_to_group(public_user_id, public_group_id)

        # List groups for user.
        ref_list = self.identity_api.list_groups_for_user(public_user_id)

        group['id'] = public_group_id
        self.assertThat(ref_list, matchers.Equals([group]))
    def test_user_id_comma_grants(self):
        """Role grants work for a user whose ID contains a comma."""
        # Create a user with a , in their ID
        # NOTE(blk-u): the DN for this user is hard-coded in fakeldap!
        # Since we want to fake up this special ID, we'll squirt this
        # directly into the driver and bypass the manager layer.
        user_id = u'Doe, John'
        user = {
            'id': user_id,
            'name': self.getUniqueString(),
            'password': self.getUniqueString(),
            'domain_id': CONF.identity.default_domain_id,
        }
        self.identity_api.driver.create_user(user_id, user)
        # Now use the manager to discover it, which will create a public
        # ID for it.
        ref_list = self.identity_api.list_users()
        public_user_id = None
        for ref in ref_list:
            if ref['name'] == user['name']:
                public_user_id = ref['id']
                break
        # Grant the user a role on a project.
        role_id = 'member'
        project_id = self.tenant_baz['id']
        self.assignment_api.create_grant(role_id, user_id=public_user_id,
                                         project_id=project_id)
        role_ref = self.assignment_api.get_grant(role_id,
                                                 user_id=public_user_id,
                                                 project_id=project_id)
        self.assertEqual(role_id, role_ref['id'])
    def test_user_enabled_ignored_disable_error(self):
        """Disabling a user fails when 'enabled' is an ignored attribute."""
        # When the server is configured so that the enabled attribute is
        # ignored for users, users cannot be disabled.
        self.config_fixture.config(group='ldap',
                                   user_attribute_ignore=['enabled'])
        # Need to re-load backends for the config change to take effect.
        self.load_backends()
        # Attempt to disable the user.
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.update_user, self.user_foo['id'],
                          {'enabled': False})
        user_info = self.identity_api.get_user(self.user_foo['id'])
        # If 'enabled' is ignored then 'enabled' isn't returned as part of
        # the user ref.
        self.assertNotIn('enabled', user_info)
    def test_group_enabled_ignored_disable_error(self):
        """Disabling a group fails when 'enabled' is an ignored attribute."""
        self.config_fixture.config(group='ldap',
                                   group_attribute_ignore=['enabled'])
        # Re-load backends for the config change to take effect.
        self.load_backends()
        new_domain = self._get_domain_fixture()
        new_group = {'domain_id': new_domain['id'],
                     'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        # Attempt to disable the group.
        self.assertRaises(exception.ForbiddenAction,
                          self.identity_api.update_group, new_group['id'],
                          {'enabled': False})
        group_info = self.identity_api.get_group(new_group['id'])
        # If 'enabled' is ignored then 'enabled' isn't returned as part of
        # the group ref.
        self.assertNotIn('enabled', group_info)
    def test_project_enabled_ignored_disable_error(self):
        """Disabling a project fails when 'enabled' is an ignored attribute;
        the project stays enabled.
        """
        self.config_fixture.config(group='ldap',
                                   project_attribute_ignore=['enabled'])
        # Re-load backends for the config change to take effect.
        self.load_backends()
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.update_project,
                          self.tenant_baz['id'], {'enabled': False})
        project_info = self.assignment_api.get_project(self.tenant_baz['id'])
        # The project must still report enabled=True (identity, not just
        # truthiness).
        self.assertIs(True, project_info['enabled'])
class LDAPIdentity(BaseLDAPIdentity, tests.TestCase):
    def setUp(self):
        """Set up a fresh SQL database fixture before the base LDAP setup."""
        self.useFixture(database.Database())
        super(LDAPIdentity, self).setUp()
    def load_fixtures(self, fixtures):
        """Create the LDAP group container before loading base fixtures."""
        create_group_container(self.identity_api)
        super(LDAPIdentity, self).load_fixtures(fixtures)
    def test_configurable_allowed_project_actions(self):
        """Project create/get/update/delete succeed under default config."""
        tenant = {'id': u'fäké1', 'name': u'fäké1', 'enabled': True}
        self.assignment_api.create_project(u'fäké1', tenant)
        tenant_ref = self.assignment_api.get_project(u'fäké1')
        self.assertEqual(u'fäké1', tenant_ref['id'])
        tenant['enabled'] = False
        self.assignment_api.update_project(u'fäké1', tenant)
        self.assignment_api.delete_project(u'fäké1')
        # After deletion the project must no longer be retrievable.
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          u'fäké1')
def test_configurable_subtree_delete(self):
self.config_fixture.config(group='ldap', allow_subtree_delete=True)
self.load_backends()
project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id}
self.assignment_api.create_project(project1['id'], project1)
role1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
self.assignment_api.create_role(role1['id'], role1)
user1 = {'name': uuid.uuid4().hex,
'domain_id': CONF.identity.default_domain_id,
'password': uuid.uuid4().hex,
'enabled': True}
user1 = self.identity_api.create_user(user1)
self.assignment_api.add_role_to_user_and_project(
user_id=user1['id'],
tenant_id=project1['id'],
role_id=role1['id'])
self.assignment_api.delete_project(project1['id'])
self.assertRaises(exception.ProjectNotFound,
self.assignment_api.get_project,
project1['id'])
self.assignment_api.create_project(project1['id'], project1)
list = self.assignment_api.get_roles_for_user_and_project(
user1['id'],
project1['id'])
self.assertEqual(0, len(list))
    def test_configurable_forbidden_project_actions(self):
        """Project create/update/delete raise ForbiddenAction when the
        corresponding project_allow_* options are disabled.
        """
        self.config_fixture.config(
            group='ldap', project_allow_create=False,
            project_allow_update=False, project_allow_delete=False)
        self.load_backends()
        tenant = {'id': u'fäké1', 'name': u'fäké1'}
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.create_project,
                          u'fäké1',
                          tenant)
        self.tenant_bar['enabled'] = False
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.update_project,
                          self.tenant_bar['id'],
                          self.tenant_bar)
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.delete_project,
                          self.tenant_bar['id'])
    def test_configurable_allowed_role_actions(self):
        """Role create/get/update/delete succeed under default config."""
        role = {'id': u'fäké1', 'name': u'fäké1'}
        self.assignment_api.create_role(u'fäké1', role)
        role_ref = self.assignment_api.get_role(u'fäké1')
        self.assertEqual(u'fäké1', role_ref['id'])
        role['name'] = u'fäké2'
        self.assignment_api.update_role(u'fäké1', role)
        self.assignment_api.delete_role(u'fäké1')
        # After deletion the role must no longer be retrievable.
        self.assertRaises(exception.RoleNotFound,
                          self.assignment_api.get_role,
                          u'fäké1')
    def test_configurable_forbidden_role_actions(self):
        """Role create/update/delete raise ForbiddenAction when the
        corresponding role_allow_* options are disabled.
        """
        self.config_fixture.config(
            group='ldap', role_allow_create=False, role_allow_update=False,
            role_allow_delete=False)
        self.load_backends()
        role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.create_role,
                          role['id'],
                          role)
        self.role_member['name'] = uuid.uuid4().hex
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.update_role,
                          self.role_member['id'],
                          self.role_member)
        self.assertRaises(exception.ForbiddenAction,
                          self.assignment_api.delete_role,
                          self.role_member['id'])
    def test_project_filter(self):
        """A non-matching project_filter hides existing projects."""
        tenant_ref = self.assignment_api.get_project(self.tenant_bar['id'])
        self.assertDictEqual(tenant_ref, self.tenant_bar)

        self.config_fixture.config(group='ldap',
                                   project_filter='(CN=DOES_NOT_MATCH)')
        self.load_backends()
        # NOTE: cache entries must be invalidated so the new filter is
        # actually consulted rather than a stale cached ref returned.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        self.assignment_api.get_role(self.role_member['id'])
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_bar['id'])
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          self.tenant_bar['id'])
    def test_role_filter(self):
        """A non-matching role_filter hides existing roles."""
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertDictEqual(role_ref, self.role_member)

        self.config_fixture.config(group='ldap',
                                   role_filter='(CN=DOES_NOT_MATCH)')
        self.load_backends()
        # Invalidate the cached role so the new filter takes effect.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        self.assertRaises(exception.RoleNotFound,
                          self.assignment_api.get_role,
                          self.role_member['id'])
    def test_dumb_member(self):
        """The dumb-member placeholder entry is not exposed as a user."""
        self.config_fixture.config(group='ldap', use_dumb_member=True)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Derive the user ID from the configured dumb-member DN and make
        # sure looking it up as a user fails.
        dumb_id = common_ldap.BaseLdap._dn_to_id(CONF.ldap.dumb_member)
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          dumb_id)
    def test_project_attribute_mapping(self):
        """Project name/description LDAP attribute mappings are honored,
        including when the two mappings are swapped.
        """
        self.config_fixture.config(
            group='ldap', project_name_attribute='ou',
            project_desc_attribute='description',
            project_enabled_attribute='enabled')
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Invalidate the cached ref so the mapping change is observed.
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_baz['id'])
        tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
        self.assertEqual(self.tenant_baz['name'], tenant_ref['name'])
        self.assertEqual(
            self.tenant_baz['description'],
            tenant_ref['description'])
        self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])

        # Swap the name and description mappings: the returned ref's
        # 'name' and 'description' fields should swap accordingly.
        self.config_fixture.config(group='ldap',
                                   project_name_attribute='description',
                                   project_desc_attribute='ou')
        self.load_backends()
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_baz['id'])
        tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
        self.assertEqual(self.tenant_baz['description'], tenant_ref['name'])
        self.assertEqual(self.tenant_baz['name'], tenant_ref['description'])
        self.assertEqual(self.tenant_baz['enabled'], tenant_ref['enabled'])
    def test_project_attribute_ignore(self):
        """Attributes listed in project_attribute_ignore are omitted from
        the returned project ref.
        """
        self.config_fixture.config(
            group='ldap',
            project_attribute_ignore=['name', 'description', 'enabled'])
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Invalidate the cached ref so the ignore list is observed.
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   self.tenant_baz['id'])
        tenant_ref = self.assignment_api.get_project(self.tenant_baz['id'])
        self.assertEqual(self.tenant_baz['id'], tenant_ref['id'])
        self.assertNotIn('name', tenant_ref)
        self.assertNotIn('description', tenant_ref)
        self.assertNotIn('enabled', tenant_ref)
    def test_role_attribute_mapping(self):
        """role_name_attribute controls which LDAP attribute supplies the
        role name; mapping to an absent attribute drops 'name' entirely.
        """
        self.config_fixture.config(group='ldap', role_name_attribute='ou')
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Invalidate the cached role so the mapping change is observed.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertEqual(self.role_member['id'], role_ref['id'])
        self.assertEqual(self.role_member['name'], role_ref['name'])

        # Remap the name to 'sn', which the fixture entries don't carry.
        self.config_fixture.config(group='ldap', role_name_attribute='sn')
        self.load_backends()
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertEqual(self.role_member['id'], role_ref['id'])
        self.assertNotIn('name', role_ref)
    def test_role_attribute_ignore(self):
        """Attributes listed in role_attribute_ignore are omitted from the
        returned role ref.
        """
        self.config_fixture.config(group='ldap',
                                   role_attribute_ignore=['name'])
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Invalidate the cached role so the ignore list is observed.
        self.assignment_api.get_role.invalidate(self.assignment_api,
                                                self.role_member['id'])
        role_ref = self.assignment_api.get_role(self.role_member['id'])
        self.assertEqual(self.role_member['id'], role_ref['id'])
        self.assertNotIn('name', role_ref)
    def test_user_enable_attribute_mask(self):
        """With user_enabled_mask=2 and default 512, the stored enabled
        attribute is 512 when enabled and 514 (512|2) when disabled, and
        the raw masked value never leaks into the returned user ref.
        """
        self.config_fixture.config(group='ldap', user_enabled_mask=2,
                                   user_enabled_default='512')
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)

        # Create an enabled user: stored value is the plain default 512.
        user = {'name': u'fäké1', 'enabled': True,
                'domain_id': CONF.identity.default_domain_id}

        user_ref = self.identity_api.create_user(user)

        # Use assertIs() because the attribute should be True (not 512).
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)

        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([512], enabled_vals)

        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)

        # Disable the user: the mask bit (2) is OR-ed in -> 514.
        user['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user)
        self.assertIs(user_ref['enabled'], False)
        self.assertNotIn('enabled_nomask', user_ref)

        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([514], enabled_vals)

        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(user_ref['enabled'], False)
        self.assertNotIn('enabled_nomask', user_ref)

        # Re-enable the user: the mask bit is cleared -> back to 512.
        user['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user)
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)

        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([512], enabled_vals)

        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(user_ref['enabled'], True)
        self.assertNotIn('enabled_nomask', user_ref)
    def test_user_enabled_invert(self):
        """With user_enabled_invert, the stored LDAP value is the logical
        negation of the API-level 'enabled' flag, and an unspecified
        flag falls back to the (inverted) user_enabled_default.
        """
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default=False)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)

        user1 = {'name': u'fäké1', 'enabled': True,
                 'domain_id': CONF.identity.default_domain_id}

        user2 = {'name': u'fäké2', 'enabled': False,
                 'domain_id': CONF.identity.default_domain_id}

        # user3 omits 'enabled' to exercise the default.
        user3 = {'name': u'fäké3',
                 'domain_id': CONF.identity.default_domain_id}

        # Ensure that the enabled attribute is set to the inverse of what
        # the API reports (True API-side -> False stored, and vice versa).
        user_ref = self.identity_api.create_user(user1)
        self.assertIs(True, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([False], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])

        user1['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(False, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([True], enabled_vals)

        user1['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(True, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([False], enabled_vals)

        user_ref = self.identity_api.create_user(user2)
        self.assertIs(False, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([True], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(False, user_ref['enabled'])

        # No 'enabled' given: default False is inverted, so the user is
        # reported enabled and stored as False.
        user_ref = self.identity_api.create_user(user3)
        self.assertIs(True, user_ref['enabled'])
        enabled_vals = self.get_user_enabled_vals(user_ref)
        self.assertEqual([False], enabled_vals)
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_enabled_invert_no_enabled_value(self, mock_ldap_get):
        """An LDAP entry with no enabled attribute reports the inverse of
        user_enabled_default when user_enabled_invert is set.
        """
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default=False)
        # Fake an LDAP entry that carries no enabled attribute at all.
        mock_ldap_get.return_value = (
            'cn=junk,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'email': [uuid.uuid4().hex],
                'cn': ['junk']
            }
        )
        user_api = identity.backends.ldap.UserApi(CONF)
        user_ref = user_api.get('junk')
        # Ensure the enabled value returned is the inverse of the default.
        self.assertIs(not CONF.ldap.user_enabled_default, user_ref['enabled'])
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_enabled_invert_default_str_value(self, mock_ldap_get):
        """A string user_enabled_default ('False') still inverts correctly:
        the user is reported as enabled.
        """
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default='False')
        # Fake an LDAP entry that carries no enabled attribute at all.
        mock_ldap_get.return_value = (
            'cn=junk,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'email': [uuid.uuid4().hex],
                'cn': ['junk']
            }
        )
        user_api = identity.backends.ldap.UserApi(CONF)
        user_ref = user_api.get('junk')
        # The string default must not be treated as truthy as-is.
        self.assertIs(True, user_ref['enabled'])
    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'simple_bind_s')
    def test_user_api_get_connection_no_user_password(self, mocked_method):
        """No bind is attempted when neither user nor password is set."""
        self.config_fixture.config(group='ldap', user=None, password=None)
        user_api = identity.backends.ldap.UserApi(CONF)
        user_api.get_connection(user=None, password=None)
        self.assertFalse(mocked_method.called,
                         msg='`simple_bind_s` method was unexpectedly called')
    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
    def test_chase_referrals_off(self, mocked_fakeldap):
        """chase_referrals=False is passed through to the LDAP handler."""
        self.config_fixture.config(
            group='ldap',
            url='fake://memory',
            chase_referrals=False)
        user_api = identity.backends.ldap.UserApi(CONF)
        user_api.get_connection(user=None, password=None)
        # The last call's kwargs must carry the configured value.
        self.assertFalse(mocked_fakeldap.call_args[-1]['chase_referrals'])
    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
    def test_chase_referrals_on(self, mocked_fakeldap):
        """chase_referrals=True is passed through to the LDAP handler."""
        self.config_fixture.config(
            group='ldap',
            url='fake://memory',
            chase_referrals=True)
        user_api = identity.backends.ldap.UserApi(CONF)
        user_api.get_connection(user=None, password=None)
        # The last call's kwargs must carry the configured value.
        self.assertTrue(mocked_fakeldap.call_args[-1]['chase_referrals'])
    @mock.patch.object(common_ldap_core.KeystoneLDAPHandler, 'connect')
    def test_debug_level_set(self, mocked_fakeldap):
        """The configured debug_level is passed through to the handler."""
        level = 12345
        self.config_fixture.config(
            group='ldap',
            url='fake://memory',
            debug_level=level)
        user_api = identity.backends.ldap.UserApi(CONF)
        user_api.get_connection(user=None, password=None)
        # The last call's kwargs must carry the configured value.
        self.assertEqual(level, mocked_fakeldap.call_args[-1]['debug_level'])
    def test_wrong_ldap_scope(self):
        """An invalid query_scope makes backend construction fail."""
        self.config_fixture.config(group='ldap', query_scope=uuid.uuid4().hex)
        self.assertRaisesRegexp(
            ValueError,
            'Invalid LDAP scope: %s. *' % CONF.ldap.query_scope,
            identity.backends.ldap.Identity)
    def test_wrong_alias_dereferencing(self):
        """An invalid alias_dereferencing makes backend construction fail."""
        self.config_fixture.config(group='ldap',
                                   alias_dereferencing=uuid.uuid4().hex)
        self.assertRaisesRegexp(
            ValueError,
            'Invalid LDAP deref option: %s\.' % CONF.ldap.alias_dereferencing,
            identity.backends.ldap.Identity)
    def test_is_dumb_member(self):
        """_is_dumb_member recognizes the configured dumb-member DN."""
        self.config_fixture.config(group='ldap',
                                   use_dumb_member=True)
        self.load_backends()

        dn = 'cn=dumb,dc=nonexistent'
        self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
    def test_is_dumb_member_upper_case_keys(self):
        """_is_dumb_member matches case-insensitively on DN attribute keys."""
        self.config_fixture.config(group='ldap',
                                   use_dumb_member=True)
        self.load_backends()

        dn = 'CN=dumb,DC=nonexistent'
        self.assertTrue(self.identity_api.driver.user._is_dumb_member(dn))
    def test_is_dumb_member_with_false_use_dumb_member(self):
        """_is_dumb_member is always False when use_dumb_member is off."""
        self.config_fixture.config(group='ldap',
                                   use_dumb_member=False)
        self.load_backends()

        dn = 'cn=dumb,dc=nonexistent'
        self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
    def test_is_dumb_member_not_dumb(self):
        """_is_dumb_member is False for a DN other than the dumb member."""
        self.config_fixture.config(group='ldap',
                                   use_dumb_member=True)
        self.load_backends()
        dn = 'ou=some,dc=example.com'
        self.assertFalse(self.identity_api.driver.user._is_dumb_member(dn))
    def test_user_extra_attribute_mapping(self):
        """An additional attribute mapping ('description:name') writes the
        user's name into the LDAP 'description' attribute on create.
        """
        self.config_fixture.config(
            group='ldap',
            user_additional_attribute_mapping=['description:name'])
        self.load_backends()
        user = {
            'name': 'EXTRA_ATTRIBUTES',
            'password': 'extra',
            'domain_id': CONF.identity.default_domain_id
        }
        user = self.identity_api.create_user(user)
        # Inspect the raw LDAP entry to confirm the mapped attribute.
        dn, attrs = self.identity_api.driver.user._ldap_get(user['id'])
        self.assertThat([user['name']], matchers.Equals(attrs['description']))
    def test_user_extra_attribute_mapping_description_is_returned(self):
        """An identity mapping ('description:description') round-trips the
        description through create and list.
        """
        self.config_fixture.config(
            group='ldap',
            user_additional_attribute_mapping=['description:description'])
        self.load_backends()

        description = uuid.uuid4().hex
        user = {
            'name': uuid.uuid4().hex,
            'description': description,
            'password': uuid.uuid4().hex,
            'domain_id': CONF.identity.default_domain_id
        }
        user = self.identity_api.create_user(user)
        # Fetch via the driver's list and find our user by ID.
        res = self.identity_api.driver.user.get_all()

        new_user = [u for u in res if u['id'] == user['id']][0]
        self.assertThat(new_user['description'], matchers.Equals(description))
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_user_mixed_case_attribute(self, mock_ldap_get):
        """LDAP attribute names are matched case-insensitively ('sN',
        'MaIl') when building the user ref.
        """
        mock_ldap_get.return_value = (
            'cn=junk,dc=example,dc=com',
            {
                'sN': [uuid.uuid4().hex],
                'MaIl': [uuid.uuid4().hex],
                'cn': ['junk']
            }
        )
        user = self.identity_api.get_user('junk')
        self.assertEqual(mock_ldap_get.return_value[1]['sN'][0],
                         user['name'])
        self.assertEqual(mock_ldap_get.return_value[1]['MaIl'][0],
                         user['email'])
    def test_parse_extra_attribute_mapping(self):
        """_parse_extra_attrs keeps well-formed 'ldap:keystone' pairs,
        drops entries with no colon or too many colons, and maps a
        trailing colon to an empty value.
        """
        option_list = ['description:name', 'gecos:password',
                       'fake:invalid', 'invalid1', 'invalid2:',
                       'description:name:something']
        mapping = self.identity_api.driver.user._parse_extra_attrs(option_list)
        expected_dict = {'description': 'name', 'gecos': 'password',
                         'fake': 'invalid', 'invalid2': ''}
        self.assertDictEqual(expected_dict, mapping)
    def test_domain_crud(self):
        """Domains are read-only against LDAP: creation of a new domain is
        Forbidden, mutating the default domain is Forbidden, and the
        never-created domain raises DomainNotFound everywhere else.
        """
        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                  'enabled': True, 'description': uuid.uuid4().hex}
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.create_domain,
                          domain['id'],
                          domain)
        # Re-creating the default domain conflicts rather than being
        # merely forbidden.
        self.assertRaises(exception.Conflict,
                          self.assignment_api.create_domain,
                          CONF.identity.default_domain_id,
                          domain)
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])

        domain['description'] = uuid.uuid4().hex
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.update_domain,
                          domain['id'],
                          domain)
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.update_domain,
                          CONF.identity.default_domain_id,
                          domain)
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.delete_domain,
                          domain['id'])
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.delete_domain,
                          CONF.identity.default_domain_id)
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])
    @tests.skip_if_no_multiple_domains_support
    def test_create_domain_case_sensitivity(self):
        """Domain creation is Forbidden under LDAP, so the base case
        sensitivity check cannot run; assert the Forbidden instead.
        """
        # domain name should be case sensitive
        ref = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex}
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.create_domain,
                          ref['id'],
                          ref)
    def test_cache_layer_domain_crud(self):
        """Skipped: domains cannot be created/updated against LDAP."""
        # TODO(morganfainberg): This also needs to be removed when full LDAP
        # implementation is submitted.  No need to duplicate the above test,
        # just skip this time.
        self.skipTest('Domains are read-only against LDAP')
    def test_domain_rename_invalidates_get_domain_by_name_cache(self):
        """Domain rename is Forbidden under LDAP, so the base-class cache
        invalidation test must raise instead of passing.
        """
        parent = super(LDAPIdentity, self)
        self.assertRaises(
            exception.Forbidden,
            parent.test_domain_rename_invalidates_get_domain_by_name_cache)
    def test_project_rename_invalidates_get_project_by_name_cache(self):
        """Project rename is Forbidden under LDAP, so the base-class cache
        invalidation test must raise instead of passing.
        """
        parent = super(LDAPIdentity, self)
        self.assertRaises(
            exception.Forbidden,
            parent.test_project_rename_invalidates_get_project_by_name_cache)
    def test_project_crud(self):
        # NOTE(topol): LDAP implementation does not currently support the
        #              updating of a project name so this method override
        #              provides a different update test (description only).
        project = {'id': uuid.uuid4().hex,
                   'name': uuid.uuid4().hex,
                   'domain_id': CONF.identity.default_domain_id,
                   'description': uuid.uuid4().hex,
                   'enabled': True,
                   'parent_id': None}
        self.assignment_api.create_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        self.assertDictEqual(project_ref, project)

        # Update only the description; name updates are unsupported.
        project['description'] = uuid.uuid4().hex
        self.assignment_api.update_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        self.assertDictEqual(project_ref, project)

        self.assignment_api.delete_project(project['id'])
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project['id'])
    @tests.skip_if_cache_disabled('assignment')
    def test_cache_layer_project_crud(self):
        """get_project caching: driver-level changes stay invisible until
        the cache entry is invalidated; manager-level changes refresh it.
        """
        # NOTE(morganfainberg): LDAP implementation does not currently support
        # updating project names.  This method override provides a different
        # update test (description only).
        project = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                   'domain_id': CONF.identity.default_domain_id,
                   'description': uuid.uuid4().hex}
        project_id = project['id']
        # Create a project
        self.assignment_api.create_project(project_id, project)
        self.assignment_api.get_project(project_id)
        updated_project = copy.deepcopy(project)
        updated_project['description'] = uuid.uuid4().hex
        # Update project, bypassing assignment_api manager
        self.assignment_api.driver.update_project(project_id,
                                                  updated_project)
        # Verify get_project still returns the original project_ref
        self.assertDictContainsSubset(
            project, self.assignment_api.get_project(project_id))
        # Invalidate cache
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   project_id)
        # Verify get_project now returns the new project
        self.assertDictContainsSubset(
            updated_project,
            self.assignment_api.get_project(project_id))
        # Update project using the assignment_api manager back to original
        self.assignment_api.update_project(project['id'], project)
        # Verify get_project returns the original project_ref
        self.assertDictContainsSubset(
            project, self.assignment_api.get_project(project_id))
        # Delete project bypassing assignment_api
        self.assignment_api.driver.delete_project(project_id)
        # Verify get_project still returns the project_ref
        self.assertDictContainsSubset(
            project, self.assignment_api.get_project(project_id))
        # Invalidate cache
        self.assignment_api.get_project.invalidate(self.assignment_api,
                                                   project_id)
        # Verify ProjectNotFound now raised
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project_id)
        # recreate project
        self.assignment_api.create_project(project_id, project)
        self.assignment_api.get_project(project_id)
        # delete project
        self.assignment_api.delete_project(project_id)
        # Verify ProjectNotFound is raised
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project_id)
    def _assert_create_hierarchy_not_allowed(self):
        """Assert LDAP rejects hierarchical projects.

        Creates two sibling projects, verifying that setting parent_id
        raises InvalidParentProject. Returns the two (flat) projects so
        callers can run further hierarchy-related checks against them.
        """
        domain = self._get_domain_fixture()

        project1 = {'id': uuid.uuid4().hex,
                    'name': uuid.uuid4().hex,
                    'description': '',
                    'domain_id': domain['id'],
                    'enabled': True,
                    'parent_id': None}
        self.assignment_api.create_project(project1['id'], project1)

        # Creating project2 under project1. LDAP will not allow
        # the creation of a project with parent_id being set
        project2 = {'id': uuid.uuid4().hex,
                    'name': uuid.uuid4().hex,
                    'description': '',
                    'domain_id': domain['id'],
                    'enabled': True,
                    'parent_id': project1['id']}
        self.assertRaises(exception.InvalidParentProject,
                          self.assignment_api.create_project,
                          project2['id'],
                          project2)

        # Now, we'll create project 2 with no parent
        project2['parent_id'] = None
        self.assignment_api.create_project(project2['id'], project2)

        # Return projects so that the LDAP-specific hierarchy tests can
        # exercise them.
        return [project1, project2]
    def test_check_leaf_projects(self):
        """Every LDAP project is a leaf (no hierarchy support)."""
        projects = self._assert_create_hierarchy_not_allowed()
        for project in projects:
            self.assertTrue(self.assignment_api.is_leaf_project(project))
    def test_list_projects_in_subtree(self):
        """Every LDAP project has an empty subtree (no hierarchy support)."""
        projects = self._assert_create_hierarchy_not_allowed()
        for project in projects:
            subtree_list = self.assignment_api.list_projects_in_subtree(
                project)
            self.assertEqual(0, len(subtree_list))
    def test_list_project_parents(self):
        """Every LDAP project has no parents (no hierarchy support)."""
        projects = self._assert_create_hierarchy_not_allowed()
        for project in projects:
            parents_list = self.assignment_api.list_project_parents(project)
            self.assertEqual(0, len(parents_list))
    def test_hierarchical_projects_crud(self):
        """Hierarchical project CRUD is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_create_project_under_disabled_one(self):
        """Creating a child project is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_create_project_with_invalid_parent(self):
        """Creating a project with any parent_id is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_create_leaf_project_with_invalid_domain(self):
        """Leaf-project hierarchy scenarios are rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_update_project_parent(self):
        """Updating a project's parent is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_enable_project_with_disabled_parent(self):
        """Parent/child enable scenarios are rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_disable_hierarchical_leaf_project(self):
        """Hierarchical leaf disable is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_disable_hierarchical_not_leaf_project(self):
        """Hierarchical non-leaf disable is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_delete_hierarchical_leaf_project(self):
        """Hierarchical leaf delete is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_delete_hierarchical_not_leaf_project(self):
        """Hierarchical non-leaf delete is rejected under LDAP."""
        self._assert_create_hierarchy_not_allowed()
    def test_check_hierarchy_depth(self):
        """Every LDAP project sits at hierarchy depth 1 (flat)."""
        projects = self._assert_create_hierarchy_not_allowed()
        for project in projects:
            depth = self._get_hierarchy_depth(project['id'])
            self.assertEqual(1, depth)
    def test_multi_role_grant_by_user_group_on_project_domain(self):
        """Two project-level role grants are both reported for the project,
        while the domain-level role list for the same user stays empty.
        """
        role_list = []
        for _ in range(2):
            role = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex}
            self.assignment_api.create_role(role['id'], role)
            role_list.append(role)

        user1 = {'name': uuid.uuid4().hex,
                 'domain_id': CONF.identity.default_domain_id,
                 'password': uuid.uuid4().hex,
                 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        project1 = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                    'domain_id': CONF.identity.default_domain_id}
        self.assignment_api.create_project(project1['id'], project1)

        self.assignment_api.add_role_to_user_and_project(
            user_id=user1['id'],
            tenant_id=project1['id'],
            role_id=role_list[0]['id'])
        self.assignment_api.add_role_to_user_and_project(
            user_id=user1['id'],
            tenant_id=project1['id'],
            role_id=role_list[1]['id'])

        # Both project-scoped roles must be present.
        combined_list = self.assignment_api.get_roles_for_user_and_project(
            user1['id'],
            project1['id'])
        self.assertEqual(2, len(combined_list))
        self.assertIn(role_list[0]['id'], combined_list)
        self.assertIn(role_list[1]['id'], combined_list)

        # Project grants must not leak into the domain-scoped role list.
        combined_role_list = self.assignment_api.get_roles_for_user_and_domain(
            user1['id'], CONF.identity.default_domain_id)
        self.assertEqual(0, len(combined_role_list))
    def test_list_projects_for_alternate_domain(self):
        """Skipped: the LDAP backend supports only a single domain."""
        self.skipTest(
            'N/A: LDAP does not support multiple domains')
    def test_get_default_domain_by_name(self):
        """Looking the default domain up by name returns the fixture ref."""
        domain = self._get_domain_fixture()

        domain_ref = self.assignment_api.get_domain_by_name(domain['name'])
        self.assertEqual(domain_ref, domain)
    def test_base_ldap_connection_deref_option(self):
        """Each alias_dereferencing name maps to the matching ldap.DEREF_*
        option on the resulting connection ('default' keeps the library's
        global OPT_DEREF value).
        """
        def get_conn(deref_name):
            # Rebuild a BaseLdap with the given deref setting and hand
            # back a fresh connection to inspect.
            self.config_fixture.config(group='ldap',
                                       alias_dereferencing=deref_name)
            base_ldap = common_ldap.BaseLdap(CONF)
            return base_ldap.get_connection()

        conn = get_conn('default')
        self.assertEqual(ldap.get_option(ldap.OPT_DEREF),
                         conn.get_option(ldap.OPT_DEREF))

        conn = get_conn('always')
        self.assertEqual(ldap.DEREF_ALWAYS,
                         conn.get_option(ldap.OPT_DEREF))

        conn = get_conn('finding')
        self.assertEqual(ldap.DEREF_FINDING,
                         conn.get_option(ldap.OPT_DEREF))

        conn = get_conn('never')
        self.assertEqual(ldap.DEREF_NEVER,
                         conn.get_option(ldap.OPT_DEREF))

        conn = get_conn('searching')
        self.assertEqual(ldap.DEREF_SEARCHING,
                         conn.get_option(ldap.OPT_DEREF))
    def test_list_users_no_dn(self):
        """list_users returns every fixture user and never leaks the raw
        LDAP 'dn' into any user ref.
        """
        users = self.identity_api.list_users()
        self.assertEqual(len(default_fixtures.USERS), len(users))
        user_ids = set(user['id'] for user in users)
        # Fixture users are exposed on the test case as self.user_<id>.
        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
                                for user in default_fixtures.USERS)
        for user_ref in users:
            self.assertNotIn('dn', user_ref)
        self.assertEqual(expected_user_ids, user_ids)
    def test_list_groups_no_dn(self):
        """list_groups returns every created group and never leaks the raw
        LDAP 'dn' into any group ref.
        """
        # Create some test groups.
        domain = self._get_domain_fixture()
        expected_group_ids = []
        numgroups = 3
        for _ in range(numgroups):
            group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
            group = self.identity_api.create_group(group)
            expected_group_ids.append(group['id'])
        # Fetch the test groups and ensure that they don't contain a dn.
        groups = self.identity_api.list_groups()
        self.assertEqual(numgroups, len(groups))
        group_ids = set(group['id'] for group in groups)
        for group_ref in groups:
            self.assertNotIn('dn', group_ref)
        self.assertEqual(set(expected_group_ids), group_ids)
    def test_list_groups_for_user_no_dn(self):
        """list_groups_for_user returns the user's groups and never leaks
        the raw LDAP 'dn' into any group ref.
        """
        # Create a test user.
        user = {'name': uuid.uuid4().hex,
                'domain_id': CONF.identity.default_domain_id,
                'password': uuid.uuid4().hex, 'enabled': True}
        user = self.identity_api.create_user(user)
        # Create some test groups and add the test user as a member.
        domain = self._get_domain_fixture()
        expected_group_ids = []
        numgroups = 3
        for _ in range(numgroups):
            group = {'name': uuid.uuid4().hex, 'domain_id': domain['id']}
            group = self.identity_api.create_group(group)
            expected_group_ids.append(group['id'])
            self.identity_api.add_user_to_group(user['id'], group['id'])
        # Fetch the groups for the test user
        # and ensure they don't contain a dn.
        groups = self.identity_api.list_groups_for_user(user['id'])
        self.assertEqual(numgroups, len(groups))
        group_ids = set(group['id'] for group in groups)
        for group_ref in groups:
            self.assertNotIn('dn', group_ref)
        self.assertEqual(set(expected_group_ids), group_ids)
    def test_user_id_attribute_in_create(self):
        """With user_id_attribute='mail', a newly created user's ID equals
        its email attribute.
        """
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'mail'
        self.reload_backends(CONF.identity.default_domain_id)

        user = {'name': u'fäké1',
                'password': u'fäképass1',
                'domain_id': CONF.identity.default_domain_id}
        user = self.identity_api.create_user(user)
        user_ref = self.identity_api.get_user(user['id'])
        # 'email' attribute should've been created because it is also
        # being used as user_id
        self.assertEqual(user_ref['id'], user_ref['email'])
    def test_user_id_attribute_map(self):
        """With user_id_attribute='mail', looking up a fixture user by
        email yields a ref whose ID is that email.
        """
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'mail'
        self.reload_backends(CONF.identity.default_domain_id)

        user_ref = self.identity_api.get_user(self.user_foo['email'])
        # the user_id_attribute map should be honored, which means
        # user_ref['id'] should contains the email attribute
        self.assertEqual(self.user_foo['email'], user_ref['id'])
    @mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
    def test_get_id_from_dn_for_multivalued_attribute_id(self, mock_ldap_get):
        """When the configured ID attribute is multivalued, the ID falls
        back to the value taken from the DN (legacy behavior).
        """
        conf = self.get_config(CONF.identity.default_domain_id)
        conf.ldap.user_id_attribute = 'mail'
        self.reload_backends(CONF.identity.default_domain_id)

        # make 'email' multivalued so we can test the error condition
        email1 = uuid.uuid4().hex
        email2 = uuid.uuid4().hex
        mock_ldap_get.return_value = (
            'cn=nobodycares,dc=example,dc=com',
            {
                'sn': [uuid.uuid4().hex],
                'mail': [email1, email2],
                'cn': 'nobodycares'
            }
        )

        user_ref = self.identity_api.get_user(email1)
        # make sure we get the ID from DN (old behavior) if the ID attribute
        # has multiple values
        self.assertEqual('nobodycares', user_ref['id'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_id_attribute_not_found(self, mock_ldap_get):
mock_ldap_get.return_value = (
'cn=nobodycares,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
}
)
user_api = identity.backends.ldap.UserApi(CONF)
self.assertRaises(exception.NotFound,
user_api.get,
'nobodycares')
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_id_not_in_dn(self, mock_ldap_get):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'uid'
conf.ldap.user_name_attribute = 'cn'
self.reload_backends(CONF.identity.default_domain_id)
mock_ldap_get.return_value = (
'foo=bar,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'foo': ['bar'],
'cn': ['junk'],
'uid': ['crap']
}
)
user_ref = self.identity_api.get_user('crap')
self.assertEqual('crap', user_ref['id'])
self.assertEqual('junk', user_ref['name'])
@mock.patch.object(common_ldap_core.BaseLdap, '_ldap_get')
def test_user_name_in_dn(self, mock_ldap_get):
conf = self.get_config(CONF.identity.default_domain_id)
conf.ldap.user_id_attribute = 'sAMAccountName'
conf.ldap.user_name_attribute = 'cn'
self.reload_backends(CONF.identity.default_domain_id)
mock_ldap_get.return_value = (
'cn=Foo Bar,dc=example,dc=com',
{
'sn': [uuid.uuid4().hex],
'cn': ['Foo Bar'],
'SAMAccountName': ['crap']
}
)
user_ref = self.identity_api.get_user('crap')
self.assertEqual('crap', user_ref['id'])
self.assertEqual('Foo Bar', user_ref['name'])
class LDAPIdentityEnabledEmulation(LDAPIdentity):
    """Re-run the LDAP identity tests with enabled-attribute emulation on.

    With user/project_enabled_emulation, 'enabled' state is tracked via
    membership in a special LDAP group rather than a real attribute, so
    several inherited tests are overridden or skipped below.
    """
    def setUp(self):
        super(LDAPIdentityEnabledEmulation, self).setUp()
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        # Emulation reports these fixtures as enabled, so make the local
        # fixture dicts agree for later comparisons.
        for obj in [self.tenant_bar, self.tenant_baz, self.user_foo,
                    self.user_two, self.user_badguy]:
            obj.setdefault('enabled', True)
    def load_fixtures(self, fixtures):
        # Override super impl since need to create group container.
        create_group_container(self.identity_api)
        super(LDAPIdentity, self).load_fixtures(fixtures)
    def config_files(self):
        """Append the LDAP backend config on top of the inherited files."""
        config_files = super(LDAPIdentityEnabledEmulation, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_ldap.conf'))
        return config_files
    def config_overrides(self):
        """Switch on enabled-emulation for both users and projects."""
        super(LDAPIdentityEnabledEmulation, self).config_overrides()
        self.config_fixture.config(group='ldap',
                                   user_enabled_emulation=True,
                                   project_enabled_emulation=True)
    def test_project_crud(self):
        # NOTE(topol): LDAPIdentityEnabledEmulation will create an
        #              enabled key in the project dictionary so this
        #              method override handles this side-effect
        project = {
            'id': uuid.uuid4().hex,
            'name': uuid.uuid4().hex,
            'domain_id': CONF.identity.default_domain_id,
            'description': uuid.uuid4().hex,
            'parent_id': None}
        self.assignment_api.create_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        # self.assignment_api.create_project adds an enabled
        # key with a value of True when LDAPIdentityEnabledEmulation
        # is used so we now add this expected key to the project dictionary
        project['enabled'] = True
        self.assertDictEqual(project_ref, project)
        project['description'] = uuid.uuid4().hex
        self.assignment_api.update_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        self.assertDictEqual(project_ref, project)
        self.assignment_api.delete_project(project['id'])
        self.assertRaises(exception.ProjectNotFound,
                          self.assignment_api.get_project,
                          project['id'])
    def test_user_crud(self):
        """Create/read/update/delete a user under enabled emulation."""
        user_dict = {
            'domain_id': CONF.identity.default_domain_id,
            'name': uuid.uuid4().hex,
            'password': uuid.uuid4().hex}
        user = self.identity_api.create_user(user_dict)
        # Emulation defaults newly created users to enabled.
        user_dict['enabled'] = True
        user_ref = self.identity_api.get_user(user['id'])
        # Passwords are never returned in refs, so drop it before comparing.
        del user_dict['password']
        user_ref_dict = dict((x, user_ref[x]) for x in user_ref)
        self.assertDictContainsSubset(user_dict, user_ref_dict)
        user_dict['password'] = uuid.uuid4().hex
        # NOTE(review): update is passed the created ref ('user'), not
        # 'user_dict' with the new password — presumably intentional here,
        # but worth confirming against the original test intent.
        self.identity_api.update_user(user['id'], user)
        user_ref = self.identity_api.get_user(user['id'])
        del user_dict['password']
        user_ref_dict = dict((x, user_ref[x]) for x in user_ref)
        self.assertDictContainsSubset(user_dict, user_ref_dict)
        self.identity_api.delete_user(user['id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user['id'])
    def test_user_auth_emulated(self):
        """Authentication still works when an emulation DN is configured."""
        self.config_fixture.config(group='ldap',
                                   user_enabled_emulation_dn='cn=test,dc=test')
        self.reload_backends(CONF.identity.default_domain_id)
        self.identity_api.authenticate(
            context={},
            user_id=self.user_foo['id'],
            password=self.user_foo['password'])
    def test_user_enable_attribute_mask(self):
        self.skipTest(
            "Enabled emulation conflicts with enabled mask")
    def test_user_enabled_invert(self):
        """With enabled emulation, user_enabled_invert writes no attribute."""
        self.config_fixture.config(group='ldap', user_enabled_invert=True,
                                   user_enabled_default=False)
        self.clear_database()
        self.load_backends()
        self.load_fixtures(default_fixtures)
        user1 = {'name': u'fäké1', 'enabled': True,
                 'domain_id': CONF.identity.default_domain_id}
        user2 = {'name': u'fäké2', 'enabled': False,
                 'domain_id': CONF.identity.default_domain_id}
        user3 = {'name': u'fäké3',
                 'domain_id': CONF.identity.default_domain_id}
        # Ensure that the enabled LDAP attribute is not set for a
        # newly created enabled user.
        user_ref = self.identity_api.create_user(user1)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])
        # Ensure that an enabled LDAP attribute is not set for a disabled user.
        user1['enabled'] = False
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(False, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        # Enable the user and ensure that the LDAP enabled
        # attribute is not set.
        user1['enabled'] = True
        user_ref = self.identity_api.update_user(user_ref['id'], user1)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        # Ensure that the LDAP enabled attribute is not set for a
        # newly created disabled user.
        user_ref = self.identity_api.create_user(user2)
        self.assertIs(False, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(False, user_ref['enabled'])
        # Ensure that the LDAP enabled attribute is not set for a newly created
        # user when the user_enabled_default setting is used.
        user_ref = self.identity_api.create_user(user3)
        self.assertIs(True, user_ref['enabled'])
        self.assertIsNone(self.get_user_enabled_vals(user_ref))
        user_ref = self.identity_api.get_user(user_ref['id'])
        self.assertIs(True, user_ref['enabled'])
    def test_user_enabled_invert_no_enabled_value(self):
        self.skipTest(
            "N/A: Covered by test_user_enabled_invert")
    def test_user_enabled_invert_default_str_value(self):
        self.skipTest(
            "N/A: Covered by test_user_enabled_invert")
class LdapIdentitySqlAssignment(BaseLDAPIdentity, tests.SQLDriverOverrides,
                                tests.TestCase):
    """Identity backed by LDAP with assignment backed by SQL.

    Domains are read-only in this combination (the LDAP identity driver
    serves a single default domain), which is why several domain tests
    are overridden below.
    """
    def config_files(self):
        """Append the LDAP+SQL backend config to the inherited files."""
        config_files = super(LdapIdentitySqlAssignment, self).config_files()
        config_files.append(tests.dirs.tests_conf('backend_ldap_sql.conf'))
        return config_files
    def setUp(self):
        self.useFixture(database.Database())
        super(LdapIdentitySqlAssignment, self).setUp()
        self.clear_database()
        self.load_backends()
        cache.configure_cache_region(cache.REGION)
        # Build the SQL schema for the assignment backend and register
        # teardown so each test starts from a clean database.
        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
        self.load_fixtures(default_fixtures)
        # defaulted by the data load
        self.user_foo['enabled'] = True
    def config_overrides(self):
        """Select the LDAP identity driver and the SQL assignment driver."""
        super(LdapIdentitySqlAssignment, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
    def test_domain_crud(self):
        # Domains cannot be created/updated in this configuration, so the
        # inherited CRUD test is neutralized.
        pass
    def test_list_domains(self):
        domains = self.assignment_api.list_domains()
        self.assertEqual([assignment.calc_default_domain()], domains)
    def test_list_domains_non_default_domain_id(self):
        # Changing default_domain_id does not change the ID of the default
        # domain returned by list_domains, because the SQL identity
        # configuration ignores the override.
        orig_default_domain_id = CONF.identity.default_domain_id
        new_domain_id = uuid.uuid4().hex
        self.config_fixture.config(group='identity',
                                   default_domain_id=new_domain_id)
        domains = self.assignment_api.list_domains()
        self.assertEqual(orig_default_domain_id, domains[0]['id'])
    def test_create_domain(self):
        # Domain creation is forbidden with a read-only domain table.
        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                  'enabled': True}
        self.assertRaises(exception.Forbidden,
                          self.assignment_api.create_domain,
                          domain['id'],
                          domain)
    def test_get_and_remove_role_grant_by_group_and_domain(self):
        # TODO(henry-nash): We should really rewrite the tests in test_backend
        # to be more flexible as to where the domains are sourced from, so
        # that we would not need to override such tests here. This is raised
        # as bug 1373865.
        new_domain = self._get_domain_fixture()
        new_group = {'domain_id': new_domain['id'], 'name': uuid.uuid4().hex}
        new_group = self.identity_api.create_group(new_group)
        new_user = {'name': 'new_user', 'password': uuid.uuid4().hex,
                    'enabled': True, 'domain_id': new_domain['id']}
        new_user = self.identity_api.create_user(new_user)
        self.identity_api.add_user_to_group(new_user['id'],
                                            new_group['id'])
        # No grants yet for the new group on the domain.
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        self.assignment_api.create_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertDictEqual(roles_ref[0], self.role_member)
        self.assignment_api.delete_grant(group_id=new_group['id'],
                                         domain_id=new_domain['id'],
                                         role_id='member')
        roles_ref = self.assignment_api.list_grants(
            group_id=new_group['id'],
            domain_id=new_domain['id'])
        self.assertEqual(0, len(roles_ref))
        # Deleting an already-deleted grant must raise NotFound.
        self.assertRaises(exception.NotFound,
                          self.assignment_api.delete_grant,
                          group_id=new_group['id'],
                          domain_id=new_domain['id'],
                          role_id='member')
    def test_project_enabled_ignored_disable_error(self):
        # Override
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")
class LdapIdentitySqlAssignmentWithMapping(LdapIdentitySqlAssignment):
    """LDAP identity + SQL assignment with public-ID mapping enabled.

    With backward_compatible_ids=False every LDAP entity gets a mapped
    public ID stored in the SQL mapping table.
    """
    def config_overrides(self):
        super(LdapIdentitySqlAssignmentWithMapping, self).config_overrides()
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)
    def test_dynamic_mapping_build(self):
        """Mappings are created on demand and rebuilt after a purge."""
        initial_mappings = len(mapping_sql.list_id_mappings())
        user1 = {'name': uuid.uuid4().hex,
                 'domain_id': CONF.identity.default_domain_id,
                 'password': uuid.uuid4().hex, 'enabled': True}
        user1 = self.identity_api.create_user(user1)
        user2 = {'name': uuid.uuid4().hex,
                 'domain_id': CONF.identity.default_domain_id,
                 'password': uuid.uuid4().hex, 'enabled': True}
        user2 = self.identity_api.create_user(user2)
        # Each created user should have produced exactly one mapping row.
        mappings = mapping_sql.list_id_mappings()
        self.assertEqual(initial_mappings + 2, len(mappings))
        # After purging, the old public IDs no longer resolve...
        self.id_mapping_api.purge_mappings({'public_id': user1['id']})
        self.id_mapping_api.purge_mappings({'public_id': user2['id']})
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user1['id'])
        self.assertRaises(exception.UserNotFound,
                          self.identity_api.get_user,
                          user2['id'])
        # ...but listing users regenerates the mappings, so the same
        # public IDs work again.
        self.identity_api.list_users()
        self.identity_api.get_user(user1['id'])
        self.identity_api.get_user(user2['id'])
    def test_get_roles_for_user_and_project_user_group_same_id(self):
        self.skipTest('N/A: We never generate the same ID for a user and '
                      'group in our mapping table')
class BaseMultiLDAPandSQLIdentity(object):
    """Mixin with helpers for tests spanning multiple identity backends.

    Concrete test classes must set ``self.domain_count``,
    ``self.domain_specific_count`` and ``self.users``, and provide the
    usual keystone test managers (``identity_api``, ``assignment_api``,
    ``role_member``, ...).
    """

    def create_user(self, domain_id):
        """Create an enabled user in *domain_id*.

        Returns the created ref with the clear-text password re-attached,
        since create_user() scrubs it and callers authenticate later.
        """
        user = {'name': uuid.uuid4().hex,
                'domain_id': domain_id,
                'password': uuid.uuid4().hex,
                'enabled': True}
        user_ref = self.identity_api.create_user(user)
        user_ref['password'] = user['password']
        return user_ref

    def create_users_across_domains(self):
        """Create one user with a member grant in each test domain.

        Also asserts that exactly ``domain_specific_count`` new public-ID
        mappings were generated for the domain-specific backends.
        """
        initial_mappings = len(mapping_sql.list_id_mappings())
        self.users['user0'] = self.create_user(
            self.domains['domain_default']['id'])
        self.assignment_api.create_grant(
            user_id=self.users['user0']['id'],
            domain_id=self.domains['domain_default']['id'],
            role_id=self.role_member['id'])
        for x in range(1, self.domain_count):
            self.users['user%s' % x] = self.create_user(
                self.domains['domain%s' % x]['id'])
            self.assignment_api.create_grant(
                user_id=self.users['user%s' % x]['id'],
                domain_id=self.domains['domain%s' % x]['id'],
                role_id=self.role_member['id'])
        self.assertEqual(initial_mappings + self.domain_specific_count,
                         len(mapping_sql.list_id_mappings()))

    def check_user(self, user, domain_id, expected_status):
        """Check whether a user is visible via a specific domain's driver.

        :param user: user ref (with password) to look for.
        :param domain_id: domain whose identity driver is interrogated.
        :param expected_status: 200 if the user must be retrievable and
            match ``user``; otherwise an exception class the lookup must
            raise.
        """
        driver = self.identity_api._select_identity_driver(domain_id)
        _, _, entity_id = (
            self.identity_api._get_domain_driver_and_entity_id(
                user['id']))
        if expected_status == 200:
            ref = driver.get_user(entity_id)
            ref = self.identity_api._set_domain_id_and_mapping(
                ref, domain_id, driver, map.EntityType.USER)
            # Drivers never return passwords, so compare without it.
            user = user.copy()
            del user['password']
            self.assertDictEqual(ref, user)
        else:
            # BUG FIX: the previous ``try: get_user() except
            # expected_status: pass`` silently passed when the lookup
            # unexpectedly *succeeded*. assertRaises makes the negative
            # case a real assertion.
            self.assertRaises(expected_status, driver.get_user, entity_id)

    def setup_initial_domains(self):
        """Create domain1..domainN and the default domain.

        Pre-existing domains are tolerated: on Conflict, the existing
        domain is fetched by name instead.
        """
        def create_domain(domain):
            try:
                ref = self.assignment_api.create_domain(
                    domain['id'], domain)
            except exception.Conflict:
                ref = (
                    self.assignment_api.get_domain_by_name(domain['name']))
            return ref
        self.domains = {}
        for x in range(1, self.domain_count):
            domain = 'domain%s' % x
            self.domains[domain] = create_domain(
                {'id': uuid.uuid4().hex, 'name': domain})
        self.domains['domain_default'] = create_domain(
            assignment.calc_default_domain())

    def test_authenticate_to_each_domain(self):
        """Each user from create_users_across_domains() can authenticate
        against its own domain's backend."""
        for user_num in range(self.domain_count):
            user = 'user%s' % user_num
            self.identity_api.authenticate(
                context={},
                user_id=self.users[user]['id'],
                password=self.users[user]['password'])
class MultiLDAPandSQLIdentity(BaseLDAPIdentity, tests.SQLDriverOverrides,
                              tests.TestCase, BaseMultiLDAPandSQLIdentity):
    """SQL default identity driver plus per-domain LDAP drivers.

    Five domains are created; three of them have domain-specific LDAP
    configs loaded from the domain_configs_multi_ldap directory (domain3
    and domain4 share one LDAP backend).
    """
    def setUp(self):
        self.useFixture(database.Database())
        super(MultiLDAPandSQLIdentity, self).setUp()
        self.load_backends()
        # Stand up the SQL schema with teardown registered per test.
        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
        self.domain_count = 5
        self.domain_specific_count = 3
        self.setup_initial_domains()
        # Some users are created *before* switching on multi-backend mode
        # so that pre-existing (single-backend) IDs can be verified later.
        self._setup_initial_users()
        # Now switch on domain-specific drivers and mapped public IDs.
        self.config_fixture.config(
            group='identity', domain_specific_drivers_enabled=True,
            domain_config_dir=tests.TESTCONF + '/domain_configs_multi_ldap')
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)
        self.clear_database()
        self.load_fixtures(default_fixtures)
        self.create_users_across_domains()
    def config_overrides(self):
        """SQL for both the default identity driver and assignment."""
        super(MultiLDAPandSQLIdentity, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.sql.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
    def _setup_initial_users(self):
        # Users created while still in single-backend mode; their IDs
        # must keep working after the switch (see test_existing_uuids_work).
        self.users = {}
        self.users['userA'] = self.create_user(
            self.domains['domain_default']['id'])
        self.users['userB'] = self.create_user(
            self.domains['domain1']['id'])
        self.users['userC'] = self.create_user(
            self.domains['domain3']['id'])
    def reload_backends(self, domain_id):
        # Reload only the driver for this domain, picking up updated cfg.
        self.identity_api.domain_configs.reload_domain_driver(
            self.identity_api.assignment_api, domain_id)
    def get_config(self, domain_id):
        # Per-domain config object (falls back to CONF when no
        # domain-specific config exists).
        return self.identity_api.domain_configs.get_domain_conf(domain_id)
    def test_list_domains(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')
    def test_list_domains_non_default_domain_id(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')
    def test_list_users(self):
        # One extra user (user0) was added to the default domain on top of
        # the standard fixtures.
        users = self.identity_api.list_users(
            domain_scope=self._set_domain_scope(
                CONF.identity.default_domain_id))
        self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
        user_ids = set(user['id'] for user in users)
        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
                                for user in default_fixtures.USERS)
        expected_user_ids.add(self.users['user0']['id'])
        for user_ref in users:
            self.assertNotIn('password', user_ref)
        self.assertEqual(expected_user_ids, user_ids)
    def test_domain_segregation(self):
        """Each user is visible only through its own domain's driver
        (except where two domains share a backend)."""
        check_user = self.check_user
        check_user(self.users['user0'],
                   self.domains['domain_default']['id'], 200)
        for domain in [self.domains['domain1']['id'],
                       self.domains['domain2']['id'],
                       self.domains['domain3']['id'],
                       self.domains['domain4']['id']]:
            check_user(self.users['user0'], domain, exception.UserNotFound)
        check_user(self.users['user1'], self.domains['domain1']['id'], 200)
        for domain in [self.domains['domain_default']['id'],
                       self.domains['domain2']['id'],
                       self.domains['domain3']['id'],
                       self.domains['domain4']['id']]:
            check_user(self.users['user1'], domain, exception.UserNotFound)
        check_user(self.users['user2'], self.domains['domain2']['id'], 200)
        for domain in [self.domains['domain_default']['id'],
                       self.domains['domain1']['id'],
                       self.domains['domain3']['id'],
                       self.domains['domain4']['id']]:
            check_user(self.users['user2'], domain, exception.UserNotFound)
        # domain3 and domain4 share the same backend, so you should be
        # able to see user3 and user4 from either.
        check_user(self.users['user3'], self.domains['domain3']['id'], 200)
        check_user(self.users['user3'], self.domains['domain4']['id'], 200)
        check_user(self.users['user4'], self.domains['domain3']['id'], 200)
        check_user(self.users['user4'], self.domains['domain4']['id'], 200)
        for domain in [self.domains['domain_default']['id'],
                       self.domains['domain1']['id'],
                       self.domains['domain2']['id']]:
            check_user(self.users['user3'], domain, exception.UserNotFound)
            check_user(self.users['user4'], domain, exception.UserNotFound)
        # Finally, going through the regular manager layer, make sure we
        # only see the right number of users in each of the non-default
        # domains. One might have expected two users in domain1 (since we
        # created one before we switched to multi-backend), however since
        # that domain changed backends in the switch we don't find it anymore.
        # backends.
        #
        # The listing of the default domain is already handled in the
        # test_lists_users() method.
        for domain in [self.domains['domain1']['id'],
                       self.domains['domain2']['id'],
                       self.domains['domain4']['id']]:
            self.assertThat(
                self.identity_api.list_users(domain_scope=domain),
                matchers.HasLength(1))
        # domain3 had a user created before we switched on
        # multiple backends, plus one created afterwards - and its
        # backend has not changed - so we should find two.
        self.assertThat(
            self.identity_api.list_users(
                domain_scope=self.domains['domain3']['id']),
            matchers.HasLength(2))
    def test_existing_uuids_work(self):
        """IDs issued before the multi-backend switch must still resolve."""
        self.identity_api.get_user(self.users['userA']['id'])
        self.identity_api.get_user(self.users['userB']['id'])
        self.identity_api.get_user(self.users['userC']['id'])
    def test_scanning_of_config_dir(self):
        # Confirm that config has drivers_enabled as True, which we will
        # check has been set to False later in this test
        self.assertTrue(config.CONF.identity.domain_specific_drivers_enabled)
        self.load_backends()
        # Execute any command to trigger the lazy loading of domain configs
        self.identity_api.list_users(
            domain_scope=self.domains['domain1']['id'])
        # ...and now check the domain configs have been set up
        self.assertIn('default', self.identity_api.domain_configs)
        self.assertIn(self.domains['domain1']['id'],
                      self.identity_api.domain_configs)
        self.assertIn(self.domains['domain2']['id'],
                      self.identity_api.domain_configs)
        self.assertNotIn(self.domains['domain3']['id'],
                         self.identity_api.domain_configs)
        self.assertNotIn(self.domains['domain4']['id'],
                         self.identity_api.domain_configs)
        # Finally check that a domain specific config contains items from both
        # the primary config and the domain specific config
        conf = self.identity_api.domain_configs.get_domain_conf(
            self.domains['domain1']['id'])
        # This should now be false, as is the default, since this is not
        # set in the standard primary config file
        self.assertFalse(conf.identity.domain_specific_drivers_enabled)
        # ..and make sure a domain-specific options is also set
        self.assertEqual('fake://memory1', conf.ldap.url)
    def test_delete_domain_with_user_added(self):
        """A domain can be deleted after its project grants are removed."""
        domain = {'id': uuid.uuid4().hex, 'name': uuid.uuid4().hex,
                  'enabled': True}
        project = {'id': uuid.uuid4().hex,
                   'name': uuid.uuid4().hex,
                   'domain_id': domain['id'],
                   'description': uuid.uuid4().hex,
                   'parent_id': None,
                   'enabled': True}
        self.assignment_api.create_domain(domain['id'], domain)
        self.assignment_api.create_project(project['id'], project)
        project_ref = self.assignment_api.get_project(project['id'])
        self.assertDictEqual(project_ref, project)
        self.assignment_api.create_grant(user_id=self.user_foo['id'],
                                         project_id=project['id'],
                                         role_id=self.role_member['id'])
        self.assignment_api.delete_grant(user_id=self.user_foo['id'],
                                         project_id=project['id'],
                                         role_id=self.role_member['id'])
        # A domain must be disabled before it can be deleted.
        domain['enabled'] = False
        self.assignment_api.update_domain(domain['id'], domain)
        self.assignment_api.delete_domain(domain['id'])
        self.assertRaises(exception.DomainNotFound,
                          self.assignment_api.get_domain,
                          domain['id'])
    def test_user_enabled_ignored_disable_error(self):
        # Override.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")
    def test_group_enabled_ignored_disable_error(self):
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")
    def test_project_enabled_ignored_disable_error(self):
        # Override
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")
class DomainSpecificLDAPandSQLIdentity(
    BaseLDAPIdentity, tests.SQLDriverOverrides, tests.TestCase,
    BaseMultiLDAPandSQLIdentity):
    """Two domains with one SQL-specific and one LDAP-specific driver.

    Unlike MultiLDAPandSQLIdentity, multi-backend mode is switched on
    before any data is created.
    """
    def setUp(self):
        self.useFixture(database.Database())
        super(DomainSpecificLDAPandSQLIdentity, self).setUp()
        self.initial_setup()
    def initial_setup(self):
        # We aren't setting up any initial data ahead of switching to
        # domain-specific operation, so make the switch straight away.
        self.config_fixture.config(
            group='identity', domain_specific_drivers_enabled=True,
            domain_config_dir=(
                tests.TESTCONF + '/domain_configs_one_sql_one_ldap'))
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=False)
        self.load_backends()
        # Stand up the SQL schema with teardown registered per test.
        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
        self.domain_count = 2
        self.domain_specific_count = 2
        self.setup_initial_domains()
        self.users = {}
        self.clear_database()
        self.load_fixtures(default_fixtures)
        self.create_users_across_domains()
    def config_overrides(self):
        super(DomainSpecificLDAPandSQLIdentity, self).config_overrides()
        # Make sure assignment is actually an SQL driver,
        # BaseLDAPIdentity causes this option to use LDAP.
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
    def reload_backends(self, domain_id):
        # Just reload the driver for this domain - which will pickup
        # any updated cfg
        self.identity_api.domain_configs.reload_domain_driver(
            self.identity_api.assignment_api, domain_id)
    def get_config(self, domain_id):
        # Get the config for this domain, will return CONF
        # if no specific config defined for this domain
        return self.identity_api.domain_configs.get_domain_conf(domain_id)
    def test_list_domains(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')
    def test_list_domains_non_default_domain_id(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')
    def test_domain_crud(self):
        self.skipTest(
            'N/A: Not relevant for multi ldap testing')
    def test_list_users(self):
        # Override the standard list users, since we have added an extra user
        # to the default domain, so the number of expected users is one more
        # than in the standard test.
        users = self.identity_api.list_users(
            domain_scope=self._set_domain_scope(
                CONF.identity.default_domain_id))
        self.assertEqual(len(default_fixtures.USERS) + 1, len(users))
        user_ids = set(user['id'] for user in users)
        expected_user_ids = set(getattr(self, 'user_%s' % user['id'])['id']
                                for user in default_fixtures.USERS)
        expected_user_ids.add(self.users['user0']['id'])
        for user_ref in users:
            self.assertNotIn('password', user_ref)
        self.assertEqual(expected_user_ids, user_ids)
    def test_domain_segregation(self):
        # Check that I can read a user with the appropriate domain-selected
        # driver, but won't find it via any other domain driver
        self.check_user(self.users['user0'],
                        self.domains['domain_default']['id'], 200)
        self.check_user(self.users['user0'],
                        self.domains['domain1']['id'], exception.UserNotFound)
        self.check_user(self.users['user1'],
                        self.domains['domain1']['id'], 200)
        self.check_user(self.users['user1'],
                        self.domains['domain_default']['id'],
                        exception.UserNotFound)
        # Only the one user created in domain1 should be listed there.
        self.assertThat(
            self.identity_api.list_users(
                domain_scope=self.domains['domain1']['id']),
            matchers.HasLength(1))
    def test_add_role_grant_to_user_and_project_404(self):
        self.skipTest('Blocked by bug 1101287')
    def test_get_role_grants_for_user_and_project_404(self):
        self.skipTest('Blocked by bug 1101287')
    def test_list_projects_for_user_with_grants(self):
        self.skipTest('Blocked by bug 1221805')
    def test_get_roles_for_user_and_project_user_group_same_id(self):
        self.skipTest('N/A: We never generate the same ID for a user and '
                      'group in our mapping table')
    def test_user_id_comma(self):
        self.skipTest('Only valid if it is guaranteed to be talking to '
                      'the fakeldap backend')
    def test_user_id_comma_grants(self):
        self.skipTest('Only valid if it is guaranteed to be talking to '
                      'the fakeldap backend')
    def test_user_enabled_ignored_disable_error(self):
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")
    def test_group_enabled_ignored_disable_error(self):
        # Override.
        self.skipTest("Doesn't apply since LDAP config has no affect on the "
                      "SQL identity backend.")
    def test_project_enabled_ignored_disable_error(self):
        self.skipTest("Doesn't apply since LDAP configuration is ignored for "
                      "SQL assignment backend.")
class DomainSpecificSQLIdentity(DomainSpecificLDAPandSQLIdentity):
    """Default LDAP identity driver plus one SQL domain-specific driver.

    Only one SQL driver is permitted across the whole configuration, so
    the tests below verify that a second SQL driver is rejected.
    """
    def initial_setup(self):
        # We aren't setting up any initial data ahead of switching to
        # domain-specific operation, so make the switch straight away.
        self.config_fixture.config(
            group='identity', domain_specific_drivers_enabled=True,
            domain_config_dir=(
                tests.TESTCONF + '/domain_configs_default_ldap_one_sql'))
        # Part of the testing counts how many new mappings get created as
        # we create users, so ensure we are NOT using mapping for the
        # default LDAP domain.
        self.config_fixture.config(group='identity_mapping',
                                   backward_compatible_ids=True)
        self.load_backends()
        # Stand up the SQL schema with teardown registered per test.
        self.engine = sql.get_engine()
        self.addCleanup(sql.cleanup)
        sql.ModelBase.metadata.create_all(bind=self.engine)
        self.addCleanup(sql.ModelBase.metadata.drop_all, bind=self.engine)
        self.domain_count = 2
        self.domain_specific_count = 1
        self.setup_initial_domains()
        self.users = {}
        self.load_fixtures(default_fixtures)
        self.create_users_across_domains()
    def config_overrides(self):
        """LDAP as the default identity driver; SQL for assignment."""
        super(DomainSpecificSQLIdentity, self).config_overrides()
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
    def get_config(self, domain_id):
        # The default domain uses the primary CONF; only other domains
        # have a domain-specific config object.
        if domain_id == CONF.identity.default_domain_id:
            return CONF
        else:
            return self.identity_api.domain_configs.get_domain_conf(domain_id)
    def reload_backends(self, domain_id):
        if domain_id == CONF.identity.default_domain_id:
            self.load_backends()
        else:
            # Just reload the driver for this domain - which will pickup
            # any updated cfg
            self.identity_api.domain_configs.reload_domain_driver(
                self.identity_api.assignment_api, domain_id)
    def test_default_sql_plus_sql_specific_driver_fails(self):
        # First confirm that if ldap is default driver, domain1 can be
        # loaded as sql
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
        self.load_backends()
        # Make any identity call to initiate the lazy loading of configs
        self.identity_api.list_users(
            domain_scope=CONF.identity.default_domain_id)
        self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
        # Now re-initialize, but with sql as the default identity driver
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.sql.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
        self.load_backends()
        # Make any identity call to initiate the lazy loading of configs, which
        # should fail since we would now have two sql drivers.
        self.assertRaises(exception.MultipleSQLDriversInConfig,
                          self.identity_api.list_users,
                          domain_scope=CONF.identity.default_domain_id)
    def test_multiple_sql_specific_drivers_fails(self):
        """Two domain-specific SQL drivers must be rejected."""
        self.config_fixture.config(
            group='identity',
            driver='keystone.identity.backends.ldap.Identity')
        self.config_fixture.config(
            group='assignment',
            driver='keystone.assignment.backends.sql.Assignment')
        self.load_backends()
        # Ensure default, domain1 and domain2 exist
        self.domain_count = 3
        self.setup_initial_domains()
        # Make any identity call to initiate the lazy loading of configs
        self.identity_api.list_users(
            domain_scope=CONF.identity.default_domain_id)
        # This will only load domain1, since the domain2 config file is
        # not stored in the same location
        self.assertIsNotNone(self.get_config(self.domains['domain1']['id']))
        # Now try and manually load a 2nd sql specific driver, for domain2,
        # which should fail.
        self.assertRaises(exception.MultipleSQLDriversInConfig,
                          self.identity_api.domain_configs._load_config,
                          self.identity_api.assignment_api,
                          [tests.TESTCONF + '/domain_configs_one_extra_sql/' +
                           'keystone.domain2.conf'],
                          'domain2')
| true
| true
|
1c428a87c47d58c69d87bdaa6594d7a2b98be1e1
| 1,557
|
py
|
Python
|
Training/testModel.py
|
TWCurry/emotion-recognition-training-platform
|
748fcdf2558fbfe9fb1523ef4024e7373543f0e3
|
[
"MIT"
] | null | null | null |
Training/testModel.py
|
TWCurry/emotion-recognition-training-platform
|
748fcdf2558fbfe9fb1523ef4024e7373543f0e3
|
[
"MIT"
] | null | null | null |
Training/testModel.py
|
TWCurry/emotion-recognition-training-platform
|
748fcdf2558fbfe9fb1523ef4024e7373543f0e3
|
[
"MIT"
] | null | null | null |
import sys, os
import tensorflow as tf
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
imgHeight = 762  # expected input image height in pixels (not referenced below)
imgWidth = 562  # expected input image width in pixels (not referenced below)
# Dataset label codes and their human-readable names; the two lists are
# index-aligned, so a prediction index maps into either list.
emotionCodes = ["AF", "AN", "DI", "HA", "NE", "SA", "SU"]
emotionNames = ["Afraid", "Angry", "Disgusted", "Happy", "Neutral", "Sad", "Surprised"]
def main():
    """Load a trained model, classify one image, and plot the confidences.

    Command line: ``testModel.py <model_dir> [image_path]``.
    ``image_path`` defaults to the original hard-coded sample image, so
    existing invocations keep working.  Exits with status 1 when the
    model directory argument is missing.
    """
    # Validate arguments explicitly instead of wrapping the sys.argv
    # lookup in a broad try/except Exception.
    if len(sys.argv) < 2:
        print("Invalid parameters.")
        sys.exit(1)
    modelDir = sys.argv[1]
    imgPath = sys.argv[2] if len(sys.argv) > 2 else "dataSet/test5.jpg"
    model = keras.models.load_model(modelDir)
    # Load the image and shape it into a batch of one for predict().
    testImg = keras.preprocessing.image.load_img(imgPath)
    testArr = np.array([keras.preprocessing.image.img_to_array(testImg)])
    # Append a Softmax layer so the model's raw logits become probabilities.
    probabilityModel = tf.keras.Sequential([model, tf.keras.layers.Softmax()])
    predictions = probabilityModel.predict(testArr)
    # predictions[0] holds one confidence per label; np.argmax gives the
    # index of the most confident label, which maps onto emotionNames.
    print(predictions)
    print("======================PREDICTION======================")
    print(f"Predicted emotion: {emotionNames[np.argmax(predictions[0])]}")
    print("======================================================")
    plt.bar(emotionCodes, predictions[0])
    plt.xticks(emotionCodes)
    plt.show()
if __name__ == "__main__":
    main()
| 37.97561
| 127
| 0.649326
|
import sys, os
import tensorflow as tf
import numpy as np
from tensorflow import keras
import matplotlib.pyplot as plt
imgHeight = 762
imgWidth = 562
emotionCodes = ["AF", "AN", "DI", "HA", "NE", "SA", "SU"]
emotionNames = ["Afraid", "Angry", "Disgusted", "Happy", "Neutral", "Sad", "Surprised"]
def main():
try:
modelDir = sys.argv[1]
except Exception as e:
print("Invalid parameters.")
sys.exit(1)
model = keras.models.load_model(modelDir)
testImg = keras.preprocessing.image.load_img("dataSet/test5.jpg")
testArr = keras.preprocessing.image.img_to_array(testImg)
testArr = np.array([testArr])
probabilityModel = tf.keras.Sequential([model,tf.keras.layers.Softmax()])
predictions = probabilityModel.predict(testArr)
print(predictions)
print("======================PREDICTION======================")
print(f"Predicted emotion: {emotionNames[np.argmax(predictions[0])]}")
print("======================================================")
plt.bar(emotionCodes, predictions[0])
plt.xticks(emotionCodes)
plt.show()
if __name__ == "__main__":
main()
| true
| true
|
1c428aad04470cc050f9150015da22fd25145f7a
| 7,723
|
py
|
Python
|
tests/test_cli_analyze.py
|
tk-ML/SALib
|
b090a1699e6df9b789723bf0097521e5dc316e4c
|
[
"MIT"
] | 1
|
2021-06-22T08:27:17.000Z
|
2021-06-22T08:27:17.000Z
|
tests/test_cli_analyze.py
|
JimmyKude/SALib
|
2545a439ca474a673fddadf0399f7c4e21000d99
|
[
"MIT"
] | null | null | null |
tests/test_cli_analyze.py
|
JimmyKude/SALib
|
2545a439ca474a673fddadf0399f7c4e21000d99
|
[
"MIT"
] | null | null | null |
import sys
import subprocess
from SALib.test_functions import Ishigami
import numpy as np
import re
salib_cli = "./src/SALib/scripts/salib.py"
ishigami_fp = "./src/SALib/test_functions/params/Ishigami.txt"
if sys.version_info[0] == 2:
subprocess.run = subprocess.call
def test_delta():
cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1024"\
.format(cli=salib_cli, fn=ishigami_fp) +\
" --precision 8 --max-order 2 --seed=100"
subprocess.run(cmd.split())
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze delta -p {fn} -X model_input.txt \
-Y model_output.txt -c 0 -r 10 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = 'Parameterdeltadelta_confS1S1_confx10.2122850.0074810.3123190.011463x20.3530150.0061840.4306860.013135x30.1613440.0057540.0013880.001545'
assert len(result) > 0 and result in expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
def test_dgsm():
# Generate inputs
cmd = "python {cli} sample finite_diff -p {fn} -o model_input.txt -d 0.001\
--precision=8 -n 1000 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze dgsm -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 -r 1000 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "Parametervivi_stddgsmdgsm_confx17.69803416.3731482.2331100.986061x224.48770117.3199737.1035971.092944x311.05754523.7851003.2076651.488346"
assert len(result) > 0 and result == expected, \
"Unexpected DGSM results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_fast():
# Generate inputs
cmd = "python {cli} sample fast_sampler -p {fn} -o model_input.txt \
--precision=8 -n 1000 -M 4 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze fast -p {fn} \
-Y model_output.txt -c 0 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterFirstTotalx10.3104030.555603x20.4425530.469546x30.0000000.239155"
assert len(result) > 0 and result == expected, \
"Unexpected FAST results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_ff():
# Generate inputs
cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
--precision=8 -n 1000 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze ff -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterMEx10.000000x20.000000x30.000000dummy_00.000000('x1','x2')0.000000('x1','x3')0.000000('x2','x3')0.000000('x1','dummy_0')0.000000('x2','dummy_0')0.000000('x3','dummy_0')0.000000"
assert len(result) > 0 and result == expected, \
"Unexpected FF results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_morris():
# Generate inputs
cmd = "python {cli} sample morris -p {fn} -o model_input.txt -n 100\
--precision=8 --levels=10 --seed=100 -lo False"\
.format(cli=salib_cli, fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
# run analysis
analyze_cmd = "python {cli} analyze morris -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 -r 1000 -l 10 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = """ParameterMu_StarMuMu_Star_ConfSigmax17.4997.4991.8019.330x22.215-0.4700.3482.776x35.4240.8641.1487.862"""
assert len(result) > 0 and result == expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
def test_rbd_fast():
# Generate inputs
cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
--precision=8 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()
subprocess.run(cmd)
# Run model and save output
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze rbd_fast -p {fn} -X model_input.txt\
-Y model_output.txt --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
# run analysis and use regex to strip all whitespace from result
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterFirstx10.39223x20.299578x30.0342307"
assert len(result) > 0 and result == expected, \
"Unexpected RBD-FAST results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_sobol():
# Generate inputs
cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1024\
--precision 8 --max-order 2 --seed=100".format(cli=salib_cli,
fn=ishigami_fp)
cmd = cmd.split()
result = subprocess.check_output(cmd, universal_newlines=True)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze sobol -p {fn}\
-Y model_output.txt -c 0 --max-order 2\
-r 1000 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = 'ParameterS1S1_confSTST_confx10.3168320.0622410.5558600.085972x20.4437630.0560470.4418980.041596x30.0122030.0559540.2446750.025332Parameter_1Parameter_2S2S2_confx1x20.0092540.083829x1x30.2381720.101764x2x3-0.0048880.067819'
assert len(result) > 0 and result == expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
if __name__ == '__main__':
test_delta()
test_dgsm()
test_fast()
test_ff()
test_morris()
test_rbd_fast()
test_sobol()
| 38.422886
| 245
| 0.65661
|
import sys
import subprocess
from SALib.test_functions import Ishigami
import numpy as np
import re
salib_cli = "./src/SALib/scripts/salib.py"
ishigami_fp = "./src/SALib/test_functions/params/Ishigami.txt"
if sys.version_info[0] == 2:
subprocess.run = subprocess.call
def test_delta():
cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1024"\
.format(cli=salib_cli, fn=ishigami_fp) +\
" --precision 8 --max-order 2 --seed=100"
subprocess.run(cmd.split())
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze delta -p {fn} -X model_input.txt \
-Y model_output.txt -c 0 -r 10 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = 'Parameterdeltadelta_confS1S1_confx10.2122850.0074810.3123190.011463x20.3530150.0061840.4306860.013135x30.1613440.0057540.0013880.001545'
assert len(result) > 0 and result in expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
def test_dgsm():
cmd = "python {cli} sample finite_diff -p {fn} -o model_input.txt -d 0.001\
--precision=8 -n 1000 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze dgsm -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 -r 1000 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "Parametervivi_stddgsmdgsm_confx17.69803416.3731482.2331100.986061x224.48770117.3199737.1035971.092944x311.05754523.7851003.2076651.488346"
assert len(result) > 0 and result == expected, \
"Unexpected DGSM results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_fast():
cmd = "python {cli} sample fast_sampler -p {fn} -o model_input.txt \
--precision=8 -n 1000 -M 4 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze fast -p {fn} \
-Y model_output.txt -c 0 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterFirstTotalx10.3104030.555603x20.4425530.469546x30.0000000.239155"
assert len(result) > 0 and result == expected, \
"Unexpected FAST results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_ff():
cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
--precision=8 -n 1000 --seed=100".format(cli=salib_cli,
fn=ishigami_fp).split()
subprocess.run(cmd)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze ff -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterMEx10.000000x20.000000x30.000000dummy_00.000000('x1','x2')0.000000('x1','x3')0.000000('x2','x3')0.000000('x1','dummy_0')0.000000('x2','dummy_0')0.000000('x3','dummy_0')0.000000"
assert len(result) > 0 and result == expected, \
"Unexpected FF results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_morris():
cmd = "python {cli} sample morris -p {fn} -o model_input.txt -n 100\
--precision=8 --levels=10 --seed=100 -lo False"\
.format(cli=salib_cli, fn=ishigami_fp).split()
subprocess.run(cmd)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze morris -p {fn} -X model_input.txt\
-Y model_output.txt -c 0 -r 1000 -l 10 --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = """ParameterMu_StarMuMu_Star_ConfSigmax17.4997.4991.8019.330x22.215-0.4700.3482.776x35.4240.8641.1487.862"""
assert len(result) > 0 and result == expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
def test_rbd_fast():
cmd = "python {cli} sample ff -p {fn} -o model_input.txt \
--precision=8 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()
subprocess.run(cmd)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze rbd_fast -p {fn} -X model_input.txt\
-Y model_output.txt --seed=100"\
.format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected = "ParameterFirstx10.39223x20.299578x30.0342307"
assert len(result) > 0 and result == expected, \
"Unexpected RBD-FAST results.\n\nExpected:\n{}\n\nGot:{}"\
.format(expected, result)
def test_sobol():
cmd = "python {cli} sample saltelli -p {fn} -o model_input.txt -n 1024\
--precision 8 --max-order 2 --seed=100".format(cli=salib_cli,
fn=ishigami_fp)
cmd = cmd.split()
result = subprocess.check_output(cmd, universal_newlines=True)
np.savetxt('model_output.txt', Ishigami.evaluate(
np.loadtxt('model_input.txt')))
analyze_cmd = "python {cli} analyze sobol -p {fn}\
-Y model_output.txt -c 0 --max-order 2\
-r 1000 --seed=100".format(cli=salib_cli, fn=ishigami_fp).split()
result = subprocess.check_output(analyze_cmd, universal_newlines=True)
result = re.sub(r'[\n\t\s]*', '', result)
expected_output = 'ParameterS1S1_confSTST_confx10.3168320.0622410.5558600.085972x20.4437630.0560470.4418980.041596x30.0122030.0559540.2446750.025332Parameter_1Parameter_2S2S2_confx1x20.0092540.083829x1x30.2381720.101764x2x3-0.0048880.067819'
assert len(result) > 0 and result == expected_output, \
"Results did not match expected values:\n\n Expected: \n{} \n\n Got: \n{}".format(
expected_output, result)
if __name__ == '__main__':
test_delta()
test_dgsm()
test_fast()
test_ff()
test_morris()
test_rbd_fast()
test_sobol()
| true
| true
|
1c428bebeb6bf553a525fd33bd5c9ffd2aac7acd
| 944
|
py
|
Python
|
michelanglo_app/security.py
|
matteoferla/PyMOL-to-NGL-transpiler
|
3e1ce6cecde4b913188afa4472148bc5ab7f34a8
|
[
"MIT"
] | 11
|
2020-03-23T03:41:36.000Z
|
2022-03-11T18:53:26.000Z
|
michelanglo_app/security.py
|
thesgc/MichelaNGLo
|
ad8a2e5207638e82bb4163662af351b4864f39c1
|
[
"MIT"
] | 1
|
2020-05-13T20:07:51.000Z
|
2020-06-25T04:44:57.000Z
|
michelanglo_app/security.py
|
thesgc/MichelaNGLo
|
ad8a2e5207638e82bb4163662af351b4864f39c1
|
[
"MIT"
] | 1
|
2019-11-11T19:12:26.000Z
|
2019-11-11T19:12:26.000Z
|
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from .models import User
class MyAuthenticationPolicy(AuthTktAuthenticationPolicy):
def authenticated_userid(self, request):
user = request.user
if user is not None:
return user.id
def get_user(request):
user_id = request.unauthenticated_userid
if user_id == 'None': ## this only happens on dev. No idea why.
user_id = None
if user_id is not None:
user = request.dbsession.query(User).get(user_id)
return user
def includeme(config):
settings = config.get_settings()
authn_policy = MyAuthenticationPolicy(
settings['auth.secret'],
hashalg='sha512',
)
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(ACLAuthorizationPolicy())
config.add_request_method(get_user, 'user', reify=True)
| 31.466667
| 67
| 0.723517
|
from pyramid.authentication import AuthTktAuthenticationPolicy
from pyramid.authorization import ACLAuthorizationPolicy
from .models import User
class MyAuthenticationPolicy(AuthTktAuthenticationPolicy):
def authenticated_userid(self, request):
user = request.user
if user is not None:
return user.id
def get_user(request):
user_id = request.unauthenticated_userid
if user_id == 'None': s not None:
user = request.dbsession.query(User).get(user_id)
return user
def includeme(config):
settings = config.get_settings()
authn_policy = MyAuthenticationPolicy(
settings['auth.secret'],
hashalg='sha512',
)
config.set_authentication_policy(authn_policy)
config.set_authorization_policy(ACLAuthorizationPolicy())
config.add_request_method(get_user, 'user', reify=True)
| true
| true
|
1c428c8f3dd3802d4ef990317e40f6a83e5179fb
| 1,490
|
py
|
Python
|
components/alibi-explain-server/setup.py
|
bmfisher/seldon-core
|
69fbae930dad4999c2225773468fe462442ec8be
|
[
"Apache-2.0"
] | null | null | null |
components/alibi-explain-server/setup.py
|
bmfisher/seldon-core
|
69fbae930dad4999c2225773468fe462442ec8be
|
[
"Apache-2.0"
] | 60
|
2021-06-18T03:25:51.000Z
|
2022-03-28T04:07:46.000Z
|
components/alibi-explain-server/setup.py
|
rajgupt/seldon-core
|
e48d01bc02a424d94b7a7303798b5cd7e7e82e13
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 kubeflow.org.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from setuptools import setup, find_packages
tests_require = ["pytest", "pytest-tornasync", "mypy"]
setup(
name="alibiexplainer",
version="0.3.0",
author_email="cc@seldon.io",
license="../../LICENSE.txt",
url="https://github.com/SeldonIO/seldon-core/tree/master/components/alibi-explain-server",
description="Model Explanation Server.",
long_description=open("README.md").read(),
python_requires=">=3.6",
packages=find_packages("alibiexplainer"),
install_requires=[
"kfserving>=0.3.0",
"alibi==0.5.5",
"scikit-learn>= 0.23.0",
"argparse>=1.4.0",
"requests>=2.22.0",
"joblib>=0.13.2",
"dill>=0.3.0",
"grpcio>=1.22.0",
"xgboost==1.0.2",
"lightgbm==3.1.1",
"catboost==0.24.4",
"shap==0.36.0",
],
tests_require=tests_require,
extras_require={"test": tests_require},
)
| 32.391304
| 94
| 0.655705
|
from setuptools import setup, find_packages
tests_require = ["pytest", "pytest-tornasync", "mypy"]
setup(
name="alibiexplainer",
version="0.3.0",
author_email="cc@seldon.io",
license="../../LICENSE.txt",
url="https://github.com/SeldonIO/seldon-core/tree/master/components/alibi-explain-server",
description="Model Explanation Server.",
long_description=open("README.md").read(),
python_requires=">=3.6",
packages=find_packages("alibiexplainer"),
install_requires=[
"kfserving>=0.3.0",
"alibi==0.5.5",
"scikit-learn>= 0.23.0",
"argparse>=1.4.0",
"requests>=2.22.0",
"joblib>=0.13.2",
"dill>=0.3.0",
"grpcio>=1.22.0",
"xgboost==1.0.2",
"lightgbm==3.1.1",
"catboost==0.24.4",
"shap==0.36.0",
],
tests_require=tests_require,
extras_require={"test": tests_require},
)
| true
| true
|
1c428cd385440ea3c0ccbe81db1d5ce97b0ee4ef
| 1,047
|
py
|
Python
|
setup.py
|
cablelabs/snaps-orchestration
|
0c55208e73e8ecd6c9e365215f931b5612a82ea9
|
[
"Apache-2.0"
] | 1
|
2021-05-17T13:52:20.000Z
|
2021-05-17T13:52:20.000Z
|
setup.py
|
cablelabs/snaps-common
|
d65764d19414510e93906da706afda2b6d2aa7fb
|
[
"Apache-2.0"
] | 10
|
2019-02-28T21:35:34.000Z
|
2021-09-23T23:25:05.000Z
|
setup.py
|
cablelabs/snaps-orchestration
|
0c55208e73e8ecd6c9e365215f931b5612a82ea9
|
[
"Apache-2.0"
] | 1
|
2019-02-13T10:10:41.000Z
|
2019-02-13T10:10:41.000Z
|
# Copyright (c) 2018 Cable Television Laboratories, Inc. ("CableLabs")
# and others. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
try:
import multiprocessing # noqa
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
test_suite='tests',
pbr=True)
| 36.103448
| 74
| 0.729704
|
import setuptools
import multiprocessing
except ImportError:
pass
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
test_suite='tests',
pbr=True)
| true
| true
|
1c428d1476cfb6464eafd4c10af8c2321eaeb5b8
| 4,846
|
py
|
Python
|
test_lanenet.py
|
sschoedel/lanenet-lane-detection
|
210116a7d4d1324fcfb2b8b50404e4c3bf99811f
|
[
"Apache-2.0"
] | null | null | null |
test_lanenet.py
|
sschoedel/lanenet-lane-detection
|
210116a7d4d1324fcfb2b8b50404e4c3bf99811f
|
[
"Apache-2.0"
] | null | null | null |
test_lanenet.py
|
sschoedel/lanenet-lane-detection
|
210116a7d4d1324fcfb2b8b50404e4c3bf99811f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Time : 18-5-23 上午11:33
# @Author : MaybeShewill-CV
# @Site : https://github.com/MaybeShewill-CV/lanenet-lane-detection
# @File : test_lanenet.py
# @IDE: PyCharm Community Edition
"""
test LaneNet model on single image
"""
import argparse
import os.path as ops
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from lanenet_model import lanenet
from lanenet_model import lanenet_postprocess
from local_utils.config_utils import parse_config_utils
from local_utils.log_util import init_logger
CFG = parse_config_utils.lanenet_cfg
LOG = init_logger.get_logger(log_file_name_prefix='lanenet_test')
def init_args():
"""
:return:
"""
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type=str, help='The image path or the src image save dir')
parser.add_argument('--weights_path', type=str, help='The model weights path')
return parser.parse_args()
def args_str2bool(arg_value):
"""
:param arg_value:
:return:
"""
if arg_value.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg_value.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def minmax_scale(input_arr):
"""
:param input_arr:
:return:
"""
min_val = np.min(input_arr)
max_val = np.max(input_arr)
output_arr = (input_arr - min_val) * 255.0 / (max_val - min_val)
return output_arr
def test_lanenet(image_path, weights_path):
"""
:param image_path:
:param weights_path:
:return:
"""
assert ops.exists(image_path), '{:s} not exist'.format(image_path)
LOG.info('Start reading image and preprocessing')
t_start = time.time()
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
image_vis = image
image = cv2.resize(image, (512, 256), interpolation=cv2.INTER_LINEAR)
image = image / 127.5 - 1.0
LOG.info('Image load complete, cost time: {:.5f}s'.format(time.time() - t_start))
input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input_tensor')
net = lanenet.LaneNet(phase='test', cfg=CFG)
binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor, name='LaneNet')
postprocessor = lanenet_postprocess.LaneNetPostProcessor(cfg=CFG)
# Set sess configuration
sess_config = tf.ConfigProto()
sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.GPU.GPU_MEMORY_FRACTION
sess_config.gpu_options.allow_growth = CFG.GPU.TF_ALLOW_GROWTH
sess_config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=sess_config)
# define moving average version of the learned variables for eval
with tf.variable_scope(name_or_scope='moving_avg'):
variable_averages = tf.train.ExponentialMovingAverage(
CFG.SOLVER.MOVING_AVE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
# define saver
saver = tf.train.Saver(variables_to_restore)
# saver = tf.train.import_meta_graph(variables_to_restore)
with sess.as_default():
saver.restore(sess=sess, save_path=weights_path)
# print("weights path")
# print(weights_path)
# saver.restore(sess, tf.train.latest_checkpoint(weights_path))
t_start = time.time()
loop_times = 500
for i in range(loop_times):
binary_seg_image, instance_seg_image = sess.run(
[binary_seg_ret, instance_seg_ret],
feed_dict={input_tensor: [image]}
)
t_cost = time.time() - t_start
t_cost /= loop_times
LOG.info('Single image inference cost time: {:.5f}s'.format(t_cost))
postprocess_result = postprocessor.postprocess(
binary_seg_result=binary_seg_image[0],
instance_seg_result=instance_seg_image[0],
source_image=image_vis
)
mask_image = postprocess_result['mask_image']
for i in range(CFG.MODEL.EMBEDDING_FEATS_DIMS):
instance_seg_image[0][:, :, i] = minmax_scale(instance_seg_image[0][:, :, i])
embedding_image = np.array(instance_seg_image[0], np.uint8)
plt.figure('mask_image')
plt.imshow(mask_image[:, :, (2, 1, 0)])
plt.figure('src_image')
plt.imshow(image_vis[:, :, (2, 1, 0)])
plt.figure('instance_image')
plt.imshow(embedding_image[:, :, (2, 1, 0)])
plt.figure('binary_image')
plt.imshow(binary_seg_image[0] * 255, cmap='gray')
plt.show()
sess.close()
return
if __name__ == '__main__':
"""
test code
"""
# init args
args = init_args()
test_lanenet(args.image_path, args.weights_path)
| 29.730061
| 98
| 0.666529
|
import argparse
import os.path as ops
import time
import cv2
import matplotlib.pyplot as plt
import numpy as np
import tensorflow as tf
from lanenet_model import lanenet
from lanenet_model import lanenet_postprocess
from local_utils.config_utils import parse_config_utils
from local_utils.log_util import init_logger
CFG = parse_config_utils.lanenet_cfg
LOG = init_logger.get_logger(log_file_name_prefix='lanenet_test')
def init_args():
parser = argparse.ArgumentParser()
parser.add_argument('--image_path', type=str, help='The image path or the src image save dir')
parser.add_argument('--weights_path', type=str, help='The model weights path')
return parser.parse_args()
def args_str2bool(arg_value):
if arg_value.lower() in ('yes', 'true', 't', 'y', '1'):
return True
elif arg_value.lower() in ('no', 'false', 'f', 'n', '0'):
return False
else:
raise argparse.ArgumentTypeError('Unsupported value encountered.')
def minmax_scale(input_arr):
min_val = np.min(input_arr)
max_val = np.max(input_arr)
output_arr = (input_arr - min_val) * 255.0 / (max_val - min_val)
return output_arr
def test_lanenet(image_path, weights_path):
assert ops.exists(image_path), '{:s} not exist'.format(image_path)
LOG.info('Start reading image and preprocessing')
t_start = time.time()
image = cv2.imread(image_path, cv2.IMREAD_COLOR)
image_vis = image
image = cv2.resize(image, (512, 256), interpolation=cv2.INTER_LINEAR)
image = image / 127.5 - 1.0
LOG.info('Image load complete, cost time: {:.5f}s'.format(time.time() - t_start))
input_tensor = tf.placeholder(dtype=tf.float32, shape=[1, 256, 512, 3], name='input_tensor')
net = lanenet.LaneNet(phase='test', cfg=CFG)
binary_seg_ret, instance_seg_ret = net.inference(input_tensor=input_tensor, name='LaneNet')
postprocessor = lanenet_postprocess.LaneNetPostProcessor(cfg=CFG)
sess_config = tf.ConfigProto()
sess_config.gpu_options.per_process_gpu_memory_fraction = CFG.GPU.GPU_MEMORY_FRACTION
sess_config.gpu_options.allow_growth = CFG.GPU.TF_ALLOW_GROWTH
sess_config.gpu_options.allocator_type = 'BFC'
sess = tf.Session(config=sess_config)
with tf.variable_scope(name_or_scope='moving_avg'):
variable_averages = tf.train.ExponentialMovingAverage(
CFG.SOLVER.MOVING_AVE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()
saver = tf.train.Saver(variables_to_restore)
with sess.as_default():
saver.restore(sess=sess, save_path=weights_path)
t_start = time.time()
loop_times = 500
for i in range(loop_times):
binary_seg_image, instance_seg_image = sess.run(
[binary_seg_ret, instance_seg_ret],
feed_dict={input_tensor: [image]}
)
t_cost = time.time() - t_start
t_cost /= loop_times
LOG.info('Single image inference cost time: {:.5f}s'.format(t_cost))
postprocess_result = postprocessor.postprocess(
binary_seg_result=binary_seg_image[0],
instance_seg_result=instance_seg_image[0],
source_image=image_vis
)
mask_image = postprocess_result['mask_image']
for i in range(CFG.MODEL.EMBEDDING_FEATS_DIMS):
instance_seg_image[0][:, :, i] = minmax_scale(instance_seg_image[0][:, :, i])
embedding_image = np.array(instance_seg_image[0], np.uint8)
plt.figure('mask_image')
plt.imshow(mask_image[:, :, (2, 1, 0)])
plt.figure('src_image')
plt.imshow(image_vis[:, :, (2, 1, 0)])
plt.figure('instance_image')
plt.imshow(embedding_image[:, :, (2, 1, 0)])
plt.figure('binary_image')
plt.imshow(binary_seg_image[0] * 255, cmap='gray')
plt.show()
sess.close()
return
if __name__ == '__main__':
args = init_args()
test_lanenet(args.image_path, args.weights_path)
| true
| true
|
1c428d60ac80347b2d212dd41c3d7dd711aec90e
| 234
|
py
|
Python
|
inst/admin.py
|
collinsbett29/insta
|
2b78fbef9b4739cfd5fe6e6a86ccf4f850f33581
|
[
"MIT"
] | null | null | null |
inst/admin.py
|
collinsbett29/insta
|
2b78fbef9b4739cfd5fe6e6a86ccf4f850f33581
|
[
"MIT"
] | 3
|
2020-06-06T00:04:18.000Z
|
2021-09-08T01:25:53.000Z
|
inst/admin.py
|
collinsbett29/Instagram
|
2b78fbef9b4739cfd5fe6e6a86ccf4f850f33581
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.contrib import admin
from .models import Profile, Image, Comments
# Register your models here.
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Comments)
| 26
| 44
| 0.824786
|
from __future__ import unicode_literals
from django.contrib import admin
from .models import Profile, Image, Comments
admin.site.register(Profile)
admin.site.register(Image)
admin.site.register(Comments)
| true
| true
|
1c428dfa6d43a5952b138faaddf251c3aef2beb6
| 2,906
|
py
|
Python
|
pyne/dbgen/kaeri.py
|
ypark234/pyne
|
b7c4932c0399e6a0881aea943b392fb97cd0b6bd
|
[
"MIT"
] | 1
|
2019-10-10T14:14:11.000Z
|
2019-10-10T14:14:11.000Z
|
pyne/dbgen/kaeri.py
|
ypark234/pyne
|
b7c4932c0399e6a0881aea943b392fb97cd0b6bd
|
[
"MIT"
] | 58
|
2019-01-07T16:13:26.000Z
|
2019-05-09T15:56:26.000Z
|
pyne/dbgen/kaeri.py
|
ypark234/pyne
|
b7c4932c0399e6a0881aea943b392fb97cd0b6bd
|
[
"MIT"
] | null | null | null |
from __future__ import print_function
import os
import re
import sys
from warnings import warn
from pyne.utils import QAWarning
try:
import urllib.request as urllib2
from urllib.error import URLError
except ImportError:
import urllib2
from urllib2 import URLError
from pyne import nucname
warn(__name__ + " is not yet QA compliant.", QAWarning)
if sys.version_info[0] > 2:
basestring = str
def grab_kaeri_nuclide(nuc, build_dir="", n=None):
    """Grabs a nuclide file from KAERI from the web and places
    it a {nuc}.html file in the build directory.

    Parameters
    ----------
    nuc : str, int
        nuclide, preferably in name form.
    build_dir : str, optional
        Directory to place html files in.
    n : None or int
        Optional flag on data to grab. None = basic data,
        2 = cross section summary, 3 = cross section graphs.

    Raises
    ------
    URLError
        If the page could not be read after ten attempts.
    """
    if not isinstance(nuc, basestring):
        nuc = nucname.name(nuc).upper()
    if n is None:
        filename = os.path.join(build_dir, nuc + '.html')
        kaeri_url = 'http://atom.kaeri.re.kr/cgi-bin/nuclide?nuc={0}'.format(nuc)
    else:
        filename = os.path.join(build_dir, '{nuc}_{n}.html'.format(nuc=nuc, n=n))
        kaeri_url = 'http://atom.kaeri.re.kr/cgi-bin/nuclide?nuc={0}&n={n}'.format(nuc, n=n)
    print(" getting {0} and placing in {1}".format(nuc, filename))
    # Get the url
    req = urllib2.Request(kaeri_url, headers={'User-Agent': 'Mozilla/5.0'})
    hdl = urllib2.urlopen(req, timeout=30.0)
    i = 1
    # try reading in the data until it works, at most ten times
    kaeri_html = None
    while (kaeri_html is None) and (i <= 10):
        try:
            kaeri_html = hdl.read()
        except URLError:
            hdl.close()
            i += 1
            print(" getting {0} and placing in {1}, attempt {2}".format(nuc, filename, i))
            hdl = urllib2.urlopen(req, timeout=30.0)
    if kaeri_html is None:
        # Previously this fell through to a NameError on the write below;
        # raise the real cause instead.
        raise URLError('failed to read {0} after 10 attempts'.format(kaeri_url))
    # On Python 3, urlopen().read() returns bytes but the file below is
    # opened in text mode; decode so f.write() does not raise TypeError.
    # (On Python 2, bytes is str, so this branch is skipped.)
    if isinstance(kaeri_html, bytes) and not isinstance(kaeri_html, str):
        kaeri_html = kaeri_html.decode('utf-8', 'replace')
    # Write out to the file
    with open(filename, 'w') as f:
        f.write(kaeri_html)
# Matches nuclide links ("/cgi-bin/nuclide?nuc=XX123") that are followed by
# a parenthesized value on the same line.  Raw string avoids the invalid
# escape-sequence DeprecationWarning for '\d' on modern Python.
nat_iso_regex = re.compile(r'.*?/cgi-bin/nuclide[?]nuc=([A-Za-z]{1,2}\d{1,3}).*?[(].*?[)]')


def parse_for_natural_isotopes(htmlfile):
    """Parses an elemental html file, returning a set of naturally
    occurring isotopes.

    Parameters
    ----------
    htmlfile : str
        Path to a KAERI elemental html file on disk.

    Returns
    -------
    nat_isos : set of int
        Naturally occurring isotopes in nucname id form.
    """
    nat_isos = set()
    with open(htmlfile, 'r') as f:
        for line in f:
            m = nat_iso_regex.search(line)
            if m is not None:
                nat_isos.add(nucname.id(m.group(1)))
    return nat_isos
# Matches every nuclide link ("/cgi-bin/nuclide?nuc=XX123") on a line.  Raw
# string avoids the invalid escape-sequence DeprecationWarning for '\d'.
all_iso_regex = re.compile(r'.*?/cgi-bin/nuclide[?]nuc=([A-Za-z]{1,2}\d{1,3})')


def parse_for_all_isotopes(htmlfile):
    """Parses an elemental html file, returning a set of all occurring
    isotopes.

    Parameters
    ----------
    htmlfile : str
        Path to a KAERI elemental html file on disk.

    Returns
    -------
    isos : set of int
        All isotopes found in the file, in nucname id form.
    """
    isos = set()
    with open(htmlfile, 'r') as f:
        for line in f:
            m = all_iso_regex.search(line)
            if m is not None:
                isos.add(nucname.id(m.group(1)))
    return isos
| 31.247312
| 93
| 0.611838
|
from __future__ import print_function
import os
import re
import sys
from warnings import warn
from pyne.utils import QAWarning
try:
import urllib.request as urllib2
from urllib.error import URLError
except ImportError:
import urllib2
from urllib2 import URLError
from pyne import nucname
warn(__name__ + " is not yet QA compliant.", QAWarning)
if sys.version_info[0] > 2:
basestring = str
def grab_kaeri_nuclide(nuc, build_dir="", n=None):
if not isinstance(nuc, basestring):
nuc = nucname.name(nuc).upper()
if n is None:
filename = os.path.join(build_dir, nuc + '.html')
kaeri_url = 'http://atom.kaeri.re.kr/cgi-bin/nuclide?nuc={0}'.format(nuc)
else:
filename = os.path.join(build_dir, '{nuc}_{n}.html'.format(nuc=nuc, n=n))
kaeri_url = 'http://atom.kaeri.re.kr/cgi-bin/nuclide?nuc={0}&n={n}'.format(nuc, n=n)
print(" getting {0} and placing in {1}".format(nuc, filename))
req = urllib2.Request(kaeri_url, headers={'User-Agent': 'Mozilla/5.0'})
hdl = urllib2.urlopen(req, timeout=30.0)
i = 1
read_in = False
while (not read_in) and (i <= 10):
try:
kaeri_html = hdl.read()
read_in = True
except URLError:
hdl.close()
i += 1
print(" getting {0} and placing in {1}, attempt {2}".format(nuc, filename, i))
hdl = urllib2.urlopen(req, timeout=30.0)
with open(filename, 'w') as f:
f.write(kaeri_html)
nat_iso_regex = re.compile('.*?/cgi-bin/nuclide[?]nuc=([A-Za-z]{1,2}\d{1,3}).*?[(].*?[)]')
def parse_for_natural_isotopes(htmlfile):
nat_isos = set()
with open(htmlfile, 'r') as f:
for line in f:
m = nat_iso_regex.search(line)
if m is not None:
nat_isos.add(nucname.id(m.group(1)))
return nat_isos
all_iso_regex = re.compile('.*?/cgi-bin/nuclide[?]nuc=([A-Za-z]{1,2}\d{1,3})')
def parse_for_all_isotopes(htmlfile):
isos = set()
with open(htmlfile, 'r') as f:
for line in f:
m = all_iso_regex.search(line)
if m is not None:
isos.add(nucname.id(m.group(1)))
return isos
| true
| true
|
1c428e344a4510bb9bcff5766db0a66d09b5906c
| 327,578
|
py
|
Python
|
test/test_ITU.py
|
the-aerospace-corporation/ITU-Rpy
|
4456da2db9f28453d5a08339c84fe5bf25b999d8
|
[
"MIT"
] | null | null | null |
test/test_ITU.py
|
the-aerospace-corporation/ITU-Rpy
|
4456da2db9f28453d5a08339c84fe5bf25b999d8
|
[
"MIT"
] | null | null | null |
test/test_ITU.py
|
the-aerospace-corporation/ITU-Rpy
|
4456da2db9f28453d5a08339c84fe5bf25b999d8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest as test
import itur
import itur.models as models
import sys
from astropy import units as u
def suite():
    """ A test suite for the ITU-P Recommendations. Recommendations tested:
    * ITU-P R-676-9
    * ITU-P R-676-11
    * ITU-P R-618-12
    * ITU-P R-618-13
    * ITU-P R-453-12
    * ITU-P R-837-6
    * ITU-P R-837-7
    * ITU-P R-838-3
    * ITU-P R-839-4
    * ITU-P R-840-4
    * ITU-P R-840-7
    * ITU-P R-1511-1
    """
    s = test.TestSuite()

    # Pin each model to the recommendation version that the reference
    # values in the test cases below were generated against.
    versions = [
        (models.itu453, 13), (models.itu618, 13), (models.itu676, 11),
        (models.itu836, 6), (models.itu837, 7), (models.itu838, 3),
        (models.itu839, 4), (models.itu840, 7), (models.itu1510, 1),
        (models.itu1511, 1),
    ]
    for mod, version in versions:
        mod.change_version(version)

    # (test-case class, test method) pairs, in execution order.
    cases = [
        # ITU-R P.676 (gaseous attenuation)
        (ITUR676_9TestCase, 'test_gammaw'),
        (ITUR676_9TestCase, 'test_gamma0'),
        (ITUR676_11TestCase, 'test_gammaw_exact'),
        (ITUR676_11TestCase, 'test_gamma0_exact'),
        (ITUR676_11TestCase, 'test_gammaw_approx'),
        (ITUR676_11TestCase, 'test_gamma0_approx'),
        (ITUR676_11TestCase, 'test_zenit_water_vapour_attenuation'),
        # ITU-R P.618 (rain attenuation)
        (ITUR618_12TestCase, 'test_rain_cross_polarization_discrimination'),
        (ITUR618_12TestCase, 'test_rain_attenuation'),
        (ITUR618_12TestCase, 'test_scintillation_attenuation'),
        (ITUR618_13TestCase, 'test_rain_attenuation'),
        (ITUR618_13TestCase, 'test_probability_of_rain_attenuation'),
        (ITUR618_13TestCase, 'test_scintillation_attenuation'),
        (ITUR618_13TestCase, 'test_rain_cross_polarization_discrimination'),
        (ITUR618_13TestCase, 'test_total_attenuation'),
        # ITU-R P.453 (wet term radio refractivity)
        (ITUR453_12TestCase, 'test_wet_term_radio_refractivity'),
        (ITUR453_13TestCase, 'test_wet_term_radio_refractivity'),
        # ITU-R P.836 (water vapour density)
        (ITUR836_6TestCase, 'surface_water_vapour_density'),
        (ITUR836_6TestCase, 'total_water_vapour_content'),
        # ITU-R P.837 (rainfall rate)
        (ITUR837_6TestCase, 'test_rainfall_rate'),
        (ITUR837_7TestCase, 'test_rainfall_rate'),
        (ITUR837_7TestCase, 'test_rainfall_probability'),
        (ITUR837_7TestCase, 'test_rainfall_rate_R001'),
        # ITU-R P.838 (rainfall specific attenuation)
        (ITUR838_3TestCase, 'test_rain_specific_attenuation'),
        # ITU-R P.839 (rain height)
        (ITUR839_4TestCase, 'test_isoterm_0_deg'),
        (ITUR839_4TestCase, 'test_rain_height'),
        # ITU-R P.840 (clouds attenuation)
        (ITUR840_7TestCase, 'test_columnar_content_reduced_liquid'),
        (ITUR840_7TestCase, 'test_cloud_attenuation'),
        # ITU-R P.1511 (topographic altitude)
        (ITUR1511_1TestCase, 'test_topographic_altitude'),
        (ITUR1511_2TestCase, 'test_topographic_altitude'),
    ]
    for case_cls, method_name in cases:
        s.addTest(case_cls(method_name))

    return s
class ITUR453_12TestCase(test.TestCase):
    """Validation tests for ITU-R P.453-12 (wet term of radio refractivity)."""

    def setUp(self):
        # Pin the model to the version the reference values were taken from.
        models.itu453.change_version(12)

    def test_wet_term_radio_refractivity(self):
        # (lat, lon, expected N_wet) validation points.  The original test
        # repeated each assertion block verbatim; duplicates removed since
        # the calls are deterministic.
        cases = [
            (51.5, 359.86, 45.130667),
            (41.9, 12.49, 53.756489),
            (33.94, 18.43, 76.349680),
            (22.9, 316.77, 87.907733),
            (25.78, 279.78, 101.416373),
            (28.717, 77.3, 60.060569),
            (3.133, 101.7, 105.920333),
            (9.05, 38.7, 50.162000),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu453.map_wet_term_radio_refractivity(lat, lon).value,
                expected, places=5)
class ITUR453_13TestCase(test.TestCase):
    """Validation tests for ITU-R P.453-13 (wet term of radio refractivity)."""

    def setUp(self):
        # Pin the model to the version the reference values were taken from.
        models.itu453.change_version(13)

    def test_wet_term_radio_refractivity(self):
        # (lat, lon, p, expected N_wet) validation points from the
        # P.453-13 reference data.
        cases = [
            (3.133, 101.7, 50, 128.14080027),
            (22.9, -43.23, 50, 104.35847467),
            (23, 30, 50, 36.47166667),
            (25.78, -80.22, 50, 113.2738672),
            (28.717, 77.3, 50, 75.66013547),
            (33.94, 18.43, 50, 80.14015964),
            (41.9, 12.49, 50, 61.21890044),
            (51.5, -0.14, 50, 50.38926222),
        ]
        for lat, lon, p, expected in cases:
            self.assertAlmostEqual(
                models.itu453.map_wet_term_radio_refractivity(lat, lon, p).value,
                expected, places=5)
class ITUR676_9TestCase(test.TestCase):
    """Validation tests for ITU-R P.676-9 (gaseous attenuation)."""

    def setUp(self):
        # Pin the models to the versions the reference values come from.
        models.itu676.change_version(9)
        models.itu836.change_version(4)

    def test_gammaw(self):
        # Specific attenuation due to water vapour: gammaw_approx(f, P, rho, T).
        # The ITU validation data converts Celsius to Kelvin with an offset
        # of 273 instead of 273.15, hence the -0.15 correction below.
        rho = 4.98154290000
        T = (5.9435147000 - 0.15) * u.deg_C
        cases = [  # (frequency [GHz], expected gamma_w [dB/km])
            (12, 0.00705700000),
            (20, 0.06742720000),
            (60, 0.11538020000),
            (90, 0.25568340000),
            (130, 0.56358380000),
        ]
        for f, expected in cases:
            self.assertAlmostEqual(
                models.itu676.gammaw_approx(f, 1013.25, rho, T).value,
                expected, places=5)

    def test_gamma0(self):
        # Specific attenuation due to dry air: gamma0_approx(f, P, rho, T).
        # The original test repeated each assertion up to four times;
        # duplicates removed since the calls are deterministic.
        # (-0.15: same Kelvin-offset correction as in test_gammaw.)
        cases = [  # (frequency [GHz], temperature [K], expected gamma_0)
            (14.25, 282.724, 0.00941327),
            (14.25, 287.4834667, 0.00898682),
            (14.25, 293.1487022, 0.00851359),
            (29, 282.724, 0.02043748),
            (29, 287.4834667, 0.01954568),
            (29, 293.1487022, 0.01856193),
            (14.25, 296.602, 0.00824203),
            (14.25, 296.7208533, 0.0082329),
            (29, 296.602, 0.01800011),
            (29, 296.7208533, 0.01798125),
            (14.25, 299.0966578, 0.00805331),
            (14.25, 297.9322267, 0.00814064),
            (14.25, 287.444, 0.00899025),
            (29, 299.0966578, 0.01761077),
            (29, 297.9322267, 0.01779083),
            (29, 287.444, 0.01955282),
        ]
        for f, T, expected in cases:
            self.assertAlmostEqual(
                models.itu676.gamma0_approx(f, 1013.25, 7.5, T - 0.15).value,
                expected, places=5)
class ITUR676_11TestCase(test.TestCase):
def setUp(self):
models.itu676.change_version(11)
models.itu836.change_version(6)
models.itu1511.change_version(1)
def test_gammaw_exact(self):
# The ITU models are non-sense and believe that the conversion between
# Kelvin is 273 instead of 273.15
self.assertAlmostEqual(
models.itu676.gammaw_exact(12, 1013.25, 7.5, 288.15).value,
0.00953539, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(20, 1013.25, 7.5, 288.15).value,
0.09704730, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(60, 1013.25, 7.5, 288.15).value,
0.15484184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(90, 1013.25, 7.5, 288.15).value,
0.34197339, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(130, 1013.25, 7.5, 288.15).value,
0.75184470, places=5)
def test_gamma0_exact(self):
self.assertAlmostEqual(
models.itu676.gamma0_exact(12, 1013.25, 7.5, 288.15).value,
0.00869826, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(20, 1013.25, 7.5, 288.15).value,
0.01188355, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(60, 1013.25, 7.5, 288.15).value,
14.62347480, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(90, 1013.25, 7.5, 288.15).value,
0.03886971, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(130, 1013.25, 7.5, 288.15).value,
0.04150908, places=5)
def test_gammaw_approx(self):
# The ITU models are non-sense and believe that the conversion between
# Kelvin is 273 instead of 273.15
self.assertAlmostEqual(
models.itu676.gammaw_approx(1, 1013.25, 7.5, 288.15).value,
5.06e-05, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(2, 1013.25, 7.5, 288.15).value,
0.000203124, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(3, 1013.25, 7.5, 288.15).value,
0.000459962, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(4, 1013.25, 7.5, 288.15).value,
0.000825295, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(5, 1013.25, 7.5, 288.15).value,
0.001305574, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(6, 1013.25, 7.5, 288.15).value,
0.001910194, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(7, 1013.25, 7.5, 288.15).value,
0.00265257, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(8, 1013.25, 7.5, 288.15).value,
0.00355178, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(9, 1013.25, 7.5, 288.15).value,
0.00463511, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(10, 1013.25, 7.5, 288.15).value,
0.005942065, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(11, 1013.25, 7.5, 288.15).value,
0.007530789, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(12, 1013.25, 7.5, 288.15).value,
0.009488627, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(13, 1013.25, 7.5, 288.15).value,
0.01194992, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(14, 1013.25, 7.5, 288.15).value,
0.015126834, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(15, 1013.25, 7.5, 288.15).value,
0.019364141, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(16, 1013.25, 7.5, 288.15).value,
0.025238305, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(17, 1013.25, 7.5, 288.15).value,
0.033736014, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(18, 1013.25, 7.5, 288.15).value,
0.04655406, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(19, 1013.25, 7.5, 288.15).value,
0.066459485, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(20, 1013.25, 7.5, 288.15).value,
0.096940958, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(21, 1013.25, 7.5, 288.15).value,
0.137887422, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(22, 1013.25, 7.5, 288.15).value,
0.17418431, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(23, 1013.25, 7.5, 288.15).value,
0.180393135, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(24, 1013.25, 7.5, 288.15).value,
0.15839854, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(25, 1013.25, 7.5, 288.15).value,
0.130540688, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(26, 1013.25, 7.5, 288.15).value,
0.108338372, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(27, 1013.25, 7.5, 288.15).value,
0.092962551, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(28, 1013.25, 7.5, 288.15).value,
0.082791566, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(29, 1013.25, 7.5, 288.15).value,
0.076209755, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(30, 1013.25, 7.5, 288.15).value,
0.072073391, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(31, 1013.25, 7.5, 288.15).value,
0.069632181, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(32, 1013.25, 7.5, 288.15).value,
0.06839841, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(33, 1013.25, 7.5, 288.15).value,
0.068050819, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(34, 1013.25, 7.5, 288.15).value,
0.068373336, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(35, 1013.25, 7.5, 288.15).value,
0.069217296, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(36, 1013.25, 7.5, 288.15).value,
0.070478105, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(37, 1013.25, 7.5, 288.15).value,
0.072080617, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(38, 1013.25, 7.5, 288.15).value,
0.073969796, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(39, 1013.25, 7.5, 288.15).value,
0.076104615, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(40, 1013.25, 7.5, 288.15).value,
0.078454003, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(41, 1013.25, 7.5, 288.15).value,
0.080994086, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(42, 1013.25, 7.5, 288.15).value,
0.08370628, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(43, 1013.25, 7.5, 288.15).value,
0.086575946, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(44, 1013.25, 7.5, 288.15).value,
0.089591433, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(45, 1013.25, 7.5, 288.15).value,
0.092743375, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(46, 1013.25, 7.5, 288.15).value,
0.096024183, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(47, 1013.25, 7.5, 288.15).value,
0.099427654, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(48, 1013.25, 7.5, 288.15).value,
0.102948692, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(49, 1013.25, 7.5, 288.15).value,
0.106583076, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(50, 1013.25, 7.5, 288.15).value,
0.110327299, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(51, 1013.25, 7.5, 288.15).value,
0.11417843, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(52, 1013.25, 7.5, 288.15).value,
0.118134012, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(53, 1013.25, 7.5, 288.15).value,
0.122191981, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(54, 1013.25, 7.5, 288.15).value,
0.126350598, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(55, 1013.25, 7.5, 288.15).value,
0.130608397, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(56, 1013.25, 7.5, 288.15).value,
0.134964144, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(57, 1013.25, 7.5, 288.15).value,
0.139416798, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(58, 1013.25, 7.5, 288.15).value,
0.143965489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(59, 1013.25, 7.5, 288.15).value,
0.148609489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(60, 1013.25, 7.5, 288.15).value,
0.153348196, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(61, 1013.25, 7.5, 288.15).value,
0.158181114, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(62, 1013.25, 7.5, 288.15).value,
0.163107847, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(63, 1013.25, 7.5, 288.15).value,
0.168128079, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(64, 1013.25, 7.5, 288.15).value,
0.173241572, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(65, 1013.25, 7.5, 288.15).value,
0.178448154, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(66, 1013.25, 7.5, 288.15).value,
0.183747712, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(67, 1013.25, 7.5, 288.15).value,
0.18914019, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(68, 1013.25, 7.5, 288.15).value,
0.194625582, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(69, 1013.25, 7.5, 288.15).value,
0.200203926, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(70, 1013.25, 7.5, 288.15).value,
0.205875306, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(71, 1013.25, 7.5, 288.15).value,
0.211639845, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(72, 1013.25, 7.5, 288.15).value,
0.217497702, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(73, 1013.25, 7.5, 288.15).value,
0.223449076, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(74, 1013.25, 7.5, 288.15).value,
0.229494196, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(75, 1013.25, 7.5, 288.15).value,
0.235633329, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(76, 1013.25, 7.5, 288.15).value,
0.241866771, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(77, 1013.25, 7.5, 288.15).value,
0.248194851, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(78, 1013.25, 7.5, 288.15).value,
0.254617931, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(79, 1013.25, 7.5, 288.15).value,
0.261136401, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(80, 1013.25, 7.5, 288.15).value,
0.267750686, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(81, 1013.25, 7.5, 288.15).value,
0.274461239, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(82, 1013.25, 7.5, 288.15).value,
0.281268547, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(83, 1013.25, 7.5, 288.15).value,
0.28817313, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(84, 1013.25, 7.5, 288.15).value,
0.295175539, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(85, 1013.25, 7.5, 288.15).value,
0.302276362, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(86, 1013.25, 7.5, 288.15).value,
0.309476219, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(87, 1013.25, 7.5, 288.15).value,
0.316775769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(88, 1013.25, 7.5, 288.15).value,
0.324175708, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(89, 1013.25, 7.5, 288.15).value,
0.331676772, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(90, 1013.25, 7.5, 288.15).value,
0.339279738, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(91, 1013.25, 7.5, 288.15).value,
0.346985426, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(92, 1013.25, 7.5, 288.15).value,
0.354794703, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(93, 1013.25, 7.5, 288.15).value,
0.362708483, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(94, 1013.25, 7.5, 288.15).value,
0.370727732, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(95, 1013.25, 7.5, 288.15).value,
0.378853468, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(96, 1013.25, 7.5, 288.15).value,
0.387086768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(97, 1013.25, 7.5, 288.15).value,
0.395428769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(98, 1013.25, 7.5, 288.15).value,
0.403880673, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(99, 1013.25, 7.5, 288.15).value,
0.412443748, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(100, 1013.25, 7.5, 288.15).value,
0.421119341, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(101, 1013.25, 7.5, 288.15).value,
0.429908872, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(102, 1013.25, 7.5, 288.15).value,
0.438813848, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(103, 1013.25, 7.5, 288.15).value,
0.447835866, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(104, 1013.25, 7.5, 288.15).value,
0.456976619, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(105, 1013.25, 7.5, 288.15).value,
0.466237905, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(106, 1013.25, 7.5, 288.15).value,
0.475621633, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(107, 1013.25, 7.5, 288.15).value,
0.485129833, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(108, 1013.25, 7.5, 288.15).value,
0.494764666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(109, 1013.25, 7.5, 288.15).value,
0.504528432, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(110, 1013.25, 7.5, 288.15).value,
0.514423584, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(111, 1013.25, 7.5, 288.15).value,
0.524452741, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(112, 1013.25, 7.5, 288.15).value,
0.5346187, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(113, 1013.25, 7.5, 288.15).value,
0.54492445, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(114, 1013.25, 7.5, 288.15).value,
0.555373195, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(115, 1013.25, 7.5, 288.15).value,
0.565968366, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(116, 1013.25, 7.5, 288.15).value,
0.576713646, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(117, 1013.25, 7.5, 288.15).value,
0.58761299, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(118, 1013.25, 7.5, 288.15).value,
0.598670654, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(119, 1013.25, 7.5, 288.15).value,
0.609891221, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(120, 1013.25, 7.5, 288.15).value,
0.621279631, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(121, 1013.25, 7.5, 288.15).value,
0.63284122, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(122, 1013.25, 7.5, 288.15).value,
0.644581758, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(123, 1013.25, 7.5, 288.15).value,
0.656507491, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(124, 1013.25, 7.5, 288.15).value,
0.668625191, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(125, 1013.25, 7.5, 288.15).value,
0.680942215, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(126, 1013.25, 7.5, 288.15).value,
0.69346656, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(127, 1013.25, 7.5, 288.15).value,
0.70620694, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(128, 1013.25, 7.5, 288.15).value,
0.719172861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(129, 1013.25, 7.5, 288.15).value,
0.73237471, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(130, 1013.25, 7.5, 288.15).value,
0.745823861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(131, 1013.25, 7.5, 288.15).value,
0.759532783, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(132, 1013.25, 7.5, 288.15).value,
0.773515178, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(133, 1013.25, 7.5, 288.15).value,
0.787786128, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(134, 1013.25, 7.5, 288.15).value,
0.802362262, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(135, 1013.25, 7.5, 288.15).value,
0.817261961, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(136, 1013.25, 7.5, 288.15).value,
0.832505575, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(137, 1013.25, 7.5, 288.15).value,
0.848115693, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(138, 1013.25, 7.5, 288.15).value,
0.864117433, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(139, 1013.25, 7.5, 288.15).value,
0.880538802, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(140, 1013.25, 7.5, 288.15).value,
0.897411097, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(141, 1013.25, 7.5, 288.15).value,
0.914769381, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(142, 1013.25, 7.5, 288.15).value,
0.93265304, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(143, 1013.25, 7.5, 288.15).value,
0.951106434, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(144, 1013.25, 7.5, 288.15).value,
0.970179674, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(145, 1013.25, 7.5, 288.15).value,
0.989929528, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(146, 1013.25, 7.5, 288.15).value,
1.010420514, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(147, 1013.25, 7.5, 288.15).value,
1.03172619, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(148, 1013.25, 7.5, 288.15).value,
1.053930717, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(149, 1013.25, 7.5, 288.15).value,
1.077130727, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(150, 1013.25, 7.5, 288.15).value,
1.101437596, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(151, 1013.25, 7.5, 288.15).value,
1.126980206, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(152, 1013.25, 7.5, 288.15).value,
1.153908327, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(153, 1013.25, 7.5, 288.15).value,
1.182396776, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(154, 1013.25, 7.5, 288.15).value,
1.212650574, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(155, 1013.25, 7.5, 288.15).value,
1.244911365, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(156, 1013.25, 7.5, 288.15).value,
1.279465482, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(157, 1013.25, 7.5, 288.15).value,
1.316654129, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(158, 1013.25, 7.5, 288.15).value,
1.356886363, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(159, 1013.25, 7.5, 288.15).value,
1.400655759, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(160, 1013.25, 7.5, 288.15).value,
1.448562004, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(161, 1013.25, 7.5, 288.15).value,
1.501339131, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(162, 1013.25, 7.5, 288.15).value,
1.559892824, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(163, 1013.25, 7.5, 288.15).value,
1.625350216, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(164, 1013.25, 7.5, 288.15).value,
1.699127159, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(165, 1013.25, 7.5, 288.15).value,
1.783020212, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(166, 1013.25, 7.5, 288.15).value,
1.87933414, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(167, 1013.25, 7.5, 288.15).value,
1.991061177, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(168, 1013.25, 7.5, 288.15).value,
2.122137016, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(169, 1013.25, 7.5, 288.15).value,
2.277812508, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(170, 1013.25, 7.5, 288.15).value,
2.465203115, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(171, 1013.25, 7.5, 288.15).value,
2.694116678, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(172, 1013.25, 7.5, 288.15).value,
2.978325696, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(173, 1013.25, 7.5, 288.15).value,
3.337563176, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(174, 1013.25, 7.5, 288.15).value,
3.80071648, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(175, 1013.25, 7.5, 288.15).value,
4.411026238, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(176, 1013.25, 7.5, 288.15).value,
5.23462829, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(177, 1013.25, 7.5, 288.15).value,
6.374446918, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(178, 1013.25, 7.5, 288.15).value,
7.991434174, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(179, 1013.25, 7.5, 288.15).value,
10.33006475, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(180, 1013.25, 7.5, 288.15).value,
13.71659631, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(181, 1013.25, 7.5, 288.15).value,
18.39188186, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(182, 1013.25, 7.5, 288.15).value,
23.83194406, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(183, 1013.25, 7.5, 288.15).value,
27.67449812, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(184, 1013.25, 7.5, 288.15).value,
27.03213321, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(185, 1013.25, 7.5, 288.15).value,
22.60135009, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(186, 1013.25, 7.5, 288.15).value,
17.47071693, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(187, 1013.25, 7.5, 288.15).value,
13.32603388, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(188, 1013.25, 7.5, 288.15).value,
10.35715037, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(189, 1013.25, 7.5, 288.15).value,
8.290514155, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(190, 1013.25, 7.5, 288.15).value,
6.842342894, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(191, 1013.25, 7.5, 288.15).value,
5.808188543, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(192, 1013.25, 7.5, 288.15).value,
5.053346248, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(193, 1013.25, 7.5, 288.15).value,
4.490555513, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(194, 1013.25, 7.5, 288.15).value,
4.062799398, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(195, 1013.25, 7.5, 288.15).value,
3.73214099, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(196, 1013.25, 7.5, 288.15).value,
3.472798768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(197, 1013.25, 7.5, 288.15).value,
3.266872193, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(198, 1013.25, 7.5, 288.15).value,
3.101674612, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(199, 1013.25, 7.5, 288.15).value,
2.968040373, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(200, 1013.25, 7.5, 288.15).value,
2.859229328, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(201, 1013.25, 7.5, 288.15).value,
2.77020383, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(202, 1013.25, 7.5, 288.15).value,
2.697142323, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(203, 1013.25, 7.5, 288.15).value,
2.637106098, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(204, 1013.25, 7.5, 288.15).value,
2.587807034, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(205, 1013.25, 7.5, 288.15).value,
2.547443114, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(206, 1013.25, 7.5, 288.15).value,
2.514580199, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(207, 1013.25, 7.5, 288.15).value,
2.488065853, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(208, 1013.25, 7.5, 288.15).value,
2.466965732, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(209, 1013.25, 7.5, 288.15).value,
2.450516051, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(210, 1013.25, 7.5, 288.15).value,
2.43808768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(211, 1013.25, 7.5, 288.15).value,
2.429158726, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(212, 1013.25, 7.5, 288.15).value,
2.423293411, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(213, 1013.25, 7.5, 288.15).value,
2.420125627, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(214, 1013.25, 7.5, 288.15).value,
2.419346051, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(215, 1013.25, 7.5, 288.15).value,
2.420691947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(216, 1013.25, 7.5, 288.15).value,
2.423939044, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(217, 1013.25, 7.5, 288.15).value,
2.428895021, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(218, 1013.25, 7.5, 288.15).value,
2.435394244, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(219, 1013.25, 7.5, 288.15).value,
2.443293492, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(220, 1013.25, 7.5, 288.15).value,
2.452468459, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(221, 1013.25, 7.5, 288.15).value,
2.462810881, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(222, 1013.25, 7.5, 288.15).value,
2.474226165, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(223, 1013.25, 7.5, 288.15).value,
2.486631411, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(224, 1013.25, 7.5, 288.15).value,
2.499953772, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(225, 1013.25, 7.5, 288.15).value,
2.514129072, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(226, 1013.25, 7.5, 288.15).value,
2.529100646, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(227, 1013.25, 7.5, 288.15).value,
2.544818361, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(228, 1013.25, 7.5, 288.15).value,
2.561237781, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(229, 1013.25, 7.5, 288.15).value,
2.578319459, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(230, 1013.25, 7.5, 288.15).value,
2.596028333, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(231, 1013.25, 7.5, 288.15).value,
2.614333207, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(232, 1013.25, 7.5, 288.15).value,
2.633206307, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(233, 1013.25, 7.5, 288.15).value,
2.652622892, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(234, 1013.25, 7.5, 288.15).value,
2.672560927, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(235, 1013.25, 7.5, 288.15).value,
2.693000794, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(236, 1013.25, 7.5, 288.15).value,
2.713925039, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(237, 1013.25, 7.5, 288.15).value,
2.735318161, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(238, 1013.25, 7.5, 288.15).value,
2.757166418, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(239, 1013.25, 7.5, 288.15).value,
2.779457666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(240, 1013.25, 7.5, 288.15).value,
2.80218121, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(241, 1013.25, 7.5, 288.15).value,
2.825327683, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(242, 1013.25, 7.5, 288.15).value,
2.848888936, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(243, 1013.25, 7.5, 288.15).value,
2.87285794, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(244, 1013.25, 7.5, 288.15).value,
2.897228704, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(245, 1013.25, 7.5, 288.15).value,
2.921996202, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(246, 1013.25, 7.5, 288.15).value,
2.947156311, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(247, 1013.25, 7.5, 288.15).value,
2.972705756, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(248, 1013.25, 7.5, 288.15).value,
2.998642066, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(249, 1013.25, 7.5, 288.15).value,
3.024963531, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(250, 1013.25, 7.5, 288.15).value,
3.051669175, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(251, 1013.25, 7.5, 288.15).value,
3.078758722, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(252, 1013.25, 7.5, 288.15).value,
3.106232585, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(253, 1013.25, 7.5, 288.15).value,
3.13409184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(254, 1013.25, 7.5, 288.15).value,
3.162338224, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(255, 1013.25, 7.5, 288.15).value,
3.190974123, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(256, 1013.25, 7.5, 288.15).value,
3.220002576, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(257, 1013.25, 7.5, 288.15).value,
3.249427276, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(258, 1013.25, 7.5, 288.15).value,
3.27925258, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(259, 1013.25, 7.5, 288.15).value,
3.309483522, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(260, 1013.25, 7.5, 288.15).value,
3.340125831, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(261, 1013.25, 7.5, 288.15).value,
3.371185956, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(262, 1013.25, 7.5, 288.15).value,
3.402671096, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(263, 1013.25, 7.5, 288.15).value,
3.434589233, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(264, 1013.25, 7.5, 288.15).value,
3.466949175, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(265, 1013.25, 7.5, 288.15).value,
3.499760606, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(266, 1013.25, 7.5, 288.15).value,
3.533034141, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(267, 1013.25, 7.5, 288.15).value,
3.566781392, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(268, 1013.25, 7.5, 288.15).value,
3.601015043, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(269, 1013.25, 7.5, 288.15).value,
3.635748934, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(270, 1013.25, 7.5, 288.15).value,
3.670998161, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(271, 1013.25, 7.5, 288.15).value,
3.706779184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(272, 1013.25, 7.5, 288.15).value,
3.743109957, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(273, 1013.25, 7.5, 288.15).value,
3.780010072, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(274, 1013.25, 7.5, 288.15).value,
3.817500924, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(275, 1013.25, 7.5, 288.15).value,
3.855605898, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(276, 1013.25, 7.5, 288.15).value,
3.894350591, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(277, 1013.25, 7.5, 288.15).value,
3.933763053, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(278, 1013.25, 7.5, 288.15).value,
3.97387408, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(279, 1013.25, 7.5, 288.15).value,
4.014717535, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(280, 1013.25, 7.5, 288.15).value,
4.056330735, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(281, 1013.25, 7.5, 288.15).value,
4.098754887, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(282, 1013.25, 7.5, 288.15).value,
4.142035602, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(283, 1013.25, 7.5, 288.15).value,
4.186223487, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(284, 1013.25, 7.5, 288.15).value,
4.231374849, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(285, 1013.25, 7.5, 288.15).value,
4.277552506, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(286, 1013.25, 7.5, 288.15).value,
4.324826757, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(287, 1013.25, 7.5, 288.15).value,
4.373276518, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(288, 1013.25, 7.5, 288.15).value,
4.422990681, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(289, 1013.25, 7.5, 288.15).value,
4.474069728, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(290, 1013.25, 7.5, 288.15).value,
4.526627666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(291, 1013.25, 7.5, 288.15).value,
4.580794366, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(292, 1013.25, 7.5, 288.15).value,
4.63671838, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(293, 1013.25, 7.5, 288.15).value,
4.694570386, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(294, 1013.25, 7.5, 288.15).value,
4.754547391, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(295, 1013.25, 7.5, 288.15).value,
4.816877916, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(296, 1013.25, 7.5, 288.15).value,
4.881828419, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(297, 1013.25, 7.5, 288.15).value,
4.949711312, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(298, 1013.25, 7.5, 288.15).value,
5.020895036, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(299, 1013.25, 7.5, 288.15).value,
5.095816817, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(300, 1013.25, 7.5, 288.15).value,
5.174998967, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(301, 1013.25, 7.5, 288.15).value,
5.259069863, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(302, 1013.25, 7.5, 288.15).value,
5.348791238, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(303, 1013.25, 7.5, 288.15).value,
5.445094008, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(304, 1013.25, 7.5, 288.15).value,
5.549125837, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(305, 1013.25, 7.5, 288.15).value,
5.662315008, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(306, 1013.25, 7.5, 288.15).value,
5.786457272, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(307, 1013.25, 7.5, 288.15).value,
5.923835584, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(308, 1013.25, 7.5, 288.15).value,
6.077387588, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(309, 1013.25, 7.5, 288.15).value,
6.250943769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(310, 1013.25, 7.5, 288.15).value,
6.449572043, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(311, 1013.25, 7.5, 288.15).value,
6.6800861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(312, 1013.25, 7.5, 288.15).value,
6.951811263, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(313, 1013.25, 7.5, 288.15).value,
7.277764947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(314, 1013.25, 7.5, 288.15).value,
7.676520947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(315, 1013.25, 7.5, 288.15).value,
8.175226571, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(316, 1013.25, 7.5, 288.15).value,
8.814587905, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(317, 1013.25, 7.5, 288.15).value,
9.657150573, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(318, 1013.25, 7.5, 288.15).value,
10.80040832, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(319, 1013.25, 7.5, 288.15).value,
12.39287203, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(320, 1013.25, 7.5, 288.15).value,
14.63291434, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(321, 1013.25, 7.5, 288.15).value,
17.71848802, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(322, 1013.25, 7.5, 288.15).value,
21.89833011, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(323, 1013.25, 7.5, 288.15).value,
27.52921207, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(324, 1013.25, 7.5, 288.15).value,
33.93584273, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(325, 1013.25, 7.5, 288.15).value,
37.82487596, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(326, 1013.25, 7.5, 288.15).value,
35.8615979, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(327, 1013.25, 7.5, 288.15).value,
29.89188489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(328, 1013.25, 7.5, 288.15).value,
23.80724266, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(329, 1013.25, 7.5, 288.15).value,
19.19466647, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(330, 1013.25, 7.5, 288.15).value,
16.01196137, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(331, 1013.25, 7.5, 288.15).value,
13.85529573, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(332, 1013.25, 7.5, 288.15).value,
12.38126427, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(333, 1013.25, 7.5, 288.15).value,
11.35803945, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(334, 1013.25, 7.5, 288.15).value,
10.63752623, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(335, 1013.25, 7.5, 288.15).value,
10.12544, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(336, 1013.25, 7.5, 288.15).value,
9.760866789, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(337, 1013.25, 7.5, 288.15).value,
9.503637932, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(338, 1013.25, 7.5, 288.15).value,
9.326698304, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(339, 1013.25, 7.5, 288.15).value,
9.211467317, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(340, 1013.25, 7.5, 288.15).value,
9.144973785, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(341, 1013.25, 7.5, 288.15).value,
9.118051719, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(342, 1013.25, 7.5, 288.15).value,
9.124181675, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(343, 1013.25, 7.5, 288.15).value,
9.158733184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(344, 1013.25, 7.5, 288.15).value,
9.218462001, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(345, 1013.25, 7.5, 288.15).value,
9.301173194, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(346, 1013.25, 7.5, 288.15).value,
9.405494946, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(347, 1013.25, 7.5, 288.15).value,
9.53072851, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(348, 1013.25, 7.5, 288.15).value,
9.676752463, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(349, 1013.25, 7.5, 288.15).value,
9.843967537, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(350, 1013.25, 7.5, 288.15).value,
10.03327368, places=5)
def test_gamma0_approx(self):
self.assertAlmostEqual(
models.itu676.gamma0_approx(1, 1013.25, 7.5, 288.15).value,
0.005388658, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(2, 1013.25, 7.5, 288.15).value,
0.006716038, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(3, 1013.25, 7.5, 288.15).value,
0.00707596, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(4, 1013.25, 7.5, 288.15).value,
0.007258969, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(5, 1013.25, 7.5, 288.15).value,
0.007400426, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(6, 1013.25, 7.5, 288.15).value,
0.007537212, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(7, 1013.25, 7.5, 288.15).value,
0.007682905, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(8, 1013.25, 7.5, 288.15).value,
0.007843794, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(9, 1013.25, 7.5, 288.15).value,
0.008023466, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(10, 1013.25, 7.5, 288.15).value,
0.008224416, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(11, 1013.25, 7.5, 288.15).value,
0.008448705, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(12, 1013.25, 7.5, 288.15).value,
0.008698263, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(13, 1013.25, 7.5, 288.15).value,
0.008975056, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(14, 1013.25, 7.5, 288.15).value,
0.009281177, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(15, 1013.25, 7.5, 288.15).value,
0.009618923, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(16, 1013.25, 7.5, 288.15).value,
0.009990845, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(17, 1013.25, 7.5, 288.15).value,
0.010399811, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(18, 1013.25, 7.5, 288.15).value,
0.010849054, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(19, 1013.25, 7.5, 288.15).value,
0.011342243, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(20, 1013.25, 7.5, 288.15).value,
0.011883547, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(21, 1013.25, 7.5, 288.15).value,
0.012477725, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(22, 1013.25, 7.5, 288.15).value,
0.013130219, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(23, 1013.25, 7.5, 288.15).value,
0.013847273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(24, 1013.25, 7.5, 288.15).value,
0.014636078, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(25, 1013.25, 7.5, 288.15).value,
0.015504937, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(26, 1013.25, 7.5, 288.15).value,
0.016463481, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(27, 1013.25, 7.5, 288.15).value,
0.017522921, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(28, 1013.25, 7.5, 288.15).value,
0.018696367, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(29, 1013.25, 7.5, 288.15).value,
0.019999221, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(30, 1013.25, 7.5, 288.15).value,
0.021449673, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(31, 1013.25, 7.5, 288.15).value,
0.023069328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(32, 1013.25, 7.5, 288.15).value,
0.024883993, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(33, 1013.25, 7.5, 288.15).value,
0.026924712, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(34, 1013.25, 7.5, 288.15).value,
0.029229084, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(35, 1013.25, 7.5, 288.15).value,
0.031843013, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(36, 1013.25, 7.5, 288.15).value,
0.034823023, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(37, 1013.25, 7.5, 288.15).value,
0.038239374, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(38, 1013.25, 7.5, 288.15).value,
0.042180317, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(39, 1013.25, 7.5, 288.15).value,
0.046757999, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(40, 1013.25, 7.5, 288.15).value,
0.052116797, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(41, 1013.25, 7.5, 288.15).value,
0.058445339, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(42, 1013.25, 7.5, 288.15).value,
0.065994232, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(43, 1013.25, 7.5, 288.15).value,
0.075102941, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(44, 1013.25, 7.5, 288.15).value,
0.086241846, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(45, 1013.25, 7.5, 288.15).value,
0.100080659, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(46, 1013.25, 7.5, 288.15).value,
0.117605188, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(47, 1013.25, 7.5, 288.15).value,
0.140329657, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(48, 1013.25, 7.5, 288.15).value,
0.170719546, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(49, 1013.25, 7.5, 288.15).value,
0.213175063, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(50, 1013.25, 7.5, 288.15).value,
0.277268297, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(51, 1013.25, 7.5, 288.15).value,
0.389670239, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(52, 1013.25, 7.5, 288.15).value,
0.618429331, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(53, 1013.25, 7.5, 288.15).value,
1.126611463, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(54, 1013.25, 7.5, 288.15).value,
2.211541194, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(55, 1013.25, 7.5, 288.15).value,
4.193281287, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(56, 1013.25, 7.5, 288.15).value,
7.055044748, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(57, 1013.25, 7.5, 288.15).value,
10.0652395, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(58, 1013.25, 7.5, 288.15).value,
12.35314971, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(59, 1013.25, 7.5, 288.15).value,
13.63529754, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(60, 1013.25, 7.5, 288.15).value,
14.62347701, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(61, 1013.25, 7.5, 288.15).value,
15.00716194, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(62, 1013.25, 7.5, 288.15).value,
13.99621411, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(63, 1013.25, 7.5, 288.15).value,
10.83108919, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(64, 1013.25, 7.5, 288.15).value,
6.844588337, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(65, 1013.25, 7.5, 288.15).value,
3.80880229, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(66, 1013.25, 7.5, 288.15).value,
1.966616477, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(67, 1013.25, 7.5, 288.15).value,
1.033387448, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(68, 1013.25, 7.5, 288.15).value,
0.60546544, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(69, 1013.25, 7.5, 288.15).value,
0.406984877, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(70, 1013.25, 7.5, 288.15).value,
0.304104518, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(71, 1013.25, 7.5, 288.15).value,
0.24160024, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(72, 1013.25, 7.5, 288.15).value,
0.198531458, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(73, 1013.25, 7.5, 288.15).value,
0.167045465, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(74, 1013.25, 7.5, 288.15).value,
0.143141978, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(75, 1013.25, 7.5, 288.15).value,
0.124484922, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(76, 1013.25, 7.5, 288.15).value,
0.109604088, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(77, 1013.25, 7.5, 288.15).value,
0.09752563, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(78, 1013.25, 7.5, 288.15).value,
0.087579095, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(79, 1013.25, 7.5, 288.15).value,
0.079288509, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(80, 1013.25, 7.5, 288.15).value,
0.072307337, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(81, 1013.25, 7.5, 288.15).value,
0.066377906, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(82, 1013.25, 7.5, 288.15).value,
0.061305161, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(83, 1013.25, 7.5, 288.15).value,
0.05693918, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(84, 1013.25, 7.5, 288.15).value,
0.053163238, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(85, 1013.25, 7.5, 288.15).value,
0.049885471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(86, 1013.25, 7.5, 288.15).value,
0.047032946, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(87, 1013.25, 7.5, 288.15).value,
0.044547391, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(88, 1013.25, 7.5, 288.15).value,
0.042382069, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(89, 1013.25, 7.5, 288.15).value,
0.040499471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(90, 1013.25, 7.5, 288.15).value,
0.038869622, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(91, 1013.25, 7.5, 288.15).value,
0.037468818, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(92, 1013.25, 7.5, 288.15).value,
0.036278727, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(93, 1013.25, 7.5, 288.15).value,
0.035285753, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(94, 1013.25, 7.5, 288.15).value,
0.034480646, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(95, 1013.25, 7.5, 288.15).value,
0.033858315, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(96, 1013.25, 7.5, 288.15).value,
0.033417859, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(97, 1013.25, 7.5, 288.15).value,
0.033162815, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(98, 1013.25, 7.5, 288.15).value,
0.033101677, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(99, 1013.25, 7.5, 288.15).value,
0.033248738, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(100, 1013.25, 7.5, 288.15).value,
0.033625377, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(101, 1013.25, 7.5, 288.15).value,
0.034261951, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(102, 1013.25, 7.5, 288.15).value,
0.03520058, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(103, 1013.25, 7.5, 288.15).value,
0.036499225, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(104, 1013.25, 7.5, 288.15).value,
0.038237778, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(105, 1013.25, 7.5, 288.15).value,
0.040527282, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(106, 1013.25, 7.5, 288.15).value,
0.043524209, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(107, 1013.25, 7.5, 288.15).value,
0.047453183, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(108, 1013.25, 7.5, 288.15).value,
0.05264422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(109, 1013.25, 7.5, 288.15).value,
0.059596011, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(110, 1013.25, 7.5, 288.15).value,
0.069087844, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(111, 1013.25, 7.5, 288.15).value,
0.082387054, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(112, 1013.25, 7.5, 288.15).value,
0.101654574, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(113, 1013.25, 7.5, 288.15).value,
0.130786962, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(114, 1013.25, 7.5, 288.15).value,
0.177281273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(115, 1013.25, 7.5, 288.15).value,
0.25660834, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(116, 1013.25, 7.5, 288.15).value,
0.402453591, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(117, 1013.25, 7.5, 288.15).value,
0.683016431, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(118, 1013.25, 7.5, 288.15).value,
1.134866447, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(119, 1013.25, 7.5, 288.15).value,
1.306379447, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(120, 1013.25, 7.5, 288.15).value,
0.886108944, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(121, 1013.25, 7.5, 288.15).value,
0.509171816, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(122, 1013.25, 7.5, 288.15).value,
0.307768488, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(123, 1013.25, 7.5, 288.15).value,
0.202100995, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(124, 1013.25, 7.5, 288.15).value,
0.142570138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(125, 1013.25, 7.5, 288.15).value,
0.106445548, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(126, 1013.25, 7.5, 288.15).value,
0.083103974, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(127, 1013.25, 7.5, 288.15).value,
0.067232038, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(128, 1013.25, 7.5, 288.15).value,
0.055982184, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(129, 1013.25, 7.5, 288.15).value,
0.047732332, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(130, 1013.25, 7.5, 288.15).value,
0.041509034, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(131, 1013.25, 7.5, 288.15).value,
0.036701642, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(132, 1013.25, 7.5, 288.15).value,
0.032912343, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(133, 1013.25, 7.5, 288.15).value,
0.029873422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(134, 1013.25, 7.5, 288.15).value,
0.027399581, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(135, 1013.25, 7.5, 288.15).value,
0.025359375, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(136, 1013.25, 7.5, 288.15).value,
0.02365753, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(137, 1013.25, 7.5, 288.15).value,
0.022223652, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(138, 1013.25, 7.5, 288.15).value,
0.021004834, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(139, 1013.25, 7.5, 288.15).value,
0.019960701, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(140, 1013.25, 7.5, 288.15).value,
0.019060012, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(141, 1013.25, 7.5, 288.15).value,
0.018278288, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(142, 1013.25, 7.5, 288.15).value,
0.017596128, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(143, 1013.25, 7.5, 288.15).value,
0.016997997, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(144, 1013.25, 7.5, 288.15).value,
0.016471333, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(145, 1013.25, 7.5, 288.15).value,
0.016005887, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(146, 1013.25, 7.5, 288.15).value,
0.015593233, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(147, 1013.25, 7.5, 288.15).value,
0.015226386, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(148, 1013.25, 7.5, 288.15).value,
0.014899516, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(149, 1013.25, 7.5, 288.15).value,
0.014607727, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(150, 1013.25, 7.5, 288.15).value,
0.01434688, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(151, 1013.25, 7.5, 288.15).value,
0.014113455, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(152, 1013.25, 7.5, 288.15).value,
0.013904444, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(153, 1013.25, 7.5, 288.15).value,
0.013717263, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(154, 1013.25, 7.5, 288.15).value,
0.013549678, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(155, 1013.25, 7.5, 288.15).value,
0.013399755, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(156, 1013.25, 7.5, 288.15).value,
0.013265808, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(157, 1013.25, 7.5, 288.15).value,
0.013146362, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(158, 1013.25, 7.5, 288.15).value,
0.013040122, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(159, 1013.25, 7.5, 288.15).value,
0.012945947, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(160, 1013.25, 7.5, 288.15).value,
0.012862828, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(161, 1013.25, 7.5, 288.15).value,
0.012789869, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(162, 1013.25, 7.5, 288.15).value,
0.012726273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(163, 1013.25, 7.5, 288.15).value,
0.012671328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(164, 1013.25, 7.5, 288.15).value,
0.012624397, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(165, 1013.25, 7.5, 288.15).value,
0.012584907, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(166, 1013.25, 7.5, 288.15).value,
0.012552343, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(167, 1013.25, 7.5, 288.15).value,
0.012526238, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(168, 1013.25, 7.5, 288.15).value,
0.012506174, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(169, 1013.25, 7.5, 288.15).value,
0.012491766, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(170, 1013.25, 7.5, 288.15).value,
0.012482668, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(171, 1013.25, 7.5, 288.15).value,
0.012478563, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(172, 1013.25, 7.5, 288.15).value,
0.012479162, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(173, 1013.25, 7.5, 288.15).value,
0.012484201, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(174, 1013.25, 7.5, 288.15).value,
0.012493438, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(175, 1013.25, 7.5, 288.15).value,
0.01250665, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(176, 1013.25, 7.5, 288.15).value,
0.012523632, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(177, 1013.25, 7.5, 288.15).value,
0.012544196, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(178, 1013.25, 7.5, 288.15).value,
0.012568166, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(179, 1013.25, 7.5, 288.15).value,
0.012595383, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(180, 1013.25, 7.5, 288.15).value,
0.012625696, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(181, 1013.25, 7.5, 288.15).value,
0.012658968, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(182, 1013.25, 7.5, 288.15).value,
0.01269507, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(183, 1013.25, 7.5, 288.15).value,
0.012733882, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(184, 1013.25, 7.5, 288.15).value,
0.012775293, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(185, 1013.25, 7.5, 288.15).value,
0.012819198, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(186, 1013.25, 7.5, 288.15).value,
0.012865502, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(187, 1013.25, 7.5, 288.15).value,
0.012914112, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(188, 1013.25, 7.5, 288.15).value,
0.012964945, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(189, 1013.25, 7.5, 288.15).value,
0.01301792, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(190, 1013.25, 7.5, 288.15).value,
0.013072963, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(191, 1013.25, 7.5, 288.15).value,
0.013130004, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(192, 1013.25, 7.5, 288.15).value,
0.013188976, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(193, 1013.25, 7.5, 288.15).value,
0.013249818, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(194, 1013.25, 7.5, 288.15).value,
0.013312471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(195, 1013.25, 7.5, 288.15).value,
0.01337688, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(196, 1013.25, 7.5, 288.15).value,
0.013442993, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(197, 1013.25, 7.5, 288.15).value,
0.01351076, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(198, 1013.25, 7.5, 288.15).value,
0.013580135, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(199, 1013.25, 7.5, 288.15).value,
0.013651074, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(200, 1013.25, 7.5, 288.15).value,
0.013723536, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(201, 1013.25, 7.5, 288.15).value,
0.01379748, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(202, 1013.25, 7.5, 288.15).value,
0.013872869, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(203, 1013.25, 7.5, 288.15).value,
0.013949668, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(204, 1013.25, 7.5, 288.15).value,
0.014027843, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(205, 1013.25, 7.5, 288.15).value,
0.014107361, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(206, 1013.25, 7.5, 288.15).value,
0.014188192, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(207, 1013.25, 7.5, 288.15).value,
0.014270307, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(208, 1013.25, 7.5, 288.15).value,
0.014353678, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(209, 1013.25, 7.5, 288.15).value,
0.014438278, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(210, 1013.25, 7.5, 288.15).value,
0.014524083, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(211, 1013.25, 7.5, 288.15).value,
0.014611069, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(212, 1013.25, 7.5, 288.15).value,
0.014699212, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(213, 1013.25, 7.5, 288.15).value,
0.01478849, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(214, 1013.25, 7.5, 288.15).value,
0.014878883, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(215, 1013.25, 7.5, 288.15).value,
0.01497037, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(216, 1013.25, 7.5, 288.15).value,
0.015062932, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(217, 1013.25, 7.5, 288.15).value,
0.015156551, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(218, 1013.25, 7.5, 288.15).value,
0.01525121, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(219, 1013.25, 7.5, 288.15).value,
0.015346891, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(220, 1013.25, 7.5, 288.15).value,
0.015443579, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(221, 1013.25, 7.5, 288.15).value,
0.015541258, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(222, 1013.25, 7.5, 288.15).value,
0.015639912, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(223, 1013.25, 7.5, 288.15).value,
0.015739529, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(224, 1013.25, 7.5, 288.15).value,
0.015840094, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(225, 1013.25, 7.5, 288.15).value,
0.015941595, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(226, 1013.25, 7.5, 288.15).value,
0.016044018, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(227, 1013.25, 7.5, 288.15).value,
0.016147352, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(228, 1013.25, 7.5, 288.15).value,
0.016251585, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(229, 1013.25, 7.5, 288.15).value,
0.016356706, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(230, 1013.25, 7.5, 288.15).value,
0.016462705, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(231, 1013.25, 7.5, 288.15).value,
0.016569571, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(232, 1013.25, 7.5, 288.15).value,
0.016677295, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(233, 1013.25, 7.5, 288.15).value,
0.016785866, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(234, 1013.25, 7.5, 288.15).value,
0.016895277, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(235, 1013.25, 7.5, 288.15).value,
0.017005518, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(236, 1013.25, 7.5, 288.15).value,
0.017116581, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(237, 1013.25, 7.5, 288.15).value,
0.017228459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(238, 1013.25, 7.5, 288.15).value,
0.017341142, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(239, 1013.25, 7.5, 288.15).value,
0.017454625, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(240, 1013.25, 7.5, 288.15).value,
0.0175689, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(241, 1013.25, 7.5, 288.15).value,
0.01768396, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(242, 1013.25, 7.5, 288.15).value,
0.017799799, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(243, 1013.25, 7.5, 288.15).value,
0.017916411, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(244, 1013.25, 7.5, 288.15).value,
0.018033789, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(245, 1013.25, 7.5, 288.15).value,
0.018151929, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(246, 1013.25, 7.5, 288.15).value,
0.018270824, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(247, 1013.25, 7.5, 288.15).value,
0.01839047, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(248, 1013.25, 7.5, 288.15).value,
0.018510862, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(249, 1013.25, 7.5, 288.15).value,
0.018631995, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(250, 1013.25, 7.5, 288.15).value,
0.018753865, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(251, 1013.25, 7.5, 288.15).value,
0.018876467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(252, 1013.25, 7.5, 288.15).value,
0.018999798, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(253, 1013.25, 7.5, 288.15).value,
0.019123854, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(254, 1013.25, 7.5, 288.15).value,
0.019248631, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(255, 1013.25, 7.5, 288.15).value,
0.019374127, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(256, 1013.25, 7.5, 288.15).value,
0.019500338, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(257, 1013.25, 7.5, 288.15).value,
0.019627261, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(258, 1013.25, 7.5, 288.15).value,
0.019754894, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(259, 1013.25, 7.5, 288.15).value,
0.019883235, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(260, 1013.25, 7.5, 288.15).value,
0.02001228, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(261, 1013.25, 7.5, 288.15).value,
0.020142029, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(262, 1013.25, 7.5, 288.15).value,
0.02027248, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(263, 1013.25, 7.5, 288.15).value,
0.020403631, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(264, 1013.25, 7.5, 288.15).value,
0.020535481, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(265, 1013.25, 7.5, 288.15).value,
0.020668028, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(266, 1013.25, 7.5, 288.15).value,
0.020801273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(267, 1013.25, 7.5, 288.15).value,
0.020935214, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(268, 1013.25, 7.5, 288.15).value,
0.021069851, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(269, 1013.25, 7.5, 288.15).value,
0.021205184, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(270, 1013.25, 7.5, 288.15).value,
0.021341213, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(271, 1013.25, 7.5, 288.15).value,
0.021477939, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(272, 1013.25, 7.5, 288.15).value,
0.021615362, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(273, 1013.25, 7.5, 288.15).value,
0.021753483, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(274, 1013.25, 7.5, 288.15).value,
0.021892304, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(275, 1013.25, 7.5, 288.15).value,
0.022031825, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(276, 1013.25, 7.5, 288.15).value,
0.02217205, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(277, 1013.25, 7.5, 288.15).value,
0.022312979, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(278, 1013.25, 7.5, 288.15).value,
0.022454615, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(279, 1013.25, 7.5, 288.15).value,
0.022596961, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(280, 1013.25, 7.5, 288.15).value,
0.022740019, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(281, 1013.25, 7.5, 288.15).value,
0.022883795, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(282, 1013.25, 7.5, 288.15).value,
0.02302829, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(283, 1013.25, 7.5, 288.15).value,
0.02317351, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(284, 1013.25, 7.5, 288.15).value,
0.023319459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(285, 1013.25, 7.5, 288.15).value,
0.023466142, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(286, 1013.25, 7.5, 288.15).value,
0.023613565, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(287, 1013.25, 7.5, 288.15).value,
0.023761733, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(288, 1013.25, 7.5, 288.15).value,
0.023910653, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(289, 1013.25, 7.5, 288.15).value,
0.024060332, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(290, 1013.25, 7.5, 288.15).value,
0.024210778, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(291, 1013.25, 7.5, 288.15).value,
0.024361999, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(292, 1013.25, 7.5, 288.15).value,
0.024514003, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(293, 1013.25, 7.5, 288.15).value,
0.024666801, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(294, 1013.25, 7.5, 288.15).value,
0.024820402, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(295, 1013.25, 7.5, 288.15).value,
0.024974817, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(296, 1013.25, 7.5, 288.15).value,
0.025130058, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(297, 1013.25, 7.5, 288.15).value,
0.025286138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(298, 1013.25, 7.5, 288.15).value,
0.025443071, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(299, 1013.25, 7.5, 288.15).value,
0.025600871, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(300, 1013.25, 7.5, 288.15).value,
0.025759555, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(301, 1013.25, 7.5, 288.15).value,
0.025919138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(302, 1013.25, 7.5, 288.15).value,
0.026079639, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(303, 1013.25, 7.5, 288.15).value,
0.026241079, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(304, 1013.25, 7.5, 288.15).value,
0.026403477, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(305, 1013.25, 7.5, 288.15).value,
0.026566857, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(306, 1013.25, 7.5, 288.15).value,
0.026731244, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(307, 1013.25, 7.5, 288.15).value,
0.026896663, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(308, 1013.25, 7.5, 288.15).value,
0.027063143, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(309, 1013.25, 7.5, 288.15).value,
0.027230715, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(310, 1013.25, 7.5, 288.15).value,
0.027399412, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(311, 1013.25, 7.5, 288.15).value,
0.02756927, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(312, 1013.25, 7.5, 288.15).value,
0.027740328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(313, 1013.25, 7.5, 288.15).value,
0.027912629, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(314, 1013.25, 7.5, 288.15).value,
0.028086218, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(315, 1013.25, 7.5, 288.15).value,
0.028261145, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(316, 1013.25, 7.5, 288.15).value,
0.028437464, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(317, 1013.25, 7.5, 288.15).value,
0.028615235, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(318, 1013.25, 7.5, 288.15).value,
0.028794523, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(319, 1013.25, 7.5, 288.15).value,
0.028975399, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(320, 1013.25, 7.5, 288.15).value,
0.029157939, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(321, 1013.25, 7.5, 288.15).value,
0.029342231, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(322, 1013.25, 7.5, 288.15).value,
0.029528367, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(323, 1013.25, 7.5, 288.15).value,
0.029716451, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(324, 1013.25, 7.5, 288.15).value,
0.029906599, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(325, 1013.25, 7.5, 288.15).value,
0.030098937, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(326, 1013.25, 7.5, 288.15).value,
0.030293607, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(327, 1013.25, 7.5, 288.15).value,
0.030490765, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(328, 1013.25, 7.5, 288.15).value,
0.030690588, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(329, 1013.25, 7.5, 288.15).value,
0.030893273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(330, 1013.25, 7.5, 288.15).value,
0.031099041, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(331, 1013.25, 7.5, 288.15).value,
0.031308143, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(332, 1013.25, 7.5, 288.15).value,
0.031520859, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(333, 1013.25, 7.5, 288.15).value,
0.031737512, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(334, 1013.25, 7.5, 288.15).value,
0.031958467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(335, 1013.25, 7.5, 288.15).value,
0.032184144, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(336, 1013.25, 7.5, 288.15).value,
0.032415022, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(337, 1013.25, 7.5, 288.15).value,
0.032651659, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(338, 1013.25, 7.5, 288.15).value,
0.032894698, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(339, 1013.25, 7.5, 288.15).value,
0.033144893, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(340, 1013.25, 7.5, 288.15).value,
0.033403124, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(341, 1013.25, 7.5, 288.15).value,
0.033670433, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(342, 1013.25, 7.5, 288.15).value,
0.033948053, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(343, 1013.25, 7.5, 288.15).value,
0.034237459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(344, 1013.25, 7.5, 288.15).value,
0.034540422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(345, 1013.25, 7.5, 288.15).value,
0.034859093, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(346, 1013.25, 7.5, 288.15).value,
0.035196097, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(347, 1013.25, 7.5, 288.15).value,
0.03555467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(348, 1013.25, 7.5, 288.15).value,
0.03593884, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(349, 1013.25, 7.5, 288.15).value,
0.036353672, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(350, 1013.25, 7.5, 288.15).value,
0.036805605, places=5)
def test_zenit_water_vapour_attenuation(self):
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 1.0, 14.25).value,
0.064981043, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 1.0, 14.25).value,
0.070360091, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 1.0, 14.25).value,
0.074660262, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.5, 14.25).value,
0.06911297, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.5, 14.25).value,
0.073434531, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.5, 14.25).value,
0.080098077, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.3, 14.25).value,
0.072394726, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.3, 14.25).value,
0.075162715, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.3, 14.25).value,
0.083750389, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.2, 14.25).value,
0.074394064, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.2, 14.25).value,
0.076695287, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.2, 14.25).value,
0.086350752, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 1.0, 29).value,
0.305636526, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 1.0, 29).value,
0.331425898, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 1.0, 29).value,
0.355205229, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.5, 29).value,
0.324977228, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.5, 29).value,
0.345830132, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.5, 29).value,
0.38091961, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.3, 29).value,
0.340327583, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.3, 29).value,
0.353923317, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.3, 29).value,
0.398176611, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.2, 29).value,
0.349674822, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.2, 29).value,
0.361098289, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.2, 29).value,
0.410456469, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 1.0, 14.25).value,
0.099820608, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 1.0, 14.25).value,
0.118484695, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.5, 14.25).value,
0.105446054, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.5, 14.25).value,
0.12252307, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.3, 14.25).value,
0.108812058, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.3, 14.25).value,
0.125093339, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.2, 14.25).value,
0.111441086, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.2, 14.25).value,
0.127090376, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 1.0, 29).value,
0.473979935, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 1.0, 29).value,
0.561753331, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.5, 29).value,
0.500468518, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.5, 29).value,
0.580717641, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.3, 29).value,
0.516307047, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.3, 29).value,
0.592782098, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.2, 29).value,
0.528672179, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.2, 29).value,
0.602152942, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 1.0, 14.25).value,
0.149156898, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 1.0, 14.25).value,
0.121165007, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 1.0, 14.25).value,
0.051589359, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.5, 14.25).value,
0.153859398, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.5, 14.25).value,
0.123550552, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.5, 14.25).value,
0.052996133, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.3, 14.25).value,
0.156616572, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.3, 14.25).value,
0.125325192, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.3, 14.25).value,
0.053871006, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.2, 14.25).value,
0.158958354, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.2, 14.25).value,
0.126766365, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.2, 14.25).value,
0.054721343, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 1.0, 29).value,
0.683528163, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 1.0, 29).value,
0.555168022, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 1.0, 29).value,
0.188559832, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.5, 29).value,
0.704836196, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.5, 29).value,
0.565993797, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.5, 29).value,
0.193687836, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.3, 29).value,
0.717323975, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.3, 29).value,
0.574044911, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.3, 29).value,
0.19687619, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.2, 29).value,
0.727927181, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.2, 29).value,
0.580581723, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.2, 29).value,
0.19997458, places=5)
class ITUR836_6TestCase(test.TestCase):
    """Validation tests for version 6 of Recommendation ITU-R P.836
    (surface water-vapour density and total columnar water-vapour
    content).

    NOTE: in the original file the two test methods lacked the ``test_``
    prefix, so unittest discovery silently skipped them and the reference
    values were never checked.  They are renamed here with the prefix;
    the old names are kept as aliases for backward compatibility.
    """

    def setUp(self):
        # Pin the model to version 6 so the reference values below apply.
        models.itu836.change_version(6)

    def test_surface_water_vapour_density(self):
        """Check ``surface_water_vapour_density`` against reference values.

        Each case is (latitude [deg], longitude [deg], exceedance
        probability p [%], altitude [km], expected density [g/m^3]),
        verified to 5 decimal places.
        """
        cases = [
            (3.133, 101.7, 0.1, 0.236104459, 22.93756598),
            (3.133, 101.7, 0.15, 0.236104459, 22.80534575),
            (3.133, 101.7, 0.3, 0.236104459, 22.55507955),
            (3.133, 101.7, 0.35, 0.236104459, 22.49361957),
            (22.9, -43.23, 0.1, 0, 21.59164912),
            (22.9, -43.23, 0.15, 0, 21.46164369),
            (22.9, -43.23, 0.3, 0, 21.24753319),
            (22.9, -43.23, 0.35, 0, 21.18676013),
            (23, 30, 0.1, 0.247, 11.88170822),
            (23, 30, 0.15, 0.247, 11.61777268),
            (23, 30, 0.3, 0.247, 11.12235912),
            (23, 30, 0.35, 0.247, 11.00877052),
            (25.78, -80.22, 0.1, 7.51071e-05, 23.50748104),
            (25.78, -80.22, 0.15, 7.51071e-05, 23.34324475),
            (25.78, -80.22, 0.3, 7.51071e-05, 23.06574222),
            (25.78, -80.22, 0.35, 7.51071e-05, 23.00327243),
            (28.717, 77.3, 0.1, 0.217559455, 25.95287453),
            (28.717, 77.3, 0.15, 0.217559455, 25.71217873),
            (28.717, 77.3, 0.3, 0.217559455, 25.34018758),
            (28.717, 77.3, 0.35, 0.217559455, 25.2557054),
            (33.94, 18.43, 0.1, 0, 24.00156532),
            (33.94, 18.43, 0.15, 0, 23.85987554),
            (33.94, 18.43, 0.3, 0, 23.51464505),
            (33.94, 18.43, 0.35, 0, 23.41954477),
            (41.9, 12.49, 0.1, 0.056701045, 19.78501126),
            (41.9, 12.49, 0.15, 0.056701045, 19.48948848),
            (41.9, 12.49, 0.3, 0.056701045, 19.02450953),
            (41.9, 12.49, 0.35, 0.056701045, 18.92055161),
            (51.5, -0.14, 0.1, 0.069164224, 15.21351315),
            (51.5, -0.14, 0.15, 0.069164224, 15.0172773),
            (51.5, -0.14, 0.3, 0.069164224, 14.6189506),
            (51.5, -0.14, 0.35, 0.069164224, 14.50640729),
        ]
        for lat, lon, p, alt, expected in cases:
            with self.subTest(lat=lat, lon=lon, p=p, alt=alt):
                self.assertAlmostEqual(
                    models.itu836.surface_water_vapour_density(
                        lat, lon, p, alt).value,
                    expected, places=5)

    def test_total_water_vapour_content(self):
        """Check ``total_water_vapour_content`` against reference values.

        Each case is (latitude [deg], longitude [deg], exceedance
        probability p [%], altitude [km], expected content [kg/m^2]),
        verified to 5 decimal places.
        """
        cases = [
            (3.133, 101.7, 0.1, 0.23610446, 62.16532093),
            (3.133, 101.7, 0.15, 0.23610446, 61.59527521),
            (3.133, 101.7, 0.3, 0.23610446, 60.58285243),
            (3.133, 101.7, 0.35, 0.23610446, 60.35619302),
            (22.9, -43.23, 0.1, 0.0, 56.38788554),
            (22.9, -43.23, 0.15, 0.0, 55.36064664),
            (22.9, -43.23, 0.3, 0.0, 53.4851113),
            (22.9, -43.23, 0.35, 0.0, 53.03918259),
            (23, 30, 0.1, 0.247, 38.47288189),
            (23, 30, 0.15, 0.247, 37.21449337),
            (23, 30, 0.3, 0.247, 34.63093178),
            (23, 30, 0.35, 0.247, 34.06569649),
            (25.78, -80.22, 0.1, 7.511e-05, 62.84315177),
            (25.78, -80.22, 0.15, 7.511e-05, 61.95641322),
            (25.78, -80.22, 0.3, 7.511e-05, 60.48487688),
            (25.78, -80.22, 0.35, 7.511e-05, 60.1561742),
            (28.717, 77.3, 0.1, 0.21755946, 75.44891006),
            (28.717, 77.3, 0.15, 0.21755946, 74.79639702),
            (28.717, 77.3, 0.3, 0.21755946, 73.40408393),
            (28.717, 77.3, 0.35, 0.21755946, 73.07234727),
            (33.94, 18.43, 0.1, 0.0, 45.19895208),
            (33.94, 18.43, 0.15, 0.0, 44.15275162),
            (33.94, 18.43, 0.3, 0.0, 42.21022387),
            (33.94, 18.43, 0.35, 0.0, 41.69772633),
            (41.9, 12.49, 0.1, 0.05670104, 39.93693588),
            (41.9, 12.49, 0.15, 0.05670104, 39.33984158),
            (41.9, 12.49, 0.3, 0.05670104, 38.19321515),
            (41.9, 12.49, 0.35, 0.05670104, 37.94621912),
            (51.5, -0.14, 0.1, 0.06916422, 39.23803432),
            (51.5, -0.14, 0.15, 0.06916422, 38.41414987),
            (51.5, -0.14, 0.3, 0.06916422, 36.88058222),
            (51.5, -0.14, 0.35, 0.06916422, 36.4074561),
        ]
        for lat, lon, p, alt, expected in cases:
            with self.subTest(lat=lat, lon=lon, p=p, alt=alt):
                self.assertAlmostEqual(
                    models.itu836.total_water_vapour_content(
                        lat, lon, p, alt).value,
                    expected, places=5)

    # Backward-compatible aliases for the original (un-discovered) names.
    surface_water_vapour_density = test_surface_water_vapour_density
    total_water_vapour_content = test_total_water_vapour_content
class ITUR838_3TestCase(test.TestCase):
    """Validation tests for rain specific attenuation (ITU-R P.838-3)."""

    def setUp(self):
        # Pin the recommendation version so the reference values apply.
        models.itu838.change_version(3)

    def test_rain_specific_attenuation(self):
        """Compare gamma_R against the ITU-R validation vectors.

        Each tuple is (rain rate [mm/h], frequency [GHz],
        elevation angle [deg], polarization tilt tau [deg],
        expected specific attenuation [dB/km]).  The reference
        validation executes every group of vectors four consecutive
        times; ``group * 4`` reproduces that assertion sequence
        exactly.
        """
        vector_groups = [
            [(30.875024, 14.25, 30.87067768, 0, 1.879742),
             (56.370009, 14.25, 40.97052773, 0, 3.630988),
             (55.231625, 14.25, 47.91280491, 0, 3.503189)],
            [(30.875024, 29.00, 30.87067768, 0, 5.814832),
             (56.370009, 29.00, 40.97052773, 0, 10.157375),
             (55.231625, 29.00, 47.91280491, 0, 9.846762)],
            [(58.094216, 14.25, 59.81487174, 0, 3.628282),
             (89.114103, 14.25, 49.20900369, 0, 5.948478)],
            [(58.094216, 29.00, 59.81487174, 0, 10.132682),
             (89.114103, 29.00, 49.20900369, 0, 15.460212)],
            [(57.39623, 14.25, 55.90591362, 0, 3.603569),
             (93.607098, 14.25, 67.76751981, 0, 6.06336),
             (54.623411, 14.25, 38.14104832, 0, 3.523996)],
            [(57.39623, 29.00, 55.90591362, 0, 10.078266),
             (93.607098, 29.00, 67.76751981, 0, 15.712442),
             (54.623411, 29.00, 38.14104832, 0, 9.904098)],
            # New values in validation 4
            [(26.48052, 14.25, 31.07694309, 0, 1.581308489),
             (33.936232, 14.25, 40.23202374, 0, 2.06173217),
             (27.13586832, 14.25, 46.35969261, 0, 1.592084199)],
            [(26.48052, 29, 31.07694309, 0, 5.021802196),
             (33.936232, 29, 40.23202374, 0, 6.278460355),
             (27.13586832, 29, 46.35969261, 0, 5.031354793)],
            [(50.639304, 14.25, 22.27833468, 0, 3.321396378),
             (78.2994993, 14.25, 52.6789929, 0, 5.11503455)],
            [(50.639304, 29, 22.27833468, 0, 9.424302438),
             (78.2994993, 29, 52.6789929, 0, 13.59290067)],
            [(63.62668149, 14.25, 48.23861222, 90, 3.72899602),
             (99.13558978, 14.25, 85.80767474, 90, 6.340652096),
             (42.91007183, 14.25, 20.14348033, 90, 2.350323497)],
            [(63.62668149, 29, 48.23861222, 90, 10.28694456),
             (99.13558978, 29, 85.80767474, 90, 16.31838263),
             (42.91007183, 29, 20.14348033, 90, 6.833646475)],
        ]
        for group in vector_groups:
            for rate, freq, elev, tau, expected in group * 4:
                gamma = models.itu838.rain_specific_attenuation(
                    rate, freq, elev, tau).value
                self.assertAlmostEqual(gamma, expected, places=5)
class ITUR837_6TestCase(test.TestCase):
    """Validation tests for rainfall rate exceeded 0.01% (ITU-R P.837-6)."""

    def setUp(self):
        # Pin the recommendation version so the reference values apply.
        models.itu837.change_version(6)

    def test_rainfall_rate(self):
        """Compare R_0.01 against the ITU-R validation vectors.

        Tuples are (latitude [deg], longitude [deg],
        expected rainfall rate [mm/h]); duplicate entries mirror the
        reference assertion sequence verbatim.
        """
        cases = [
            (51.500, 359.86, 30.8750240),
            (41.900, 12.49, 56.3700090),
            (33.940, 18.43, 55.2316250),
            (51.500, 359.86, 30.8750240),
            (41.900, 12.49, 56.3700090),
            (33.940, 18.43, 55.2316250),
            (22.900, 316.77, 58.0942160),
            (25.780, 279.78, 89.1141030),
            (22.900, 316.77, 58.0942160),
            (25.780, 279.78, 89.1141030),
            (28.717, 77.30, 57.3962300),
            (3.133, 101.70, 93.6070980),
            (9.050, 38.70, 54.6234110),
            (28.717, 77.30, 57.3962300),
            (3.133, 101.70, 93.6070980),
            (9.050, 38.70, 54.6234110),
        ]
        for lat, lon, expected in cases:
            rate = models.itu837.rainfall_rate(lat, lon, 0.01).value
            self.assertAlmostEqual(rate, expected, places=5)
class ITUR837_7TestCase(test.TestCase):
    """Validation tests for rainfall statistics (ITU-R P.837-7)."""

    def setUp(self):
        # Pin the recommendation version so the reference values apply.
        models.itu837.change_version(7)

    def test_rainfall_rate(self):
        """Rainfall rate exceeded for several probabilities.

        Tuples are (latitude [deg], longitude [deg], p [%],
        expected rate [mm/h], decimal places).  One reference vector
        is only checked to 2 places, hence the per-case precision.
        """
        cases = [
            (3.133, 101.7, 0.1, 34.64798123, 3),
            (3.133, 101.7, 0.15, 27.7636201, 3),
            (3.133, 101.7, 0.3, 18.26254364, 3),
            (3.133, 101.7, 0.35, 16.49493229, 3),
            (22.9, -43.23, 0.1, 14.58963041, 3),
            (22.9, -43.23, 0.15, 11.00510082, 3),
            (22.9, -43.23, 0.3, 6.23796236, 2),
            (22.9, -43.23, 0.35, 5.38239642, 3),
            (23, 30, 0.1, 0.0, 3),
            (23, 30, 0.15, 0.0, 3),
            (23, 30, 0.3, 0.0, 3),
            (23, 30, 0.35, 0.0, 3),
            (25.78, -80.22, 0.1, 25.33888119, 3),
            (25.78, -80.22, 0.15, 19.86683577, 3),
            (25.78, -80.22, 0.3, 12.43676554, 3),
            (25.78, -80.22, 0.35, 11.07566126, 3),
            (28.717, 77.3, 0.1, 16.53857378, 3),
            (28.717, 77.3, 0.15, 12.04651363, 3),
            (28.717, 77.3, 0.3, 6.21600589, 3),
            (28.717, 77.3, 0.35, 5.19609765, 3),
            (33.94, 18.43, 0.1, 7.43193175, 3),
            (33.94, 18.43, 0.15, 5.53031864, 3),
            (33.94, 18.43, 0.3, 3.03506603, 3),
            (33.94, 18.43, 0.35, 2.59276061, 3),
            (41.9, 12.49, 0.1, 11.19798305, 3),
            (41.9, 12.49, 0.15, 8.88472572, 3),
            (41.9, 12.49, 0.3, 5.75356253, 3),
            (41.9, 12.49, 0.35, 5.18058827, 3),
            (51.5, -0.14, 0.1, 8.9924712, 3),
            (51.5, -0.14, 0.15, 7.17369312, 3),
            (51.5, -0.14, 0.3, 4.69033625, 3),
            (51.5, -0.14, 0.35, 4.23258601, 3),
        ]
        for lat, lon, p, expected, places in cases:
            rate = models.itu837.rainfall_rate(lat, lon, p).value
            self.assertAlmostEqual(rate, expected, places=places)

    def test_rainfall_probability(self):
        """Probability of rain P0 for the validation sites.

        Tuples are (latitude [deg], longitude [deg], expected P0 [%]).
        """
        cases = [
            (3.133, 101.7, 4.53654368),
            (22.9, -43.23, 1.41773353),
            (23, 30, 0.00051911),
            (25.78, -80.22, 2.90785192),
            (28.717, 77.3, 1.07089363),
            (33.94, 18.43, 1.27567391),
            (41.9, 12.49, 5.26971907),
            (51.5, -0.14, 5.3615096),
        ]
        for lat, lon, expected in cases:
            prob = models.itu837.rainfall_probability(lat, lon).value
            self.assertAlmostEqual(prob, expected, places=5)

    def test_rainfall_rate_R001(self):
        """Rainfall rate exceeded 0.01% of the time (R_0.01).

        Tuples are (latitude [deg], longitude [deg], expected [mm/h]).
        """
        cases = [
            (3.133, 101.7, 99.1481136),
            (22.9, -43.23, 50.639304),
            (23.0, 30.0, 0.0),
            (25.78, -80.22, 78.2982928),
            (28.717, 77.3, 63.5972464),
            (33.94, 18.43, 27.1349664),
            (41.9, 12.49, 33.936232),
            (51.5, -0.14, 26.48052),
        ]
        for lat, lon, expected in cases:
            rate = models.itu837.rainfall_rate(lat, lon, 0.01).value
            self.assertAlmostEqual(rate, expected, places=5)
class ITUR839_4TestCase(test.TestCase):
    """Validation tests for rain height (ITU-R P.839-4)."""

    def setUp(self):
        # Pin the recommendation version so the reference values apply.
        models.itu839.change_version(4)

    def test_isoterm_0_deg(self):
        """Mean annual 0 degC isotherm height for the validation sites.

        Tuples are (latitude [deg], longitude [deg], expected h0 [km]).
        """
        cases = [
            (3.133, 101.7, 4.5979744),
            (22.9, -43.23, 3.79877867),
            (23, 30, 4.168),
            (25.78, -80.22, 4.20946133),
            (28.717, 77.3, 4.89820404),
            (33.94, 18.43, 2.20330276),
            (41.9, 12.49, 2.68749333),
            (51.5, -0.14, 2.09273333),
        ]
        for lat, lon, expected in cases:
            h0 = models.itu839.isoterm_0(lat, lon).value
            self.assertAlmostEqual(h0, expected, places=5)

    def test_rain_height(self):
        """Mean annual rain height hR for the validation sites.

        Tuples are (latitude [deg], longitude [deg], expected hR [km]);
        duplicate entries mirror the reference assertion sequence
        verbatim.
        """
        cases = [
            (51.500, 359.86, 2.4527330),
            (41.900, 12.49, 3.0474930),
            (33.940, 18.43, 2.5633030),
            (51.500, 359.86, 2.4527330),
            (41.900, 12.49, 3.0474930),
            (33.940, 18.43, 2.5633030),
            (22.900, 316.77, 4.1587790),
            (25.780, 279.78, 4.5694610),
            (22.900, 316.77, 4.1587790),
            (25.780, 279.78, 4.5694610),
            (28.717, 77.30, 5.2582040),
            (3.133, 101.70, 4.9579740),
            (9.050, 38.70, 4.7839070),
            (28.717, 77.30, 5.2582040),
            (3.133, 101.70, 4.9579740),
            (9.050, 38.70, 4.7839070),
            (3.133, 101.7, 4.9579744),
            (22.9, -43.23, 4.15877867),
            (23, 30, 4.528),
            (25.78, -80.22, 4.56946133),
            (28.717, 77.3, 5.25820404),
            (33.94, 18.43, 2.56330276),
            (41.9, 12.49, 3.04749333),
            (51.5, -0.14, 2.45273333),
        ]
        for lat, lon, expected in cases:
            h_rain = models.itu839.rain_height(lat, lon).value
            self.assertAlmostEqual(h_rain, expected, places=5)
class ITUR618_12TestCase(test.TestCase):
def setUp(self):
models.itu618.change_version(12)
models.itu453.change_version(12)
models.itu838.change_version(3)
models.itu836.change_version(5)
models.itu837.change_version(6)
models.itu840.change_version(6)
def test_rain_cross_polarization_discrimination(self):
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
16.38308757, 14.25, 30.870677680, 0.001, 0).value,
27.143007980, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
3.89479806, 14.25, 40.970527730, 0.1, 0).value,
37.386086000, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
9.71179484, 14.25, 47.912804910, 0.01, 0).value,
33.812795580, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
71.44613350, 29, 40.970527730, 0.001, 0).value,
21.244470560, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
12.87478397, 29, 47.912804910, 0.1, 0).value,
35.166125690, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
39.07323323, 29, 40.970527730, 0.01, 0).value,
25.180145740, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
23.00197384, 14.25, 59.814871740, 0.001, 0).value,
33.308530550, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
32.74150676, 14.25, 49.209003690, 0.001, 0).value,
25.508227320, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
4.92489694, 14.25, 59.814871740, 0.1, 0).value,
41.798127850, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.96606559, 14.25, 49.209003690, 0.1, 0).value,
34.830206060, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
12.76053997, 14.25, 59.814871740, 0.01, 0).value,
36.168649690, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
18.06938866, 14.25, 49.209003690, 0.01, 0).value,
28.803871260, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
23.00197384, 14.25, 59.814871740, 0.001, 0).value,
33.308530550, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
32.74150676, 14.25, 49.209003690, 0.001, 0).value,
25.508227320, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
100.96022257, 29, 49.209003690, 0.001, 0).value,
20.365001500, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
20.43214239, 29, 59.814871740, 0.1, 0).value,
35.581135690, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
27.86774318, 29, 49.209003690, 0.1, 0).value,
28.745547830, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
46.32024457, 29, 59.814871740, 0.01, 0).value,
30.303830010, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
63.46384760, 29, 49.209003690, 0.01, 0).value,
23.046241580, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
73.05533363, 29, 59.814871740, 0.001, 0).value,
28.089155910, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
26.85402570, 14.25, 55.905913620, 0.001, 0).value,
29.993601830, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
4.44533923, 14.25, 38.141048320, 0.1, 0).value,
35.652315760, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
11.06265445, 14.25, 38.141048320, 0.01, 0).value,
30.034285750, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
26.85402570, 14.25, 55.905913620, 0.001, 0).value,
29.993601830, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
21.84602116, 29, 55.905913620, 0.1, 0).value,
33.289964560, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
51.72271818, 29, 55.905913620, 0.01, 0).value,
27.480618010, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
53.61322867, 29, 38.141048320, 0.001, 0).value,
23.354012700, places=5)
def test_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 14.25, 30.87067768,
hs=0.0691640, p=0.01, tau=0.00).value,
7.5572640, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 14.25, 40.97052773,
hs=0.0567010, p=0.01, tau=0.00).value,
11.4735460, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 14.25, 47.91280491,
hs=0.0000000, p=0.01, tau=0.00).value,
9.7117950, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 29.00, 30.87067768,
hs=0.0691640, p=0.01, tau=0.00).value,
25.7166770, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 29.00, 40.97052773,
hs=0.0567010, p=0.01, tau=0.00).value,
39.0732330, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 29.00, 47.91280491,
hs=0.0000000, p=0.01, tau=0.00).value,
33.4169840, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 14.25, 59.81487174,
hs=0.0000000, p=0.01, tau=0.00).value,
12.7605400, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 14.25, 49.20900369,
hs=0.0000750, p=0.01, tau=0.00).value,
18.0693890, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 29.00, 59.81487174,
hs=0.0000000, p=0.01, tau=0.00).value,
46.3202450, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 29.00, 49.20900369,
hs=0.0000750, p=0.01, tau=0.00).value,
63.4638480, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 14.25, 55.90591362,
hs=0.2175590, p=0.01, tau=0.00).value,
14.1707990, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 14.25, 67.76751981,
hs=0.2361040, p=0.01, tau=0.00).value,
19.6617050, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 14.25, 38.14104832,
hs=2.4500050, p=0.01, tau=0.00).value,
11.0626540, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 29.00, 55.90591362,
hs=0.2175590, p=0.01, tau=0.00).value,
51.7227180, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 29.00, 67.76751981,
hs=0.2361040, p=0.01, tau=0.00).value,
70.5396050, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 29.00, 38.14104832,
hs=2.4500050, p=0.01, tau=0.00).value,
35.1160650, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 14.25, 30.87067768,
hs=0.0691640, p=0.10, tau=0.00).value,
2.4567600, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 14.25, 40.97052773,
hs=0.0567010, p=0.10, tau=0.00).value,
3.8947980, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 14.25, 47.91280491,
hs=0.0000000, p=0.10, tau=0.00).value,
3.2920370, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 29.00, 30.87067768,
hs=0.0691640, p=0.10, tau=0.00).value,
9.4912070, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 29.00, 40.97052773,
hs=0.0567010, p=0.10, tau=0.00).value,
15.0594580, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 29.00, 47.91280491,
hs=0.0000000, p=0.10, tau=0.00).value,
12.8747840, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 14.25, 59.81487174,
hs=0.0000000, p=0.10, tau=0.00).value,
4.9248970, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 14.25, 49.20900369,
hs=0.0000750, p=0.10, tau=0.00).value,
6.9660660, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 29.00, 59.81487174,
hs=0.0000000, p=0.10, tau=0.00).value,
20.4321420, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 29.00, 49.20900369,
hs=0.0000750, p=0.10, tau=0.00).value,
27.8677430, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 14.25, 55.90591362,
hs=0.2175590, p=0.10, tau=0.00).value,
5.2338740, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 14.25, 67.76751981,
hs=0.2361040, p=0.10, tau=0.00).value,
9.6728110, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 14.25, 38.14104832,
hs=2.4500050, p=0.10, tau=0.00).value,
4.4453390, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 29.00, 55.90591362,
hs=0.2175590, p=0.10, tau=0.00).value,
21.8460210, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 29.00, 67.76751981,
hs=0.2361040, p=0.10, tau=0.00).value,
39.6143120, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 29.00, 38.14104832,
hs=2.4500050, p=0.10, tau=0.00).value,
15.9048720, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 14.25, 30.87067768,
hs=0.0691640, p=1.00, tau=0.00).value,
0.5628470, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 14.25, 40.97052773,
hs=0.0567010, p=1.00, tau=0.00).value,
0.9317550, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 14.25, 47.91280491,
hs=0.0000000, p=1.00, tau=0.00).value,
0.7619040, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 29.00, 30.87067768,
hs=0.0691640, p=1.00, tau=0.00).value,
2.4686380, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 29.00, 40.97052773,
hs=0.0567010, p=1.00, tau=0.00).value,
4.0904280, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 29.00, 47.91280491,
hs=0.0000000, p=1.00, tau=0.00).value,
3.3867490, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 14.25, 59.81487174,
hs=0.0000000, p=1.00, tau=0.00).value,
1.0593540, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 14.25, 49.20900369,
hs=0.0000750, p=1.00, tau=0.00).value,
1.6122160, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 29.00, 59.81487174,
hs=0.0000000, p=1.00, tau=0.00).value,
5.0231130, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 29.00, 49.20900369,
hs=0.0000750, p=1.00, tau=0.00).value,
7.3463010, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 14.25, 55.90591362,
hs=0.2175590, p=1.00, tau=0.00).value,
1.2022670, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 14.25, 67.76751981,
hs=0.2361040, p=1.00, tau=0.00).value,
1.7852610, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 14.25, 38.14104832,
hs=2.4500050, p=1.00, tau=0.00).value,
0.8916230, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 29.00, 55.90591362,
hs=0.2175590, p=1.00, tau=0.00).value,
5.7386810, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 29.00, 67.76751981,
hs=0.2361040, p=1.00, tau=0.00).value,
8.3461990, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 29.00, 38.14104832,
hs=2.4500050, p=1.00, tau=0.00).value,
3.5957140, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 14.25, 30.87067768,
hs=0.0691640, p=0.01, tau=0.00).value,
7.5572640, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 14.25, 40.97052773,
hs=0.0567010, p=0.01, tau=0.00).value,
11.4735460, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 14.25, 47.91280491,
hs=0.0000000, p=0.01, tau=0.00).value,
9.7117950, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 29.00, 30.87067768,
hs=0.0691640, p=0.01, tau=0.00).value,
25.7166770, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 29.00, 40.97052773,
hs=0.0567010, p=0.01, tau=0.00).value,
39.0732330, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 29.00, 47.91280491,
hs=0.0000000, p=0.01, tau=0.00).value,
33.4169840, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 14.25, 59.81487174,
hs=0.0000000, p=0.01, tau=0.00).value,
12.7605400, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 14.25, 49.20900369,
hs=0.0000750, p=0.01, tau=0.00).value,
18.0693890, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 29.00, 59.81487174,
hs=0.0000000, p=0.01, tau=0.00).value,
46.3202450, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 29.00, 49.20900369,
hs=0.0000750, p=0.01, tau=0.00).value,
63.4638480, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 14.25, 55.90591362,
hs=0.2175590, p=0.01, tau=0.00).value,
14.1707990, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 14.25, 67.76751981,
hs=0.2361040, p=0.01, tau=0.00).value,
19.6617050, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 14.25, 38.14104832,
hs=2.4500050, p=0.01, tau=0.00).value,
11.0626540, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 29.00, 55.90591362,
hs=0.2175590, p=0.01, tau=0.00).value,
51.7227180, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 29.00, 67.76751981,
hs=0.2361040, p=0.01, tau=0.00).value,
70.5396050, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 29.00, 38.14104832,
hs=2.4500050, p=0.01, tau=0.00).value,
35.1160650, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 14.25, 30.87067768,
hs=0.0691640, p=0.10, tau=0.00).value,
2.4567600, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 14.25, 40.97052773,
hs=0.0567010, p=0.10, tau=0.00).value,
3.8947980, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 14.25, 47.91280491,
hs=0.0000000, p=0.10, tau=0.00).value,
3.2920370, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 29.00, 30.87067768,
hs=0.0691640, p=0.10, tau=0.00).value,
9.4912070, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 29.00, 40.97052773,
hs=0.0567010, p=0.10, tau=0.00).value,
15.0594580, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 29.00, 47.91280491,
hs=0.0000000, p=0.10, tau=0.00).value,
12.8747840, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 14.25, 59.81487174,
hs=0.0000000, p=0.10, tau=0.00).value,
4.9248970, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 14.25, 49.20900369,
hs=0.0000750, p=0.10, tau=0.00).value,
6.9660660, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 29.00, 59.81487174,
hs=0.0000000, p=0.10, tau=0.00).value,
20.4321420, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 29.00, 49.20900369,
hs=0.0000750, p=0.10, tau=0.00).value,
27.8677430, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 14.25, 55.90591362,
hs=0.2175590, p=0.10, tau=0.00).value,
5.2338740, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 14.25, 67.76751981,
hs=0.2361040, p=0.10, tau=0.00).value,
9.6728110, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 14.25, 38.14104832,
hs=2.4500050, p=0.10, tau=0.00).value,
4.4453390, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 29.00, 55.90591362,
hs=0.2175590, p=0.10, tau=0.00).value,
21.8460210, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 29.00, 67.76751981,
hs=0.2361040, p=0.10, tau=0.00).value,
39.6143120, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 29.00, 38.14104832,
hs=2.4500050, p=0.10, tau=0.00).value,
15.9048720, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 14.25, 30.87067768,
hs=0.0691640, p=1.00, tau=0.00).value,
0.5628470, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 14.25, 40.97052773,
hs=0.0567010, p=1.00, tau=0.00).value,
0.9317550, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 14.25, 47.91280491,
hs=0.0000000, p=1.00, tau=0.00).value,
0.7619040, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.500, 359.86, 29.00, 30.87067768,
hs=0.0691640, p=1.00, tau=0.00).value,
2.4686380, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.900, 12.49, 29.00, 40.97052773,
hs=0.0567010, p=1.00, tau=0.00).value,
4.0904280, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.940, 18.43, 29.00, 47.91280491,
hs=0.0000000, p=1.00, tau=0.00).value,
3.3867490, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 14.25, 59.81487174,
hs=0.0000000, p=1.00, tau=0.00).value,
1.0593540, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 14.25, 49.20900369,
hs=0.0000750, p=1.00, tau=0.00).value,
1.6122160, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.900, 316.77, 29.00, 59.81487174,
hs=0.0000000, p=1.00, tau=0.00).value,
5.0231130, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.780, 279.78, 29.00, 49.20900369,
hs=0.0000750, p=1.00, tau=0.00).value,
7.3463010, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 14.25, 55.90591362,
hs=0.2175590, p=1.00, tau=0.00).value,
1.2022670, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 14.25, 67.76751981,
hs=0.2361040, p=1.00, tau=0.00).value,
1.7852610, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 14.25, 38.14104832,
hs=2.4500050, p=1.00, tau=0.00).value,
0.8916230, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.30, 29.00, 55.90591362,
hs=0.2175590, p=1.00, tau=0.00).value,
5.7386810, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.70, 29.00, 67.76751981,
hs=0.2361040, p=1.00, tau=0.00).value,
8.3461990, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.050, 38.70, 29.00, 38.14104832,
hs=2.4500050, p=1.00, tau=0.00).value,
3.5957140, places=5)
def test_scintillation_attenuation(self):
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 14.25, 30.87067768, 0.001, 0.9, eta=0.6).value,
0.866044, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.97052773, 0.001, 0.9, eta=0.6).value,
0.710527, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 47.91280491, 0.001, 0.9, eta=0.6).value,
0.764448, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 29, 30.87067768, 0.001, 0.9, eta=0.6).value,
1.289482, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.97052773, 0.001, 0.9, eta=0.6).value,
1.054611, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 47.91280491, 0.001, 0.9, eta=0.6).value,
1.132606, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 14.25, 59.81487174, 0.001, 0.9, eta=0.6).value,
0.699472, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 14.25, 49.20900369, 0.001, 0.9, eta=0.6).value,
0.912438, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 29, 59.81487174, 0.001, 0.9, eta=0.6).value,
1.033819, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 29, 49.20900369, 0.001, 0.9, eta=0.6).value,
1.351457, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 55.90591362, 0.001, 0.9, eta=0.6).value,
0.571530, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 67.76751981, 0.001, 0.9, eta=0.6).value,
0.736636, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 38.14104832, 0.001, 0.9, eta=0.6).value,
0.733740, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 55.90591362, 0.001, 0.9, eta=0.6).value,
0.845322, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 67.76751981, 0.001, 0.9, eta=0.6).value,
1.087468, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 38.14104832, 0.001, 0.9, eta=0.6).value,
1.089954, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 14.25, 30.87067768, 0.1, 0.9, eta=0.6).value,
0.402326, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.97052773, 0.1, 0.9, eta=0.6).value,
0.330080, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 47.91280491, 0.1, 0.9, eta=0.6).value,
0.355129, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 29, 30.87067768, 0.1, 0.9, eta=0.6).value,
0.599037, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.97052773, 0.1, 0.9, eta=0.6).value,
0.489926, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 47.91280491, 0.1, 0.9, eta=0.6).value,
0.526159, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 14.25, 59.81487174, 0.1, 0.9, eta=0.6).value,
0.324944, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 14.25, 49.20900369, 0.1, 0.9, eta=0.6).value,
0.423879, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 29, 59.81487174, 0.1, 0.9, eta=0.6).value,
0.480267, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 29, 49.20900369, 0.1, 0.9, eta=0.6).value,
0.627828, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 55.90591362, 0.1, 0.9, eta=0.6).value,
0.265508, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 67.76751981, 0.1, 0.9, eta=0.6).value,
0.342209, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 38.14104832, 0.1, 0.9, eta=0.6).value,
0.340864, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 55.90591362, 0.1, 0.9, eta=0.6).value,
0.392700, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 67.76751981, 0.1, 0.9, eta=0.6).value,
0.505190, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 38.14104832, 0.1, 0.9, eta=0.6).value,
0.506345, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 14.25, 30.87067768, 1, 0.9, eta=0.6).value,
0.249221, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.97052773, 1, 0.9, eta=0.6).value,
0.204468, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 47.91280491, 1, 0.9, eta=0.6).value,
0.219985, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, 359.86, 29, 30.87067768, 1, 0.9, eta=0.6).value,
0.371074, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.97052773, 1, 0.9, eta=0.6).value,
0.303485, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 47.91280491, 1, 0.9, eta=0.6).value,
0.325930, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 14.25, 59.81487174, 1, 0.9, eta=0.6).value,
0.201287, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 14.25, 49.20900369, 1, 0.9, eta=0.6).value,
0.262572, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, 316.77, 29, 59.81487174, 1, 0.9, eta=0.6).value,
0.297502, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, 279.78, 29, 49.20900369, 1, 0.9, eta=0.6).value,
0.388909, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 55.90591362, 1, 0.9, eta=0.6).value,
0.164469, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 67.76751981, 1, 0.9, eta=0.6).value,
0.211982, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 38.14104832, 1, 0.9, eta=0.6).value,
0.211148, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 55.90591362, 1, 0.9, eta=0.6).value,
0.243258, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 67.76751981, 1, 0.9, eta=0.6).value,
0.312940, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 38.14104832, 1, 0.9, eta=0.6).value,
0.313656, places=5)
class ITUR618_13TestCase(test.TestCase):
def setUp(self):
models.itu453.change_version(13)
models.itu618.change_version(13)
models.itu676.change_version(11)
models.itu836.change_version(6)
models.itu837.change_version(7)
models.itu838.change_version(3)
models.itu839.change_version(4)
models.itu840.change_version(7)
models.itu1510.change_version(1)
models.itu1511.change_version(1)
def test_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=1.0,
tau=0, R001=26.48052).value,
0.4891464, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=1.0,
tau=0, R001=33.936232).value,
0.62159245, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=1.0,
tau=0, R001=27.13586832).value,
0.42101702, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.1,
tau=0, R001=26.48052).value,
2.16093996, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.1,
tau=0, R001=33.936232).value,
2.69015654, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.1,
tau=0, R001=27.13586832).value,
1.91338757, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.01,
tau=0, R001=26.48052).value,
6.72784425, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.01,
tau=0, R001=33.936232).value,
8.20500328, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.01,
tau=0, R001=27.13586832).value,
5.9418061, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.001,
tau=0, R001=26.48052).value,
14.76177358, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.001,
tau=0, R001=33.936232).value,
17.636376, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.001,
tau=0, R001=27.13586832).value,
12.98151687, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=1.0,
tau=0, R001=26.48052).value,
2.17898357, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=1.0,
tau=0, R001=33.936232).value,
2.81537632, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=1.0,
tau=0, R001=27.13586832).value,
1.96063611, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.1,
tau=0, R001=26.48052).value,
8.46779316, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.1,
tau=0, R001=33.936232).value,
10.70289842, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.1,
tau=0, R001=27.13586832).value,
7.80832251, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.01,
tau=0, R001=26.48052).value,
23.1908096, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.01,
tau=0, R001=33.936232).value,
28.67449232, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.01,
tau=0, R001=27.13586832).value,
21.24861968, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.001,
tau=0, R001=26.48052).value,
44.76009125, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.001,
tau=0, R001=33.936232).value,
54.14015005, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.001,
tau=0, R001=27.13586832).value,
40.68133015, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=1.0,
tau=0, R001=50.639304).value,
1.70690128, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=1.0,
tau=0, R001=78.2994993).value,
1.43904149, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.1,
tau=0, R001=50.639304).value,
8.27164744, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.1,
tau=0, R001=78.2994993).value,
6.30417186, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.01,
tau=0, R001=50.639304).value,
18.94410356, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.01,
tau=0, R001=78.2994993).value,
16.44617644, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.001,
tau=0, R001=50.639304).value,
29.91171296, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.001,
tau=0, R001=78.2994993).value,
29.95767701, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=1.0,
tau=0, R001=50.639304).value,
6.81336808, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=1.0,
tau=0, R001=78.2994993).value,
6.66385625, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.1,
tau=0, R001=50.639304).value,
29.31896844, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.1,
tau=0, R001=78.2994993).value,
25.59455941, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.01,
tau=0, R001=50.639304).value,
59.62576355, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.01,
tau=0, R001=78.2994993).value,
58.53988572, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.001,
tau=0, R001=50.639304).value,
83.5996391, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.001,
tau=0, R001=78.2994993).value,
93.48939944, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=1.0,
tau=90, R001=63.61888808).value,
1.2731081, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=1.0,
tau=90, R001=99.15117186).value,
1.93713255, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=1.0,
tau=90, R001=42.91007183).value,
1.04440572, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.1,
tau=90, R001=63.61888808).value,
5.48101228, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.1,
tau=90, R001=99.15117186).value,
10.67987642, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.1,
tau=90, R001=42.91007183).value,
6.0510347, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.01,
tau=90, R001=63.61888808).value,
14.85903351, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.01,
tau=90, R001=99.15117186).value,
21.03740448, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.01,
tau=90, R001=42.91007183).value,
12.61120361, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.001,
tau=90, R001=63.61888808).value,
28.21372983, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.001,
tau=90, R001=99.15117186).value,
28.13337932, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.001,
tau=90, R001=42.91007183).value,
17.85045772, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=1.0,
tau=90, R001=63.61888808).value,
5.88085649, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=1.0,
tau=90, R001=99.15117186).value,
9.84052929, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=1.0,
tau=90, R001=42.91007183).value,
3.8213237, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.1,
tau=90, R001=63.61888808).value,
22.20219047, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.1,
tau=90, R001=99.15117186).value,
47.18910296, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.1,
tau=90, R001=42.91007183).value,
19.80717661, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.01,
tau=90, R001=63.61888808).value,
52.7819415, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.01,
tau=90, R001=99.15117186).value,
80.85074503, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.01,
tau=90, R001=42.91007183).value,
36.93157357, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.001,
tau=90, R001=63.61888808).value,
87.88505965, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.001,
tau=90, R001=99.15117186).value,
94.0437949, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.001,
tau=90, R001=42.91007183).value,
46.76694226, places=5)
def test_probability_of_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
51.5, -0.14, 31.07694309).value,
7.32466089, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
41.9, 12.49, 40.23202374).value,
7.08992377, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
33.94, 18.43, 46.35969261).value,
1.74467895, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
22.9, -43.23, 22.27833468).value,
2.5828985, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
25.78, -80.22, 52.6789929).value,
4.0392312, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
28.717, 77.3, 48.24116215).value,
1.64420965, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
3.133, 101.7, 85.80457401).value,
5.00075505, places=4)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
9.05, 38.7, 20.14348033).value,
7.0357202, places=5)
# def test_site_diversity(self):
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 14.5, tau=0).value,
# 0.00098637, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 3, 52.48526958, 14.5, tau=0).value,
# 0.0049444, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 3, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 14.5, tau=0).value,
# 0.00503721, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 18, tau=0).value,
# 0.00513052, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 3, 52.48526958, 18, tau=0).value,
# 0.01982845, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 3, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 18, tau=0).value,
# 0.02027952, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 29, tau=0).value,
# 0.07543135, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 9, 52.40999326,
# 25.463, -80.486, 3, 52.48526958, 29, tau=0).value,
# 0.16564191, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.768, -80.205, 3, 52.40999326,
# 25.463, -80.486, 9, 52.48526958, 29, tau=0).value,
# 0.17005653, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.796, -80.287, 9, 52.33141826,
# 25.889, -80.278, 9, 52.25682688, 29, tau=0).value,
# 0.25228844, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.796, -80.287, 9, 52.33141826,
# 25.889, -80.278, 3, 52.25682688, 29, tau=0).value,
# 0.40360211, places=5)
# self.assertAlmostEqual(
# models.itu618.site_diversity_rain_outage_probability(
# 25.796, -80.287, 3, 52.33141826,
# 25.889, -80.278, 9, 52.25682688, 29, tau=0).value,
# 0.39740505, places=5)
def test_scintillation_attenuation(self):
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 1, 1, 0.65).value,
0.26193234, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 1, 1, 0.65).value,
0.22405226, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 1, 1, 0.65).value,
0.23279942, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.1, 1, 0.65).value,
0.4228461, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.1, 1, 0.65).value,
0.36169504, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.1, 1, 0.65).value,
0.37581586, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.01, 1, 0.65).value,
0.62828836, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.01, 1, 0.65).value,
0.5374267, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.01, 1, 0.65).value,
0.55840821, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.001, 1, 0.65).value,
0.91021486, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.001, 1, 0.65).value,
0.77858162, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.001, 1, 0.65).value,
0.80897798, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 1, 1, 0.65).value,
0.38849319, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 1, 1, 0.65).value,
0.33115269, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 1, 1, 0.65).value,
0.34339899, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.1, 1, 0.65).value,
0.62715751, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.1, 1, 0.65).value,
0.53459083, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.1, 1, 0.65).value,
0.55436043, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.01, 1, 0.65).value,
0.93186567, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.01, 1, 0.65).value,
0.79432493, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.01, 1, 0.65).value,
0.82369971, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.001, 1, 0.65).value,
1.35001384, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.001, 1, 0.65).value,
1.15075561, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.001, 1, 0.65).value,
1.19331148, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 1, 1, 0.65).value,
0.62009744, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 1, 1, 0.65).value,
0.2664749, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.1, 1, 0.65).value,
1.00104396, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.1, 1, 0.65).value,
0.43017931, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.01, 1, 0.65).value,
1.48740705, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.01, 1, 0.65).value,
0.63918446, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.001, 1, 0.65).value,
2.15483859, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.001, 1, 0.65).value,
0.92600027, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 1, 1, 0.65).value,
0.92341029, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 1, 1, 0.65).value,
0.39237999, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.1, 1, 0.65).value,
1.49069201, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.1, 1, 0.65).value,
0.63343209, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.01, 1, 0.65).value,
2.21495349, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.01, 1, 0.65).value,
0.9411888, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.001, 1, 0.65).value,
3.20885076, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.001, 1, 0.65).value,
1.36352046, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 1, 1, 0.65).value,
0.2156413, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 1, 1, 0.65).value,
0.22167129, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 1, 1, 0.65).value,
0.48533645, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.1, 1, 0.65).value,
0.34811693, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.1, 1, 0.65).value,
0.35785136, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.1, 1, 0.65).value,
0.78349481, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.01, 1, 0.65).value,
0.51725159, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.01, 1, 0.65).value,
0.53171554, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.01, 1, 0.65).value,
1.16416037, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.001, 1, 0.65).value,
0.7493535, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.001, 1, 0.65).value,
0.77030774, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.001, 1, 0.65).value,
1.68654418, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 1, 1, 0.65).value,
0.31791278, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 1, 1, 0.65).value,
0.32486881, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 1, 1, 0.65).value,
0.72351614, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.1, 1, 0.65).value,
0.5132172, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.1, 1, 0.65).value,
0.52444655, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.1, 1, 0.65).value,
1.16799623, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.01, 1, 0.65).value,
0.76256679, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.01, 1, 0.65).value,
0.77925198, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.01, 1, 0.65).value,
1.73547406, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.001, 1, 0.65).value,
1.10474691, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.001, 1, 0.65).value,
1.12891911, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.001, 1, 0.65).value,
2.5142186, places=5)
def test_rain_cross_polarization_discrimination(self):
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
0.4891464, 14.25, 31.07694309, 1.0, 0).value,
49.57582307, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
0.62159245, 14.25, 40.23202374, 1.0, 0).value,
49.3981550, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
0.42101702, 14.25, 46.35969261, 1.0, 0).value,
53.93857057, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.16093996, 14.25, 31.07694309, 0.1, 0).value,
40.29800396, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.69015654, 14.25, 40.23202374, 0.1, 0).value,
40.28034662, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.91338757, 14.25, 46.35969261, 0.1, 0).value,
44.68265675, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.72784425, 14.25, 31.07694309, 0.01, 0).value,
32.97842659, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
8.20500328, 14.25, 40.23202374, 0.01, 0).value,
33.13972017, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
5.9418061, 14.25, 46.35969261, 0.01, 0).value,
37.62918682, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
14.76177358, 14.25, 31.07694309, 0.001, 0).value,
28.14021762, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
17.636376, 14.25, 40.23202374, 0.001, 0).value,
28.49940232, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
12.98151687, 14.25, 46.35969261, 0.001, 0).value,
33.07510332, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.17898357, 29.0, 31.07694309, 1.0, 0).value,
44.30006506, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.81537632, 29.0, 40.23202374, 1.0, 0).value,
43.8603725, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.96063611, 29.0, 46.35969261, 1.0, 0).value,
48.36964892, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
8.46779316, 29.0, 31.07694309, 0.1, 0).value,
35.03444, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
10.70289842, 29.0, 40.23202374, 0.1, 0).value,
34.76315732, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
7.80832251, 29.0, 46.35969261, 0.1, 0).value,
39.12690283, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
23.1908096, 29.0, 31.07694309, 0.01, 0).value,
27.96431726, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
28.67449232, 29.0, 40.23202374, 0.01, 0).value,
27.8830305, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
21.24861968, 29.0, 46.35969261, 0.01, 0).value,
32.34366876, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
44.76009125, 29.0, 31.07694309, 0.001, 0).value,
23.64462724, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
54.14015005, 29.0, 40.23202374, 0.001, 0).value,
23.7749224, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
40.68133015, 29.0, 46.35969261, 0.001, 0).value,
28.33381119, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.70690128, 14.25, 22.27833468, 1.0, 0).value,
38.65072987, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.43904149, 14.25, 52.6789929, 1.0, 0).value,
46.23051298, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
8.27164744, 14.25, 22.27833468, 0.1, 0).value,
27.9634536, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.30417186, 14.25, 52.6789929, 0.1, 0).value,
36.82555192, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
18.94410356, 14.25, 22.27833468, 0.01, 0).value,
22.64492814, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
16.44617644, 14.25, 52.6789929, 0.01, 0).value,
30.86009092, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
29.91171296, 14.25, 22.27833468, 0.001, 0).value,
20.29292318, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
29.95767701, 14.25, 52.6789929, 0.001, 0).value,
27.62415271, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.81336808, 29.0, 22.27833468, 1.0, 0).value,
33.64688473, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.66385625, 29.0, 52.6789929, 1.0, 0).value,
40.0755612, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
29.31896844, 29.0, 22.27833468, 0.1, 0).value,
22.85413903, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
25.59455941, 29.0, 52.6789929, 0.1, 0).value,
30.6650529, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
59.62576355, 29.0, 22.27833468, 0.01, 0).value,
17.88255372, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
58.53988572, 29.0, 52.6789929, 0.01, 0).value,
25.03203051, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
83.5996391, 29.0, 22.27833468, 0.001, 0).value,
16.16922861, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
93.48939944, 29.0, 52.6789929, 0.001, 0).value,
22.41718851, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.2731081, 14.25, 48.24116215, 1.0, 90).value,
45.80237934, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.93713255, 14.25, 85.80457401, 1.0, 90).value,
75.12972446, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.04440572, 14.25, 20.14348033, 1.0, 90).value,
42.28242577, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
5.48101228, 14.25, 48.24116215, 0.1, 90).value,
36.51649699, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
10.67987642, 14.25, 85.80457401, 0.1, 90).value,
65.51910372, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.0510347, 14.25, 20.14348033, 0.1, 90).value,
30.32827626, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
14.85903351, 14.25, 48.24116215, 0.01, 90).value,
30.19759496, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
21.03740448, 14.25, 85.80457401, 0.01, 90).value,
63.60558975, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
12.61120361, 14.25, 20.14348033, 0.01, 90).value,
25.96615447, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
28.21372983, 14.25, 48.24116215, 0.001, 90).value,
26.54453432, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
28.13337932, 14.25, 85.80457401, 0.001, 90).value,
64.9390721, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
17.85045772, 14.25, 20.14348033, 0.001, 90).value,
24.79563761, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
5.88085649, 29.0, 48.24116215, 1.0, 90).value,
39.73121592, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
9.84052929, 29.0, 85.80457401, 1.0, 90).value,
68.04931733, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
3.8213237, 29.0, 20.14348033, 1.0, 90).value,
38.25788658, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
22.20219047, 29.0, 48.24116215, 0.1, 90).value,
30.45232663, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
47.18910296, 29.0, 85.80457401, 0.1, 90).value,
58.32352317, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
19.80717661, 29.0, 20.14348033, 0.1, 90).value,
26.0924586, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
52.7819415, 29.0, 48.24116215, 0.01, 90).value,
24.4471052, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
80.85074503, 29.0, 85.80457401, 0.01, 90).value,
56.92074963, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
36.93157357, 29.0, 20.14348033, 0.01, 90).value,
22.11041426, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
87.88505965, 29.0, 48.24116215, 0.001, 90).value,
21.39198393, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
94.0437949, 29.0, 85.80457401, 0.001, 90).value,
59.09547625, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
46.76694226, 29.0, 20.14348033, 0.001, 90).value,
21.61918999, places=5)
def total_attenuation_fcn(self, lat, lon, f, el, p, D, eta, tau,
val_g, val_c, val_r, val_s, val_t):
# The validation function uses the exact method to compute the rainfall
# rate exceeded for 0.01% of the time
R001 = models.itu837.rainfall_rate(lat, lon, 0.01000000001)
A_g, A_c, A_r, A_s, A = itur.atmospheric_attenuation_slant_path(
lat, lon, f, el, p, D, eta=eta, tau=tau, R001=R001,
return_contributions=True)
self.assertAlmostEqual(A_g.value, val_g, places=5)
self.assertAlmostEqual(A_c.value, val_c, places=5)
self.assertAlmostEqual(A_r.value, val_r, places=5)
self.assertAlmostEqual(A_s.value, val_s, places=5)
self.assertAlmostEqual(A.value, val_t, places=5)
def test_total_attenuation(self):
self.total_attenuation_fcn(
51.5, -0.14, 14.25, 31.07694309, 1, 1, 0.65, 0,
0.223693782, 0.45517046, 0.48914539, 0.26193234, 1.203663661)
self.total_attenuation_fcn(
41.9, 12.49, 14.25, 40.23202374, 1, 1, 0.65, 0,
0.184499507, 0.26338517, 0.62159459, 0.22405226, 1.097400703)
self.total_attenuation_fcn(
33.94, 18.43, 14.25, 46.35969261, 1, 1, 0.65, 0,
0.168635988, 0.18779409, 0.42101546, 0.23279942, 0.820437057)
self.total_attenuation_fcn(
51.5, -0.14, 14.25, 31.07694309, 0.1, 1, 0.65, 0,
0.223693782, 0.45517046, 2.16093588, 0.4228461, 2.873752501)
self.total_attenuation_fcn(
41.9, 12.49, 14.25, 40.23202374, 0.1, 1, 0.65, 0,
0.184499507, 0.26338517, 2.69016502, 0.36169504, 3.16011407)
self.total_attenuation_fcn(
33.94, 18.43, 14.25, 46.35969261, 0.1, 1, 0.65, 0,
0.168635988, 0.18779409, 1.91338106, 0.37581586, 2.303155743)
self.total_attenuation_fcn(
51.5, -0.14, 14.25, 31.07694309, 0.01, 1, 0.65, 0,
0.223693782, 0.45517046, 6.72783273, 0.62828836, 7.434122415)
self.total_attenuation_fcn(
41.9, 12.49, 14.25, 40.23202374, 0.01, 1, 0.65, 0,
0.184499507, 0.26338517, 8.20502671, 0.5374267, 8.669947478)
self.total_attenuation_fcn(
33.94, 18.43, 14.25, 46.35969261, 0.01, 1, 0.65, 0,
0.168635988, 0.18779409, 5.94178778, 0.55840821, 6.323600947)
self.total_attenuation_fcn(
51.5, -0.14, 14.25, 31.07694309, 0.001, 1, 0.65, 0,
0.223693782, 0.45517046, 14.76175093, 0.91021486, 15.46781355)
self.total_attenuation_fcn(
41.9, 12.49, 14.25, 40.23202374, 0.001, 1, 0.65, 0,
0.184499507, 0.26338517, 17.63642115, 0.77858162, 18.10123067)
self.total_attenuation_fcn(
33.94, 18.43, 14.25, 46.35969261, 0.001, 1, 0.65, 0,
0.168635988, 0.18779409, 12.981481, 0.80897798, 13.36273512)
self.total_attenuation_fcn(
51.5, -0.14, 29, 31.07694309, 1, 1, 0.65, 0,
0.799999368, 1.77247154, 2.17897957, 0.38849319, 4.770502219)
self.total_attenuation_fcn(
41.9, 12.49, 29, 40.23202374, 1, 1, 0.65, 0,
0.673619867, 1.0256437, 2.81538514, 0.33115269, 4.528897381)
self.total_attenuation_fcn(
33.94, 18.43, 29, 46.35969261, 1, 1, 0.65, 0,
0.62972417, 0.73128577, 1.96062953, 0.34339899, 3.343454224)
self.total_attenuation_fcn(
51.5, -0.14, 29, 31.07694309, 0.1, 1, 0.65, 0,
0.799999368, 1.77247154, 8.46777895, 0.62715751, 11.05943682)
self.total_attenuation_fcn(
41.9, 12.49, 29, 40.23202374, 0.1, 1, 0.65, 0,
0.673619867, 1.0256437, 10.70292908, 0.53459083, 12.41436971)
self.total_attenuation_fcn(
33.94, 18.43, 29, 46.35969261, 0.1, 1, 0.65, 0,
0.62972417, 0.73128577, 7.80829852, 0.55436043, 9.18728313)
self.total_attenuation_fcn(
51.5, -0.14, 29, 31.07694309, 0.01, 1, 0.65, 0,
0.799999368, 1.77247154, 23.19077435, 0.93186567, 25.78063225)
self.total_attenuation_fcn(
41.9, 12.49, 29, 40.23202374, 0.01, 1, 0.65, 0,
0.673619867, 1.0256437, 28.67456675, 0.79432493, 30.38445044)
self.total_attenuation_fcn(
33.94, 18.43, 29, 46.35969261, 0.01, 1, 0.65, 0,
0.62972417, 0.73128577, 21.24856054, 0.82369971, 22.62499923)
self.total_attenuation_fcn(
51.5, -0.14, 29, 31.07694309, 0.001, 1, 0.65, 0,
0.799999368, 1.77247154, 44.76003026, 1.35001384, 47.35208054)
self.total_attenuation_fcn(
41.9, 12.49, 29, 40.23202374, 0.001, 1, 0.65, 0,
0.673619867, 1.0256437, 54.14027603, 1.15075561, 55.85154062)
self.total_attenuation_fcn(
33.94, 18.43, 29, 46.35969261, 0.001, 1, 0.65, 0,
0.62972417, 0.73128577, 40.68122866, 1.19331148, 42.05942781)
self.total_attenuation_fcn(
22.9, -43.23, 14.25, 22.27833468, 1, 1, 0.65, 0,
0.383178724, 0.54183293, 1.70690691, 0.62009744, 2.715849229)
self.total_attenuation_fcn(
25.78, -80.22, 14.25, 52.6789929, 1, 1, 0.65, 0,
0.206227197, 0.53317506, 1.43904233, 0.2664749, 2.196365451)
self.total_attenuation_fcn(
22.9, -43.23, 14.25, 22.27833468, 0.1, 1, 0.65, 0,
0.383178724, 0.54183293, 8.27167236, 1.00104396, 9.253351467)
self.total_attenuation_fcn(
25.78, -80.22, 14.25, 52.6789929, 0.1, 1, 0.65, 0,
0.206227197, 0.53317506, 6.30417519, 0.43017931, 7.057096675)
self.total_attenuation_fcn(
22.9, -43.23, 14.25, 22.27833468, 0.01, 1, 0.65, 0,
0.383178724, 0.54183293, 18.94415527, 1.48740705, 19.92585295)
self.total_attenuation_fcn(
25.78, -80.22, 14.25, 52.6789929, 0.01, 1, 0.65, 0,
0.206227197, 0.53317506, 16.44618432, 0.63918446, 17.1976133)
self.total_attenuation_fcn(
22.9, -43.23, 14.25, 22.27833468, 0.001, 1, 0.65, 0,
0.383178724, 0.54183293, 29.91178614, 2.15483859, 30.91293869)
self.total_attenuation_fcn(
25.78, -80.22, 14.25, 52.6789929, 0.001, 1, 0.65, 0,
0.206227197, 0.53317506, 29.95768987, 0.92600027, 30.71115009)
self.total_attenuation_fcn(
22.9, -43.23, 29, 22.27833468, 1, 1, 0.65, 0,
1.504259763, 2.1099424, 6.81338837, 0.92341029, 10.4752418)
self.total_attenuation_fcn(
25.78, -80.22, 29, 52.6789929, 1, 1, 0.65, 0,
0.827675954, 2.07622792, 6.66385994, 0.39237999, 9.576567189)
self.total_attenuation_fcn(
22.9, -43.23, 29, 22.27833468, 0.1, 1, 0.65, 0,
1.504259763, 2.1099424, 29.31904828, 1.49069201, 32.9685827)
self.total_attenuation_fcn(
25.78, -80.22, 29, 52.6789929, 0.1, 1, 0.65, 0,
0.827675954, 2.07622792, 25.59457239, 0.63343209, 28.50572549)
self.total_attenuation_fcn(
22.9, -43.23, 29, 22.27833468, 0.01, 1, 0.65, 0,
1.504259763, 2.1099424, 59.62591067, 2.21495349, 63.27983401)
self.total_attenuation_fcn(
25.78, -80.22, 29, 52.6789929, 0.01, 1, 0.65, 0,
0.827675954, 2.07622792, 58.53991262, 0.9411888, 61.45112298)
self.total_attenuation_fcn(
22.9, -43.23, 29, 22.27833468, 0.001, 1, 0.65, 0,
1.504259763, 2.1099424, 83.59982398, 3.20885076, 87.2740725)
self.total_attenuation_fcn(
25.78, -80.22, 29, 52.6789929, 0.001, 1, 0.65, 0,
0.827675954, 2.07622792, 93.48943794, 1.36352046, 96.4030686)
self.total_attenuation_fcn(
28.717, 77.3, 14.25, 48.24116215, 1, 1, 0.65, 90,
0.257653026, 0.68592197, 1.27311232, 0.2156413, 2.228519972)
self.total_attenuation_fcn(
3.133, 101.7, 14.25, 85.80457401, 1, 1, 0.65, 90,
0.163655312, 0.62211863, 1.93712821, 0.22167129, 2.732484342)
self.total_attenuation_fcn(
9.05, 38.7, 14.25, 20.14348033, 1, 1, 0.65, 90,
0.22310495, 0.65764822, 1.04440674, 0.48533645, 1.993003982)
self.total_attenuation_fcn(
28.717, 77.3, 14.25, 48.24116215, 0.1, 1, 0.65, 90,
0.257653026, 0.68592197, 5.48102886, 0.34811693, 6.434421439)
self.total_attenuation_fcn(
3.133, 101.7, 14.25, 85.80457401, 0.1, 1, 0.65, 90,
0.163655312, 0.62211863, 10.67985456, 0.35785136, 11.47129236)
self.total_attenuation_fcn(
9.05, 38.7, 14.25, 20.14348033, 0.1, 1, 0.65, 90,
0.22310495, 0.65764822, 6.05104013, 0.78349481, 6.977389778)
self.total_attenuation_fcn(
28.717, 77.3, 14.25, 48.24116215, 0.01, 1, 0.65, 90,
0.257653026, 0.68592197, 14.85907425, 0.51725159, 15.81125251)
self.total_attenuation_fcn(
3.133, 101.7, 14.25, 85.80457401, 0.01, 1, 0.65, 90,
0.163655312, 0.62211863, 21.03736546, 0.53171554, 21.82966493)
self.total_attenuation_fcn(
9.05, 38.7, 14.25, 20.14348033, 0.01, 1, 0.65, 90,
0.22310495, 0.65764822, 12.61121387, 1.16416037, 13.54293868)
self.total_attenuation_fcn(
28.717, 77.3, 14.25, 48.24116215, 0.001, 1, 0.65, 90,
0.257653026, 0.68592197, 28.21379917, 0.7493535, 29.16708769)
self.total_attenuation_fcn(
3.133, 101.7, 14.25, 85.80457401, 0.001, 1, 0.65, 90,
0.163655312, 0.62211863, 28.13333254, 0.77030774, 28.92942223)
self.total_attenuation_fcn(
9.05, 38.7, 14.25, 20.14348033, 0.001, 1, 0.65, 90,
0.22310495, 0.65764822, 17.85047073, 1.68654418, 18.80790784)
self.total_attenuation_fcn(
28.717, 77.3, 29, 48.24116215, 1, 1, 0.65, 90,
1.038585522, 2.67103709, 5.88087518, 0.31791278, 9.596404871)
self.total_attenuation_fcn(
3.133, 101.7, 29, 85.80457401, 1, 1, 0.65, 90,
0.645959831, 2.42258159, 9.84050759, 0.32486881, 12.9133514)
self.total_attenuation_fcn(
9.05, 38.7, 29, 20.14348033, 1, 1, 0.65, 90,
0.703128217, 2.56093676, 3.82132703, 0.72351614, 7.126271267)
self.total_attenuation_fcn(
28.717, 77.3, 29, 48.24116215, 0.1, 1, 0.65, 90,
1.038585522, 2.67103709, 22.20225497, 0.5132172, 25.9171717)
self.total_attenuation_fcn(
3.133, 101.7, 29, 85.80457401, 0.1, 1, 0.65, 90,
0.645959831, 2.42258159, 47.18900784, 0.52444655, 50.26032116)
self.total_attenuation_fcn(
9.05, 38.7, 29, 20.14348033, 0.1, 1, 0.65, 90,
0.703128217, 2.56093676, 19.80719237, 1.16799623, 23.10173121)
self.total_attenuation_fcn(
28.717, 77.3, 29, 48.24116215, 0.01, 1, 0.65, 90,
1.038585522, 2.67103709, 52.78208044, 0.76256679, 56.49694605)
self.total_attenuation_fcn(
3.133, 101.7, 29, 85.80457401, 0.01, 1, 0.65, 90,
0.645959831, 2.42258159, 80.85059735, 0.77925198, 83.92278473)
self.total_attenuation_fcn(
9.05, 38.7, 29, 20.14348033, 0.01, 1, 0.65, 90,
0.703128217, 2.56093676, 36.9316002, 1.73547406, 40.23377893)
self.total_attenuation_fcn(
28.717, 77.3, 29, 48.24116215, 0.001, 1, 0.65, 90,
1.038585522, 2.67103709, 87.88526702, 1.10474691, 91.6016281)
self.total_attenuation_fcn(
3.133, 101.7, 29, 85.80457401, 0.001, 1, 0.65, 90,
0.645959831, 2.42258159, 94.04364093, 1.12891911, 97.11878785)
self.total_attenuation_fcn(
9.05, 38.7, 29, 20.14348033, 0.001, 1, 0.65, 90,
0.703128217, 2.56093676, 46.76697248, 2.5142186, 50.09507012)
#class ITUR840_4TestCase(test.TestCase):
#
# def setUp(self):
# models.itu840.change_version(4)
#
# def test_columnar_content_reduced_liquid(self):
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 1.000).value,
# 1.26328612, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 1.000).value,
# 0.91467189, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 1.000).value,
# 0.73072098, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.100).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.100).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.100).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.010).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.010).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.010).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.001).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.001).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.001).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 1.000).value,
# 1.26328612, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 1.000).value,
# 0.91467189, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 1.000).value,
# 0.73072098, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.100).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.100).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.100).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.010).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.010).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.010).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 51.5, -0.14, 0.001).value,
# 1.90329847, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 41.9, 12.49, 0.001).value,
# 1.49845951, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 33.94, 18.43, 0.001).value,
# 1.47628568, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 1.000).value,
# 1.10444871, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 1.000).value,
# 2.27978216, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.100).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.100).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.010).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.010).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.001).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.001).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 1.000).value,
# 1.10444871, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 1.000).value,
# 2.27978216, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.100).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.100).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.010).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.010).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 22.9, -43.23, 0.001).value,
# 2.82993169, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 25.78, -80.22, 0.001).value,
# 3.52927516, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 1.000).value,
# 2.75109958, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 1.000).value,
# 3.33600769, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 1.000).value,
# 1.21770185, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.100).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.100).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.100).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.010).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.010).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.010).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.001).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.001).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.001).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 1.000).value,
# 2.75109958, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 1.000).value,
# 3.33600769, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 1.000).value,
# 1.21770185, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.100).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.100).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.100).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.010).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.010).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.010).value,
# 1.49251459, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 28.717, 77.3, 0.001).value,
# 4.23072604, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 3.133, 101.7, 0.001).value,
# 3.80525123, places=5)
# self.assertAlmostEqual(
# models.itu840.columnar_content_reduced_liquid(
# 9.05, 38.7, 0.001).value,
# 1.49251459, places=5)
#
# def test_cloud_attenuation(self):
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 1.000).value,
# 0.45792895, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 1.000).value,
# 0.25946553, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 1.000).value,
# 0.18313623, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 0.100).value,
# 0.68992722, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 0.100).value,
# 0.42506892, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 0.100).value,
# 0.36999265, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 0.010).value,
# 0.68992722, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 0.010).value,
# 0.42506892, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 0.010).value,
# 0.36999265, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 14.25, 0.001).value,
# 0.68992722, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 14.25, 0.001).value,
# 0.42506892, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 14.25, 0.001).value,
# 0.36999265, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 1.000).value,
# 1.79599547, places=2)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 1.000).value,
# 1.01762274, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 1.000).value,
# 0.71825953, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 0.100).value,
# 2.70589171, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 0.100).value,
# 1.66711854, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 0.100).value,
# 1.45110964, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 0.010).value,
# 2.70589171, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 0.010).value,
# 1.66711854, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 0.010).value,
# 1.45110964, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 51.5, -0.14, 30.87067768, 29, 0.001).value,
# 2.70589171, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 41.9, 12.49, 40.97052773, 29, 0.001).value,
# 1.66711854, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 33.94, 18.43, 47.91280491, 29, 0.001).value,
# 1.45110964, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 1.000).value,
# 0.23764476, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 1.000).value,
# 0.56006901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 0.100).value,
# 0.60891776, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 0.100).value,
# 0.86702917, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 0.010).value,
# 0.60891776, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 0.010).value,
# 0.86702917, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 14.25, 0.001).value,
# 0.60891776, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 14.25, 0.001).value,
# 0.86702917, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 1.000).value,
# 0.93204177, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 1.000).value,
# 2.19658834, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 0.100).value,
# 2.38817297, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 0.100).value,
# 3.40048483, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 0.010).value,
# 2.38817297, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 0.010).value,
# 3.40048483, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 22.9, -43.23, 59.81487174, 29, 0.001).value,
# 2.38817297, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 25.78, -80.22, 49.20900369, 29, 0.001).value,
# 3.40048483, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 1.000).value,
# 0.6178942, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 1.000).value,
# 0.67031269, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 1.000).value,
# 0.36671963, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 0.100).value,
# 0.95021681, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 0.100).value,
# 0.76459901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 0.100).value,
# 0.44948146, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 0.010).value,
# 0.95021681, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 0.010).value,
# 0.76459901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 0.010).value,
# 0.44948146, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 14.25, 0.001).value,
# 0.95021681, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 14.25, 0.001).value,
# 0.76459901, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 14.25, 0.001).value,
# 0.44948146, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 1.000).value,
# 2.4233785, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 1.000).value,
# 2.6289636, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 1.000).value,
# 1.43827289, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 0.100).value,
# 3.72674641, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 0.100).value,
# 2.99875418, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 0.100).value,
# 1.76286444, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 0.010).value,
# 3.72674641, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 0.010).value,
# 2.99875418, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 0.010).value,
# 1.76286444, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 28.717, 77.3, 55.90591362, 29, 0.001).value,
# 3.72674641, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 3.133, 101.7, 67.76751981, 29, 0.001).value,
# 2.99875418, places=3)
# self.assertAlmostEqual(
# models.itu840.cloud_attenuation(
# 9.05, 38.7, 38.14104832, 29, 0.001).value,
# 1.76286444, places=3)
class ITUR840_7TestCase(test.TestCase):
    """Validation tests for ITU-R P.840-7 (attenuation due to clouds and fog).

    Expected values come from the official ITU-R validation examples; each
    test iterates over a table of (inputs, expected) vectors instead of
    hundreds of copy-pasted assertions, and uses ``subTest`` so that a
    single bad vector does not hide failures in the remaining ones.
    """

    def setUp(self):
        # Pin the recommendation version so results match the P.840-7
        # validation data regardless of the package default.
        models.itu840.change_version(7)

    def test_columnar_content_reduced_liquid(self):
        """Check reduced columnar liquid water content against P.840-7 data."""
        # (latitude [deg], longitude [deg], p [% of time], expected [kg/m2])
        cases = [
            (3.133, 101.7, 0.1, 3.805251208),
            (3.133, 101.7, 0.15, 3.744512329),
            (3.133, 101.7, 0.3, 3.630957766),
            (3.133, 101.7, 0.35, 3.594946111),
            (22.9, -43.23, 0.1, 2.829931669),
            (22.9, -43.23, 0.15, 2.615428331),
            (22.9, -43.23, 0.3, 2.152560931),
            (22.9, -43.23, 0.35, 2.030424796),
            (23, 30, 0.1, 0.443821013),
            (23, 30, 0.15, 0.367758574),
            (23, 30, 0.3, 0.25249597),
            (23, 30, 0.35, 0.230476914),
            (25.78, -80.22, 0.1, 3.52927514),
            (25.78, -80.22, 0.15, 3.368053109),
            (25.78, -80.22, 0.3, 3.090031167),
            (25.78, -80.22, 0.35, 2.98280226),
            (28.717, 77.3, 0.1, 4.230726014),
            (28.717, 77.3, 0.15, 4.004951665),
            (28.717, 77.3, 0.3, 3.641943304),
            (28.717, 77.3, 0.35, 3.550068054),
            (33.94, 18.43, 0.1, 1.476285677),
            (33.94, 18.43, 0.15, 1.342662497),
            (33.94, 18.43, 0.3, 1.117630129),
            (33.94, 18.43, 0.35, 1.061278891),
            (41.9, 12.49, 0.1, 1.498459518),
            (41.9, 12.49, 0.15, 1.411411719),
            (41.9, 12.49, 0.3, 1.254176128),
            (41.9, 12.49, 0.35, 1.214239524),
            (51.5, -0.14, 0.1, 1.903298487),
            (51.5, -0.14, 0.15, 1.803803604),
            (51.5, -0.14, 0.3, 1.641289077),
        ]
        for lat, lon, p, expected in cases:
            with self.subTest(lat=lat, lon=lon, p=p):
                self.assertAlmostEqual(
                    models.itu840.columnar_content_reduced_liquid(
                        lat, lon, p).value,
                    expected, places=5)

    def test_cloud_attenuation(self):
        """Check slant-path cloud attenuation against P.840-7 data."""
        # (latitude [deg], longitude [deg], elevation [deg],
        #  frequency [GHz], p [% of time], expected attenuation [dB])
        cases = [
            (51.5, -0.14, 31.07694309, 14.25, 1.0, 0.45517046),
            (41.9, 12.49, 40.23202374, 14.25, 1.0, 0.26338517),
            (33.94, 18.43, 46.35969261, 14.25, 1.0, 0.18779409),
            (51.5, -0.14, 31.07694309, 14.25, 0.5, 0.53457216),
            (41.9, 12.49, 40.23202374, 14.25, 0.5, 0.3230387),
            (33.94, 18.43, 46.35969261, 14.25, 0.5, 0.23923797),
            (51.5, -0.14, 31.07694309, 14.25, 0.3, 0.59136745),
            (41.9, 12.49, 40.23202374, 14.25, 0.3, 0.36114741),
            (33.94, 18.43, 46.35969261, 14.25, 0.3, 0.2872291),
            (51.5, -0.14, 31.07694309, 14.25, 0.2, 0.62448748),
            (41.9, 12.49, 40.23202374, 14.25, 0.2, 0.38863977),
            (33.94, 18.43, 46.35969261, 14.25, 0.2, 0.32069677),
            (51.5, -0.14, 31.07694309, 29, 1.0, 1.77247154),
            (41.9, 12.49, 40.23202374, 29, 1.0, 1.0256437),
            (33.94, 18.43, 46.35969261, 29, 1.0, 0.73128577),
            (51.5, -0.14, 31.07694309, 29, 0.5, 2.08166837),
            (41.9, 12.49, 40.23202374, 29, 0.5, 1.2579395),
            (33.94, 18.43, 46.35969261, 29, 0.5, 0.9316125),
            (51.5, -0.14, 31.07694309, 29, 0.3, 2.30283391),
            (41.9, 12.49, 40.23202374, 29, 0.3, 1.40633801),
            (33.94, 18.43, 46.35969261, 29, 0.3, 1.11849396),
            (51.5, -0.14, 31.07694309, 29, 0.2, 2.43180607),
            (41.9, 12.49, 40.23202374, 29, 0.2, 1.51339553),
            (33.94, 18.43, 46.35969261, 29, 0.2, 1.24881983),
            (22.9, -43.23, 22.27833468, 14.25, 1.0, 0.54183293),
            (25.78, -80.22, 52.6789929, 14.25, 1.0, 0.53317506),
            (22.9, -43.23, 22.27833468, 14.25, 0.5, 0.85746792),
            (25.78, -80.22, 52.6789929, 14.25, 0.5, 0.63956606),
            (22.9, -43.23, 22.27833468, 14.25, 0.3, 1.05602769),
            (25.78, -80.22, 52.6789929, 14.25, 0.3, 0.72266885),
            (22.9, -43.23, 22.27833468, 14.25, 0.2, 1.20844208),
            (25.78, -80.22, 52.6789929, 14.25, 0.2, 0.76093789),
            (22.9, -43.23, 22.27833468, 29, 1.0, 2.1099424),
            (25.78, -80.22, 52.6789929, 29, 1.0, 2.07622792),
            (22.9, -43.23, 22.27833468, 29, 0.5, 3.33905126),
            (25.78, -80.22, 52.6789929, 29, 0.5, 2.49052334),
            (22.9, -43.23, 22.27833468, 29, 0.3, 4.11225948),
            (25.78, -80.22, 52.6789929, 29, 0.3, 2.81413248),
            (22.9, -43.23, 22.27833468, 29, 0.2, 4.70577375),
            (25.78, -80.22, 52.6789929, 29, 0.2, 2.96315532),
            (28.72, 77.3, 48.24116215, 14.25, 1.0, 0.68560078),
            (3.13, 101.7, 85.80457401, 14.25, 1.0, 0.62214817),
            (9.05, 38.7, 20.14348033, 14.25, 1.0, 0.65764822),
            (28.72, 77.3, 48.24116215, 14.25, 0.5, 0.83179446),
            (3.13, 101.7, 85.80457401, 14.25, 0.5, 0.65489922),
            (9.05, 38.7, 20.14348033, 14.25, 0.5, 0.7181604),
            (28.72, 77.3, 48.24116215, 14.25, 0.3, 0.90773089),
            (3.13, 101.7, 85.80457401, 14.25, 0.3, 0.6771593),
            (9.05, 38.7, 20.14348033, 14.25, 0.3, 0.75244454),
            (28.72, 77.3, 48.24116215, 14.25, 0.2, 0.95830261),
            (3.13, 101.7, 85.80457401, 14.25, 0.2, 0.69030616),
            (9.05, 38.7, 20.14348033, 14.25, 0.2, 0.77111549),
            (28.72, 77.3, 48.24116215, 29, 1.0, 2.66978635),
            (3.13, 101.7, 85.80457401, 29, 1.0, 2.42269662),
            (9.05, 38.7, 20.14348033, 29, 1.0, 2.56093676),
            (28.72, 77.3, 48.24116215, 29, 0.5, 3.23907665),
            (3.13, 101.7, 85.80457401, 29, 0.5, 2.55023192),
            (9.05, 38.7, 20.14348033, 29, 0.5, 2.79657622),
            (28.72, 77.3, 48.24116215, 29, 0.3, 3.53477943),
            (3.13, 101.7, 85.80457401, 29, 0.3, 2.63691452),
            (9.05, 38.7, 20.14348033, 29, 0.3, 2.93008149),
            (28.72, 77.3, 48.24116215, 29, 0.2, 3.73170991),
            (3.13, 101.7, 85.80457401, 29, 0.2, 2.68810948),
            (9.05, 38.7, 20.14348033, 29, 0.2, 3.00278773),
        ]
        for lat, lon, el, f, p, expected in cases:
            with self.subTest(lat=lat, lon=lon, el=el, f=f, p=p):
                self.assertAlmostEqual(
                    models.itu840.cloud_attenuation(
                        lat, lon, el, f, p).value,
                    expected, places=5)
class ITUR1511_1TestCase(test.TestCase):
    """Validation tests for ITU-R P.1511-1 (topography for Earth-space paths)."""

    def setUp(self):
        # Pin the recommendation version so results match the P.1511-1
        # validation data regardless of the package default.
        models.itu1511.change_version(1)

    def test_topographic_altitude(self):
        """Check topographic altitude against the P.1511-1 validation data."""
        # (latitude [deg], longitude [deg], expected altitude [km])
        cases = [
            (3.133, 101.7, 0.23610446),
            (22.9, -43.23, 0.0),
            (23.0, 30.0, 0.247),
            (25.78, -80.22, 7.511e-05),
            (28.717, 77.3, 0.21755946),
            (33.94, 18.43, 0.0),
            (41.9, 12.49, 0.05670104),
            (51.5, -0.14, 0.06916422),
        ]
        for lat, lon, expected in cases:
            with self.subTest(lat=lat, lon=lon):
                self.assertAlmostEqual(
                    models.itu1511.topographic_altitude(lat, lon).value,
                    expected, places=5)
class ITUR1511_2TestCase(test.TestCase):
    """Validation tests for ITU-R P.1511-2 (topography for Earth-space paths)."""

    def setUp(self):
        # Pin the recommendation version so results match the P.1511-2
        # validation data regardless of the package default.
        models.itu1511.change_version(2)

    def test_topographic_altitude(self):
        """Check topographic altitude against the P.1511-2 validation data."""
        # (latitude [deg], longitude [deg], expected altitude [km], places).
        # The original assertions used places=4 except for the two exact
        # sea-level sites, which used places=5; preserve that per case.
        cases = [
            (51.5, -0.14, 0.031382983999999, 4),
            (41.9, 12.49, 0.0461229880100015, 4),
            (33.94, 18.43, 0, 5),
            (22.9, -43.23, 0, 5),
            (25.78, -80.22, 0.00861727999508758, 4),
            (28.717, 77.3, 0.209383698952704, 4),
            (3.133, 101.7, 0.0512514559528945, 4),
            (9.05, 38.7, 2.5398618775, 4),
        ]
        for lat, lon, expected, places in cases:
            with self.subTest(lat=lat, lon=lon):
                self.assertAlmostEqual(
                    models.itu1511.topographic_altitude(lat, lon).value,
                    expected, places=places)
if __name__ == '__main__':
    # Bind the built suite to a distinct name: `suite = suite()` would
    # shadow the factory function, and the leading `pass` was dead code.
    test_suite = suite()
    print('Validation tests for the ITU-R models')
    print('------------------------')
    print(
        'A total of %d test-cases are going to be tested' %
        test_suite.countTestCases())
    # Flush so the banner is printed before the runner's own output.
    sys.stdout.flush()
    test.TextTestRunner(verbosity=2).run(test_suite)
| 41.444585
| 79
| 0.567102
|
import unittest as test
import itur
import itur.models as models
import sys
from astropy import units as u
def suite():
    """Assemble the ITU-R validation test suite.

    Pins each ITU-R recommendation module to the version the validation
    data was generated against, then registers every test case in a fixed,
    deterministic order and returns the populated ``TestSuite``.
    """
    test_suite = test.TestSuite()

    # Recommendation versions matching the validation examples.
    for module, version in (
            (models.itu453, 13),
            (models.itu618, 13),
            (models.itu676, 11),
            (models.itu836, 6),
            (models.itu837, 7),
            (models.itu838, 3),
            (models.itu839, 4),
            (models.itu840, 7),
            (models.itu1510, 1),
            (models.itu1511, 1)):
        module.change_version(version)

    # (test-case class, test method name), registered in order.
    registrations = [
        (ITUR676_9TestCase, 'test_gammaw'),
        (ITUR676_9TestCase, 'test_gamma0'),
        (ITUR676_11TestCase, 'test_gammaw_exact'),
        (ITUR676_11TestCase, 'test_gamma0_exact'),
        (ITUR676_11TestCase, 'test_gammaw_approx'),
        (ITUR676_11TestCase, 'test_gamma0_approx'),
        (ITUR676_11TestCase, 'test_zenit_water_vapour_attenuation'),
        (ITUR618_12TestCase, 'test_rain_cross_polarization_discrimination'),
        (ITUR618_12TestCase, 'test_rain_attenuation'),
        (ITUR618_12TestCase, 'test_scintillation_attenuation'),
        (ITUR618_13TestCase, 'test_rain_attenuation'),
        (ITUR618_13TestCase, 'test_probability_of_rain_attenuation'),
        (ITUR618_13TestCase, 'test_scintillation_attenuation'),
        (ITUR618_13TestCase, 'test_rain_cross_polarization_discrimination'),
        (ITUR618_13TestCase, 'test_total_attenuation'),
        (ITUR453_12TestCase, 'test_wet_term_radio_refractivity'),
        (ITUR453_13TestCase, 'test_wet_term_radio_refractivity'),
        # NOTE(review): the two ITUR836_6 names lack the usual `test_`
        # prefix; kept verbatim since they are addressed explicitly here.
        (ITUR836_6TestCase, 'surface_water_vapour_density'),
        (ITUR836_6TestCase, 'total_water_vapour_content'),
        (ITUR837_6TestCase, 'test_rainfall_rate'),
        (ITUR837_7TestCase, 'test_rainfall_rate'),
        (ITUR837_7TestCase, 'test_rainfall_probability'),
        (ITUR837_7TestCase, 'test_rainfall_rate_R001'),
        (ITUR838_3TestCase, 'test_rain_specific_attenuation'),
        (ITUR839_4TestCase, 'test_isoterm_0_deg'),
        (ITUR839_4TestCase, 'test_rain_height'),
        (ITUR840_7TestCase, 'test_columnar_content_reduced_liquid'),
        (ITUR840_7TestCase, 'test_cloud_attenuation'),
        (ITUR1511_1TestCase, 'test_topographic_altitude'),
        (ITUR1511_2TestCase, 'test_topographic_altitude'),
    ]
    for case_cls, method_name in registrations:
        test_suite.addTest(case_cls(method_name))
    return test_suite
class ITUR453_12TestCase(test.TestCase):
    """Validation tests for ITU-R P.453-12 (wet term of the radio refractivity).

    Each case checks ``map_wet_term_radio_refractivity`` at a validation
    location against its expected wet-term value N_wet (ppm).
    """

    def setUp(self):
        # Pin the recommendation version so the expected values are reproducible.
        models.itu453.change_version(12)

    def test_wet_term_radio_refractivity(self):
        # (latitude_deg, longitude_deg, expected N_wet).  The original test
        # repeated every one of these cases verbatim; the duplicates carried
        # no extra coverage and have been removed.
        cases = [
            (51.5, 359.86, 45.130667),
            (41.9, 12.49, 53.756489),
            (33.94, 18.43, 76.349680),
            (22.9, 316.77, 87.907733),
            (25.78, 279.78, 101.416373),
            (28.717, 77.3, 60.060569),
            (3.133, 101.7, 105.920333),
            (9.05, 38.7, 50.162000),
        ]
        for lat, lon, expected in cases:
            # subTest reports every failing location instead of stopping at
            # the first failure.
            with self.subTest(lat=lat, lon=lon):
                self.assertAlmostEqual(
                    models.itu453.map_wet_term_radio_refractivity(
                        lat, lon).value,
                    expected, places=5)
class ITUR453_13TestCase(test.TestCase):
    """Validation tests for ITU-R P.453-13 (wet term of the radio refractivity).

    Checks ``map_wet_term_radio_refractivity`` at 50% exceedance probability
    for a set of validation locations.
    """

    def setUp(self):
        # Use version 13 of Recommendation ITU-R P.453.
        models.itu453.change_version(13)

    def test_wet_term_radio_refractivity(self):
        # (latitude_deg, longitude_deg, expected N_wet at p = 50%).
        expected_by_location = (
            (3.133, 101.7, 128.14080027),
            (22.9, -43.23, 104.35847467),
            (23, 30, 36.47166667),
            (25.78, -80.22, 113.2738672),
            (28.717, 77.3, 75.66013547),
            (33.94, 18.43, 80.14015964),
            (41.9, 12.49, 61.21890044),
            (51.5, -0.14, 50.38926222),
        )
        for lat, lon, n_wet in expected_by_location:
            value = models.itu453.map_wet_term_radio_refractivity(
                lat, lon, 50).value
            self.assertAlmostEqual(value, n_wet, places=5)
class ITUR676_9TestCase(test.TestCase):
    """Validation tests for ITU-R P.676-9 specific gaseous attenuation.

    Checks the approximate water-vapour (``gammaw_approx``) and dry-air
    (``gamma0_approx``) specific attenuation [dB/km] against validation
    values.
    """

    def setUp(self):
        # Pin the model versions so the expected values are reproducible.
        models.itu676.change_version(9)
        models.itu836.change_version(4)

    def test_gammaw(self):
        # Water-vapour specific attenuation at fixed pressure, water-vapour
        # density and temperature, swept over frequency [GHz].
        rho = 4.98154290000
        temperature = (5.9435147000 - 0.15) * u.deg_C
        cases = [
            (12, 0.00705700000),
            (20, 0.06742720000),
            (60, 0.11538020000),
            (90, 0.25568340000),
            (130, 0.56358380000),
        ]
        for f, expected in cases:
            with self.subTest(f=f):
                self.assertAlmostEqual(
                    models.itu676.gammaw_approx(
                        f, 1013.25, rho, temperature).value,
                    expected, places=5)

    def test_gamma0(self):
        # Dry-air specific attenuation for two frequencies [GHz] over a set
        # of temperatures [K].  The original test repeated each case up to
        # four times; the duplicates carried no extra coverage and have been
        # removed.
        cases = [
            (14.25, 282.724, 0.00941327),
            (14.25, 287.4834667, 0.00898682),
            (14.25, 293.1487022, 0.00851359),
            (14.25, 296.602, 0.00824203),
            (14.25, 296.7208533, 0.0082329),
            (14.25, 299.0966578, 0.00805331),
            (14.25, 297.9322267, 0.00814064),
            (14.25, 287.444, 0.00899025),
            (29, 282.724, 0.02043748),
            (29, 287.4834667, 0.01954568),
            (29, 293.1487022, 0.01856193),
            (29, 296.602, 0.01800011),
            (29, 296.7208533, 0.01798125),
            (29, 299.0966578, 0.01761077),
            (29, 297.9322267, 0.01779083),
            (29, 287.444, 0.01955282),
        ]
        for f, temperature, expected in cases:
            with self.subTest(f=f, T=temperature):
                self.assertAlmostEqual(
                    models.itu676.gamma0_approx(
                        f, 1013.25, 7.5, temperature - 0.15).value,
                    expected, places=5)
class ITUR676_11TestCase(test.TestCase):
def setUp(self):
models.itu676.change_version(11)
models.itu836.change_version(6)
models.itu1511.change_version(1)
def test_gammaw_exact(self):
self.assertAlmostEqual(
models.itu676.gammaw_exact(12, 1013.25, 7.5, 288.15).value,
0.00953539, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(20, 1013.25, 7.5, 288.15).value,
0.09704730, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(60, 1013.25, 7.5, 288.15).value,
0.15484184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(90, 1013.25, 7.5, 288.15).value,
0.34197339, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_exact(130, 1013.25, 7.5, 288.15).value,
0.75184470, places=5)
def test_gamma0_exact(self):
self.assertAlmostEqual(
models.itu676.gamma0_exact(12, 1013.25, 7.5, 288.15).value,
0.00869826, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(20, 1013.25, 7.5, 288.15).value,
0.01188355, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(60, 1013.25, 7.5, 288.15).value,
14.62347480, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(90, 1013.25, 7.5, 288.15).value,
0.03886971, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_exact(130, 1013.25, 7.5, 288.15).value,
0.04150908, places=5)
def test_gammaw_approx(self):
self.assertAlmostEqual(
models.itu676.gammaw_approx(1, 1013.25, 7.5, 288.15).value,
5.06e-05, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(2, 1013.25, 7.5, 288.15).value,
0.000203124, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(3, 1013.25, 7.5, 288.15).value,
0.000459962, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(4, 1013.25, 7.5, 288.15).value,
0.000825295, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(5, 1013.25, 7.5, 288.15).value,
0.001305574, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(6, 1013.25, 7.5, 288.15).value,
0.001910194, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(7, 1013.25, 7.5, 288.15).value,
0.00265257, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(8, 1013.25, 7.5, 288.15).value,
0.00355178, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(9, 1013.25, 7.5, 288.15).value,
0.00463511, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(10, 1013.25, 7.5, 288.15).value,
0.005942065, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(11, 1013.25, 7.5, 288.15).value,
0.007530789, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(12, 1013.25, 7.5, 288.15).value,
0.009488627, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(13, 1013.25, 7.5, 288.15).value,
0.01194992, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(14, 1013.25, 7.5, 288.15).value,
0.015126834, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(15, 1013.25, 7.5, 288.15).value,
0.019364141, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(16, 1013.25, 7.5, 288.15).value,
0.025238305, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(17, 1013.25, 7.5, 288.15).value,
0.033736014, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(18, 1013.25, 7.5, 288.15).value,
0.04655406, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(19, 1013.25, 7.5, 288.15).value,
0.066459485, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(20, 1013.25, 7.5, 288.15).value,
0.096940958, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(21, 1013.25, 7.5, 288.15).value,
0.137887422, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(22, 1013.25, 7.5, 288.15).value,
0.17418431, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(23, 1013.25, 7.5, 288.15).value,
0.180393135, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(24, 1013.25, 7.5, 288.15).value,
0.15839854, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(25, 1013.25, 7.5, 288.15).value,
0.130540688, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(26, 1013.25, 7.5, 288.15).value,
0.108338372, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(27, 1013.25, 7.5, 288.15).value,
0.092962551, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(28, 1013.25, 7.5, 288.15).value,
0.082791566, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(29, 1013.25, 7.5, 288.15).value,
0.076209755, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(30, 1013.25, 7.5, 288.15).value,
0.072073391, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(31, 1013.25, 7.5, 288.15).value,
0.069632181, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(32, 1013.25, 7.5, 288.15).value,
0.06839841, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(33, 1013.25, 7.5, 288.15).value,
0.068050819, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(34, 1013.25, 7.5, 288.15).value,
0.068373336, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(35, 1013.25, 7.5, 288.15).value,
0.069217296, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(36, 1013.25, 7.5, 288.15).value,
0.070478105, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(37, 1013.25, 7.5, 288.15).value,
0.072080617, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(38, 1013.25, 7.5, 288.15).value,
0.073969796, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(39, 1013.25, 7.5, 288.15).value,
0.076104615, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(40, 1013.25, 7.5, 288.15).value,
0.078454003, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(41, 1013.25, 7.5, 288.15).value,
0.080994086, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(42, 1013.25, 7.5, 288.15).value,
0.08370628, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(43, 1013.25, 7.5, 288.15).value,
0.086575946, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(44, 1013.25, 7.5, 288.15).value,
0.089591433, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(45, 1013.25, 7.5, 288.15).value,
0.092743375, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(46, 1013.25, 7.5, 288.15).value,
0.096024183, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(47, 1013.25, 7.5, 288.15).value,
0.099427654, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(48, 1013.25, 7.5, 288.15).value,
0.102948692, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(49, 1013.25, 7.5, 288.15).value,
0.106583076, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(50, 1013.25, 7.5, 288.15).value,
0.110327299, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(51, 1013.25, 7.5, 288.15).value,
0.11417843, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(52, 1013.25, 7.5, 288.15).value,
0.118134012, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(53, 1013.25, 7.5, 288.15).value,
0.122191981, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(54, 1013.25, 7.5, 288.15).value,
0.126350598, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(55, 1013.25, 7.5, 288.15).value,
0.130608397, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(56, 1013.25, 7.5, 288.15).value,
0.134964144, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(57, 1013.25, 7.5, 288.15).value,
0.139416798, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(58, 1013.25, 7.5, 288.15).value,
0.143965489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(59, 1013.25, 7.5, 288.15).value,
0.148609489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(60, 1013.25, 7.5, 288.15).value,
0.153348196, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(61, 1013.25, 7.5, 288.15).value,
0.158181114, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(62, 1013.25, 7.5, 288.15).value,
0.163107847, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(63, 1013.25, 7.5, 288.15).value,
0.168128079, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(64, 1013.25, 7.5, 288.15).value,
0.173241572, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(65, 1013.25, 7.5, 288.15).value,
0.178448154, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(66, 1013.25, 7.5, 288.15).value,
0.183747712, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(67, 1013.25, 7.5, 288.15).value,
0.18914019, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(68, 1013.25, 7.5, 288.15).value,
0.194625582, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(69, 1013.25, 7.5, 288.15).value,
0.200203926, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(70, 1013.25, 7.5, 288.15).value,
0.205875306, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(71, 1013.25, 7.5, 288.15).value,
0.211639845, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(72, 1013.25, 7.5, 288.15).value,
0.217497702, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(73, 1013.25, 7.5, 288.15).value,
0.223449076, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(74, 1013.25, 7.5, 288.15).value,
0.229494196, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(75, 1013.25, 7.5, 288.15).value,
0.235633329, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(76, 1013.25, 7.5, 288.15).value,
0.241866771, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(77, 1013.25, 7.5, 288.15).value,
0.248194851, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(78, 1013.25, 7.5, 288.15).value,
0.254617931, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(79, 1013.25, 7.5, 288.15).value,
0.261136401, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(80, 1013.25, 7.5, 288.15).value,
0.267750686, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(81, 1013.25, 7.5, 288.15).value,
0.274461239, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(82, 1013.25, 7.5, 288.15).value,
0.281268547, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(83, 1013.25, 7.5, 288.15).value,
0.28817313, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(84, 1013.25, 7.5, 288.15).value,
0.295175539, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(85, 1013.25, 7.5, 288.15).value,
0.302276362, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(86, 1013.25, 7.5, 288.15).value,
0.309476219, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(87, 1013.25, 7.5, 288.15).value,
0.316775769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(88, 1013.25, 7.5, 288.15).value,
0.324175708, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(89, 1013.25, 7.5, 288.15).value,
0.331676772, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(90, 1013.25, 7.5, 288.15).value,
0.339279738, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(91, 1013.25, 7.5, 288.15).value,
0.346985426, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(92, 1013.25, 7.5, 288.15).value,
0.354794703, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(93, 1013.25, 7.5, 288.15).value,
0.362708483, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(94, 1013.25, 7.5, 288.15).value,
0.370727732, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(95, 1013.25, 7.5, 288.15).value,
0.378853468, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(96, 1013.25, 7.5, 288.15).value,
0.387086768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(97, 1013.25, 7.5, 288.15).value,
0.395428769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(98, 1013.25, 7.5, 288.15).value,
0.403880673, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(99, 1013.25, 7.5, 288.15).value,
0.412443748, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(100, 1013.25, 7.5, 288.15).value,
0.421119341, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(101, 1013.25, 7.5, 288.15).value,
0.429908872, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(102, 1013.25, 7.5, 288.15).value,
0.438813848, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(103, 1013.25, 7.5, 288.15).value,
0.447835866, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(104, 1013.25, 7.5, 288.15).value,
0.456976619, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(105, 1013.25, 7.5, 288.15).value,
0.466237905, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(106, 1013.25, 7.5, 288.15).value,
0.475621633, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(107, 1013.25, 7.5, 288.15).value,
0.485129833, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(108, 1013.25, 7.5, 288.15).value,
0.494764666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(109, 1013.25, 7.5, 288.15).value,
0.504528432, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(110, 1013.25, 7.5, 288.15).value,
0.514423584, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(111, 1013.25, 7.5, 288.15).value,
0.524452741, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(112, 1013.25, 7.5, 288.15).value,
0.5346187, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(113, 1013.25, 7.5, 288.15).value,
0.54492445, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(114, 1013.25, 7.5, 288.15).value,
0.555373195, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(115, 1013.25, 7.5, 288.15).value,
0.565968366, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(116, 1013.25, 7.5, 288.15).value,
0.576713646, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(117, 1013.25, 7.5, 288.15).value,
0.58761299, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(118, 1013.25, 7.5, 288.15).value,
0.598670654, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(119, 1013.25, 7.5, 288.15).value,
0.609891221, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(120, 1013.25, 7.5, 288.15).value,
0.621279631, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(121, 1013.25, 7.5, 288.15).value,
0.63284122, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(122, 1013.25, 7.5, 288.15).value,
0.644581758, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(123, 1013.25, 7.5, 288.15).value,
0.656507491, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(124, 1013.25, 7.5, 288.15).value,
0.668625191, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(125, 1013.25, 7.5, 288.15).value,
0.680942215, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(126, 1013.25, 7.5, 288.15).value,
0.69346656, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(127, 1013.25, 7.5, 288.15).value,
0.70620694, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(128, 1013.25, 7.5, 288.15).value,
0.719172861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(129, 1013.25, 7.5, 288.15).value,
0.73237471, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(130, 1013.25, 7.5, 288.15).value,
0.745823861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(131, 1013.25, 7.5, 288.15).value,
0.759532783, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(132, 1013.25, 7.5, 288.15).value,
0.773515178, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(133, 1013.25, 7.5, 288.15).value,
0.787786128, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(134, 1013.25, 7.5, 288.15).value,
0.802362262, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(135, 1013.25, 7.5, 288.15).value,
0.817261961, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(136, 1013.25, 7.5, 288.15).value,
0.832505575, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(137, 1013.25, 7.5, 288.15).value,
0.848115693, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(138, 1013.25, 7.5, 288.15).value,
0.864117433, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(139, 1013.25, 7.5, 288.15).value,
0.880538802, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(140, 1013.25, 7.5, 288.15).value,
0.897411097, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(141, 1013.25, 7.5, 288.15).value,
0.914769381, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(142, 1013.25, 7.5, 288.15).value,
0.93265304, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(143, 1013.25, 7.5, 288.15).value,
0.951106434, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(144, 1013.25, 7.5, 288.15).value,
0.970179674, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(145, 1013.25, 7.5, 288.15).value,
0.989929528, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(146, 1013.25, 7.5, 288.15).value,
1.010420514, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(147, 1013.25, 7.5, 288.15).value,
1.03172619, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(148, 1013.25, 7.5, 288.15).value,
1.053930717, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(149, 1013.25, 7.5, 288.15).value,
1.077130727, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(150, 1013.25, 7.5, 288.15).value,
1.101437596, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(151, 1013.25, 7.5, 288.15).value,
1.126980206, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(152, 1013.25, 7.5, 288.15).value,
1.153908327, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(153, 1013.25, 7.5, 288.15).value,
1.182396776, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(154, 1013.25, 7.5, 288.15).value,
1.212650574, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(155, 1013.25, 7.5, 288.15).value,
1.244911365, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(156, 1013.25, 7.5, 288.15).value,
1.279465482, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(157, 1013.25, 7.5, 288.15).value,
1.316654129, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(158, 1013.25, 7.5, 288.15).value,
1.356886363, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(159, 1013.25, 7.5, 288.15).value,
1.400655759, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(160, 1013.25, 7.5, 288.15).value,
1.448562004, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(161, 1013.25, 7.5, 288.15).value,
1.501339131, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(162, 1013.25, 7.5, 288.15).value,
1.559892824, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(163, 1013.25, 7.5, 288.15).value,
1.625350216, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(164, 1013.25, 7.5, 288.15).value,
1.699127159, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(165, 1013.25, 7.5, 288.15).value,
1.783020212, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(166, 1013.25, 7.5, 288.15).value,
1.87933414, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(167, 1013.25, 7.5, 288.15).value,
1.991061177, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(168, 1013.25, 7.5, 288.15).value,
2.122137016, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(169, 1013.25, 7.5, 288.15).value,
2.277812508, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(170, 1013.25, 7.5, 288.15).value,
2.465203115, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(171, 1013.25, 7.5, 288.15).value,
2.694116678, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(172, 1013.25, 7.5, 288.15).value,
2.978325696, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(173, 1013.25, 7.5, 288.15).value,
3.337563176, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(174, 1013.25, 7.5, 288.15).value,
3.80071648, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(175, 1013.25, 7.5, 288.15).value,
4.411026238, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(176, 1013.25, 7.5, 288.15).value,
5.23462829, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(177, 1013.25, 7.5, 288.15).value,
6.374446918, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(178, 1013.25, 7.5, 288.15).value,
7.991434174, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(179, 1013.25, 7.5, 288.15).value,
10.33006475, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(180, 1013.25, 7.5, 288.15).value,
13.71659631, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(181, 1013.25, 7.5, 288.15).value,
18.39188186, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(182, 1013.25, 7.5, 288.15).value,
23.83194406, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(183, 1013.25, 7.5, 288.15).value,
27.67449812, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(184, 1013.25, 7.5, 288.15).value,
27.03213321, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(185, 1013.25, 7.5, 288.15).value,
22.60135009, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(186, 1013.25, 7.5, 288.15).value,
17.47071693, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(187, 1013.25, 7.5, 288.15).value,
13.32603388, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(188, 1013.25, 7.5, 288.15).value,
10.35715037, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(189, 1013.25, 7.5, 288.15).value,
8.290514155, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(190, 1013.25, 7.5, 288.15).value,
6.842342894, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(191, 1013.25, 7.5, 288.15).value,
5.808188543, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(192, 1013.25, 7.5, 288.15).value,
5.053346248, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(193, 1013.25, 7.5, 288.15).value,
4.490555513, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(194, 1013.25, 7.5, 288.15).value,
4.062799398, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(195, 1013.25, 7.5, 288.15).value,
3.73214099, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(196, 1013.25, 7.5, 288.15).value,
3.472798768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(197, 1013.25, 7.5, 288.15).value,
3.266872193, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(198, 1013.25, 7.5, 288.15).value,
3.101674612, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(199, 1013.25, 7.5, 288.15).value,
2.968040373, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(200, 1013.25, 7.5, 288.15).value,
2.859229328, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(201, 1013.25, 7.5, 288.15).value,
2.77020383, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(202, 1013.25, 7.5, 288.15).value,
2.697142323, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(203, 1013.25, 7.5, 288.15).value,
2.637106098, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(204, 1013.25, 7.5, 288.15).value,
2.587807034, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(205, 1013.25, 7.5, 288.15).value,
2.547443114, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(206, 1013.25, 7.5, 288.15).value,
2.514580199, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(207, 1013.25, 7.5, 288.15).value,
2.488065853, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(208, 1013.25, 7.5, 288.15).value,
2.466965732, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(209, 1013.25, 7.5, 288.15).value,
2.450516051, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(210, 1013.25, 7.5, 288.15).value,
2.43808768, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(211, 1013.25, 7.5, 288.15).value,
2.429158726, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(212, 1013.25, 7.5, 288.15).value,
2.423293411, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(213, 1013.25, 7.5, 288.15).value,
2.420125627, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(214, 1013.25, 7.5, 288.15).value,
2.419346051, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(215, 1013.25, 7.5, 288.15).value,
2.420691947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(216, 1013.25, 7.5, 288.15).value,
2.423939044, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(217, 1013.25, 7.5, 288.15).value,
2.428895021, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(218, 1013.25, 7.5, 288.15).value,
2.435394244, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(219, 1013.25, 7.5, 288.15).value,
2.443293492, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(220, 1013.25, 7.5, 288.15).value,
2.452468459, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(221, 1013.25, 7.5, 288.15).value,
2.462810881, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(222, 1013.25, 7.5, 288.15).value,
2.474226165, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(223, 1013.25, 7.5, 288.15).value,
2.486631411, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(224, 1013.25, 7.5, 288.15).value,
2.499953772, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(225, 1013.25, 7.5, 288.15).value,
2.514129072, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(226, 1013.25, 7.5, 288.15).value,
2.529100646, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(227, 1013.25, 7.5, 288.15).value,
2.544818361, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(228, 1013.25, 7.5, 288.15).value,
2.561237781, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(229, 1013.25, 7.5, 288.15).value,
2.578319459, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(230, 1013.25, 7.5, 288.15).value,
2.596028333, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(231, 1013.25, 7.5, 288.15).value,
2.614333207, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(232, 1013.25, 7.5, 288.15).value,
2.633206307, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(233, 1013.25, 7.5, 288.15).value,
2.652622892, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(234, 1013.25, 7.5, 288.15).value,
2.672560927, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(235, 1013.25, 7.5, 288.15).value,
2.693000794, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(236, 1013.25, 7.5, 288.15).value,
2.713925039, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(237, 1013.25, 7.5, 288.15).value,
2.735318161, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(238, 1013.25, 7.5, 288.15).value,
2.757166418, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(239, 1013.25, 7.5, 288.15).value,
2.779457666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(240, 1013.25, 7.5, 288.15).value,
2.80218121, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(241, 1013.25, 7.5, 288.15).value,
2.825327683, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(242, 1013.25, 7.5, 288.15).value,
2.848888936, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(243, 1013.25, 7.5, 288.15).value,
2.87285794, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(244, 1013.25, 7.5, 288.15).value,
2.897228704, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(245, 1013.25, 7.5, 288.15).value,
2.921996202, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(246, 1013.25, 7.5, 288.15).value,
2.947156311, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(247, 1013.25, 7.5, 288.15).value,
2.972705756, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(248, 1013.25, 7.5, 288.15).value,
2.998642066, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(249, 1013.25, 7.5, 288.15).value,
3.024963531, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(250, 1013.25, 7.5, 288.15).value,
3.051669175, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(251, 1013.25, 7.5, 288.15).value,
3.078758722, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(252, 1013.25, 7.5, 288.15).value,
3.106232585, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(253, 1013.25, 7.5, 288.15).value,
3.13409184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(254, 1013.25, 7.5, 288.15).value,
3.162338224, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(255, 1013.25, 7.5, 288.15).value,
3.190974123, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(256, 1013.25, 7.5, 288.15).value,
3.220002576, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(257, 1013.25, 7.5, 288.15).value,
3.249427276, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(258, 1013.25, 7.5, 288.15).value,
3.27925258, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(259, 1013.25, 7.5, 288.15).value,
3.309483522, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(260, 1013.25, 7.5, 288.15).value,
3.340125831, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(261, 1013.25, 7.5, 288.15).value,
3.371185956, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(262, 1013.25, 7.5, 288.15).value,
3.402671096, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(263, 1013.25, 7.5, 288.15).value,
3.434589233, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(264, 1013.25, 7.5, 288.15).value,
3.466949175, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(265, 1013.25, 7.5, 288.15).value,
3.499760606, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(266, 1013.25, 7.5, 288.15).value,
3.533034141, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(267, 1013.25, 7.5, 288.15).value,
3.566781392, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(268, 1013.25, 7.5, 288.15).value,
3.601015043, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(269, 1013.25, 7.5, 288.15).value,
3.635748934, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(270, 1013.25, 7.5, 288.15).value,
3.670998161, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(271, 1013.25, 7.5, 288.15).value,
3.706779184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(272, 1013.25, 7.5, 288.15).value,
3.743109957, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(273, 1013.25, 7.5, 288.15).value,
3.780010072, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(274, 1013.25, 7.5, 288.15).value,
3.817500924, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(275, 1013.25, 7.5, 288.15).value,
3.855605898, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(276, 1013.25, 7.5, 288.15).value,
3.894350591, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(277, 1013.25, 7.5, 288.15).value,
3.933763053, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(278, 1013.25, 7.5, 288.15).value,
3.97387408, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(279, 1013.25, 7.5, 288.15).value,
4.014717535, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(280, 1013.25, 7.5, 288.15).value,
4.056330735, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(281, 1013.25, 7.5, 288.15).value,
4.098754887, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(282, 1013.25, 7.5, 288.15).value,
4.142035602, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(283, 1013.25, 7.5, 288.15).value,
4.186223487, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(284, 1013.25, 7.5, 288.15).value,
4.231374849, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(285, 1013.25, 7.5, 288.15).value,
4.277552506, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(286, 1013.25, 7.5, 288.15).value,
4.324826757, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(287, 1013.25, 7.5, 288.15).value,
4.373276518, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(288, 1013.25, 7.5, 288.15).value,
4.422990681, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(289, 1013.25, 7.5, 288.15).value,
4.474069728, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(290, 1013.25, 7.5, 288.15).value,
4.526627666, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(291, 1013.25, 7.5, 288.15).value,
4.580794366, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(292, 1013.25, 7.5, 288.15).value,
4.63671838, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(293, 1013.25, 7.5, 288.15).value,
4.694570386, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(294, 1013.25, 7.5, 288.15).value,
4.754547391, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(295, 1013.25, 7.5, 288.15).value,
4.816877916, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(296, 1013.25, 7.5, 288.15).value,
4.881828419, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(297, 1013.25, 7.5, 288.15).value,
4.949711312, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(298, 1013.25, 7.5, 288.15).value,
5.020895036, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(299, 1013.25, 7.5, 288.15).value,
5.095816817, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(300, 1013.25, 7.5, 288.15).value,
5.174998967, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(301, 1013.25, 7.5, 288.15).value,
5.259069863, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(302, 1013.25, 7.5, 288.15).value,
5.348791238, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(303, 1013.25, 7.5, 288.15).value,
5.445094008, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(304, 1013.25, 7.5, 288.15).value,
5.549125837, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(305, 1013.25, 7.5, 288.15).value,
5.662315008, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(306, 1013.25, 7.5, 288.15).value,
5.786457272, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(307, 1013.25, 7.5, 288.15).value,
5.923835584, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(308, 1013.25, 7.5, 288.15).value,
6.077387588, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(309, 1013.25, 7.5, 288.15).value,
6.250943769, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(310, 1013.25, 7.5, 288.15).value,
6.449572043, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(311, 1013.25, 7.5, 288.15).value,
6.6800861, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(312, 1013.25, 7.5, 288.15).value,
6.951811263, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(313, 1013.25, 7.5, 288.15).value,
7.277764947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(314, 1013.25, 7.5, 288.15).value,
7.676520947, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(315, 1013.25, 7.5, 288.15).value,
8.175226571, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(316, 1013.25, 7.5, 288.15).value,
8.814587905, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(317, 1013.25, 7.5, 288.15).value,
9.657150573, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(318, 1013.25, 7.5, 288.15).value,
10.80040832, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(319, 1013.25, 7.5, 288.15).value,
12.39287203, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(320, 1013.25, 7.5, 288.15).value,
14.63291434, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(321, 1013.25, 7.5, 288.15).value,
17.71848802, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(322, 1013.25, 7.5, 288.15).value,
21.89833011, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(323, 1013.25, 7.5, 288.15).value,
27.52921207, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(324, 1013.25, 7.5, 288.15).value,
33.93584273, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(325, 1013.25, 7.5, 288.15).value,
37.82487596, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(326, 1013.25, 7.5, 288.15).value,
35.8615979, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(327, 1013.25, 7.5, 288.15).value,
29.89188489, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(328, 1013.25, 7.5, 288.15).value,
23.80724266, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(329, 1013.25, 7.5, 288.15).value,
19.19466647, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(330, 1013.25, 7.5, 288.15).value,
16.01196137, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(331, 1013.25, 7.5, 288.15).value,
13.85529573, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(332, 1013.25, 7.5, 288.15).value,
12.38126427, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(333, 1013.25, 7.5, 288.15).value,
11.35803945, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(334, 1013.25, 7.5, 288.15).value,
10.63752623, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(335, 1013.25, 7.5, 288.15).value,
10.12544, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(336, 1013.25, 7.5, 288.15).value,
9.760866789, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(337, 1013.25, 7.5, 288.15).value,
9.503637932, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(338, 1013.25, 7.5, 288.15).value,
9.326698304, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(339, 1013.25, 7.5, 288.15).value,
9.211467317, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(340, 1013.25, 7.5, 288.15).value,
9.144973785, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(341, 1013.25, 7.5, 288.15).value,
9.118051719, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(342, 1013.25, 7.5, 288.15).value,
9.124181675, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(343, 1013.25, 7.5, 288.15).value,
9.158733184, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(344, 1013.25, 7.5, 288.15).value,
9.218462001, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(345, 1013.25, 7.5, 288.15).value,
9.301173194, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(346, 1013.25, 7.5, 288.15).value,
9.405494946, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(347, 1013.25, 7.5, 288.15).value,
9.53072851, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(348, 1013.25, 7.5, 288.15).value,
9.676752463, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(349, 1013.25, 7.5, 288.15).value,
9.843967537, places=5)
self.assertAlmostEqual(
models.itu676.gammaw_approx(350, 1013.25, 7.5, 288.15).value,
10.03327368, places=5)
def test_gamma0_approx(self):
self.assertAlmostEqual(
models.itu676.gamma0_approx(1, 1013.25, 7.5, 288.15).value,
0.005388658, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(2, 1013.25, 7.5, 288.15).value,
0.006716038, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(3, 1013.25, 7.5, 288.15).value,
0.00707596, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(4, 1013.25, 7.5, 288.15).value,
0.007258969, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(5, 1013.25, 7.5, 288.15).value,
0.007400426, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(6, 1013.25, 7.5, 288.15).value,
0.007537212, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(7, 1013.25, 7.5, 288.15).value,
0.007682905, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(8, 1013.25, 7.5, 288.15).value,
0.007843794, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(9, 1013.25, 7.5, 288.15).value,
0.008023466, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(10, 1013.25, 7.5, 288.15).value,
0.008224416, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(11, 1013.25, 7.5, 288.15).value,
0.008448705, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(12, 1013.25, 7.5, 288.15).value,
0.008698263, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(13, 1013.25, 7.5, 288.15).value,
0.008975056, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(14, 1013.25, 7.5, 288.15).value,
0.009281177, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(15, 1013.25, 7.5, 288.15).value,
0.009618923, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(16, 1013.25, 7.5, 288.15).value,
0.009990845, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(17, 1013.25, 7.5, 288.15).value,
0.010399811, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(18, 1013.25, 7.5, 288.15).value,
0.010849054, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(19, 1013.25, 7.5, 288.15).value,
0.011342243, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(20, 1013.25, 7.5, 288.15).value,
0.011883547, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(21, 1013.25, 7.5, 288.15).value,
0.012477725, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(22, 1013.25, 7.5, 288.15).value,
0.013130219, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(23, 1013.25, 7.5, 288.15).value,
0.013847273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(24, 1013.25, 7.5, 288.15).value,
0.014636078, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(25, 1013.25, 7.5, 288.15).value,
0.015504937, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(26, 1013.25, 7.5, 288.15).value,
0.016463481, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(27, 1013.25, 7.5, 288.15).value,
0.017522921, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(28, 1013.25, 7.5, 288.15).value,
0.018696367, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(29, 1013.25, 7.5, 288.15).value,
0.019999221, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(30, 1013.25, 7.5, 288.15).value,
0.021449673, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(31, 1013.25, 7.5, 288.15).value,
0.023069328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(32, 1013.25, 7.5, 288.15).value,
0.024883993, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(33, 1013.25, 7.5, 288.15).value,
0.026924712, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(34, 1013.25, 7.5, 288.15).value,
0.029229084, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(35, 1013.25, 7.5, 288.15).value,
0.031843013, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(36, 1013.25, 7.5, 288.15).value,
0.034823023, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(37, 1013.25, 7.5, 288.15).value,
0.038239374, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(38, 1013.25, 7.5, 288.15).value,
0.042180317, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(39, 1013.25, 7.5, 288.15).value,
0.046757999, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(40, 1013.25, 7.5, 288.15).value,
0.052116797, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(41, 1013.25, 7.5, 288.15).value,
0.058445339, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(42, 1013.25, 7.5, 288.15).value,
0.065994232, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(43, 1013.25, 7.5, 288.15).value,
0.075102941, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(44, 1013.25, 7.5, 288.15).value,
0.086241846, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(45, 1013.25, 7.5, 288.15).value,
0.100080659, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(46, 1013.25, 7.5, 288.15).value,
0.117605188, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(47, 1013.25, 7.5, 288.15).value,
0.140329657, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(48, 1013.25, 7.5, 288.15).value,
0.170719546, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(49, 1013.25, 7.5, 288.15).value,
0.213175063, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(50, 1013.25, 7.5, 288.15).value,
0.277268297, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(51, 1013.25, 7.5, 288.15).value,
0.389670239, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(52, 1013.25, 7.5, 288.15).value,
0.618429331, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(53, 1013.25, 7.5, 288.15).value,
1.126611463, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(54, 1013.25, 7.5, 288.15).value,
2.211541194, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(55, 1013.25, 7.5, 288.15).value,
4.193281287, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(56, 1013.25, 7.5, 288.15).value,
7.055044748, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(57, 1013.25, 7.5, 288.15).value,
10.0652395, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(58, 1013.25, 7.5, 288.15).value,
12.35314971, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(59, 1013.25, 7.5, 288.15).value,
13.63529754, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(60, 1013.25, 7.5, 288.15).value,
14.62347701, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(61, 1013.25, 7.5, 288.15).value,
15.00716194, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(62, 1013.25, 7.5, 288.15).value,
13.99621411, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(63, 1013.25, 7.5, 288.15).value,
10.83108919, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(64, 1013.25, 7.5, 288.15).value,
6.844588337, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(65, 1013.25, 7.5, 288.15).value,
3.80880229, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(66, 1013.25, 7.5, 288.15).value,
1.966616477, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(67, 1013.25, 7.5, 288.15).value,
1.033387448, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(68, 1013.25, 7.5, 288.15).value,
0.60546544, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(69, 1013.25, 7.5, 288.15).value,
0.406984877, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(70, 1013.25, 7.5, 288.15).value,
0.304104518, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(71, 1013.25, 7.5, 288.15).value,
0.24160024, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(72, 1013.25, 7.5, 288.15).value,
0.198531458, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(73, 1013.25, 7.5, 288.15).value,
0.167045465, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(74, 1013.25, 7.5, 288.15).value,
0.143141978, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(75, 1013.25, 7.5, 288.15).value,
0.124484922, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(76, 1013.25, 7.5, 288.15).value,
0.109604088, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(77, 1013.25, 7.5, 288.15).value,
0.09752563, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(78, 1013.25, 7.5, 288.15).value,
0.087579095, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(79, 1013.25, 7.5, 288.15).value,
0.079288509, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(80, 1013.25, 7.5, 288.15).value,
0.072307337, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(81, 1013.25, 7.5, 288.15).value,
0.066377906, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(82, 1013.25, 7.5, 288.15).value,
0.061305161, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(83, 1013.25, 7.5, 288.15).value,
0.05693918, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(84, 1013.25, 7.5, 288.15).value,
0.053163238, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(85, 1013.25, 7.5, 288.15).value,
0.049885471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(86, 1013.25, 7.5, 288.15).value,
0.047032946, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(87, 1013.25, 7.5, 288.15).value,
0.044547391, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(88, 1013.25, 7.5, 288.15).value,
0.042382069, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(89, 1013.25, 7.5, 288.15).value,
0.040499471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(90, 1013.25, 7.5, 288.15).value,
0.038869622, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(91, 1013.25, 7.5, 288.15).value,
0.037468818, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(92, 1013.25, 7.5, 288.15).value,
0.036278727, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(93, 1013.25, 7.5, 288.15).value,
0.035285753, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(94, 1013.25, 7.5, 288.15).value,
0.034480646, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(95, 1013.25, 7.5, 288.15).value,
0.033858315, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(96, 1013.25, 7.5, 288.15).value,
0.033417859, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(97, 1013.25, 7.5, 288.15).value,
0.033162815, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(98, 1013.25, 7.5, 288.15).value,
0.033101677, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(99, 1013.25, 7.5, 288.15).value,
0.033248738, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(100, 1013.25, 7.5, 288.15).value,
0.033625377, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(101, 1013.25, 7.5, 288.15).value,
0.034261951, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(102, 1013.25, 7.5, 288.15).value,
0.03520058, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(103, 1013.25, 7.5, 288.15).value,
0.036499225, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(104, 1013.25, 7.5, 288.15).value,
0.038237778, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(105, 1013.25, 7.5, 288.15).value,
0.040527282, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(106, 1013.25, 7.5, 288.15).value,
0.043524209, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(107, 1013.25, 7.5, 288.15).value,
0.047453183, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(108, 1013.25, 7.5, 288.15).value,
0.05264422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(109, 1013.25, 7.5, 288.15).value,
0.059596011, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(110, 1013.25, 7.5, 288.15).value,
0.069087844, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(111, 1013.25, 7.5, 288.15).value,
0.082387054, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(112, 1013.25, 7.5, 288.15).value,
0.101654574, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(113, 1013.25, 7.5, 288.15).value,
0.130786962, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(114, 1013.25, 7.5, 288.15).value,
0.177281273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(115, 1013.25, 7.5, 288.15).value,
0.25660834, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(116, 1013.25, 7.5, 288.15).value,
0.402453591, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(117, 1013.25, 7.5, 288.15).value,
0.683016431, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(118, 1013.25, 7.5, 288.15).value,
1.134866447, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(119, 1013.25, 7.5, 288.15).value,
1.306379447, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(120, 1013.25, 7.5, 288.15).value,
0.886108944, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(121, 1013.25, 7.5, 288.15).value,
0.509171816, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(122, 1013.25, 7.5, 288.15).value,
0.307768488, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(123, 1013.25, 7.5, 288.15).value,
0.202100995, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(124, 1013.25, 7.5, 288.15).value,
0.142570138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(125, 1013.25, 7.5, 288.15).value,
0.106445548, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(126, 1013.25, 7.5, 288.15).value,
0.083103974, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(127, 1013.25, 7.5, 288.15).value,
0.067232038, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(128, 1013.25, 7.5, 288.15).value,
0.055982184, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(129, 1013.25, 7.5, 288.15).value,
0.047732332, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(130, 1013.25, 7.5, 288.15).value,
0.041509034, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(131, 1013.25, 7.5, 288.15).value,
0.036701642, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(132, 1013.25, 7.5, 288.15).value,
0.032912343, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(133, 1013.25, 7.5, 288.15).value,
0.029873422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(134, 1013.25, 7.5, 288.15).value,
0.027399581, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(135, 1013.25, 7.5, 288.15).value,
0.025359375, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(136, 1013.25, 7.5, 288.15).value,
0.02365753, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(137, 1013.25, 7.5, 288.15).value,
0.022223652, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(138, 1013.25, 7.5, 288.15).value,
0.021004834, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(139, 1013.25, 7.5, 288.15).value,
0.019960701, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(140, 1013.25, 7.5, 288.15).value,
0.019060012, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(141, 1013.25, 7.5, 288.15).value,
0.018278288, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(142, 1013.25, 7.5, 288.15).value,
0.017596128, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(143, 1013.25, 7.5, 288.15).value,
0.016997997, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(144, 1013.25, 7.5, 288.15).value,
0.016471333, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(145, 1013.25, 7.5, 288.15).value,
0.016005887, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(146, 1013.25, 7.5, 288.15).value,
0.015593233, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(147, 1013.25, 7.5, 288.15).value,
0.015226386, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(148, 1013.25, 7.5, 288.15).value,
0.014899516, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(149, 1013.25, 7.5, 288.15).value,
0.014607727, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(150, 1013.25, 7.5, 288.15).value,
0.01434688, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(151, 1013.25, 7.5, 288.15).value,
0.014113455, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(152, 1013.25, 7.5, 288.15).value,
0.013904444, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(153, 1013.25, 7.5, 288.15).value,
0.013717263, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(154, 1013.25, 7.5, 288.15).value,
0.013549678, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(155, 1013.25, 7.5, 288.15).value,
0.013399755, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(156, 1013.25, 7.5, 288.15).value,
0.013265808, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(157, 1013.25, 7.5, 288.15).value,
0.013146362, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(158, 1013.25, 7.5, 288.15).value,
0.013040122, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(159, 1013.25, 7.5, 288.15).value,
0.012945947, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(160, 1013.25, 7.5, 288.15).value,
0.012862828, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(161, 1013.25, 7.5, 288.15).value,
0.012789869, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(162, 1013.25, 7.5, 288.15).value,
0.012726273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(163, 1013.25, 7.5, 288.15).value,
0.012671328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(164, 1013.25, 7.5, 288.15).value,
0.012624397, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(165, 1013.25, 7.5, 288.15).value,
0.012584907, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(166, 1013.25, 7.5, 288.15).value,
0.012552343, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(167, 1013.25, 7.5, 288.15).value,
0.012526238, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(168, 1013.25, 7.5, 288.15).value,
0.012506174, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(169, 1013.25, 7.5, 288.15).value,
0.012491766, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(170, 1013.25, 7.5, 288.15).value,
0.012482668, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(171, 1013.25, 7.5, 288.15).value,
0.012478563, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(172, 1013.25, 7.5, 288.15).value,
0.012479162, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(173, 1013.25, 7.5, 288.15).value,
0.012484201, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(174, 1013.25, 7.5, 288.15).value,
0.012493438, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(175, 1013.25, 7.5, 288.15).value,
0.01250665, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(176, 1013.25, 7.5, 288.15).value,
0.012523632, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(177, 1013.25, 7.5, 288.15).value,
0.012544196, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(178, 1013.25, 7.5, 288.15).value,
0.012568166, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(179, 1013.25, 7.5, 288.15).value,
0.012595383, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(180, 1013.25, 7.5, 288.15).value,
0.012625696, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(181, 1013.25, 7.5, 288.15).value,
0.012658968, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(182, 1013.25, 7.5, 288.15).value,
0.01269507, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(183, 1013.25, 7.5, 288.15).value,
0.012733882, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(184, 1013.25, 7.5, 288.15).value,
0.012775293, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(185, 1013.25, 7.5, 288.15).value,
0.012819198, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(186, 1013.25, 7.5, 288.15).value,
0.012865502, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(187, 1013.25, 7.5, 288.15).value,
0.012914112, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(188, 1013.25, 7.5, 288.15).value,
0.012964945, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(189, 1013.25, 7.5, 288.15).value,
0.01301792, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(190, 1013.25, 7.5, 288.15).value,
0.013072963, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(191, 1013.25, 7.5, 288.15).value,
0.013130004, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(192, 1013.25, 7.5, 288.15).value,
0.013188976, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(193, 1013.25, 7.5, 288.15).value,
0.013249818, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(194, 1013.25, 7.5, 288.15).value,
0.013312471, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(195, 1013.25, 7.5, 288.15).value,
0.01337688, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(196, 1013.25, 7.5, 288.15).value,
0.013442993, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(197, 1013.25, 7.5, 288.15).value,
0.01351076, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(198, 1013.25, 7.5, 288.15).value,
0.013580135, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(199, 1013.25, 7.5, 288.15).value,
0.013651074, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(200, 1013.25, 7.5, 288.15).value,
0.013723536, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(201, 1013.25, 7.5, 288.15).value,
0.01379748, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(202, 1013.25, 7.5, 288.15).value,
0.013872869, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(203, 1013.25, 7.5, 288.15).value,
0.013949668, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(204, 1013.25, 7.5, 288.15).value,
0.014027843, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(205, 1013.25, 7.5, 288.15).value,
0.014107361, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(206, 1013.25, 7.5, 288.15).value,
0.014188192, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(207, 1013.25, 7.5, 288.15).value,
0.014270307, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(208, 1013.25, 7.5, 288.15).value,
0.014353678, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(209, 1013.25, 7.5, 288.15).value,
0.014438278, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(210, 1013.25, 7.5, 288.15).value,
0.014524083, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(211, 1013.25, 7.5, 288.15).value,
0.014611069, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(212, 1013.25, 7.5, 288.15).value,
0.014699212, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(213, 1013.25, 7.5, 288.15).value,
0.01478849, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(214, 1013.25, 7.5, 288.15).value,
0.014878883, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(215, 1013.25, 7.5, 288.15).value,
0.01497037, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(216, 1013.25, 7.5, 288.15).value,
0.015062932, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(217, 1013.25, 7.5, 288.15).value,
0.015156551, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(218, 1013.25, 7.5, 288.15).value,
0.01525121, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(219, 1013.25, 7.5, 288.15).value,
0.015346891, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(220, 1013.25, 7.5, 288.15).value,
0.015443579, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(221, 1013.25, 7.5, 288.15).value,
0.015541258, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(222, 1013.25, 7.5, 288.15).value,
0.015639912, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(223, 1013.25, 7.5, 288.15).value,
0.015739529, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(224, 1013.25, 7.5, 288.15).value,
0.015840094, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(225, 1013.25, 7.5, 288.15).value,
0.015941595, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(226, 1013.25, 7.5, 288.15).value,
0.016044018, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(227, 1013.25, 7.5, 288.15).value,
0.016147352, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(228, 1013.25, 7.5, 288.15).value,
0.016251585, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(229, 1013.25, 7.5, 288.15).value,
0.016356706, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(230, 1013.25, 7.5, 288.15).value,
0.016462705, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(231, 1013.25, 7.5, 288.15).value,
0.016569571, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(232, 1013.25, 7.5, 288.15).value,
0.016677295, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(233, 1013.25, 7.5, 288.15).value,
0.016785866, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(234, 1013.25, 7.5, 288.15).value,
0.016895277, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(235, 1013.25, 7.5, 288.15).value,
0.017005518, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(236, 1013.25, 7.5, 288.15).value,
0.017116581, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(237, 1013.25, 7.5, 288.15).value,
0.017228459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(238, 1013.25, 7.5, 288.15).value,
0.017341142, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(239, 1013.25, 7.5, 288.15).value,
0.017454625, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(240, 1013.25, 7.5, 288.15).value,
0.0175689, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(241, 1013.25, 7.5, 288.15).value,
0.01768396, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(242, 1013.25, 7.5, 288.15).value,
0.017799799, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(243, 1013.25, 7.5, 288.15).value,
0.017916411, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(244, 1013.25, 7.5, 288.15).value,
0.018033789, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(245, 1013.25, 7.5, 288.15).value,
0.018151929, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(246, 1013.25, 7.5, 288.15).value,
0.018270824, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(247, 1013.25, 7.5, 288.15).value,
0.01839047, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(248, 1013.25, 7.5, 288.15).value,
0.018510862, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(249, 1013.25, 7.5, 288.15).value,
0.018631995, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(250, 1013.25, 7.5, 288.15).value,
0.018753865, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(251, 1013.25, 7.5, 288.15).value,
0.018876467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(252, 1013.25, 7.5, 288.15).value,
0.018999798, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(253, 1013.25, 7.5, 288.15).value,
0.019123854, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(254, 1013.25, 7.5, 288.15).value,
0.019248631, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(255, 1013.25, 7.5, 288.15).value,
0.019374127, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(256, 1013.25, 7.5, 288.15).value,
0.019500338, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(257, 1013.25, 7.5, 288.15).value,
0.019627261, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(258, 1013.25, 7.5, 288.15).value,
0.019754894, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(259, 1013.25, 7.5, 288.15).value,
0.019883235, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(260, 1013.25, 7.5, 288.15).value,
0.02001228, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(261, 1013.25, 7.5, 288.15).value,
0.020142029, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(262, 1013.25, 7.5, 288.15).value,
0.02027248, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(263, 1013.25, 7.5, 288.15).value,
0.020403631, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(264, 1013.25, 7.5, 288.15).value,
0.020535481, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(265, 1013.25, 7.5, 288.15).value,
0.020668028, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(266, 1013.25, 7.5, 288.15).value,
0.020801273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(267, 1013.25, 7.5, 288.15).value,
0.020935214, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(268, 1013.25, 7.5, 288.15).value,
0.021069851, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(269, 1013.25, 7.5, 288.15).value,
0.021205184, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(270, 1013.25, 7.5, 288.15).value,
0.021341213, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(271, 1013.25, 7.5, 288.15).value,
0.021477939, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(272, 1013.25, 7.5, 288.15).value,
0.021615362, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(273, 1013.25, 7.5, 288.15).value,
0.021753483, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(274, 1013.25, 7.5, 288.15).value,
0.021892304, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(275, 1013.25, 7.5, 288.15).value,
0.022031825, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(276, 1013.25, 7.5, 288.15).value,
0.02217205, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(277, 1013.25, 7.5, 288.15).value,
0.022312979, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(278, 1013.25, 7.5, 288.15).value,
0.022454615, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(279, 1013.25, 7.5, 288.15).value,
0.022596961, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(280, 1013.25, 7.5, 288.15).value,
0.022740019, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(281, 1013.25, 7.5, 288.15).value,
0.022883795, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(282, 1013.25, 7.5, 288.15).value,
0.02302829, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(283, 1013.25, 7.5, 288.15).value,
0.02317351, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(284, 1013.25, 7.5, 288.15).value,
0.023319459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(285, 1013.25, 7.5, 288.15).value,
0.023466142, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(286, 1013.25, 7.5, 288.15).value,
0.023613565, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(287, 1013.25, 7.5, 288.15).value,
0.023761733, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(288, 1013.25, 7.5, 288.15).value,
0.023910653, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(289, 1013.25, 7.5, 288.15).value,
0.024060332, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(290, 1013.25, 7.5, 288.15).value,
0.024210778, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(291, 1013.25, 7.5, 288.15).value,
0.024361999, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(292, 1013.25, 7.5, 288.15).value,
0.024514003, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(293, 1013.25, 7.5, 288.15).value,
0.024666801, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(294, 1013.25, 7.5, 288.15).value,
0.024820402, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(295, 1013.25, 7.5, 288.15).value,
0.024974817, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(296, 1013.25, 7.5, 288.15).value,
0.025130058, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(297, 1013.25, 7.5, 288.15).value,
0.025286138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(298, 1013.25, 7.5, 288.15).value,
0.025443071, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(299, 1013.25, 7.5, 288.15).value,
0.025600871, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(300, 1013.25, 7.5, 288.15).value,
0.025759555, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(301, 1013.25, 7.5, 288.15).value,
0.025919138, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(302, 1013.25, 7.5, 288.15).value,
0.026079639, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(303, 1013.25, 7.5, 288.15).value,
0.026241079, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(304, 1013.25, 7.5, 288.15).value,
0.026403477, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(305, 1013.25, 7.5, 288.15).value,
0.026566857, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(306, 1013.25, 7.5, 288.15).value,
0.026731244, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(307, 1013.25, 7.5, 288.15).value,
0.026896663, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(308, 1013.25, 7.5, 288.15).value,
0.027063143, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(309, 1013.25, 7.5, 288.15).value,
0.027230715, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(310, 1013.25, 7.5, 288.15).value,
0.027399412, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(311, 1013.25, 7.5, 288.15).value,
0.02756927, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(312, 1013.25, 7.5, 288.15).value,
0.027740328, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(313, 1013.25, 7.5, 288.15).value,
0.027912629, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(314, 1013.25, 7.5, 288.15).value,
0.028086218, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(315, 1013.25, 7.5, 288.15).value,
0.028261145, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(316, 1013.25, 7.5, 288.15).value,
0.028437464, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(317, 1013.25, 7.5, 288.15).value,
0.028615235, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(318, 1013.25, 7.5, 288.15).value,
0.028794523, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(319, 1013.25, 7.5, 288.15).value,
0.028975399, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(320, 1013.25, 7.5, 288.15).value,
0.029157939, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(321, 1013.25, 7.5, 288.15).value,
0.029342231, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(322, 1013.25, 7.5, 288.15).value,
0.029528367, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(323, 1013.25, 7.5, 288.15).value,
0.029716451, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(324, 1013.25, 7.5, 288.15).value,
0.029906599, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(325, 1013.25, 7.5, 288.15).value,
0.030098937, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(326, 1013.25, 7.5, 288.15).value,
0.030293607, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(327, 1013.25, 7.5, 288.15).value,
0.030490765, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(328, 1013.25, 7.5, 288.15).value,
0.030690588, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(329, 1013.25, 7.5, 288.15).value,
0.030893273, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(330, 1013.25, 7.5, 288.15).value,
0.031099041, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(331, 1013.25, 7.5, 288.15).value,
0.031308143, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(332, 1013.25, 7.5, 288.15).value,
0.031520859, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(333, 1013.25, 7.5, 288.15).value,
0.031737512, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(334, 1013.25, 7.5, 288.15).value,
0.031958467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(335, 1013.25, 7.5, 288.15).value,
0.032184144, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(336, 1013.25, 7.5, 288.15).value,
0.032415022, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(337, 1013.25, 7.5, 288.15).value,
0.032651659, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(338, 1013.25, 7.5, 288.15).value,
0.032894698, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(339, 1013.25, 7.5, 288.15).value,
0.033144893, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(340, 1013.25, 7.5, 288.15).value,
0.033403124, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(341, 1013.25, 7.5, 288.15).value,
0.033670433, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(342, 1013.25, 7.5, 288.15).value,
0.033948053, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(343, 1013.25, 7.5, 288.15).value,
0.034237459, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(344, 1013.25, 7.5, 288.15).value,
0.034540422, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(345, 1013.25, 7.5, 288.15).value,
0.034859093, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(346, 1013.25, 7.5, 288.15).value,
0.035196097, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(347, 1013.25, 7.5, 288.15).value,
0.03555467, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(348, 1013.25, 7.5, 288.15).value,
0.03593884, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(349, 1013.25, 7.5, 288.15).value,
0.036353672, places=5)
self.assertAlmostEqual(
models.itu676.gamma0_approx(350, 1013.25, 7.5, 288.15).value,
0.036805605, places=5)
def test_zenit_water_vapour_attenuation(self):
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 1.0, 14.25).value,
0.064981043, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 1.0, 14.25).value,
0.070360091, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 1.0, 14.25).value,
0.074660262, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.5, 14.25).value,
0.06911297, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.5, 14.25).value,
0.073434531, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.5, 14.25).value,
0.080098077, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.3, 14.25).value,
0.072394726, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.3, 14.25).value,
0.075162715, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.3, 14.25).value,
0.083750389, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.2, 14.25).value,
0.074394064, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.2, 14.25).value,
0.076695287, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.2, 14.25).value,
0.086350752, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 1.0, 29).value,
0.305636526, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 1.0, 29).value,
0.331425898, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 1.0, 29).value,
0.355205229, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.5, 29).value,
0.324977228, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.5, 29).value,
0.345830132, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.5, 29).value,
0.38091961, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.3, 29).value,
0.340327583, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.3, 29).value,
0.353923317, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.3, 29).value,
0.398176611, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
51.5, -0.14, 0.2, 29).value,
0.349674822, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
41.9, 12.49, 0.2, 29).value,
0.361098289, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
33.94, 18.43, 0.2, 29).value,
0.410456469, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 1.0, 14.25).value,
0.099820608, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 1.0, 14.25).value,
0.118484695, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.5, 14.25).value,
0.105446054, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.5, 14.25).value,
0.12252307, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.3, 14.25).value,
0.108812058, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.3, 14.25).value,
0.125093339, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.2, 14.25).value,
0.111441086, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.2, 14.25).value,
0.127090376, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 1.0, 29).value,
0.473979935, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 1.0, 29).value,
0.561753331, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.5, 29).value,
0.500468518, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.5, 29).value,
0.580717641, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.3, 29).value,
0.516307047, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.3, 29).value,
0.592782098, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
22.9, -43.23, 0.2, 29).value,
0.528672179, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
25.78, -80.22, 0.2, 29).value,
0.602152942, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 1.0, 14.25).value,
0.149156898, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 1.0, 14.25).value,
0.121165007, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 1.0, 14.25).value,
0.051589359, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.5, 14.25).value,
0.153859398, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.5, 14.25).value,
0.123550552, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.5, 14.25).value,
0.052996133, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.3, 14.25).value,
0.156616572, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.3, 14.25).value,
0.125325192, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.3, 14.25).value,
0.053871006, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.2, 14.25).value,
0.158958354, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.2, 14.25).value,
0.126766365, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.2, 14.25).value,
0.054721343, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 1.0, 29).value,
0.683528163, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 1.0, 29).value,
0.555168022, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 1.0, 29).value,
0.188559832, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.5, 29).value,
0.704836196, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.5, 29).value,
0.565993797, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.5, 29).value,
0.193687836, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.3, 29).value,
0.717323975, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.3, 29).value,
0.574044911, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.3, 29).value,
0.19687619, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
28.717, 77.3, 0.2, 29).value,
0.727927181, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
3.133, 101.7, 0.2, 29).value,
0.580581723, places=5)
self.assertAlmostEqual(
models.itu676.zenit_water_vapour_attenuation(
9.05, 38.7, 0.2, 29).value,
0.19997458, places=5)
class ITUR836_6TestCase(test.TestCase):
    """Validation tests for ITU-R P.836-6 (water vapour characteristics).

    Each expected value is checked against the model output for a given
    (latitude [deg], longitude [deg], exceedance probability p [%],
    altitude-like fourth argument) tuple to 5 decimal places.
    """

    def setUp(self):
        # Pin the recommendation version so the expected values stay valid.
        models.itu836.change_version(6)

    # NOTE(review): these two methods previously lacked the ``test_`` prefix,
    # so unittest discovery silently skipped them. Renamed so they run.

    def test_surface_water_vapour_density(self):
        """Surface water vapour density [g/m^3] validation cases."""
        # (lat, lon, p, alt, expected)
        cases = [
            (3.133, 101.7, 0.1, 0.236104459, 22.93756598),
            (3.133, 101.7, 0.15, 0.236104459, 22.80534575),
            (3.133, 101.7, 0.3, 0.236104459, 22.55507955),
            (3.133, 101.7, 0.35, 0.236104459, 22.49361957),
            (22.9, -43.23, 0.1, 0, 21.59164912),
            (22.9, -43.23, 0.15, 0, 21.46164369),
            (22.9, -43.23, 0.3, 0, 21.24753319),
            (22.9, -43.23, 0.35, 0, 21.18676013),
            (23, 30, 0.1, 0.247, 11.88170822),
            (23, 30, 0.15, 0.247, 11.61777268),
            (23, 30, 0.3, 0.247, 11.12235912),
            (23, 30, 0.35, 0.247, 11.00877052),
            (25.78, -80.22, 0.1, 7.51071e-05, 23.50748104),
            (25.78, -80.22, 0.15, 7.51071e-05, 23.34324475),
            (25.78, -80.22, 0.3, 7.51071e-05, 23.06574222),
            (25.78, -80.22, 0.35, 7.51071e-05, 23.00327243),
            (28.717, 77.3, 0.1, 0.217559455, 25.95287453),
            (28.717, 77.3, 0.15, 0.217559455, 25.71217873),
            (28.717, 77.3, 0.3, 0.217559455, 25.34018758),
            (28.717, 77.3, 0.35, 0.217559455, 25.2557054),
            (33.94, 18.43, 0.1, 0, 24.00156532),
            (33.94, 18.43, 0.15, 0, 23.85987554),
            (33.94, 18.43, 0.3, 0, 23.51464505),
            (33.94, 18.43, 0.35, 0, 23.41954477),
            (41.9, 12.49, 0.1, 0.056701045, 19.78501126),
            (41.9, 12.49, 0.15, 0.056701045, 19.48948848),
            (41.9, 12.49, 0.3, 0.056701045, 19.02450953),
            (41.9, 12.49, 0.35, 0.056701045, 18.92055161),
            (51.5, -0.14, 0.1, 0.069164224, 15.21351315),
            (51.5, -0.14, 0.15, 0.069164224, 15.0172773),
            (51.5, -0.14, 0.3, 0.069164224, 14.6189506),
            (51.5, -0.14, 0.35, 0.069164224, 14.50640729),
        ]
        for lat, lon, p, alt, expected in cases:
            with self.subTest(lat=lat, lon=lon, p=p):
                self.assertAlmostEqual(
                    models.itu836.surface_water_vapour_density(
                        lat, lon, p, alt).value,
                    expected, places=5)

    def test_total_water_vapour_content(self):
        """Total columnar water vapour content [kg/m^2] validation cases."""
        # (lat, lon, p, alt, expected)
        cases = [
            (3.133, 101.7, 0.1, 0.23610446, 62.16532093),
            (3.133, 101.7, 0.15, 0.23610446, 61.59527521),
            (3.133, 101.7, 0.3, 0.23610446, 60.58285243),
            (3.133, 101.7, 0.35, 0.23610446, 60.35619302),
            (22.9, -43.23, 0.1, 0.0, 56.38788554),
            (22.9, -43.23, 0.15, 0.0, 55.36064664),
            (22.9, -43.23, 0.3, 0.0, 53.4851113),
            (22.9, -43.23, 0.35, 0.0, 53.03918259),
            (23, 30, 0.1, 0.247, 38.47288189),
            (23, 30, 0.15, 0.247, 37.21449337),
            (23, 30, 0.3, 0.247, 34.63093178),
            (23, 30, 0.35, 0.247, 34.06569649),
            (25.78, -80.22, 0.1, 7.511e-05, 62.84315177),
            (25.78, -80.22, 0.15, 7.511e-05, 61.95641322),
            (25.78, -80.22, 0.3, 7.511e-05, 60.48487688),
            (25.78, -80.22, 0.35, 7.511e-05, 60.1561742),
            (28.717, 77.3, 0.1, 0.21755946, 75.44891006),
            (28.717, 77.3, 0.15, 0.21755946, 74.79639702),
            (28.717, 77.3, 0.3, 0.21755946, 73.40408393),
            (28.717, 77.3, 0.35, 0.21755946, 73.07234727),
            (33.94, 18.43, 0.1, 0.0, 45.19895208),
            (33.94, 18.43, 0.15, 0.0, 44.15275162),
            (33.94, 18.43, 0.3, 0.0, 42.21022387),
            (33.94, 18.43, 0.35, 0.0, 41.69772633),
            (41.9, 12.49, 0.1, 0.05670104, 39.93693588),
            (41.9, 12.49, 0.15, 0.05670104, 39.33984158),
            (41.9, 12.49, 0.3, 0.05670104, 38.19321515),
            (41.9, 12.49, 0.35, 0.05670104, 37.94621912),
            (51.5, -0.14, 0.1, 0.06916422, 39.23803432),
            (51.5, -0.14, 0.15, 0.06916422, 38.41414987),
            (51.5, -0.14, 0.3, 0.06916422, 36.88058222),
            (51.5, -0.14, 0.35, 0.06916422, 36.4074561),
        ]
        for lat, lon, p, alt, expected in cases:
            with self.subTest(lat=lat, lon=lon, p=p):
                self.assertAlmostEqual(
                    models.itu836.total_water_vapour_content(
                        lat, lon, p, alt).value,
                    expected, places=5)
class ITUR838_3TestCase(test.TestCase):
    """Validation tests for ITU-R P.838-3 (rain specific attenuation).

    The original auto-generated version repeated every assertion 3-4 times
    verbatim; here each unique case is asserted once via ``subTest``.
    """

    def setUp(self):
        # Pin the recommendation version so the expected values stay valid.
        models.itu838.change_version(3)

    def test_rain_specific_attenuation(self):
        """Specific attenuation [dB/km] for (R, f, el, tau) validation cases.

        Arguments are passed positionally as in the original tests:
        (rain rate, frequency [GHz], elevation angle [deg], fourth
        polarization-related argument — presumably the tilt angle tau;
        TODO confirm against the itu838 signature).
        """
        cases = [
            (30.875024, 14.25, 30.87067768, 0, 1.879742),
            (56.370009, 14.25, 40.97052773, 0, 3.630988),
            (55.231625, 14.25, 47.91280491, 0, 3.503189),
            (30.875024, 29.00, 30.87067768, 0, 5.814832),
            (56.370009, 29.00, 40.97052773, 0, 10.157375),
            (55.231625, 29.00, 47.91280491, 0, 9.846762),
            (58.094216, 14.25, 59.81487174, 0, 3.628282),
            (89.114103, 14.25, 49.20900369, 0, 5.948478),
            (58.094216, 29.00, 59.81487174, 0, 10.132682),
            (89.114103, 29.00, 49.20900369, 0, 15.460212),
            (57.39623, 14.25, 55.90591362, 0, 3.603569),
            (93.607098, 14.25, 67.76751981, 0, 6.06336),
            (54.623411, 14.25, 38.14104832, 0, 3.523996),
            (57.39623, 29.00, 55.90591362, 0, 10.078266),
            (93.607098, 29.00, 67.76751981, 0, 15.712442),
            (54.623411, 29.00, 38.14104832, 0, 9.904098),
            (26.48052, 14.25, 31.07694309, 0, 1.581308489),
            (33.936232, 14.25, 40.23202374, 0, 2.06173217),
            (27.13586832, 14.25, 46.35969261, 0, 1.592084199),
            (26.48052, 29, 31.07694309, 0, 5.021802196),
            (33.936232, 29, 40.23202374, 0, 6.278460355),
            (27.13586832, 29, 46.35969261, 0, 5.031354793),
            (50.639304, 14.25, 22.27833468, 0, 3.321396378),
            (78.2994993, 14.25, 52.6789929, 0, 5.11503455),
            (50.639304, 29, 22.27833468, 0, 9.424302438),
            (78.2994993, 29, 52.6789929, 0, 13.59290067),
            (63.62668149, 14.25, 48.23861222, 90, 3.72899602),
            (99.13558978, 14.25, 85.80767474, 90, 6.340652096),
            (42.91007183, 14.25, 20.14348033, 90, 2.350323497),
            (63.62668149, 29, 48.23861222, 90, 10.28694456),
            (99.13558978, 29, 85.80767474, 90, 16.31838263),
            (42.91007183, 29, 20.14348033, 90, 6.833646475),
        ]
        for rr, f, el, tau, expected in cases:
            with self.subTest(R=rr, f=f, el=el, tau=tau):
                self.assertAlmostEqual(
                    models.itu838.rain_specific_attenuation(
                        rr, f, el, tau).value,
                    expected, places=5)
class ITUR837_6TestCase(test.TestCase):
    """Validation tests for ITU-R P.837-6 (rainfall rate statistics).

    The original auto-generated version asserted every case twice verbatim;
    each unique case is now checked once via ``subTest``.
    """

    def setUp(self):
        # Pin the recommendation version so the expected values stay valid.
        models.itu837.change_version(6)

    def test_rainfall_rate(self):
        """Rainfall rate exceeded 0.01% of the time for each location."""
        # (lat, lon, p, expected) — longitudes given in 0..360 form as in
        # the original generated data.
        cases = [
            (51.500, 359.86, 0.01, 30.8750240),
            (41.900, 12.49, 0.01, 56.3700090),
            (33.940, 18.43, 0.01, 55.2316250),
            (22.900, 316.77, 0.01, 58.0942160),
            (25.780, 279.78, 0.01, 89.1141030),
            (28.717, 77.30, 0.01, 57.3962300),
            (3.133, 101.70, 0.01, 93.6070980),
            (9.050, 38.70, 0.01, 54.6234110),
        ]
        for lat, lon, p, expected in cases:
            with self.subTest(lat=lat, lon=lon, p=p):
                self.assertAlmostEqual(
                    models.itu837.rainfall_rate(lat, lon, p).value,
                    expected, places=5)
class ITUR837_7TestCase(test.TestCase):
    """Validation tests for ITU-R P.837-7 (rainfall rate statistics)."""

    def setUp(self):
        # Pin the recommendation version so the expected values stay valid.
        models.itu837.change_version(7)

    def test_rainfall_rate(self):
        """Rainfall rate at several exceedance probabilities per location."""
        # (lat, lon, p, expected, places) — the original asserted most cases
        # to 3 decimal places, one to 2; the per-case precision is preserved.
        cases = [
            (3.133, 101.7, 0.1, 34.64798123, 3),
            (3.133, 101.7, 0.15, 27.7636201, 3),
            (3.133, 101.7, 0.3, 18.26254364, 3),
            (3.133, 101.7, 0.35, 16.49493229, 3),
            (22.9, -43.23, 0.1, 14.58963041, 3),
            (22.9, -43.23, 0.15, 11.00510082, 3),
            (22.9, -43.23, 0.3, 6.23796236, 2),
            (22.9, -43.23, 0.35, 5.38239642, 3),
            (23, 30, 0.1, 0.0, 3),
            (23, 30, 0.15, 0.0, 3),
            (23, 30, 0.3, 0.0, 3),
            (23, 30, 0.35, 0.0, 3),
            (25.78, -80.22, 0.1, 25.33888119, 3),
            (25.78, -80.22, 0.15, 19.86683577, 3),
            (25.78, -80.22, 0.3, 12.43676554, 3),
            (25.78, -80.22, 0.35, 11.07566126, 3),
            (28.717, 77.3, 0.1, 16.53857378, 3),
            (28.717, 77.3, 0.15, 12.04651363, 3),
            (28.717, 77.3, 0.3, 6.21600589, 3),
            (28.717, 77.3, 0.35, 5.19609765, 3),
            (33.94, 18.43, 0.1, 7.43193175, 3),
            (33.94, 18.43, 0.15, 5.53031864, 3),
            (33.94, 18.43, 0.3, 3.03506603, 3),
            (33.94, 18.43, 0.35, 2.59276061, 3),
            (41.9, 12.49, 0.1, 11.19798305, 3),
            (41.9, 12.49, 0.15, 8.88472572, 3),
            (41.9, 12.49, 0.3, 5.75356253, 3),
            (41.9, 12.49, 0.35, 5.18058827, 3),
            (51.5, -0.14, 0.1, 8.9924712, 3),
            (51.5, -0.14, 0.15, 7.17369312, 3),
            (51.5, -0.14, 0.3, 4.69033625, 3),
            (51.5, -0.14, 0.35, 4.23258601, 3),
        ]
        for lat, lon, p, expected, places in cases:
            with self.subTest(lat=lat, lon=lon, p=p):
                self.assertAlmostEqual(
                    models.itu837.rainfall_rate(lat, lon, p).value,
                    expected, places=places)

    def test_rainfall_probability(self):
        """Probability of rain [%] for each location."""
        cases = [
            (3.133, 101.7, 4.53654368),
            (22.9, -43.23, 1.41773353),
            (23, 30, 0.00051911),
            (25.78, -80.22, 2.90785192),
            (28.717, 77.3, 1.07089363),
            (33.94, 18.43, 1.27567391),
            (41.9, 12.49, 5.26971907),
            (51.5, -0.14, 5.3615096),
        ]
        for lat, lon, expected in cases:
            with self.subTest(lat=lat, lon=lon):
                self.assertAlmostEqual(
                    models.itu837.rainfall_probability(lat, lon).value,
                    expected, places=5)

    def test_rainfall_rate_R001(self):
        """R0.01: rainfall rate exceeded 0.01% of the time, per location."""
        cases = [
            (3.133, 101.7, 99.1481136),
            (22.9, -43.23, 50.639304),
            (23.0, 30.0, 0.0),
            (25.78, -80.22, 78.2982928),
            (28.717, 77.3, 63.5972464),
            (33.94, 18.43, 27.1349664),
            (41.9, 12.49, 33.936232),
            (51.5, -0.14, 26.48052),
        ]
        for lat, lon, expected in cases:
            with self.subTest(lat=lat, lon=lon):
                self.assertAlmostEqual(
                    models.itu837.rainfall_rate(lat, lon, 0.01).value,
                    expected, places=5)
class ITUR839_4TestCase(test.TestCase):
    """Validation tests for ITU-R P.839-4 (rain height model)."""

    def setUp(self):
        # Pin the recommendation version under test.
        models.itu839.change_version(4)

    def test_isoterm_0_deg(self):
        """Mean annual 0 degree Celsius isotherm height above sea level [km]."""
        # (latitude [deg], longitude [deg], expected height [km])
        cases = [
            (3.133, 101.7, 4.5979744),
            (22.9, -43.23, 3.79877867),
            (23, 30, 4.168),
            (25.78, -80.22, 4.20946133),
            (28.717, 77.3, 4.89820404),
            (33.94, 18.43, 2.20330276),
            (41.9, 12.49, 2.68749333),
            (51.5, -0.14, 2.09273333),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu839.isoterm_0(lat, lon).value,
                expected, places=5)

    def test_rain_height(self):
        """Mean annual rain height above sea level [km].

        The original validation list repeated several points verbatim;
        those exact duplicates were removed (the pass/fail outcome is
        unchanged).  Points expressed with both longitude conventions
        (e.g. 359.86 vs -0.14) or with different expected-value
        precision are kept as distinct cases.
        """
        # (latitude [deg], longitude [deg], expected rain height [km])
        cases = [
            (51.500, 359.86, 2.4527330),
            (41.900, 12.49, 3.0474930),
            (33.940, 18.43, 2.5633030),
            (22.900, 316.77, 4.1587790),
            (25.780, 279.78, 4.5694610),
            (28.717, 77.30, 5.2582040),
            (3.133, 101.70, 4.9579740),
            (9.050, 38.70, 4.7839070),
            (3.133, 101.7, 4.9579744),
            (22.9, -43.23, 4.15877867),
            (23, 30, 4.528),
            (25.78, -80.22, 4.56946133),
            (28.717, 77.3, 5.25820404),
            (33.94, 18.43, 2.56330276),
            (41.9, 12.49, 3.04749333),
            (51.5, -0.14, 2.45273333),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu839.rain_height(lat, lon).value,
                expected, places=5)
class ITUR618_12TestCase(test.TestCase):
    """Validation tests for ITU-R P.618-12 (Earth-space propagation).

    The original test bodies contained large assertion sequences that
    were duplicated verbatim (in particular, the whole rain-attenuation
    sequence appeared twice, and three cross-polarization entries were
    repeated).  They are rewritten here as data-driven loops with the
    exact duplicates removed; the pass/fail outcome is unchanged.
    """

    def setUp(self):
        # Pin the versions of every recommendation P.618-12 depends on.
        models.itu618.change_version(12)
        models.itu453.change_version(12)
        models.itu838.change_version(3)
        models.itu836.change_version(5)
        models.itu837.change_version(6)
        models.itu840.change_version(6)

    def test_rain_cross_polarization_discrimination(self):
        """Cross-polarization discrimination caused by rain [dB]."""
        # (rain attenuation Ap [dB], frequency [GHz], elevation [deg],
        #  outage probability p [%], expected XPD [dB]); tau = 0 throughout.
        cases = [
            (16.38308757, 14.25, 30.870677680, 0.001, 27.143007980),
            (3.89479806, 14.25, 40.970527730, 0.1, 37.386086000),
            (9.71179484, 14.25, 47.912804910, 0.01, 33.812795580),
            (71.44613350, 29, 40.970527730, 0.001, 21.244470560),
            (12.87478397, 29, 47.912804910, 0.1, 35.166125690),
            (39.07323323, 29, 40.970527730, 0.01, 25.180145740),
            (23.00197384, 14.25, 59.814871740, 0.001, 33.308530550),
            (32.74150676, 14.25, 49.209003690, 0.001, 25.508227320),
            (4.92489694, 14.25, 59.814871740, 0.1, 41.798127850),
            (6.96606559, 14.25, 49.209003690, 0.1, 34.830206060),
            (12.76053997, 14.25, 59.814871740, 0.01, 36.168649690),
            (18.06938866, 14.25, 49.209003690, 0.01, 28.803871260),
            (100.96022257, 29, 49.209003690, 0.001, 20.365001500),
            (20.43214239, 29, 59.814871740, 0.1, 35.581135690),
            (27.86774318, 29, 49.209003690, 0.1, 28.745547830),
            (46.32024457, 29, 59.814871740, 0.01, 30.303830010),
            (63.46384760, 29, 49.209003690, 0.01, 23.046241580),
            (73.05533363, 29, 59.814871740, 0.001, 28.089155910),
            (26.85402570, 14.25, 55.905913620, 0.001, 29.993601830),
            (4.44533923, 14.25, 38.141048320, 0.1, 35.652315760),
            (11.06265445, 14.25, 38.141048320, 0.01, 30.034285750),
            (21.84602116, 29, 55.905913620, 0.1, 33.289964560),
            (51.72271818, 29, 55.905913620, 0.01, 27.480618010),
            (53.61322867, 29, 38.141048320, 0.001, 23.354012700),
        ]
        for att, f, el, p, expected in cases:
            self.assertAlmostEqual(
                models.itu618.rain_cross_polarization_discrimination(
                    att, f, el, p, 0).value,
                expected, places=5)

    def test_rain_attenuation(self):
        """Slant-path rain attenuation exceeded for p% of the year [dB]."""
        # (latitude [deg], longitude [deg], frequency [GHz],
        #  elevation [deg], station height hs [km], probability p [%],
        #  expected attenuation [dB]); tau = 0 throughout.
        cases = [
            # p = 0.01 %
            (51.500, 359.86, 14.25, 30.87067768, 0.0691640, 0.01, 7.5572640),
            (41.900, 12.49, 14.25, 40.97052773, 0.0567010, 0.01, 11.4735460),
            (33.940, 18.43, 14.25, 47.91280491, 0.0000000, 0.01, 9.7117950),
            (51.500, 359.86, 29.00, 30.87067768, 0.0691640, 0.01, 25.7166770),
            (41.900, 12.49, 29.00, 40.97052773, 0.0567010, 0.01, 39.0732330),
            (33.940, 18.43, 29.00, 47.91280491, 0.0000000, 0.01, 33.4169840),
            (22.900, 316.77, 14.25, 59.81487174, 0.0000000, 0.01, 12.7605400),
            (25.780, 279.78, 14.25, 49.20900369, 0.0000750, 0.01, 18.0693890),
            (22.900, 316.77, 29.00, 59.81487174, 0.0000000, 0.01, 46.3202450),
            (25.780, 279.78, 29.00, 49.20900369, 0.0000750, 0.01, 63.4638480),
            (28.717, 77.30, 14.25, 55.90591362, 0.2175590, 0.01, 14.1707990),
            (3.133, 101.70, 14.25, 67.76751981, 0.2361040, 0.01, 19.6617050),
            (9.050, 38.70, 14.25, 38.14104832, 2.4500050, 0.01, 11.0626540),
            (28.717, 77.30, 29.00, 55.90591362, 0.2175590, 0.01, 51.7227180),
            (3.133, 101.70, 29.00, 67.76751981, 0.2361040, 0.01, 70.5396050),
            (9.050, 38.70, 29.00, 38.14104832, 2.4500050, 0.01, 35.1160650),
            # p = 0.10 %
            (51.500, 359.86, 14.25, 30.87067768, 0.0691640, 0.10, 2.4567600),
            (41.900, 12.49, 14.25, 40.97052773, 0.0567010, 0.10, 3.8947980),
            (33.940, 18.43, 14.25, 47.91280491, 0.0000000, 0.10, 3.2920370),
            (51.500, 359.86, 29.00, 30.87067768, 0.0691640, 0.10, 9.4912070),
            (41.900, 12.49, 29.00, 40.97052773, 0.0567010, 0.10, 15.0594580),
            (33.940, 18.43, 29.00, 47.91280491, 0.0000000, 0.10, 12.8747840),
            (22.900, 316.77, 14.25, 59.81487174, 0.0000000, 0.10, 4.9248970),
            (25.780, 279.78, 14.25, 49.20900369, 0.0000750, 0.10, 6.9660660),
            (22.900, 316.77, 29.00, 59.81487174, 0.0000000, 0.10, 20.4321420),
            (25.780, 279.78, 29.00, 49.20900369, 0.0000750, 0.10, 27.8677430),
            (28.717, 77.30, 14.25, 55.90591362, 0.2175590, 0.10, 5.2338740),
            (3.133, 101.70, 14.25, 67.76751981, 0.2361040, 0.10, 9.6728110),
            (9.050, 38.70, 14.25, 38.14104832, 2.4500050, 0.10, 4.4453390),
            (28.717, 77.30, 29.00, 55.90591362, 0.2175590, 0.10, 21.8460210),
            (3.133, 101.70, 29.00, 67.76751981, 0.2361040, 0.10, 39.6143120),
            (9.050, 38.70, 29.00, 38.14104832, 2.4500050, 0.10, 15.9048720),
            # p = 1.00 %
            (51.500, 359.86, 14.25, 30.87067768, 0.0691640, 1.00, 0.5628470),
            (41.900, 12.49, 14.25, 40.97052773, 0.0567010, 1.00, 0.9317550),
            (33.940, 18.43, 14.25, 47.91280491, 0.0000000, 1.00, 0.7619040),
            (51.500, 359.86, 29.00, 30.87067768, 0.0691640, 1.00, 2.4686380),
            (41.900, 12.49, 29.00, 40.97052773, 0.0567010, 1.00, 4.0904280),
            (33.940, 18.43, 29.00, 47.91280491, 0.0000000, 1.00, 3.3867490),
            (22.900, 316.77, 14.25, 59.81487174, 0.0000000, 1.00, 1.0593540),
            (25.780, 279.78, 14.25, 49.20900369, 0.0000750, 1.00, 1.6122160),
            (22.900, 316.77, 29.00, 59.81487174, 0.0000000, 1.00, 5.0231130),
            (25.780, 279.78, 29.00, 49.20900369, 0.0000750, 1.00, 7.3463010),
            (28.717, 77.30, 14.25, 55.90591362, 0.2175590, 1.00, 1.2022670),
            (3.133, 101.70, 14.25, 67.76751981, 0.2361040, 1.00, 1.7852610),
            (9.050, 38.70, 14.25, 38.14104832, 2.4500050, 1.00, 0.8916230),
            (28.717, 77.30, 29.00, 55.90591362, 0.2175590, 1.00, 5.7386810),
            (3.133, 101.70, 29.00, 67.76751981, 0.2361040, 1.00, 8.3461990),
            (9.050, 38.70, 29.00, 38.14104832, 2.4500050, 1.00, 3.5957140),
        ]
        for lat, lon, f, el, hs, p, expected in cases:
            self.assertAlmostEqual(
                models.itu618.rain_attenuation(
                    lat, lon, f, el, hs=hs, p=p, tau=0.00).value,
                expected, places=5)

    def test_scintillation_attenuation(self):
        """Tropospheric scintillation attenuation exceeded for p% [dB]."""
        # (latitude [deg], longitude [deg], frequency [GHz],
        #  elevation [deg], probability p [%], expected attenuation [dB]);
        # antenna diameter D = 0.9 m and efficiency eta = 0.6 throughout.
        cases = [
            # p = 0.001 %
            (51.5, 359.86, 14.25, 30.87067768, 0.001, 0.866044),
            (41.9, 12.49, 14.25, 40.97052773, 0.001, 0.710527),
            (33.94, 18.43, 14.25, 47.91280491, 0.001, 0.764448),
            (51.5, 359.86, 29, 30.87067768, 0.001, 1.289482),
            (41.9, 12.49, 29, 40.97052773, 0.001, 1.054611),
            (33.94, 18.43, 29, 47.91280491, 0.001, 1.132606),
            (22.9, 316.77, 14.25, 59.81487174, 0.001, 0.699472),
            (25.78, 279.78, 14.25, 49.20900369, 0.001, 0.912438),
            (22.9, 316.77, 29, 59.81487174, 0.001, 1.033819),
            (25.78, 279.78, 29, 49.20900369, 0.001, 1.351457),
            (28.717, 77.3, 14.25, 55.90591362, 0.001, 0.571530),
            (3.133, 101.7, 14.25, 67.76751981, 0.001, 0.736636),
            (9.05, 38.7, 14.25, 38.14104832, 0.001, 0.733740),
            (28.717, 77.3, 29, 55.90591362, 0.001, 0.845322),
            (3.133, 101.7, 29, 67.76751981, 0.001, 1.087468),
            (9.05, 38.7, 29, 38.14104832, 0.001, 1.089954),
            # p = 0.1 %
            (51.5, 359.86, 14.25, 30.87067768, 0.1, 0.402326),
            (41.9, 12.49, 14.25, 40.97052773, 0.1, 0.330080),
            (33.94, 18.43, 14.25, 47.91280491, 0.1, 0.355129),
            (51.5, 359.86, 29, 30.87067768, 0.1, 0.599037),
            (41.9, 12.49, 29, 40.97052773, 0.1, 0.489926),
            (33.94, 18.43, 29, 47.91280491, 0.1, 0.526159),
            (22.9, 316.77, 14.25, 59.81487174, 0.1, 0.324944),
            (25.78, 279.78, 14.25, 49.20900369, 0.1, 0.423879),
            (22.9, 316.77, 29, 59.81487174, 0.1, 0.480267),
            (25.78, 279.78, 29, 49.20900369, 0.1, 0.627828),
            (28.717, 77.3, 14.25, 55.90591362, 0.1, 0.265508),
            (3.133, 101.7, 14.25, 67.76751981, 0.1, 0.342209),
            (9.05, 38.7, 14.25, 38.14104832, 0.1, 0.340864),
            (28.717, 77.3, 29, 55.90591362, 0.1, 0.392700),
            (3.133, 101.7, 29, 67.76751981, 0.1, 0.505190),
            (9.05, 38.7, 29, 38.14104832, 0.1, 0.506345),
            # p = 1 %
            (51.5, 359.86, 14.25, 30.87067768, 1, 0.249221),
            (41.9, 12.49, 14.25, 40.97052773, 1, 0.204468),
            (33.94, 18.43, 14.25, 47.91280491, 1, 0.219985),
            (51.5, 359.86, 29, 30.87067768, 1, 0.371074),
            (41.9, 12.49, 29, 40.97052773, 1, 0.303485),
            (33.94, 18.43, 29, 47.91280491, 1, 0.325930),
            (22.9, 316.77, 14.25, 59.81487174, 1, 0.201287),
            (25.78, 279.78, 14.25, 49.20900369, 1, 0.262572),
            (22.9, 316.77, 29, 59.81487174, 1, 0.297502),
            (25.78, 279.78, 29, 49.20900369, 1, 0.388909),
            (28.717, 77.3, 14.25, 55.90591362, 1, 0.164469),
            (3.133, 101.7, 14.25, 67.76751981, 1, 0.211982),
            (9.05, 38.7, 14.25, 38.14104832, 1, 0.211148),
            (28.717, 77.3, 29, 55.90591362, 1, 0.243258),
            (3.133, 101.7, 29, 67.76751981, 1, 0.312940),
            (9.05, 38.7, 29, 38.14104832, 1, 0.313656),
        ]
        for lat, lon, f, el, p, expected in cases:
            self.assertAlmostEqual(
                models.itu618.scintillation_attenuation(
                    lat, lon, f, el, p, 0.9, eta=0.6).value,
                expected, places=5)
class ITUR618_13TestCase(test.TestCase):
def setUp(self):
models.itu453.change_version(13)
models.itu618.change_version(13)
models.itu676.change_version(11)
models.itu836.change_version(6)
models.itu837.change_version(7)
models.itu838.change_version(3)
models.itu839.change_version(4)
models.itu840.change_version(7)
models.itu1510.change_version(1)
models.itu1511.change_version(1)
def test_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=1.0,
tau=0, R001=26.48052).value,
0.4891464, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=1.0,
tau=0, R001=33.936232).value,
0.62159245, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=1.0,
tau=0, R001=27.13586832).value,
0.42101702, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.1,
tau=0, R001=26.48052).value,
2.16093996, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.1,
tau=0, R001=33.936232).value,
2.69015654, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.1,
tau=0, R001=27.13586832).value,
1.91338757, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.01,
tau=0, R001=26.48052).value,
6.72784425, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.01,
tau=0, R001=33.936232).value,
8.20500328, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.01,
tau=0, R001=27.13586832).value,
5.9418061, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 14.25, 31.07694309, p=0.001,
tau=0, R001=26.48052).value,
14.76177358, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 14.25, 40.23202374, p=0.001,
tau=0, R001=33.936232).value,
17.636376, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 14.25, 46.35969261, p=0.001,
tau=0, R001=27.13586832).value,
12.98151687, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=1.0,
tau=0, R001=26.48052).value,
2.17898357, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=1.0,
tau=0, R001=33.936232).value,
2.81537632, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=1.0,
tau=0, R001=27.13586832).value,
1.96063611, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.1,
tau=0, R001=26.48052).value,
8.46779316, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.1,
tau=0, R001=33.936232).value,
10.70289842, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.1,
tau=0, R001=27.13586832).value,
7.80832251, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.01,
tau=0, R001=26.48052).value,
23.1908096, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.01,
tau=0, R001=33.936232).value,
28.67449232, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.01,
tau=0, R001=27.13586832).value,
21.24861968, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
51.5, -0.14, 29.0, 31.07694309, p=0.001,
tau=0, R001=26.48052).value,
44.76009125, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
41.9, 12.49, 29.0, 40.23202374, p=0.001,
tau=0, R001=33.936232).value,
54.14015005, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
33.94, 18.43, 29.0, 46.35969261, p=0.001,
tau=0, R001=27.13586832).value,
40.68133015, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=1.0,
tau=0, R001=50.639304).value,
1.70690128, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=1.0,
tau=0, R001=78.2994993).value,
1.43904149, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.1,
tau=0, R001=50.639304).value,
8.27164744, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.1,
tau=0, R001=78.2994993).value,
6.30417186, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.01,
tau=0, R001=50.639304).value,
18.94410356, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.01,
tau=0, R001=78.2994993).value,
16.44617644, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 14.25, 22.27833468, p=0.001,
tau=0, R001=50.639304).value,
29.91171296, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 14.25, 52.6789929, p=0.001,
tau=0, R001=78.2994993).value,
29.95767701, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=1.0,
tau=0, R001=50.639304).value,
6.81336808, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=1.0,
tau=0, R001=78.2994993).value,
6.66385625, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.1,
tau=0, R001=50.639304).value,
29.31896844, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.1,
tau=0, R001=78.2994993).value,
25.59455941, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.01,
tau=0, R001=50.639304).value,
59.62576355, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.01,
tau=0, R001=78.2994993).value,
58.53988572, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
22.9, -43.23, 29.0, 22.27833468, p=0.001,
tau=0, R001=50.639304).value,
83.5996391, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
25.78, -80.22, 29.0, 52.6789929, p=0.001,
tau=0, R001=78.2994993).value,
93.48939944, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=1.0,
tau=90, R001=63.61888808).value,
1.2731081, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=1.0,
tau=90, R001=99.15117186).value,
1.93713255, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=1.0,
tau=90, R001=42.91007183).value,
1.04440572, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.1,
tau=90, R001=63.61888808).value,
5.48101228, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.1,
tau=90, R001=99.15117186).value,
10.67987642, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.1,
tau=90, R001=42.91007183).value,
6.0510347, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.01,
tau=90, R001=63.61888808).value,
14.85903351, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.01,
tau=90, R001=99.15117186).value,
21.03740448, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.01,
tau=90, R001=42.91007183).value,
12.61120361, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 14.25, 48.24116215, p=0.001,
tau=90, R001=63.61888808).value,
28.21372983, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 14.25, 85.80457401, p=0.001,
tau=90, R001=99.15117186).value,
28.13337932, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 14.25, 20.14348033, p=0.001,
tau=90, R001=42.91007183).value,
17.85045772, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=1.0,
tau=90, R001=63.61888808).value,
5.88085649, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=1.0,
tau=90, R001=99.15117186).value,
9.84052929, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=1.0,
tau=90, R001=42.91007183).value,
3.8213237, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.1,
tau=90, R001=63.61888808).value,
22.20219047, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.1,
tau=90, R001=99.15117186).value,
47.18910296, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.1,
tau=90, R001=42.91007183).value,
19.80717661, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.01,
tau=90, R001=63.61888808).value,
52.7819415, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.01,
tau=90, R001=99.15117186).value,
80.85074503, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.01,
tau=90, R001=42.91007183).value,
36.93157357, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
28.717, 77.3, 29.0, 48.24116215, p=0.001,
tau=90, R001=63.61888808).value,
87.88505965, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
3.133, 101.7, 29.0, 85.80457401, p=0.001,
tau=90, R001=99.15117186).value,
94.0437949, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation(
9.05, 38.7, 29.0, 20.14348033, p=0.001,
tau=90, R001=42.91007183).value,
46.76694226, places=5)
def test_probability_of_rain_attenuation(self):
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
51.5, -0.14, 31.07694309).value,
7.32466089, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
41.9, 12.49, 40.23202374).value,
7.08992377, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
33.94, 18.43, 46.35969261).value,
1.74467895, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
22.9, -43.23, 22.27833468).value,
2.5828985, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
25.78, -80.22, 52.6789929).value,
4.0392312, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
28.717, 77.3, 48.24116215).value,
1.64420965, places=5)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
3.133, 101.7, 85.80457401).value,
5.00075505, places=4)
self.assertAlmostEqual(
models.itu618.rain_attenuation_probability(
9.05, 38.7, 20.14348033).value,
7.0357202, places=5)
def test_scintillation_attenuation(self):
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 1, 1, 0.65).value,
0.26193234, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 1, 1, 0.65).value,
0.22405226, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 1, 1, 0.65).value,
0.23279942, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.1, 1, 0.65).value,
0.4228461, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.1, 1, 0.65).value,
0.36169504, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.1, 1, 0.65).value,
0.37581586, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.01, 1, 0.65).value,
0.62828836, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.01, 1, 0.65).value,
0.5374267, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.01, 1, 0.65).value,
0.55840821, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 14.25, 31.07694309, 0.001, 1, 0.65).value,
0.91021486, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 14.25, 40.23202374, 0.001, 1, 0.65).value,
0.77858162, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 14.25, 46.35969261, 0.001, 1, 0.65).value,
0.80897798, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 1, 1, 0.65).value,
0.38849319, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 1, 1, 0.65).value,
0.33115269, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 1, 1, 0.65).value,
0.34339899, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.1, 1, 0.65).value,
0.62715751, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.1, 1, 0.65).value,
0.53459083, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.1, 1, 0.65).value,
0.55436043, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.01, 1, 0.65).value,
0.93186567, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.01, 1, 0.65).value,
0.79432493, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.01, 1, 0.65).value,
0.82369971, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
51.5, -0.14, 29, 31.07694309, 0.001, 1, 0.65).value,
1.35001384, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
41.9, 12.49, 29, 40.23202374, 0.001, 1, 0.65).value,
1.15075561, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
33.94, 18.43, 29, 46.35969261, 0.001, 1, 0.65).value,
1.19331148, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 1, 1, 0.65).value,
0.62009744, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 1, 1, 0.65).value,
0.2664749, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.1, 1, 0.65).value,
1.00104396, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.1, 1, 0.65).value,
0.43017931, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.01, 1, 0.65).value,
1.48740705, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.01, 1, 0.65).value,
0.63918446, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 14.25, 22.27833468, 0.001, 1, 0.65).value,
2.15483859, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 14.25, 52.6789929, 0.001, 1, 0.65).value,
0.92600027, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 1, 1, 0.65).value,
0.92341029, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 1, 1, 0.65).value,
0.39237999, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.1, 1, 0.65).value,
1.49069201, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.1, 1, 0.65).value,
0.63343209, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.01, 1, 0.65).value,
2.21495349, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.01, 1, 0.65).value,
0.9411888, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
22.9, -43.23, 29, 22.27833468, 0.001, 1, 0.65).value,
3.20885076, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
25.78, -80.22, 29, 52.6789929, 0.001, 1, 0.65).value,
1.36352046, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 1, 1, 0.65).value,
0.2156413, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 1, 1, 0.65).value,
0.22167129, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 1, 1, 0.65).value,
0.48533645, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.1, 1, 0.65).value,
0.34811693, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.1, 1, 0.65).value,
0.35785136, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.1, 1, 0.65).value,
0.78349481, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.01, 1, 0.65).value,
0.51725159, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.01, 1, 0.65).value,
0.53171554, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.01, 1, 0.65).value,
1.16416037, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 14.25, 48.24116215, 0.001, 1, 0.65).value,
0.7493535, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 14.25, 85.80457401, 0.001, 1, 0.65).value,
0.77030774, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 14.25, 20.14348033, 0.001, 1, 0.65).value,
1.68654418, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 1, 1, 0.65).value,
0.31791278, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 1, 1, 0.65).value,
0.32486881, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 1, 1, 0.65).value,
0.72351614, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.1, 1, 0.65).value,
0.5132172, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.1, 1, 0.65).value,
0.52444655, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.1, 1, 0.65).value,
1.16799623, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.01, 1, 0.65).value,
0.76256679, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.01, 1, 0.65).value,
0.77925198, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.01, 1, 0.65).value,
1.73547406, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
28.717, 77.3, 29, 48.24116215, 0.001, 1, 0.65).value,
1.10474691, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
3.133, 101.7, 29, 85.80457401, 0.001, 1, 0.65).value,
1.12891911, places=5)
self.assertAlmostEqual(
models.itu618.scintillation_attenuation(
9.05, 38.7, 29, 20.14348033, 0.001, 1, 0.65).value,
2.5142186, places=5)
def test_rain_cross_polarization_discrimination(self):
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
0.4891464, 14.25, 31.07694309, 1.0, 0).value,
49.57582307, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
0.62159245, 14.25, 40.23202374, 1.0, 0).value,
49.3981550, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
0.42101702, 14.25, 46.35969261, 1.0, 0).value,
53.93857057, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.16093996, 14.25, 31.07694309, 0.1, 0).value,
40.29800396, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.69015654, 14.25, 40.23202374, 0.1, 0).value,
40.28034662, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.91338757, 14.25, 46.35969261, 0.1, 0).value,
44.68265675, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.72784425, 14.25, 31.07694309, 0.01, 0).value,
32.97842659, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
8.20500328, 14.25, 40.23202374, 0.01, 0).value,
33.13972017, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
5.9418061, 14.25, 46.35969261, 0.01, 0).value,
37.62918682, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
14.76177358, 14.25, 31.07694309, 0.001, 0).value,
28.14021762, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
17.636376, 14.25, 40.23202374, 0.001, 0).value,
28.49940232, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
12.98151687, 14.25, 46.35969261, 0.001, 0).value,
33.07510332, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.17898357, 29.0, 31.07694309, 1.0, 0).value,
44.30006506, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
2.81537632, 29.0, 40.23202374, 1.0, 0).value,
43.8603725, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.96063611, 29.0, 46.35969261, 1.0, 0).value,
48.36964892, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
8.46779316, 29.0, 31.07694309, 0.1, 0).value,
35.03444, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
10.70289842, 29.0, 40.23202374, 0.1, 0).value,
34.76315732, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
7.80832251, 29.0, 46.35969261, 0.1, 0).value,
39.12690283, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
23.1908096, 29.0, 31.07694309, 0.01, 0).value,
27.96431726, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
28.67449232, 29.0, 40.23202374, 0.01, 0).value,
27.8830305, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
21.24861968, 29.0, 46.35969261, 0.01, 0).value,
32.34366876, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
44.76009125, 29.0, 31.07694309, 0.001, 0).value,
23.64462724, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
54.14015005, 29.0, 40.23202374, 0.001, 0).value,
23.7749224, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
40.68133015, 29.0, 46.35969261, 0.001, 0).value,
28.33381119, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.70690128, 14.25, 22.27833468, 1.0, 0).value,
38.65072987, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.43904149, 14.25, 52.6789929, 1.0, 0).value,
46.23051298, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
8.27164744, 14.25, 22.27833468, 0.1, 0).value,
27.9634536, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.30417186, 14.25, 52.6789929, 0.1, 0).value,
36.82555192, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
18.94410356, 14.25, 22.27833468, 0.01, 0).value,
22.64492814, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
16.44617644, 14.25, 52.6789929, 0.01, 0).value,
30.86009092, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
29.91171296, 14.25, 22.27833468, 0.001, 0).value,
20.29292318, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
29.95767701, 14.25, 52.6789929, 0.001, 0).value,
27.62415271, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.81336808, 29.0, 22.27833468, 1.0, 0).value,
33.64688473, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.66385625, 29.0, 52.6789929, 1.0, 0).value,
40.0755612, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
29.31896844, 29.0, 22.27833468, 0.1, 0).value,
22.85413903, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
25.59455941, 29.0, 52.6789929, 0.1, 0).value,
30.6650529, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
59.62576355, 29.0, 22.27833468, 0.01, 0).value,
17.88255372, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
58.53988572, 29.0, 52.6789929, 0.01, 0).value,
25.03203051, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
83.5996391, 29.0, 22.27833468, 0.001, 0).value,
16.16922861, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
93.48939944, 29.0, 52.6789929, 0.001, 0).value,
22.41718851, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.2731081, 14.25, 48.24116215, 1.0, 90).value,
45.80237934, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.93713255, 14.25, 85.80457401, 1.0, 90).value,
75.12972446, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
1.04440572, 14.25, 20.14348033, 1.0, 90).value,
42.28242577, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
5.48101228, 14.25, 48.24116215, 0.1, 90).value,
36.51649699, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
10.67987642, 14.25, 85.80457401, 0.1, 90).value,
65.51910372, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
6.0510347, 14.25, 20.14348033, 0.1, 90).value,
30.32827626, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
14.85903351, 14.25, 48.24116215, 0.01, 90).value,
30.19759496, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
21.03740448, 14.25, 85.80457401, 0.01, 90).value,
63.60558975, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
12.61120361, 14.25, 20.14348033, 0.01, 90).value,
25.96615447, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
28.21372983, 14.25, 48.24116215, 0.001, 90).value,
26.54453432, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
28.13337932, 14.25, 85.80457401, 0.001, 90).value,
64.9390721, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
17.85045772, 14.25, 20.14348033, 0.001, 90).value,
24.79563761, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
5.88085649, 29.0, 48.24116215, 1.0, 90).value,
39.73121592, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
9.84052929, 29.0, 85.80457401, 1.0, 90).value,
68.04931733, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
3.8213237, 29.0, 20.14348033, 1.0, 90).value,
38.25788658, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
22.20219047, 29.0, 48.24116215, 0.1, 90).value,
30.45232663, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
47.18910296, 29.0, 85.80457401, 0.1, 90).value,
58.32352317, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
19.80717661, 29.0, 20.14348033, 0.1, 90).value,
26.0924586, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
52.7819415, 29.0, 48.24116215, 0.01, 90).value,
24.4471052, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
80.85074503, 29.0, 85.80457401, 0.01, 90).value,
56.92074963, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
36.93157357, 29.0, 20.14348033, 0.01, 90).value,
22.11041426, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
87.88505965, 29.0, 48.24116215, 0.001, 90).value,
21.39198393, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
94.0437949, 29.0, 85.80457401, 0.001, 90).value,
59.09547625, places=5)
self.assertAlmostEqual(
models.itu618.rain_cross_polarization_discrimination(
46.76694226, 29.0, 20.14348033, 0.001, 90).value,
21.61918999, places=5)
def total_attenuation_fcn(self, lat, lon, f, el, p, D, eta, tau,
val_g, val_c, val_r, val_s, val_t):
R001 = models.itu837.rainfall_rate(lat, lon, 0.01000000001)
A_g, A_c, A_r, A_s, A = itur.atmospheric_attenuation_slant_path(
lat, lon, f, el, p, D, eta=eta, tau=tau, R001=R001,
return_contributions=True)
self.assertAlmostEqual(A_g.value, val_g, places=5)
self.assertAlmostEqual(A_c.value, val_c, places=5)
self.assertAlmostEqual(A_r.value, val_r, places=5)
self.assertAlmostEqual(A_s.value, val_s, places=5)
self.assertAlmostEqual(A.value, val_t, places=5)
def test_total_attenuation(self):
    """Validate the combined slant-path attenuation over a grid of cases.

    Each tuple holds (lat, lon, f, el, p, D, eta, tau) followed by the
    expected gaseous, cloud, rain, scintillation and total attenuation
    values in dB; the assertions are delegated to
    ``total_attenuation_fcn``.
    """
    cases = [
        (51.5, -0.14, 14.25, 31.07694309, 1, 1, 0.65, 0,
         0.223693782, 0.45517046, 0.48914539, 0.26193234, 1.203663661),
        (41.9, 12.49, 14.25, 40.23202374, 1, 1, 0.65, 0,
         0.184499507, 0.26338517, 0.62159459, 0.22405226, 1.097400703),
        (33.94, 18.43, 14.25, 46.35969261, 1, 1, 0.65, 0,
         0.168635988, 0.18779409, 0.42101546, 0.23279942, 0.820437057),
        (51.5, -0.14, 14.25, 31.07694309, 0.1, 1, 0.65, 0,
         0.223693782, 0.45517046, 2.16093588, 0.4228461, 2.873752501),
        (41.9, 12.49, 14.25, 40.23202374, 0.1, 1, 0.65, 0,
         0.184499507, 0.26338517, 2.69016502, 0.36169504, 3.16011407),
        (33.94, 18.43, 14.25, 46.35969261, 0.1, 1, 0.65, 0,
         0.168635988, 0.18779409, 1.91338106, 0.37581586, 2.303155743),
        (51.5, -0.14, 14.25, 31.07694309, 0.01, 1, 0.65, 0,
         0.223693782, 0.45517046, 6.72783273, 0.62828836, 7.434122415),
        (41.9, 12.49, 14.25, 40.23202374, 0.01, 1, 0.65, 0,
         0.184499507, 0.26338517, 8.20502671, 0.5374267, 8.669947478),
        (33.94, 18.43, 14.25, 46.35969261, 0.01, 1, 0.65, 0,
         0.168635988, 0.18779409, 5.94178778, 0.55840821, 6.323600947),
        (51.5, -0.14, 14.25, 31.07694309, 0.001, 1, 0.65, 0,
         0.223693782, 0.45517046, 14.76175093, 0.91021486, 15.46781355),
        (41.9, 12.49, 14.25, 40.23202374, 0.001, 1, 0.65, 0,
         0.184499507, 0.26338517, 17.63642115, 0.77858162, 18.10123067),
        (33.94, 18.43, 14.25, 46.35969261, 0.001, 1, 0.65, 0,
         0.168635988, 0.18779409, 12.981481, 0.80897798, 13.36273512),
        (51.5, -0.14, 29, 31.07694309, 1, 1, 0.65, 0,
         0.799999368, 1.77247154, 2.17897957, 0.38849319, 4.770502219),
        (41.9, 12.49, 29, 40.23202374, 1, 1, 0.65, 0,
         0.673619867, 1.0256437, 2.81538514, 0.33115269, 4.528897381),
        (33.94, 18.43, 29, 46.35969261, 1, 1, 0.65, 0,
         0.62972417, 0.73128577, 1.96062953, 0.34339899, 3.343454224),
        (51.5, -0.14, 29, 31.07694309, 0.1, 1, 0.65, 0,
         0.799999368, 1.77247154, 8.46777895, 0.62715751, 11.05943682),
        (41.9, 12.49, 29, 40.23202374, 0.1, 1, 0.65, 0,
         0.673619867, 1.0256437, 10.70292908, 0.53459083, 12.41436971),
        (33.94, 18.43, 29, 46.35969261, 0.1, 1, 0.65, 0,
         0.62972417, 0.73128577, 7.80829852, 0.55436043, 9.18728313),
        (51.5, -0.14, 29, 31.07694309, 0.01, 1, 0.65, 0,
         0.799999368, 1.77247154, 23.19077435, 0.93186567, 25.78063225),
        (41.9, 12.49, 29, 40.23202374, 0.01, 1, 0.65, 0,
         0.673619867, 1.0256437, 28.67456675, 0.79432493, 30.38445044),
        (33.94, 18.43, 29, 46.35969261, 0.01, 1, 0.65, 0,
         0.62972417, 0.73128577, 21.24856054, 0.82369971, 22.62499923),
        (51.5, -0.14, 29, 31.07694309, 0.001, 1, 0.65, 0,
         0.799999368, 1.77247154, 44.76003026, 1.35001384, 47.35208054),
        (41.9, 12.49, 29, 40.23202374, 0.001, 1, 0.65, 0,
         0.673619867, 1.0256437, 54.14027603, 1.15075561, 55.85154062),
        (33.94, 18.43, 29, 46.35969261, 0.001, 1, 0.65, 0,
         0.62972417, 0.73128577, 40.68122866, 1.19331148, 42.05942781),
        (22.9, -43.23, 14.25, 22.27833468, 1, 1, 0.65, 0,
         0.383178724, 0.54183293, 1.70690691, 0.62009744, 2.715849229),
        (25.78, -80.22, 14.25, 52.6789929, 1, 1, 0.65, 0,
         0.206227197, 0.53317506, 1.43904233, 0.2664749, 2.196365451),
        (22.9, -43.23, 14.25, 22.27833468, 0.1, 1, 0.65, 0,
         0.383178724, 0.54183293, 8.27167236, 1.00104396, 9.253351467),
        (25.78, -80.22, 14.25, 52.6789929, 0.1, 1, 0.65, 0,
         0.206227197, 0.53317506, 6.30417519, 0.43017931, 7.057096675),
        (22.9, -43.23, 14.25, 22.27833468, 0.01, 1, 0.65, 0,
         0.383178724, 0.54183293, 18.94415527, 1.48740705, 19.92585295),
        (25.78, -80.22, 14.25, 52.6789929, 0.01, 1, 0.65, 0,
         0.206227197, 0.53317506, 16.44618432, 0.63918446, 17.1976133),
        (22.9, -43.23, 14.25, 22.27833468, 0.001, 1, 0.65, 0,
         0.383178724, 0.54183293, 29.91178614, 2.15483859, 30.91293869),
        (25.78, -80.22, 14.25, 52.6789929, 0.001, 1, 0.65, 0,
         0.206227197, 0.53317506, 29.95768987, 0.92600027, 30.71115009),
        (22.9, -43.23, 29, 22.27833468, 1, 1, 0.65, 0,
         1.504259763, 2.1099424, 6.81338837, 0.92341029, 10.4752418),
        (25.78, -80.22, 29, 52.6789929, 1, 1, 0.65, 0,
         0.827675954, 2.07622792, 6.66385994, 0.39237999, 9.576567189),
        (22.9, -43.23, 29, 22.27833468, 0.1, 1, 0.65, 0,
         1.504259763, 2.1099424, 29.31904828, 1.49069201, 32.9685827),
        (25.78, -80.22, 29, 52.6789929, 0.1, 1, 0.65, 0,
         0.827675954, 2.07622792, 25.59457239, 0.63343209, 28.50572549),
        (22.9, -43.23, 29, 22.27833468, 0.01, 1, 0.65, 0,
         1.504259763, 2.1099424, 59.62591067, 2.21495349, 63.27983401),
        (25.78, -80.22, 29, 52.6789929, 0.01, 1, 0.65, 0,
         0.827675954, 2.07622792, 58.53991262, 0.9411888, 61.45112298),
        (22.9, -43.23, 29, 22.27833468, 0.001, 1, 0.65, 0,
         1.504259763, 2.1099424, 83.59982398, 3.20885076, 87.2740725),
        (25.78, -80.22, 29, 52.6789929, 0.001, 1, 0.65, 0,
         0.827675954, 2.07622792, 93.48943794, 1.36352046, 96.4030686),
        (28.717, 77.3, 14.25, 48.24116215, 1, 1, 0.65, 90,
         0.257653026, 0.68592197, 1.27311232, 0.2156413, 2.228519972),
        (3.133, 101.7, 14.25, 85.80457401, 1, 1, 0.65, 90,
         0.163655312, 0.62211863, 1.93712821, 0.22167129, 2.732484342),
        (9.05, 38.7, 14.25, 20.14348033, 1, 1, 0.65, 90,
         0.22310495, 0.65764822, 1.04440674, 0.48533645, 1.993003982),
        (28.717, 77.3, 14.25, 48.24116215, 0.1, 1, 0.65, 90,
         0.257653026, 0.68592197, 5.48102886, 0.34811693, 6.434421439),
        (3.133, 101.7, 14.25, 85.80457401, 0.1, 1, 0.65, 90,
         0.163655312, 0.62211863, 10.67985456, 0.35785136, 11.47129236),
        (9.05, 38.7, 14.25, 20.14348033, 0.1, 1, 0.65, 90,
         0.22310495, 0.65764822, 6.05104013, 0.78349481, 6.977389778),
        (28.717, 77.3, 14.25, 48.24116215, 0.01, 1, 0.65, 90,
         0.257653026, 0.68592197, 14.85907425, 0.51725159, 15.81125251),
        (3.133, 101.7, 14.25, 85.80457401, 0.01, 1, 0.65, 90,
         0.163655312, 0.62211863, 21.03736546, 0.53171554, 21.82966493),
        (9.05, 38.7, 14.25, 20.14348033, 0.01, 1, 0.65, 90,
         0.22310495, 0.65764822, 12.61121387, 1.16416037, 13.54293868),
        (28.717, 77.3, 14.25, 48.24116215, 0.001, 1, 0.65, 90,
         0.257653026, 0.68592197, 28.21379917, 0.7493535, 29.16708769),
        (3.133, 101.7, 14.25, 85.80457401, 0.001, 1, 0.65, 90,
         0.163655312, 0.62211863, 28.13333254, 0.77030774, 28.92942223),
        (9.05, 38.7, 14.25, 20.14348033, 0.001, 1, 0.65, 90,
         0.22310495, 0.65764822, 17.85047073, 1.68654418, 18.80790784),
        (28.717, 77.3, 29, 48.24116215, 1, 1, 0.65, 90,
         1.038585522, 2.67103709, 5.88087518, 0.31791278, 9.596404871),
        (3.133, 101.7, 29, 85.80457401, 1, 1, 0.65, 90,
         0.645959831, 2.42258159, 9.84050759, 0.32486881, 12.9133514),
        (9.05, 38.7, 29, 20.14348033, 1, 1, 0.65, 90,
         0.703128217, 2.56093676, 3.82132703, 0.72351614, 7.126271267),
        (28.717, 77.3, 29, 48.24116215, 0.1, 1, 0.65, 90,
         1.038585522, 2.67103709, 22.20225497, 0.5132172, 25.9171717),
        (3.133, 101.7, 29, 85.80457401, 0.1, 1, 0.65, 90,
         0.645959831, 2.42258159, 47.18900784, 0.52444655, 50.26032116),
        (9.05, 38.7, 29, 20.14348033, 0.1, 1, 0.65, 90,
         0.703128217, 2.56093676, 19.80719237, 1.16799623, 23.10173121),
        (28.717, 77.3, 29, 48.24116215, 0.01, 1, 0.65, 90,
         1.038585522, 2.67103709, 52.78208044, 0.76256679, 56.49694605),
        (3.133, 101.7, 29, 85.80457401, 0.01, 1, 0.65, 90,
         0.645959831, 2.42258159, 80.85059735, 0.77925198, 83.92278473),
        (9.05, 38.7, 29, 20.14348033, 0.01, 1, 0.65, 90,
         0.703128217, 2.56093676, 36.9316002, 1.73547406, 40.23377893),
        (28.717, 77.3, 29, 48.24116215, 0.001, 1, 0.65, 90,
         1.038585522, 2.67103709, 87.88526702, 1.10474691, 91.6016281),
        (3.133, 101.7, 29, 85.80457401, 0.001, 1, 0.65, 90,
         0.645959831, 2.42258159, 94.04364093, 1.12891911, 97.11878785),
        (9.05, 38.7, 29, 20.14348033, 0.001, 1, 0.65, 90,
         0.703128217, 2.56093676, 46.76697248, 2.5142186, 50.09507012),
    ]
    for case in cases:
        self.total_attenuation_fcn(*case)
class ITUR840_7TestCase(test.TestCase):
    """Validation tests for ITU-R P.840 version 7 (cloud attenuation)."""

    def setUp(self):
        # Every assertion below targets version 7 of the recommendation.
        models.itu840.change_version(7)

    def test_columnar_content_reduced_liquid(self):
        """Check the reduced columnar liquid-water content on a case grid."""
        # (latitude [deg], longitude [deg], exceedance probability [%],
        #  expected columnar content)
        cases = [
            (3.133, 101.7, 0.1, 3.805251208),
            (3.133, 101.7, 0.15, 3.744512329),
            (3.133, 101.7, 0.3, 3.630957766),
            (3.133, 101.7, 0.35, 3.594946111),
            (22.9, -43.23, 0.1, 2.829931669),
            (22.9, -43.23, 0.15, 2.615428331),
            (22.9, -43.23, 0.3, 2.152560931),
            (22.9, -43.23, 0.35, 2.030424796),
            (23, 30, 0.1, 0.443821013),
            (23, 30, 0.15, 0.367758574),
            (23, 30, 0.3, 0.25249597),
            (23, 30, 0.35, 0.230476914),
            (25.78, -80.22, 0.1, 3.52927514),
            (25.78, -80.22, 0.15, 3.368053109),
            (25.78, -80.22, 0.3, 3.090031167),
            (25.78, -80.22, 0.35, 2.98280226),
            (28.717, 77.3, 0.1, 4.230726014),
            (28.717, 77.3, 0.15, 4.004951665),
            (28.717, 77.3, 0.3, 3.641943304),
            (28.717, 77.3, 0.35, 3.550068054),
            (33.94, 18.43, 0.1, 1.476285677),
            (33.94, 18.43, 0.15, 1.342662497),
            (33.94, 18.43, 0.3, 1.117630129),
            (33.94, 18.43, 0.35, 1.061278891),
            (41.9, 12.49, 0.1, 1.498459518),
            (41.9, 12.49, 0.15, 1.411411719),
            (41.9, 12.49, 0.3, 1.254176128),
            (41.9, 12.49, 0.35, 1.214239524),
            (51.5, -0.14, 0.1, 1.903298487),
            (51.5, -0.14, 0.15, 1.803803604),
            (51.5, -0.14, 0.3, 1.641289077),
        ]
        for lat, lon, p, expected in cases:
            self.assertAlmostEqual(
                models.itu840.columnar_content_reduced_liquid(
                    lat, lon, p).value,
                expected, places=5)

    def test_cloud_attenuation(self):
        """Check slant-path cloud attenuation against reference values."""
        # (latitude [deg], longitude [deg], elevation [deg],
        #  frequency [GHz], exceedance probability [%], expected [dB])
        cases = [
            (51.5, -0.14, 31.07694309, 14.25, 1.0, 0.45517046),
            (41.9, 12.49, 40.23202374, 14.25, 1.0, 0.26338517),
            (33.94, 18.43, 46.35969261, 14.25, 1.0, 0.18779409),
            (51.5, -0.14, 31.07694309, 14.25, 0.5, 0.53457216),
            (41.9, 12.49, 40.23202374, 14.25, 0.5, 0.3230387),
            (33.94, 18.43, 46.35969261, 14.25, 0.5, 0.23923797),
            (51.5, -0.14, 31.07694309, 14.25, 0.3, 0.59136745),
            (41.9, 12.49, 40.23202374, 14.25, 0.3, 0.36114741),
            (33.94, 18.43, 46.35969261, 14.25, 0.3, 0.2872291),
            (51.5, -0.14, 31.07694309, 14.25, 0.2, 0.62448748),
            (41.9, 12.49, 40.23202374, 14.25, 0.2, 0.38863977),
            (33.94, 18.43, 46.35969261, 14.25, 0.2, 0.32069677),
            (51.5, -0.14, 31.07694309, 29, 1.0, 1.77247154),
            (41.9, 12.49, 40.23202374, 29, 1.0, 1.0256437),
            (33.94, 18.43, 46.35969261, 29, 1.0, 0.73128577),
            (51.5, -0.14, 31.07694309, 29, 0.5, 2.08166837),
            (41.9, 12.49, 40.23202374, 29, 0.5, 1.2579395),
            (33.94, 18.43, 46.35969261, 29, 0.5, 0.9316125),
            (51.5, -0.14, 31.07694309, 29, 0.3, 2.30283391),
            (41.9, 12.49, 40.23202374, 29, 0.3, 1.40633801),
            (33.94, 18.43, 46.35969261, 29, 0.3, 1.11849396),
            (51.5, -0.14, 31.07694309, 29, 0.2, 2.43180607),
            (41.9, 12.49, 40.23202374, 29, 0.2, 1.51339553),
            (33.94, 18.43, 46.35969261, 29, 0.2, 1.24881983),
            (22.9, -43.23, 22.27833468, 14.25, 1.0, 0.54183293),
            (25.78, -80.22, 52.6789929, 14.25, 1.0, 0.53317506),
            (22.9, -43.23, 22.27833468, 14.25, 0.5, 0.85746792),
            (25.78, -80.22, 52.6789929, 14.25, 0.5, 0.63956606),
            (22.9, -43.23, 22.27833468, 14.25, 0.3, 1.05602769),
            (25.78, -80.22, 52.6789929, 14.25, 0.3, 0.72266885),
            (22.9, -43.23, 22.27833468, 14.25, 0.2, 1.20844208),
            (25.78, -80.22, 52.6789929, 14.25, 0.2, 0.76093789),
            (22.9, -43.23, 22.27833468, 29, 1.0, 2.1099424),
            (25.78, -80.22, 52.6789929, 29, 1.0, 2.07622792),
            (22.9, -43.23, 22.27833468, 29, 0.5, 3.33905126),
            (25.78, -80.22, 52.6789929, 29, 0.5, 2.49052334),
            (22.9, -43.23, 22.27833468, 29, 0.3, 4.11225948),
            (25.78, -80.22, 52.6789929, 29, 0.3, 2.81413248),
            (22.9, -43.23, 22.27833468, 29, 0.2, 4.70577375),
            (25.78, -80.22, 52.6789929, 29, 0.2, 2.96315532),
            (28.72, 77.3, 48.24116215, 14.25, 1.0, 0.68560078),
            (3.13, 101.7, 85.80457401, 14.25, 1.0, 0.62214817),
            (9.05, 38.7, 20.14348033, 14.25, 1.0, 0.65764822),
            (28.72, 77.3, 48.24116215, 14.25, 0.5, 0.83179446),
            (3.13, 101.7, 85.80457401, 14.25, 0.5, 0.65489922),
            (9.05, 38.7, 20.14348033, 14.25, 0.5, 0.7181604),
            (28.72, 77.3, 48.24116215, 14.25, 0.3, 0.90773089),
            (3.13, 101.7, 85.80457401, 14.25, 0.3, 0.6771593),
            (9.05, 38.7, 20.14348033, 14.25, 0.3, 0.75244454),
            (28.72, 77.3, 48.24116215, 14.25, 0.2, 0.95830261),
            (3.13, 101.7, 85.80457401, 14.25, 0.2, 0.69030616),
            (9.05, 38.7, 20.14348033, 14.25, 0.2, 0.77111549),
            (28.72, 77.3, 48.24116215, 29, 1.0, 2.66978635),
            (3.13, 101.7, 85.80457401, 29, 1.0, 2.42269662),
            (9.05, 38.7, 20.14348033, 29, 1.0, 2.56093676),
            (28.72, 77.3, 48.24116215, 29, 0.5, 3.23907665),
            (3.13, 101.7, 85.80457401, 29, 0.5, 2.55023192),
            (9.05, 38.7, 20.14348033, 29, 0.5, 2.79657622),
            (28.72, 77.3, 48.24116215, 29, 0.3, 3.53477943),
            (3.13, 101.7, 85.80457401, 29, 0.3, 2.63691452),
            (9.05, 38.7, 20.14348033, 29, 0.3, 2.93008149),
            (28.72, 77.3, 48.24116215, 29, 0.2, 3.73170991),
            (3.13, 101.7, 85.80457401, 29, 0.2, 2.68810948),
            (9.05, 38.7, 20.14348033, 29, 0.2, 3.00278773),
        ]
        for lat, lon, el, f, p, expected in cases:
            self.assertAlmostEqual(
                models.itu840.cloud_attenuation(lat, lon, el, f, p).value,
                expected, places=5)
class ITUR1511_1TestCase(test.TestCase):
    """Validation tests for ITU-R P.1511 version 1 (topographic altitude)."""

    def setUp(self):
        # Pin the model to version 1 before every test.
        models.itu1511.change_version(1)

    def test_topographic_altitude(self):
        """Check the interpolated topographic altitude for known sites."""
        # (latitude [deg], longitude [deg], expected altitude)
        cases = [
            (3.133, 101.7, 0.23610446),
            (22.9, -43.23, 0.0),
            (23.0, 30.0, 0.247),
            (25.78, -80.22, 7.511e-05),
            (28.717, 77.3, 0.21755946),
            (33.94, 18.43, 0.0),
            (41.9, 12.49, 0.05670104),
            (51.5, -0.14, 0.06916422),
        ]
        for lat, lon, expected in cases:
            self.assertAlmostEqual(
                models.itu1511.topographic_altitude(lat, lon).value,
                expected, places=5)
class ITUR1511_2TestCase(test.TestCase):
    """Validation tests for ITU-R P.1511 version 2 (topographic altitude)."""

    def setUp(self):
        # Pin the model to version 2 before every test.
        models.itu1511.change_version(2)

    def test_topographic_altitude(self):
        """Check the interpolated topographic altitude for known sites."""
        # (latitude [deg], longitude [deg], expected altitude,
        #  decimal places used in the comparison)
        cases = [
            (51.5, -0.14, 0.031382983999999, 4),
            (41.9, 12.49, 0.0461229880100015, 4),
            (33.94, 18.43, 0, 5),
            (22.9, -43.23, 0, 5),
            (25.78, -80.22, 0.00861727999508758, 4),
            (28.717, 77.3, 0.209383698952704, 4),
            (3.133, 101.7, 0.0512514559528945, 4),
            (9.05, 38.7, 2.5398618775, 4),
        ]
        for lat, lon, expected, places in cases:
            self.assertAlmostEqual(
                models.itu1511.topographic_altitude(lat, lon).value,
                expected, places=places)
if __name__ == '__main__':
    # Build the full validation suite and run it through the verbose
    # text runner. The result is bound to `test_suite` rather than
    # `suite` so the module-level `suite()` factory is not shadowed
    # (the original `suite = suite()` rebinding made a second call
    # impossible); a stray dead `pass` statement was also removed.
    test_suite = suite()
    print('Validation tests for the ITU-R models')
    print('------------------------')
    print(
        'A total of %d test-cases are going to be tested' %
        test_suite.countTestCases())
    sys.stdout.flush()  # make sure the banner precedes the runner output
    test.TextTestRunner(verbosity=2).run(test_suite)
| true
| true
|
1c428eae5cf4633221c128c42d673f87b5eb1d9e
| 34,233
|
py
|
Python
|
src/transformers/configuration_utils.py
|
JadeMaveric/transformers
|
fb2b89840bf2ab9f74702bf83af8ddf92b61efb3
|
[
"Apache-2.0"
] | 1
|
2021-03-31T02:23:56.000Z
|
2021-03-31T02:23:56.000Z
|
src/transformers/configuration_utils.py
|
JadeMaveric/transformers
|
fb2b89840bf2ab9f74702bf83af8ddf92b61efb3
|
[
"Apache-2.0"
] | null | null | null |
src/transformers/configuration_utils.py
|
JadeMaveric/transformers
|
fb2b89840bf2ab9f74702bf83af8ddf92b61efb3
|
[
"Apache-2.0"
] | 1
|
2020-11-02T06:37:04.000Z
|
2020-11-02T06:37:04.000Z
|
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Configuration base class and utilities."""
import copy
import json
import os
from typing import Any, Dict, Tuple, Union
from . import __version__
from .file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_offline_mode, is_remote_url
from .utils import logging
logger = logging.get_logger(__name__)
class PretrainedConfig(object):
r"""
Base class for all configuration classes. Handles a few parameters common to all models' configurations as well as
methods for loading/downloading/saving configurations.
Note: A configuration file can be loaded and saved to disk. Loading the configuration file and using this file to
initialize a model does **not** load the model weights. It only affects the model's configuration.
Class attributes (overridden by derived classes)
- **model_type** (:obj:`str`): An identifier for the model type, serialized into the JSON file, and used to
recreate the correct object in :class:`~transformers.AutoConfig`.
- **is_composition** (:obj:`bool`): Whether the config class is composed of multiple sub-configs. In this case
the config has to be initialized from two or more configs of type :class:`~transformers.PretrainedConfig`
like: :class:`~transformers.EncoderDecoderConfig` or :class:`~RagConfig`.
- **keys_to_ignore_at_inference** (:obj:`List[str]`): A list of keys to ignore by default when looking at
dictionary outputs of the model during inference.
Args:
name_or_path (:obj:`str`, `optional`, defaults to :obj:`""`):
Store the string that was passed to :func:`~transformers.PreTrainedModel.from_pretrained` or
:func:`~transformers.TFPreTrainedModel.from_pretrained` as ``pretrained_model_name_or_path`` if the
configuration was created with such a method.
output_hidden_states (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should return all hidden-states.
output_attentions (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not the model should returns all attentions.
return_dict (:obj:`bool`, `optional`, defaults to :obj:`True`):
Whether or not the model should return a :class:`~transformers.file_utils.ModelOutput` instead of a plain
tuple.
is_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as an encoder/decoder or not.
is_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether the model is used as decoder or not (in which case it's used as an encoder).
add_cross_attention (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether cross-attention layers should be added to the model. Note, this option is only relevant for models
that can be used as decoder models within the `:class:~transformers.EncoderDecoderModel` class, which
consists of all models in ``AUTO_MODELS_FOR_CAUSAL_LM``.
tie_encoder_decoder (:obj:`bool`, `optional`, defaults to :obj:`False`)
Whether all encoder weights should be tied to their equivalent decoder weights. This requires the encoder
and decoder model to have the exact same parameter names.
prune_heads (:obj:`Dict[int, List[int]]`, `optional`, defaults to :obj:`{}`):
Pruned heads of the model. The keys are the selected layer indices and the associated values, the list of
heads to prune in said layer.
For instance ``{1: [0, 2], 2: [2, 3]}`` will prune heads 0 and 2 on layer 1 and heads 2 and 3 on layer 2.
chunk_size_feed_forward (:obj:`int`, `optional`, defaults to :obj:`0`):
The chunk size of all feed forward layers in the residual attention blocks. A chunk size of :obj:`0` means
that the feed forward layer is not chunked. A chunk size of n means that the feed forward layer processes
:obj:`n` < sequence_length embeddings at a time. For more information on feed forward chunking, see `How
does Feed Forward Chunking work? <../glossary.html#feed-forward-chunking>`__ .
Parameters for sequence generation
- **max_length** (:obj:`int`, `optional`, defaults to 20) -- Maximum length that will be used by default in the
:obj:`generate` method of the model.
- **min_length** (:obj:`int`, `optional`, defaults to 10) -- Minimum length that will be used by default in the
:obj:`generate` method of the model.
- **do_sample** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default in the
:obj:`generate` method of the model. Whether or not to use sampling ; use greedy decoding otherwise.
- **early_stopping** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Flag that will be used by default
in the :obj:`generate` method of the model. Whether to stop the beam search when at least ``num_beams``
sentences are finished per batch or not.
- **num_beams** (:obj:`int`, `optional`, defaults to 1) -- Number of beams for beam search that will be used by
default in the :obj:`generate` method of the model. 1 means no beam search.
- **num_beam_groups** (:obj:`int`, `optional`, defaults to 1) -- Number of groups to divide :obj:`num_beams`
into in order to ensure diversity among different groups of beams that will be used by default in the
:obj:`generate` method of the model. 1 means no group beam search.
- **diversity_penalty** (:obj:`float`, `optional`, defaults to 0.0) -- Value to control diversity for group
beam search. that will be used by default in the :obj:`generate` method of the model. 0 means no diversity
penalty. The higher the penalty, the more diverse are the outputs.
- **temperature** (:obj:`float`, `optional`, defaults to 1) -- The value used to module the next token
probabilities that will be used by default in the :obj:`generate` method of the model. Must be strictly
positive.
- **top_k** (:obj:`int`, `optional`, defaults to 50) -- Number of highest probability vocabulary tokens to keep
for top-k-filtering that will be used by default in the :obj:`generate` method of the model.
- **top_p** (:obj:`float`, `optional`, defaults to 1) -- Value that will be used by default in the
:obj:`generate` method of the model for ``top_p``. If set to float < 1, only the most probable tokens with
probabilities that add up to ``top_p`` or higher are kept for generation.
- **repetition_penalty** (:obj:`float`, `optional`, defaults to 1) -- Parameter for repetition penalty that
will be used by default in the :obj:`generate` method of the model. 1.0 means no penalty.
- **length_penalty** (:obj:`float`, `optional`, defaults to 1) -- Exponential penalty to the length that will
be used by default in the :obj:`generate` method of the model.
- **no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by default in the
:obj:`generate` method of the model for ``no_repeat_ngram_size``. If set to int > 0, all ngrams of that size
can only occur once.
- **encoder_no_repeat_ngram_size** (:obj:`int`, `optional`, defaults to 0) -- Value that will be used by
default in the :obj:`generate` method of the model for ``encoder_no_repeat_ngram_size``. If set to int > 0,
all ngrams of that size that occur in the ``encoder_input_ids`` cannot occur in the ``decoder_input_ids``.
- **bad_words_ids** (:obj:`List[int]`, `optional`) -- List of token ids that are not allowed to be generated
that will be used by default in the :obj:`generate` method of the model. In order to get the tokens of the
words that should not appear in the generated text, use :obj:`tokenizer.encode(bad_word,
add_prefix_space=True)`.
- **num_return_sequences** (:obj:`int`, `optional`, defaults to 1) -- Number of independently computed returned
sequences for each element in the batch that will be used by default in the :obj:`generate` method of the
model.
- **output_scores** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should return the
logits when used for generation
- **return_dict_in_generate** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether the model should
return a :class:`~transformers.file_utils.ModelOutput` instead of a :obj:`torch.LongTensor`
- **forced_bos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the first generated token
after the :obj:`decoder_start_token_id`. Useful for multilingual models like :doc:`mBART
<../model_doc/mbart>` where the first generated token needs to be the target language token.
- **forced_eos_token_id** (:obj:`int`, `optional`) -- The id of the token to force as the last generated token
when :obj:`max_length` is reached.
- **remove_invalid_values** (:obj:`bool`, `optional`) -- Whether to remove possible `nan` and `inf` outputs of
the model to prevent the generation method to crash. Note that using ``remove_invalid_values`` can slow down
generation.
Parameters for fine-tuning tasks
- **architectures** (:obj:`List[str]`, `optional`) -- Model architectures that can be used with the model
pretrained weights.
- **finetuning_task** (:obj:`str`, `optional`) -- Name of the task used to fine-tune the model. This can be
used when converting from an original (TensorFlow or PyTorch) checkpoint.
- **id2label** (:obj:`Dict[int, str]`, `optional`) -- A map from index (for instance prediction index, or
target index) to label.
- **label2id** (:obj:`Dict[str, int]`, `optional`) -- A map from label to index for the model.
- **num_labels** (:obj:`int`, `optional`) -- Number of labels to use in the last layer added to the model,
typically for a classification task.
- **task_specific_params** (:obj:`Dict[str, Any]`, `optional`) -- Additional keyword arguments to store for the
current task.
Parameters linked to the tokenizer
- **tokenizer_class** (:obj:`str`, `optional`) -- The name of the associated tokenizer class to use (if none is
set, will use the tokenizer associated to the model by default).
- **prefix** (:obj:`str`, `optional`) -- A specific prompt that should be added at the beginning of each text
before calling the model.
- **bos_token_id** (:obj:`int`, `optional`)) -- The id of the `beginning-of-stream` token.
- **pad_token_id** (:obj:`int`, `optional`)) -- The id of the `padding` token.
- **eos_token_id** (:obj:`int`, `optional`)) -- The id of the `end-of-stream` token.
- **decoder_start_token_id** (:obj:`int`, `optional`)) -- If an encoder-decoder model starts decoding with a
different token than `bos`, the id of that token.
- **sep_token_id** (:obj:`int`, `optional`)) -- The id of the `separation` token.
PyTorch specific parameters
- **torchscript** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should be
used with Torchscript.
- **tie_word_embeddings** (:obj:`bool`, `optional`, defaults to :obj:`True`) -- Whether the model's input and
output word embeddings should be tied. Note that this is only relevant if the model has a output word
embedding layer.
TensorFlow specific parameters
- **use_bfloat16** (:obj:`bool`, `optional`, defaults to :obj:`False`) -- Whether or not the model should use
BFloat16 scalars (only used by some TensorFlow models).
"""
model_type: str = ""
is_composition: bool = False
def __init__(self, **kwargs):
# Attributes with defaults
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False) # Only used by PyTorch models
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
) # Whether input and output word embeddings should be tied for all MLM, LM and Seq2Seq models.
# Is decoder is used in encoder-decoder models to differentiate encoder from decoder
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
# Parameters for sequence generation
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
# Fine-tuning task arguments
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
# Keys are always strings in JSON so convert ids to int here.
else:
self.num_labels = kwargs.pop("num_labels", 2)
# Tokenizer arguments TODO: eventually tokenizer and models should share the same config
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
# task specific arguments
self.task_specific_params = kwargs.pop("task_specific_params", None)
# TPU arguments
if kwargs.pop("xla_device", None) is not None:
logger.warn(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
# Name or path to the pretrained checkpoint
self._name_or_path = str(kwargs.pop("name_or_path", ""))
# Drop the transformers version info
kwargs.pop("transformers_version", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def name_or_path(self) -> str:
return self._name_or_path
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
"""
:obj:`bool`: Whether or not return :class:`~transformers.file_utils.ModelOutput` instead of tuples.
"""
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
"""
:obj:`int`: The number of labels for classification models.
"""
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
    def save_pretrained(self, save_directory: Union[str, os.PathLike]):
        """
        Save a configuration object to the directory ``save_directory``, so that it can be re-loaded using the
        :func:`~transformers.PretrainedConfig.from_pretrained` class method.

        Args:
            save_directory (:obj:`str` or :obj:`os.PathLike`):
                Directory where the configuration JSON file will be saved (will be created if it does not exist).
        """
        if os.path.isfile(save_directory):
            # NOTE(review): raising AssertionError directly is unusual; a ValueError
            # would be more idiomatic for input validation.
            raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory))
        os.makedirs(save_directory, exist_ok=True)
        # If we save using the predefined names, we can load using `from_pretrained`
        output_config_file = os.path.join(save_directory, CONFIG_NAME)

        # use_diff=True: only attributes that differ from the defaults are written.
        self.to_json_file(output_config_file, use_diff=True)
        logger.info(f"Configuration saved in {output_config_file}")
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        r"""
        Instantiate a :class:`~transformers.PretrainedConfig` (or a derived class) from a pretrained model
        configuration.

        Args:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
                Either a model id hosted on huggingface.co (e.g. ``bert-base-uncased`` or
                ``dbmdz/bert-base-german-cased``), a path to a directory containing a configuration saved with
                :func:`~transformers.PretrainedConfig.save_pretrained` (e.g. ``./my_model_directory/``), or a
                path/url to a saved configuration JSON file.
            cache_dir (:obj:`str` or :obj:`os.PathLike`, `optional`):
                Directory in which downloaded configurations should be cached instead of the standard cache.
            force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether to (re-)download the configuration files, overriding cached versions.
            resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
                Whether to resume downloading an incompletely received file instead of deleting it.
            proxies (:obj:`Dict[str, str]`, `optional`):
                Proxy servers to use by protocol or endpoint, e.g. :obj:`{'http': 'foo.bar:3128'}`.
            use_auth_token (:obj:`str` or `bool`, `optional`):
                HTTP bearer token for remote files. If :obj:`True`, uses the token generated by
                :obj:`transformers-cli login` (required for private models).
            revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
                Branch name, tag name, or commit id of the model version to load.
            return_unused_kwargs (:obj:`bool`, `optional`, defaults to :obj:`False`):
                If :obj:`True`, return a :obj:`(config, unused_kwargs)` tuple where ``unused_kwargs`` holds the
                key/value pairs that are not configuration attributes.
            kwargs (:obj:`Dict[str, Any]`, `optional`):
                Values for keys that are configuration attributes override the loaded values.

        Returns:
            :class:`PretrainedConfig`: The configuration object instantiated from this pretrained model.
        """
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        if config_dict.get("model_type", False) and hasattr(cls, "model_type"):
            # NOTE(review): `assert` is stripped under `python -O`; an explicit
            # `if`/`raise` would guarantee the model-type mismatch is always caught.
            assert (
                config_dict["model_type"] == cls.model_type
            ), f"You tried to initiate a model of type '{cls.model_type}' with a pretrained model of type '{config_dict['model_type']}'"

        return cls.from_dict(config_dict, **kwargs)
    @classmethod
    def get_config_dict(
        cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
    ) -> Tuple[Dict[str, Any], Dict[str, Any]]:
        """
        From a ``pretrained_model_name_or_path``, resolve to a dictionary of parameters, to be used for instantiating a
        :class:`~transformers.PretrainedConfig` using ``from_dict``.

        Parameters:
            pretrained_model_name_or_path (:obj:`str` or :obj:`os.PathLike`):
                The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.

        Returns:
            :obj:`Tuple[Dict, Dict]`: The loaded config dict and the kwargs that were not consumed here.

        Raises:
            EnvironmentError: If the file cannot be found/downloaded or is not valid JSON.
        """
        # Download/caching options are consumed here so they are not forwarded to __init__.
        cache_dir = kwargs.pop("cache_dir", None)
        force_download = kwargs.pop("force_download", False)
        resume_download = kwargs.pop("resume_download", False)
        proxies = kwargs.pop("proxies", None)
        use_auth_token = kwargs.pop("use_auth_token", None)
        local_files_only = kwargs.pop("local_files_only", False)
        revision = kwargs.pop("revision", None)

        if is_offline_mode() and not local_files_only:
            logger.info("Offline mode: forcing local_files_only=True")
            local_files_only = True

        pretrained_model_name_or_path = str(pretrained_model_name_or_path)
        # Resolve the identifier to a concrete location: local directory,
        # local/remote file, or a hub URL built from the model id.
        if os.path.isdir(pretrained_model_name_or_path):
            config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
        elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
            config_file = pretrained_model_name_or_path
        else:
            config_file = hf_bucket_url(
                pretrained_model_name_or_path, filename=CONFIG_NAME, revision=revision, mirror=None
            )

        try:
            # Load from URL or cache if already cached
            resolved_config_file = cached_path(
                config_file,
                cache_dir=cache_dir,
                force_download=force_download,
                proxies=proxies,
                resume_download=resume_download,
                local_files_only=local_files_only,
                use_auth_token=use_auth_token,
            )
            # Load config dict
            config_dict = cls._dict_from_json_file(resolved_config_file)

        except EnvironmentError as err:
            # Log the low-level error, then re-raise with a user-actionable message.
            logger.error(err)
            msg = (
                f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
                f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
                f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
            )
            raise EnvironmentError(msg)

        except json.JSONDecodeError:
            # The resolved file exists but is not valid JSON (e.g. an HTML error page).
            msg = (
                "Couldn't reach server at '{}' to download configuration file or "
                "configuration file is not a valid JSON file. "
                "Please check network or file content here: {}.".format(config_file, resolved_config_file)
            )
            raise EnvironmentError(msg)

        if resolved_config_file == config_file:
            logger.info("loading configuration file {}".format(config_file))
        else:
            logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))

        return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from a Python dictionary of parameters.
Args:
config_dict (:obj:`Dict[str, Any]`):
Dictionary that will be used to instantiate the configuration object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
:func:`~transformers.PretrainedConfig.get_config_dict` method.
kwargs (:obj:`Dict[str, Any]`):
Additional parameters from which to initialize the configuration object.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from those parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", str(config))
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
"""
Instantiates a :class:`~transformers.PretrainedConfig` from the path to a JSON file of parameters.
Args:
json_file (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
:class:`PretrainedConfig`: The configuration object instantiated from that JSON file.
"""
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
    def to_diff_dict(self) -> Dict[str, Any]:
        """
        Removes all attributes from config which correspond to the default config attributes for better readability and
        serializes to a Python dictionary.

        Returns:
            :obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance,
        """
        config_dict = self.to_dict()

        # get the default config dict
        default_config_dict = PretrainedConfig().to_dict()

        # get class specific config dict (skipped for composite configs, which
        # cannot be instantiated without arguments)
        class_config_dict = self.__class__().to_dict() if not self.is_composition else {}

        serializable_config_dict = {}

        # only serialize values that differ from the default config;
        # "transformers_version" is always kept so saved configs record the
        # producing library version.
        for key, value in config_dict.items():
            if (
                key not in default_config_dict
                or key == "transformers_version"
                or value != default_config_dict[key]
                or (key in class_config_dict and value != class_config_dict[key])
            ):
                serializable_config_dict[key] = value

        return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
"""
Serializes this instance to a Python dictionary.
Returns:
:obj:`Dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
# Transformers version when serializing the model
output["transformers_version"] = __version__
return output
def to_json_string(self, use_diff: bool = True) -> str:
"""
Serializes this instance to a JSON string.
Args:
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON string.
Returns:
:obj:`str`: String containing all the attributes that make up this configuration instance in JSON format.
"""
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
"""
Save this instance to a JSON file.
Args:
json_file_path (:obj:`str` or :obj:`os.PathLike`):
Path to the JSON file in which this configuration instance's parameters will be saved.
use_diff (:obj:`bool`, `optional`, defaults to :obj:`True`):
If set to ``True``, only the difference between the config instance and the default
``PretrainedConfig()`` is serialized to JSON file.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
"""
Updates attributes of this class with attributes from ``config_dict``.
Args:
config_dict (:obj:`Dict[str, Any]`): Dictionary of attributes that shall be updated for this class.
"""
for key, value in config_dict.items():
setattr(self, key, value)
| 54.424483
| 153
| 0.650074
|
import copy
import json
import os
from typing import Any, Dict, Tuple, Union
from . import __version__
from .file_utils import CONFIG_NAME, cached_path, hf_bucket_url, is_offline_mode, is_remote_url
from .utils import logging
logger = logging.get_logger(__name__)
class PretrainedConfig(object):
model_type: str = ""
is_composition: bool = False
def __init__(self, **kwargs):
self.return_dict = kwargs.pop("return_dict", True)
self.output_hidden_states = kwargs.pop("output_hidden_states", False)
self.output_attentions = kwargs.pop("output_attentions", False)
self.torchscript = kwargs.pop("torchscript", False)
self.use_bfloat16 = kwargs.pop("use_bfloat16", False)
self.pruned_heads = kwargs.pop("pruned_heads", {})
self.tie_word_embeddings = kwargs.pop(
"tie_word_embeddings", True
)
self.is_encoder_decoder = kwargs.pop("is_encoder_decoder", False)
self.is_decoder = kwargs.pop("is_decoder", False)
self.add_cross_attention = kwargs.pop("add_cross_attention", False)
self.tie_encoder_decoder = kwargs.pop("tie_encoder_decoder", False)
self.max_length = kwargs.pop("max_length", 20)
self.min_length = kwargs.pop("min_length", 0)
self.do_sample = kwargs.pop("do_sample", False)
self.early_stopping = kwargs.pop("early_stopping", False)
self.num_beams = kwargs.pop("num_beams", 1)
self.num_beam_groups = kwargs.pop("num_beam_groups", 1)
self.diversity_penalty = kwargs.pop("diversity_penalty", 0.0)
self.temperature = kwargs.pop("temperature", 1.0)
self.top_k = kwargs.pop("top_k", 50)
self.top_p = kwargs.pop("top_p", 1.0)
self.repetition_penalty = kwargs.pop("repetition_penalty", 1.0)
self.length_penalty = kwargs.pop("length_penalty", 1.0)
self.no_repeat_ngram_size = kwargs.pop("no_repeat_ngram_size", 0)
self.encoder_no_repeat_ngram_size = kwargs.pop("encoder_no_repeat_ngram_size", 0)
self.bad_words_ids = kwargs.pop("bad_words_ids", None)
self.num_return_sequences = kwargs.pop("num_return_sequences", 1)
self.chunk_size_feed_forward = kwargs.pop("chunk_size_feed_forward", 0)
self.output_scores = kwargs.pop("output_scores", False)
self.return_dict_in_generate = kwargs.pop("return_dict_in_generate", False)
self.forced_bos_token_id = kwargs.pop("forced_bos_token_id", None)
self.forced_eos_token_id = kwargs.pop("forced_eos_token_id", None)
self.remove_invalid_values = kwargs.pop("remove_invalid_values", False)
self.architectures = kwargs.pop("architectures", None)
self.finetuning_task = kwargs.pop("finetuning_task", None)
self.id2label = kwargs.pop("id2label", None)
self.label2id = kwargs.pop("label2id", None)
if self.id2label is not None:
kwargs.pop("num_labels", None)
self.id2label = dict((int(key), value) for key, value in self.id2label.items())
else:
self.num_labels = kwargs.pop("num_labels", 2)
self.tokenizer_class = kwargs.pop("tokenizer_class", None)
self.prefix = kwargs.pop("prefix", None)
self.bos_token_id = kwargs.pop("bos_token_id", None)
self.pad_token_id = kwargs.pop("pad_token_id", None)
self.eos_token_id = kwargs.pop("eos_token_id", None)
self.sep_token_id = kwargs.pop("sep_token_id", None)
self.decoder_start_token_id = kwargs.pop("decoder_start_token_id", None)
self.task_specific_params = kwargs.pop("task_specific_params", None)
if kwargs.pop("xla_device", None) is not None:
logger.warn(
"The `xla_device` argument has been deprecated in v4.4.0 of Transformers. It is ignored and you can "
"safely remove it from your `config.json` file."
)
self._name_or_path = str(kwargs.pop("name_or_path", ""))
kwargs.pop("transformers_version", None)
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error("Can't set {} with value {} for {}".format(key, value, self))
raise err
@property
def name_or_path(self) -> str:
return self._name_or_path
@name_or_path.setter
def name_or_path(self, value):
self._name_or_path = str(value) # Make sure that name_or_path is a string (for JSON encoding)
@property
def use_return_dict(self) -> bool:
# If torchscript is set, force `return_dict=False` to avoid jit errors
return self.return_dict and not self.torchscript
@property
def num_labels(self) -> int:
return len(self.id2label)
@num_labels.setter
def num_labels(self, num_labels: int):
if self.id2label is None or len(self.id2label) != num_labels:
self.id2label = {i: "LABEL_{}".format(i) for i in range(num_labels)}
self.label2id = dict(zip(self.id2label.values(), self.id2label.keys()))
def save_pretrained(self, save_directory: Union[str, os.PathLike]):
if os.path.isfile(save_directory):
raise AssertionError("Provided path ({}) should be a directory, not a file".format(save_directory))
os.makedirs(save_directory, exist_ok=True)
# If we save using the predefined names, we can load using `from_pretrained`
output_config_file = os.path.join(save_directory, CONFIG_NAME)
self.to_json_file(output_config_file, use_diff=True)
logger.info(f"Configuration saved in {output_config_file}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
if config_dict.get("model_type", False) and hasattr(cls, "model_type"):
assert (
config_dict["model_type"] == cls.model_type
), f"You tried to initiate a model of type '{cls.model_type}' with a pretrained model of type '{config_dict['model_type']}'"
return cls.from_dict(config_dict, **kwargs)
@classmethod
def get_config_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> Tuple[Dict[str, Any], Dict[str, Any]]:
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
use_auth_token = kwargs.pop("use_auth_token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
config_file = os.path.join(pretrained_model_name_or_path, CONFIG_NAME)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
config_file = pretrained_model_name_or_path
else:
config_file = hf_bucket_url(
pretrained_model_name_or_path, filename=CONFIG_NAME, revision=revision, mirror=None
)
try:
# Load from URL or cache if already cached
resolved_config_file = cached_path(
config_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
)
# Load config dict
config_dict = cls._dict_from_json_file(resolved_config_file)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load config for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a {CONFIG_NAME} file\n\n"
)
raise EnvironmentError(msg)
except json.JSONDecodeError:
msg = (
"Couldn't reach server at '{}' to download configuration file or "
"configuration file is not a valid JSON file. "
"Please check network or file content here: {}.".format(config_file, resolved_config_file)
)
raise EnvironmentError(msg)
if resolved_config_file == config_file:
logger.info("loading configuration file {}".format(config_file))
else:
logger.info("loading configuration file {} from cache at {}".format(config_file, resolved_config_file))
return config_dict, kwargs
@classmethod
def from_dict(cls, config_dict: Dict[str, Any], **kwargs) -> "PretrainedConfig":
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
config = cls(**config_dict)
if hasattr(config, "pruned_heads"):
config.pruned_heads = dict((int(key), value) for key, value in config.pruned_heads.items())
# Update config with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if hasattr(config, key):
setattr(config, key, value)
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
logger.info("Model config %s", str(config))
if return_unused_kwargs:
return config, kwargs
else:
return config
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "PretrainedConfig":
config_dict = cls._dict_from_json_file(json_file)
return cls(**config_dict)
@classmethod
def _dict_from_json_file(cls, json_file: Union[str, os.PathLike]):
with open(json_file, "r", encoding="utf-8") as reader:
text = reader.read()
return json.loads(text)
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{} {}".format(self.__class__.__name__, self.to_json_string())
def to_diff_dict(self) -> Dict[str, Any]:
config_dict = self.to_dict()
# get the default config dict
default_config_dict = PretrainedConfig().to_dict()
# get class specific config dict
class_config_dict = self.__class__().to_dict() if not self.is_composition else {}
serializable_config_dict = {}
# only serialize values that differ from the default config
for key, value in config_dict.items():
if (
key not in default_config_dict
or key == "transformers_version"
or value != default_config_dict[key]
or (key in class_config_dict and value != class_config_dict[key])
):
serializable_config_dict[key] = value
return serializable_config_dict
def to_dict(self) -> Dict[str, Any]:
output = copy.deepcopy(self.__dict__)
if hasattr(self.__class__, "model_type"):
output["model_type"] = self.__class__.model_type
# Transformers version when serializing the model
output["transformers_version"] = __version__
return output
def to_json_string(self, use_diff: bool = True) -> str:
if use_diff is True:
config_dict = self.to_diff_dict()
else:
config_dict = self.to_dict()
return json.dumps(config_dict, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike], use_diff: bool = True):
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string(use_diff=use_diff))
def update(self, config_dict: Dict[str, Any]):
for key, value in config_dict.items():
setattr(self, key, value)
| true
| true
|
1c42908df0243e91686259f79b9018afec52a52a
| 828
|
py
|
Python
|
tests/regression/failing_link_to_playlist.py
|
EdwardBetts/pyspotify
|
851c622e814dee59362d27d978251bd3891c59f7
|
[
"Apache-2.0"
] | 2
|
2016-11-18T08:49:26.000Z
|
2018-05-14T13:27:19.000Z
|
tests/regression/failing_link_to_playlist.py
|
spankders/pyspotify
|
b18ac0c72771e6c3418f0d57b775ae5c6e1ab44e
|
[
"Apache-2.0"
] | null | null | null |
tests/regression/failing_link_to_playlist.py
|
spankders/pyspotify
|
b18ac0c72771e6c3418f0d57b775ae5c6e1ab44e
|
[
"Apache-2.0"
] | 1
|
2021-06-24T15:57:12.000Z
|
2021-06-24T15:57:12.000Z
|
# TODO This example should work, but fails to get the URIs of the playlists.
from __future__ import print_function
import logging
import time
import spotify

logging.basicConfig(level=logging.INFO)
# Assuming a spotify_appkey.key in the current dir:
session = spotify.Session()
# Assuming a previous login with remember_me=True and a proper logout:
session.relogin()
# Pump libspotify events until the relogin has completed.
while session.connection.state != spotify.ConnectionState.LOGGED_IN:
    session.process_events()
user = session.get_user('spotify:user:p3.no').load()
user.published_playlists.load()
# NOTE(review): a fixed sleep is a race-prone way to wait for the playlist
# container to populate; polling process_events() in a loop (as above) until
# the container reports loaded would be more reliable.
time.sleep(10)
session.process_events()
print('%d playlists found' % len(user.published_playlists))
for playlist in user.published_playlists:
    playlist.load()
    print('Loaded', playlist)
print(user.published_playlists)
session.logout()
session.process_events()
| 22.378378
| 76
| 0.781401
|
from __future__ import print_function
import logging
import time
import spotify
logging.basicConfig(level=logging.INFO)
session = spotify.Session()
session.relogin()
while session.connection.state != spotify.ConnectionState.LOGGED_IN:
session.process_events()
user = session.get_user('spotify:user:p3.no').load()
user.published_playlists.load()
time.sleep(10)
session.process_events()
print('%d playlists found' % len(user.published_playlists))
for playlist in user.published_playlists:
playlist.load()
print('Loaded', playlist)
print(user.published_playlists)
session.logout()
session.process_events()
| true
| true
|
1c4291004131394ab0fba2226f14363deb904bba
| 618
|
py
|
Python
|
Classes/Endereco_Classe.py
|
felipemamore/SalaGreen-Felipe
|
73664b77fdd09400e9129364308dded0fc15234d
|
[
"Apache-2.0"
] | null | null | null |
Classes/Endereco_Classe.py
|
felipemamore/SalaGreen-Felipe
|
73664b77fdd09400e9129364308dded0fc15234d
|
[
"Apache-2.0"
] | null | null | null |
Classes/Endereco_Classe.py
|
felipemamore/SalaGreen-Felipe
|
73664b77fdd09400e9129364308dded0fc15234d
|
[
"Apache-2.0"
] | null | null | null |
class Endereco:
    """Simple address value object with type-checked string setters.

    Non-``str`` values passed to a setter are silently ignored, matching the
    original behavior.
    """

    def __init__(self) -> None:
        self.__rua = ''
        self.__bairro = ''
        self.__cidade = ''

    def getRua(self) -> str:
        """Return the street name."""
        return self.__rua

    def getBairro(self) -> str:
        """Return the neighbourhood name."""
        return self.__bairro

    def getCidade(self) -> str:
        """Return the city name."""
        return self.__cidade

    def setRua(self, rua) -> None:
        """Store the street name if it is a string."""
        if isinstance(rua, str):
            # BUG FIX: was `self.rua = rua`, which created a new public
            # attribute and left the private `__rua` (read by getRua) empty.
            self.__rua = rua

    def setBairro(self, bairro) -> None:
        """Store the neighbourhood name if it is a string."""
        if isinstance(bairro, str):
            self.__bairro = bairro

    def setCidade(self, cidade) -> None:
        """Store the city name if it is a string."""
        if isinstance(cidade, str):
            # BUG FIX: was `self.__cidade == cidade` — a no-op comparison,
            # so the city was never actually stored.
            self.__cidade = cidade
| 19.935484
| 40
| 0.533981
|
class Endereco:
    """Simple address value object with type-checked string setters.

    Non-``str`` values passed to a setter are silently ignored, matching the
    original behavior.
    """

    def __init__(self) -> None:
        self.__rua = ''
        self.__bairro = ''
        self.__cidade = ''

    def getRua(self) -> str:
        """Return the street name."""
        return self.__rua

    def getBairro(self) -> str:
        """Return the neighbourhood name."""
        return self.__bairro

    def getCidade(self) -> str:
        """Return the city name."""
        return self.__cidade

    def setRua(self, rua) -> None:
        """Store the street name if it is a string."""
        if isinstance(rua, str):
            # BUG FIX: was `self.rua = rua`, which created a new public
            # attribute and left the private `__rua` (read by getRua) empty.
            self.__rua = rua

    def setBairro(self, bairro) -> None:
        """Store the neighbourhood name if it is a string."""
        if isinstance(bairro, str):
            self.__bairro = bairro

    def setCidade(self, cidade) -> None:
        """Store the city name if it is a string."""
        if isinstance(cidade, str):
            # BUG FIX: was `self.__cidade == cidade` — a no-op comparison,
            # so the city was never actually stored.
            self.__cidade = cidade
| true
| true
|
1c429124c9b888185020e023d92debd615d5a131
| 840
|
py
|
Python
|
wagtailformblocks/forms.py
|
LUKKIEN/wagtailformblocks
|
d38dc6d42157eedeca25ed461873c00872bbf54d
|
[
"BSD-3-Clause"
] | 16
|
2016-08-26T12:59:47.000Z
|
2022-03-23T08:11:08.000Z
|
wagtailformblocks/forms.py
|
LUKKIEN/wagtailformblocks
|
d38dc6d42157eedeca25ed461873c00872bbf54d
|
[
"BSD-3-Clause"
] | 10
|
2016-09-01T06:58:41.000Z
|
2020-05-08T13:47:00.000Z
|
wagtailformblocks/forms.py
|
LUKKIEN/wagtailformblocks
|
d38dc6d42157eedeca25ed461873c00872bbf54d
|
[
"BSD-3-Clause"
] | 10
|
2016-09-09T22:14:02.000Z
|
2021-10-05T16:15:44.000Z
|
from captcha.fields import ReCaptchaField
from wagtail.contrib.forms.forms import FormBuilder as OrigFormBuilder
from wagtailformblocks.utils.conf import (get_formblocks_setting,
recaptcha_enabled)
class FormBuilder(OrigFormBuilder):
    """Wagtail form builder that can append a ReCaptcha field on demand."""

    def __init__(self, fields, **kwargs):
        # Pop the flag so it is not forwarded to the parent constructor.
        self.add_recaptcha = kwargs.pop('add_recaptcha')
        super(FormBuilder, self).__init__(fields)

    @property
    def recaptcha_enabled(self):
        # Captcha is active only when the block asked for it AND the
        # project-level configuration enables it.
        return self.add_recaptcha and recaptcha_enabled()

    @property
    def formfields(self):
        fields = super(FormBuilder, self).formfields
        if not self.recaptcha_enabled:
            return fields
        attrs = get_formblocks_setting('RECAPTCHA_ATTRS')
        fields['recaptcha'] = ReCaptchaField(**attrs)
        return fields
| 31.111111
| 71
| 0.695238
|
from captcha.fields import ReCaptchaField
from wagtail.contrib.forms.forms import FormBuilder as OrigFormBuilder
from wagtailformblocks.utils.conf import (get_formblocks_setting,
recaptcha_enabled)
class FormBuilder(OrigFormBuilder):
    """Wagtail form builder that can append a ReCaptcha field on demand."""

    def __init__(self, fields, **kwargs):
        # Pop the flag so it is not forwarded to the parent constructor.
        self.add_recaptcha = kwargs.pop('add_recaptcha')
        super(FormBuilder, self).__init__(fields)

    @property
    def recaptcha_enabled(self):
        # Captcha is active only when the block asked for it AND the
        # project-level configuration enables it.
        return self.add_recaptcha and recaptcha_enabled()

    @property
    def formfields(self):
        fields = super(FormBuilder, self).formfields
        if not self.recaptcha_enabled:
            return fields
        attrs = get_formblocks_setting('RECAPTCHA_ATTRS')
        fields['recaptcha'] = ReCaptchaField(**attrs)
        return fields
| true
| true
|
1c4292daca769d18c98cc09276b509285319627b
| 562
|
py
|
Python
|
neoload/neoload_cli_lib/cli_exception.py
|
stephanemartin/neoload-cli
|
aa128aad9a446e94d5700e8a25b674397d633e1a
|
[
"BSD-2-Clause"
] | 9
|
2020-06-01T14:28:37.000Z
|
2022-03-06T23:21:09.000Z
|
neoload/neoload_cli_lib/cli_exception.py
|
stephanemartin/neoload-cli
|
aa128aad9a446e94d5700e8a25b674397d633e1a
|
[
"BSD-2-Clause"
] | 97
|
2019-12-06T23:52:19.000Z
|
2022-02-11T14:22:07.000Z
|
neoload/neoload_cli_lib/cli_exception.py
|
stephanemartin/neoload-cli
|
aa128aad9a446e94d5700e8a25b674397d633e1a
|
[
"BSD-2-Clause"
] | 23
|
2020-03-24T18:38:58.000Z
|
2022-03-04T16:09:23.000Z
|
import traceback
import click
_CliException__debug = False
class CliException(click.ClickException):
    """ClickException variant that can prepend a traceback in debug mode."""

    __debug = False  # class-wide debug flag, toggled via set_debug()

    @staticmethod
    def set_debug(boolean: bool):
        """Globally enable or disable debug output for all CLI errors."""
        CliException.__debug = boolean

    def __init__(self, message):
        super().__init__(message)

    def format_message(self):
        """Return the message, prefixed with the current traceback when debugging."""
        text = super().format_message()
        if not CliException.__debug:
            return text
        return traceback.format_exc() + "\n\n" + text

    @staticmethod
    def is_debug():
        """Report whether debug mode is currently on."""
        return CliException.__debug
| 20.814815
| 67
| 0.66548
|
import traceback
import click
_CliException__debug = False
class CliException(click.ClickException):
    """ClickException variant that can prepend a traceback in debug mode."""

    __debug = False  # class-wide debug flag, toggled via set_debug()

    @staticmethod
    def set_debug(boolean: bool):
        """Globally enable or disable debug output for all CLI errors."""
        CliException.__debug = boolean

    def __init__(self, message):
        super().__init__(message)

    def format_message(self):
        """Return the message, prefixed with the current traceback when debugging."""
        text = super().format_message()
        if not CliException.__debug:
            return text
        return traceback.format_exc() + "\n\n" + text

    @staticmethod
    def is_debug():
        """Report whether debug mode is currently on."""
        return CliException.__debug
| true
| true
|
1c429356562f114ce6582ad14d6176986c1d9301
| 2,714
|
py
|
Python
|
tests/loris/transcoders/abstract_jp2_transcoder_tests.py
|
jpstroop/loris-redux
|
b5db56d5a250fdb24486afe01bad55b81761701a
|
[
"BSD-2-Clause"
] | 7
|
2016-08-09T17:39:05.000Z
|
2016-09-26T19:37:30.000Z
|
tests/loris/transcoders/abstract_jp2_transcoder_tests.py
|
jpstroop/loris-redux
|
b5db56d5a250fdb24486afe01bad55b81761701a
|
[
"BSD-2-Clause"
] | 183
|
2016-06-02T22:07:05.000Z
|
2022-03-11T23:23:01.000Z
|
tests/loris/transcoders/abstract_jp2_transcoder_tests.py
|
jpstroop/loris-redux
|
b5db56d5a250fdb24486afe01bad55b81761701a
|
[
"BSD-2-Clause"
] | 1
|
2016-08-09T17:39:11.000Z
|
2016-08-09T17:39:11.000Z
|
from loris.transcoders.abstract_jp2_transcoder import AbstractJp2Transcoder
from unittest.mock import Mock
import os
import stat
class TestAbstractJp2Transcoder(object):
    """Tests for the scale/reduce helpers shared by all JP2 transcoders."""

    def test__named_pipe(self):
        # Minimal concrete subclass so the abstract class can be instantiated.
        class ActualJp2Transcoder(AbstractJp2Transcoder):
            def _build_command(self, image_request, fifo_path):
                pass

        transcoder = ActualJp2Transcoder({})
        with transcoder._named_pipe() as pipe_path:
            fifo_path = transcoder._named_pipe()
            assert stat.S_ISFIFO(os.stat(pipe_path).st_mode)
            assert pipe_path.endswith(".bmp")
        # Exiting the context manager must remove the pipe from disk.
        assert not os.path.exists(pipe_path)

    def test__get_closest_scale(self):
        all_scales = [1, 2, 4, 8, 16, 32, 64]
        result = AbstractJp2Transcoder._get_closest_scale(
            300, 400, 6000, 8000, all_scales
        )
        assert result == 16

    def test__get_closest_scale_empty_returns_1(self):
        result = AbstractJp2Transcoder._get_closest_scale(300, 400, 6000, 8000, [])
        assert result == 1

    def test__scales_to_reduce_arg(self):
        all_scales = [1, 2, 4, 8, 16, 32, 64]
        result = AbstractJp2Transcoder._scales_to_reduce_arg(
            300, 400, 5999, 7600, all_scales
        )
        assert result == "4"

    def test__scales_to_reduce_arg_no_scales_returns_0(self):
        result = AbstractJp2Transcoder._scales_to_reduce_arg(300, 400, 5999, 7600, [])
        assert result == "0"

    def test_reduce_arg_from_image_request_portrait(self):
        info = Mock(width=5000, height=6500, all_scales=[1, 2, 4, 8, 16, 32, 64])
        image_request = Mock(
            width=250,  # can discard 4 (312px)
            height=400,  # can discard 4 (407px)
            info=info,
        )
        assert AbstractJp2Transcoder.reduce_arg_from_image_request(image_request) == "4"

    def test_reduce_arg_from_image_request_landscape(self):
        info = Mock(width=7200, height=4128, all_scales=[1, 2, 4, 8, 16, 32, 64])
        image_request = Mock(
            width=200,  # can discard 5 (225px)
            height=118,  # can discard 5 (129px)
            info=info,
        )
        assert AbstractJp2Transcoder.reduce_arg_from_image_request(image_request) == "5"
| 34.35443
| 81
| 0.613117
|
from loris.transcoders.abstract_jp2_transcoder import AbstractJp2Transcoder
from unittest.mock import Mock
import os
import stat
class TestAbstractJp2Transcoder(object):
    """Tests for the scale/reduce helpers shared by all JP2 transcoders."""

    def test__named_pipe(self):
        # Minimal concrete subclass so the abstract class can be instantiated.
        class ActualJp2Transcoder(AbstractJp2Transcoder):
            def _build_command(self, image_request, fifo_path):
                pass

        transcoder = ActualJp2Transcoder({})
        with transcoder._named_pipe() as pipe_path:
            fifo_path = transcoder._named_pipe()
            assert stat.S_ISFIFO(os.stat(pipe_path).st_mode)
            assert pipe_path.endswith(".bmp")
        # Exiting the context manager must remove the pipe from disk.
        assert not os.path.exists(pipe_path)

    def test__get_closest_scale(self):
        all_scales = [1, 2, 4, 8, 16, 32, 64]
        result = AbstractJp2Transcoder._get_closest_scale(
            300, 400, 6000, 8000, all_scales
        )
        assert result == 16

    def test__get_closest_scale_empty_returns_1(self):
        result = AbstractJp2Transcoder._get_closest_scale(300, 400, 6000, 8000, [])
        assert result == 1

    def test__scales_to_reduce_arg(self):
        all_scales = [1, 2, 4, 8, 16, 32, 64]
        result = AbstractJp2Transcoder._scales_to_reduce_arg(
            300, 400, 5999, 7600, all_scales
        )
        assert result == "4"

    def test__scales_to_reduce_arg_no_scales_returns_0(self):
        result = AbstractJp2Transcoder._scales_to_reduce_arg(300, 400, 5999, 7600, [])
        assert result == "0"

    def test_reduce_arg_from_image_request_portrait(self):
        info = Mock(width=5000, height=6500, all_scales=[1, 2, 4, 8, 16, 32, 64])
        image_request = Mock(width=250, height=400, info=info)
        assert AbstractJp2Transcoder.reduce_arg_from_image_request(image_request) == "4"

    def test_reduce_arg_from_image_request_landscape(self):
        info = Mock(width=7200, height=4128, all_scales=[1, 2, 4, 8, 16, 32, 64])
        image_request = Mock(width=200, height=118, info=info)
        assert AbstractJp2Transcoder.reduce_arg_from_image_request(image_request) == "5"
| true
| true
|
1c429436ef5996742d357dcc02e199509abf1571
| 6,777
|
py
|
Python
|
fixit/common/utils.py
|
lpetre/Fixit
|
f2e5c080cde2febff194f3a29074d51a29667d10
|
[
"Apache-2.0"
] | null | null | null |
fixit/common/utils.py
|
lpetre/Fixit
|
f2e5c080cde2febff194f3a29074d51a29667d10
|
[
"Apache-2.0"
] | 1
|
2022-03-16T19:17:23.000Z
|
2022-03-16T19:17:23.000Z
|
fixit/common/utils.py
|
isabella232/Fixit-1
|
f2e5c080cde2febff194f3a29074d51a29667d10
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import importlib
import inspect
import json
import pkgutil
import re
import textwrap
from dataclasses import dataclass
from pathlib import Path
from types import ModuleType
from typing import cast, Dict, List, Optional, Set, Type, Union
import libcst as cst
from libcst._add_slots import add_slots
from libcst.metadata import BaseMetadataProvider, MetadataWrapper, TypeInferenceProvider
from libcst.metadata.type_inference_provider import PyreData
from fixit.common.base import CstLintRule, LintConfig, LintRuleT
from fixit.common.pseudo_rule import PseudoLintRule
def _dedent(src: str) -> str:
src = re.sub(r"\A\n", "", src)
return textwrap.dedent(src)
def dedent_with_lstrip(src: str) -> str:
    """Dedent *src* and drop a single leading newline, if present.

    Cleanup: the original returned ``"".join(src[1:])``; joining the
    characters of a string just reproduces the same string, so the
    ``"".join`` wrapper was a no-op and has been removed.
    """
    src = textwrap.dedent(src)
    if src.startswith("\n"):
        return src[1:]
    return src
def _str_or_any(value: Optional[int]) -> str:
return "<any>" if value is None else str(value)
class DuplicateLintRuleNameError(Exception):
    """Raised when two lint rules share the same class name."""


class FixtureFileNotFoundError(Exception):
    """Raised when a pyre fixture file is missing on disk."""


class LintRuleNotFoundError(Exception):
    """Raised when a requested lint rule cannot be located in any package."""
# A rule collection may mix concrete CstLintRule and PseudoLintRule classes.
LintRuleCollectionT = Set[Union[Type[CstLintRule], Type[PseudoLintRule]]]
# Placeholder path used for test cases that are not backed by a real file.
DEFAULT_FILENAME: str = "not/a/real/file/path.py"
DEFAULT_CONFIG: LintConfig = LintConfig(
    repo_root=str(
        Path(__file__).parent.parent
    ),  # Set base config repo_root to `fixit` directory for testing.
)
@add_slots
@dataclass(frozen=True)
class ValidTestCase:
    """A lint-rule test case whose code must produce no report."""
    code: str
    filename: str = DEFAULT_FILENAME
    config: LintConfig = DEFAULT_CONFIG
@add_slots
@dataclass(frozen=True)
class InvalidTestCase:
    """A lint-rule test case whose code must trigger a report.

    line/column/kind/expected_message are matched against the produced
    report; None fields are treated as wildcards.
    """
    code: str
    kind: Optional[str] = None
    line: Optional[int] = None
    column: Optional[int] = None
    expected_replacement: Optional[str] = None
    filename: str = DEFAULT_FILENAME
    config: LintConfig = DEFAULT_CONFIG
    expected_message: Optional[str] = None
    @property
    def expected_str(self) -> str:
        # "<any>" stands in for an unspecified line or column.
        return f"{_str_or_any(self.line)}:{_str_or_any(self.column)}: {self.kind} ..."
def import_submodules(package: str, recursive: bool = True) -> Dict[str, ModuleType]:
    """Import all submodules of a module, recursively, including subpackages."""
    # pyre-fixme[35]: Target cannot be annotated.
    package: ModuleType = importlib.import_module(package)
    modules = {}
    prefix = package.__name__ + "."
    # pyre-fixme[16]: `ModuleType` has no attribute `__path__`.
    for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        qualified = prefix + name
        try:
            modules[qualified] = importlib.import_module(qualified)
        except ModuleNotFoundError:
            # Optional submodules may be absent; skip them silently.
            pass
        if recursive and is_pkg:
            modules.update(import_submodules(qualified))
    return modules
def import_distinct_rules_from_package(
    package: str,
    block_list_rules: List[str] = [],
    seen_names: Optional[Set[str]] = None,
    allow_list_rules: Optional[List[str]] = None,
) -> LintRuleCollectionT:
    """Import all lint rules from *package*, omitting block-listed ones.

    Raises ``DuplicateLintRuleNameError`` on repeated rule names; the
    optional *seen_names* set carries names that must not occur again.
    NOTE(review): ``[]`` is a mutable default argument; it is only read
    here, so it is harmless, but a ``None`` sentinel would be safer.
    """
    rules: LintRuleCollectionT = set()
    if seen_names is None:
        # pyre-fixme[35]: Target cannot be annotated.
        seen_names: Set[str] = set()
    for _module_name, module in import_submodules(package).items():
        for name in dir(module):
            try:
                obj = getattr(module, name)
                if (
                    obj is not CstLintRule
                    and (
                        issubclass(obj, CstLintRule) or issubclass(obj, PseudoLintRule)
                    )
                    and not inspect.isabstract(obj)
                ):
                    if name in seen_names:
                        raise DuplicateLintRuleNameError(
                            f"Lint rule name {name} is duplicated."
                        )
                    # Add all names (even block-listed ones) to the `names` set for duplicate checking.
                    seen_names.add(name)
                    # For backwards compatibility if `allow_list_rules` is missing fall back to all allowed
                    if not allow_list_rules or name in allow_list_rules:
                        if name not in block_list_rules:
                            rules.add(obj)
            except TypeError:
                # issubclass() raises TypeError for non-class attributes.
                continue
    return rules
def gen_type_inference_wrapper(code: str, pyre_fixture_path: Path) -> MetadataWrapper:
    """Build a MetadataWrapper pre-seeded with pyre type data for a test case.

    Raises FixtureFileNotFoundError when the fixture JSON is missing.
    """
    module = cst.parse_module(_dedent(code))
    provider_type = TypeInferenceProvider
    try:
        pyre_json_data: PyreData = json.loads(pyre_fixture_path.read_text())
    except FileNotFoundError as e:
        raise FixtureFileNotFoundError(
            f"Fixture file not found at {e.filename}. "
            + "Please run `python -m fixit.common.generate_pyre_fixtures <rule>` to generate fixtures."
        )
    # Seeding the metadata cache keeps TypeInferenceProvider from shelling
    # out to pyre during tests.
    return MetadataWrapper(
        module=module,
        cache={cast(Type[BaseMetadataProvider[object]], provider_type): pyre_json_data},
    )
def import_rule_from_package(
    package_name: str, rule_class_name: str
) -> "Optional[LintRuleT]":
    """Return the first rule class named *rule_class_name* in *package_name*.

    Walks the package's modules and subpackages and returns as soon as a
    matching attribute is found, or ``None`` when no module defines it.

    Bug fix: the original assigned the recursive subpackage result to
    ``rule`` unconditionally, clobbering a match already found at module
    level (and possibly returning ``None`` for a rule that exists); we now
    return the module-level match before descending.
    """
    rule: "Optional[LintRuleT]" = None
    package = importlib.import_module(package_name)
    for _loader, name, is_pkg in pkgutil.walk_packages(
        getattr(package, "__path__", None)
    ):
        full_package_or_module_name = package.__name__ + "." + name
        try:
            module = importlib.import_module(full_package_or_module_name)
            rule = getattr(module, rule_class_name, None)
        except ModuleNotFoundError:
            # Optional modules may be absent; keep scanning.
            pass
        if rule is not None:
            # Stop early: a module-level match must not be overwritten below.
            return rule
        if is_pkg:
            rule = import_rule_from_package(
                full_package_or_module_name, rule_class_name
            )
            if rule is not None:
                return rule
    return rule
def find_and_import_rule(rule_class_name: str, packages: List[str]) -> LintRuleT:
    """Search *packages* in order; return the first matching rule class.

    Raises LintRuleNotFoundError when no package contains the rule.
    """
    for package in packages:
        rule = import_rule_from_package(package, rule_class_name)
        if rule is not None:
            return rule
    # Exhausted every package without a match.
    raise LintRuleNotFoundError(
        f"Could not find lint rule {rule_class_name} in the following packages: \n"
        + "\n".join(packages)
    )
| 34.227273
| 122
| 0.665486
|
import importlib
import inspect
import json
import pkgutil
import re
import textwrap
from dataclasses import dataclass
from pathlib import Path
from types import ModuleType
from typing import cast, Dict, List, Optional, Set, Type, Union
import libcst as cst
from libcst._add_slots import add_slots
from libcst.metadata import BaseMetadataProvider, MetadataWrapper, TypeInferenceProvider
from libcst.metadata.type_inference_provider import PyreData
from fixit.common.base import CstLintRule, LintConfig, LintRuleT
from fixit.common.pseudo_rule import PseudoLintRule
def _dedent(src: str) -> str:
src = re.sub(r"\A\n", "", src)
return textwrap.dedent(src)
def dedent_with_lstrip(src: str) -> str:
    """Dedent *src* and drop a single leading newline, if present.

    Cleanup: the original returned ``"".join(src[1:])``; joining the
    characters of a string reproduces the same string, so the wrapper
    was a no-op and has been removed.
    """
    src = textwrap.dedent(src)
    if src.startswith("\n"):
        return src[1:]
    return src
def _str_or_any(value: Optional[int]) -> str:
return "<any>" if value is None else str(value)
class DuplicateLintRuleNameError(Exception):
    """Raised when two lint rules share the same class name."""


class FixtureFileNotFoundError(Exception):
    """Raised when a pyre fixture file is missing on disk."""


class LintRuleNotFoundError(Exception):
    """Raised when a requested lint rule cannot be located in any package."""
LintRuleCollectionT = Set[Union[Type[CstLintRule], Type[PseudoLintRule]]]
DEFAULT_FILENAME: str = "not/a/real/file/path.py"
DEFAULT_CONFIG: LintConfig = LintConfig(
repo_root=str(
Path(__file__).parent.parent
),
)
@add_slots
@dataclass(frozen=True)
class ValidTestCase:
code: str
filename: str = DEFAULT_FILENAME
config: LintConfig = DEFAULT_CONFIG
@add_slots
@dataclass(frozen=True)
class InvalidTestCase:
code: str
kind: Optional[str] = None
line: Optional[int] = None
column: Optional[int] = None
expected_replacement: Optional[str] = None
filename: str = DEFAULT_FILENAME
config: LintConfig = DEFAULT_CONFIG
expected_message: Optional[str] = None
@property
def expected_str(self) -> str:
return f"{_str_or_any(self.line)}:{_str_or_any(self.column)}: {self.kind} ..."
def import_submodules(package: str, recursive: bool = True) -> Dict[str, ModuleType]:
    """Import all submodules of a module, recursively, including subpackages."""
    package: ModuleType = importlib.import_module(package)
    modules = {}
    prefix = package.__name__ + "."
    for _loader, name, is_pkg in pkgutil.walk_packages(package.__path__):
        qualified = prefix + name
        try:
            modules[qualified] = importlib.import_module(qualified)
        except ModuleNotFoundError:
            # Optional submodules may be absent; skip them silently.
            pass
        if recursive and is_pkg:
            modules.update(import_submodules(qualified))
    return modules
def import_distinct_rules_from_package(
package: str,
block_list_rules: List[str] = [],
seen_names: Optional[Set[str]] = None,
allow_list_rules: Optional[List[str]] = None,
) -> LintRuleCollectionT:
rules: LintRuleCollectionT = set()
if seen_names is None:
seen_names: Set[str] = set()
for _module_name, module in import_submodules(package).items():
for name in dir(module):
try:
obj = getattr(module, name)
if (
obj is not CstLintRule
and (
issubclass(obj, CstLintRule) or issubclass(obj, PseudoLintRule)
)
and not inspect.isabstract(obj)
):
if name in seen_names:
raise DuplicateLintRuleNameError(
f"Lint rule name {name} is duplicated."
)
seen_names.add(name)
if not allow_list_rules or name in allow_list_rules:
if name not in block_list_rules:
rules.add(obj)
except TypeError:
continue
return rules
def gen_type_inference_wrapper(code: str, pyre_fixture_path: Path) -> MetadataWrapper:
module = cst.parse_module(_dedent(code))
provider_type = TypeInferenceProvider
try:
pyre_json_data: PyreData = json.loads(pyre_fixture_path.read_text())
except FileNotFoundError as e:
raise FixtureFileNotFoundError(
f"Fixture file not found at {e.filename}. "
+ "Please run `python -m fixit.common.generate_pyre_fixtures <rule>` to generate fixtures."
)
return MetadataWrapper(
module=module,
cache={cast(Type[BaseMetadataProvider[object]], provider_type): pyre_json_data},
)
def import_rule_from_package(
    package_name: str, rule_class_name: str
) -> "Optional[LintRuleT]":
    """Return the first rule class named *rule_class_name* in *package_name*.

    Bug fix: the original assigned the recursive subpackage result to
    ``rule`` unconditionally, clobbering a match already found at module
    level; we now return the module-level match before descending.
    """
    rule: "Optional[LintRuleT]" = None
    package = importlib.import_module(package_name)
    for _loader, name, is_pkg in pkgutil.walk_packages(
        getattr(package, "__path__", None)
    ):
        full_package_or_module_name = package.__name__ + "." + name
        try:
            module = importlib.import_module(full_package_or_module_name)
            rule = getattr(module, rule_class_name, None)
        except ModuleNotFoundError:
            pass
        if rule is not None:
            # Stop early: a module-level match must not be overwritten below.
            return rule
        if is_pkg:
            rule = import_rule_from_package(
                full_package_or_module_name, rule_class_name
            )
            if rule is not None:
                return rule
    return rule
def find_and_import_rule(rule_class_name: str, packages: List[str]) -> LintRuleT:
    """Search *packages* in order; return the first matching rule class.

    Raises LintRuleNotFoundError when no package contains the rule.
    """
    for package in packages:
        rule = import_rule_from_package(package, rule_class_name)
        if rule is not None:
            return rule
    # Exhausted every package without a match.
    raise LintRuleNotFoundError(
        f"Could not find lint rule {rule_class_name} in the following packages: \n"
        + "\n".join(packages)
    )
| true
| true
|
1c4294ab8693747f0c625c1eb7d5e3d646153131
| 3,072
|
py
|
Python
|
examples/basic_operations/pause_ad.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/pause_ad.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
examples/basic_operations/pause_ad.py
|
jiulongw/google-ads-python
|
6f5256eb1eeb5a9a95c8cdb9b97988d3a676282e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This example pauses an ad."""
import argparse
import sys
from google.api_core import protobuf_helpers
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.util import ResourceName
def main(client, customer_id, ad_group_id, ad_id):
    """Pause the ad identified by ad_group_id/ad_id for the given customer."""
    # Bug fix: the top of this file only imports GoogleAdsClient, so the
    # bare name `google` used in the `except` clause below was unbound and
    # raised a NameError instead of catching the API error. Importing the
    # errors module here binds `google` locally and makes the handler work.
    import google.ads.google_ads.errors

    ad_group_ad_service = client.get_service('AdGroupAdService', version='v2')
    ad_group_ad_operation = client.get_type('AdGroupAdOperation', version='v2')

    # Build the update: address the ad by its composite resource name and
    # set its status to PAUSED.
    ad_group_ad = ad_group_ad_operation.update
    ad_group_ad.resource_name = ad_group_ad_service.ad_group_ad_path(
        customer_id, ResourceName.format_composite(ad_group_id, ad_id))
    ad_group_ad.status = client.get_type('AdGroupStatusEnum',
                                         version='v2').PAUSED
    # Only send the fields that were actually modified.
    fm = protobuf_helpers.field_mask(None, ad_group_ad)
    ad_group_ad_operation.update_mask.CopyFrom(fm)

    try:
        ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(
            customer_id, [ad_group_ad_operation])
    except google.ads.google_ads.errors.GoogleAdsException as ex:
        print('Request with ID "%s" failed with status "%s" and includes the '
              'following errors:' % (ex.request_id, ex.error.code().name))
        for error in ex.failure.errors:
            print('\tError with message "%s".' % error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print('\t\tOn field: %s' % field_path_element.field_name)
        sys.exit(1)

    print('Paused ad group ad %s.'
          % ad_group_ad_response.results[0].resource_name)
if __name__ == '__main__':
    # GoogleAdsClient reads the google-ads.yaml configuration file in the
    # home directory when no path is specified.
    google_ads_client = GoogleAdsClient.load_from_storage()

    parser = argparse.ArgumentParser(
        description=('Pauses an ad in the specified customer\'s ad group.'))
    # All three identifiers are required to address a single ad.
    parser.add_argument('-c', '--customer_id', type=str, required=True,
                        help='The Google Ads customer ID.')
    parser.add_argument('-a', '--ad_group_id', type=str, required=True,
                        help='The ad group ID.')
    parser.add_argument('-i', '--ad_id', type=str, required=True,
                        help='The ad ID.')
    args = parser.parse_args()

    main(google_ads_client, args.customer_id, args.ad_group_id, args.ad_id)
| 42.666667
| 79
| 0.696615
|
import argparse
import sys
from google.api_core import protobuf_helpers
from google.ads.google_ads.client import GoogleAdsClient
from google.ads.google_ads.util import ResourceName
def main(client, customer_id, ad_group_id, ad_id):
    """Pause the ad identified by ad_group_id/ad_id for the given customer."""
    # Bug fix: only GoogleAdsClient is imported at the top of this file, so
    # the bare name `google` used in the `except` clause was unbound and
    # raised a NameError instead of catching the API error. Importing the
    # errors module here binds `google` locally and makes the handler work.
    import google.ads.google_ads.errors

    ad_group_ad_service = client.get_service('AdGroupAdService', version='v2')
    ad_group_ad_operation = client.get_type('AdGroupAdOperation', version='v2')
    ad_group_ad = ad_group_ad_operation.update
    ad_group_ad.resource_name = ad_group_ad_service.ad_group_ad_path(
        customer_id, ResourceName.format_composite(ad_group_id, ad_id))
    ad_group_ad.status = client.get_type('AdGroupStatusEnum',
                                         version='v2').PAUSED
    # Only send the fields that were actually modified.
    fm = protobuf_helpers.field_mask(None, ad_group_ad)
    ad_group_ad_operation.update_mask.CopyFrom(fm)
    try:
        ad_group_ad_response = ad_group_ad_service.mutate_ad_group_ads(
            customer_id, [ad_group_ad_operation])
    except google.ads.google_ads.errors.GoogleAdsException as ex:
        print('Request with ID "%s" failed with status "%s" and includes the '
              'following errors:' % (ex.request_id, ex.error.code().name))
        for error in ex.failure.errors:
            print('\tError with message "%s".' % error.message)
            if error.location:
                for field_path_element in error.location.field_path_elements:
                    print('\t\tOn field: %s' % field_path_element.field_name)
        sys.exit(1)
    print('Paused ad group ad %s.'
          % ad_group_ad_response.results[0].resource_name)
if __name__ == '__main__':
    # GoogleAdsClient reads the google-ads.yaml configuration file in the
    # home directory when no path is specified.
    google_ads_client = GoogleAdsClient.load_from_storage()

    parser = argparse.ArgumentParser(
        description=('Pauses an ad in the specified customer\'s ad group.'))
    # All three identifiers are required to address a single ad.
    parser.add_argument('-c', '--customer_id', type=str, required=True,
                        help='The Google Ads customer ID.')
    parser.add_argument('-a', '--ad_group_id', type=str, required=True,
                        help='The ad group ID.')
    parser.add_argument('-i', '--ad_id', type=str, required=True,
                        help='The ad ID.')
    args = parser.parse_args()

    main(google_ads_client, args.customer_id, args.ad_group_id, args.ad_id)
| true
| true
|
1c42955367038b75ae9e6a93fc585d5e3fda38ec
| 385
|
py
|
Python
|
bahasa/stemmer/disambiguator/prefixes/rule_27.py
|
kangfend/bahasa
|
11f64d4116146dd238c1f2a38146ccf87443e522
|
[
"MIT"
] | 9
|
2016-10-04T10:09:37.000Z
|
2021-11-15T03:48:11.000Z
|
bahasa/stemmer/disambiguator/prefixes/rule_27.py
|
kangfend/bahasa
|
11f64d4116146dd238c1f2a38146ccf87443e522
|
[
"MIT"
] | null | null | null |
bahasa/stemmer/disambiguator/prefixes/rule_27.py
|
kangfend/bahasa
|
11f64d4116146dd238c1f2a38146ccf87443e522
|
[
"MIT"
] | 5
|
2016-09-19T06:37:48.000Z
|
2021-02-20T06:51:50.000Z
|
import re
class Rule27(object):
    """Disambiguate Prefix Rule 27

    Rule 27 : pen{c|d|j|z} -> pen-{c|d|j|z}
    """

    def disambiguate(self, word):
        """Strip the 'pen' prefix when it is followed by c, d, j, or z.

        Returns the candidate stem, or None when the rule does not apply.
        """
        match = re.match(r'^pen([cdjz])(.*)$', word)
        if match is None:
            return None
        return match.group(1) + match.group(2)
| 24.0625
| 54
| 0.527273
|
import re
class Rule27(object):
    """Disambiguate Prefix Rule 27: pen{c|d|j|z} -> pen-{c|d|j|z}."""

    def disambiguate(self, word):
        """Strip the 'pen' prefix when it is followed by c, d, j, or z.

        Returns the candidate stem, or None when the rule does not apply.
        """
        match = re.match(r'^pen([cdjz])(.*)$', word)
        if match is None:
            return None
        return match.group(1) + match.group(2)
| true
| true
|
1c4295c5e9fde962daa9ec269cfdcd10d797ac33
| 11,795
|
py
|
Python
|
train_sage_EL2N_increment.py
|
DAIZHENWEI/FastGCN_pytorch
|
87efe350d5acbe517a0642e9862ac9676b55c053
|
[
"MIT"
] | null | null | null |
train_sage_EL2N_increment.py
|
DAIZHENWEI/FastGCN_pytorch
|
87efe350d5acbe517a0642e9862ac9676b55c053
|
[
"MIT"
] | null | null | null |
train_sage_EL2N_increment.py
|
DAIZHENWEI/FastGCN_pytorch
|
87efe350d5acbe517a0642e9862ac9676b55c053
|
[
"MIT"
] | null | null | null |
import argparse
import time
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
import dgl
import torch.nn as nn
import dgl.nn.pytorch as dglnn
import numpy as np
import pdb
import tqdm
from scipy.sparse.linalg import norm as sparse_norm
from utils import get_batches, accuracy, Entropy_loss
from utils import sparse_mx_to_torch_sparse_tensor
from utils_sage import load_data
from models import SAGE
import pdb
def compute_acc(pred, labels):
    """Fraction of rows in `pred` whose argmax matches `labels`."""
    correct = (torch.argmax(pred, dim=1) == labels).float().sum()
    return correct / len(pred)
def evaluate(model, g, nfeat, labels, val_nid, test_nid, device):
    """
    Evaluate the model on the validation set specified by ``val_mask``.
    g : The entire graph.
    inputs : The features of all the nodes.
    labels : The labels of all the nodes.
    val_mask : A 0-1 mask indicating which nodes do we actually compute the accuracy for.
    device : The GPU device to evaluate on.
    Returns (accuracy on ``test_nid``, full prediction tensor).
    """
    # NOTE(review): reads the module-level `args` global defined in the
    # __main__ block; `val_nid` is accepted but unused here — confirm intent.
    model.eval()
    with torch.no_grad():
        # Presumably full-graph inference — see SAGE.inference; TODO confirm.
        pred = model.inference(g, nfeat, device, args)
    model.train()
    return compute_acc(pred[test_nid], labels[test_nid]), pred
def load_subtensor(nfeat, labels, seeds, input_nodes):
    """Slice out the features for `input_nodes` and the labels for `seeds`."""
    return nfeat[input_nodes], labels[seeds]
#### Entry point
def run(args, model, device, data, epochs, get_embed = False):
    """Train `model` with neighbor sampling for `epochs` epochs.

    `data` is the tuple built in __main__:
    (train_nid, val_nid, test_nid, in_feats, labels, n_classes, nfeat, g).
    Returns the list of per-evaluation test accuracies, plus the final
    prediction tensor when `get_embed` is True.
    NOTE(review): uses the module-level `loss_fcn` and `optimizer` globals
    defined in the __main__ block.
    """
    # Unpack data
    train_nid, val_nid, test_nid, in_feats, labels, n_classes, nfeat, g = data
    # Create PyTorch DataLoader for constructing blocks
    sampler = dgl.dataloading.MultiLayerNeighborSampler(
        [int(fanout) for fanout in args.fan_out.split(',')])
    dataloader = dgl.dataloading.NodeDataLoader(
        g,
        train_nid,
        sampler,
        batch_size=args.batchsize,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers)
    # Training loop
    avg = 0
    iter_tput = []
    best_eval_acc = 0
    best_test_acc = 0
    test_acc_list = []
    for epoch in range(epochs):
        tic = time.time()
        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        for step, (input_nodes, seeds, blocks) in enumerate(dataloader):
            tic_step = time.time()
            # copy block to gpu
            blocks = [blk.int().to(device) for blk in blocks]
            # Load the input features as well as output labels
            batch_inputs, batch_labels = load_subtensor(nfeat, labels, seeds, input_nodes)
            # Compute loss and prediction
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            # Throughput bookkeeping (samples per second for this step).
            iter_tput.append(len(seeds) / (time.time() - tic_step))
            if step % args.log_every == 0:
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = torch.cuda.max_memory_allocated() / 1000000 if torch.cuda.is_available() else 0
                print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(
                    epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
        # print('Number of steps per epochs: {}'.format(step+1))
        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format(toc - tic))
        if epoch >= 5:
            avg += toc - tic
        if epoch % args.eval_every == 0:
            test_acc, pred = evaluate(model, g, nfeat, labels, val_nid, test_nid, device)
            if args.save_pred:
                np.savetxt(args.save_pred + '%02d' % epoch, pred.argmax(1).cpu().numpy(), '%d')
            # print('Eval Acc {:.4f}'.format(eval_acc))
            # if eval_acc > best_eval_acc:
            #     best_eval_acc = eval_acc
            #     best_test_acc = test_acc
            print('Test Acc {:.4f}'.format(test_acc))
            test_acc_list += [test_acc.cpu().numpy()]
    # print('Avg epoch time: {}'.format(avg / (epoch - 4)))
    if get_embed:
        return test_acc_list, pred
    return test_acc_list
if __name__ == '__main__':
    # Command-line interface.
    argparser = argparse.ArgumentParser("multi-gpu training")
    argparser.add_argument('--dataset', type=str, default='cora', help='dataset name.')
    argparser.add_argument('--gpu', type=int, default=0, help="GPU device ID. Use -1 for CPU training")
    argparser.add_argument('--pre_train_epochs', type=int, default=100, help='Number of pre-training epochs.')
    argparser.add_argument('--epochs', type=int, default=1000, help='Number of epochs to train.')
    argparser.add_argument('--num-layers', type=int, default=3)
    argparser.add_argument('--fan-out', type=str, default='5,5,5')
    argparser.add_argument('--val-batch-size', type=int, default=10000)
    argparser.add_argument('--log-every', type=int, default=10)
    argparser.add_argument('--eval-every', type=int, default=1)
    argparser.add_argument('--lr', type=float, default=0.001)
    argparser.add_argument('--hidden', type=int, default=64, help='Number of hidden units.')
    argparser.add_argument('--batchsize', type=int, default=256, help='batchsize for train')
    argparser.add_argument('--sample_freq', type=int, default=20, help='frequnecy of resampling nodes with large uncertainties')
    argparser.add_argument('--dropout', type=float, default=0.0)
    argparser.add_argument('--seed', type=int, default=123, help='Random seed.')
    argparser.add_argument('--num-workers', type=int, default=4, help="Number of sampling processes. Use 0 for no extra process.")
    argparser.add_argument('--pretrain_ratio', type=float, default=0.05, help='Proportion of samples used for training')
    argparser.add_argument('--ratio', type=float, default=0.01, help='Proportion of samples picked at each round')
    argparser.add_argument('--save-pred', type=str, default='')
    argparser.add_argument('--wd', type=float, default=0)
    argparser.add_argument('--remove_degree_one', action='store_true', default=False,
                           help='Recursively remove the nodes with degree one from the adjacency matrix (remove corresponding edges).')
    args = argparser.parse_args()

    # Reproducibility and device selection.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if args.gpu != -1:
        torch.cuda.manual_seed(args.seed)
    device = torch.device('cuda:%d' % args.gpu) if args.gpu >= 0 else torch.device('cpu')

    # Load the graph data and one-hot encode the training labels.
    train_index, valid_index, test_index, in_feats, labels, n_classes, feats, graph, adj_train, _ = load_data(args.dataset, args)
    y_train = labels[train_index]
    y_train_onehot = F.one_hot(y_train, num_classes=n_classes)

    # Model, loss, and optimizer (loss_fcn/optimizer are read as globals by `run`).
    model = SAGE(in_feats, args.hidden, n_classes, args.num_layers, F.relu, args.dropout)
    model = model.to(device)
    loss_fcn = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
    num_train_sampled = []

    # Randomly pick an initial pool of labelled nodes for pre-training.
    train_index = train_index.numpy()
    num_sampled = int(len(train_index) * args.pretrain_ratio)
    train_ind_sampled = np.random.choice(train_index, num_sampled, replace=False)
    labels = labels.to(device)
    feats = feats.to(device)
    data = train_ind_sampled, valid_index, test_index, in_feats, labels, n_classes, feats, graph

    # Pre-training phase.
    test_accs, pred = run(args, model, device, data, args.pre_train_epochs, get_embed=True)
    test_acc_list = test_accs
    num_train_sampled = [num_sampled] * args.pre_train_epochs

    # Incremental training: every `sample_freq` epochs, add the training
    # nodes with the largest EL2N (prediction-error norm) scores.
    Entropy = Entropy_loss()
    sample_freq = args.sample_freq
    for epochs in range(0, (args.epochs - args.pre_train_epochs) // sample_freq):
        y_pred = pred[train_index].detach().cpu()
        EL2N = torch.norm((y_pred - y_train_onehot), dim=1).numpy()
        del pred
        num_sampled = int(len(train_index) * args.ratio)
        train_index_sort = torch.from_numpy(train_index[EL2N.argsort()[::-1]])
        train_ind_new = train_index_sort[:num_sampled]
        train_ind_new_sampled = np.intersect1d(train_ind_new, train_ind_sampled)
        train_ind_sampled = np.unique(np.concatenate((train_ind_sampled, train_ind_new)))
        print("Number of newly picked samples: {}, Number of total selected samples: {}".format(len(train_ind_new), len(train_ind_sampled)))
        print("Number of large entropy nodes already sampled: {}".format(len(train_ind_new_sampled)))
        data = train_ind_sampled, valid_index, test_index, in_feats, labels, n_classes, feats, graph
        test_accs, pred = run(args, model, device, data, sample_freq, get_embed=True)
        test_acc_list += test_accs
        num_train_sampled += [len(train_ind_sampled)] * sample_freq

    # Persist the accuracy and sample-count traces.
    directory = './save/{}/'.format(args.dataset)
    if not os.path.exists(directory):
        os.makedirs(directory)
    np.save(directory + 'GraphSage_accuracy_list_{}_active_random_EL2N_increment_pretrain{}_epochs{}_pretrain_ratio{}_ratio{}_sample_freq{}.npy'.format(
        args.dataset, args.pre_train_epochs, args.epochs, args.pretrain_ratio, args.ratio, args.sample_freq), test_acc_list)
    np.save(directory + 'Sample_count_list_{}_active_random_EL2N_increment_pretrain{}_epochs{}_pretrain_ratio{}_ratio{}_sample_freq{}.npy'.format(
        args.dataset, args.pre_train_epochs, args.epochs, args.pretrain_ratio, args.ratio, args.sample_freq), num_train_sampled)
| 48.539095
| 166
| 0.59432
|
import argparse
import time
import os
import torch
import torch.nn.functional as F
import torch.optim as optim
import dgl
import torch.nn as nn
import dgl.nn.pytorch as dglnn
import numpy as np
import pdb
import tqdm
from scipy.sparse.linalg import norm as sparse_norm
from utils import get_batches, accuracy, Entropy_loss
from utils import sparse_mx_to_torch_sparse_tensor
from utils_sage import load_data
from models import SAGE
import pdb
def compute_acc(pred, labels):
return (torch.argmax(pred, dim=1) == labels).float().sum() / len(pred)
def evaluate(model, g, nfeat, labels, val_nid, test_nid, device):
model.eval()
with torch.no_grad():
pred = model.inference(g, nfeat, device, args)
model.train()
return compute_acc(pred[test_nid], labels[test_nid]), pred
def load_subtensor(nfeat, labels, seeds, input_nodes):
batch_inputs = nfeat[input_nodes]
batch_labels = labels[seeds]
return batch_inputs, batch_labels
, get_embed = False):
train_nid, val_nid, test_nid, in_feats, labels, n_classes, nfeat, g = data
sampler = dgl.dataloading.MultiLayerNeighborSampler(
[int(fanout) for fanout in args.fan_out.split(',')])
dataloader = dgl.dataloading.NodeDataLoader(
g,
train_nid,
sampler,
batch_size=args.batchsize,
shuffle=True,
drop_last=False,
num_workers=args.num_workers)
avg = 0
iter_tput = []
best_eval_acc = 0
best_test_acc = 0
test_acc_list = []
for epoch in range(epochs):
tic = time.time()
for step, (input_nodes, seeds, blocks) in enumerate(dataloader):
tic_step = time.time()
blocks = [blk.int().to(device) for blk in blocks]
batch_inputs, batch_labels = load_subtensor(nfeat, labels, seeds, input_nodes)
batch_pred = model(blocks, batch_inputs)
loss = loss_fcn(batch_pred, batch_labels)
optimizer.zero_grad()
loss.backward()
optimizer.step()
iter_tput.append(len(seeds) / (time.time() - tic_step))
if step % args.log_every == 0:
acc = compute_acc(batch_pred, batch_labels)
gpu_mem_alloc = torch.cuda.max_memory_allocated() / 1000000 if torch.cuda.is_available() else 0
print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MB'.format(
epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))
toc = time.time()
print('Epoch Time(s): {:.4f}'.format(toc - tic))
if epoch >= 5:
avg += toc - tic
if epoch % args.eval_every == 0:
test_acc, pred = evaluate(model, g, nfeat, labels, val_nid, test_nid, device)
if args.save_pred:
np.savetxt(args.save_pred + '%02d' % epoch, pred.argmax(1).cpu().numpy(), '%d')
print('Test Acc {:.4f}'.format(test_acc))
test_acc_list += [test_acc.cpu().numpy()]
if get_embed:
return test_acc_list, pred
return test_acc_list
if __name__ == '__main__':
argparser = argparse.ArgumentParser("multi-gpu training")
argparser.add_argument('--dataset', type=str, default='cora', help='dataset name.')
argparser.add_argument('--gpu', type=int, default=0,
help="GPU device ID. Use -1 for CPU training")
argparser.add_argument('--pre_train_epochs', type=int, default=100, help='Number of pre-training epochs.')
argparser.add_argument('--epochs', type=int, default=1000, help='Number of epochs to train.')
argparser.add_argument('--num-layers', type=int, default=3)
argparser.add_argument('--fan-out', type=str, default='5,5,5')
argparser.add_argument('--val-batch-size', type=int, default=10000)
argparser.add_argument('--log-every', type=int, default=10)
argparser.add_argument('--eval-every', type=int, default=1)
argparser.add_argument('--lr', type=float, default=0.001)
argparser.add_argument('--hidden', type=int, default=64, help='Number of hidden units.')
argparser.add_argument('--batchsize', type=int, default=256,
help='batchsize for train')
argparser.add_argument('--sample_freq', type=int, default=20, help = 'frequnecy of resampling nodes with large uncertainties')
argparser.add_argument('--dropout', type=float, default=0.0)
argparser.add_argument('--seed', type=int, default=123, help='Random seed.')
argparser.add_argument('--num-workers', type=int, default=4, help="Number of sampling processes. Use 0 for no extra process.")
argparser.add_argument('--pretrain_ratio', type=float, default=0.05,
help='Proportion of samples used for training')
argparser.add_argument('--ratio', type=float, default=0.01,
help='Proportion of samples picked at each round')
argparser.add_argument('--save-pred', type=str, default='')
argparser.add_argument('--wd', type=float, default=0)
argparser.add_argument('--remove_degree_one', action='store_true', default=False,
help='Recursively remove the nodes with degree one from the adjacency matrix (remove corresponding edges).')
args = argparser.parse_args()
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.gpu != -1:
torch.cuda.manual_seed(args.seed)
if args.gpu >= 0:
device = torch.device('cuda:%d' % args.gpu)
else:
device = torch.device('cpu')
train_index, valid_index, test_index, in_feats, labels, n_classes, feats, graph, adj_train, _ = load_data(args.dataset, args)
y_train = labels[train_index]
y_train_onehot = F.one_hot(y_train, num_classes=n_classes)
n_feats, args.hidden, n_classes, args.num_layers, F.relu, args.dropout)
model = model.to(device)
loss_fcn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.wd)
num_train_sampled = []
train_index = train_index.numpy()
num_sampled = int(len(train_index) * args.pretrain_ratio)
train_ind_sampled = np.random.choice(train_index, num_sampled, replace = False)
labels = labels.to(device)
feats = feats.to(device)
data = train_ind_sampled, valid_index, test_index, in_feats, labels, n_classes, feats, graph
test_accs, pred = run(args, model, device, data, args.pre_train_epochs, get_embed=True)
test_acc_list = test_accs
num_train_sampled = [num_sampled] * args.pre_train_epochs
Entropy = Entropy_loss()
sample_freq = args.sample_freq
for epochs in range(0, (args.epochs - args.pre_train_epochs) // sample_freq):
y_pred = pred[train_index].detach().cpu()
EL2N = torch.norm((y_pred - y_train_onehot), dim = 1).numpy()
del pred
num_sampled = int(len(train_index) * args.ratio)
train_index_sort = torch.from_numpy(train_index[EL2N.argsort()[::-1]])
train_ind_new = train_index_sort[:num_sampled]
train_ind_new_sampled = np.intersect1d(train_ind_new, train_ind_sampled)
train_ind_sampled = np.unique(np.concatenate((train_ind_sampled, train_ind_new)))
print("Number of newly picked samples: {}, Number of total selected samples: {}".format(len(train_ind_new), len(train_ind_sampled)))
print("Number of large entropy nodes already sampled: {}".format(len(train_ind_new_sampled)))
data = train_ind_sampled, valid_index, test_index, in_feats, labels, n_classes, feats, graph
test_accs, pred = run(args, model, device, data, sample_freq, get_embed=True)
test_acc_list += test_accs
num_train_sampled += [len(train_ind_sampled)] * sample_freq
directory = './save/{}/'.format(args.dataset)
if not os.path.exists(directory):
os.makedirs(directory)
np.save(directory + 'GraphSage_accuracy_list_{}_active_random_EL2N_increment_pretrain{}_epochs{}_pretrain_ratio{}_ratio{}_sample_freq{}.npy'.format(args.dataset,
args.pre_train_epochs,
args.epochs,
args.pretrain_ratio,
args.ratio,
args.sample_freq), test_acc_list)
np.save(directory + 'Sample_count_list_{}_active_random_EL2N_increment_pretrain{}_epochs{}_pretrain_ratio{}_ratio{}_sample_freq{}.npy'.format(args.dataset,
args.pre_train_epochs,
args.epochs,
args.pretrain_ratio,
args.ratio,
args.sample_freq), num_train_sampled)
| true
| true
|
1c4295c9cd865d1ecee704e8b8578562c6d021ee
| 207
|
py
|
Python
|
examples/pkg_json_fallback/pkg_json_fallback/__init__.py
|
ioam/autover
|
66e9a3118697cef252b360b63697086a6ecf987a
|
[
"BSD-3-Clause"
] | 3
|
2018-05-25T12:08:12.000Z
|
2019-06-09T02:34:46.000Z
|
examples/pkg_json_fallback/pkg_json_fallback/__init__.py
|
ioam/autover
|
66e9a3118697cef252b360b63697086a6ecf987a
|
[
"BSD-3-Clause"
] | 36
|
2017-12-08T13:48:28.000Z
|
2018-04-24T13:09:29.000Z
|
examples/pkg_json_fallback/pkg_json_fallback/__init__.py
|
ioam/autover
|
66e9a3118697cef252b360b63697086a6ecf987a
|
[
"BSD-3-Clause"
] | 1
|
2020-04-13T12:01:31.000Z
|
2020-04-13T12:01:31.000Z
|
# Only testing the JSON fallback. See pkg_bundle or pkg_depend for complete example.
import os, json

# Read the version string that autover writes into the package's `.version`
# JSON file at build time.  Use a context manager so the file handle is closed
# deterministically (the original left the open file to the garbage collector).
with open(os.path.join(os.path.dirname(__file__), '.version'), 'r') as _version_file:
    __version__ = json.load(_version_file)['version_string']
| 41.4
| 104
| 0.758454
|
import os, json
__version__ = json.load(open(os.path.join(os.path.dirname(__file__),'.version'), 'r'))['version_string']
| true
| true
|
1c4295ea77ae8346ccc4dfd6a5e4aa5accf24713
| 7,863
|
py
|
Python
|
src/step1_split_text.py
|
lychyzclc/High-throughput-relation-extraction-algorithm
|
93530ddcb78df3f1b1b7fda34821fa307d095c74
|
[
"MIT"
] | 1
|
2021-01-04T03:15:50.000Z
|
2021-01-04T03:15:50.000Z
|
src/step1_split_text.py
|
lychyzclc/High-throughput-relation-extraction-algorithm
|
93530ddcb78df3f1b1b7fda34821fa307d095c74
|
[
"MIT"
] | null | null | null |
src/step1_split_text.py
|
lychyzclc/High-throughput-relation-extraction-algorithm
|
93530ddcb78df3f1b1b7fda34821fa307d095c74
|
[
"MIT"
] | null | null | null |
import argparse
import logging
import math
import re
from itertools import islice
from multiprocessing import Process
import spacy
from util.logger import get_logger
logger = logging.getLogger(__name__)
def strip_title(sub_str):
    """Return *sub_str* truncated just before the first ``==...==`` heading.

    If no heading marker occurs in the string it is returned unchanged.
    """
    match = re.search('==+.*?==+', sub_str)
    return sub_str if match is None else sub_str[:match.start()]
def get_next(string):
    """Peel the next logical segment off the front of *string*.

    Returns ``(segment, remainder, mark)`` where *remainder* is the unread
    tail of *string* and *mark* describes what the segment was:
    0 -> text that started with a '<<<'/'>>>' paragraph marker,
    1 -> a '==...==' section heading (or a bare run of '='),
    2 -> plain text running up to (but not including) the next heading.
    """
    sub_str = ''
    mark = 0
    if re.match('<<<|>>>', string):
        # NOTE(review): '[>>>|<<<]' is a character CLASS matching one of
        # '>', '<' or '|', not the alternation '(?:>>>|<<<)' that was likely
        # intended.  The overall span usually comes out the same because the
        # lazy '.*?' absorbs the remaining marker characters -- verify.
        r = re.match('[>>>|<<<].*?==+.*?==+|[>>>|<<<].*?$', string).span()
        sub_str = string[:r[1]]
        # Drop any trailing '==...==' heading caught by the first alternative.
        sub_str = strip_title(sub_str)
        mark = 0
    elif string.startswith('=='):
        r = re.match('==+.+?==+', string)
        if r:
            sub_str = string[:r.span()[1]]
        else:
            # Unterminated heading: consume just the run of '=' characters.
            r = re.match('=+', string).span()
            sub_str = string[:r[1]]
        mark = 1
    else:
        # Plain text up to the next heading, or everything if none follows.
        r = re.match('.*?==+.*?==+|.*$', string).span()
        sub_str = string[:r[1]]
        sub_str = strip_title(sub_str)
        mark = 2
    # The remainder starts exactly where the (possibly title-stripped)
    # segment ends in the original string.
    split = len(sub_str)
    return sub_str, string[split:], mark
def get_titles(title_list):
    """Format the (up to four) nested section titles as a single field.

    Titles are joined with '.' and the result is padded with '. ' up to
    four slots; an empty stack yields the placeholder ' . . . '.
    """
    if not title_list:
        return ' . . . '
    padding = '. ' * (4 - len(title_list))
    return '.'.join(title_list) + padding
def output(output_file, splitter, lines, n, error_file):
    """Worker: sentence-split a batch of articles and write one output shard.

    Each input line is expected to look like ``ID|main_title|content``.
    Every emitted row is ``sent_num|ID|title|sentence|sub_titles|char_offset``
    ('|'-separated) appended to ``<output_file><n>.txt``.  Articles whose
    content makes the splitter raise are copied verbatim to *error_file*.

    Args:
        output_file: path prefix for this worker's shard file.
        splitter: a spaCy pipeline, used only for sentence segmentation.
        lines: the raw article lines assigned to this worker.
        n: worker index, also used as the shard suffix.
        error_file: path collecting articles that failed to split.
    """
    logger.info('starting subprocess %d...' % n)
    with open(output_file + str(n) + '.txt', 'w', encoding='utf-8') as outfile:
        i = 0
        for line in lines:
            if i % 10 == 0:
                logger.info('subprocess %d: %d finished' % (n, i))
            line_list = line.split('|')
            ID = line_list[0]
            main_title = line_list[1]
            # Character offsets of the title / content fields inside the
            # original line; the '+ 1' skips each '|' separator.
            main_title_p = len(ID) + 1
            content = line_list[2].strip()
            content_p = main_title_p + len(main_title) + 1
            title_list = []
            out_list = []
            # flag stays 1 while no splitter error has occurred for this line.
            flag = 1
            sent_num = 0
            # NOTE(review): IDs starting with '5'/'6' are treated as a special
            # class whose main title is split too and whose rows carry the
            # '<EMPTY_TITLE>' placeholder -- confirm the ID scheme upstream.
            if ID.startswith('5') or ID.startswith('6'):
                sents = splitter(main_title)
                for s in sents.sents:
                    s = str(s)
                    temp = main_title_p + main_title.find(s)
                    out_list.append(str(sent_num) + '|' + ID + '|' + '<EMPTY_TITLE>' + '|' +
                                    s.strip() + '|' + ' . . . ' + '|' +
                                    str(temp))
                    sent_num += 1
                while (content):
                    sub, content, mark = get_next(content)
                    if not sub:
                        logger.info('error ' + ID)
                        break
                    if mark == 2:
                        # Plain text: prepend a synthetic '>>>' marker so the
                        # re.split below exposes the text as elements [1:];
                        # the offset is pre-compensated for the fake 3 chars.
                        sub = '>>>' + sub
                        content_p -= 3
                    content_list = re.split('>>>|<<<', sub)
                    for con in content_list[1:]:
                        # Each removed marker still occupies 3 characters in
                        # the original line, so advance the running offset.
                        content_p += 3
                        if con.strip().strip('■').strip():
                            try:
                                sents = splitter(con)
                                for s in sents.sents:
                                    s = str(s)
                                    temp = content_p + con.find(s)
                                    out_list.append(str(sent_num) + "|" + ID + '|' +
                                                    '<EMPTY_TITLE>' + '|' +
                                                    s.strip() + '|' +
                                                    ' . . . ' + '|' +
                                                    str(temp))
                                    sent_num += 1
                            except:
                                # NOTE(review): bare except swallows everything
                                # (incl. KeyboardInterrupt); the article is
                                # rerouted to error_file via flag below.
                                logger.info('error ' + ID)
                                flag = 0
                        content_p += len(con)
            else:
                while (content):
                    sub, content, mark = get_next(content)
                    if not sub:
                        logger.info('error ' + ID)
                        break
                    if mark == 1:
                        # Section heading: depth = (#'=' chars)/2 - 2.  Pop the
                        # title stack down to that depth, then push this title.
                        content_p += len(sub)
                        if sub.strip('='):
                            c = sub.count('=') / 2 - 2
                            while (c < len(title_list)):
                                title_list.pop()
                            title_list.append(sub.strip('='))
                    else:
                        if mark == 2:
                            # Same synthetic-marker trick as in the branch above.
                            sub = '>>>' + sub
                            content_p -= 3
                        sub_titles = get_titles(title_list)
                        content_list = re.split('>>>|<<<', sub)
                        for con in content_list[1:]:
                            content_p += 3
                            if con.strip().strip('■').strip():
                                try:
                                    sents = splitter(con)
                                    for s in sents.sents:
                                        s = str(s)
                                        temp = content_p + con.find(s)
                                        out_list.append(str(sent_num) + "|" + ID + '|' + main_title +
                                                        '|' + s.strip() + '|' +
                                                        sub_titles + '|' +
                                                        str(temp))
                                        sent_num += 1
                                except:
                                    logger.info('error ' + ID)
                                    flag = 0
                            content_p += len(con)
            if out_list and flag:
                outfile.writelines('\n'.join(out_list) + '\n')
            elif not flag:
                # At least one segment failed: keep the raw article line for
                # later inspection / reprocessing.
                with open(error_file, 'a+') as f:
                    f.writelines(line)
            i += 1
    logger.info('subprocess %d: all finished' % (n))
if __name__ == '__main__':
    # CLI: split each article of the input corpus into sentences, optionally
    # fanning the work out over --proc worker processes.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_file',
                        type=str,
                        help='The path to input file')
    parser.add_argument('--text_length',
                        type=str,
                        help='The num of rows of the input file')
    parser.add_argument('--output_file',
                        type=str,
                        help='The path to output file')
    parser.add_argument('--error_file',
                        type=str,
                        help='The path to output error')
    parser.add_argument('--log_file',
                        type=str,
                        help='The path to output log')
    parser.add_argument('--proc',
                        default=None,
                        help='process number for multiprocess')
    args = parser.parse_args()
    logger = get_logger(logger, args.log_file)
    # spaCy pipeline used purely as a sentence splitter inside output().
    splitter = spacy.load('en_core_web_sm')
    if args.proc is not None:
        # Partition the input into --proc contiguous chunks of ~n lines and
        # hand each chunk to its own worker process.
        num_proc = int(args.proc)
        n = int(math.ceil(int(args.text_length) / num_proc))
        with open(args.input_file) as infile:
            p_list = []
            # BUG FIX: the worker count was hard-coded to range(8), ignoring
            # --proc entirely; with --proc > 8 the tail of the input was
            # silently dropped, and with --proc < 8 idle workers were spawned.
            for i in range(num_proc):
                next_n_lines = list(islice(infile, n))
                p = Process(target=output,
                            args=(args.output_file, splitter, next_n_lines, i,
                                  args.error_file))
                p.start()
                p_list.append(p)
            for ap in p_list:
                ap.join()
    else:
        # Single-process fallback: run the whole file inline as worker 0.
        with open(args.input_file) as infile:
            lines = infile.readlines()
            output(args.output_file, splitter, lines, 0, args.error_file)
| 39.315
| 102
| 0.375684
|
import argparse
import logging
import math
import re
from itertools import islice
from multiprocessing import Process
import spacy
from util.logger import get_logger
logger = logging.getLogger(__name__)
def strip_title(sub_str):
r = re.search('==+.*?==+', sub_str)
if r:
return sub_str[:r.span()[0]]
else:
return sub_str
def get_next(string):
sub_str = ''
mark = 0
if re.match('<<<|>>>', string):
r = re.match('[>>>|<<<].*?==+.*?==+|[>>>|<<<].*?$', string).span()
sub_str = string[:r[1]]
sub_str = strip_title(sub_str)
mark = 0
elif string.startswith('=='):
r = re.match('==+.+?==+', string)
if r:
sub_str = string[:r.span()[1]]
else:
r = re.match('=+', string).span()
sub_str = string[:r[1]]
mark = 1
else:
r = re.match('.*?==+.*?==+|.*$', string).span()
sub_str = string[:r[1]]
sub_str = strip_title(sub_str)
mark = 2
split = len(sub_str)
return sub_str, string[split:], mark
def get_titles(title_list):
titles = ''
if len(title_list):
titles = '.'.join(title_list) + '. ' * (4 - len(title_list))
else:
titles = ' . . . '
return titles
def output(output_file, splitter, lines, n, error_file):
logger.info('starting subprocess %d...' % n)
with open(output_file + str(n) + '.txt', 'w', encoding='utf-8') as outfile:
i = 0
for line in lines:
if i % 10 == 0:
logger.info('subprocess %d: %d finished' % (n, i))
line_list = line.split('|')
ID = line_list[0]
main_title = line_list[1]
main_title_p = len(ID) + 1
content = line_list[2].strip()
content_p = main_title_p + len(main_title) + 1
title_list = []
out_list = []
flag = 1
sent_num = 0
if ID.startswith('5') or ID.startswith('6'):
sents = splitter(main_title)
for s in sents.sents:
s = str(s)
temp = main_title_p + main_title.find(s)
out_list.append(str(sent_num) + '|' + ID + '|' + '<EMPTY_TITLE>' + '|' +
s.strip() + '|' + ' . . . ' + '|' +
str(temp))
sent_num += 1
while (content):
sub, content, mark = get_next(content)
if not sub:
logger.info('error ' + ID)
break
if mark == 2:
sub = '>>>' + sub
content_p -= 3
content_list = re.split('>>>|<<<', sub)
for con in content_list[1:]:
content_p += 3
if con.strip().strip('■').strip():
try:
sents = splitter(con)
for s in sents.sents:
s = str(s)
temp = content_p + con.find(s)
out_list.append(str(sent_num) + "|" + ID + '|' +
'<EMPTY_TITLE>' + '|' +
s.strip() + '|' +
' . . . ' + '|' +
str(temp))
sent_num += 1
except:
logger.info('error ' + ID)
flag = 0
content_p += len(con)
else:
while (content):
sub, content, mark = get_next(content)
if not sub:
logger.info('error ' + ID)
break
if mark == 1:
content_p += len(sub)
if sub.strip('='):
c = sub.count('=') / 2 - 2
while (c < len(title_list)):
title_list.pop()
title_list.append(sub.strip('='))
else:
if mark == 2:
sub = '>>>' + sub
content_p -= 3
sub_titles = get_titles(title_list)
content_list = re.split('>>>|<<<', sub)
for con in content_list[1:]:
content_p += 3
if con.strip().strip('■').strip():
try:
sents = splitter(con)
for s in sents.sents:
s = str(s)
temp = content_p + con.find(s)
out_list.append(str(sent_num) + "|" + ID + '|' + main_title +
'|' + s.strip() + '|' +
sub_titles + '|' +
str(temp))
sent_num += 1
except:
logger.info('error ' + ID)
flag = 0
content_p += len(con)
if out_list and flag:
outfile.writelines('\n'.join(out_list) + '\n')
elif not flag:
with open(error_file, 'a+') as f:
f.writelines(line)
i += 1
logger.info('subprocess %d: all finished' % (n))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--input_file',
type=str,
help='The path to input file')
parser.add_argument('--text_length',
type=str,
help='The num of rows of the input file')
parser.add_argument('--output_file',
type=str,
help='The path to output file')
parser.add_argument('--error_file',
type=str,
help='The path to output error')
parser.add_argument('--log_file',
type=str,
help='The path to output log')
parser.add_argument('--proc',
default=None,
help='process number for multiprocess')
args = parser.parse_args()
logger = get_logger(logger, args.log_file)
splitter = spacy.load('en_core_web_sm')
if args.proc is not None:
text_list = []
n = int(math.ceil(int(args.text_length) / int(args.proc)))
with open(args.input_file) as infile:
p_list = []
for i in range(8):
next_n_lines = list(islice(infile, n))
p = Process(target=output,
args=(args.output_file, splitter, next_n_lines, i,
args.error_file))
p.start()
p_list.append(p)
for ap in p_list:
ap.join()
else:
with open(args.input_file) as infile:
lines = infile.readlines()
output(args.output_file, splitter, lines, 0, args.error_file)
| true
| true
|
1c429768559837ca305652a8cdfd69ecf9b899b5
| 2,840
|
py
|
Python
|
input_data/make_go_plus_holdouts.py
|
Knowledge-Graph-Hub/NEAT
|
744b8abd6fc08037be349e2e637d891d80100771
|
[
"BSD-3-Clause"
] | 6
|
2020-10-27T15:54:26.000Z
|
2022-02-28T15:41:06.000Z
|
input_data/make_go_plus_holdouts.py
|
Knowledge-Graph-Hub/NEAT
|
744b8abd6fc08037be349e2e637d891d80100771
|
[
"BSD-3-Clause"
] | 52
|
2020-11-24T18:27:44.000Z
|
2022-03-31T16:20:06.000Z
|
input_data/make_go_plus_holdouts.py
|
Knowledge-Graph-Hub/NEAT
|
744b8abd6fc08037be349e2e637d891d80100771
|
[
"BSD-3-Clause"
] | 1
|
2021-08-23T06:04:02.000Z
|
2021-08-23T06:04:02.000Z
|
import os
from ensmallen_graph import EnsmallenGraph
from shutil import which
from neat.graph_embedding.graph_embedding import get_node_data, \
merge_and_write_complete_node_data
# Fraction of edges kept for the training split; the rest go to validation.
train_percentage = 0.8
# Fixed RNG seed so the holdout splits are reproducible.
seed = 42
# Download / conversion artifacts: the GO-plus ontology JSON and the KGX
# node/edge TSVs derived from it.
go_plus_json_file = "go_plus.json"
go_plus_edges_file = "go_plus_edges.tsv"
go_plus_nodes_file = "go_plus_nodes.tsv"
def is_tool(name):
    """Return True when *name* resolves to an executable on the current PATH."""
    resolved = which(name)
    return resolved is not None
# KGX is invoked through os.system below, so it must be on PATH.
if not is_tool("kgx"):
    raise Exception("Need to install KGX! (pip install kgx)") # might also need to roll bmt back to v 0.1.1
# Fetch the GO-plus ontology once and convert it to KGX node/edge TSVs;
# both steps are skipped when their outputs already exist on disk.
if not os.path.exists(go_plus_json_file):
    os.system("wget http://purl.obolibrary.org/obo/go/extensions/go-plus.json -O go_plus.json")
if not os.path.exists(go_plus_edges_file) or not os.path.exists(go_plus_nodes_file):
    os.system("kgx transform --input-format obojson --output-format tsv --output go_plus go_plus.json")
# One holdout per edge-type combination; each gets its own output directory.
for edges in (
        ['biolink:subclass_of'],
):
    edges_string = "_".join(edges)
    os.makedirs(edges_string, exist_ok=True)
    # Load the full undirected graph from the KGX TSVs.
    graph = EnsmallenGraph.from_unsorted_csv(
        edge_path=go_plus_edges_file,
        sources_column="subject",
        destinations_column="object",
        edge_types_column='edge_label',
        directed=False,
        node_path=go_plus_nodes_file,
        nodes_column='id',
        node_types_column='category',
        default_node_type='biolink:NamedThing'
    )
    # Drop isolated nodes before splitting.
    reduced_graph = graph.remove(singletons=True)
    # Positive split: hold out edges of the chosen type(s) while keeping the
    # training graph connected.
    pos_training, pos_validation = reduced_graph.connected_holdout(
        train_size=train_percentage,
        edge_types=edges,
        random_state=seed)
    # make negative graph
    neg_training, neg_validation = reduced_graph.sample_negatives(
        random_state=seed,
        only_from_same_component=True,
        negatives_number=graph.get_edges_number(),
    ).random_holdout(random_state=seed, train_size=train_percentage)
    reduced_graph.dump_nodes(os.path.join(edges_string, f"go_plus_nodes_training.tsv"))
    node_data = get_node_data(os.path.join(edges_string, f"go_plus_nodes_training.tsv"))
    # Re-merge the training nodes with the full node table so no columns are lost.
    merge_and_write_complete_node_data(
        go_plus_nodes_file,
        node_data,
        os.path.join(edges_string, f"go_plus_nodes_training.tsv")
    )
    pos_training.dump_edges(os.path.join(edges_string, f"go_plus_edges_training.tsv"))
    pos_validation.dump_edges(os.path.join(edges_string, f"go_plus_edges_validation.tsv"))
    neg_training.dump_edges(os.path.join(edges_string, f"go_plus_edges_neg_training.tsv"))
    neg_validation.dump_edges(os.path.join(edges_string, f"go_plus_edges_neg_validation.tsv"))
    # Append a literal tab + 'label' to every row of the negative-edge files
    # (the \t in the f-string is a real tab character handed to sed).
    # NOTE(review): `sed -i '.bak'` is BSD/macOS syntax; GNU sed requires
    # `-i.bak` (no space) and would treat '.bak' as the script -- confirm the
    # target platform.
    os.system(f"sed -i '.bak' 's/$/\tlabel/' {edges_string}/go_plus_edges_neg_training.tsv")
    os.system(f"sed -i '.bak' 's/$/\tlabel/' {edges_string}/go_plus_edges_neg_validation.tsv")
| 36.410256
| 107
| 0.73838
|
import os
from ensmallen_graph import EnsmallenGraph
from shutil import which
from neat.graph_embedding.graph_embedding import get_node_data, \
merge_and_write_complete_node_data
train_percentage = 0.8
seed = 42
go_plus_json_file = "go_plus.json"
go_plus_edges_file = "go_plus_edges.tsv"
go_plus_nodes_file = "go_plus_nodes.tsv"
def is_tool(name):
return which(name) is not None
if not is_tool("kgx"):
raise Exception("Need to install KGX! (pip install kgx)")
if not os.path.exists(go_plus_json_file):
os.system("wget http://purl.obolibrary.org/obo/go/extensions/go-plus.json -O go_plus.json")
if not os.path.exists(go_plus_edges_file) or not os.path.exists(go_plus_nodes_file):
os.system("kgx transform --input-format obojson --output-format tsv --output go_plus go_plus.json")
for edges in (
['biolink:subclass_of'],
):
edges_string = "_".join(edges)
os.makedirs(edges_string, exist_ok=True)
graph = EnsmallenGraph.from_unsorted_csv(
edge_path=go_plus_edges_file,
sources_column="subject",
destinations_column="object",
edge_types_column='edge_label',
directed=False,
node_path=go_plus_nodes_file,
nodes_column='id',
node_types_column='category',
default_node_type='biolink:NamedThing'
)
reduced_graph = graph.remove(singletons=True)
pos_training, pos_validation = reduced_graph.connected_holdout(
train_size=train_percentage,
edge_types=edges,
random_state=seed)
neg_training, neg_validation = reduced_graph.sample_negatives(
random_state=seed,
only_from_same_component=True,
negatives_number=graph.get_edges_number(),
).random_holdout(random_state=seed, train_size=train_percentage)
reduced_graph.dump_nodes(os.path.join(edges_string, f"go_plus_nodes_training.tsv"))
node_data = get_node_data(os.path.join(edges_string, f"go_plus_nodes_training.tsv"))
merge_and_write_complete_node_data(
go_plus_nodes_file,
node_data,
os.path.join(edges_string, f"go_plus_nodes_training.tsv")
)
pos_training.dump_edges(os.path.join(edges_string, f"go_plus_edges_training.tsv"))
pos_validation.dump_edges(os.path.join(edges_string, f"go_plus_edges_validation.tsv"))
neg_training.dump_edges(os.path.join(edges_string, f"go_plus_edges_neg_training.tsv"))
neg_validation.dump_edges(os.path.join(edges_string, f"go_plus_edges_neg_validation.tsv"))
os.system(f"sed -i '.bak' 's/$/\tlabel/' {edges_string}/go_plus_edges_neg_training.tsv")
os.system(f"sed -i '.bak' 's/$/\tlabel/' {edges_string}/go_plus_edges_neg_validation.tsv")
| true
| true
|
1c4297c9f2abc51a2141ed38f24c6e7f25f9e979
| 1,614
|
py
|
Python
|
hcl-api.py
|
briamorr/UCSHCL
|
c4aa8df78dc10b37499e73fca44a6f742b0fb802
|
[
"MIT"
] | null | null | null |
hcl-api.py
|
briamorr/UCSHCL
|
c4aa8df78dc10b37499e73fca44a6f742b0fb802
|
[
"MIT"
] | null | null | null |
hcl-api.py
|
briamorr/UCSHCL
|
c4aa8df78dc10b37499e73fca44a6f742b0fb802
|
[
"MIT"
] | null | null | null |
# Walks the public Cisco UCS Hardware Compatibility List (HCL) REST API,
# drilling OS vendors -> OS versions -> server types -> server models and
# printing every M4/M5 B-/C-Series model with the OS version it supports.
from time import sleep
import requests
# NOTE(review): `results` is never populated or read below -- dead variable.
results = []
# Level 1: all OS vendors.
resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadOsVendors',
                     headers={'Content-Type': 'application/x-www-form-urlencoded'})
for r in resp.json():
    # Skip Microsoft and VMware vendor entries.
    if "Microsoft" not in r['OSVENDOR'] and "VMware" not in r['OSVENDOR']:
        payload = "treeIdVendor=" + str(r['T_ID'])
        # Level 2: OS versions for this vendor.  Rebinding `resp` is safe
        # because each enclosing loop already materialized its own list via
        # its earlier resp.json() call.
        resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadOsVersions', data=payload,
                             headers={'Content-Type': 'application/x-www-form-urlencoded'})
        for s in resp.json():
            payload = "treeIdOSVersion=" + str(s['T_ID'])
            # Level 3: server types / releases for this OS version.
            resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadServerTypes',
                                 data=payload, headers={'Content-Type': 'application/x-www-form-urlencoded'})
            # Small delay to avoid hammering the public endpoint.
            sleep(0.05)
            for y in resp.json():
                if "C-Series" in y['RELEASE'] or "B-Series" in y['RELEASE']:
                    payload = "treeIdRelease=" + str(y['T_ID'])
                    # Level 4: concrete server models for this release.
                    resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadServerModels',
                                         data=payload, headers={'Content-Type': 'application/x-www-form-urlencoded'})
                    sleep(0.05)
                    for z in resp.json():
                        if "M4" in z['SERVER_MODEL'] or "M5" in z['SERVER_MODEL']:
                            print((z['SERVER_MODEL'] + " " + s['OSVERSION']))
| 52.064516
| 121
| 0.552664
|
from time import sleep
import requests
results = []
resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadOsVendors',
headers={'Content-Type': 'application/x-www-form-urlencoded'})
for r in resp.json():
if "Microsoft" not in r['OSVENDOR'] and "VMware" not in r['OSVENDOR']:
payload = "treeIdVendor=" + str(r['T_ID'])
resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadOsVersions', data=payload,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
for s in resp.json():
payload = "treeIdOSVersion=" + str(s['T_ID'])
resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadServerTypes',
data=payload, headers={'Content-Type': 'application/x-www-form-urlencoded'})
sleep(0.05)
for y in resp.json():
if "C-Series" in y['RELEASE'] or "B-Series" in y['RELEASE']:
payload = "treeIdRelease=" + str(y['T_ID'])
resp = requests.post('https://ucshcltool.cloudapps.cisco.com/public/rest/osvendor/loadServerModels',
data=payload, headers={'Content-Type': 'application/x-www-form-urlencoded'})
sleep(0.05)
for z in resp.json():
if "M4" in z['SERVER_MODEL'] or "M5" in z['SERVER_MODEL']:
print((z['SERVER_MODEL'] + " " + s['OSVERSION']))
| true
| true
|
1c429821437f43abdad44920e1f1cd3c0ddcecd2
| 11,811
|
py
|
Python
|
samples/client/petstore/python-experimental/petstore_api/api/another_fake_api.py
|
kingdun3284/openapi-generator
|
07d145828f33e9e2d9e5be6694bfb1d29db4fe07
|
[
"Apache-2.0"
] | 3
|
2019-06-25T01:41:08.000Z
|
2019-10-01T15:49:15.000Z
|
samples/client/petstore/python-experimental/petstore_api/api/another_fake_api.py
|
kingdun3284/openapi-generator
|
07d145828f33e9e2d9e5be6694bfb1d29db4fe07
|
[
"Apache-2.0"
] | 1
|
2022-02-27T20:01:11.000Z
|
2022-02-27T20:01:11.000Z
|
samples/client/petstore/python-experimental/petstore_api/api/another_fake_api.py
|
kingdun3284/openapi-generator
|
07d145828f33e9e2d9e5be6694bfb1d29db4fe07
|
[
"Apache-2.0"
] | 2
|
2019-06-25T05:39:52.000Z
|
2019-07-09T15:47:02.000Z
|
# coding: utf-8
"""
OpenAPI Petstore
This spec is mainly for testing Petstore server and contains fake endpoints, models. Please do not use this for any other purpose. Special characters: \" \\ # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from petstore_api.api_client import ApiClient
from petstore_api.exceptions import (
ApiTypeError,
ApiValueError
)
from petstore_api.model_utils import (
check_allowed_values,
check_validations
)
class AnotherFakeApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
    def __call_123_test_special_tags(self, body, **kwargs):  # noqa: E501
        """To test special tags  # noqa: E501
        To test special tags and operation ID starting with number  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.call_123_test_special_tags(body, async_req=True)
        >>> result = thread.get()
        :param async_req bool: execute request asynchronously
        :param Client body: client model (required)
        :param _return_http_data_only: response data without head status
            code and headers
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
        :return: Client
            If the method is called asynchronously, returns the request
            thread.
        """
        # Default to returning only the deserialized body unless the caller
        # explicitly asked for status/headers too.
        kwargs['_return_http_data_only'] = kwargs.get(
            '_return_http_data_only', True
        )
        kwargs['body'] = body
        return self.call_with_http_info(**kwargs)
        # NOTE(review): as rendered here this Endpoint wiring sits after the
        # `return` above (unreachable) and the helper is a class-level method;
        # in upstream openapi-generator output both live inside __init__.
        # The indentation looks mangled -- verify against the generator.
        self.call_123_test_special_tags = Endpoint(
            settings={
                'response_type': 'Client',
                'auth': [],
                'endpoint_path': '/another-fake/dummy',
                'operation_id': 'call_123_test_special_tags',
                'http_method': 'PATCH',
                'servers': [],
            },
            params_map={
                'all': [
                    'body',
                ],
                'required': [
                    'body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'body': 'Client',
                },
                'attribute_map': {
                },
                'location_map': {
                    'body': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__call_123_test_special_tags
        )
class Endpoint(object):
def __init__(self, settings=None, params_map=None, root_map=None,
headers_map=None, api_client=None, callable=None):
"""Creates an endpoint
Args:
settings (dict): see below key value pairs
'response_type' (str): response type
'auth' (list): a list of auth type keys
'endpoint_path' (str): the endpoint path
'operation_id' (str): endpoint string identifier
'http_method' (str): POST/PUT/PATCH/GET etc
'servers' (list): list of str servers that this endpoint is at
params_map (dict): see below key value pairs
'all' (list): list of str endpoint parameter names
'required' (list): list of required parameter names
'nullable' (list): list of nullable parameter names
'enum' (list): list of parameters with enum values
'validation' (list): list of parameters with validations
root_map
'validations' (dict): the dict mapping endpoint parameter tuple
paths to their validation dictionaries
'allowed_values' (dict): the dict mapping endpoint parameter
tuple paths to their allowed_values (enum) dictionaries
'openapi_types' (dict): param_name to openapi type
'attribute_map' (dict): param_name to camelCase name
'location_map' (dict): param_name to 'body', 'file', 'form',
'header', 'path', 'query'
collection_format_map (dict): param_name to `csv` etc.
headers_map (dict): see below key value pairs
'accept' (list): list of Accept header strings
'content_type' (list): list of Content-Type header strings
api_client (ApiClient) api client instance
callable (function): the function which is invoked when the
Endpoint is called
"""
self.settings = settings
self.params_map = params_map
self.params_map['all'].extend([
'async_req',
'_host_index',
'_preload_content',
'_request_timeout',
'_return_http_data_only'
])
self.validations = root_map['validations']
self.allowed_values = root_map['allowed_values']
self.openapi_types = root_map['openapi_types']
self.attribute_map = root_map['attribute_map']
self.location_map = root_map['location_map']
self.collection_format_map = root_map['collection_format_map']
self.headers_map = headers_map
self.api_client = api_client
self.callable = callable
def __validate_inputs(self, kwargs):
for param in self.params_map['enum']:
if param in kwargs:
check_allowed_values(
self.allowed_values,
(param,),
kwargs[param],
self.validations
)
for param in self.params_map['validation']:
if param in kwargs:
check_validations(
self.validations,
(param,),
kwargs[param]
)
    def __gather_params(self, kwargs):
        """Bucket validated kwargs by request location.

        Returns a dict with 'body', 'collection_format', 'file', 'form',
        'header', 'path' and 'query' entries ready for ApiClient.call_api.
        """
        params = {
            'body': None,
            'collection_format': {},
            'file': {},
            'form': [],
            'header': {},
            'path': {},
            'query': []
        }
        # Parameters without a location mapping (e.g. async_req) are
        # silently skipped here and consumed later from kwargs directly.
        for param_name, param_value in six.iteritems(kwargs):
            param_location = self.location_map.get(param_name)
            if param_location:
                if param_location == 'body':
                    params['body'] = param_value
                    continue
                # Wire name (e.g. camelCase) for this python parameter.
                base_name = self.attribute_map[param_name]
                if (param_location == 'form' and
                        self.openapi_types[param_name] == 'file'):
                    # File uploads travel in their own bucket.
                    param_location = 'file'
                elif param_location in {'form', 'query'}:
                    # form/query collect (name, value) pairs so a name may repeat.
                    param_value_full = (base_name, param_value)
                    params[param_location].append(param_value_full)
                if param_location not in {'form', 'query'}:
                    params[param_location][base_name] = param_value
                collection_format = self.collection_format_map.get(param_name)
                if collection_format:
                    params['collection_format'][base_name] = collection_format
        return params
def __call__(self, *args, **kwargs):
""" This method is invoked when endpoints are called
Example:
pet_api = PetApi()
pet_api.add_pet # this is an instance of the class Endpoint
pet_api.add_pet() # this invokes pet_api.add_pet.__call__()
which then invokes the callable functions stored in that endpoint at
pet_api.add_pet.callable or self.callable in this class
"""
return self.callable(self, *args, **kwargs)
    def call_with_http_info(self, **kwargs):
        """Validate kwargs against this endpoint's metadata and perform the call."""
        # Resolve the target host. NOTE(review): kwargs.get('_host_index')
        # is falsy for index 0, so _host_index=0 takes the else branch; the
        # outcome (servers[0]) is the same, but confirm that is intended.
        if kwargs.get('_host_index') and self.settings['servers']:
            _host_index = kwargs.get('_host_index')
            try:
                _host = self.settings['servers'][_host_index]
            except IndexError:
                raise ApiValueError(
                    "Invalid host index. Must be 0 <= index < %s" %
                    len(self.settings['servers'])
                )
        else:
            try:
                _host = self.settings['servers'][0]
            except IndexError:
                # No servers configured; the client falls back to its default.
                _host = None
        # Reject unknown parameters and None for non-nullable parameters.
        for key, value in six.iteritems(kwargs):
            if key not in self.params_map['all']:
                raise ApiTypeError(
                    "Got an unexpected parameter '%s'"
                    " to method `%s`" %
                    (key, self.settings['operation_id'])
                )
            if key not in self.params_map['nullable'] and value is None:
                raise ApiValueError(
                    "Value may not be None for non-nullable parameter `%s`"
                    " when calling `%s`" %
                    (key, self.settings['operation_id'])
                )
        # Every required parameter must be present.
        for key in self.params_map['required']:
            if key not in kwargs.keys():
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`%s`" % (key, self.settings['operation_id'])
                )
        self.__validate_inputs(kwargs)
        params = self.__gather_params(kwargs)
        # Negotiate Accept / Content-Type from the endpoint's header lists.
        accept_headers_list = self.headers_map['accept']
        if accept_headers_list:
            params['header']['Accept'] = self.api_client.select_header_accept(
                accept_headers_list)
        content_type_headers_list = self.headers_map['content_type']
        if content_type_headers_list:
            header_list = self.api_client.select_header_content_type(
                content_type_headers_list)
            params['header']['Content-Type'] = header_list
        return self.api_client.call_api(
            self.settings['endpoint_path'], self.settings['http_method'],
            params['path'],
            params['query'],
            params['header'],
            body=params['body'],
            post_params=params['form'],
            files=params['file'],
            response_type=self.settings['response_type'],
            auth_settings=self.settings['auth'],
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            _host=_host,
            collection_formats=params['collection_format'])
| 38.223301
| 174
| 0.542545
|
from __future__ import absolute_import
import re
import six
from petstore_api.api_client import ApiClient
from petstore_api.exceptions import (
ApiTypeError,
ApiValueError
)
from petstore_api.model_utils import (
check_allowed_values,
check_validations
)
class AnotherFakeApi(object):
    """Generated client for the /another-fake endpoints.

    NOTE: auto-generated (OpenAPI Generator); the endpoint function is
    wrapped in an Endpoint descriptor assigned in __init__.
    """
    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

        def __call_123_test_special_tags(self, body, **kwargs):
            """PATCH /another-fake/dummy — test special tags operation."""
            # Default to returning only the deserialized body rather than
            # the full (data, status, headers) triple.
            kwargs['_return_http_data_only'] = kwargs.get(
                '_return_http_data_only', True
            )
            kwargs['body'] = body
            return self.call_with_http_info(**kwargs)

        self.call_123_test_special_tags = Endpoint(
            settings={
                'response_type': 'Client',
                'auth': [],
                'endpoint_path': '/another-fake/dummy',
                'operation_id': 'call_123_test_special_tags',
                'http_method': 'PATCH',
                'servers': [],
            },
            params_map={
                'all': [
                    'body',
                ],
                'required': [
                    'body',
                ],
                'nullable': [
                ],
                'enum': [
                ],
                'validation': [
                ]
            },
            root_map={
                'validations': {
                },
                'allowed_values': {
                },
                'openapi_types': {
                    'body': 'Client',
                },
                'attribute_map': {
                },
                'location_map': {
                    'body': 'body',
                },
                'collection_format_map': {
                }
            },
            headers_map={
                'accept': [
                    'application/json'
                ],
                'content_type': [
                    'application/json'
                ]
            },
            api_client=api_client,
            callable=__call_123_test_special_tags
        )
class Endpoint(object):
    """Metadata wrapper for a single generated API operation.

    Holds the operation's settings, parameter maps and header options and
    performs validation and dispatch through the shared ApiClient.
    """
    def __init__(self, settings=None, params_map=None, root_map=None,
                 headers_map=None, api_client=None, callable=None):
        # `callable` is the generated endpoint function invoked by __call__;
        # the map arguments describe parameter names, types, locations,
        # validations and header choices for this operation.
        self.settings = settings
        self.params_map = params_map
        # Implicit parameters accepted by every endpoint.
        self.params_map['all'].extend([
            'async_req',
            '_host_index',
            '_preload_content',
            '_request_timeout',
            '_return_http_data_only'
        ])
        self.validations = root_map['validations']
        self.allowed_values = root_map['allowed_values']
        self.openapi_types = root_map['openapi_types']
        self.attribute_map = root_map['attribute_map']
        self.location_map = root_map['location_map']
        self.collection_format_map = root_map['collection_format_map']
        self.headers_map = headers_map
        self.api_client = api_client
        self.callable = callable
    def __validate_inputs(self, kwargs):
        """Run enum and validation checks over the supplied kwargs."""
        for param in self.params_map['enum']:
            if param in kwargs:
                check_allowed_values(
                    self.allowed_values,
                    (param,),
                    kwargs[param],
                    self.validations
                )
        for param in self.params_map['validation']:
            if param in kwargs:
                check_validations(
                    self.validations,
                    (param,),
                    kwargs[param]
                )
    def __gather_params(self, kwargs):
        """Bucket validated kwargs by request location for call_api."""
        params = {
            'body': None,
            'collection_format': {},
            'file': {},
            'form': [],
            'header': {},
            'path': {},
            'query': []
        }
        for param_name, param_value in six.iteritems(kwargs):
            param_location = self.location_map.get(param_name)
            if param_location:
                if param_location == 'body':
                    params['body'] = param_value
                    continue
                # Wire name (e.g. camelCase) for this python parameter.
                base_name = self.attribute_map[param_name]
                if (param_location == 'form' and
                        self.openapi_types[param_name] == 'file'):
                    param_location = 'file'
                elif param_location in {'form', 'query'}:
                    param_value_full = (base_name, param_value)
                    params[param_location].append(param_value_full)
                if param_location not in {'form', 'query'}:
                    params[param_location][base_name] = param_value
                collection_format = self.collection_format_map.get(param_name)
                if collection_format:
                    params['collection_format'][base_name] = collection_format
        return params
    def __call__(self, *args, **kwargs):
        """Invoke the stored endpoint function, passing self first."""
        return self.callable(self, *args, **kwargs)
    def call_with_http_info(self, **kwargs):
        """Validate kwargs against the endpoint metadata and perform the call."""
        # NOTE(review): _host_index=0 is falsy and takes the else branch;
        # same result (servers[0]) but confirm that is intended.
        if kwargs.get('_host_index') and self.settings['servers']:
            _host_index = kwargs.get('_host_index')
            try:
                _host = self.settings['servers'][_host_index]
            except IndexError:
                raise ApiValueError(
                    "Invalid host index. Must be 0 <= index < %s" %
                    len(self.settings['servers'])
                )
        else:
            try:
                _host = self.settings['servers'][0]
            except IndexError:
                _host = None
        # Reject unknown parameters and None for non-nullable parameters.
        for key, value in six.iteritems(kwargs):
            if key not in self.params_map['all']:
                raise ApiTypeError(
                    "Got an unexpected parameter '%s'"
                    " to method `%s`" %
                    (key, self.settings['operation_id'])
                )
            if key not in self.params_map['nullable'] and value is None:
                raise ApiValueError(
                    "Value may not be None for non-nullable parameter `%s`"
                    " when calling `%s`" %
                    (key, self.settings['operation_id'])
                )
        for key in self.params_map['required']:
            if key not in kwargs.keys():
                raise ApiValueError(
                    "Missing the required parameter `%s` when calling "
                    "`%s`" % (key, self.settings['operation_id'])
                )
        self.__validate_inputs(kwargs)
        params = self.__gather_params(kwargs)
        # Negotiate Accept / Content-Type from the endpoint's header lists.
        accept_headers_list = self.headers_map['accept']
        if accept_headers_list:
            params['header']['Accept'] = self.api_client.select_header_accept(
                accept_headers_list)
        content_type_headers_list = self.headers_map['content_type']
        if content_type_headers_list:
            header_list = self.api_client.select_header_content_type(
                content_type_headers_list)
            params['header']['Content-Type'] = header_list
        return self.api_client.call_api(
            self.settings['endpoint_path'], self.settings['http_method'],
            params['path'],
            params['query'],
            params['header'],
            body=params['body'],
            post_params=params['form'],
            files=params['file'],
            response_type=self.settings['response_type'],
            auth_settings=self.settings['auth'],
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            _host=_host,
            collection_formats=params['collection_format'])
| true
| true
|
1c42993e0f44f10aac7d85d890466a2f809cc09a
| 19,246
|
py
|
Python
|
salt/states/x509.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | 1
|
2020-10-19T11:49:49.000Z
|
2020-10-19T11:49:49.000Z
|
salt/states/x509.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | null | null | null |
salt/states/x509.py
|
Rafflecopter/salt
|
08bbfcd4d9b93351d7d5d25b097e892026b6f1cd
|
[
"Apache-2.0"
] | 1
|
2020-10-19T11:49:50.000Z
|
2020-10-19T11:49:50.000Z
|
# -*- coding: utf-8 -*-
'''
Manage X509 Certificates
.. versionadded:: Beryllium
This module can enable managing a complete PKI infrastructure including creating private keys, CA's,
certificates and CRLs. It includes the ability to generate a private key on a server, and have the
corresponding public key sent to a remote CA to create a CA signed certificate. This can be done in
a secure manner, where private keys are always generated locally and never moved across the network.
Here is a simple example scenario. In this example ``ca`` is the ca server,
and ``www`` is a web server that needs a certificate signed by ``ca``.
For remote signing, peers must be permitted to remotely call the
:mod:`pem_managed <salt.states.x509.pem_managed>` function.
/etc/salt/master.d/peer.sls
.. code-block:: yaml
peer:
.*:
- x509.sign_remote_certificate
/srv/salt/top.sls
.. code-block:: yaml
base:
'*':
- cert
'ca':
- ca
'www':
- www
This state creates the CA key, certificate and signing policy. It also publishes the certificate to
the mine where it can be easily retrieved by other minions.
/srv/salt/ca.sls
.. code-block:: yaml
salt-minion:
service.running:
- enabled
- listen:
- file: /etc/salt/minion.d/signing_policies.conf
/etc/salt/minion.d/signing_policies.conf:
file.managed:
- source: salt://signing_policies.conf
/etc/pki:
file.directory: []
/etc/pki/ca.key:
x509.private_key_managed:
- bits: 4096
- backup: True
- require:
- file: /etc/pki
/etc/pki/ca.crt:
x509.certificate_managed:
- signing_private_key: /etc/pki/ca.key
- CN: ca.example.com
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:true"
- keyUsage: "critical cRLSign, keyCertSign"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 3650
- days_remaining: 0
- backup: True
- require:
- x509: /etc/pki/ca.key
mine.send:
module.run:
- func: x509.get_pem_entries
- kwargs:
glob_path: /etc/pki/ca.crt
- onchanges:
- x509: /etc/pki/ca.crt
The signing policy defines properties that override any property requested or included in a CRL. It also
can define a restricted list of minons which are allowed to remotely invoke this signing policy.
/srv/salt/signing_policies.conf
.. code-block:: yaml
x509_signing_policies:
www:
- minions: 'www'
- signing_private_key: /etc/pki/ca.key
- signing_cert: /etc/pki/ca.crt
- C: US
- ST: Utah
- L: Salt Lake City
- basicConstraints: "critical CA:false"
- keyUsage: "critical cRLSign, keyCertSign"
- subjectKeyIdentifier: hash
- authorityKeyIdentifier: keyid,issuer:always
- days_valid: 90
- copypath: /etc/pki/issued_certs/
This state will instruct all minions to trust certificates signed by our new CA.
Using jinja to strip newlines from the text avoids dealing with newlines in the rendered yaml,
and the :mod:`sign_remote_certificate <salt.states.x509.sign_remote_certificate>` state will
handle properly formatting the text before writing the output.
/srv/salt/cert.sls
.. code-block:: yaml
/usr/local/share/ca-certificates/intca.crt
x509.pem_managed:
- text: {{ salt['mine.get']('pki', 'x509.get_pem_entries')['pki']['/etc/pki/ca.crt']|replace('\\n', '') }}
This state creates a private key then requests a certificate signed by ca according to the www policy.
/srv/salt/www.sls
.. code-block:: yaml
/etc/pki/www.key:
x509.private_key_managed:
- bits: 4096
/etc/pki/www.crt:
x509.certificate_managed:
- ca_server: ca
- signing_policy: www
- public_key: /etc/pki/www.key
- CN: www.example.com
- days_remaining: 30
- backup: True
'''
# Import Python Libs
from __future__ import absolute_import
import datetime
import os
import re
import copy
# Import Salt Libs
import salt.exceptions
import salt.utils
# Import 3rd-party libs
import salt.ext.six as six
def _revoked_to_list(revs):
'''
Turn the mess of OrderedDicts and Lists into a list of dicts for
use in the CRL module.
'''
list_ = []
for rev in revs:
for rev_name, props in six.iteritems(rev): # pylint: disable=unused-variable
dict_ = {}
for prop in props:
for propname, val in six.iteritems(prop):
if isinstance(val, datetime.datetime):
val = val.strftime('%Y-%m-%d %H:%M:%S')
dict_[propname] = val
list_.append(dict_)
return list_
def private_key_managed(name,
                        bits=2048,
                        new=False,
                        backup=False):
    '''
    Manage a private key's existance.

    name:
        Path to the private key
    bits:
        Key length in bits. Default 2048.
    new:
        Always create a new key. Defaults to False. Combine with a
        ``prereq`` requisite to rotate the key whenever the certificate
        that uses it is about to be renewed.
    backup:
        When replacing an existing file, backup the old file on the minion.
        Default is False.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    existing_bits = 0
    if not os.path.isfile(name):
        current = '{0} does not exist.'.format(name)
    else:
        try:
            existing_bits = __salt__['x509.get_private_key_size'](private_key=name)
            current = "{0} bit private key".format(existing_bits)
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid Private Key.'.format(name)
    # Nothing to do when the key already has the requested size and
    # rotation was not requested.
    if not new and existing_bits == bits:
        ret['result'] = True
        ret['comment'] = 'The Private key is already in the correct state'
        return ret
    ret['changes'] = {'old': current,
                      'new': "{0} bit private key".format(bits)}
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The Private Key "{0}" will be updated.'.format(name)
        return ret
    if backup and os.path.isfile(name):
        salt.utils.backup_minion(name, os.path.join(__opts__['cachedir'], 'file_backup'))
    ret['comment'] = __salt__['x509.create_private_key'](path=name, bits=bits)
    ret['result'] = True
    return ret
def csr_managed(name,
                backup=False,
                **kwargs):
    '''
    Manage a Certificate Signing Request

    name:
        Path to the CSR

    backup:
        When replacing an existing file, backup the old file on the minion.
        Default is False.

    kwargs:
        The properties to be added to the certificate request, including
        items like subject, extensions and public key; passed through to
        :mod:`x509.create_csr <salt.modules.x509.create_csr>`.

    Example:

    .. code-block:: yaml

        /etc/pki/mycert.csr:
          x509.csr_managed:
             - public_key: /etc/pki/mycert.key
             - CN: www.example.com
             - C: US
             - ST: Utah
             - L: Salt Lake City
             - keyUsage: 'critical dataEncipherment'
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    # Read the existing CSR, if any, for comparison with the fresh request.
    if os.path.isfile(name):
        try:
            current = __salt__['x509.read_csr'](csr=name)
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid CSR.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    new_csr = __salt__['x509.create_csr'](text=True, **kwargs)
    new = __salt__['x509.read_csr'](csr=new_csr)
    if current == new:
        ret['result'] = True
        ret['comment'] = 'The CSR is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': new, }
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The CSR {0} will be updated.'.format(name)
        # BUG FIX: previously fell through and rewrote the file even in
        # test mode; return here without touching the filesystem (matches
        # the behavior of the sibling *_managed states).
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.write_pem'](text=new_csr, path=name, pem_type="CERTIFICATE REQUEST")
    ret['result'] = True
    return ret
def certificate_managed(name,
                        days_remaining=90,
                        backup=False,
                        **kwargs):
    '''
    Manage a Certificate
    name:
        Path to the certificate
    days_remaining:
        The minimum number of days remaining when the certificate should be recreted. Default is 90. A
        value of 0 disables automatic renewal.
    backup:
        When replacing an existing file, backup the old file onthe minion. Default is False.
    kwargs:
        Any arguments supported by :mod:`x509.create_certificate <salt.modules.x509.create_certificate>`
        are supported.
    Examples:
    .. code-block:: yaml
        /etc/pki/ca.crt:
          x509.certificate_managed:
            - signing_private_key: /etc/pki/ca.key
            - CN: ca.example.com
            - C: US
            - ST: Utah
            - L: Salt Lake City
            - basicConstraints: "critical CA:true"
            - keyUsage: "critical cRLSign, keyCertSign"
            - subjectKeyIdentifier: hash
            - authorityKeyIdentifier: keyid,issuer:always
            - days_valid: 3650
            - days_remaining: 0
            - backup: True
    .. code-block:: yaml
        /etc/ssl/www.crt:
          x509.certificate_managed:
            - ca_server: pki
            - signing_policy: www
            - public_key: /etc/ssl/www.key
            - CN: www.example.com
            - days_valid: 90
            - days_remaining: 30
            - backup: True
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    current_days_remaining = 0
    current_comp = {}
    if os.path.isfile(name):
        try:
            current = __salt__['x509.read_certificate'](certificate=name)
            current_comp = copy.deepcopy(current)
            # Strip fields that legitimately differ between otherwise
            # identical certificates so the comparison below is meaningful.
            if 'serial_number' not in kwargs:
                current_comp.pop('Serial Number')
            if 'signing_cert' not in kwargs:
                try:
                    # Mask the issuer serial so a CA serial change alone does
                    # not force a renewal when no signing_cert is pinned.
                    current_comp['X509v3 Extensions']['authorityKeyIdentifier'] = (
                        re.sub(r'serial:([0-9A-F]{2}:)*[0-9A-F]{2}', 'serial:--',
                               current_comp['X509v3 Extensions']['authorityKeyIdentifier']))
                except KeyError:
                    pass
            current_comp.pop('Not Before')
            current_comp.pop('MD5 Finger Print')
            current_comp.pop('SHA1 Finger Print')
            current_comp.pop('SHA-256 Finger Print')
            current_notafter = current_comp.pop('Not After')
            current_days_remaining = (
                datetime.datetime.strptime(current_notafter, '%Y-%m-%d %H:%M:%S') -
                datetime.datetime.now()).days
            if days_remaining == 0:
                # Renewal disabled: make the threshold always satisfied.
                days_remaining = current_days_remaining - 1
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid Certificate.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    if 'ca_server' in kwargs and 'signing_policy' not in kwargs:
        raise salt.exceptions.SaltInvocationError('signing_policy must be specified if ca_server is.')
    # Dry-run the certificate creation to get the properties we would make.
    new = __salt__['x509.create_certificate'](testrun=True, **kwargs)
    if isinstance(new, dict):
        new_comp = copy.deepcopy(new)
        new.pop('Issuer Public Key')
        if 'serial_number' not in kwargs:
            new_comp.pop('Serial Number')
        if 'signing_cert' not in kwargs:
            try:
                new_comp['X509v3 Extensions']['authorityKeyIdentifier'] = (
                    re.sub(r'serial:([0-9A-F]{2}:)*[0-9A-F]{2}', 'serial:--',
                           new_comp['X509v3 Extensions']['authorityKeyIdentifier']))
            except KeyError:
                pass
        new_comp.pop('Not Before')
        new_comp.pop('Not After')
        new_comp.pop('MD5 Finger Print')
        new_comp.pop('SHA1 Finger Print')
        new_comp.pop('SHA-256 Finger Print')
        new_issuer_public_key = new_comp.pop('Issuer Public Key')
    else:
        # NOTE(review): new_issuer_public_key is never bound on this path;
        # the condition below only avoids a NameError because
        # current_comp == new_comp short-circuits first. Worth confirming.
        new_comp = new
    if (current_comp == new_comp and
            current_days_remaining > days_remaining and
            __salt__['x509.verify_signature'](name, new_issuer_public_key)):
        ret['result'] = True
        ret['comment'] = 'The certificate is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': new, }
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The certificate {0} will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.create_certificate'](path=name, **kwargs)
    ret['result'] = True
    return ret
def crl_managed(name,
                signing_private_key,
                signing_cert=None,
                revoked=None,
                days_valid=100,
                days_remaining=30,
                include_expired=False,
                backup=False,):
    '''
    Manage a Certificate Revocation List
    name:
        Path to the certificate
    signing_private_key:
        The private key that will be used to sign this crl. This is
        usually your CA's private key.
    signing_cert:
        The certificate of the authority that will be used to sign this crl.
        This is usually your CA's certificate.
    revoked:
        A list of certificates to revoke. Must include either a serial number or a
        the certificate itself. Can optionally include the revocation date and
        notAfter date from the certificate. See example below for details.
    days_valid:
        The number of days the certificate should be valid for. Default is 100.
    days_remaining:
        The crl should be automatically recreated if there are less than ``days_remaining``
        days until the crl expires. Set to 0 to disable automatic renewal. Default is 30.
    include_expired:
        Include expired certificates in the CRL. Default is ``False``.
    backup:
        When replacing an existing file, backup the old file onthe minion. Default is False.
    Example:
    .. code-block:: yaml
        /etc/pki/ca.crl:
          x509.crl_managed:
            - signing_private_key: /etc/pki/myca.key
            - signing_cert: /etc/pki/myca.crt
            - revoked:
              - compromized_Web_key:
                - certificate: /etc/pki/certs/badweb.crt
                - revocation_date: 2015-03-01 00:00:00
                - reason: keyCompromise
              - terminated_vpn_user:
                - serial_number: D6:D2:DC:D8:4D:5C:C0:F4
                - not_after: 2016-01-01 00:00:00
                - revocation_date: 2015-02-25 00:00:00
                - reason: cessationOfOperation
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if revoked is None:
        revoked = []
    # Normalize the nested revoked structure into a flat list of dicts.
    revoked = _revoked_to_list(revoked)
    current_days_remaining = 0
    current_comp = {}
    if os.path.isfile(name):
        try:
            current = __salt__['x509.read_crl'](crl=name)
            current_comp = current.copy()
            # Timestamps always differ between runs; drop before comparing.
            current_comp.pop('Last Update')
            current_notafter = current_comp.pop('Next Update')
            current_days_remaining = (
                datetime.datetime.strptime(current_notafter, '%Y-%m-%d %H:%M:%S') -
                datetime.datetime.now()).days
            if days_remaining == 0:
                # Renewal disabled: make the threshold always satisfied.
                days_remaining = current_days_remaining - 1
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid CRL.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    new_crl = __salt__['x509.create_crl'](text=True, signing_private_key=signing_private_key,
            signing_cert=signing_cert, revoked=revoked, days_valid=days_valid, include_expired=include_expired)
    new = __salt__['x509.read_crl'](crl=new_crl)
    new_comp = new.copy()
    new_comp.pop('Last Update')
    new_comp.pop('Next Update')
    if (current_comp == new_comp and
            current_days_remaining > days_remaining and
            __salt__['x509.verify_crl'](name, signing_cert)):
        ret['result'] = True
        ret['comment'] = 'The crl is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': new, }
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The crl {0} will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.write_pem'](text=new_crl, path=name, pem_type='X509 CRL')
    ret['result'] = True
    return ret
def pem_managed(name,
                text,
                backup=False):
    '''
    Manage the contents of a PEM file directly with the content in text, ensuring correct formatting.

    name:
        The path to the file to manage

    text:
        The PEM formatted text to write.

    backup:
        When replacing an existing file, backup the old file on the minion. Default is False.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    # Normalize the supplied text so we compare against canonical PEM.
    new = __salt__['x509.get_pem_entry'](text=text)
    if os.path.isfile(name):
        # FIX: close the file handle instead of leaking it
        # (previously `salt.utils.fopen(name).read()` with no close).
        with salt.utils.fopen(name) as current_file:
            current = current_file.read()
    else:
        current = '{0} does not exist.'.format(name)
    if new == current:
        ret['result'] = True
        ret['comment'] = 'The file is already in the correct state'
        return ret
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The file {0} will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.write_pem'](text=text, path=name)
    ret['result'] = True
    return ret
| 30.842949
| 114
| 0.593474
|
from __future__ import absolute_import
import datetime
import os
import re
import copy
import salt.exceptions
import salt.utils
import salt.ext.six as six
def _revoked_to_list(revs):
    '''
    Flatten the nested OrderedDict/list structure describing revoked
    certificates into a flat list of plain dicts for the CRL module;
    datetime values are rendered as '%Y-%m-%d %H:%M:%S' strings.
    '''
    list_ = []
    for rev in revs:
        # rev_name (the entry label) is intentionally unused.
        for rev_name, props in six.iteritems(rev):
            dict_ = {}
            for prop in props:
                for propname, val in six.iteritems(prop):
                    if isinstance(val, datetime.datetime):
                        val = val.strftime('%Y-%m-%d %H:%M:%S')
                    dict_[propname] = val
            list_.append(dict_)
    return list_
def private_key_managed(name,
                        bits=2048,
                        new=False,
                        backup=False):
    '''
    Manage a private key's existence.
    name:
        Path to the private key.
    bits:
        Key length in bits. Default 2048.
    new:
        Always create a new key. Default False.
    backup:
        Backup the old file on the minion before replacing. Default False.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    current_bits = 0
    if os.path.isfile(name):
        try:
            current_bits = __salt__['x509.get_private_key_size'](private_key=name)
            current = "{0} bit private key".format(current_bits)
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid Private Key.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    # Nothing to do when sizes match and rotation was not requested.
    if current_bits == bits and not new:
        ret['result'] = True
        ret['comment'] = 'The Private key is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': "{0} bit private key".format(bits)}
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The Private Key "{0}" will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.create_private_key'](path=name, bits=bits)
    ret['result'] = True
    return ret
def csr_managed(name,
                backup=False,
                **kwargs):
    '''
    Manage a Certificate Signing Request.
    name:
        Path to the CSR.
    backup:
        Backup the old file on the minion before replacing. Default False.
    kwargs:
        Request properties passed through to x509.create_csr.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if os.path.isfile(name):
        try:
            current = __salt__['x509.read_csr'](csr=name)
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid CSR.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    new_csr = __salt__['x509.create_csr'](text=True, **kwargs)
    new = __salt__['x509.read_csr'](csr=new_csr)
    if current == new:
        ret['result'] = True
        ret['comment'] = 'The CSR is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': new, }
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The CSR {0} will be updated.'.format(name)
        # BUG FIX: return in test mode instead of falling through and
        # rewriting the file anyway.
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.write_pem'](text=new_csr, path=name, pem_type="CERTIFICATE REQUEST")
    ret['result'] = True
    return ret
def certificate_managed(name,
                        days_remaining=90,
                        backup=False,
                        **kwargs):
    '''
    Manage a certificate; recreate it when its properties differ from the
    requested ones or when fewer than ``days_remaining`` days are left.
    kwargs are passed through to x509.create_certificate.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    current_days_remaining = 0
    current_comp = {}
    if os.path.isfile(name):
        try:
            current = __salt__['x509.read_certificate'](certificate=name)
            current_comp = copy.deepcopy(current)
            # Strip fields that legitimately differ between otherwise
            # identical certificates before comparison.
            if 'serial_number' not in kwargs:
                current_comp.pop('Serial Number')
            if 'signing_cert' not in kwargs:
                try:
                    current_comp['X509v3 Extensions']['authorityKeyIdentifier'] = (
                        re.sub(r'serial:([0-9A-F]{2}:)*[0-9A-F]{2}', 'serial:--',
                               current_comp['X509v3 Extensions']['authorityKeyIdentifier']))
                except KeyError:
                    pass
            current_comp.pop('Not Before')
            current_comp.pop('MD5 Finger Print')
            current_comp.pop('SHA1 Finger Print')
            current_comp.pop('SHA-256 Finger Print')
            current_notafter = current_comp.pop('Not After')
            current_days_remaining = (
                datetime.datetime.strptime(current_notafter, '%Y-%m-%d %H:%M:%S') -
                datetime.datetime.now()).days
            if days_remaining == 0:
                # Renewal disabled: make the threshold always satisfied.
                days_remaining = current_days_remaining - 1
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid Certificate.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    if 'ca_server' in kwargs and 'signing_policy' not in kwargs:
        raise salt.exceptions.SaltInvocationError('signing_policy must be specified if ca_server is.')
    # Dry-run the certificate creation for comparison.
    new = __salt__['x509.create_certificate'](testrun=True, **kwargs)
    if isinstance(new, dict):
        new_comp = copy.deepcopy(new)
        new.pop('Issuer Public Key')
        if 'serial_number' not in kwargs:
            new_comp.pop('Serial Number')
        if 'signing_cert' not in kwargs:
            try:
                new_comp['X509v3 Extensions']['authorityKeyIdentifier'] = (
                    re.sub(r'serial:([0-9A-F]{2}:)*[0-9A-F]{2}', 'serial:--',
                           new_comp['X509v3 Extensions']['authorityKeyIdentifier']))
            except KeyError:
                pass
        new_comp.pop('Not Before')
        new_comp.pop('Not After')
        new_comp.pop('MD5 Finger Print')
        new_comp.pop('SHA1 Finger Print')
        new_comp.pop('SHA-256 Finger Print')
        new_issuer_public_key = new_comp.pop('Issuer Public Key')
    else:
        # NOTE(review): new_issuer_public_key is unbound on this path; the
        # condition below only avoids a NameError via short-circuiting.
        new_comp = new
    if (current_comp == new_comp and
            current_days_remaining > days_remaining and
            __salt__['x509.verify_signature'](name, new_issuer_public_key)):
        ret['result'] = True
        ret['comment'] = 'The certificate is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': new, }
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The certificate {0} will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.create_certificate'](path=name, **kwargs)
    ret['result'] = True
    return ret
def crl_managed(name,
                signing_private_key,
                signing_cert=None,
                revoked=None,
                days_valid=100,
                days_remaining=30,
                include_expired=False,
                backup=False,):
    '''
    Manage a Certificate Revocation List signed by signing_private_key /
    signing_cert; recreate it when its contents differ or fewer than
    ``days_remaining`` days are left before Next Update.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    if revoked is None:
        revoked = []
    # Normalize the nested revoked structure into a flat list of dicts.
    revoked = _revoked_to_list(revoked)
    current_days_remaining = 0
    current_comp = {}
    if os.path.isfile(name):
        try:
            current = __salt__['x509.read_crl'](crl=name)
            current_comp = current.copy()
            # Timestamps always differ between runs; drop before comparing.
            current_comp.pop('Last Update')
            current_notafter = current_comp.pop('Next Update')
            current_days_remaining = (
                datetime.datetime.strptime(current_notafter, '%Y-%m-%d %H:%M:%S') -
                datetime.datetime.now()).days
            if days_remaining == 0:
                # Renewal disabled: make the threshold always satisfied.
                days_remaining = current_days_remaining - 1
        except salt.exceptions.SaltInvocationError:
            current = '{0} is not a valid CRL.'.format(name)
    else:
        current = '{0} does not exist.'.format(name)
    new_crl = __salt__['x509.create_crl'](text=True, signing_private_key=signing_private_key,
            signing_cert=signing_cert, revoked=revoked, days_valid=days_valid, include_expired=include_expired)
    new = __salt__['x509.read_crl'](crl=new_crl)
    new_comp = new.copy()
    new_comp.pop('Last Update')
    new_comp.pop('Next Update')
    if (current_comp == new_comp and
            current_days_remaining > days_remaining and
            __salt__['x509.verify_crl'](name, signing_cert)):
        ret['result'] = True
        ret['comment'] = 'The crl is already in the correct state'
        return ret
    ret['changes'] = {
        'old': current,
        'new': new, }
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The crl {0} will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.write_pem'](text=new_crl, path=name, pem_type='X509 CRL')
    ret['result'] = True
    return ret
def pem_managed(name,
                text,
                backup=False):
    '''
    Manage the contents of a PEM file at ``name``.

    The supplied ``text`` is normalized through ``x509.get_pem_entry``
    and written to ``name`` only when it differs from the file's current
    contents.

    name: Path of the PEM file to manage.
    text: PEM text to normalize and install.
    backup: Back up the existing file via the minion cache before replacing.
    '''
    ret = {'name': name, 'changes': {}, 'result': False, 'comment': ''}
    new = __salt__['x509.get_pem_entry'](text=text)
    if os.path.isfile(name):
        # Close the handle deterministically; the original bare
        # fopen(...).read() leaked it until garbage collection.
        with salt.utils.fopen(name) as current_file:
            current = current_file.read()
    else:
        current = '{0} does not exist.'.format(name)
    if new == current:
        ret['result'] = True
        ret['comment'] = 'The file is already in the correct state'
        return ret
    if __opts__['test'] is True:
        ret['result'] = None
        ret['comment'] = 'The file {0} will be updated.'.format(name)
        return ret
    if os.path.isfile(name) and backup:
        bkroot = os.path.join(__opts__['cachedir'], 'file_backup')
        salt.utils.backup_minion(name, bkroot)
    ret['comment'] = __salt__['x509.write_pem'](text=text, path=name)
    ret['result'] = True
    return ret
| true
| true
|
1c42999c62d09208aa34aeb2620c85b895a68692
| 133,787
|
py
|
Python
|
neutron/tests/unit/agent/test_securitygroups_rpc.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/agent/test_securitygroups_rpc.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
neutron/tests/unit/agent/test_securitygroups_rpc.py
|
mail2nsrajesh/neutron
|
352afb37afcf4952f03436b25618d0066c51f3f1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012, Nachi Ueno, NTT MCL, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import contextlib
import mock
import netaddr
from neutron_lib import constants as const
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
import oslo_messaging
from testtools import matchers
import webob.exc
from neutron.agent import firewall as firewall_base
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import iptables_manager
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import rpc as n_rpc
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import securitygroup as ext_sg
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.extensions import test_securitygroup as test_sg
# Per-family CIDR fixtures used by the subnet-creating tests below.
FAKE_PREFIX = {const.IPv4: '10.0.0.0/24',
               const.IPv6: '2001:db8::/64'}
# Assorted per-family test addresses (looked up via FAKE_IP[...] in tests).
FAKE_IP = {const.IPv4: '10.0.0.1',
           const.IPv6: 'fe80::1',
           'IPv6_GLOBAL': '2001:db8::1',
           'IPv6_LLA': 'fe80::123',
           'IPv6_DHCP': '2001:db8::3'}
# Dotted path of the test plugin class defined in this module; handed to
# the DB test-case setUp() as the core plugin.
TEST_PLUGIN_CLASS = ('neutron.tests.unit.agent.test_securitygroups_rpc.'
                     'SecurityGroupRpcTestPlugin')
# Dotted paths of the firewall drivers the agent can be configured with.
FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.'
FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver'
FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE +
                          'OVSHybridIptablesFirewallDriver')
FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'
def ingress_address_assignment_rules(port):
    """Return the implicit ingress rules expected for *port*.

    These are the address-assignment rules (IPv6 RA plus DHCPv4/DHCPv6
    responses) that the server appends for every port, derived from the
    port's fixed IPs.
    """
    addresses = [ip['ip_address'] for ip in port['port']['fixed_ips']]
    v4_addrs = [a for a in addresses if netaddr.IPNetwork(a).version == 4]
    v6_addrs = [a for a in addresses if netaddr.IPNetwork(a).version == 6]
    rules = []
    if v6_addrs:
        # Router advertisements (ICMPv6 type 134) for IPv6-enabled ports.
        rules.append({'direction': 'ingress',
                      'ethertype': 'IPv6',
                      'protocol': 'ipv6-icmp',
                      'source_port_range_min': 134})

    def dhcp_reply_rule(ethertype, client_port, server_port, dest_prefix):
        # One DHCP server->client UDP rule toward a single destination.
        return {'direction': 'ingress',
                'ethertype': ethertype,
                'port_range_max': client_port,
                'port_range_min': client_port,
                'protocol': 'udp',
                'source_port_range_max': server_port,
                'source_port_range_min': server_port,
                'dest_ip_prefix': dest_prefix}

    # DHCPv4 replies: each fixed v4 address plus the local broadcast.
    rules.extend(dhcp_reply_rule('IPv4', 68, 67, '%s/32' % dest)
                 for dest in v4_addrs + ['255.255.255.255'])
    # DHCPv6 replies to each fixed v6 address...
    rules.extend(dhcp_reply_rule('IPv6', 546, 547, '%s/128' % dest)
                 for dest in v6_addrs)
    # ...and to the link-local range.
    rules.extend(dhcp_reply_rule('IPv6', 546, 547, '%s' % dest)
                 for dest in ['fe80::/64'])
    return rules
def set_enable_security_groups(enabled):
    """Override the global enable_security_group option for this test."""
    cfg.CONF.set_override(
        'enable_security_group', enabled, group='SECURITYGROUP')
def set_firewall_driver(firewall_driver):
    """Override the configured firewall driver path for this test."""
    cfg.CONF.set_override(
        'firewall_driver', firewall_driver, group='SECURITYGROUP')
class FakeFirewallDriver(firewall_base.FirewallDriver):
    """Instantiable stand-in for the abstract FirewallDriver.

    FirewallDriver declares abstract hooks, so it cannot be constructed
    directly; this subclass overrides them (raising if ever invoked) so
    tests can create a concrete instance.
    """

    def prepare_port_filter(self, port):
        raise NotImplementedError

    def update_port_filter(self, port):
        raise NotImplementedError
class SecurityGroupRpcTestPlugin(test_sg.SecurityGroupTestPlugin,
                                 sg_db_rpc.SecurityGroupServerRpcMixin):
    """Test plugin that tracks ports and fires SG membership notifications.

    Every created/updated port is cached in ``self.devices`` so
    ``get_port_from_device`` can serve it back to the RPC callbacks, and a
    mock ``notifier`` records the agent notifications.
    """

    def __init__(self):
        super(SecurityGroupRpcTestPlugin, self).__init__()
        self.notifier = mock.Mock()
        self.devices = {}

    def create_port(self, context, port):
        created = super(SecurityGroupRpcTestPlugin,
                        self).create_port(context, port)
        self.devices[created['id']] = created
        self.notify_security_groups_member_updated(context, created)
        return created

    def update_port(self, context, id, port):
        original_port = self.get_port(context, id)
        updated_port = super(SecurityGroupRpcTestPlugin,
                             self).update_port(context, id, port)
        self.devices[id] = updated_port
        self.update_security_group_on_port(
            context, id, port, original_port, updated_port)

    def delete_port(self, context, id):
        deleted_port = self.get_port(context, id)
        super(SecurityGroupRpcTestPlugin, self).delete_port(context, id)
        self.notify_security_groups_member_updated(context, deleted_port)
        del self.devices[id]

    def get_port_from_device(self, context, device):
        cached = self.devices.get(device)
        if cached:
            # Shape the cached port like a device dict the agent expects.
            cached['security_group_rules'] = []
            cached['security_group_source_groups'] = []
            cached['fixed_ips'] = [fixed_ip['ip_address']
                                   for fixed_ip in cached['fixed_ips']]
        return cached
class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase):
    """Server-side tests for the security group RPC callbacks.

    Each test builds networks/subnets/ports/security groups through the
    REST test helpers, then calls the SecurityGroupServerRpcCallback
    (``self.rpc``) directly and compares the returned rule sets against
    hand-built expectations (plus the implicit address-assignment rules
    from ingress_address_assignment_rules()).
    """
    def setUp(self, plugin=None):
        plugin = plugin or TEST_PLUGIN_CLASS
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        super(SGServerRpcCallBackTestCase, self).setUp(plugin)
        # notifier is the mock installed by SecurityGroupRpcTestPlugin.
        self.notifier = directory.get_plugin().notifier
        self.rpc = securitygroups_rpc.SecurityGroupServerRpcCallback()
    # Helper: create (and delete) a port with the given owner/address so
    # the tests can assert whether a provider-updated notification fired.
    def _test_security_group_port(self, device_owner, gw_ip,
                                  cidr, ip_version, ip_address):
        with self.network() as net:
            with self.subnet(net,
                             gateway_ip=gw_ip,
                             cidr=cidr,
                             ip_version=ip_version) as subnet:
                kwargs = {
                    'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
                                   'ip_address': ip_address}]}
                if device_owner:
                    kwargs['device_owner'] = device_owner
                res = self._create_port(
                    self.fmt, net['network']['id'], **kwargs)
                res = self.deserialize(self.fmt, res)
                port_id = res['port']['id']
                if device_owner in const.ROUTER_INTERFACE_OWNERS:
                    # Router interface ports must shed their fixed IPs
                    # before they can be deleted.
                    data = {'port': {'fixed_ips': []}}
                    req = self.new_update_request('ports', data, port_id)
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                self._delete('ports', port_id)
    def test_notify_security_group_ipv6_gateway_port_added(self):
        self._test_security_group_port(
            const.DEVICE_OWNER_ROUTER_INTF,
            '2001:0db8::1',
            '2001:0db8::/64',
            6,
            '2001:0db8::1')
        self.assertTrue(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_dvr_ipv6_gateway_port_added(self):
        self._test_security_group_port(
            const.DEVICE_OWNER_DVR_INTERFACE,
            '2001:0db8::1',
            '2001:0db8::/64',
            6,
            '2001:0db8::2')
        self.assertTrue(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv6_normal_port_added(self):
        # A plain (ownerless) port must not trigger a provider update.
        self._test_security_group_port(
            None,
            '2001:0db8::1',
            '2001:0db8::/64',
            6,
            '2001:0db8::3')
        self.assertFalse(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv4_dhcp_port_added(self):
        self._test_security_group_port(
            const.DEVICE_OWNER_DHCP,
            '192.168.1.1',
            '192.168.1.0/24',
            4,
            '192.168.1.2')
        self.assertTrue(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv4_gateway_port_added(self):
        self._test_security_group_port(
            const.DEVICE_OWNER_ROUTER_INTF,
            '192.168.1.1',
            '192.168.1.0/24',
            4,
            '192.168.1.1')
        self.assertFalse(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv4_normal_port_added(self):
        self._test_security_group_port(
            None,
            '192.168.1.1',
            '192.168.1.0/24',
            4,
            '192.168.1.3')
        self.assertFalse(self.notifier.security_groups_provider_updated.called)
    # Helper: one SG with a parameterized TCP port-range rule plus a fixed
    # prefix rule; asserts the RPC returns exactly those rules.
    def _test_sg_rules_for_devices_ipv4_ingress_port_range(
            self, min_port, max_port):
        fake_prefix = FAKE_PREFIX[const.IPv4]
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, str(min_port),
                str(max_port))
            rule2 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '23',
                '23', fake_prefix)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            # Unknown device ids must be silently skipped by the callback.
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'ingress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': max_port,
                         'security_group_id': sg1_id,
                         'port_range_min': min_port},
                        {'direction': 'ingress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': 23, 'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'source_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_sg_rules_for_devices_ipv4_ingress_port_range_min_port_1(self):
        self._test_sg_rules_for_devices_ipv4_ingress_port_range(1, 10)
    def test_security_group_info_for_ports_with_no_rules(self):
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg:
            sg_id = sg['security_group']['id']
            self._delete_default_security_group_egress_rules(sg_id)
            res = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg_id])
            ports_rest = self.deserialize(self.fmt, res)
            port_id = ports_rest['port']['id']
            self.rpc.devices = {port_id: ports_rest['port']}
            devices = [port_id]
            ctx = context.get_admin_context()
            sg_info = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)
            # The SG must still appear, with an empty rule list.
            expected = {sg_id: []}
            self.assertEqual(expected, sg_info['security_groups'])
            self._delete('ports', port_id)
    # Helper context manager: yields a port carrying two allowed address
    # pairs and an SG with remote-group + prefix ingress rules.
    @contextlib.contextmanager
    def _port_with_addr_pairs_and_security_group(self):
        plugin_obj = directory.get_plugin()
        if ('allowed-address-pairs'
            not in plugin_obj.supported_extension_aliases):
            self.skipTest("Test depends on allowed-address-pairs extension")
        fake_prefix = FAKE_PREFIX['IPv4']
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', 'tcp', '22',
                '22', remote_group_id=sg1_id)
            rule2 = self._build_security_group_rule(
                sg1_id,
                'ingress', 'tcp', '23',
                '23', fake_prefix)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 201)
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.1.0/24'},
                             {'mac_address': '00:00:00:00:00:01',
                              'ip_address': '11.0.0.1'}]
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id],
                arg_list=(addr_pair.ADDRESS_PAIRS,),
                allowed_address_pairs=address_pairs)
            yield self.deserialize(self.fmt, res1)
    def test_security_group_info_for_devices_ipv4_addr_pair(self):
        with self._port_with_addr_pairs_and_security_group() as port:
            port_id = port['port']['id']
            sg_id = port['port']['security_groups'][0]
            devices = [port_id, 'no_exist_device']
            ctx = context.get_admin_context()
            # verify that address pairs are included in remote SG IPs
            sg_member_ips = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)['sg_member_ips']
            expected_member_ips = [
                '10.0.1.0/24', '11.0.0.1',
                port['port']['fixed_ips'][0]['ip_address']]
            self.assertEqual(sorted(expected_member_ips),
                             sorted(sg_member_ips[sg_id]['IPv4']))
            self._delete('ports', port_id)
    def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self):
        fake_prefix = FAKE_PREFIX[const.IPv4]
        with self._port_with_addr_pairs_and_security_group() as port:
            port_id = port['port']['id']
            sg_id = port['port']['security_groups'][0]
            devices = [port_id, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id]
            # The remote-group rule expands to one entry per member IP
            # (both address pairs count as members).
            expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                         'security_group_id': sg_id},
                        {'direction': 'egress', 'ethertype': 'IPv6',
                         'security_group_id': sg_id},
                        {'direction': 'ingress',
                         'protocol': 'tcp', 'ethertype': 'IPv4',
                         'port_range_max': 22,
                         'remote_group_id': sg_id,
                         'security_group_id': sg_id,
                         'source_ip_prefix': '11.0.0.1/32',
                         'port_range_min': 22},
                        {'direction': 'ingress',
                         'protocol': 'tcp', 'ethertype': 'IPv4',
                         'port_range_max': 22,
                         'remote_group_id': sg_id,
                         'security_group_id': sg_id,
                         'source_ip_prefix': '10.0.1.0/24',
                         'port_range_min': 22},
                        {'direction': 'ingress', 'protocol': 'tcp',
                         'ethertype': 'IPv4',
                         'port_range_max': 23, 'security_group_id': sg_id,
                         'port_range_min': 23,
                         'source_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(port)
            expected = tools.UnorderedList(expected)
            self.assertEqual(expected,
                             port_rpc['security_group_rules'])
            self.assertEqual(port['port']['allowed_address_pairs'],
                             port_rpc['allowed_address_pairs'])
            self._delete('ports', port_id)
    def test_security_group_rules_for_devices_ipv4_egress(self):
        fake_prefix = FAKE_PREFIX[const.IPv4]
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_TCP, '22',
                '22')
            rule2 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_UDP, '23',
                '23', fake_prefix)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': 22,
                         'security_group_id': sg1_id,
                         'port_range_min': 22},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_UDP,
                         'ethertype': const.IPv4,
                         'port_range_max': 23, 'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'dest_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_security_group_rules_for_devices_ipv4_source_group(self):
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1,\
                self.security_group() as sg2:
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '24',
                '25', remote_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id,
                                 sg2_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            # Second port is a member of sg2; its IP must show up as the
            # expanded source prefix of the remote-group rule.
            res2 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg2_id])
            ports_rest2 = self.deserialize(self.fmt, res2)
            port_id2 = ports_rest2['port']['id']
            port_fixed_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg2_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg2_id},
                        {'direction': u'ingress',
                         'source_ip_prefix': port_fixed_ip2 + '/32',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': 25, 'port_range_min': 24,
                         'remote_group_id': sg2_id,
                         'security_group_id': sg1_id},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
    def test_security_group_info_for_devices_ipv4_source_group(self):
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1,\
                self.security_group() as sg2:
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '24',
                '25', remote_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            res2 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg2_id])
            ports_rest2 = self.deserialize(self.fmt, res2)
            port_id2 = ports_rest2['port']['id']
            port_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)
            # info_for_devices keeps rules un-expanded and reports member
            # IPs separately, unlike rules_for_devices.
            expected = {
                'security_groups': {sg1_id: [
                    {'direction': 'egress', 'ethertype': const.IPv4},
                    {'direction': 'egress', 'ethertype': const.IPv6},
                    {'direction': u'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'ethertype': const.IPv4,
                     'port_range_max': 25, 'port_range_min': 24,
                     'remote_group_id': sg2_id}
                ]},
                'sg_member_ips': {sg2_id: {
                    'IPv4': set([port_ip2]),
                    'IPv6': set(),
                }}
            }
            self.assertEqual(expected['security_groups'],
                             ports_rpc['security_groups'])
            self.assertEqual(expected['sg_member_ips'][sg2_id]['IPv4'],
                             ports_rpc['sg_member_ips'][sg2_id]['IPv4'])
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
    def test_security_group_rules_for_devices_ipv6_ingress(self):
        fake_prefix = FAKE_PREFIX[const.IPv6]
        fake_gateway = FAKE_IP[const.IPv6]
        with self.network() as n,\
                self.subnet(n, gateway_ip=fake_gateway,
                            cidr=fake_prefix, ip_version=6
                            ) as subnet_v6,\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '22',
                '22',
                ethertype=const.IPv6)
            rule2 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_UDP, '23',
                '23', fake_prefix,
                ethertype=const.IPv6)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            # A DHCP port on the same subnet; the device under test is the
            # second (ordinary) port created below.
            self._create_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
                            'ip_address': FAKE_IP['IPv6_DHCP']}],
                device_owner=const.DEVICE_OWNER_DHCP,
                security_groups=[sg1_id])
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            # NOTE(review): these unpacked values are not referenced in the
            # expectations below — looks like a leftover; confirm upstream.
            source_port, dest_port, ethertype = sg_db_rpc.DHCP_RULE_PORT[6]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'ingress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv6,
                         'port_range_max': 22,
                         'security_group_id': sg1_id,
                         'port_range_min': 22},
                        {'direction': 'ingress',
                         'protocol': const.PROTO_NAME_UDP,
                         'ethertype': const.IPv6,
                         'port_range_max': 23,
                         'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'source_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_security_group_info_for_devices_only_ipv6_rule(self):
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '22',
                '22', remote_group_id=sg1_id,
                ethertype=const.IPv6)
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            self._make_security_group_rule(self.fmt, rules)
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)
            expected = {
                'security_groups': {sg1_id: [
                    {'direction': 'egress', 'ethertype': const.IPv4},
                    {'direction': 'egress', 'ethertype': const.IPv6},
                    {'direction': u'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'ethertype': const.IPv6,
                     'port_range_max': 22, 'port_range_min': 22,
                     'remote_group_id': sg1_id}
                ]},
                'sg_member_ips': {sg1_id: {
                    'IPv6': set(),
                }}
            }
            self.assertEqual(expected['security_groups'],
                             ports_rpc['security_groups'])
            self.assertEqual(expected['sg_member_ips'][sg1_id]['IPv6'],
                             ports_rpc['sg_member_ips'][sg1_id]['IPv6'])
            self._delete('ports', port_id1)
    def test_security_group_rules_for_devices_ipv6_egress(self):
        fake_prefix = FAKE_PREFIX[const.IPv6]
        fake_gateway = FAKE_IP[const.IPv6]
        with self.network() as n,\
                self.subnet(n, gateway_ip=fake_gateway,
                            cidr=fake_prefix, ip_version=6
                            ) as subnet_v6,\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_TCP, '22',
                '22',
                ethertype=const.IPv6)
            rule2 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_UDP, '23',
                '23', fake_prefix,
                ethertype=const.IPv6)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            self._make_security_group_rule(self.fmt, rules)
            ports_rest1 = self._make_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg1_id])
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv6,
                         'port_range_max': 22,
                         'security_group_id': sg1_id,
                         'port_range_min': 22},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_UDP,
                         'ethertype': const.IPv6,
                         'port_range_max': 23,
                         'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'dest_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_security_group_rules_for_devices_ipv6_source_group(self):
        fake_prefix = FAKE_PREFIX[const.IPv6]
        fake_gateway = FAKE_IP[const.IPv6]
        with self.network() as n,\
                self.subnet(n, gateway_ip=fake_gateway,
                            cidr=fake_prefix, ip_version=6
                            ) as subnet_v6,\
                self.security_group() as sg1,\
                self.security_group() as sg2:
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '24',
                '25',
                ethertype=const.IPv6,
                remote_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            self._make_security_group_rule(self.fmt, rules)
            ports_rest1 = self._make_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg1_id,
                                 sg2_id])
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ports_rest2 = self._make_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg2_id])
            port_id2 = ports_rest2['port']['id']
            port_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg2_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg2_id},
                        {'direction': 'ingress',
                         'source_ip_prefix': port_ip2 + '/128',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv6,
                         'port_range_max': 25, 'port_range_min': 24,
                         'remote_group_id': sg2_id,
                         'security_group_id': sg1_id},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
class SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase):
    """With security groups disabled the agent must pick the noop driver."""

    def test_init_firewall_with_none_driver(self):
        set_enable_security_groups(False)
        sg_agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=mock.Mock())
        driver_name = sg_agent.firewall.__class__.__name__
        self.assertEqual(driver_name, 'NoopFirewallDriver')
class BaseSecurityGroupAgentRpcTestCase(base.BaseTestCase):
    """Shared fixture: an agent wired to a mocked firewall and one fake port."""

    def setUp(self, defer_refresh_firewall=False):
        super(BaseSecurityGroupAgentRpcTestCase, self).setUp()
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        self.agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=mock.Mock(),
            defer_refresh_firewall=defer_refresh_firewall)
        mock.patch('neutron.agent.linux.iptables_manager').start()
        # Keep a handle on the real (noop) driver before mocking it out.
        self.default_firewall = self.agent.firewall
        self.firewall = mock.Mock()
        real_driver = FakeFirewallDriver()
        # defer_apply must still behave like a real context manager.
        self.firewall.defer_apply.side_effect = real_driver.defer_apply
        self.agent.firewall = self.firewall
        fake_rule = {'security_group_id': 'fake_sgid1',
                     'remote_group_id': 'fake_sgid2'}
        self.fake_device = {
            'device': 'fake_device',
            'network_id': 'fake_net',
            'security_groups': ['fake_sgid1', 'fake_sgid2'],
            'security_group_source_groups': ['fake_sgid2'],
            'security_group_rules': [fake_rule],
        }
        self.firewall.ports = {'fake_device': self.fake_device}
        self.firewall.security_group_updated = mock.Mock()
class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase):
def setUp(self, defer_refresh_firewall=False):
super(SecurityGroupAgentRpcTestCase, self).setUp(
defer_refresh_firewall)
rpc = self.agent.plugin_rpc
rpc.security_group_info_for_devices.side_effect = (
oslo_messaging.UnsupportedVersion('1.2'))
rpc.security_group_rules_for_devices.return_value = (
self.firewall.ports)
def test_prepare_and_remove_devices_filter(self):
self.agent.prepare_devices_filter(['fake_device'])
self.agent.remove_devices_filter(['fake_device'])
# ignore device which is not filtered
self.firewall.assert_has_calls([mock.call.defer_apply(),
mock.call.prepare_port_filter(
self.fake_device),
mock.call.defer_apply(),
mock.call.remove_port_filter(
self.fake_device),
])
def test_prepare_devices_filter_with_noopfirewall(self):
self.agent.firewall = self.default_firewall
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.prepare_devices_filter(['fake_device'])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
def test_prepare_devices_filter_with_firewall_disabled(self):
cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.prepare_devices_filter(['fake_device'])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
def test_security_groups_rule_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall([self.fake_device['device']])])
self.assertFalse(self.firewall.security_group_updated.called)
def test_security_groups_rule_not_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
self.assertFalse(self.agent.refresh_firewall.called)
self.assertFalse(self.firewall.security_group_updated.called)
def test_security_groups_member_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall([self.fake_device['device']])])
self.assertFalse(self.firewall.security_group_updated.called)
def test_security_groups_member_not_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4'])
self.assertFalse(self.agent.refresh_firewall.called)
self.assertFalse(self.firewall.security_group_updated.called)
def test_security_groups_provider_updated(self):
self.agent.refresh_firewall = mock.Mock()
self.agent.security_groups_provider_updated(None)
self.agent.refresh_firewall.assert_has_calls(
[mock.call.refresh_firewall()])
def test_refresh_firewall(self):
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.refresh_firewall()
calls = [mock.call.defer_apply(),
mock.call.prepare_port_filter(self.fake_device),
mock.call.defer_apply(),
mock.call.update_port_filter(self.fake_device)]
self.firewall.assert_has_calls(calls)
def test_refresh_firewall_devices(self):
self.agent.prepare_devices_filter(['fake_port_id'])
self.agent.refresh_firewall([self.fake_device])
calls = [mock.call.defer_apply(),
mock.call.prepare_port_filter(self.fake_device),
mock.call.defer_apply(),
mock.call.update_port_filter(self.fake_device)]
self.firewall.assert_has_calls(calls)
def test_refresh_firewall_none(self):
self.agent.refresh_firewall([])
self.assertFalse(self.firewall.called)
def test_refresh_firewall_with_firewall_disabled(self):
cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.firewall.defer_apply = mock.Mock()
self.agent.refresh_firewall([self.fake_device])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
self.assertFalse(self.agent.firewall.defer_apply.called)
def test_refresh_firewall_with_noopfirewall(self):
self.agent.firewall = self.default_firewall
self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
self.agent.firewall.defer_apply = mock.Mock()
self.agent.refresh_firewall([self.fake_device])
self.assertFalse(self.agent.plugin_rpc.
security_group_info_for_devices.called)
self.assertFalse(self.agent.plugin_rpc.
security_group_rules_for_devices.called)
self.assertFalse(self.agent.firewall.defer_apply.called)
class SecurityGroupAgentEnhancedRpcTestCase(
    BaseSecurityGroupAgentRpcTestCase):
    """Agent-side tests for the 'enhanced' security_group_info RPC path.

    setUp() primes plugin_rpc.security_group_info_for_devices with a canned
    payload (an ordered SG -> rules mapping plus member IPs), so the expected
    firewall mock call sequences below include the extra
    update_security_group_rules / update_security_group_members steps that
    this RPC variant drives.
    """

    def setUp(self, defer_refresh_firewall=False):
        super(SecurityGroupAgentEnhancedRpcTestCase, self).setUp(
            defer_refresh_firewall=defer_refresh_firewall)
        # OrderedDict keeps the SG iteration order deterministic so the
        # assert_has_calls sequences in the tests are stable.
        fake_sg_info = {
            'security_groups': collections.OrderedDict([
                ('fake_sgid2', []),
                ('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])]),
            'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}},
            'devices': self.firewall.ports}
        self.agent.plugin_rpc.security_group_info_for_devices.return_value = (
            fake_sg_info)

    def test_prepare_and_remove_devices_filter_enhanced_rpc(self):
        """Prepare then remove: rules/members pushed before port filtering."""
        self.agent.prepare_devices_filter(['fake_device'])
        self.agent.remove_devices_filter(['fake_device'])
        # these two mocks are too long, just use tmp_mock to replace them
        tmp_mock1 = mock.call.update_security_group_rules(
            'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])
        tmp_mock2 = mock.call.update_security_group_members(
            'fake_sgid2', {'IPv4': [], 'IPv6': []})
        # ignore device which is not filtered
        self.firewall.assert_has_calls([mock.call.defer_apply(),
                                        mock.call.update_security_group_rules(
                                            'fake_sgid2', []),
                                        tmp_mock1,
                                        tmp_mock2,
                                        mock.call.prepare_port_filter(
                                            self.fake_device),
                                        mock.call.defer_apply(),
                                        mock.call.remove_port_filter(
                                            self.fake_device),
                                        ])

    def test_security_groups_rule_updated_enhanced_rpc(self):
        # Rule update on a used SG: device refreshed and the firewall is
        # told which SGs changed ('sg_rule').
        sg_list = ['fake_sgid1', 'fake_sgid3']
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(sg_list)
        self.agent.refresh_firewall.assert_called_once_with(
            [self.fake_device['device']])
        self.firewall.security_group_updated.assert_called_once_with(
            'sg_rule', set(sg_list))

    def test_security_groups_rule_not_updated_enhanced_rpc(self):
        # Rule update on unrelated SGs: no refresh, no firewall notification.
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_member_updated_enhanced_rpc(self):
        # Member update on a source SG: device refreshed and the firewall is
        # told which SGs changed ('sg_member').
        sg_list = ['fake_sgid2', 'fake_sgid3']
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(sg_list)
        self.agent.refresh_firewall.assert_called_once_with(
            [self.fake_device['device']])
        self.firewall.security_group_updated.assert_called_once_with(
            'sg_member', set(sg_list))

    def test_security_groups_member_not_updated_enhanced_rpc(self):
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(
            ['fake_sgid3', 'fake_sgid4'])
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_provider_updated_enhanced_rpc(self):
        self.agent.refresh_firewall = mock.Mock()
        self.agent.security_groups_provider_updated(None)
        self.agent.refresh_firewall.assert_has_calls(
            [mock.call.refresh_firewall()])

    def test_refresh_firewall_enhanced_rpc(self):
        # A full refresh replays rules/members for both the prepare and the
        # update passes, hence the duplicated sub-sequence below.
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.refresh_firewall()
        calls = [mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules(
                     'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members(
                     'fake_sgid2', {'IPv4': [], 'IPv6': []}),
                 mock.call.prepare_port_filter(self.fake_device),
                 mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules(
                     'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members(
                     'fake_sgid2', {'IPv4': [], 'IPv6': []}),
                 mock.call.update_port_filter(self.fake_device)]
        self.firewall.assert_has_calls(calls)

    def test_refresh_firewall_devices_enhanced_rpc(self):
        self.agent.prepare_devices_filter(['fake_device'])
        self.agent.refresh_firewall([self.fake_device])
        calls = [mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules('fake_sgid1', [
                     {'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members('fake_sgid2', {
                     'IPv4': [], 'IPv6': []
                 }),
                 mock.call.prepare_port_filter(self.fake_device),
                 mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules('fake_sgid1', [
                     {'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members('fake_sgid2', {
                     'IPv4': [], 'IPv6': []}),
                 mock.call.update_port_filter(self.fake_device)
                 ]
        self.firewall.assert_has_calls(calls)

    def test_refresh_firewall_none_enhanced_rpc(self):
        self.agent.refresh_firewall([])
        self.assertFalse(self.firewall.called)
class SecurityGroupAgentRpcWithDeferredRefreshTestCase(
    SecurityGroupAgentRpcTestCase):
    """Agent tests with defer_refresh_firewall=True.

    In deferred mode, SG rule/member update notifications only record the
    affected devices in agent.devices_to_refilter (or set
    agent.global_refresh_firewall); the actual re-filtering is performed
    later by setup_port_filters().
    """

    def setUp(self):
        super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp(
            defer_refresh_firewall=True)

    @contextlib.contextmanager
    def add_fake_device(self, device, sec_groups, source_sec_groups=None):
        # Temporarily register an extra port with the mocked firewall so a
        # test can observe which devices get marked for refiltering; the
        # port is removed again on context exit.
        fake_device = {'device': device,
                       'security_groups': sec_groups,
                       'security_group_source_groups': source_sec_groups or [],
                       'security_group_rules': [{'security_group_id':
                                                 'fake_sgid1',
                                                 'remote_group_id':
                                                 'fake_sgid2'}]}
        self.firewall.ports[device] = fake_device
        yield
        del self.firewall.ports[device]

    def test_security_groups_rule_updated(self):
        # Deferred mode: the device is queued, not refreshed immediately.
        self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
        self.assertIn('fake_device', self.agent.devices_to_refilter)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_rule_updated_same_port(self):
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgidX']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_rule_updated(['fake_sgid1'])
            self.agent.security_groups_rule_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_rule_updated_multiple_ports(self):
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid2']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_rule_updated(['fake_sgid1',
                                                     'fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_rule_updated_multiple_ports(self):
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid2']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_rule_updated(['fake_sgid1'])
            self.agent.security_groups_rule_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_member_updated(self):
        self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
        self.assertIn('fake_device', self.agent.devices_to_refilter)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_member_updated_same_port(self):
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
                                  source_sec_groups=['fake_sgidX']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_member_updated(['fake_sgid1',
                                                       'fake_sgid3'])
            self.agent.security_groups_member_updated(['fake_sgid2',
                                                       'fake_sgid3'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_member_updated_multiple_ports(self):
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
                                  source_sec_groups=['fake_sgid2']):
            self.agent.security_groups_member_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_member_updated_multiple_ports(self):
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
                                  source_sec_groups=['fake_sgid1B']):
            self.agent.security_groups_member_updated(['fake_sgid1B'])
            self.agent.security_groups_member_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_provider_updated(self):
        # Provider update with no device list => full refresh flag.
        self.agent.security_groups_provider_updated(None)
        self.assertTrue(self.agent.global_refresh_firewall)

    def test_security_groups_provider_updated_devices_specified(self):
        # Provider update with explicit ports => only those are queued.
        self.agent.firewall.ports = {
            'fake_device_1': {
                'id': 'fake_port_id_1',
                'device': 'fake_device_1'},
            'fake_device_2': {
                'id': 'fake_port_id_2',
                'device': 'fake_device_2'}}
        self.agent.security_groups_provider_updated(
            ['fake_port_id_1', 'fake_port_id_2'])
        self.assertFalse(self.agent.global_refresh_firewall)
        self.assertIn('fake_device_1', self.agent.devices_to_refilter)
        self.assertIn('fake_device_2', self.agent.devices_to_refilter)

    def test_setup_port_filters_new_ports_only(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['fake_new_device']), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_updated_ports_only(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(), set(['fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_updated_device']))
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filter_new_and_updated_ports(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['fake_new_device']),
                                      set(['fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_updated_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_sg_updates_only(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device']))
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_sg_updates_and_new_ports(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['fake_new_device']), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def _test_prepare_devices_filter(self, devices):
        # simulate an RPC arriving and calling _security_group_updated()
        self.agent.devices_to_refilter |= set(['fake_new_device'])

    def test_setup_port_filters_new_port_and_rpc(self):
        # Make sure that if an RPC arrives and adds a device to
        # devices_to_refilter while we are in setup_port_filters()
        # that it is not cleared, and will be processed later.
        self.agent.prepare_devices_filter = self._test_prepare_devices_filter
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['new_device', 'fake_device'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['new_device']), set())
        self.assertEqual(self.agent.devices_to_refilter,
                         set(['fake_new_device']))
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_sg_updates_and_updated_ports(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(
            set(), set(['fake_device', 'fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device', 'fake_device_2', 'fake_updated_device']))
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_all_updates(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(
            set(['fake_new_device']),
            set(['fake_device', 'fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device', 'fake_device_2', 'fake_updated_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_no_update(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_with_global_refresh(self):
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = True
        self.agent.setup_port_filters(set(), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with()
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)
class FakeSGNotifierAPI(securitygroups_rpc.SecurityGroupAgentRpcApiMixin):
    """Minimal notifier wiring the SG agent RPC mixin to a fake topic."""

    def __init__(self):
        self.topic = 'fake'
        self.client = n_rpc.get_client(
            oslo_messaging.Target(topic=self.topic, version='1.0'))
class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase):
    """Verify the cast() payloads produced by the SG notifier API mixin."""

    def setUp(self):
        super(SecurityGroupAgentRpcApiTestCase, self).setUp()
        self.notifier = FakeSGNotifierAPI()
        client = self.notifier.client
        # prepare() returns the client itself so cast() lands on our mock.
        self.mock_prepare = mock.patch.object(
            client, 'prepare', return_value=client).start()
        self.mock_cast = mock.patch.object(client, 'cast').start()

    def test_security_groups_provider_updated(self):
        """Provider updates are cast with devices_to_update=None."""
        self.notifier.security_groups_provider_updated(None)
        expected = mock.call(None, 'security_groups_provider_updated',
                             devices_to_update=None)
        self.mock_cast.assert_has_calls([expected])

    def test_security_groups_rule_updated(self):
        """Rule updates carry the affected security group ids."""
        self.notifier.security_groups_rule_updated(
            None, security_groups=['fake_sgid'])
        expected = mock.call(None, 'security_groups_rule_updated',
                             security_groups=['fake_sgid'])
        self.mock_cast.assert_has_calls([expected])

    def test_security_groups_member_updated(self):
        """Member updates carry the affected security group ids."""
        self.notifier.security_groups_member_updated(
            None, security_groups=['fake_sgid'])
        expected = mock.call(None, 'security_groups_member_updated',
                             security_groups=['fake_sgid'])
        self.mock_cast.assert_has_calls([expected])

    def test_security_groups_rule_not_updated(self):
        """An empty SG list suppresses the rule-updated cast."""
        self.notifier.security_groups_rule_updated(None, security_groups=[])
        self.assertFalse(self.mock_cast.called)

    def test_security_groups_member_not_updated(self):
        """An empty SG list suppresses the member-updated cast."""
        self.notifier.security_groups_member_updated(None, security_groups=[])
        self.assertFalse(self.mock_cast.called)
# Note(nati): shorthand used in the template strings below:
#   bn -> binary_name
#   id -> device_id
PHYSDEV_MOD = '-m physdev'
PHYSDEV_IS_BRIDGED = '--physdev-is-bridged'
# Substitution dict for the iptables template strings. Each template is
# rendered immediately with "% IPTABLES_ARG", so whatever value
# IPTABLES_ARG['chains'] holds at that point in the module is baked in.
IPTABLES_ARG = {'bn': iptables_manager.binary_name,
                'physdev_mod': PHYSDEV_MOD,
                'physdev_is_bridged': PHYSDEV_IS_BRIDGED}

CHAINS_MANGLE = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|mark'
IPTABLES_ARG['chains'] = CHAINS_MANGLE
# NOTE(review): 'chains' is overwritten again below before any template
# references it -- these mangle assignments look vestigial; confirm before
# removing.
CHAINS_MANGLE_V6 = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING'
IPTABLES_ARG['chains'] = CHAINS_MANGLE_V6

CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat'

# Port/MAC/IP fixtures referenced by the filter templates further down.
IPTABLES_ARG['port1'] = 'port1'
IPTABLES_ARG['port2'] = 'port2'
IPTABLES_ARG['mac1'] = '12:34:56:78:9A:BC'
IPTABLES_ARG['mac2'] = '12:34:56:78:9A:BD'
IPTABLES_ARG['ip1'] = '10.0.0.3/32'
IPTABLES_ARG['ip2'] = '10.0.0.4/32'
IPTABLES_ARG['chains'] = CHAINS_NAT
# Expected *raw table output with no ports wired up.
IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Expected *raw table with conntrack-zone (CT --zone) rules for one
# linuxbridge-attached port.
IPTABLES_RAW_BRIDGE_NET_1 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Same, with a second port on a second bridge network (zone 2).
IPTABLES_RAW_BRIDGE_NET_2 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 4 -m physdev --physdev-in brqfakenet2 \
-m comment --comment "Set zone for port2" -j CT --zone 2
-I %(bn)s-PREROUTING 5 -i brqfakenet2 \
-m comment --comment "Set zone for port2" -j CT --zone 2
-I %(bn)s-PREROUTING 6 -m physdev --physdev-in tap_port2 \
-m comment --comment "Set zone for port2" -j CT --zone 2
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Expected *raw table for a hybrid-plug (qvb/tap veth pair) device.
IPTABLES_RAW_DEVICE_1 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_port1 \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i qvbtap_port1 \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Same, with a second hybrid-plug device in conntrack zone 2.
IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_%(port1)s \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i qvbtap_%(port1)s \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_%(port1)s \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 4 -m physdev --physdev-in qvbtap_%(port2)s \
-m comment --comment "Set zone for %(port2)s" -j CT --zone 2
-I %(bn)s-PREROUTING 5 -i qvbtap_%(port2)s \
-m comment --comment "Set zone for %(port2)s" -j CT --zone 2
-I %(bn)s-PREROUTING 6 -m physdev --physdev-in tap_%(port2)s \
-m comment --comment "Set zone for %(port2)s" -j CT --zone 2
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
CHAINS_RAW = 'OUTPUT|PREROUTING'
IPTABLES_ARG['chains'] = CHAINS_RAW

# NOTE(review): the parenthesized "(%(chains)s)" alternation in the chain
# declaration lines suggests these templates are matched as regexes by the
# comparison helper -- confirm against the code that consumes them.
IPTABLES_RAW = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Chain-name alternations for the filter-table templates: no ports,
# one port, two ports.
CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback'
CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1'
CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2'

IPTABLES_ARG['chains'] = CHAINS_1

# Expected *filter table for one port when the ipset driver is in use
# (remote-group matching via "-m set --match-set").
IPSET_FILTER_1 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_port1 4 -m set --match-set NIPv4security_group1 src -j \
RETURN
-I %(bn)s-i_port1 5 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 2 -j %(bn)s-s_port1
-I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 6 -j RETURN
-I %(bn)s-o_port1 7 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
-I %(bn)s-s_port1 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected *filter table for one port without ipset: same as IPSET_FILTER_1
# but the remote-group ipset match in the i_port1 chain is absent.
IPTABLES_FILTER_1 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_port1 4 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 5 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 2 -j %(bn)s-s_port1
-I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 6 -j RETURN
-I %(bn)s-o_port1 7 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
-I %(bn)s-s_port1 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Variant of IPTABLES_FILTER_1 where the remote group resolved to a single
# member IP, so the ingress chain allows 10.0.0.4/32 directly.
IPTABLES_FILTER_1_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_port1 4 -s 10.0.0.4/32 -j RETURN
-I %(bn)s-i_port1 5 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 2 -j %(bn)s-s_port1
-I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 6 -j RETURN
-I %(bn)s-o_port1 7 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
-I %(bn)s-s_port1 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG

# Two-port chain alternation for the templates that follow.
IPTABLES_ARG['chains'] = CHAINS_2
IPSET_FILTER_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Same two-port ipset-enabled table as IPSET_FILTER_2, plus an ingress ICMP
# allow rule on both ports (rule 5 in each i_ chain).
IPSET_FILTER_2_3 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected IPv4 filter table for two ports WITHOUT ipset: remote members of
# the security group are expanded into explicit per-address -s rules
# (port1 allows traffic from ip2 and vice versa).
IPTABLES_FILTER_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN
-I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Variant of IPTABLES_FILTER_2 after the remote member behind ip2 has left
# the group: port1's ingress chain loses its -s %(ip2)s rule (5 rules
# instead of 6) while port2 still allows ip1.
IPTABLES_FILTER_2_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 5 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# IPTABLES_FILTER_2 plus an ingress ICMP allow rule (rule 5) in both
# per-port ingress chains — expected state after an ICMP rule is added to
# the security group.
IPTABLES_FILTER_2_3 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN
-I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
-I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected IPv4 filter table once every port has been unfiltered: only the
# framework chains remain (sg-chain ACCEPT and the fallback DROP).
IPTABLES_ARG['chains'] = CHAINS_EMPTY
IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-sg-chain 1 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected IPv6 filter table for a single port: ICMPv6 ND/MLD types (130,
# 134-136) allowed in; outbound MLD/NS/MLDv2 from the unspecified address
# allowed, router advertisements (type 134) from the VM dropped, DHCPv6
# client traffic (546->547) allowed and rogue-server replies dropped.
IPTABLES_ARG['chains'] = CHAINS_1
IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
-I %(bn)s-i_port1 2 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j RETURN
-I %(bn)s-i_port1 3 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
-I %(bn)s-i_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
-I %(bn)s-i_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 6 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 131 -j RETURN
-I %(bn)s-o_port1 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 135 -j RETURN
-I %(bn)s-o_port1 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 143 -j RETURN
-I %(bn)s-o_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
-I %(bn)s-o_port1 5 -p ipv6-icmp -j RETURN
-I %(bn)s-o_port1 6 -p udp -m udp --sport 546 --dport 547 -j RETURN
-I %(bn)s-o_port1 7 -p udp -m udp --sport 547 --dport 546 -j DROP
-I %(bn)s-o_port1 8 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 9 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 10 -j %(bn)s-sg-fallback
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Two-port version of IPTABLES_FILTER_V6_1: each port gets the same ICMPv6
# ND/MLD ingress allows, RA drop and DHCPv6 client rules on egress.
IPTABLES_ARG['chains'] = CHAINS_2
IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
-I %(bn)s-i_%(port1)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
-I %(bn)s-i_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
-I %(bn)s-i_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
-I %(bn)s-i_%(port2)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
-I %(bn)s-i_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 131 -j RETURN
-I %(bn)s-o_%(port1)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 135 -j RETURN
-I %(bn)s-o_%(port1)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 143 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
-I %(bn)s-o_%(port1)s 5 -p ipv6-icmp -j RETURN
-I %(bn)s-o_%(port1)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN
-I %(bn)s-o_%(port1)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP
-I %(bn)s-o_%(port1)s 8 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 9 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 10 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 131 -j RETURN
-I %(bn)s-o_%(port2)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 135 -j RETURN
-I %(bn)s-o_%(port2)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 143 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
-I %(bn)s-o_%(port2)s 5 -p ipv6-icmp -j RETURN
-I %(bn)s-o_%(port2)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN
-I %(bn)s-o_%(port2)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP
-I %(bn)s-o_%(port2)s 8 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 9 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 10 -j %(bn)s-sg-fallback
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# IPv6 counterpart of IPTABLES_FILTER_EMPTY: framework chains only.
IPTABLES_ARG['chains'] = CHAINS_EMPTY
IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-sg-chain 1 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
class TestSecurityGroupAgentWithIptables(base.BaseTestCase):
FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER
PHYSDEV_INGRESS = 'physdev-out'
PHYSDEV_EGRESS = 'physdev-in'
    def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
        """Wire up a security group agent whose iptables calls are mocked.

        Configures the agent without ipset and without iptables comments,
        replaces the plugin RPC with a Mock, and patches the iptables
        manager's ``execute`` so each test can queue expected invocations
        with _register_mock_call()/_replay_iptables() and verify them with
        _verify_mock_calls().  Also builds the rule sets and fake device
        dicts (devices1/devices2/devices3) used by the tests.
        """
        clear_mgrs = lambda: ip_conntrack.CONTRACK_MGRS.clear()
        self.addCleanup(clear_mgrs)
        clear_mgrs()  # clear before start in case other tests didn't clean up
        super(TestSecurityGroupAgentWithIptables, self).setUp()
        set_firewall_driver(self.FIREWALL_DRIVER)
        cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP')
        cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT')
        self.utils_exec = mock.patch(
            'neutron.agent.linux.utils.execute').start()
        self.rpc = mock.Mock()
        self._init_agent(defer_refresh_firewall)
        if test_rpc_v1_1:
            # Force fallback to the older security_group_rules_for_devices
            # RPC by making the newer info call raise UnsupportedVersion.
            self.rpc.security_group_info_for_devices.side_effect = (
                oslo_messaging.UnsupportedVersion('1.2'))
        self.iptables = self.agent.firewall.iptables
        self.ipconntrack = self.agent.firewall.ipconntrack
        # TODO(jlibosva) Get rid of mocking iptables execute and mock out
        # firewall instead
        self.iptables.use_ipv6 = True
        self.iptables_execute = mock.patch.object(self.iptables,
                                                  "execute").start()
        # NOTE: side_effect holds a reference to this very list, so values
        # appended later by _register_mock_call() are still consumed.
        self.iptables_execute_return_values = []
        self.expected_call_count = 0
        self.expected_calls = []
        self.expected_process_inputs = []
        self.iptables_execute.side_effect = self.iptables_execute_return_values
        # rule1: allow DHCP replies and SSH in; unrestricted IPv4 egress.
        rule1 = [{'direction': 'ingress',
                  'protocol': const.PROTO_NAME_UDP,
                  'ethertype': const.IPv4,
                  'source_ip_prefix': '10.0.0.2/32',
                  'source_port_range_min': 67,
                  'source_port_range_max': 67,
                  'port_range_min': 68,
                  'port_range_max': 68},
                 {'direction': 'ingress',
                  'protocol': const.PROTO_NAME_TCP,
                  'ethertype': const.IPv4,
                  'port_range_min': 22,
                  'port_range_max': 22},
                 {'direction': 'egress',
                  'ethertype': const.IPv4}]
        # rule2/rule4: rule1 plus ingress from the peer port's address;
        # rule3/rule5: additionally allow ingress ICMP.
        rule2 = rule1[:]
        rule2 += [{'direction': 'ingress',
                   'source_ip_prefix': '10.0.0.4/32',
                   'ethertype': const.IPv4}]
        rule3 = rule2[:]
        rule3 += [{'direction': 'ingress',
                   'protocol': const.PROTO_NAME_ICMP,
                   'ethertype': const.IPv4}]
        rule4 = rule1[:]
        rule4 += [{'direction': 'ingress',
                   'source_ip_prefix': '10.0.0.3/32',
                   'ethertype': const.IPv4}]
        rule5 = rule4[:]
        rule5 += [{'direction': 'ingress',
                   'protocol': const.PROTO_NAME_ICMP,
                   'ethertype': const.IPv4}]
        # devices1: port1 alone; devices2: both ports allowing each other;
        # devices3: devices2 plus ICMP.  OrderedDict keeps iptables output
        # deterministic so the expected templates match.
        self.devices1 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3/32',
                                                   '12:34:56:78:9a:bc',
                                                   rule1)}
        self.devices2 = collections.OrderedDict([
            ('tap_port1', self._device('tap_port1',
                                       '10.0.0.3/32',
                                       '12:34:56:78:9a:bc',
                                       rule2)),
            ('tap_port2', self._device('tap_port2',
                                       '10.0.0.4/32',
                                       '12:34:56:78:9a:bd',
                                       rule4))
        ])
        self.devices3 = collections.OrderedDict([
            ('tap_port1', self._device('tap_port1',
                                       '10.0.0.3/32',
                                       '12:34:56:78:9a:bc',
                                       rule3)),
            ('tap_port2', self._device('tap_port2',
                                       '10.0.0.4/32',
                                       '12:34:56:78:9a:bd',
                                       rule5))
        ])
        self.agent.firewall.security_group_updated = mock.Mock()
@staticmethod
def _enforce_order_in_firewall(firewall):
# for the sake of the test, eliminate any order randomness:
# it helps to match iptables output against regexps consistently
for attr in ('filtered_ports', 'unfiltered_ports'):
setattr(firewall, attr, collections.OrderedDict())
def _init_agent(self, defer_refresh_firewall):
self.agent = sg_rpc.SecurityGroupAgentRpc(
context=None, plugin_rpc=self.rpc,
defer_refresh_firewall=defer_refresh_firewall)
self._enforce_order_in_firewall(self.agent.firewall)
# don't mess with sysctl knobs in unit tests
self.agent.firewall._enabled_netfilter_for_bridges = True
def _device(self, device, ip, mac_address, rule):
return {'device': device,
'network_id': 'fakenet%s' % device[-1:],
'fixed_ips': [ip],
'mac_address': mac_address,
'security_groups': ['security_group1'],
'security_group_rules': rule,
'security_group_source_groups': [
'security_group1']}
def _regex(self, value):
value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS)
value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS)
value = value.replace('\n', '\\n')
value = value.replace('[', r'\[')
value = value.replace(']', r'\]')
value = value.replace('*', r'\*')
return value
def _register_mock_call(self, *args, **kwargs):
return_value = kwargs.pop('return_value', None)
self.iptables_execute_return_values.append(return_value)
has_process_input = 'process_input' in kwargs
process_input = kwargs.get('process_input')
self.expected_process_inputs.append((has_process_input, process_input))
if has_process_input:
kwargs['process_input'] = mock.ANY
self.expected_calls.append(mock.call(*args, **kwargs))
self.expected_call_count += 1
def _verify_mock_calls(self, exp_fw_sg_updated_call=False):
self.assertEqual(self.expected_call_count,
self.iptables_execute.call_count)
self.iptables_execute.assert_has_calls(self.expected_calls)
for i, expected in enumerate(self.expected_process_inputs):
check, expected_regex = expected
if not check:
continue
# The second or later arguments of self.iptables.execute
# are keyword parameter, so keyword argument is extracted by [1]
kwargs = self.iptables_execute.call_args_list[i][1]
self.assertThat(kwargs['process_input'],
matchers.MatchesRegex(expected_regex))
self.assertEqual(exp_fw_sg_updated_call,
self.agent.firewall.security_group_updated.called)
def _replay_iptables(self, v4_filter, v6_filter, raw):
self._register_mock_call(
['iptables-save'],
run_as_root=True,
return_value='')
self._register_mock_call(
['iptables-restore', '-n'],
process_input=self._regex(v4_filter + raw),
run_as_root=True,
return_value='')
self._register_mock_call(
['ip6tables-save'],
run_as_root=True,
return_value='')
self._register_mock_call(
['ip6tables-restore', '-n'],
process_input=self._regex(v6_filter + raw),
run_as_root=True,
return_value='')
    def test_prepare_remove_port(self):
        """Prepare then remove a port filter; rules must return to empty."""
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()
    def test_security_group_member_updated(self):
        """Member updates must trigger iptables refreshes for both ports.

        Replays the expected iptables transitions while a second member is
        added to security_group1 and then removed again.
        """
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()
    def test_security_group_rule_updated(self):
        """Rule updates must rewrite iptables with the new rule set."""
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls()
class TestSecurityGroupAgentEnhancedRpcWithIptables(
    TestSecurityGroupAgentWithIptables):
    """Same scenarios as the parent, but via security_group_info_for_devices.

    Uses the "enhanced" RPC (security group info with per-group rules and
    member IPs) instead of per-device rule lists, and additionally checks
    the firewall's security_group_updated hook.
    """

    def setUp(self, defer_refresh_firewall=False):
        super(TestSecurityGroupAgentEnhancedRpcWithIptables, self).setUp(
            defer_refresh_firewall=defer_refresh_firewall, test_rpc_v1_1=False)
        self.sg_info = self.rpc.security_group_info_for_devices
        # rule1: DHCP reply, SSH, egress-any and intra-group ingress.
        rule1 = [{'direction': 'ingress',
                  'protocol': const.PROTO_NAME_UDP,
                  'ethertype': const.IPv4,
                  'source_ip_prefix': '10.0.0.2/32',
                  'source_port_range_min': 67,
                  'source_port_range_max': 67,
                  'port_range_min': 68,
                  'port_range_max': 68},
                 {'direction': 'ingress',
                  'protocol': const.PROTO_NAME_TCP,
                  'ethertype': const.IPv4,
                  'port_range_min': 22,
                  'port_range_max': 22},
                 {'direction': 'egress',
                  'ethertype': const.IPv4},
                 {'direction': 'ingress',
                  'remote_group_id': 'security_group1',
                  'ethertype': const.IPv4}]
        # rule2 extends rule1 with an ingress ICMP rule.
        rule2 = rule1[:]
        rule2 += [{'direction': 'ingress',
                   'protocol': const.PROTO_NAME_ICMP,
                   'ethertype': const.IPv4}]

        # devices_info1: single port; devices_info2: two ports (ordered so
        # the generated iptables output is deterministic).
        devices_info1 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3/32',
                                                   '12:34:56:78:9a:bc',
                                                   [])}
        self.devices_info1 = {'security_groups': {'security_group1': rule1},
                              'sg_member_ips': {
                                  'security_group1': {
                                      'IPv4': ['10.0.0.3/32'], 'IPv6': []}},
                              'devices': devices_info1}
        devices_info2 = collections.OrderedDict([
            ('tap_port1', self._device('tap_port1',
                                       '10.0.0.3/32',
                                       '12:34:56:78:9a:bc',
                                       [])),
            ('tap_port2', self._device('tap_port2',
                                       '10.0.0.4/32',
                                       '12:34:56:78:9a:bd',
                                       []))
        ])
        self.devices_info2 = {'security_groups': {'security_group1': rule1},
                              'sg_member_ips': {
                                  'security_group1': {
                                      'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
                                      'IPv6': []}},
                              'devices': devices_info2}
        self.devices_info3 = {'security_groups': {'security_group1': rule2},
                              'sg_member_ips': {
                                  'security_group1': {
                                      'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
                                      'IPv6': []}},
                              'devices': devices_info2}

    def test_prepare_remove_port(self):
        """Prepare then remove a port filter; rules must return to empty."""
        self.ipconntrack._device_zone_map = {}
        self.sg_info.return_value = self.devices_info1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        """Member updates refresh iptables and fire security_group_updated."""
        self.sg_info.return_value = self.devices_info1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.sg_info.return_value = self.devices_info2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.sg_info.return_value = self.devices_info1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls(True)
        # One security_group_updated call per member update above.
        self.assertEqual(
            2, self.agent.firewall.security_group_updated.call_count)

    def test_security_group_rule_updated(self):
        """Rule updates rewrite iptables and fire security_group_updated."""
        self.sg_info.return_value = self.devices_info2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.sg_info.return_value = self.devices_info3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls(True)
        self.agent.firewall.security_group_updated.assert_called_with(
            'sg_rule', set(['security_group1']))
class TestSecurityGroupAgentEnhancedIpsetWithIptables(
    TestSecurityGroupAgentEnhancedRpcWithIptables):
    """Enhanced-RPC scenarios with the firewall's ipset optimization on.

    Expected iptables output uses the IPSET_FILTER_* fixtures because
    remote-group matches are delegated to ipset sets.
    """

    def setUp(self, defer_refresh_firewall=False):
        super(TestSecurityGroupAgentEnhancedIpsetWithIptables, self).setUp(
            defer_refresh_firewall)
        self.agent.firewall.enable_ipset = True
        self.ipset = self.agent.firewall.ipset
        self.ipset_execute = mock.patch.object(self.ipset,
                                               "execute").start()

    def test_prepare_remove_port(self):
        """Prepare then remove a port filter; rules must return to empty."""
        self.ipconntrack._device_zone_map = {}
        self.sg_info.return_value = self.devices_info1
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        """Member updates only touch ipset; iptables filters stay stable."""
        self.sg_info.return_value = self.devices_info1
        # The set-diff helpers are stubbed so no real ipset state is needed.
        self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3'])
        self.ipset._get_deleted_set_ips = mock.Mock(return_value=[])
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.sg_info.return_value = self.devices_info2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.sg_info.return_value = self.devices_info1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls(True)
        self.assertEqual(
            2, self.agent.firewall.security_group_updated.call_count)

    def test_security_group_rule_updated(self):
        """Rule updates rewrite iptables and fire security_group_updated."""
        self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3'])
        self.ipset._get_deleted_set_ips = mock.Mock(return_value=[])
        self.sg_info.return_value = self.devices_info2
        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPSET_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.sg_info.return_value = self.devices_info3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls(True)
        self.agent.firewall.security_group_updated.assert_called_with(
            'sg_rule', set(['security_group1']))
class SGNotificationTestMixin(object):
    """Mixin checking that SG API operations emit the agent notifications.

    Expects the host class to provide the API helpers (``security_group``,
    ``network``, ``subnet``, ``_create_port`` ...) and ``self.notifier``.
    """

    def test_security_group_rule_updated(self):
        """Creating and deleting a rule each notify the group's agents."""
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            with self.security_group(name, description):
                security_group_id = sg['security_group']['id']
                rule = self._build_security_group_rule(
                    security_group_id,
                    direction='ingress',
                    proto=const.PROTO_NAME_TCP)
                security_group_rule = self._make_security_group_rule(self.fmt,
                                                                     rule)
                self._delete('security-group-rules',
                             security_group_rule['security_group_rule']['id'])
            self.notifier.assert_has_calls(
                [mock.call.security_groups_rule_updated(mock.ANY,
                                                        [security_group_id]),
                 mock.call.security_groups_rule_updated(mock.ANY,
                                                        [security_group_id])])

    def test_security_group_member_updated(self):
        """Binding a port to a group notifies member updates for the group."""
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    security_group_id = sg['security_group']['id']
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)

                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [security_group_id]}}

                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     security_group_id)
                    self._delete('ports', port['port']['id'])
                    self.notifier.assert_has_calls(
                        [mock.call.security_groups_member_updated(
                            mock.ANY, [mock.ANY])])
class TestSecurityGroupAgentWithOVSIptables(
    TestSecurityGroupAgentWithIptables):
    """Parent scenarios run against the OVS hybrid iptables driver.

    The hybrid driver names devices differently (tap/qvb/o/i/s prefixes)
    and manages per-device raw-table chains, hence the overridden _regex
    and the IPTABLES_RAW_DEVICE_* fixtures.
    """

    FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER

    def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
        super(TestSecurityGroupAgentWithOVSIptables, self).setUp(
            defer_refresh_firewall,
            test_rpc_v1_1)

    def _init_agent(self, defer_refresh_firewall):
        self.agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=self.rpc,
            defer_refresh_firewall=defer_refresh_firewall)
        self._enforce_order_in_firewall(self.agent.firewall)
        # don't mess with sysctl knobs in unit tests
        self.agent.firewall._enabled_netfilter_for_bridges = True

    def test_prepare_remove_port(self):
        """Prepare then remove a port filter; rules must return to empty."""
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        """Member updates must trigger iptables refreshes for both ports."""
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_rule_updated(self):
        """Rule updates must rewrite iptables with the new rule set."""
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls()

    def _regex(self, value):
        # NOTE(nati): tap is prefixed on the device
        # in the OVSHybridIptablesFirewallDriver
        value = value.replace('tap_port', 'taptap_port')
        value = value.replace('qvbtaptap_port', 'qvbtap_port')
        value = value.replace('o_port', 'otap_port')
        value = value.replace('i_port', 'itap_port')
        value = value.replace('s_port', 'stap_port')
        return super(
            TestSecurityGroupAgentWithOVSIptables,
            self)._regex(value)
class TestSecurityGroupExtensionControl(base.BaseTestCase):
    """Check enable_security_group config toggles the extension alias."""

    def test_disable_security_group_extension_by_config(self):
        """With SGs disabled, 'security-group' is stripped from the list."""
        set_enable_security_groups(False)
        exp_aliases = ['dummy1', 'dummy2']
        ext_aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_by_config(ext_aliases)
        # assertEqual(expected, observed) — argument order fixed for
        # consistency with the rest of this module.
        self.assertEqual(exp_aliases, ext_aliases)

    def test_enable_security_group_extension_by_config(self):
        """With SGs enabled, the extension alias list is left untouched."""
        set_enable_security_groups(True)
        exp_aliases = ['dummy1', 'security-group', 'dummy2']
        ext_aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_by_config(ext_aliases)
        self.assertEqual(exp_aliases, ext_aliases)
| 45.786105
| 79
| 0.605687
|
import collections
import contextlib
import mock
import netaddr
from neutron_lib import constants as const
from neutron_lib import context
from neutron_lib.plugins import directory
from oslo_config import cfg
import oslo_messaging
from testtools import matchers
import webob.exc
from neutron.agent import firewall as firewall_base
from neutron.agent.linux import ip_conntrack
from neutron.agent.linux import iptables_manager
from neutron.agent import securitygroups_rpc as sg_rpc
from neutron.api.rpc.handlers import securitygroups_rpc
from neutron.common import rpc as n_rpc
from neutron.db import securitygroups_rpc_base as sg_db_rpc
from neutron.extensions import allowedaddresspairs as addr_pair
from neutron.extensions import securitygroup as ext_sg
from neutron.tests import base
from neutron.tests import tools
from neutron.tests.unit.extensions import test_securitygroup as test_sg
FAKE_PREFIX = {const.IPv4: '10.0.0.0/24',
const.IPv6: '2001:db8::/64'}
FAKE_IP = {const.IPv4: '10.0.0.1',
const.IPv6: 'fe80::1',
'IPv6_GLOBAL': '2001:db8::1',
'IPv6_LLA': 'fe80::123',
'IPv6_DHCP': '2001:db8::3'}
TEST_PLUGIN_CLASS = ('neutron.tests.unit.agent.test_securitygroups_rpc.'
'SecurityGroupRpcTestPlugin')
FIREWALL_BASE_PACKAGE = 'neutron.agent.linux.iptables_firewall.'
FIREWALL_IPTABLES_DRIVER = FIREWALL_BASE_PACKAGE + 'IptablesFirewallDriver'
FIREWALL_HYBRID_DRIVER = (FIREWALL_BASE_PACKAGE +
'OVSHybridIptablesFirewallDriver')
FIREWALL_NOOP_DRIVER = 'neutron.agent.firewall.NoopFirewallDriver'
def ingress_address_assignment_rules(port):
    """Build the implicit ingress rules for a port's address assignment.

    Returns the RA and DHCP(v4/v6) provider rules the server adds for the
    port's fixed IPs, so tests can append them to expected rule lists.
    """
    addresses = [ip['ip_address'] for ip in port['port']['fixed_ips']]
    v4_addrs = [a for a in addresses if netaddr.IPNetwork(a).version == 4]
    v6_addrs = [a for a in addresses if netaddr.IPNetwork(a).version == 6]

    def dhcp_rule(ethertype, client_port, server_port, dest_prefix):
        # One DHCP/DHCPv6 reply rule targeted at a single destination.
        return {'direction': 'ingress',
                'ethertype': ethertype,
                'port_range_max': client_port,
                'port_range_min': client_port,
                'protocol': 'udp',
                'source_port_range_max': server_port,
                'source_port_range_min': server_port,
                'dest_ip_prefix': dest_prefix}

    rules = []
    if v6_addrs:
        # Router advertisements are allowed when the port has any v6 addr.
        rules.append({'direction': 'ingress',
                      'ethertype': 'IPv6',
                      'protocol': 'ipv6-icmp',
                      'source_port_range_min': 134})
    for dest in v4_addrs + ['255.255.255.255']:
        rules.append(dhcp_rule('IPv4', 68, 67, '%s/32' % dest))
    for dest in v6_addrs:
        rules.append(dhcp_rule('IPv6', 546, 547, '%s/128' % dest))
    # DHCPv6 replies may also target the link-local range.
    rules.append(dhcp_rule('IPv6', 546, 547, 'fe80::/64'))
    return rules
def set_enable_security_groups(enabled):
    """Override the SECURITYGROUP/enable_security_group config option."""
    cfg.CONF.set_override(
        'enable_security_group', enabled, group='SECURITYGROUP')
def set_firewall_driver(firewall_driver):
    """Override the SECURITYGROUP/firewall_driver config option."""
    cfg.CONF.set_override(
        'firewall_driver', firewall_driver, group='SECURITYGROUP')
class FakeFirewallDriver(firewall_base.FirewallDriver):
    """Firewall driver stub whose filter hooks raise on use.

    The abstract methods deliberately raise NotImplementedError so any
    test path that unexpectedly touches the firewall fails loudly.
    """

    def prepare_port_filter(self, port):
        raise NotImplementedError()

    def update_port_filter(self, port):
        raise NotImplementedError()
class SecurityGroupRpcTestPlugin(test_sg.SecurityGroupTestPlugin,
                                 sg_db_rpc.SecurityGroupServerRpcMixin):
    """Test plugin that tracks port state and fires SG notifications.

    Keeps a device-id -> port mapping in ``self.devices`` so the RPC
    callbacks can resolve devices without a real agent, and routes SG
    notifications through a mocked notifier.
    """

    def __init__(self):
        super(SecurityGroupRpcTestPlugin, self).__init__()
        self.notifier = mock.Mock()
        self.devices = {}

    def create_port(self, context, port):
        result = super(SecurityGroupRpcTestPlugin,
                       self).create_port(context, port)
        self.devices[result['id']] = result
        self.notify_security_groups_member_updated(context, result)
        return result

    def update_port(self, context, id, port):
        original_port = self.get_port(context, id)
        updated_port = super(SecurityGroupRpcTestPlugin,
                             self).update_port(context, id, port)
        self.devices[id] = updated_port
        self.update_security_group_on_port(
            context, id, port, original_port, updated_port)
        # Fix: the REST layer serializes the plugin's return value, so
        # update_port must return the updated port like create_port does.
        return updated_port

    def delete_port(self, context, id):
        port = self.get_port(context, id)
        super(SecurityGroupRpcTestPlugin, self).delete_port(context, id)
        self.notify_security_groups_member_updated(context, port)
        del self.devices[id]

    def get_port_from_device(self, context, device):
        device = self.devices.get(device)
        if device:
            # The RPC layer expects these keys; flatten fixed_ips to plain
            # IP address strings.
            device['security_group_rules'] = []
            device['security_group_source_groups'] = []
            device['fixed_ips'] = [ip['ip_address']
                                   for ip in device['fixed_ips']]
        return device
class SGServerRpcCallBackTestCase(test_sg.SecurityGroupDBTestCase):
    def setUp(self, plugin=None):
        """Install the noop firewall driver and wire up notifier and RPC."""
        plugin = plugin or TEST_PLUGIN_CLASS
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        super(SGServerRpcCallBackTestCase, self).setUp(plugin)
        self.notifier = directory.get_plugin().notifier
        self.rpc = securitygroups_rpc.SecurityGroupServerRpcCallback()
    def _test_security_group_port(self, device_owner, gw_ip,
                                  cidr, ip_version, ip_address):
        """Create (and for router ports, reassign) a port on a fresh subnet.

        Helper for the notify tests below; the notification side effects
        are asserted by the callers via ``self.notifier``.
        """
        with self.network() as net:
            with self.subnet(net,
                             gateway_ip=gw_ip,
                             cidr=cidr,
                             ip_version=ip_version) as subnet:
                kwargs = {
                    'fixed_ips': [{'subnet_id': subnet['subnet']['id'],
                                   'ip_address': ip_address}]}
                if device_owner:
                    kwargs['device_owner'] = device_owner
                res = self._create_port(
                    self.fmt, net['network']['id'], **kwargs)
                res = self.deserialize(self.fmt, res)
                port_id = res['port']['id']

                if device_owner in const.ROUTER_INTERFACE_OWNERS:
                    # Router interfaces must have their IPs cleared before
                    # they can be deleted through the ports API.
                    data = {'port': {'fixed_ips': []}}
                    req = self.new_update_request('ports', data, port_id)
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                self._delete('ports', port_id)
    def test_notify_security_group_ipv6_gateway_port_added(self):
        """An IPv6 router gateway port triggers a provider update."""
        self._test_security_group_port(
            const.DEVICE_OWNER_ROUTER_INTF,
            '2001:0db8::1',
            '2001:0db8::/64',
            6,
            '2001:0db8::1')
        self.assertTrue(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_dvr_ipv6_gateway_port_added(self):
        """An IPv6 DVR interface port triggers a provider update."""
        self._test_security_group_port(
            const.DEVICE_OWNER_DVR_INTERFACE,
            '2001:0db8::1',
            '2001:0db8::/64',
            6,
            '2001:0db8::2')
        self.assertTrue(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv6_normal_port_added(self):
        """A plain IPv6 port must NOT trigger a provider update."""
        self._test_security_group_port(
            None,
            '2001:0db8::1',
            '2001:0db8::/64',
            6,
            '2001:0db8::3')
        self.assertFalse(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv4_dhcp_port_added(self):
        """An IPv4 DHCP port triggers a provider update."""
        self._test_security_group_port(
            const.DEVICE_OWNER_DHCP,
            '192.168.1.1',
            '192.168.1.0/24',
            4,
            '192.168.1.2')
        self.assertTrue(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv4_gateway_port_added(self):
        """An IPv4 router gateway port must NOT trigger a provider update."""
        self._test_security_group_port(
            const.DEVICE_OWNER_ROUTER_INTF,
            '192.168.1.1',
            '192.168.1.0/24',
            4,
            '192.168.1.1')
        self.assertFalse(self.notifier.security_groups_provider_updated.called)
    def test_notify_security_group_ipv4_normal_port_added(self):
        """A plain IPv4 port must NOT trigger a provider update."""
        self._test_security_group_port(
            None,
            '192.168.1.1',
            '192.168.1.0/24',
            4,
            '192.168.1.3')
        self.assertFalse(self.notifier.security_groups_provider_updated.called)
    def _test_sg_rules_for_devices_ipv4_ingress_port_range(
            self, min_port, max_port):
        """Verify the per-device rule list for two ingress TCP rules.

        Creates a group with a TCP port-range rule and a fixed port-23 rule,
        binds a port, then checks the exact rule list returned by
        security_group_rules_for_devices (default egress rules plus the two
        explicit rules plus the implicit address-assignment rules).
        """
        fake_prefix = FAKE_PREFIX[const.IPv4]
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, str(min_port),
                str(max_port))
            rule2 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '23',
                '23', fake_prefix)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            # Unknown devices must be silently ignored by the callback.
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'ingress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': max_port,
                         'security_group_id': sg1_id,
                         'port_range_min': min_port},
                        {'direction': 'ingress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': 23, 'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'source_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_sg_rules_for_devices_ipv4_ingress_port_range_min_port_1(self):
        """Exercise the helper with a 1-10 TCP port range."""
        self._test_sg_rules_for_devices_ipv4_ingress_port_range(1, 10)
    def test_security_group_info_for_ports_with_no_rules(self):
        """A rule-less group still appears in the info map, with no rules."""
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg:
            sg_id = sg['security_group']['id']
            # Strip the default egress rules so the group truly has none.
            self._delete_default_security_group_egress_rules(sg_id)

            res = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg_id])
            ports_rest = self.deserialize(self.fmt, res)
            port_id = ports_rest['port']['id']
            self.rpc.devices = {port_id: ports_rest['port']}
            devices = [port_id]
            ctx = context.get_admin_context()
            sg_info = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)

            expected = {sg_id: []}
            self.assertEqual(expected, sg_info['security_groups'])
            self._delete('ports', port_id)
    @contextlib.contextmanager
    def _port_with_addr_pairs_and_security_group(self):
        """Yield a port with allowed-address-pairs bound to one SG.

        The group gets an intra-group TCP/22 rule and a TCP/23 rule from a
        remote prefix. Skips when the plugin lacks the allowed-address-pairs
        extension.
        """
        plugin_obj = directory.get_plugin()
        if ('allowed-address-pairs'
            not in plugin_obj.supported_extension_aliases):
            self.skipTest("Test depends on allowed-address-pairs extension")
        fake_prefix = FAKE_PREFIX['IPv4']
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', 'tcp', '22',
                '22', remote_group_id=sg1_id)
            rule2 = self._build_security_group_rule(
                sg1_id,
                'ingress', 'tcp', '23',
                '23', fake_prefix)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(res.status_int, 201)
            address_pairs = [{'mac_address': '00:00:00:00:00:01',
                              'ip_address': '10.0.1.0/24'},
                             {'mac_address': '00:00:00:00:00:01',
                              'ip_address': '11.0.0.1'}]
            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id],
                arg_list=(addr_pair.ADDRESS_PAIRS,),
                allowed_address_pairs=address_pairs)
            yield self.deserialize(self.fmt, res1)
    def test_security_group_info_for_devices_ipv4_addr_pair(self):
        """Allowed-address-pair IPs count as group member IPs."""
        with self._port_with_addr_pairs_and_security_group() as port:
            port_id = port['port']['id']
            sg_id = port['port']['security_groups'][0]
            devices = [port_id, 'no_exist_device']
            ctx = context.get_admin_context()
            # verify that address pairs are included in remote SG IPs
            sg_member_ips = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)['sg_member_ips']
            expected_member_ips = [
                '10.0.1.0/24', '11.0.0.1',
                port['port']['fixed_ips'][0]['ip_address']]
            self.assertEqual(sorted(expected_member_ips),
                             sorted(sg_member_ips[sg_id]['IPv4']))
            self._delete('ports', port_id)
    def test_security_group_rules_for_devices_ipv4_ingress_addr_pair(self):
        """Remote-group rules expand to one rule per member source IP.

        With address pairs on the port, the intra-group TCP/22 rule must be
        duplicated for both pair addresses, and allowed_address_pairs must
        be included in the RPC payload.
        """
        fake_prefix = FAKE_PREFIX[const.IPv4]
        with self._port_with_addr_pairs_and_security_group() as port:
            port_id = port['port']['id']
            sg_id = port['port']['security_groups'][0]
            devices = [port_id, 'no_exist_device']

            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id]
            expected = [{'direction': 'egress', 'ethertype': 'IPv4',
                         'security_group_id': sg_id},
                        {'direction': 'egress', 'ethertype': 'IPv6',
                         'security_group_id': sg_id},
                        {'direction': 'ingress',
                         'protocol': 'tcp', 'ethertype': 'IPv4',
                         'port_range_max': 22,
                         'remote_group_id': sg_id,
                         'security_group_id': sg_id,
                         'source_ip_prefix': '11.0.0.1/32',
                         'port_range_min': 22},
                        {'direction': 'ingress',
                         'protocol': 'tcp', 'ethertype': 'IPv4',
                         'port_range_max': 22,
                         'remote_group_id': sg_id,
                         'security_group_id': sg_id,
                         'source_ip_prefix': '10.0.1.0/24',
                         'port_range_min': 22},
                        {'direction': 'ingress', 'protocol': 'tcp',
                         'ethertype': 'IPv4',
                         'port_range_max': 23, 'security_group_id': sg_id,
                         'port_range_min': 23,
                         'source_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(port)
            # Rule ordering is not guaranteed for expanded remote-group
            # rules, so compare as an unordered list.
            expected = tools.UnorderedList(expected)
            self.assertEqual(expected,
                             port_rpc['security_group_rules'])
            self.assertEqual(port['port']['allowed_address_pairs'],
                             port_rpc['allowed_address_pairs'])
            self._delete('ports', port_id)
    def test_security_group_rules_for_devices_ipv4_egress(self):
        """Egress rules appear with dest_ip_prefix instead of source."""
        fake_prefix = FAKE_PREFIX[const.IPv4]
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_TCP, '22',
                '22')
            rule2 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_UDP, '23',
                '23', fake_prefix)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': 22,
                         'security_group_id': sg1_id,
                         'port_range_min': 22},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_UDP,
                         'ethertype': const.IPv4,
                         'port_range_max': 23, 'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'dest_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_security_group_rules_for_devices_ipv4_source_group(self):
        """Remote-group rules expand to the member port's fixed IP.

        Port 1 is in sg1+sg2, port 2 only in sg2; sg1's ingress rule with
        remote_group sg2 must be rendered with port 2's fixed IP as the
        source prefix.
        """
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1,\
                self.security_group() as sg2:
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '24',
                '25', remote_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id,
                                 sg2_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']

            res2 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg2_id])
            ports_rest2 = self.deserialize(self.fmt, res2)
            port_id2 = ports_rest2['port']['id']
            port_fixed_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg2_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg2_id},
                        {'direction': u'ingress',
                         'source_ip_prefix': port_fixed_ip2 + '/32',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv4,
                         'port_range_max': 25, 'port_range_min': 24,
                         'remote_group_id': sg2_id,
                         'security_group_id': sg1_id},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
    def test_security_group_info_for_devices_ipv4_source_group(self):
        """The info RPC returns unexpanded rules plus sg_member_ips.

        Unlike security_group_rules_for_devices, remote-group rules keep
        their remote_group_id and the member IPs are reported separately.
        """
        with self.network() as n,\
                self.subnet(n),\
                self.security_group() as sg1,\
                self.security_group() as sg2:
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '24',
                '25', remote_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            res = self._create_security_group_rule(self.fmt, rules)
            self.deserialize(self.fmt, res)
            self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)

            res1 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg1_id])
            ports_rest1 = self.deserialize(self.fmt, res1)
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']

            res2 = self._create_port(
                self.fmt, n['network']['id'],
                security_groups=[sg2_id])
            ports_rest2 = self.deserialize(self.fmt, res2)
            port_id2 = ports_rest2['port']['id']
            port_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_info_for_devices(
                ctx, devices=devices)
            expected = {
                'security_groups': {sg1_id: [
                    {'direction': 'egress', 'ethertype': const.IPv4},
                    {'direction': 'egress', 'ethertype': const.IPv6},
                    {'direction': u'ingress',
                     'protocol': const.PROTO_NAME_TCP,
                     'ethertype': const.IPv4,
                     'port_range_max': 25, 'port_range_min': 24,
                     'remote_group_id': sg2_id}
                ]},
                'sg_member_ips': {sg2_id: {
                    'IPv4': set([port_ip2]),
                    'IPv6': set(),
                }}
            }
            self.assertEqual(expected['security_groups'],
                             ports_rpc['security_groups'])
            self.assertEqual(expected['sg_member_ips'][sg2_id]['IPv4'],
                             ports_rpc['sg_member_ips'][sg2_id]['IPv4'])
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
def test_security_group_rules_for_devices_ipv6_ingress(self):
fake_prefix = FAKE_PREFIX[const.IPv6]
fake_gateway = FAKE_IP[const.IPv6]
with self.network() as n,\
self.subnet(n, gateway_ip=fake_gateway,
cidr=fake_prefix, ip_version=6
) as subnet_v6,\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22',
ethertype=const.IPv6)
rule2 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_UDP, '23',
'23', fake_prefix,
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule'],
rule2['security_group_rule']]}
res = self._create_security_group_rule(self.fmt, rules)
self.deserialize(self.fmt, res)
self.assertEqual(webob.exc.HTTPCreated.code, res.status_int)
self._create_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id'],
'ip_address': FAKE_IP['IPv6_DHCP']}],
device_owner=const.DEVICE_OWNER_DHCP,
security_groups=[sg1_id])
res1 = self._create_port(
self.fmt, n['network']['id'],
fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_rules_for_devices(
ctx, devices=devices)
port_rpc = ports_rpc[port_id1]
source_port, dest_port, ethertype = sg_db_rpc.DHCP_RULE_PORT[6]
expected = [{'direction': 'egress', 'ethertype': const.IPv4,
'security_group_id': sg1_id},
{'direction': 'egress', 'ethertype': const.IPv6,
'security_group_id': sg1_id},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22,
'security_group_id': sg1_id,
'port_range_min': 22},
{'direction': 'ingress',
'protocol': const.PROTO_NAME_UDP,
'ethertype': const.IPv6,
'port_range_max': 23,
'security_group_id': sg1_id,
'port_range_min': 23,
'source_ip_prefix': fake_prefix},
] + ingress_address_assignment_rules(ports_rest1)
self.assertEqual(port_rpc['security_group_rules'],
expected)
self._delete('ports', port_id1)
def test_security_group_info_for_devices_only_ipv6_rule(self):
with self.network() as n,\
self.subnet(n),\
self.security_group() as sg1:
sg1_id = sg1['security_group']['id']
rule1 = self._build_security_group_rule(
sg1_id,
'ingress', const.PROTO_NAME_TCP, '22',
'22', remote_group_id=sg1_id,
ethertype=const.IPv6)
rules = {
'security_group_rules': [rule1['security_group_rule']]}
self._make_security_group_rule(self.fmt, rules)
res1 = self._create_port(
self.fmt, n['network']['id'],
security_groups=[sg1_id])
ports_rest1 = self.deserialize(self.fmt, res1)
port_id1 = ports_rest1['port']['id']
self.rpc.devices = {port_id1: ports_rest1['port']}
devices = [port_id1, 'no_exist_device']
ctx = context.get_admin_context()
ports_rpc = self.rpc.security_group_info_for_devices(
ctx, devices=devices)
expected = {
'security_groups': {sg1_id: [
{'direction': 'egress', 'ethertype': const.IPv4},
{'direction': 'egress', 'ethertype': const.IPv6},
{'direction': u'ingress',
'protocol': const.PROTO_NAME_TCP,
'ethertype': const.IPv6,
'port_range_max': 22, 'port_range_min': 22,
'remote_group_id': sg1_id}
]},
'sg_member_ips': {sg1_id: {
'IPv6': set(),
}}
}
self.assertEqual(expected['security_groups'],
ports_rpc['security_groups'])
self.assertEqual(expected['sg_member_ips'][sg1_id]['IPv6'],
ports_rpc['sg_member_ips'][sg1_id]['IPv6'])
self._delete('ports', port_id1)
    def test_security_group_rules_for_devices_ipv6_egress(self):
        """IPv6 TCP/UDP egress rules are returned for a filtered port."""
        fake_prefix = FAKE_PREFIX[const.IPv6]
        fake_gateway = FAKE_IP[const.IPv6]
        with self.network() as n,\
                self.subnet(n, gateway_ip=fake_gateway,
                            cidr=fake_prefix, ip_version=6
                            ) as subnet_v6,\
                self.security_group() as sg1:
            sg1_id = sg1['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_TCP, '22',
                '22',
                ethertype=const.IPv6)
            rule2 = self._build_security_group_rule(
                sg1_id,
                'egress', const.PROTO_NAME_UDP, '23',
                '23', fake_prefix,
                ethertype=const.IPv6)
            rules = {
                'security_group_rules': [rule1['security_group_rule'],
                                         rule2['security_group_rule']]}
            self._make_security_group_rule(self.fmt, rules)
            ports_rest1 = self._make_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg1_id])
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            # Default egress rules first, then the two explicit egress
            # rules; the remote-prefix rule carries 'dest_ip_prefix'.
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv6,
                         'port_range_max': 22,
                         'security_group_id': sg1_id,
                         'port_range_min': 22},
                        {'direction': 'egress',
                         'protocol': const.PROTO_NAME_UDP,
                         'ethertype': const.IPv6,
                         'port_range_max': 23,
                         'security_group_id': sg1_id,
                         'port_range_min': 23,
                         'dest_ip_prefix': fake_prefix},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
    def test_security_group_rules_for_devices_ipv6_source_group(self):
        """Remote-group IPv6 rules expand to the member's /128 prefix."""
        fake_prefix = FAKE_PREFIX[const.IPv6]
        fake_gateway = FAKE_IP[const.IPv6]
        with self.network() as n,\
                self.subnet(n, gateway_ip=fake_gateway,
                            cidr=fake_prefix, ip_version=6
                            ) as subnet_v6,\
                self.security_group() as sg1,\
                self.security_group() as sg2:
            sg1_id = sg1['security_group']['id']
            sg2_id = sg2['security_group']['id']
            rule1 = self._build_security_group_rule(
                sg1_id,
                'ingress', const.PROTO_NAME_TCP, '24',
                '25',
                ethertype=const.IPv6,
                remote_group_id=sg2['security_group']['id'])
            rules = {
                'security_group_rules': [rule1['security_group_rule']]}
            self._make_security_group_rule(self.fmt, rules)
            ports_rest1 = self._make_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg1_id,
                                 sg2_id])
            port_id1 = ports_rest1['port']['id']
            self.rpc.devices = {port_id1: ports_rest1['port']}
            devices = [port_id1, 'no_exist_device']
            # Second port supplies the sg2 member address that the
            # remote-group rule is expanded with.
            ports_rest2 = self._make_port(
                self.fmt, n['network']['id'],
                fixed_ips=[{'subnet_id': subnet_v6['subnet']['id']}],
                security_groups=[sg2_id])
            port_id2 = ports_rest2['port']['id']
            port_ip2 = ports_rest2['port']['fixed_ips'][0]['ip_address']
            ctx = context.get_admin_context()
            ports_rpc = self.rpc.security_group_rules_for_devices(
                ctx, devices=devices)
            port_rpc = ports_rpc[port_id1]
            expected = [{'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg1_id},
                        {'direction': 'egress', 'ethertype': const.IPv4,
                         'security_group_id': sg2_id},
                        {'direction': 'egress', 'ethertype': const.IPv6,
                         'security_group_id': sg2_id},
                        {'direction': 'ingress',
                         'source_ip_prefix': port_ip2 + '/128',
                         'protocol': const.PROTO_NAME_TCP,
                         'ethertype': const.IPv6,
                         'port_range_max': 25, 'port_range_min': 24,
                         'remote_group_id': sg2_id,
                         'security_group_id': sg1_id},
                        ] + ingress_address_assignment_rules(ports_rest1)
            self.assertEqual(port_rpc['security_group_rules'],
                             expected)
            self._delete('ports', port_id1)
            self._delete('ports', port_id2)
class SecurityGroupAgentRpcTestCaseForNoneDriver(base.BaseTestCase):
    """The agent must fall back to a no-op firewall when SG are disabled."""

    def test_init_firewall_with_none_driver(self):
        """Disabling security groups yields a NoopFirewallDriver."""
        set_enable_security_groups(False)
        agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=mock.Mock())
        # Expected value first, matching assertEqual usage elsewhere in
        # this module.
        self.assertEqual('NoopFirewallDriver',
                         agent.firewall.__class__.__name__)
class BaseSecurityGroupAgentRpcTestCase(base.BaseTestCase):
    """Shared fixture for SecurityGroupAgentRpc unit tests.

    Builds an agent whose firewall is a mock (delegating defer_apply to
    a real FakeFirewallDriver so the context-manager protocol works)
    and pre-registers one fake filtered device in two security groups.
    """
    def setUp(self, defer_refresh_firewall=False):
        super(BaseSecurityGroupAgentRpcTestCase, self).setUp()
        set_firewall_driver(FIREWALL_NOOP_DRIVER)
        self.agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=mock.Mock(),
            defer_refresh_firewall=defer_refresh_firewall)
        mock.patch('neutron.agent.linux.iptables_manager').start()
        # Keep the real (noop) firewall around; tests that need it
        # restore it from self.default_firewall.
        self.default_firewall = self.agent.firewall
        self.firewall = mock.Mock()
        firewall_object = FakeFirewallDriver()
        self.firewall.defer_apply.side_effect = firewall_object.defer_apply
        self.agent.firewall = self.firewall
        # One pre-wired device: member of fake_sgid1/fake_sgid2, with a
        # rule that references fake_sgid2 as its remote (source) group.
        self.fake_device = {'device': 'fake_device',
                            'network_id': 'fake_net',
                            'security_groups': ['fake_sgid1', 'fake_sgid2'],
                            'security_group_source_groups': ['fake_sgid2'],
                            'security_group_rules': [{'security_group_id':
                                                      'fake_sgid1',
                                                      'remote_group_id':
                                                      'fake_sgid2'}]}
        self.firewall.ports = {'fake_device': self.fake_device}
        self.firewall.security_group_updated = mock.Mock()
class SecurityGroupAgentRpcTestCase(BaseSecurityGroupAgentRpcTestCase):
    """Agent tests against a server that lacks the enhanced info RPC.

    security_group_info_for_devices raises UnsupportedVersion, forcing
    the agent to fall back to security_group_rules_for_devices.
    """
    def setUp(self, defer_refresh_firewall=False):
        super(SecurityGroupAgentRpcTestCase, self).setUp(
            defer_refresh_firewall)
        rpc = self.agent.plugin_rpc
        # Simulate an old server: enhanced RPC unsupported, legacy RPC
        # returns the mock firewall's port map.
        rpc.security_group_info_for_devices.side_effect = (
            oslo_messaging.UnsupportedVersion('1.2'))
        rpc.security_group_rules_for_devices.return_value = (
            self.firewall.ports)
    def test_prepare_and_remove_devices_filter(self):
        """Filter prepare and remove each run under defer_apply."""
        self.agent.prepare_devices_filter(['fake_device'])
        self.agent.remove_devices_filter(['fake_device'])
        self.firewall.assert_has_calls([mock.call.defer_apply(),
                                        mock.call.prepare_port_filter(
                                            self.fake_device),
                                        mock.call.defer_apply(),
                                        mock.call.remove_port_filter(
                                            self.fake_device),
                                        ])
    def test_prepare_devices_filter_with_noopfirewall(self):
        """No RPC is issued when the noop firewall driver is active."""
        self.agent.firewall = self.default_firewall
        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
        self.agent.prepare_devices_filter(['fake_device'])
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_info_for_devices.called)
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_rules_for_devices.called)
    def test_prepare_devices_filter_with_firewall_disabled(self):
        """No RPC is issued when enable_security_group is False."""
        cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
        self.agent.prepare_devices_filter(['fake_device'])
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_info_for_devices.called)
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_rules_for_devices.called)
    def test_security_groups_rule_updated(self):
        """A rule change on a member group refreshes that device."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
        self.agent.refresh_firewall.assert_has_calls(
            [mock.call.refresh_firewall([self.fake_device['device']])])
        self.assertFalse(self.firewall.security_group_updated.called)
    def test_security_groups_rule_not_updated(self):
        """Rule changes on unrelated groups trigger no refresh."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)
    def test_security_groups_member_updated(self):
        """A member change in a source group refreshes the device."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
        self.agent.refresh_firewall.assert_has_calls(
            [mock.call.refresh_firewall([self.fake_device['device']])])
        self.assertFalse(self.firewall.security_group_updated.called)
    def test_security_groups_member_not_updated(self):
        """Member changes in unrelated groups trigger no refresh."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(['fake_sgid3', 'fake_sgid4'])
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)
    def test_security_groups_provider_updated(self):
        """A provider update triggers a full firewall refresh."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.security_groups_provider_updated(None)
        self.agent.refresh_firewall.assert_has_calls(
            [mock.call.refresh_firewall()])
    def test_refresh_firewall(self):
        """refresh_firewall re-applies filters for all known devices."""
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.refresh_firewall()
        calls = [mock.call.defer_apply(),
                 mock.call.prepare_port_filter(self.fake_device),
                 mock.call.defer_apply(),
                 mock.call.update_port_filter(self.fake_device)]
        self.firewall.assert_has_calls(calls)
    def test_refresh_firewall_devices(self):
        """refresh_firewall with an explicit device list updates it."""
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.refresh_firewall([self.fake_device])
        calls = [mock.call.defer_apply(),
                 mock.call.prepare_port_filter(self.fake_device),
                 mock.call.defer_apply(),
                 mock.call.update_port_filter(self.fake_device)]
        self.firewall.assert_has_calls(calls)
    def test_refresh_firewall_none(self):
        """An empty device list is a no-op."""
        self.agent.refresh_firewall([])
        self.assertFalse(self.firewall.called)
    def test_refresh_firewall_with_firewall_disabled(self):
        """refresh_firewall does nothing with security groups disabled."""
        cfg.CONF.set_override('enable_security_group', False, 'SECURITYGROUP')
        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
        self.agent.firewall.defer_apply = mock.Mock()
        self.agent.refresh_firewall([self.fake_device])
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_info_for_devices.called)
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_rules_for_devices.called)
        self.assertFalse(self.agent.firewall.defer_apply.called)
    def test_refresh_firewall_with_noopfirewall(self):
        """refresh_firewall does nothing with the noop firewall driver."""
        self.agent.firewall = self.default_firewall
        self.agent.plugin_rpc.security_group_info_for_devices = mock.Mock()
        self.agent.plugin_rpc.security_group_rules_for_devices = mock.Mock()
        self.agent.firewall.defer_apply = mock.Mock()
        self.agent.refresh_firewall([self.fake_device])
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_info_for_devices.called)
        self.assertFalse(self.agent.plugin_rpc.
                         security_group_rules_for_devices.called)
        self.assertFalse(self.agent.firewall.defer_apply.called)
class SecurityGroupAgentEnhancedRpcTestCase(
    BaseSecurityGroupAgentRpcTestCase):
    """Agent tests against a server that supports the enhanced info RPC."""
    def setUp(self, defer_refresh_firewall=False):
        super(SecurityGroupAgentEnhancedRpcTestCase, self).setUp(
            defer_refresh_firewall=defer_refresh_firewall)
        # Canned security_group_info_for_devices payload; the OrderedDict
        # pins the order of update_security_group_rules calls that the
        # assertions below rely on.
        fake_sg_info = {
            'security_groups': collections.OrderedDict([
                ('fake_sgid2', []),
                ('fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])]),
            'sg_member_ips': {'fake_sgid2': {'IPv4': [], 'IPv6': []}},
            'devices': self.firewall.ports}
        self.agent.plugin_rpc.security_group_info_for_devices.return_value = (
            fake_sg_info)
    def test_prepare_and_remove_devices_filter_enhanced_rpc(self):
        """Rules/members are pushed to the firewall before preparing."""
        self.agent.prepare_devices_filter(['fake_device'])
        self.agent.remove_devices_filter(['fake_device'])
        tmp_mock1 = mock.call.update_security_group_rules(
            'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}])
        tmp_mock2 = mock.call.update_security_group_members(
            'fake_sgid2', {'IPv4': [], 'IPv6': []})
        self.firewall.assert_has_calls([mock.call.defer_apply(),
                                        mock.call.update_security_group_rules(
                                            'fake_sgid2', []),
                                        tmp_mock1,
                                        tmp_mock2,
                                        mock.call.prepare_port_filter(
                                            self.fake_device),
                                        mock.call.defer_apply(),
                                        mock.call.remove_port_filter(
                                            self.fake_device),
                                        ])
    def test_security_groups_rule_updated_enhanced_rpc(self):
        """A relevant rule change refreshes and notifies the firewall."""
        sg_list = ['fake_sgid1', 'fake_sgid3']
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(sg_list)
        self.agent.refresh_firewall.assert_called_once_with(
            [self.fake_device['device']])
        self.firewall.security_group_updated.assert_called_once_with(
            'sg_rule', set(sg_list))
    def test_security_groups_rule_not_updated_enhanced_rpc(self):
        """Rule changes on unrelated groups neither refresh nor notify."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_rule_updated(['fake_sgid3', 'fake_sgid4'])
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)
    def test_security_groups_member_updated_enhanced_rpc(self):
        """A relevant member change refreshes and notifies the firewall."""
        sg_list = ['fake_sgid2', 'fake_sgid3']
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(sg_list)
        self.agent.refresh_firewall.assert_called_once_with(
            [self.fake_device['device']])
        self.firewall.security_group_updated.assert_called_once_with(
            'sg_member', set(sg_list))
    def test_security_groups_member_not_updated_enhanced_rpc(self):
        """Member changes on unrelated groups neither refresh nor notify."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.security_groups_member_updated(
            ['fake_sgid3', 'fake_sgid4'])
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)
    def test_security_groups_provider_updated_enhanced_rpc(self):
        """A provider update triggers a full firewall refresh."""
        self.agent.refresh_firewall = mock.Mock()
        self.agent.security_groups_provider_updated(None)
        self.agent.refresh_firewall.assert_has_calls(
            [mock.call.refresh_firewall()])
    def test_refresh_firewall_enhanced_rpc(self):
        """Full refresh re-pushes rules/members before each filter op."""
        self.agent.prepare_devices_filter(['fake_port_id'])
        self.agent.refresh_firewall()
        calls = [mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules(
                     'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members(
                     'fake_sgid2', {'IPv4': [], 'IPv6': []}),
                 mock.call.prepare_port_filter(self.fake_device),
                 mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules(
                     'fake_sgid1', [{'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members(
                     'fake_sgid2', {'IPv4': [], 'IPv6': []}),
                 mock.call.update_port_filter(self.fake_device)]
        self.firewall.assert_has_calls(calls)
    def test_refresh_firewall_devices_enhanced_rpc(self):
        """Per-device refresh also re-pushes rules and member IPs."""
        self.agent.prepare_devices_filter(['fake_device'])
        self.agent.refresh_firewall([self.fake_device])
        calls = [mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules('fake_sgid1', [
                     {'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members('fake_sgid2', {
                     'IPv4': [], 'IPv6': []
                 }),
                 mock.call.prepare_port_filter(self.fake_device),
                 mock.call.defer_apply(),
                 mock.call.update_security_group_rules('fake_sgid2', []),
                 mock.call.update_security_group_rules('fake_sgid1', [
                     {'remote_group_id': 'fake_sgid2'}]),
                 mock.call.update_security_group_members('fake_sgid2', {
                     'IPv4': [], 'IPv6': []}),
                 mock.call.update_port_filter(self.fake_device)
                 ]
        self.firewall.assert_has_calls(calls)
    def test_refresh_firewall_none_enhanced_rpc(self):
        """An empty device list is a no-op."""
        self.agent.refresh_firewall([])
        self.assertFalse(self.firewall.called)
class SecurityGroupAgentRpcWithDeferredRefreshTestCase(
    SecurityGroupAgentRpcTestCase):
    """Agent tests with defer_refresh_firewall enabled.

    In this mode updates only mark devices in devices_to_refilter (or
    set global_refresh_firewall); the actual refresh happens later in
    setup_port_filters().
    """

    def setUp(self):
        super(SecurityGroupAgentRpcWithDeferredRefreshTestCase, self).setUp(
            defer_refresh_firewall=True)

    @contextlib.contextmanager
    def add_fake_device(self, device, sec_groups, source_sec_groups=None):
        """Temporarily register an extra fake device with the firewall.

        The device is removed in a finally clause so a failing
        assertion inside the managed block cannot leak the device into
        subsequent checks (the original version skipped cleanup on
        exceptions).
        """
        fake_device = {'device': device,
                       'security_groups': sec_groups,
                       'security_group_source_groups': source_sec_groups or [],
                       'security_group_rules': [{'security_group_id':
                                                 'fake_sgid1',
                                                 'remote_group_id':
                                                 'fake_sgid2'}]}
        self.firewall.ports[device] = fake_device
        try:
            yield
        finally:
            del self.firewall.ports[device]

    def test_security_groups_rule_updated(self):
        """A rule update only marks the member device for refiltering."""
        self.agent.security_groups_rule_updated(['fake_sgid1', 'fake_sgid3'])
        self.assertIn('fake_device', self.agent.devices_to_refilter)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_rule_updated_same_port(self):
        """Only the device in the updated groups is marked."""
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgidX']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_rule_updated(['fake_sgid1'])
            self.agent.security_groups_rule_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_rule_updated_multiple_ports(self):
        """A single update covering both groups marks both devices."""
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid2']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_rule_updated(['fake_sgid1',
                                                     'fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_rule_updated_multiple_ports(self):
        """Successive updates accumulate marked devices."""
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid2']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_rule_updated(['fake_sgid1'])
            self.agent.security_groups_rule_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_member_updated(self):
        """A member update marks the device using that source group."""
        self.agent.security_groups_member_updated(['fake_sgid2', 'fake_sgid3'])
        self.assertIn('fake_device', self.agent.devices_to_refilter)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_member_updated_same_port(self):
        """Devices without the updated source group stay unmarked."""
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
                                  source_sec_groups=['fake_sgidX']):
            self.agent.refresh_firewall = mock.Mock()
            self.agent.security_groups_member_updated(['fake_sgid1',
                                                       'fake_sgid3'])
            self.agent.security_groups_member_updated(['fake_sgid2',
                                                       'fake_sgid3'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertNotIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_member_updated_multiple_ports(self):
        """One source-group update marks every device that uses it."""
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
                                  source_sec_groups=['fake_sgid2']):
            self.agent.security_groups_member_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_multiple_security_groups_member_updated_multiple_ports(self):
        """Successive member updates accumulate marked devices."""
        with self.add_fake_device(device='fake_device_2',
                                  sec_groups=['fake_sgid1', 'fake_sgid1B'],
                                  source_sec_groups=['fake_sgid1B']):
            self.agent.security_groups_member_updated(['fake_sgid1B'])
            self.agent.security_groups_member_updated(['fake_sgid2'])
            self.assertIn('fake_device', self.agent.devices_to_refilter)
            self.assertIn('fake_device_2', self.agent.devices_to_refilter)
            self.assertFalse(self.firewall.security_group_updated.called)

    def test_security_groups_provider_updated(self):
        """A provider update with no device list requests a global refresh."""
        self.agent.security_groups_provider_updated(None)
        self.assertTrue(self.agent.global_refresh_firewall)

    def test_security_groups_provider_updated_devices_specified(self):
        """A provider update with devices marks only those devices."""
        self.agent.firewall.ports = {
            'fake_device_1': {
                'id': 'fake_port_id_1',
                'device': 'fake_device_1'},
            'fake_device_2': {
                'id': 'fake_port_id_2',
                'device': 'fake_device_2'}}
        self.agent.security_groups_provider_updated(
            ['fake_port_id_1', 'fake_port_id_2'])
        self.assertFalse(self.agent.global_refresh_firewall)
        self.assertIn('fake_device_1', self.agent.devices_to_refilter)
        self.assertIn('fake_device_2', self.agent.devices_to_refilter)

    def test_setup_port_filters_new_ports_only(self):
        """Only new ports: prepare is called, no refresh."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['fake_new_device']), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_updated_ports_only(self):
        """Only updated ports: refresh is called, no prepare."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(), set(['fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_updated_device']))
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filter_new_and_updated_ports(self):
        """New and updated ports: both prepare and refresh are called."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['fake_new_device']),
                                      set(['fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_updated_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_sg_updates_only(self):
        """Pending SG updates alone trigger a refresh of those devices."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device']))
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_sg_updates_and_new_ports(self):
        """Pending SG updates and new ports are handled independently."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['fake_new_device']), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def _test_prepare_devices_filter(self, devices):
        # Stand-in for prepare_devices_filter that simulates an RPC
        # notification arriving while filters are being prepared.
        self.agent.devices_to_refilter |= set(['fake_new_device'])

    def test_setup_port_filters_new_port_and_rpc(self):
        """Devices marked during prepare survive for the next pass."""
        # Use a prepare_devices_filter stub that marks a device while
        # preparing, as a concurrent RPC update would.
        self.agent.prepare_devices_filter = self._test_prepare_devices_filter
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['new_device', 'fake_device'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(['new_device']), set())
        self.assertEqual(self.agent.devices_to_refilter,
                         set(['fake_new_device']))
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_sg_updates_and_updated_ports(self):
        """Pending SG updates are merged with updated ports for refresh."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(
            set(), set(['fake_device', 'fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device', 'fake_device_2', 'fake_updated_device']))
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_all_updates(self):
        """New ports are prepared; everything else is merged and refreshed."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set(['fake_device', 'fake_device_2'])
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(
            set(['fake_new_device']),
            set(['fake_device', 'fake_updated_device']))
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.prepare_devices_filter.assert_called_once_with(
            set(['fake_new_device']))
        self.agent.refresh_firewall.assert_called_once_with(
            set(['fake_device', 'fake_device_2', 'fake_updated_device']))
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_no_update(self):
        """Nothing pending: neither prepare nor refresh is called."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = False
        self.agent.setup_port_filters(set(), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.assertFalse(self.agent.refresh_firewall.called)
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)

    def test_setup_port_filters_with_global_refresh(self):
        """global_refresh_firewall triggers a no-argument full refresh."""
        self.agent.prepare_devices_filter = mock.Mock()
        self.agent.refresh_firewall = mock.Mock()
        self.agent.devices_to_refilter = set()
        self.agent.global_refresh_firewall = True
        self.agent.setup_port_filters(set(), set())
        self.assertFalse(self.agent.devices_to_refilter)
        self.assertFalse(self.agent.global_refresh_firewall)
        self.agent.refresh_firewall.assert_called_once_with()
        self.assertFalse(self.agent.prepare_devices_filter.called)
        self.assertFalse(self.firewall.security_group_updated.called)
class FakeSGNotifierAPI(securitygroups_rpc.SecurityGroupAgentRpcApiMixin):
    """Notifier mixin wired to a throwaway topic for cast inspection."""

    def __init__(self):
        self.topic = 'fake'
        self.client = n_rpc.get_client(
            oslo_messaging.Target(topic=self.topic, version='1.0'))
class SecurityGroupAgentRpcApiTestCase(base.BaseTestCase):
    """Verify the notifier mixin's RPC casts (and their suppression)."""

    def setUp(self):
        super(SecurityGroupAgentRpcApiTestCase, self).setUp()
        self.notifier = FakeSGNotifierAPI()
        client = self.notifier.client
        # prepare() hands back the same client so cast() always lands
        # on our mock, whatever topic/version is prepared.
        self.mock_prepare = mock.patch.object(
            client, 'prepare', return_value=client).start()
        self.mock_cast = mock.patch.object(client, 'cast').start()

    def test_security_groups_provider_updated(self):
        self.notifier.security_groups_provider_updated(None)
        expected = mock.call(None, 'security_groups_provider_updated',
                             devices_to_update=None)
        self.mock_cast.assert_has_calls([expected])

    def test_security_groups_rule_updated(self):
        self.notifier.security_groups_rule_updated(
            None, security_groups=['fake_sgid'])
        expected = mock.call(None, 'security_groups_rule_updated',
                             security_groups=['fake_sgid'])
        self.mock_cast.assert_has_calls([expected])

    def test_security_groups_member_updated(self):
        self.notifier.security_groups_member_updated(
            None, security_groups=['fake_sgid'])
        expected = mock.call(None, 'security_groups_member_updated',
                             security_groups=['fake_sgid'])
        self.mock_cast.assert_has_calls([expected])

    def test_security_groups_rule_not_updated(self):
        # An empty group list must not generate any cast at all.
        self.notifier.security_groups_rule_updated(None, security_groups=[])
        self.assertFalse(self.mock_cast.called)

    def test_security_groups_member_not_updated(self):
        self.notifier.security_groups_member_updated(None, security_groups=[])
        self.assertFalse(self.mock_cast.called)
# Fragments of iptables match syntax shared by the expected-output templates.
PHYSDEV_MOD = '-m physdev'
PHYSDEV_IS_BRIDGED = '--physdev-is-bridged'
# Substitution map for the %-formatted templates below.  Entries are mutated
# in sequence; each template captures whatever values are current at its own
# definition point, so the ordering of the assignments here is significant.
IPTABLES_ARG = {'bn': iptables_manager.binary_name,
                'physdev_mod': PHYSDEV_MOD,
                'physdev_is_bridged': PHYSDEV_IS_BRIDGED}
CHAINS_MANGLE = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING|mark'
IPTABLES_ARG['chains'] = CHAINS_MANGLE
# NOTE(review): 'chains' is immediately overwritten below before any template
# visible in this region consumes CHAINS_MANGLE — confirm nothing between
# these assignments was removed.
CHAINS_MANGLE_V6 = 'FORWARD|INPUT|OUTPUT|POSTROUTING|PREROUTING'
IPTABLES_ARG['chains'] = CHAINS_MANGLE_V6
CHAINS_NAT = 'OUTPUT|POSTROUTING|PREROUTING|float-snat|snat'
# Fixed port/mac/ip identities used throughout the filter templates.
IPTABLES_ARG['port1'] = 'port1'
IPTABLES_ARG['port2'] = 'port2'
IPTABLES_ARG['mac1'] = '12:34:56:78:9A:BC'
IPTABLES_ARG['mac2'] = '12:34:56:78:9A:BD'
IPTABLES_ARG['ip1'] = '10.0.0.3/32'
IPTABLES_ARG['ip2'] = '10.0.0.4/32'
IPTABLES_ARG['chains'] = CHAINS_NAT
# Expected raw-table dump when no port has conntrack zones assigned.
IPTABLES_RAW_DEFAULT = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected raw-table dump with one linuxbridge port assigned conntrack zone 1.
IPTABLES_RAW_BRIDGE_NET_1 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# As above but with a second bridge port in its own conntrack zone (zone 2).
IPTABLES_RAW_BRIDGE_NET_2 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i brqfakenet1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \
-m comment --comment "Set zone for port1" -j CT --zone 1
-I %(bn)s-PREROUTING 4 -m physdev --physdev-in brqfakenet2 \
-m comment --comment "Set zone for port2" -j CT --zone 2
-I %(bn)s-PREROUTING 5 -i brqfakenet2 \
-m comment --comment "Set zone for port2" -j CT --zone 2
-I %(bn)s-PREROUTING 6 -m physdev --physdev-in tap_port2 \
-m comment --comment "Set zone for port2" -j CT --zone 2
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected raw-table dump for a single hybrid (qvb/tap veth pair) device.
IPTABLES_RAW_DEVICE_1 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_port1 \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i qvbtap_port1 \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_port1 \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# As above but for two hybrid devices, each in its own conntrack zone.
IPTABLES_RAW_DEVICE_2 = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-OUTPUT - [0:0]
:%(bn)s-PREROUTING - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
-I %(bn)s-PREROUTING 1 -m physdev --physdev-in qvbtap_%(port1)s \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 2 -i qvbtap_%(port1)s \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 3 -m physdev --physdev-in tap_%(port1)s \
-m comment --comment "Set zone for %(port1)s" -j CT --zone 1
-I %(bn)s-PREROUTING 4 -m physdev --physdev-in qvbtap_%(port2)s \
-m comment --comment "Set zone for %(port2)s" -j CT --zone 2
-I %(bn)s-PREROUTING 5 -i qvbtap_%(port2)s \
-m comment --comment "Set zone for %(port2)s" -j CT --zone 2
-I %(bn)s-PREROUTING 6 -m physdev --physdev-in tap_%(port2)s \
-m comment --comment "Set zone for %(port2)s" -j CT --zone 2
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
CHAINS_RAW = 'OUTPUT|PREROUTING'
IPTABLES_ARG['chains'] = CHAINS_RAW
# Baseline raw-table template.  The '(%(chains)s)' chain-declaration lines
# expand to a '(A|B|...)' alternation — presumably matched as a regex by the
# assertion helper so one line can stand for any declared chain; TODO confirm
# against the comparison code.
IPTABLES_RAW = """# Generated by iptables_manager
*raw
:OUTPUT - [0:0]
:PREROUTING - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I OUTPUT 1 -j %(bn)s-OUTPUT
-I PREROUTING 1 -j %(bn)s-PREROUTING
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Chain-name alternations for the filter-table templates: the empty agent,
# then incrementally the per-port chains for port1 and port2.
CHAINS_EMPTY = 'FORWARD|INPUT|OUTPUT|local|sg-chain|sg-fallback'
CHAINS_1 = CHAINS_EMPTY + '|i_port1|o_port1|s_port1'
CHAINS_2 = CHAINS_1 + '|i_port2|o_port2|s_port2'
IPTABLES_ARG['chains'] = CHAINS_1
# Expected filter table for one port when the ipset driver is active: the
# remote-group rule is a single '-m set --match-set' match (i_port1 rule 4).
IPSET_FILTER_1 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_port1 4 -m set --match-set NIPv4security_group1 src -j \
RETURN
-I %(bn)s-i_port1 5 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 2 -j %(bn)s-s_port1
-I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 6 -j RETURN
-I %(bn)s-o_port1 7 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
-I %(bn)s-s_port1 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Same port, ipset disabled and no remote-group members: the ipset rule is
# absent, so the ingress chain has one rule fewer than IPSET_FILTER_1.
IPTABLES_FILTER_1 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_port1 4 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 5 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 2 -j %(bn)s-s_port1
-I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 6 -j RETURN
-I %(bn)s-o_port1 7 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
-I %(bn)s-s_port1 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# One port without ipset, but with a remote-group member expanded to a
# per-address rule (i_port1 rule 4 allows 10.0.0.4/32 directly).
IPTABLES_FILTER_1_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_port1 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_port1 4 -s 10.0.0.4/32 -j RETURN
-I %(bn)s-i_port1 5 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 2 -j %(bn)s-s_port1
-I %(bn)s-o_port1 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_port1 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 6 -j RETURN
-I %(bn)s-o_port1 7 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_port1 1 -s 10.0.0.3/32 -m mac --mac-source 12:34:56:78:9A:BC \
-j RETURN
-I %(bn)s-s_port1 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Switch the chain alternation to include port2 for the two-port templates.
IPTABLES_ARG['chains'] = CHAINS_2
# Two ports sharing a security group, ipset enabled: each ingress chain
# carries a single match-set rule for the remote group.
IPSET_FILTER_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# As IPSET_FILTER_2 plus an additional allow-ICMP ingress rule on each port
# (i_port* rule 5), shifting the INVALID/fallback rules down by one.
IPSET_FILTER_2_3 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -m set --match-set NIPv4security_group1 src -j RETURN
-I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Two ports without ipset: each port's ingress chain allows the other port's
# address directly (i_port1 rule 4 allows ip2, i_port2 rule 4 allows ip1).
IPTABLES_FILTER_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN
-I %(bn)s-i_%(port1)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Variant of IPTABLES_FILTER_2 where port1's remote-group allow rule has been
# removed (e.g. the member left the group): i_port1 has no '-s' rule, while
# i_port2 still allows ip1.
IPTABLES_FILTER_2_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 5 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 6 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# As IPTABLES_FILTER_2 plus an allow-ICMP ingress rule on each port
# (i_port* rule 5), shifting the INVALID/fallback rules down by one.
IPTABLES_FILTER_2_3 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port1)s 4 -s %(ip2)s -j RETURN
-I %(bn)s-i_%(port1)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 2 -s 10.0.0.2/32 -p udp -m udp --sport 67 \
--dport 68 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p tcp -m tcp --dport 22 -j RETURN
-I %(bn)s-i_%(port2)s 4 -s %(ip1)s -j RETURN
-I %(bn)s-i_%(port2)s 5 -p icmp -j RETURN
-I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 2 -j %(bn)s-s_%(port1)s
-I %(bn)s-o_%(port1)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 6 -j RETURN
-I %(bn)s-o_%(port1)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s 0.0.0.0/32 -d 255.255.255.255/32 -p udp -m udp \
--sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 2 -j %(bn)s-s_%(port2)s
-I %(bn)s-o_%(port2)s 3 -p udp -m udp --sport 68 --dport 67 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p udp -m udp --sport 67 --dport 68 -j DROP
-I %(bn)s-o_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 6 -j RETURN
-I %(bn)s-o_%(port2)s 7 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 8 -j %(bn)s-sg-fallback
-I %(bn)s-s_%(port1)s 1 -s %(ip1)s -m mac --mac-source %(mac1)s -j RETURN
-I %(bn)s-s_%(port1)s 2 -j DROP
-I %(bn)s-s_%(port2)s 1 -s %(ip2)s -m mac --mac-source %(mac2)s -j RETURN
-I %(bn)s-s_%(port2)s 2 -j DROP
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
# Expected filter table when no filtered ports remain: only the framework
# wiring plus the ACCEPT/DROP terminators survive.
IPTABLES_ARG['chains'] = CHAINS_EMPTY
IPTABLES_FILTER_EMPTY = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-sg-chain 1 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_1
IPTABLES_FILTER_V6_1 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-i_port1 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
-I %(bn)s-i_port1 2 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j RETURN
-I %(bn)s-i_port1 3 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
-I %(bn)s-i_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
-I %(bn)s-i_port1 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_port1 6 -m state --state INVALID -j DROP
-I %(bn)s-i_port1 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_port1 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 131 -j RETURN
-I %(bn)s-o_port1 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 135 -j RETURN
-I %(bn)s-o_port1 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 143 -j RETURN
-I %(bn)s-o_port1 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
-I %(bn)s-o_port1 5 -p ipv6-icmp -j RETURN
-I %(bn)s-o_port1 6 -p udp -m udp --sport 546 --dport 547 -j RETURN
-I %(bn)s-o_port1 7 -p udp -m udp --sport 547 --dport 546 -j DROP
-I %(bn)s-o_port1 8 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_port1 9 -m state --state INVALID -j DROP
-I %(bn)s-o_port1 10 -j %(bn)s-sg-fallback
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-i_port1
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_port1 \
%(physdev_is_bridged)s -j %(bn)s-o_port1
-I %(bn)s-sg-chain 3 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_2
IPTABLES_FILTER_V6_2 = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-FORWARD 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-FORWARD 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-sg-chain
-I %(bn)s-INPUT 1 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-INPUT 2 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-i_%(port1)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
-I %(bn)s-i_%(port1)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j RETURN
-I %(bn)s-i_%(port1)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
-I %(bn)s-i_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
-I %(bn)s-i_%(port1)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port1)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port1)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-i_%(port2)s 1 -p ipv6-icmp -m icmp6 --icmpv6-type 130 -j RETURN
-I %(bn)s-i_%(port2)s 2 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j RETURN
-I %(bn)s-i_%(port2)s 3 -p ipv6-icmp -m icmp6 --icmpv6-type 135 -j RETURN
-I %(bn)s-i_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 136 -j RETURN
-I %(bn)s-i_%(port2)s 5 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-i_%(port2)s 6 -m state --state INVALID -j DROP
-I %(bn)s-i_%(port2)s 7 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port1)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 131 -j RETURN
-I %(bn)s-o_%(port1)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 135 -j RETURN
-I %(bn)s-o_%(port1)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 143 -j RETURN
-I %(bn)s-o_%(port1)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
-I %(bn)s-o_%(port1)s 5 -p ipv6-icmp -j RETURN
-I %(bn)s-o_%(port1)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN
-I %(bn)s-o_%(port1)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP
-I %(bn)s-o_%(port1)s 8 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port1)s 9 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port1)s 10 -j %(bn)s-sg-fallback
-I %(bn)s-o_%(port2)s 1 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 131 -j RETURN
-I %(bn)s-o_%(port2)s 2 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 135 -j RETURN
-I %(bn)s-o_%(port2)s 3 -s ::/128 -d ff02::/16 -p ipv6-icmp -m icmp6 \
--icmpv6-type 143 -j RETURN
-I %(bn)s-o_%(port2)s 4 -p ipv6-icmp -m icmp6 --icmpv6-type 134 -j DROP
-I %(bn)s-o_%(port2)s 5 -p ipv6-icmp -j RETURN
-I %(bn)s-o_%(port2)s 6 -p udp -m udp --sport 546 --dport 547 -j RETURN
-I %(bn)s-o_%(port2)s 7 -p udp -m udp --sport 547 --dport 546 -j DROP
-I %(bn)s-o_%(port2)s 8 -m state --state RELATED,ESTABLISHED -j RETURN
-I %(bn)s-o_%(port2)s 9 -m state --state INVALID -j DROP
-I %(bn)s-o_%(port2)s 10 -j %(bn)s-sg-fallback
-I %(bn)s-sg-chain 1 %(physdev_mod)s --physdev-INGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port1)s
-I %(bn)s-sg-chain 2 %(physdev_mod)s --physdev-EGRESS tap_%(port1)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port1)s
-I %(bn)s-sg-chain 3 %(physdev_mod)s --physdev-INGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-i_%(port2)s
-I %(bn)s-sg-chain 4 %(physdev_mod)s --physdev-EGRESS tap_%(port2)s \
%(physdev_is_bridged)s -j %(bn)s-o_%(port2)s
-I %(bn)s-sg-chain 5 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
IPTABLES_ARG['chains'] = CHAINS_EMPTY
IPTABLES_FILTER_V6_EMPTY = """# Generated by iptables_manager
*filter
:FORWARD - [0:0]
:INPUT - [0:0]
:OUTPUT - [0:0]
:neutron-filter-top - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
:%(bn)s-(%(chains)s) - [0:0]
-I FORWARD 1 -j neutron-filter-top
-I FORWARD 2 -j %(bn)s-FORWARD
-I INPUT 1 -j %(bn)s-INPUT
-I OUTPUT 1 -j neutron-filter-top
-I OUTPUT 2 -j %(bn)s-OUTPUT
-I neutron-filter-top 1 -j %(bn)s-local
-I %(bn)s-sg-chain 1 -j ACCEPT
-I %(bn)s-sg-fallback 1 -j DROP
COMMIT
# Completed by iptables_manager
""" % IPTABLES_ARG
class TestSecurityGroupAgentWithIptables(base.BaseTestCase):
    """Drive SecurityGroupAgentRpc with the plain iptables firewall driver
    and assert the exact iptables-save/iptables-restore command sequence.

    Expected rule text comes from the module-level IPTABLES_FILTER_* /
    IPTABLES_RAW_* templates; subclasses override the driver or RPC flavor.
    """

    FIREWALL_DRIVER = FIREWALL_IPTABLES_DRIVER
    # For a bridged port, traffic entering the VM leaves the bridge
    # (physdev-out) and traffic leaving the VM enters it (physdev-in).
    PHYSDEV_INGRESS = 'physdev-out'
    PHYSDEV_EGRESS = 'physdev-in'

    def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
        """Build the agent with a mocked plugin RPC and iptables 'execute'.

        :param defer_refresh_firewall: forwarded to the agent constructor.
        :param test_rpc_v1_1: when True, make the enhanced RPC call raise
            UnsupportedVersion so the agent falls back to the v1.1 API.
        """
        # Conntrack managers are module-global state; reset around each test.
        clear_mgrs = lambda: ip_conntrack.CONTRACK_MGRS.clear()
        self.addCleanup(clear_mgrs)
        clear_mgrs()
        super(TestSecurityGroupAgentWithIptables, self).setUp()
        set_firewall_driver(self.FIREWALL_DRIVER)
        cfg.CONF.set_override('enable_ipset', False, group='SECURITYGROUP')
        cfg.CONF.set_override('comment_iptables_rules', False, group='AGENT')
        self.utils_exec = mock.patch(
            'neutron.agent.linux.utils.execute').start()
        self.rpc = mock.Mock()
        self._init_agent(defer_refresh_firewall)
        if test_rpc_v1_1:
            self.rpc.security_group_info_for_devices.side_effect = (
                oslo_messaging.UnsupportedVersion('1.2'))
        self.iptables = self.agent.firewall.iptables
        self.ipconntrack = self.agent.firewall.ipconntrack
        # TODO(jlibosva) Get rid of mocking iptables execute and mock out
        # firewall instead
        self.iptables.use_ipv6 = True
        self.iptables_execute = mock.patch.object(self.iptables,
                                                  "execute").start()
        # Parallel lists filled by _register_mock_call() and consumed by
        # _verify_mock_calls().
        self.iptables_execute_return_values = []
        self.expected_call_count = 0
        self.expected_calls = []
        self.expected_process_inputs = []
        self.iptables_execute.side_effect = self.iptables_execute_return_values
        # rule1: UDP 67->68 ingress from 10.0.0.2, TCP 22 ingress, and
        # unrestricted IPv4 egress.
        rule1 = [{'direction': 'ingress',
                  'protocol': const.PROTO_NAME_UDP,
                  'ethertype': const.IPv4,
                  'source_ip_prefix': '10.0.0.2/32',
                  'source_port_range_min': 67,
                  'source_port_range_max': 67,
                  'port_range_min': 68,
                  'port_range_max': 68},
                 {'direction': 'ingress',
                  'protocol': const.PROTO_NAME_TCP,
                  'ethertype': const.IPv4,
                  'port_range_min': 22,
                  'port_range_max': 22},
                 {'direction': 'egress',
                  'ethertype': const.IPv4}]
        # rule2/rule4 add ingress from the peer port's address; rule3/rule5
        # additionally allow ICMP ingress.
        rule2 = rule1[:]
        rule2 += [{'direction': 'ingress',
                   'source_ip_prefix': '10.0.0.4/32',
                   'ethertype': const.IPv4}]
        rule3 = rule2[:]
        rule3 += [{'direction': 'ingress',
                   'protocol': const.PROTO_NAME_ICMP,
                   'ethertype': const.IPv4}]
        rule4 = rule1[:]
        rule4 += [{'direction': 'ingress',
                   'source_ip_prefix': '10.0.0.3/32',
                   'ethertype': const.IPv4}]
        rule5 = rule4[:]
        rule5 += [{'direction': 'ingress',
                   'protocol': const.PROTO_NAME_ICMP,
                   'ethertype': const.IPv4}]
        self.devices1 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3/32',
                                                   '12:34:56:78:9a:bc',
                                                   rule1)}
        # OrderedDict keeps port iteration deterministic so generated
        # iptables text lines up with the expected templates.
        self.devices2 = collections.OrderedDict([
            ('tap_port1', self._device('tap_port1',
                                       '10.0.0.3/32',
                                       '12:34:56:78:9a:bc',
                                       rule2)),
            ('tap_port2', self._device('tap_port2',
                                       '10.0.0.4/32',
                                       '12:34:56:78:9a:bd',
                                       rule4))
        ])
        self.devices3 = collections.OrderedDict([
            ('tap_port1', self._device('tap_port1',
                                       '10.0.0.3/32',
                                       '12:34:56:78:9a:bc',
                                       rule3)),
            ('tap_port2', self._device('tap_port2',
                                       '10.0.0.4/32',
                                       '12:34:56:78:9a:bd',
                                       rule5))
        ])
        self.agent.firewall.security_group_updated = mock.Mock()

    @staticmethod
    def _enforce_order_in_firewall(firewall):
        # for the sake of the test, eliminate any order randomness:
        # it helps to match iptables output against regexps consistently
        for attr in ('filtered_ports', 'unfiltered_ports'):
            setattr(firewall, attr, collections.OrderedDict())

    def _init_agent(self, defer_refresh_firewall):
        """Instantiate the agent under test against the mocked RPC client."""
        self.agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=self.rpc,
            defer_refresh_firewall=defer_refresh_firewall)
        self._enforce_order_in_firewall(self.agent.firewall)
        # don't mess with sysctl knobs in unit tests
        self.agent.firewall._enabled_netfilter_for_bridges = True

    def _device(self, device, ip, mac_address, rule):
        """Return a fake port dict shaped like the plugin RPC response."""
        return {'device': device,
                'network_id': 'fakenet%s' % device[-1:],
                'fixed_ips': [ip],
                'mac_address': mac_address,
                'security_groups': ['security_group1'],
                'security_group_rules': rule,
                'security_group_source_groups': [
                    'security_group1']}

    def _regex(self, value):
        """Turn an expected-output template into a regex pattern string."""
        value = value.replace('physdev-INGRESS', self.PHYSDEV_INGRESS)
        value = value.replace('physdev-EGRESS', self.PHYSDEV_EGRESS)
        # Escape regex metacharacters present in iptables-save output.
        value = value.replace('\n', '\\n')
        value = value.replace('[', r'\[')
        value = value.replace(']', r'\]')
        value = value.replace('*', r'\*')
        return value

    def _register_mock_call(self, *args, **kwargs):
        """Queue one expected iptables 'execute' call and its return value."""
        return_value = kwargs.pop('return_value', None)
        self.iptables_execute_return_values.append(return_value)
        has_process_input = 'process_input' in kwargs
        process_input = kwargs.get('process_input')
        self.expected_process_inputs.append((has_process_input, process_input))
        if has_process_input:
            # The exact stdin text is regex-checked in _verify_mock_calls();
            # for call matching only the call shape is recorded.
            kwargs['process_input'] = mock.ANY
        self.expected_calls.append(mock.call(*args, **kwargs))
        self.expected_call_count += 1

    def _verify_mock_calls(self, exp_fw_sg_updated_call=False):
        """Assert all queued calls happened in order with matching stdin."""
        self.assertEqual(self.expected_call_count,
                         self.iptables_execute.call_count)
        self.iptables_execute.assert_has_calls(self.expected_calls)
        for i, expected in enumerate(self.expected_process_inputs):
            check, expected_regex = expected
            if not check:
                continue
            kwargs = self.iptables_execute.call_args_list[i][1]
            self.assertThat(kwargs['process_input'],
                            matchers.MatchesRegex(expected_regex))
        self.assertEqual(exp_fw_sg_updated_call,
                         self.agent.firewall.security_group_updated.called)

    def _replay_iptables(self, v4_filter, v6_filter, raw):
        """Register the four execute calls of one apply cycle:
        save + restore for IPv4, then save + restore for IPv6."""
        self._register_mock_call(
            ['iptables-save'],
            run_as_root=True,
            return_value='')
        self._register_mock_call(
            ['iptables-restore', '-n'],
            process_input=self._regex(v4_filter + raw),
            run_as_root=True,
            return_value='')
        self._register_mock_call(
            ['ip6tables-save'],
            run_as_root=True,
            return_value='')
        self._register_mock_call(
            ['ip6tables-restore', '-n'],
            process_input=self._regex(v6_filter + raw),
            run_as_root=True,
            return_value='')

    def test_prepare_remove_port(self):
        """Preparing then removing one port applies and then clears rules."""
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        """Member updates re-render rules for every port in the group."""
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        # One _replay_iptables() per expected firewall refresh, in order:
        # prepare port1, member update, prepare port2, member update,
        # remove port2, remove port1.
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_rule_updated(self):
        """Rule updates refresh filters for already-prepared ports."""
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls()
class TestSecurityGroupAgentEnhancedRpcWithIptables(
    TestSecurityGroupAgentWithIptables):
    """Same scenarios as the parent class but via the enhanced
    security_group_info_for_devices RPC (test_rpc_v1_1=False), where rules
    are carried per security group plus a member-IP mapping."""

    def setUp(self, defer_refresh_firewall=False):
        super(TestSecurityGroupAgentEnhancedRpcWithIptables, self).setUp(
            defer_refresh_firewall=defer_refresh_firewall, test_rpc_v1_1=False)
        self.sg_info = self.rpc.security_group_info_for_devices
        # Group-level rules; the remote_group_id entry is resolved against
        # the 'sg_member_ips' mapping shipped alongside the devices.
        rule1 = [{'direction': 'ingress',
                  'protocol': const.PROTO_NAME_UDP,
                  'ethertype': const.IPv4,
                  'source_ip_prefix': '10.0.0.2/32',
                  'source_port_range_min': 67,
                  'source_port_range_max': 67,
                  'port_range_min': 68,
                  'port_range_max': 68},
                 {'direction': 'ingress',
                  'protocol': const.PROTO_NAME_TCP,
                  'ethertype': const.IPv4,
                  'port_range_min': 22,
                  'port_range_max': 22},
                 {'direction': 'egress',
                  'ethertype': const.IPv4},
                 {'direction': 'ingress',
                  'remote_group_id': 'security_group1',
                  'ethertype': const.IPv4}]
        rule2 = rule1[:]
        rule2 += [{'direction': 'ingress',
                   'protocol': const.PROTO_NAME_ICMP,
                   'ethertype': const.IPv4}]
        # Ports carry no per-port rules here; rules come from the group.
        devices_info1 = {'tap_port1': self._device('tap_port1',
                                                   '10.0.0.3/32',
                                                   '12:34:56:78:9a:bc',
                                                   [])}
        self.devices_info1 = {'security_groups': {'security_group1': rule1},
                              'sg_member_ips': {
                                  'security_group1': {
                                      'IPv4': ['10.0.0.3/32'], 'IPv6': []}},
                              'devices': devices_info1}
        devices_info2 = collections.OrderedDict([
            ('tap_port1', self._device('tap_port1',
                                       '10.0.0.3/32',
                                       '12:34:56:78:9a:bc',
                                       [])),
            ('tap_port2', self._device('tap_port2',
                                       '10.0.0.4/32',
                                       '12:34:56:78:9a:bd',
                                       []))
        ])
        self.devices_info2 = {'security_groups': {'security_group1': rule1},
                              'sg_member_ips': {
                                  'security_group1': {
                                      'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
                                      'IPv6': []}},
                              'devices': devices_info2}
        self.devices_info3 = {'security_groups': {'security_group1': rule2},
                              'sg_member_ips': {
                                  'security_group1': {
                                      'IPv4': ['10.0.0.3/32', '10.0.0.4/32'],
                                      'IPv6': []}},
                              'devices': devices_info2}

    def test_prepare_remove_port(self):
        """Prepare then remove one port using the enhanced RPC payloads."""
        self.ipconntrack._device_zone_map = {}
        self.sg_info.return_value = self.devices_info1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        """Member updates must also trigger firewall.security_group_updated
        (twice: once per membership change)."""
        self.sg_info.return_value = self.devices_info1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.sg_info.return_value = self.devices_info2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.sg_info.return_value = self.devices_info1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls(True)
        self.assertEqual(
            2, self.agent.firewall.security_group_updated.call_count)

    def test_security_group_rule_updated(self):
        """Rule updates notify the firewall with the affected group set."""
        self.sg_info.return_value = self.devices_info2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.sg_info.return_value = self.devices_info3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls(True)
        self.agent.firewall.security_group_updated.assert_called_with(
            'sg_rule', set(['security_group1']))
class TestSecurityGroupAgentEnhancedIpsetWithIptables(
    TestSecurityGroupAgentEnhancedRpcWithIptables):
    """Enhanced-RPC scenarios with ipset enabled, so the expected IPv4
    output uses the IPSET_FILTER_* templates (set matches instead of
    per-IP rules); ipset's own 'execute' is mocked out too."""

    def setUp(self, defer_refresh_firewall=False):
        super(TestSecurityGroupAgentEnhancedIpsetWithIptables, self).setUp(
            defer_refresh_firewall)
        self.agent.firewall.enable_ipset = True
        self.ipset = self.agent.firewall.ipset
        self.ipset_execute = mock.patch.object(self.ipset,
                                               "execute").start()

    def test_prepare_remove_port(self):
        """Prepare then remove one port with ipset-based remote groups."""
        self.ipconntrack._device_zone_map = {}
        self.sg_info.return_value = self.devices_info1
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        """Member updates change ipset membership, not iptables rule text."""
        self.sg_info.return_value = self.devices_info1
        # Pin the ipset membership diff so the replayed output is stable.
        self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3'])
        self.ipset._get_deleted_set_ips = mock.Mock(return_value=[])
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPSET_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_BRIDGE_NET_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.sg_info.return_value = self.devices_info2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.sg_info.return_value = self.devices_info1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls(True)
        self.assertEqual(
            2, self.agent.firewall.security_group_updated.call_count)

    def test_security_group_rule_updated(self):
        """Rule updates still rewrite iptables text around the ipset match."""
        self.ipset._get_new_set_ips = mock.Mock(return_value=['10.0.0.3'])
        self.ipset._get_deleted_set_ips = mock.Mock(return_value=[])
        self.sg_info.return_value = self.devices_info2
        self._replay_iptables(IPSET_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self._replay_iptables(IPSET_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_BRIDGE_NET_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.sg_info.return_value = self.devices_info3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls(True)
        self.agent.firewall.security_group_updated.assert_called_with(
            'sg_rule', set(['security_group1']))
class SGNotificationTestMixin(object):
    """Mixin asserting that plugin CRUD operations emit the expected
    security-group agent notifications (self.notifier is a mock supplied
    by the concrete test class)."""

    def test_security_group_rule_updated(self):
        name = 'webservers'
        description = 'my webservers'
        with self.security_group(name, description) as sg:
            with self.security_group(name, description):
                security_group_id = sg['security_group']['id']
                rule = self._build_security_group_rule(
                    security_group_id,
                    direction='ingress',
                    proto=const.PROTO_NAME_TCP)
                security_group_rule = self._make_security_group_rule(self.fmt,
                                                                     rule)
                self._delete('security-group-rules',
                             security_group_rule['security_group_rule']['id'])
            # Both the rule creation and the deletion must have notified.
            self.notifier.assert_has_calls(
                [mock.call.security_groups_rule_updated(mock.ANY,
                                                        [security_group_id]),
                 mock.call.security_groups_rule_updated(mock.ANY,
                                                        [security_group_id])])

    def test_security_group_member_updated(self):
        with self.network() as n:
            with self.subnet(n):
                with self.security_group() as sg:
                    security_group_id = sg['security_group']['id']
                    res = self._create_port(self.fmt, n['network']['id'])
                    port = self.deserialize(self.fmt, res)
                    # Attach the port to the group through a port update.
                    data = {'port': {'fixed_ips': port['port']['fixed_ips'],
                                     'name': port['port']['name'],
                                     ext_sg.SECURITYGROUPS:
                                     [security_group_id]}}
                    req = self.new_update_request('ports', data,
                                                  port['port']['id'])
                    res = self.deserialize(self.fmt,
                                           req.get_response(self.api))
                    self.assertEqual(res['port'][ext_sg.SECURITYGROUPS][0],
                                     security_group_id)
                    self._delete('ports', port['port']['id'])
                    self.notifier.assert_has_calls(
                        [mock.call.security_groups_member_updated(
                            mock.ANY, [mock.ANY])])
class TestSecurityGroupAgentWithOVSIptables(
    TestSecurityGroupAgentWithIptables):
    """Run the parent scenarios with the OVS hybrid driver; expected output
    differs in the raw-table templates (IPTABLES_RAW_DEVICE_*) and in the
    device-name prefixes rewritten by _regex()."""

    FIREWALL_DRIVER = FIREWALL_HYBRID_DRIVER

    def setUp(self, defer_refresh_firewall=False, test_rpc_v1_1=True):
        super(TestSecurityGroupAgentWithOVSIptables, self).setUp(
            defer_refresh_firewall,
            test_rpc_v1_1)

    def _init_agent(self, defer_refresh_firewall):
        """Instantiate the agent under test against the mocked RPC client."""
        self.agent = sg_rpc.SecurityGroupAgentRpc(
            context=None, plugin_rpc=self.rpc,
            defer_refresh_firewall=defer_refresh_firewall)
        self._enforce_order_in_firewall(self.agent.firewall)
        # don't mess with sysctl knobs in unit tests
        self.agent.firewall._enabled_netfilter_for_bridges = True

    def test_prepare_remove_port(self):
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_member_updated(self):
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        # One replay per expected refresh; see the parent class for the
        # prepare/update/remove ordering.
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_1_2, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self._replay_iptables(IPTABLES_FILTER_2_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self._replay_iptables(IPTABLES_FILTER_1, IPTABLES_FILTER_V6_1,
                              IPTABLES_RAW_DEVICE_1)
        self._replay_iptables(IPTABLES_FILTER_EMPTY, IPTABLES_FILTER_V6_EMPTY,
                              IPTABLES_RAW_DEFAULT)
        self.agent.prepare_devices_filter(['tap_port1'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.prepare_devices_filter(['tap_port2'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices1
        self.agent.security_groups_member_updated(['security_group1'])
        self.agent.remove_devices_filter(['tap_port2'])
        self.agent.remove_devices_filter(['tap_port1'])
        self._verify_mock_calls()

    def test_security_group_rule_updated(self):
        self.ipconntrack._device_zone_map = {}
        self.rpc.security_group_rules_for_devices.return_value = self.devices2
        self._replay_iptables(IPTABLES_FILTER_2, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self._replay_iptables(IPTABLES_FILTER_2_3, IPTABLES_FILTER_V6_2,
                              IPTABLES_RAW_DEVICE_2)
        self.agent.prepare_devices_filter(['tap_port1', 'tap_port3'])
        self.rpc.security_group_rules_for_devices.return_value = self.devices3
        self.agent.security_groups_rule_updated(['security_group1'])
        self._verify_mock_calls()

    def _regex(self, value):
        #Note(nati): tap is prefixed on the device
        # in the OVSHybridIptablesFirewallDriver
        value = value.replace('tap_port', 'taptap_port')
        value = value.replace('qvbtaptap_port', 'qvbtap_port')
        value = value.replace('o_port', 'otap_port')
        value = value.replace('i_port', 'itap_port')
        value = value.replace('s_port', 'stap_port')
        return super(
            TestSecurityGroupAgentWithOVSIptables,
            self)._regex(value)
class TestSecurityGroupExtensionControl(base.BaseTestCase):
    """Verify that the 'security-group' extension alias is stripped from
    (or kept in) the alias list according to the enable_security_group
    configuration knob."""

    def _assert_sg_alias_filtering(self, enabled, expected_aliases):
        # Toggle the config flag, filter a fixed alias list in place, and
        # compare against the expected survivors.
        set_enable_security_groups(enabled)
        aliases = ['dummy1', 'security-group', 'dummy2']
        sg_rpc.disable_security_group_extension_by_config(aliases)
        self.assertEqual(aliases, expected_aliases)

    def test_disable_security_group_extension_by_config(self):
        self._assert_sg_alias_filtering(False, ['dummy1', 'dummy2'])

    def test_enable_security_group_extension_by_config(self):
        self._assert_sg_alias_filtering(
            True, ['dummy1', 'security-group', 'dummy2'])
| true
| true
|
1c429bafa9a19f7c83fd8e02eb564e6443c7c7f2
| 128,741
|
py
|
Python
|
env/lib/python3.8/site-packages/pandas/tests/plotting/test_frame.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 1,738
|
2017-09-21T10:59:12.000Z
|
2022-03-31T21:05:46.000Z
|
env/lib/python3.8/site-packages/pandas/tests/plotting/test_frame.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 427
|
2017-09-29T22:54:36.000Z
|
2022-02-15T19:26:50.000Z
|
env/lib/python3.8/site-packages/pandas/tests/plotting/test_frame.py
|
acrucetta/Chicago_COVI_WebApp
|
a37c9f492a20dcd625f8647067394617988de913
|
[
"MIT",
"Unlicense"
] | 671
|
2017-09-21T08:04:01.000Z
|
2022-03-29T14:30:07.000Z
|
# coding: utf-8
""" Test cases for DataFrame.plot """
from datetime import date, datetime
import itertools
import string
import warnings
import numpy as np
from numpy.random import rand, randn
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.api import is_list_like
import pandas as pd
from pandas import DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
from pandas.io.formats.printing import pprint_thing
import pandas.plotting as plotting
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_xticklabels(), visible=exp)
    @pytest.mark.slow
    def test_plot(self):
        """Smoke-test DataFrame.plot across subplot layouts, index kinds
        (string, MultiIndex, unicode) and axis-visibility expectations."""
        from pandas.plotting._matplotlib.compat import _mpl_ge_3_1_0

        df = self.tdf
        _check_plot_works(df.plot, grid=False)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        with tm.assert_produces_warning(UserWarning):
            # layout=(-1, 2): infer the row count from the column count.
            axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))
        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True, use_index=False)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))

        # An unknown matplotlib keyword must raise; the message text changed
        # in matplotlib 3.1.
        df = DataFrame({"x": [1, 2], "y": [3, 4]})
        if _mpl_ge_3_1_0():
            msg = "'Line2D' object has no property 'blarg'"
        else:
            msg = "Unknown property blarg"
        with pytest.raises(AttributeError, match=msg):
            df.plot.line(blarg=True)

        # String index plus assorted axis keyword arguments.
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
        _check_plot_works(df.plot, use_index=True)
        _check_plot_works(df.plot, sort_columns=False)
        _check_plot_works(df.plot, yticks=[1, 5, 10])
        _check_plot_works(df.plot, xticks=[1, 5, 10])
        _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.plot, subplots=True, title="blah")
        # We have to redo it here because _check_plot_works does two plots,
        # once without an ax kwarg and once with an ax kwarg and the new sharex
        # behaviour does not remove the visibility of the latter axis (as ax is
        # present). see: https://github.com/pandas-dev/pandas/issues/9737
        axes = df.plot(subplots=True, title="blah")
        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
        # axes[0].figure.savefig("test.png")
        # With shared x, only the bottom subplot keeps its x labels.
        for ax in axes[:2]:
            self._check_visible(ax.xaxis)  # xaxis must be visible for grid
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes[2]]:
            self._check_visible(ax.xaxis)
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
            self._check_ticks_props(ax, xrot=0)
        _check_plot_works(df.plot, title="blah")

        # MultiIndex index.
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        _check_plot_works(df.plot, use_index=True)

        # unicode
        index = MultiIndex.from_tuples(
            [
                ("\u03b1", 0),
                ("\u03b1", 1),
                ("\u03b2", 2),
                ("\u03b2", 3),
                ("\u03b3", 4),
                ("\u03b3", 5),
                ("\u03b4", 6),
                ("\u03b4", 7),
            ],
            names=["i0", "i1"],
        )
        columns = MultiIndex.from_tuples(
            [("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"]
        )
        df = DataFrame(np.random.randint(0, 10, (8, 2)), columns=columns, index=index)
        _check_plot_works(df.plot, title="\u03A3")

        # GH 6951
        # Test with single column
        df = DataFrame({"x": np.random.rand(10)})
        axes = _check_plot_works(df.plot.bar, subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        # When ax is supplied and required number of axes is 1,
        # passed ax should be used:
        fig, ax = self.plt.subplots()
        axes = df.plot.bar(subplots=True, ax=ax)
        assert len(axes) == 1
        result = ax.axes
        assert result is axes[0]
def test_integer_array_plot(self):
# GH 25587
arr = integer_array([1, 2, 3, 4], dtype="UInt32")
s = Series(arr)
_check_plot_works(s.plot.line)
_check_plot_works(s.plot.bar)
_check_plot_works(s.plot.hist)
_check_plot_works(s.plot.pie)
df = DataFrame({"x": arr, "y": arr})
_check_plot_works(df.plot.line)
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.hist)
_check_plot_works(df.plot.pie, y="y")
_check_plot_works(df.plot.scatter, x="x", y="y")
_check_plot_works(df.plot.hexbin, x="x", y="y")
def test_mpl2_color_cycle_str(self):
# GH 15516
colors = ["C" + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
for c in colors:
_check_plot_works(df.plot, color=c)
def test_color_single_series_list(self):
# GH 3486
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=["red"])
def test_rgb_tuple_color(self):
# GH 16695
df = DataFrame({"x": [1, 2], "y": [3, 4]})
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0))
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color="")
def test_color_and_style_arguments(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
# passing both 'color' and 'style' arguments should be allowed
# if there is no color symbol in the style strings:
ax = df.plot(color=["red", "black"], style=["-", "--"])
# check that the linestyles are correctly set:
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ["-", "--"]
# check that the colors are correctly set:
color = [line.get_color() for line in ax.lines]
assert color == ["red", "black"]
# passing both 'color' and 'style' arguments should not be allowed
# if there is a color symbol in the style strings:
with pytest.raises(ValueError):
df.plot(color=["red", "black"], style=["k-", "r--"])
def test_nonnumeric_exclude(self):
df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1 # B was plotted
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
ax = df.plot(x="a", y="b")
self._check_text_labels(ax.xaxis.get_label(), "a")
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
# GH 8494
df = DataFrame(randn(2, 2), columns=["a", "b"])
df.index.name = "NAME"
df.plot(y="b", label="LABEL")
assert df.index.name == "NAME"
    @pytest.mark.slow
    def test_plot_xy(self):
        """plot(x=..., y=...) must produce the same data as the set_index equivalent."""
        # columns.inferred_type == 'string'
        df = self.tdf
        self._check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
        self._check_data(df.plot(x=0), df.set_index("A").plot())
        self._check_data(df.plot(y=0), df.B.plot())
        self._check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot())
        self._check_data(df.plot(x="A"), df.set_index("A").plot())
        self._check_data(df.plot(y="B"), df.B.plot())
        # columns.inferred_type == 'integer'
        df.columns = np.arange(1, len(df.columns) + 1)
        self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
        self._check_data(df.plot(x=1), df.set_index(1).plot())
        self._check_data(df.plot(y=1), df[1].plot())
        # figsize and title pass straight through to the axes
        ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
        self._check_text_labels(ax.title, "Test")
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))
        # columns.inferred_type == 'mixed'
        # TODO add MultiIndex test
@pytest.mark.slow
@pytest.mark.parametrize(
"input_log, expected_log", [(True, "log"), ("sym", "symlog")]
)
def test_logscales(self, input_log, expected_log):
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=input_log)
self._check_ax_scales(ax, yaxis=expected_log)
assert ax.get_yscale() == expected_log
ax = df.plot(logx=input_log)
self._check_ax_scales(ax, xaxis=expected_log)
assert ax.get_xscale() == expected_log
ax = df.plot(loglog=input_log)
self._check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)
assert ax.get_xscale() == expected_log
assert ax.get_yscale() == expected_log
@pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"])
def test_invalid_logscale(self, input_param):
# GH: 24867
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
msg = "Boolean, None and 'sym' are valid options, 'sm' is given."
with pytest.raises(ValueError, match=msg):
df.plot(**{input_param: "sm"})
    @pytest.mark.slow
    def test_xcompat(self):
        """x_compat (keyword and plot_params) disables PeriodIndex x-axis conversion."""
        import pandas as pd

        df = self.tdf
        # explicit keyword: raw x data, not a PeriodIndex
        ax = df.plot(x_compat=True)
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        tm.close()
        # global option, legacy "xaxis.compat" spelling
        pd.plotting.plot_params["xaxis.compat"] = True
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        tm.close()
        # option switched off: x data is period-convertible again
        pd.plotting.plot_params["x_compat"] = False
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
        tm.close()
        # useful if you're plotting a bunch together
        with pd.plotting.plot_params.use("x_compat", True):
            ax = df.plot()
            lines = ax.get_lines()
            assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        tm.close()
        # context manager must restore the previous setting on exit
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=["a", "b"],
)
df.plot()
self.plt.axhline(y=0)
tm.close()
    def test_unsorted_index(self):
        """A descending index must keep (x, y) pairs intact in the plotted line."""
        df = DataFrame(
            {"y": np.arange(100)}, index=np.arange(99, -1, -1), dtype=np.int64
        )
        ax = df.plot()
        lines = ax.get_lines()[0]
        rs = lines.get_xydata()
        # rebuild a Series from the plotted (x, y) points and compare to the input
        rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
        tm.assert_series_equal(rs, df.y, check_index_type=False)
        tm.close()
        # same check with a float index
        df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
        ax = df.plot()
        lines = ax.get_lines()[0]
        rs = lines.get_xydata()
        rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
        tm.assert_series_equal(rs, df.y)
def test_unsorted_index_lims(self):
df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame(
{"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]},
index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
)
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]})
ax = df.plot(x="z", y="y")
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
    @pytest.mark.slow
    def test_subplots(self):
        """subplots=True: one axes per column; sharex/legend visibility rules hold."""
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
        for kind in ["bar", "barh", "line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
            assert axes.shape == (3,)
            # each subplot carries a legend naming its own column
            for ax, column in zip(axes, df.columns):
                self._check_legend_labels(ax, labels=[pprint_thing(column)])
            for ax in axes[:-2]:
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                if not (kind == "bar" and self.mpl_ge_3_1_0):
                    # change https://github.com/pandas-dev/pandas/issues/26714
                    self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())
            # only the bottom axes keeps its x tick labels under sharex=True
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            # without sharing, every axes keeps ticks and labels visible
            axes = df.plot(kind=kind, subplots=True, sharex=False)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
            # legend=False must suppress all legends
            axes = df.plot(kind=kind, subplots=True, legend=False)
            for ax in axes:
                assert ax.get_legend() is None
def test_groupby_boxplot_sharey(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
# behavior without keyword
axes = df.groupby("c").boxplot()
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# set sharey=True should be identical
axes = df.groupby("c").boxplot(sharey=True)
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# sharey=False, all yticklabels should be visible
axes = df.groupby("c").boxplot(sharey=False)
expected = [True, True, True, True]
self._assert_ytickslabels_visibility(axes, expected)
def test_groupby_boxplot_sharex(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
# behavior without keyword
axes = df.groupby("c").boxplot()
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# set sharex=False should be identical
axes = df.groupby("c").boxplot(sharex=False)
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# sharex=True, yticklabels should be visible
# only for bottom plots
axes = df.groupby("c").boxplot(sharex=True)
expected = [False, False, True, True]
self._assert_xtickslabels_visibility(axes, expected)
    @pytest.mark.slow
    def test_subplots_timeseries(self):
        """Shared-x timeseries subplots hide all but the bottom x tick labels."""
        idx = date_range(start="2014-07-01", freq="M", periods=10)
        df = DataFrame(np.random.rand(10, 3), index=idx)
        for kind in ["line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
            for ax in axes[:-2]:
                # GH 7801
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())
            # the bottom axes keeps everything visible
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            self._check_ticks_props(axes, xrot=0)
            # sharex=False restores labels everywhere; rot/fontsize pass through
            axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
                self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
    def test_subplots_timeseries_y_axis(self):
        """The y axis must accept numeric, timedelta and datetime data (GH 16953)."""
        data = {
            "numeric": np.array([1, 2, 5]),
            "timedelta": [
                pd.Timedelta(-10, unit="s"),
                pd.Timedelta(10, unit="m"),
                pd.Timedelta(10, unit="h"),
            ],
            "datetime_no_tz": [
                pd.to_datetime("2017-08-01 00:00:00"),
                pd.to_datetime("2017-08-01 02:00:00"),
                pd.to_datetime("2017-08-02 00:00:00"),
            ],
            "datetime_all_tz": [
                pd.to_datetime("2017-08-01 00:00:00", utc=True),
                pd.to_datetime("2017-08-01 02:00:00", utc=True),
                pd.to_datetime("2017-08-02 00:00:00", utc=True),
            ],
            "text": ["This", "should", "fail"],
        }
        testdata = DataFrame(data)
        # each supported dtype must round-trip through the plotted line's y data
        ax_numeric = testdata.plot(y="numeric")
        assert (
            ax_numeric.get_lines()[0].get_data()[1] == testdata["numeric"].values
        ).all()
        ax_timedelta = testdata.plot(y="timedelta")
        assert (
            ax_timedelta.get_lines()[0].get_data()[1] == testdata["timedelta"].values
        ).all()
        ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
        assert (
            ax_datetime_no_tz.get_lines()[0].get_data()[1]
            == testdata["datetime_no_tz"].values
        ).all()
        ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
        assert (
            ax_datetime_all_tz.get_lines()[0].get_data()[1]
            == testdata["datetime_all_tz"].values
        ).all()
        # non-plottable object data must raise
        msg = "no numeric data to plot"
        with pytest.raises(TypeError, match=msg):
            testdata.plot(y="text")
    @pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
    def test_subplots_timeseries_y_axis_not_supported(self):
        """
        Document the y-axis dtypes that are expected to fail (hence the xfail).

        This test will fail for:
        period:
        since period isn't yet implemented in ``select_dtypes``
        and because it will need a custom value converter +
        tick formatter (as was done for x-axis plots)
        categorical:
        because it will need a custom value converter +
        tick formatter (also doesn't work for x-axis, as of now)
        datetime_mixed_tz:
        because of the way how pandas handles ``Series`` of
        ``datetime`` objects with different timezone,
        generally converting ``datetime`` objects in a tz-aware
        form could help with this problem
        """
        data = {
            "numeric": np.array([1, 2, 5]),
            "period": [
                pd.Period("2017-08-01 00:00:00", freq="H"),
                pd.Period("2017-08-01 02:00", freq="H"),
                pd.Period("2017-08-02 00:00:00", freq="H"),
            ],
            "categorical": pd.Categorical(
                ["c", "b", "a"], categories=["a", "b", "c"], ordered=False
            ),
            "datetime_mixed_tz": [
                pd.to_datetime("2017-08-01 00:00:00", utc=True),
                pd.to_datetime("2017-08-01 02:00:00"),
                pd.to_datetime("2017-08-02 00:00:00"),
            ],
        }
        testdata = pd.DataFrame(data)
        # each unsupported dtype would need to round-trip its values to pass
        ax_period = testdata.plot(x="numeric", y="period")
        assert (
            ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
        ).all()
        ax_categorical = testdata.plot(x="numeric", y="categorical")
        assert (
            ax_categorical.get_lines()[0].get_data()[1]
            == testdata["categorical"].values
        ).all()
        ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
        assert (
            ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
            == testdata["datetime_mixed_tz"].values
        ).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1,)
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
@pytest.mark.slow
def test_subplots_warnings(self):
# GH 9464
with tm.assert_produces_warning(None):
df = DataFrame(np.random.randn(100, 4))
df.plot(subplots=True, layout=(3, 2))
df = DataFrame(
np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
)
df.plot(subplots=True, layout=(3, 2))
    @pytest.mark.slow
    def test_subplots_multiple_axes(self):
        """Passing existing axes arrays to subplots=True (GH 5353, 6970, GH 7069)."""
        fig, axes = self.plt.subplots(2, 3)
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
        returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        # draw on second row
        returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
        tm.close()
        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            df.plot(subplots=True, ax=axes)
        # pass 2-dim axes and invalid layout
        # invalid layout should not affect to input and return value
        # (show warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes)
        fig, axes = self.plt.subplots(2, 2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))
            returned = df.plot(
                subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)
            returned = df.plot(
                subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)
            returned = df.plot(
                subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)
        # single column
        fig, axes = self.plt.subplots(1, 1)
        df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
        axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        assert axes.shape == (1,)
    def test_subplots_ts_share_axes(self):
        """With sharex/sharey, only edge axes of the grid show tick labels (GH 3964)."""
        fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
        self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
        df = DataFrame(
            np.random.randn(10, 9),
            index=date_range(start="2014-07-01", freq="M", periods=10),
        )
        # one column per cell of the 3x3 grid
        for i, ax in enumerate(axes.ravel()):
            df[i].plot(ax=ax, fontsize=5)
        # Rows other than bottom should not be visible
        for ax in axes[0:-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=False)
        # Bottom row should be visible
        for ax in axes[-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=True)
        # First column should be visible
        for ax in axes[[0, 1, 2], [0]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=True)
        # Other columns should not be visible
        for ax in axes[[0, 1, 2], [1]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes[[0, 1, 2], [2]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
axes = df[["A", "B"]].plot(subplots=True)
df["C"].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
    @pytest.mark.slow
    def test_subplots_dup_columns(self):
        """Duplicate column names must yield one line per column (GH 10962)."""
        df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
        axes = df.plot(subplots=True)
        for ax in axes:
            self._check_legend_labels(ax, labels=["a"])
            assert len(ax.lines) == 1
        tm.close()
        axes = df.plot(subplots=True, secondary_y="a")
        for ax in axes:
            # (right) is only attached when subplots=False
            self._check_legend_labels(ax, labels=["a"])
            assert len(ax.lines) == 1
        tm.close()
        # without subplots, every duplicate column goes to the secondary axis
        ax = df.plot(secondary_y="a")
        self._check_legend_labels(ax, labels=["a (right)"] * 5)
        assert len(ax.lines) == 0
        assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = -DataFrame(
rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
    def test_line_area_stacked(self):
        """Stacked line/area plots must equal cumulative sums of unstacked ones."""
        with tm.RNGContext(42):
            df = DataFrame(rand(6, 4), columns=["w", "x", "y", "z"])
            neg_df = -df
            # each column has either positive or negative value
            sep_df = DataFrame(
                {"w": rand(6), "x": rand(6), "y": -rand(6), "z": -rand(6)}
            )
            # each column has positive-negative mixed value
            mixed_df = DataFrame(
                randn(6, 4),
                index=list(string.ascii_letters[:6]),
                columns=["w", "x", "y", "z"],
            )
            for kind in ["line", "area"]:
                ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)
                ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)
                # positive and negative columns stack separately (2 + 2 lines)
                ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
                self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])
                # mixed-sign columns cannot be stacked
                _check_plot_works(mixed_df.plot, stacked=False)
                with pytest.raises(ValueError):
                    mixed_df.plot(stacked=True)
                # Use an index with strictly positive values, preventing
                # matplotlib from warning about ignoring xlim
                df2 = df.set_index(df.index + 1)
                _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
    def test_line_area_nan_df(self):
        """NaNs must be masked in line plots and zero-filled in stacked/area plots."""
        values1 = [1, 2, np.nan, 3]
        values2 = [3, np.nan, 2, 1]
        df = DataFrame({"a": values1, "b": values2})
        tdf = DataFrame({"a": values1, "b": values2}, index=tm.makeDateIndex(k=4))
        for d in [df, tdf]:
            # plain line plot: y data is a masked array with NaN positions masked
            ax = _check_plot_works(d.plot)
            masked1 = ax.lines[0].get_ydata()
            masked2 = ax.lines[1].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)
            exp = np.array([3, 2, 1], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)
            tm.assert_numpy_array_equal(
                masked1.mask, np.array([False, False, True, False])
            )
            tm.assert_numpy_array_equal(
                masked2.mask, np.array([False, True, False, False])
            )
            # stacked / area plots fill NaN with 0 before accumulating
            expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
            expected2 = np.array([3, 0, 2, 1], dtype=np.float64)
            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)
            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=["x", "y", "z"])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=["x", "y", "z", "four"])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
    @pytest.mark.slow
    def test_bar_colors(self):
        """Bar colors: default cycle, custom list, named/str colormaps, single color."""
        import matplotlib.pyplot as plt

        default_colors = self._unpack_cycler(plt.rcParams)
        df = DataFrame(randn(5, 5))
        ax = df.plot.bar()
        # every 5th patch is the first bar of a column
        self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
        tm.close()
        custom_colors = "rgcby"
        ax = df.plot.bar(color=custom_colors)
        self._check_colors(ax.patches[::5], facecolors=custom_colors)
        tm.close()
        from matplotlib import cm

        # Test str -> colormap functionality
        ax = df.plot.bar(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::5], facecolors=rgba_colors)
        tm.close()
        # Test colormap functionality
        ax = df.plot.bar(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::5], facecolors=rgba_colors)
        tm.close()
        # single named color applied to a single column
        ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
        self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
        tm.close()
        # single named color applied to every column
        ax = df.plot(kind="bar", color="green")
        self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
        tm.close()
def test_bar_user_colors(self):
df = pd.DataFrame(
{"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
)
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y="A", color=df["color"])
result = [p.get_facecolor() for p in ax.patches]
expected = [
(1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
]
assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(
df, kind="bar", stacked=False, width=0.9, position=0.2
)
self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(
df, kind="barh", stacked=False, width=0.9, position=0.2
)
self._check_bar_alignment(
df, kind="barh", stacked=True, width=0.9, position=0.2
)
self._check_bar_alignment(
df, kind="bar", subplots=True, width=0.9, position=0.2
)
self._check_bar_alignment(
df, kind="barh", subplots=True, width=0.9, position=0.2
)
    @pytest.mark.slow
    def test_bar_barwidth_position_int(self):
        """width=1 bars must stay centered on integer ticks (GH 12979)."""
        df = DataFrame(randn(5, 5))
        for w in [1, 1.0]:
            ax = df.plot.bar(stacked=True, width=w)
            ticks = ax.xaxis.get_ticklocs()
            tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
            assert ax.get_xlim() == (-0.75, 4.75)
            # check left-edge of bars
            assert ax.patches[0].get_x() == -0.5
            assert ax.patches[-1].get_x() == 3.5
        # alignment must also hold for the other kinds/layouts
        self._check_bar_alignment(df, kind="bar", stacked=True, width=1)
        self._check_bar_alignment(df, kind="barh", stacked=False, width=1)
        self._check_bar_alignment(df, kind="barh", stacked=True, width=1)
        self._check_bar_alignment(df, kind="bar", subplots=True, width=1)
        self._check_bar_alignment(df, kind="barh", subplots=True, width=1)
    @pytest.mark.slow
    def test_bar_bottom_left(self):
        """bottom=/left= offsets must shift each bar's baseline accordingly."""
        df = DataFrame(rand(5, 5))
        # scalar bottom applies to all 25 bars
        ax = df.plot.bar(stacked=False, bottom=1)
        result = [p.get_y() for p in ax.patches]
        assert result == [1] * 25
        # per-row bottoms shift the base (first-column) bars when stacked
        ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
        result = [p.get_y() for p in ax.patches[:5]]
        assert result == [-1, -2, -3, -4, -5]
        # horizontal variant uses left= and get_x()
        ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
        result = [p.get_x() for p in ax.patches]
        assert result == [1] * 25
        ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
        result = [p.get_x() for p in ax.patches[:5]]
        assert result == [1, 2, 3, 4, 5]
        # subplots: each axes holds one column of 5 bars
        axes = df.plot.bar(subplots=True, bottom=-1)
        for ax in axes:
            result = [p.get_y() for p in ax.patches]
            assert result == [-1] * 5
        axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
        for ax in axes:
            result = [p.get_x() for p in ax.patches]
            assert result == [1] * 5
@pytest.mark.slow
def test_bar_nan(self):
df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]})
ax = df.plot.bar()
expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
result = [p.get_height() for p in ax.patches]
assert result == expected
ax = df.plot.bar(stacked=True)
result = [p.get_height() for p in ax.patches]
assert result == expected
result = [p.get_y() for p in ax.patches]
expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
assert result == expected
    @pytest.mark.slow
    def test_bar_categorical(self):
        """Plain and CategoricalIndex frames must produce identical bar geometry (GH 13019)."""
        df1 = pd.DataFrame(
            np.random.randn(6, 5),
            index=pd.Index(list("ABCDEF")),
            columns=pd.Index(list("abcde")),
        )
        # categorical index must behave the same
        df2 = pd.DataFrame(
            np.random.randn(6, 5),
            index=pd.CategoricalIndex(list("ABCDEF")),
            columns=pd.CategoricalIndex(list("abcde")),
        )
        for df in [df1, df2]:
            ax = df.plot.bar()
            ticks = ax.xaxis.get_ticklocs()
            tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
            assert ax.get_xlim() == (-0.5, 5.5)
            # check left-edge of bars
            assert ax.patches[0].get_x() == -0.25
            assert ax.patches[-1].get_x() == 5.15
            # stacked bars are full-width, so the last one starts further left
            ax = df.plot.bar(stacked=True)
            tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
            assert ax.get_xlim() == (-0.5, 5.5)
            assert ax.patches[0].get_x() == -0.25
            assert ax.patches[-1].get_x() == 4.75
@pytest.mark.slow
def test_plot_scatter(self):
df = DataFrame(
randn(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
_check_plot_works(df.plot.scatter, x="x", y="y")
_check_plot_works(df.plot.scatter, x=1, y=2)
with pytest.raises(TypeError):
df.plot.scatter(x="x")
with pytest.raises(TypeError):
df.plot.scatter(y="y")
# GH 6951
axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
def test_raise_error_on_datetime_time_data(self):
# GH 8113, datetime.time type is not supported by matplotlib in scatter
df = pd.DataFrame(np.random.randn(10), columns=["a"])
df["dtime"] = pd.date_range(start="2014-01-01", freq="h", periods=10).time
msg = "must be a string or a number, not 'datetime.time'"
with pytest.raises(TypeError, match=msg):
df.plot(kind="scatter", x="dtime", y="a")
def test_scatterplot_datetime_data(self):
# GH 30391
dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
vals = np.random.normal(0, 1, len(dates))
df = pd.DataFrame({"dates": dates, "vals": vals})
_check_plot_works(df.plot.scatter, x="dates", y="vals")
_check_plot_works(df.plot.scatter, x=0, y=1)
def test_scatterplot_object_data(self):
# GH 18755
df = pd.DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))
_check_plot_works(df.plot.scatter, x="a", y="b")
_check_plot_works(df.plot.scatter, x=0, y=1)
df = pd.DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))
_check_plot_works(df.plot.scatter, x="a", y="b")
_check_plot_works(df.plot.scatter, x=0, y=1)
    @pytest.mark.slow
    def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
        # addressing issue #10611, to ensure colorbar does not
        # interfere with x-axis label and ticklabels with
        # ipython inline backend.
        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
        # same scatter with and without a colorbar (c="C label" adds one)
        ax1 = df.plot.scatter(x="A label", y="B label")
        ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
        # minor and major tick-label visibility must be unchanged
        vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
        vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
        assert vis1 == vis2
        vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
        vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
        assert vis1 == vis2
        # and so must the x-axis label itself
        assert (
            ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
        )
    @pytest.mark.slow
    def test_if_hexbin_xaxis_label_is_visible(self):
        # addressing issue #10678, to ensure colorbar does not
        # interfere with x-axis label and ticklabels with
        # ipython inline backend.
        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
        ax = df.plot.hexbin("A label", "B label", gridsize=12)
        # every tick label and the axis label must remain visible
        assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
        assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
        assert ax.xaxis.get_label().get_visible()
    @pytest.mark.slow
    def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
        """Colorbars should sit directly beside the axes they belong to."""
        import matplotlib.pyplot as plt

        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
        fig, axes = plt.subplots(1, 2)
        df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
        df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
        plt.tight_layout()
        points = np.array([ax.get_position().get_points() for ax in fig.axes])
        axes_x_coords = points[:, :, 0]
        # indices 0/1 are the parent axes and 2/3 their colorbars; the
        # horizontal gap between parents should equal that between colorbars
        parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
        colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
        assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
@pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
@pytest.mark.slow
def test_plot_scatter_with_categorical_data(self, x, y):
# after fixing GH 18755, should be able to plot categorical data
df = pd.DataFrame(
{"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}
)
_check_plot_works(df.plot.scatter, x=x, y=y)
    @pytest.mark.slow
    def test_plot_scatter_with_c(self):
        """`c=` accepts a column label/position, a color name, or an array."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["x", "y", "z", "four"],
        )
        axes = [df.plot.scatter(x="x", y="y", c="z"), df.plot.scatter(x=0, y=1, c=2)]
        for ax in axes:
            # default to Greys
            assert ax.collections[0].cmap.name == "Greys"
            # n.b. there appears to be no public method
            # to get the colorbar label
            assert ax.collections[0].colorbar._label == "z"
        cm = "cubehelix"
        ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm)
        assert ax.collections[0].cmap.name == cm
        # verify turning off colorbar works
        ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False)
        assert ax.collections[0].colorbar is None
        # verify that we can still plot a solid color
        ax = df.plot.scatter(x=0, y=1, c="red")
        assert ax.collections[0].colorbar is None
        self._check_colors(ax.collections, facecolors=["r"])
        # Ensure that we can pass an np.array straight through to matplotlib,
        # this functionality was accidentally removed previously.
        # See https://github.com/pandas-dev/pandas/issues/8852 for bug report
        #
        # Exercise colormap path and non-colormap path as they are independent
        #
        df = DataFrame({"A": [1, 2], "B": [3, 4]})
        red_rgba = [1.0, 0.0, 0.0, 1.0]
        green_rgba = [0.0, 1.0, 0.0, 1.0]
        rgba_array = np.array([red_rgba, green_rgba])
        ax = df.plot.scatter(x="A", y="B", c=rgba_array)
        # expect the face colors of the points in the non-colormap path to be
        # identical to the values we supplied, normally we'd be on shaky ground
        # comparing floats for equality but here we expect them to be
        # identical.
        tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array)
        # we don't test the colors of the faces in this next plot because they
        # are dependent on the spring colormap, which may change its colors
        # later.
        float_array = np.array([0.0, 1.0])
        df.plot.scatter(x="A", y="B", c=float_array, cmap="spring")
def test_scatter_colors(self):
df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
with pytest.raises(TypeError):
df.plot.scatter(x="a", y="b", c="c", color="green")
default_colors = self._unpack_cycler(self.plt.rcParams)
ax = df.plot.scatter(x="a", y="b", c="c")
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array(self.colorconverter.to_rgba(default_colors[0])),
)
ax = df.plot.scatter(x="a", y="b", color="white")
tm.assert_numpy_array_equal(
ax.collections[0].get_facecolor()[0],
np.array([1, 1, 1, 1], dtype=np.float64),
)
    @pytest.mark.slow
    def test_plot_bar(self):
        """Bar/barh plots: basic rendering, subplots, stacking, tick rotation."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        _check_plot_works(df.plot.bar)
        _check_plot_works(df.plot.bar, legend=False)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.plot.bar, subplots=True)
        _check_plot_works(df.plot.bar, stacked=True)
        # a wider frame with many columns must also render
        df = DataFrame(
            randn(10, 15), index=list(string.ascii_letters[:10]), columns=range(15)
        )
        _check_plot_works(df.plot.bar)
        df = DataFrame({"a": [0, 1], "b": [1, 0]})
        ax = _check_plot_works(df.plot.bar)
        # bar defaults to 90-degree x labels; barh defaults to horizontal y labels
        self._check_ticks_props(ax, xrot=90)
        ax = df.plot.bar(rot=35, fontsize=10)
        self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
        ax = _check_plot_works(df.plot.barh)
        self._check_ticks_props(ax, yrot=0)
        ax = df.plot.barh(rot=55, fontsize=11)
        self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
    def _check_bar_alignment(
        self,
        df,
        kind="bar",
        stacked=False,
        subplots=False,
        align="center",
        width=0.5,
        position=0.5,
    ):
        """Plot ``df`` as a bar chart and verify bar/tick alignment.

        Checks that the axis limits leave a 0.25 margin around the outermost
        bar edges and that, depending on ``align``, either the bar center
        ('center') or its leading edge ('edge') coincides with the first
        integer tick.  Returns the flattened visible axes.
        """
        axes = df.plot(
            kind=kind,
            stacked=stacked,
            subplots=subplots,
            align=align,
            width=width,
            position=position,
            grid=True,
        )
        axes = self._flatten_visible(axes)
        for ax in axes:
            # pick the value axis and patch extents for the orientation
            if kind == "bar":
                axis = ax.xaxis
                ax_min, ax_max = ax.get_xlim()
                min_edge = min(p.get_x() for p in ax.patches)
                max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
            elif kind == "barh":
                axis = ax.yaxis
                ax_min, ax_max = ax.get_ylim()
                min_edge = min(p.get_y() for p in ax.patches)
                max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
            else:
                raise ValueError
            # GH 7498
            # compare margins between lim and bar edges
            tm.assert_almost_equal(ax_min, min_edge - 0.25)
            tm.assert_almost_equal(ax_max, max_edge + 0.25)
            p = ax.patches[0]
            # unstacked multi-column plots draw len(df.columns) bars per tick,
            # so the group center is offset by the full group width
            if kind == "bar" and (stacked is True or subplots is True):
                edge = p.get_x()
                center = edge + p.get_width() * position
            elif kind == "bar" and stacked is False:
                center = p.get_x() + p.get_width() * len(df.columns) * position
                edge = p.get_x()
            elif kind == "barh" and (stacked is True or subplots is True):
                center = p.get_y() + p.get_height() * position
                edge = p.get_y()
            elif kind == "barh" and stacked is False:
                center = p.get_y() + p.get_height() * len(df.columns) * position
                edge = p.get_y()
            else:
                raise ValueError
            # Check the ticks locates on integer
            assert (axis.get_ticklocs() == np.arange(len(df))).all()
            if align == "center":
                # Check whether the bar locates on center
                tm.assert_almost_equal(axis.get_ticklocs()[0], center)
            elif align == "edge":
                # Check whether the bar's edge starts from the tick
                tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
            else:
                raise ValueError
        return axes
@pytest.mark.slow
def test_bar_stacked_center(self):
# GH2157
df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
self._check_bar_alignment(df, kind="bar", stacked=True)
self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9)
self._check_bar_alignment(df, kind="barh", stacked=True)
self._check_bar_alignment(df, kind="barh", stacked=True, width=0.9)
@pytest.mark.slow
def test_bar_center(self):
df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
self._check_bar_alignment(df, kind="bar", stacked=False)
self._check_bar_alignment(df, kind="bar", stacked=False, width=0.9)
self._check_bar_alignment(df, kind="barh", stacked=False)
self._check_bar_alignment(df, kind="barh", stacked=False, width=0.9)
@pytest.mark.slow
def test_bar_subplots_center(self):
df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
self._check_bar_alignment(df, kind="bar", subplots=True)
self._check_bar_alignment(df, kind="bar", subplots=True, width=0.9)
self._check_bar_alignment(df, kind="barh", subplots=True)
self._check_bar_alignment(df, kind="barh", subplots=True, width=0.9)
@pytest.mark.slow
def test_bar_align_single_column(self):
df = DataFrame(randn(5))
self._check_bar_alignment(df, kind="bar", stacked=False)
self._check_bar_alignment(df, kind="bar", stacked=True)
self._check_bar_alignment(df, kind="barh", stacked=False)
self._check_bar_alignment(df, kind="barh", stacked=True)
self._check_bar_alignment(df, kind="bar", subplots=True)
self._check_bar_alignment(df, kind="barh", subplots=True)
@pytest.mark.slow
def test_bar_edge(self):
df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
self._check_bar_alignment(df, kind="bar", stacked=True, align="edge")
self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9, align="edge")
self._check_bar_alignment(df, kind="barh", stacked=True, align="edge")
self._check_bar_alignment(
df, kind="barh", stacked=True, width=0.9, align="edge"
)
self._check_bar_alignment(df, kind="bar", stacked=False, align="edge")
self._check_bar_alignment(
df, kind="bar", stacked=False, width=0.9, align="edge"
)
self._check_bar_alignment(df, kind="barh", stacked=False, align="edge")
self._check_bar_alignment(
df, kind="barh", stacked=False, width=0.9, align="edge"
)
self._check_bar_alignment(df, kind="bar", subplots=True, align="edge")
self._check_bar_alignment(
df, kind="bar", subplots=True, width=0.9, align="edge"
)
self._check_bar_alignment(df, kind="barh", subplots=True, align="edge")
self._check_bar_alignment(
df, kind="barh", subplots=True, width=0.9, align="edge"
)
@pytest.mark.slow
def test_bar_log_no_subplots(self):
# GH3254, GH3298 matplotlib/matplotlib#1882, #1892
# regressions in 1.2.1
expected = np.array([0.1, 1.0, 10.0, 100])
# no subplots
df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
ax = df.plot.bar(grid=True, log=True)
tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
@pytest.mark.slow
def test_bar_log_subplots(self):
expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
log=True, subplots=True
)
tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
    @pytest.mark.slow
    def test_boxplot(self):
        """Basic boxplot: tick labels/locations, line counts, rotation, positions."""
        df = self.hist_df
        series = df["height"]
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        ax = _check_plot_works(df.plot.box)
        self._check_text_labels(ax.get_xticklabels(), labels)
        # one box per numeric column, ticks at 1..n
        tm.assert_numpy_array_equal(
            ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)
        )
        # each box is drawn with a fixed number of Line2D artists
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
        axes = series.plot.box(rot=40)
        self._check_ticks_props(axes, xrot=40, yrot=0)
        tm.close()
        ax = _check_plot_works(series.plot.box)
        # user-supplied box positions must be honoured
        positions = np.array([1, 6, 7])
        ax = df.plot.box(positions=positions)
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        self._check_text_labels(ax.get_xticklabels(), labels)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
    @pytest.mark.slow
    def test_boxplot_vertical(self):
        """Horizontal (vert=False) boxplots: rotated labels, subplots, positions."""
        df = self.hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        # if horizontal, yticklabels are rotated
        ax = df.plot.box(rot=50, fontsize=8, vert=False)
        self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
        self._check_text_labels(ax.get_yticklabels(), labels)
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.box, subplots=True, vert=False, logx=True)
        self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
        self._check_ax_scales(axes, xaxis="log")
        for ax, label in zip(axes, labels):
            # each subplot shows exactly one column's box
            self._check_text_labels(ax.get_yticklabels(), [label])
            assert len(ax.lines) == self.bp_n_objects
        # user-supplied positions must be honoured in horizontal layout too
        positions = np.array([3, 2, 8])
        ax = df.plot.box(positions=positions, vert=False)
        self._check_text_labels(ax.get_yticklabels(), labels)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
    @pytest.mark.slow
    def test_boxplot_return_type(self):
        """`return_type` must be validated and honoured ('dict'/'axes'/'both')."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        # unknown return_type is rejected
        with pytest.raises(ValueError):
            df.plot.box(return_type="NOTATYPE")
        result = df.plot.box(return_type="dict")
        self._check_box_return_type(result, "dict")
        result = df.plot.box(return_type="axes")
        self._check_box_return_type(result, "axes")
        result = df.plot.box()  # default axes
        self._check_box_return_type(result, "axes")
        result = df.plot.box(return_type="both")
        self._check_box_return_type(result, "both")
    @pytest.mark.slow
    def test_boxplot_subplots_return_type(self):
        """With subplots=True, the return value is a Series keyed by column."""
        df = self.hist_df
        # normal style: return_type=None
        result = df.plot.box(subplots=True)
        assert isinstance(result, Series)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )
        # explicit return types apply per-subplot
        for t in ["dict", "axes", "both"]:
            returned = df.plot.box(return_type=t, subplots=True)
            self._check_box_return_type(
                returned,
                t,
                expected_keys=["height", "weight", "category"],
                check_ax_title=False,
            )
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_df(self):
        """KDE plots: legend labels, tick rotation/fontsize, subplots, log y."""
        df = DataFrame(randn(100, 4))
        ax = _check_plot_works(df.plot, kind="kde")
        expected = [pprint_thing(c) for c in df.columns]
        self._check_legend_labels(ax, labels=expected)
        self._check_ticks_props(ax, xrot=0)
        ax = df.plot(kind="kde", rot=20, fontsize=5)
        self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, kind="kde", subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        axes = df.plot(kind="kde", logy=True, subplots=True)
        self._check_ax_scales(axes, yaxis="log")
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_missing_vals(self):
df = DataFrame(np.random.uniform(size=(100, 4)))
df.loc[0, 0] = np.nan
_check_plot_works(df.plot, kind="kde")
    @pytest.mark.slow
    def test_hist_df(self):
        """Histogram: legend labels, log subplots, cumulative bins, horizontal."""
        from matplotlib.patches import Rectangle

        df = DataFrame(randn(100, 4))
        series = df[0]
        ax = _check_plot_works(df.plot.hist)
        expected = [pprint_thing(c) for c in df.columns]
        self._check_legend_labels(ax, labels=expected)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.hist, subplots=True, logy=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        self._check_ax_scales(axes, yaxis="log")
        axes = series.plot.hist(rot=40)
        self._check_ticks_props(axes, xrot=40, yrot=0)
        tm.close()
        ax = series.plot.hist(cumulative=True, bins=4, density=True)
        # height of the last bin must be 1.0 for a cumulative density histogram
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        tm.assert_almost_equal(rects[-1].get_height(), 1.0)
        tm.close()
        ax = series.plot.hist(cumulative=True, bins=4)
        # without density=True, the cumulative count reaches the sample size
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        tm.assert_almost_equal(rects[-2].get_height(), 100.0)
        tm.close()
        # if horizontal, yticklabels are rotated
        axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal")
        self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
    def _check_box_coord(
        self,
        patches,
        expected_y=None,
        expected_h=None,
        expected_x=None,
        expected_w=None,
    ):
        """Assert histogram patch coordinates and sizes.

        Compares y/height (vertical histograms) or x/width (horizontal
        histograms) of every Rectangle in ``patches`` against the expected
        arrays; any expectation left as ``None`` is skipped.
        """
        result_y = np.array([p.get_y() for p in patches])
        result_height = np.array([p.get_height() for p in patches])
        result_x = np.array([p.get_x() for p in patches])
        result_width = np.array([p.get_width() for p in patches])
        # dtype is depending on above values, no need to check
        if expected_y is not None:
            tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False)
        if expected_h is not None:
            tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False)
        if expected_x is not None:
            tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False)
        if expected_w is not None:
            tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)
    @pytest.mark.slow
    def test_hist_df_coord(self):
        """Patch geometry for plain/stacked/subplot/horizontal histograms.

        NaNs must be dropped per-column, so ``nan_df`` yields the same bar
        geometry as ``normal_df``.
        """
        normal_df = DataFrame(
            {
                "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])),
                "B": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])),
                "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])),
            },
            columns=["A", "B", "C"],
        )
        nan_df = DataFrame(
            {
                "A": np.repeat(
                    np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])
                ),
                "B": np.repeat(
                    np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8])
                ),
                "C": np.repeat(
                    np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10])
                ),
            },
            columns=["A", "B", "C"],
        )
        for df in [normal_df, nan_df]:
            # unstacked: every column's bars start at y=0
            ax = df.plot.hist(bins=5)
            self._check_box_coord(
                ax.patches[:5],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([6, 7, 8, 9, 10]),
            )
            # stacked: later columns start on top of the accumulated heights
            ax = df.plot.hist(bins=5, stacked=True)
            self._check_box_coord(
                ax.patches[:5],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_y=np.array([10, 9, 8, 7, 6]),
                expected_h=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_y=np.array([18, 17, 16, 15, 14]),
                expected_h=np.array([6, 7, 8, 9, 10]),
            )
            # subplots: stacking is irrelevant, every axis starts at y=0
            axes = df.plot.hist(bins=5, stacked=True, subplots=True)
            self._check_box_coord(
                axes[0].patches,
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                axes[1].patches,
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                axes[2].patches,
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([6, 7, 8, 9, 10]),
            )
            # horizontal
            ax = df.plot.hist(bins=5, orientation="horizontal")
            self._check_box_coord(
                ax.patches[:5],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([6, 7, 8, 9, 10]),
            )
            # horizontal + stacked
            ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal")
            self._check_box_coord(
                ax.patches[:5],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_x=np.array([10, 9, 8, 7, 6]),
                expected_w=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_x=np.array([18, 17, 16, 15, 14]),
                expected_w=np.array([6, 7, 8, 9, 10]),
            )
            # horizontal + subplots
            axes = df.plot.hist(
                bins=5, stacked=True, subplots=True, orientation="horizontal"
            )
            self._check_box_coord(
                axes[0].patches,
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                axes[1].patches,
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                axes[2].patches,
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([6, 7, 8, 9, 10]),
            )
@pytest.mark.slow
def test_plot_int_columns(self):
df = DataFrame(randn(100, 4)).cumsum()
_check_plot_works(df.plot, legend=True)
    @pytest.mark.slow
    def test_df_legend_labels(self):
        """Legend labels across kinds, shared axes, secondary_y, scatter, label=."""
        kinds = ["line", "bar", "barh", "kde", "area", "hist"]
        df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
        df2 = DataFrame(rand(3, 3), columns=["d", "e", "f"])
        df3 = DataFrame(rand(3, 3), columns=["g", "h", "i"])
        df4 = DataFrame(rand(3, 3), columns=["j", "k", "l"])
        for kind in kinds:
            ax = df.plot(kind=kind, legend=True)
            self._check_legend_labels(ax, labels=df.columns)
            # legend=False must not add entries to the existing legend
            ax = df2.plot(kind=kind, legend=False, ax=ax)
            self._check_legend_labels(ax, labels=df.columns)
            ax = df3.plot(kind=kind, legend=True, ax=ax)
            self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
            # legend="reverse" reverses only the newly added labels
            ax = df4.plot(kind=kind, legend="reverse", ax=ax)
            expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
            self._check_legend_labels(ax, labels=expected)
        # Secondary Y: secondary columns get a " (right)" suffix
        ax = df.plot(legend=True, secondary_y="b")
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df2.plot(legend=False, ax=ax)
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax)
        self._check_legend_labels(
            ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]
        )
        # Time Series
        ind = date_range("1/1/2014", periods=3)
        df = DataFrame(randn(3, 3), columns=["a", "b", "c"], index=ind)
        df2 = DataFrame(randn(3, 3), columns=["d", "e", "f"], index=ind)
        df3 = DataFrame(randn(3, 3), columns=["g", "h", "i"], index=ind)
        ax = df.plot(legend=True, secondary_y="b")
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df2.plot(legend=False, ax=ax)
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df3.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
        # scatter
        ax = df.plot.scatter(x="a", y="b", label="data1")
        self._check_legend_labels(ax, labels=["data1"])
        ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax)
        self._check_legend_labels(ax, labels=["data1"])
        ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax)
        self._check_legend_labels(ax, labels=["data1", "data3"])
        # ensure label args pass through and
        # index name does not mutate
        # column names don't mutate
        df5 = df.set_index("a")
        ax = df5.plot(y="b")
        self._check_legend_labels(ax, labels=["b"])
        ax = df5.plot(y="b", label="LABEL_b")
        self._check_legend_labels(ax, labels=["LABEL_b"])
        self._check_text_labels(ax.xaxis.get_label(), "a")
        ax = df5.plot(y="c", label="LABEL_c", ax=ax)
        self._check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
        assert df5.columns.tolist() == ["b", "c"]
    def test_missing_marker_multi_plots_on_same_ax(self):
        # GH 18222: each line must keep its own marker regardless of plot order
        df = pd.DataFrame(
            data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]
        )
        fig, ax = self.plt.subplots(nrows=1, ncols=3)
        # Left plot
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0])
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0])
        self._check_legend_labels(ax[0], labels=["r", "g", "b"])
        self._check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
        # Center plot
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1])
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1])
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1])
        self._check_legend_labels(ax[1], labels=["b", "r", "g"])
        self._check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
        # Right plot
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2])
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2])
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2])
        self._check_legend_labels(ax[2], labels=["g", "b", "r"])
        self._check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
    def test_legend_name(self):
        """Legend title reflects the columns' name(s), updating only when redrawn."""
        multi = DataFrame(
            randn(4, 4),
            columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])],
        )
        multi.columns.names = ["group", "individual"]
        # MultiIndex column names are joined with a comma
        ax = multi.plot()
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        df = DataFrame(randn(5, 5))
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        df.columns.name = "new"
        # legend=False leaves the previous legend (and its title) in place
        ax = df.plot(legend=False, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        # redrawing the legend picks up the new columns name
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "new")
@pytest.mark.slow
def test_no_legend(self):
kinds = ["line", "bar", "barh", "kde", "area", "hist"]
df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
for kind in kinds:
ax = df.plot(kind=kind, legend=False)
self._check_legend_labels(ax, visible=False)
    @pytest.mark.slow
    def test_style_by_column(self):
        """`style=` accepts a dict keyed by column or a list of marker styles."""
        import matplotlib.pyplot as plt

        fig = plt.gcf()
        df = DataFrame(randn(100, 3))
        for markers in [
            {0: "^", 1: "+", 2: "o"},
            {0: "^", 1: "+"},
            ["^", "+", "o"],
            ["^", "+"],
        ]:
            fig.clf()
            fig.add_subplot(111)
            ax = df.plot(style=markers)
            # markers[i] works for both the dict (int keys) and the list cases
            for i, l in enumerate(ax.get_lines()[: len(markers)]):
                assert l.get_marker() == markers[i]
@pytest.mark.slow
def test_line_label_none(self):
s = Series([1, 2])
ax = s.plot()
assert ax.get_legend() is None
ax = s.plot(legend=True)
assert ax.get_legend().get_texts()[0].get_text() == "None"
    @pytest.mark.slow
    def test_line_colors(self):
        """Line colors: color-cycle string, colormap name/object, single, hex."""
        from matplotlib import cm

        custom_colors = "rgcby"
        df = DataFrame(randn(5, 5))
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        # re-plotting with the same colors must yield identical line colors
        ax2 = df.plot(color=custom_colors)
        lines2 = ax2.get_lines()
        for l1, l2 in zip(ax.get_lines(), lines2):
            assert l1.get_color() == l2.get_color()
        tm.close()
        ax = df.plot(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        ax = df.plot(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        ax = df.loc[:, [0]].plot(color="DodgerBlue")
        self._check_colors(ax.lines, linecolors=["DodgerBlue"])
        ax = df.plot(color="red")
        self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
        tm.close()
        # GH 10299
        custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        with pytest.raises(ValueError):
            # Color contains shorthand hex value results in ValueError
            custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"]
            # Forced show plot
            _check_plot_works(df.plot, color=custom_colors)
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ["r", "g", "b"]
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
    @pytest.mark.slow
    def test_line_colors_and_styles_subplots(self):
        """Per-subplot line colors for cycler/char/name/hex colors, colormaps, styles."""
        # GH 9894
        from matplotlib import cm

        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        # default: each subplot takes the next cycler color
        axes = df.plot(subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            c = [c]
            self._check_colors(ax.get_lines(), linecolors=c)
        tm.close()
        # single color char
        axes = df.plot(subplots=True, color="k")
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["k"])
        tm.close()
        # single color str
        axes = df.plot(subplots=True, color="green")
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["green"])
        tm.close()
        custom_colors = "rgcby"
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        axes = df.plot(color=list(custom_colors), subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # GH 10299
        custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        with pytest.raises(ValueError):
            # Color contains shorthand hex value results in ValueError
            custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"]
            # Forced show plot
            # _check_plot_works adds an ax so catch warning. see GH #13188
            with tm.assert_produces_warning(UserWarning):
                _check_plot_works(df.plot, color=custom_colors, subplots=True)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        for cmap in ["jet", cm.jet]:
            axes = df.plot(colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
        # single character style
        axes = df.plot(style="r", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["r"])
        tm.close()
        # list of styles
        styles = list("rgcby")
        axes = df.plot(style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @pytest.mark.slow
    def test_area_colors(self):
        """Area plots: lines, PolyCollections, and legend handles must agree."""
        from matplotlib import cm
        from matplotlib.collections import PolyCollection

        custom_colors = "rgcby"
        df = DataFrame(rand(5, 5))
        ax = df.plot.area(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=custom_colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=custom_colors)
        # stacked (default): fully opaque, no alpha set
        for h in handles:
            assert h.get_alpha() is None
        tm.close()
        ax = df.plot.area(colormap="jet")
        jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=jet_colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=jet_colors)
        for h in handles:
            assert h.get_alpha() is None
        tm.close()
        # When stacked=False, alpha is set to 0.5
        ax = df.plot.area(colormap=cm.jet, stacked=False)
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
        self._check_colors(poly, facecolors=jet_with_alpha)
        handles, labels = ax.get_legend_handles_labels()
        linecolors = jet_with_alpha
        self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
        for h in handles:
            assert h.get_alpha() == 0.5
    @pytest.mark.slow
    def test_hist_colors(self):
        """Histogram patch colors: cycler default, per-column, colormap, single."""
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        ax = df.plot.hist()
        # sample one patch per column (stride 10 matches one column's patches)
        self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
        tm.close()
        custom_colors = "rgcby"
        ax = df.plot.hist(color=custom_colors)
        self._check_colors(ax.patches[::10], facecolors=custom_colors)
        tm.close()
        from matplotlib import cm

        # Test str -> colormap functionality
        ax = df.plot.hist(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
        tm.close()
        # Test colormap functionality
        ax = df.plot.hist(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
        tm.close()
        ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
        self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
        ax = df.plot(kind="hist", color="green")
        self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
        tm.close()
@pytest.mark.slow
@td.skip_if_no_scipy
def test_kde_colors(self):
from matplotlib import cm
custom_colors = "rgcby"
df = DataFrame(rand(5, 5))
ax = df.plot.kde(color=custom_colors)
self._check_colors(ax.get_lines(), linecolors=custom_colors)
tm.close()
ax = df.plot.kde(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
tm.close()
ax = df.plot.kde(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
self._check_colors(ax.get_lines(), linecolors=rgba_colors)
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_colors_and_styles_subplots(self):
        """With subplots=True each KDE axes gets one line; colors/styles distribute per column."""
        from matplotlib import cm
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        axes = df.plot(kind="kde", subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # single color char
        axes = df.plot(kind="kde", color="k", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["k"])
        tm.close()
        # single color str
        axes = df.plot(kind="kde", color="red", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["red"])
        tm.close()
        custom_colors = "rgcby"
        axes = df.plot(kind="kde", color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # colormap by name and by object give the same per-axes colors
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        for cmap in ["jet", cm.jet]:
            axes = df.plot(kind="kde", colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
        # single character style
        axes = df.plot(kind="kde", style="r", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["r"])
        tm.close()
        # list of styles
        styles = list("rgcby")
        axes = df.plot(kind="kde", style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @pytest.mark.slow
    def test_boxplot_colors(self):
        """Boxplot artists (boxes/whiskers/medians/caps/fliers) pick up colors from defaults, dicts, colormaps and scalars."""
        def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
            # Assert every artist group in the boxplot dict has the expected line color.
            # TODO: outside this func?
            if fliers_c is None:
                fliers_c = "k"
            self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
            self._check_colors(
                bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
            )
            self._check_colors(
                bp["medians"], linecolors=[medians_c] * len(bp["medians"])
            )
            self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
            self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))
        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        bp = df.plot.box(return_type="dict")
        _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
        tm.close()
        # full per-artist color dict; sym= sets the flier color
        dict_colors = dict(
            boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
        )
        bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
        _check_colors(
            bp,
            dict_colors["boxes"],
            dict_colors["whiskers"],
            dict_colors["medians"],
            dict_colors["caps"],
            "r",
        )
        tm.close()
        # partial colors
        dict_colors = dict(whiskers="c", medians="m")
        bp = df.plot.box(color=dict_colors, return_type="dict")
        _check_colors(bp, default_colors[0], "c", "m")
        tm.close()
        from matplotlib import cm
        # Test str -> colormap functionality
        bp = df.plot.box(colormap="jet", return_type="dict")
        jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()
        # Test colormap functionality
        bp = df.plot.box(colormap=cm.jet, return_type="dict")
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()
        # string color is applied to all artists except fliers
        bp = df.plot.box(color="DodgerBlue", return_type="dict")
        _check_colors(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")
        # tuple is also applied to all artists except fliers
        bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
        _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")
        with pytest.raises(ValueError):
            # Color contains invalid key results in ValueError
            df.plot.box(color=dict(boxes="red", xxxx="blue"))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
import cycler
colors = list("rgbk")
plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
    def test_unordered_ts(self):
        """A date index out of order is sorted before plotting; y follows x."""
        df = DataFrame(
            np.array([3.0, 2.0, 1.0]),
            index=[date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)],
            columns=["test"],
        )
        ax = df.plot()
        xticks = ax.lines[0].get_xdata()
        # x data is monotonically increasing after the implicit sort
        assert xticks[0] < xticks[1]
        ydata = ax.lines[0].get_ydata()
        # y values were reordered together with the index
        tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
    @td.skip_if_no_scipy
    def test_kind_both_ways(self):
        """Every kind works both as df.plot(kind=...) and as df.plot.<kind>()."""
        df = DataFrame({"x": [1, 2, 3]})
        for kind in plotting.PlotAccessor._common_kinds:
            df.plot(kind=kind)
            getattr(df.plot, kind)()
        # scatter/hexbin additionally require x and y
        for kind in ["scatter", "hexbin"]:
            df.plot("x", "x", kind=kind)
            getattr(df.plot, kind)("x", "x")
    def test_all_invalid_plot_data(self):
        """A frame with no numeric columns raises TypeError for every common kind."""
        df = DataFrame(list("abcd"))
        for kind in plotting.PlotAccessor._common_kinds:
            msg = "no numeric data to plot"
            with pytest.raises(TypeError, match=msg):
                df.plot(kind=kind)
    @pytest.mark.slow
    def test_partially_invalid_plot_data(self):
        """Object-dtype frames with strings mixed in raise TypeError when plotted."""
        with tm.RNGContext(42):
            df = DataFrame(randn(10, 2), dtype=object)
            # replace roughly half the rows with a string value
            df[np.random.rand(df.shape[0]) > 0.5] = "a"
            for kind in plotting.PlotAccessor._common_kinds:
                msg = "no numeric data to plot"
                with pytest.raises(TypeError, match=msg):
                    df.plot(kind=kind)
        with tm.RNGContext(42):
            # area plot doesn't support positive/negative mixed data
            kinds = ["area"]
            df = DataFrame(rand(10, 2), dtype=object)
            df[np.random.rand(df.shape[0]) > 0.5] = "a"
            for kind in kinds:
                with pytest.raises(TypeError):
                    df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind="aasdf")
    @pytest.mark.parametrize(
        "x,y,lbl",
        [
            (["B", "C"], "A", "a"),
            (["A"], ["B", "C"], ["b", "c"]),
            ("A", ["B", "C"], "badlabel"),
        ],
    )
    def test_invalid_xy_args(self, x, y, lbl):
        """Reject list-like x, and label lists that do not match y (GH 18671, 19699)."""
        # GH 18671, 19699 allows y to be list-like but not x
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        with pytest.raises(ValueError):
            df.plot(x=x, y=y, label=lbl)
    @pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")])
    def test_invalid_xy_args_dup_cols(self, x, y):
        """Ambiguous x selection on duplicated column labels raises (GH 18671, 19699)."""
        # GH 18671, 19699 allows y to be list-like but not x
        df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB"))
        with pytest.raises(ValueError):
            df.plot(x=x, y=y)
    @pytest.mark.parametrize(
        "x,y,lbl,colors",
        [
            ("A", ["B"], ["b"], ["red"]),
            ("A", ["B", "C"], ["b", "c"], ["red", "blue"]),
            (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]),
        ],
    )
    def test_y_listlike(self, x, y, lbl, colors):
        """List-like y produces one line per entry with matching labels/colors."""
        # GH 19699: tests list-like y and verifies lbls & colors
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        _check_plot_works(df.plot, x="A", y=y, label=lbl)
        ax = df.plot(x=x, y=y, label=lbl, color=colors)
        assert len(ax.lines) == len(y)
        self._check_colors(ax.get_lines(), linecolors=colors)
    @pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])])
    def test_xy_args_integer(self, x, y, colnames):
        """Integer x/y positions resolve against both string and integer column labels."""
        # GH 20056: tests integer args for xy and checks col names
        df = DataFrame({"A": [1, 2], "B": [3, 4]})
        df.columns = colnames
        _check_plot_works(df.plot, x=x, y=y)
    @pytest.mark.slow
    def test_hexbin_basic(self):
        """hexbin creates one collection; subplots mode still yields one plot axes plus colorbar."""
        df = self.hexbin_df
        ax = df.plot.hexbin(x="A", y="B", gridsize=10)
        # TODO: need better way to test. This just does existence.
        assert len(ax.collections) == 1
        # GH 6951
        axes = df.plot.hexbin(x="A", y="B", subplots=True)
        # hexbin should have 2 axes in the figure, 1 for plotting and another
        # is colorbar
        assert len(axes[0].figure.axes) == 2
        # return value is single axes
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", C="C")
assert len(ax.collections) == 1
ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
# Default to BuGn
ax = df.plot.hexbin(x="A", y="B")
assert ax.collections[0].cmap.name == "BuGn"
cm = "cubehelix"
ax = df.plot.hexbin(x="A", y="B", colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", cmap="YlGn")
assert ax.collections[0].cmap.name == "YlGn"
with pytest.raises(TypeError):
df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
    @pytest.mark.slow
    def test_pie_df(self):
        """Pie plots require y (or subplots=True) and honor labels/colors per wedge."""
        df = DataFrame(
            np.random.rand(5, 3),
            columns=["X", "Y", "Z"],
            index=["a", "b", "c", "d", "e"],
        )
        # y (or subplots=True) is mandatory for DataFrame pie plots
        with pytest.raises(ValueError):
            df.plot.pie()
        ax = _check_plot_works(df.plot.pie, y="Y")
        self._check_text_labels(ax.texts, df.index)
        # y given as a positional column index
        ax = _check_plot_works(df.plot.pie, y=2)
        self._check_text_labels(ax.texts, df.index)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.pie, subplots=True)
        assert len(axes) == len(df.columns)
        for ax in axes:
            self._check_text_labels(ax.texts, df.index)
        for ax, ylabel in zip(axes, df.columns):
            assert ax.get_ylabel() == ylabel
        labels = ["A", "B", "C", "D", "E"]
        color_args = ["r", "g", "b", "c", "m"]
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(
                df.plot.pie, subplots=True, labels=labels, colors=color_args
            )
        assert len(axes) == len(df.columns)
        for ax in axes:
            self._check_text_labels(ax.texts, labels)
            self._check_colors(ax.patches, facecolors=color_args)
    def test_pie_df_nan(self):
        """NaN wedges get an empty label and are dropped from the legend (GH 8390)."""
        df = DataFrame(np.random.rand(4, 4))
        # put one NaN on the diagonal so each column has exactly one missing wedge
        for i in range(4):
            df.iloc[i, i] = np.nan
        fig, axes = self.plt.subplots(ncols=4)
        df.plot.pie(subplots=True, ax=axes, legend=True)
        base_expected = ["0", "1", "2", "3"]
        for i, ax in enumerate(axes):
            expected = list(base_expected)  # force copy
            expected[i] = ""
            result = [x.get_text() for x in ax.texts]
            assert result == expected
            # legend labels
            # NaN's not included in legend with subplots
            # see https://github.com/pandas-dev/pandas/issues/8390
            assert [x.get_text() for x in ax.get_legend().get_texts()] == base_expected[
                :i
            ] + base_expected[i + 1 :]
    @pytest.mark.slow
    def test_errorbar_plot(self):
        """Errorbars are drawn from DataFrame/dict/Series/scalar/column-name yerr and xerr."""
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            df = DataFrame(d)
            d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
            df_err = DataFrame(d_err)
            # check line plots
            ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            kinds = ["line", "bar", "barh"]
            for kind in kinds:
                # a single Series of errors is broadcast to both columns
                ax = _check_plot_works(df.plot, yerr=df_err["x"], kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                ax = _check_plot_works(
                    df.plot, yerr=df_err["x"], xerr=df_err["x"], kind=kind
                )
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                # _check_plot_works adds an ax so catch warning. see GH #13188
                axes = _check_plot_works(
                    df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind
                )
                self._check_has_errorbars(axes, xerr=1, yerr=1)
            ax = _check_plot_works(
                (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
            )
            self._check_has_errorbars(ax, xerr=2, yerr=2)
            # yerr is raw error values
            ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            # yerr is column name
            for yerr in ["yerr", "誤差"]:
                s_df = df.copy()
                s_df[yerr] = np.ones(12) * 0.2
                ax = _check_plot_works(s_df.plot, yerr=yerr)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(s_df.plot, y="y", x="x", yerr=yerr)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
            # wrong-length or non-numeric errors must raise
            with pytest.raises(ValueError):
                df.plot(yerr=np.random.randn(11))
            df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12})
            with pytest.raises((ValueError, TypeError)):
                df.plot(yerr=df_err)
    @pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
    @pytest.mark.slow
    def test_errorbar_plot_iterator(self):
        """Known failure: an iterator yerr is consumed during validation."""
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            df = DataFrame(d)
            # yerr is iterator
            ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
            self._check_has_errorbars(ax, xerr=0, yerr=2)
    @pytest.mark.slow
    def test_errorbar_with_integer_column_names(self):
        """Errorbar column lookups also work with integer column labels."""
        # test with integer column names
        df = DataFrame(np.random.randn(10, 2))
        df_err = DataFrame(np.random.randn(10, 2))
        ax = _check_plot_works(df.plot, yerr=df_err)
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        # yerr given as an integer column label of df itself
        ax = _check_plot_works(df.plot, y=0, yerr=1)
        self._check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_with_partial_columns(self):
        """Error frames covering only a subset of columns attach bars to those columns only."""
        df = DataFrame(np.random.randn(10, 3))
        # errors only for columns 0 and 2 of a 3-column frame
        df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
        kinds = ["line", "bar"]
        for kind in kinds:
            ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
        ix = date_range("1/1/2000", periods=10, freq="M")
        df.set_index(ix, inplace=True)
        df_err.set_index(ix, inplace=True)
        ax = _check_plot_works(df.plot, yerr=df_err, kind="line")
        self._check_has_errorbars(ax, xerr=0, yerr=2)
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        # key "z" matches no column, so only "x" gets errorbars
        d_err = {"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}
        df_err = DataFrame(d_err)
        for err in [d_err, df_err]:
            ax = _check_plot_works(df.plot, yerr=err)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_timeseries(self):
        """Errorbars work on DatetimeIndex frames for line/bar/barh, incl. subplots."""
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
            # check time-series plots
            ix = date_range("1/1/2000", "1/1/2001", freq="M")
            tdf = DataFrame(d, index=ix)
            tdf_err = DataFrame(d_err, index=ix)
            kinds = ["line", "bar", "barh"]
            for kind in kinds:
                ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
                # yerr given as a column name of tdf itself
                ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
                ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                # _check_plot_works adds an ax so catch warning. see GH #13188
                axes = _check_plot_works(
                    tdf.plot, kind=kind, yerr=tdf_err, subplots=True
                )
                self._check_has_errorbars(axes, xerr=0, yerr=1)
    def test_errorbar_asymmetrical(self):
        """A (ncols, 2, npoints) error array gives asymmetric bars; wrong shape raises."""
        np.random.seed(0)
        err = np.random.rand(3, 2, 5)
        # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...
        df = DataFrame(np.arange(15).reshape(3, 5)).T
        ax = df.plot(yerr=err, xerr=err / 2)
        # vertical extent of the first errorbar of column 0
        yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
        expected_0_0 = err[0, :, 0] * np.array([-1, 1])
        tm.assert_almost_equal(yerr_0_0, expected_0_0)
        # transposed error array has the wrong shape and must raise
        with pytest.raises(ValueError):
            df.plot(yerr=err.T)
        tm.close()
    def test_table(self):
        """table=True / table=frame draws a table; plotting.table adds one to an axes."""
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
        _check_plot_works(df.plot, table=True)
        _check_plot_works(df.plot, table=df)
        ax = df.plot()
        # a plain plot carries no table artists
        assert len(ax.tables) == 0
        plotting.table(ax, df.T)
        assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=["x", "y"])
df_err = DataFrame(
np.random.randn(5, 2) / 5, index=range(5), columns=["x", "y"]
)
ax = _check_plot_works(df.plot.scatter, x="x", y="y")
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err="has_xerr"):
lines = []
errs = [c.lines for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines))
)
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=["a", "b", "c", "d", "e"])
ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red")
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, "red", has_err="has_xerr")
_check_errorbar_color(ax.containers, "red", has_err="has_yerr")
ax = df.plot.scatter(x="a", y="b", yerr="e", color="green")
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, "green", has_err="has_yerr")
    @pytest.mark.slow
    def test_sharex_and_ax(self):
        """sharex=True hides only the x ticklabels of non-bottom gridspec axes (GH 9737)."""
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axis in fig.get_axis() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt
        plt.close("all")
        gs, axes = _generate_4_axes_via_gridspec()
        df = DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6],
                "b": [1, 2, 3, 4, 5, 6],
                "c": [1, 2, 3, 4, 5, 6],
                "d": [1, 2, 3, 4, 5, 6],
            }
        )
        def _check(axes):
            # axes come back column-major: [top-left, lower-left, top-right, lower-right]
            for ax in axes:
                assert len(ax.lines) == 1
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[0], axes[2]]:
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            for ax in [axes[1], axes[3]]:
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @pytest.mark.slow
    def test_sharey_and_ax(self):
        """sharey=True hides only the y ticklabels of right-column gridspec axes (GH 9737)."""
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axis in fig.get_axis() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt
        gs, axes = _generate_4_axes_via_gridspec()
        df = DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6],
                "b": [1, 2, 3, 4, 5, 6],
                "c": [1, 2, 3, 4, 5, 6],
                "d": [1, 2, 3, 4, 5, 6],
            }
        )
        def _check(axes):
            # axes come back column-major: left column keeps y labels, right loses them
            for ax in axes:
                assert len(ax.lines) == 1
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            for ax in [axes[0], axes[1]]:
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[2], axes[3]]:
                self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()
        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
    @td.skip_if_no_scipy
    def test_memory_leak(self):
        """ Check that every plot type gets properly collected. """
        import weakref
        import gc
        results = {}
        for kind in plotting.PlotAccessor._all_kinds:
            args = {}
            if kind in ["hexbin", "scatter", "pie"]:
                # these kinds require explicit x/y columns
                df = self.hexbin_df
                args = {"x": "A", "y": "B"}
            elif kind == "area":
                # area plots reject negative values, so take abs()
                df = self.tdf.abs()
            else:
                df = self.tdf
            # Use a weakref so we can see if the object gets collected without
            # also preventing it from being collected
            results[kind] = weakref.proxy(df.plot(kind=kind, **args))
        # have matplotlib delete all the figures
        tm.close()
        # force a garbage collection
        gc.collect()
        for key in results:
            # check that every plot was collected
            with pytest.raises(ReferenceError):
                # need to actually access something to get an error
                results[key].lines
    @pytest.mark.slow
    def test_df_subplots_patterns_minorticks(self):
        """Shared subplots hide the upper axes' x ticklabels; unshared keep all (GH 10657)."""
        # GH 10657
        import matplotlib.pyplot as plt
        df = DataFrame(
            np.random.randn(10, 2),
            index=date_range("1/1/2000", periods=10),
            columns=list("AB"),
        )
        # shared subplots
        fig, axes = plt.subplots(2, 1, sharex=True)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # sharing requested via df.plot rather than plt.subplots
        fig, axes = plt.subplots(2, 1)
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # not shared
        fig, axes = plt.subplots(2, 1)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @pytest.mark.slow
    def test_df_gridspec_patterns(self):
        """Tick-label visibility is correct for vertical/horizontal/boxed gridspec layouts (GH 10819)."""
        # GH 10819
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec
        ts = Series(np.random.randn(10), index=date_range("1/1/2000", periods=10))
        df = DataFrame(np.random.randn(10, 2), index=ts.index, columns=list("AB"))
        def _get_vertical_grid():
            # tall top axes over a short bottom axes
            gs = gridspec.GridSpec(3, 1)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:2, :])
            ax2 = fig.add_subplot(gs[2, :])
            return ax1, ax2
        def _get_horizontal_grid():
            # wide left axes beside a narrow right axes
            gs = gridspec.GridSpec(1, 3)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:, :2])
            ax2 = fig.add_subplot(gs[:, 2])
            return ax1, ax2
        for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
            ax1 = ts.plot(ax=ax1)
            assert len(ax1.lines) == 1
            ax2 = df.plot(ax=ax2)
            assert len(ax2.lines) == 2
            for ax in [ax1, ax2]:
                self._check_visible(ax.get_yticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            tm.close()
        # subplots=True
        for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
            axes = df.plot(subplots=True, ax=[ax1, ax2])
            assert len(ax1.lines) == 1
            assert len(ax2.lines) == 1
            for ax in axes:
                self._check_visible(ax.get_yticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            tm.close()
        # vertical / subplots / sharex=True / sharey=True
        ax1, ax2 = _get_vertical_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
        assert len(axes[0].lines) == 1
        assert len(axes[1].lines) == 1
        for ax in [ax1, ax2]:
            # yaxis are visible because there is only one column
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of axes0 (top) are hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()
        # horizontal / subplots / sharex=True / sharey=True
        ax1, ax2 = _get_horizontal_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
        assert len(axes[0].lines) == 1
        assert len(axes[1].lines) == 1
        self._check_visible(axes[0].get_yticklabels(), visible=True)
        # yaxis of axes1 (right) are hidden
        self._check_visible(axes[1].get_yticklabels(), visible=False)
        for ax in [ax1, ax2]:
            # xaxis are visible because there is only one column
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
        # boxed
        def _get_boxed_grid():
            # 2x2 layout with a large top-left cell
            gs = gridspec.GridSpec(3, 3)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:2, :2])
            ax2 = fig.add_subplot(gs[:2, 2])
            ax3 = fig.add_subplot(gs[2, :2])
            ax4 = fig.add_subplot(gs[2, 2])
            return ax1, ax2, ax3, ax4
        axes = _get_boxed_grid()
        df = DataFrame(np.random.randn(10, 4), index=ts.index, columns=list("ABCD"))
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            # axis are visible because these are not shared
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
        # subplots / sharex=True / sharey=True
        axes = _get_boxed_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
        for ax in axes:
            assert len(ax.lines) == 1
        for ax in [axes[0], axes[2]]:  # left column
            self._check_visible(ax.get_yticklabels(), visible=True)
        for ax in [axes[1], axes[3]]:  # right column
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in [axes[0], axes[1]]:  # top row
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
        for ax in [axes[2], axes[3]]:  # bottom row
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @pytest.mark.slow
    def test_df_grid_settings(self):
        """Plots default to the rcParams['axes.grid'] setting (GH 9792)."""
        # Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
        self._check_grid_settings(
            DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}),
            plotting.PlotAccessor._dataframe_kinds,
            kws={"x": "a", "y": "b"},
        )
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=["A", "B"])
with pytest.raises(ValueError):
df.plot(colormap="invalid_colormap")
    def test_plain_axes(self):
        """Plotting works on plain (non-Subplot) Axes and in mixed-axes figures (GH 11556, GH 11520)."""
        # supplied ax itself is a SubplotAxes, but figure contains also
        # a plain Axes object (GH11556)
        fig, ax = self.plt.subplots()
        fig.add_axes([0.2, 0.2, 0.2, 0.2])
        Series(rand(10)).plot(ax=ax)
        # supplied ax itself is a plain Axes, but because the cmap keyword
        # a new ax is created for the colorbar -> also multiples axes (GH11520)
        df = DataFrame({"a": randn(8), "b": randn(8)})
        fig = self.plt.figure()
        ax = fig.add_axes((0, 0, 1, 1))
        df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv")
        # other examples
        fig, ax = self.plt.subplots()
        from mpl_toolkits.axes_grid1 import make_axes_locatable
        # axes appended via axes_grid1 divider
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        Series(rand(10)).plot(ax=ax)
        Series(rand(10)).plot(ax=cax)
        fig, ax = self.plt.subplots()
        from mpl_toolkits.axes_grid1.inset_locator import inset_axes
        # inset axes inside a parent axes
        iax = inset_axes(ax, width="30%", height=1.0, loc=3)
        Series(rand(10)).plot(ax=ax)
        Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
    @pytest.mark.parametrize("method", ["line", "barh", "bar"])
    def test_secondary_axis_font_size(self, method):
        """fontsize= applies to the secondary-y axis ticklabels too (GH 12565)."""
        # GH: 12565
        df = (
            pd.DataFrame(np.random.randn(15, 2), columns=list("AB"))
            .assign(C=lambda df: df.B.cumsum())
            .assign(D=lambda df: df.C * 1.1)
        )
        fontsize = 20
        sy = ["C", "D"]
        kwargs = dict(secondary_y=sy, fontsize=fontsize, mark_right=True)
        ax = getattr(df.plot, method)(**kwargs)
        self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
    @pytest.mark.slow
    def test_x_string_values_ticks(self):
        """String x values keep fixed tick positions after changing xlim (GH 7612, 22334)."""
        # Test if string plot index have a fixed xtick position
        # GH: 7612, GH: 22334
        df = pd.DataFrame(
            {
                "sales": [3, 2, 3],
                "visits": [20, 42, 28],
                "day": ["Monday", "Tuesday", "Wednesday"],
            }
        )
        ax = df.plot.area(x="day")
        # widening xlim must not shift the category positions
        ax.set_xlim(-1, 3)
        xticklabels = [t.get_text() for t in ax.get_xticklabels()]
        labels_position = dict(zip(xticklabels, ax.get_xticks()))
        # Testing if the label stayed at the right position
        assert labels_position["Monday"] == 0.0
        assert labels_position["Tuesday"] == 1.0
        assert labels_position["Wednesday"] == 2.0
    @pytest.mark.slow
    def test_x_multiindex_values_ticks(self):
        """MultiIndex x values keep fixed tick positions after changing xlim (GH 15912)."""
        # Test if multiindex plot index have a fixed xtick position
        # GH: 15912
        index = pd.MultiIndex.from_product([[2012, 2013], [1, 2]])
        df = pd.DataFrame(np.random.randn(4, 2), columns=["A", "B"], index=index)
        ax = df.plot()
        # widening xlim must not shift the label positions
        ax.set_xlim(-1, 4)
        xticklabels = [t.get_text() for t in ax.get_xticklabels()]
        labels_position = dict(zip(xticklabels, ax.get_xticks()))
        # Testing if the label stayed at the right position
        assert labels_position["(2012, 1)"] == 0.0
        assert labels_position["(2012, 2)"] == 1.0
        assert labels_position["(2013, 1)"] == 2.0
        assert labels_position["(2013, 2)"] == 3.0
    @pytest.mark.parametrize("kind", ["line", "area"])
    def test_xlim_plot_line(self, kind):
        """line/area plots set xlim slightly beyond the data range (GH 27686)."""
        # test if xlim is set correctly in plot.line and plot.area
        # GH 27686
        df = pd.DataFrame([2, 4], index=[1, 2])
        ax = df.plot(kind=kind)
        xlims = ax.get_xlim()
        # limits must enclose the index values 1 and 2 with margin
        assert xlims[0] < 1
        assert xlims[1] > 2
    def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
        """xlim and tick labels stay correct when bar and line share an axes (GH 27686)."""
        # test if xlim is set correctly when ax contains multiple different kinds
        # of plots, GH 27686
        fig, ax = self.plt.subplots()
        indexes = ["k1", "k2", "k3", "k4"]
        df = pd.DataFrame(
            {
                "s1": [1000, 2000, 1500, 2000],
                "s2": [900, 1400, 2000, 3000],
                "s3": [1500, 1500, 1600, 1200],
                "secondary_y": [1, 3, 4, 3],
            },
            index=indexes,
        )
        # bar plot first, then a line plot on the secondary y axis
        df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False)
        df[["secondary_y"]].plot(ax=ax, secondary_y=True)
        xlims = ax.get_xlim()
        # limits must enclose all four bar positions with margin
        assert xlims[0] < 0
        assert xlims[1] > 3
        # make sure axis labels are plotted correctly as well
        xticklabels = [t.get_text() for t in ax.get_xticklabels()]
        assert xticklabels == indexes
    def test_subplots_sharex_false(self):
        """With sharex=False each subplot keeps its own x tick range (GH 25160)."""
        # test when sharex is set to False, two plots should have different
        # labels, GH 25160
        df = pd.DataFrame(np.random.rand(10, 2))
        # give each column a disjoint valid range so the axes differ
        df.iloc[5:, 1] = np.nan
        df.iloc[:5, 0] = np.nan
        figs, axs = self.plt.subplots(2, 1)
        df.plot.line(ax=axs, subplots=True, sharex=False)
        expected_ax1 = np.arange(4.5, 10, 0.5)
        expected_ax2 = np.arange(-0.5, 5, 0.5)
        tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)
        tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)
def test_plot_no_rows(self):
# GH 27758
df = pd.DataFrame(columns=["foo"], dtype=int)
assert df.empty
ax = df.plot()
assert len(ax.get_lines()) == 1
line = ax.get_lines()[0]
assert len(line.get_xdata()) == 0
assert len(line.get_ydata()) == 0
def test_plot_no_numeric_data(self):
df = pd.DataFrame(["a", "b", "c"])
with pytest.raises(TypeError):
df.plot()
def test_missing_markers_legend(self):
# 14958
df = pd.DataFrame(np.random.randn(8, 3), columns=["A", "B", "C"])
ax = df.plot(y=["A"], marker="x", linestyle="solid")
df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax)
df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax)
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=["x", "o", "<"])
def test_missing_markers_legend_using_style(self):
# 14563
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6],
"B": [2, 4, 1, 3, 2, 4],
"C": [3, 3, 2, 6, 4, 2],
"X": [1, 2, 3, 4, 5, 6],
}
)
fig, ax = self.plt.subplots()
for kind in "ABC":
df.plot("X", kind, label=kind, ax=ax, style=".")
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=[".", ".", "."])
def _generate_4_axes_via_gridspec():
    """Return a 2x2 GridSpec plus its four axes (tl, ll, tr, lr order)."""
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import matplotlib.gridspec  # noqa

    gs = mpl.gridspec.GridSpec(2, 2)
    # create in the original order: top-left, lower-left, top-right,
    # lower-right (i.e. column-major over the grid)
    axes = [plt.subplot(gs[row, col]) for col in range(2) for row in range(2)]
    return gs, axes
| 38.835897
| 88
| 0.567108
|
from datetime import date, datetime
import itertools
import string
import warnings
import numpy as np
from numpy.random import rand, randn
import pytest
import pandas.util._test_decorators as td
from pandas.core.dtypes.api import is_list_like
import pandas as pd
from pandas import DataFrame, MultiIndex, PeriodIndex, Series, bdate_range, date_range
import pandas._testing as tm
from pandas.core.arrays import integer_array
from pandas.tests.plotting.common import TestPlotBase, _check_plot_works
from pandas.io.formats.printing import pprint_thing
import pandas.plotting as plotting
@td.skip_if_no_mpl
class TestDataFramePlots(TestPlotBase):
def setup_method(self, method):
TestPlotBase.setup_method(self, method)
import matplotlib as mpl
mpl.rcdefaults()
self.tdf = tm.makeTimeDataFrame()
self.hexbin_df = DataFrame(
{
"A": np.random.uniform(size=20),
"B": np.random.uniform(size=20),
"C": np.arange(20) + np.random.uniform(size=20),
}
)
def _assert_ytickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_yticklabels(), visible=exp)
def _assert_xtickslabels_visibility(self, axes, expected):
for ax, exp in zip(axes, expected):
self._check_visible(ax.get_xticklabels(), visible=exp)
    @pytest.mark.slow
    def test_plot(self):
        # Broad smoke test for DataFrame.plot: subplot layouts, error
        # messages for bad kwargs, MultiIndex / unicode labels, and bar
        # subplots reusing an existing Axes.
        from pandas.plotting._matplotlib.compat import _mpl_ge_3_1_0

        df = self.tdf
        _check_plot_works(df.plot, grid=False)

        # subplot variants (each of these is expected to emit a UserWarning)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))

        with tm.assert_produces_warning(UserWarning):
            # -1 in a layout dimension means "infer from the column count"
            axes = _check_plot_works(df.plot, subplots=True, layout=(-1, 2))
        self._check_axes_shape(axes, axes_num=4, layout=(2, 2))

        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, subplots=True, use_index=False)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))

        # an unknown matplotlib property must surface as AttributeError;
        # the message wording changed in matplotlib 3.1
        df = DataFrame({"x": [1, 2], "y": [3, 4]})
        if _mpl_ge_3_1_0():
            msg = "'Line2D' object has no property 'blarg'"
        else:
            msg = "Unknown property blarg"
        with pytest.raises(AttributeError, match=msg):
            df.plot.line(blarg=True)

        # assorted kwargs on a string-indexed frame
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
        _check_plot_works(df.plot, use_index=True)
        _check_plot_works(df.plot, sort_columns=False)
        _check_plot_works(df.plot, yticks=[1, 5, 10])
        _check_plot_works(df.plot, xticks=[1, 5, 10])
        _check_plot_works(df.plot, ylim=(-100, 100), xlim=(-100, 100))

        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.plot, subplots=True, title="blah")

        # shared-x subplots: only the bottom Axes keeps visible x labels
        axes = df.plot(subplots=True, title="blah")
        self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
        for ax in axes[:2]:
            self._check_visible(ax.xaxis)  # xaxis must be visible for grid
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            self._check_visible([ax.xaxis.get_label()], visible=False)
        for ax in [axes[2]]:
            self._check_visible(ax.xaxis)
            self._check_visible(ax.get_xticklabels())
            self._check_visible([ax.xaxis.get_label()])
            self._check_ticks_props(ax, xrot=0)

        _check_plot_works(df.plot, title="blah")

        # MultiIndex rows
        tuples = zip(string.ascii_letters[:10], range(10))
        df = DataFrame(np.random.rand(10, 3), index=MultiIndex.from_tuples(tuples))
        _check_plot_works(df.plot, use_index=True)

        # unicode MultiIndex rows and columns must render without error
        index = MultiIndex.from_tuples(
            [
                ("\u03b1", 0),
                ("\u03b1", 1),
                ("\u03b2", 2),
                ("\u03b2", 3),
                ("\u03b3", 4),
                ("\u03b3", 5),
                ("\u03b4", 6),
                ("\u03b4", 7),
            ],
            names=["i0", "i1"],
        )
        columns = MultiIndex.from_tuples(
            [("bar", "\u0394"), ("bar", "\u0395")], names=["c0", "c1"]
        )
        df = DataFrame(np.random.randint(0, 10, (8, 2)), columns=columns, index=index)
        _check_plot_works(df.plot, title="\u03A3")

        # single-column bar subplots, with and without an explicit layout
        df = DataFrame({"x": np.random.rand(10)})
        axes = _check_plot_works(df.plot.bar, subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

        axes = _check_plot_works(df.plot.bar, subplots=True, layout=(-1, 1))
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))

        # when a single Axes is passed it is reused for the lone subplot
        fig, ax = self.plt.subplots()
        axes = df.plot.bar(subplots=True, ax=ax)
        assert len(axes) == 1
        result = ax.axes
        assert result is axes[0]
def test_integer_array_plot(self):
arr = integer_array([1, 2, 3, 4], dtype="UInt32")
s = Series(arr)
_check_plot_works(s.plot.line)
_check_plot_works(s.plot.bar)
_check_plot_works(s.plot.hist)
_check_plot_works(s.plot.pie)
df = DataFrame({"x": arr, "y": arr})
_check_plot_works(df.plot.line)
_check_plot_works(df.plot.bar)
_check_plot_works(df.plot.hist)
_check_plot_works(df.plot.pie, y="y")
_check_plot_works(df.plot.scatter, x="x", y="y")
_check_plot_works(df.plot.hexbin, x="x", y="y")
def test_mpl2_color_cycle_str(self):
colors = ["C" + str(x) for x in range(10)]
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
for c in colors:
_check_plot_works(df.plot, color=c)
def test_color_single_series_list(self):
df = DataFrame({"A": [1, 2, 3]})
_check_plot_works(df.plot, color=["red"])
def test_rgb_tuple_color(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0))
_check_plot_works(df.plot, x="x", y="y", color=(1, 0, 0, 0.5))
def test_color_empty_string(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(color="")
def test_color_and_style_arguments(self):
df = DataFrame({"x": [1, 2], "y": [3, 4]})
ax = df.plot(color=["red", "black"], style=["-", "--"])
linestyle = [line.get_linestyle() for line in ax.lines]
assert linestyle == ["-", "--"]
color = [line.get_color() for line in ax.lines]
assert color == ["red", "black"]
with pytest.raises(ValueError):
df.plot(color=["red", "black"], style=["k-", "r--"])
def test_nonnumeric_exclude(self):
df = DataFrame({"A": ["x", "y", "z"], "B": [1, 2, 3]})
ax = df.plot()
assert len(ax.get_lines()) == 1
@pytest.mark.slow
def test_implicit_label(self):
df = DataFrame(randn(10, 3), columns=["a", "b", "c"])
ax = df.plot(x="a", y="b")
self._check_text_labels(ax.xaxis.get_label(), "a")
@pytest.mark.slow
def test_donot_overwrite_index_name(self):
df = DataFrame(randn(2, 2), columns=["a", "b"])
df.index.name = "NAME"
df.plot(y="b", label="LABEL")
assert df.index.name == "NAME"
    @pytest.mark.slow
    def test_plot_xy(self):
        # x/y may address columns by position or by label; the result must
        # match the equivalent set_index(...).plot() construction.
        df = self.tdf

        # columns selected by integer position
        self._check_data(df.plot(x=0, y=1), df.set_index("A")["B"].plot())
        self._check_data(df.plot(x=0), df.set_index("A").plot())
        self._check_data(df.plot(y=0), df.B.plot())

        # columns selected by string label
        self._check_data(df.plot(x="A", y="B"), df.set_index("A").B.plot())
        self._check_data(df.plot(x="A"), df.set_index("A").plot())
        self._check_data(df.plot(y="B"), df.B.plot())

        # integer column *labels* (not positions) after renaming
        df.columns = np.arange(1, len(df.columns) + 1)
        self._check_data(df.plot(x=1, y=2), df.set_index(1)[2].plot())
        self._check_data(df.plot(x=1), df.set_index(1).plot())
        self._check_data(df.plot(y=1), df[1].plot())

        # figure-level keywords pass through alongside x/y
        ax = df.plot(x=1, y=2, title="Test", figsize=(16, 8))
        self._check_text_labels(ax.title, "Test")
        self._check_axes_shape(ax, axes_num=1, layout=(1, 1), figsize=(16.0, 8.0))
@pytest.mark.slow
@pytest.mark.parametrize(
"input_log, expected_log", [(True, "log"), ("sym", "symlog")]
)
def test_logscales(self, input_log, expected_log):
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
ax = df.plot(logy=input_log)
self._check_ax_scales(ax, yaxis=expected_log)
assert ax.get_yscale() == expected_log
ax = df.plot(logx=input_log)
self._check_ax_scales(ax, xaxis=expected_log)
assert ax.get_xscale() == expected_log
ax = df.plot(loglog=input_log)
self._check_ax_scales(ax, xaxis=expected_log, yaxis=expected_log)
assert ax.get_xscale() == expected_log
assert ax.get_yscale() == expected_log
@pytest.mark.parametrize("input_param", ["logx", "logy", "loglog"])
def test_invalid_logscale(self, input_param):
df = DataFrame({"a": np.arange(100)}, index=np.arange(100))
msg = "Boolean, None and 'sym' are valid options, 'sm' is given."
with pytest.raises(ValueError, match=msg):
df.plot(**{input_param: "sm"})
    @pytest.mark.slow
    def test_xcompat(self):
        # x_compat suppresses the PeriodIndex conversion of a datetime
        # x-axis; it can be requested per-call, via plot_params (under two
        # aliases), or temporarily with plot_params.use().
        import pandas as pd

        df = self.tdf
        # per-call keyword
        ax = df.plot(x_compat=True)
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)

        tm.close()
        # global option, long-form alias
        pd.plotting.plot_params["xaxis.compat"] = True
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)

        tm.close()
        # option off again: xdata is period-convertible once more
        pd.plotting.plot_params["x_compat"] = False
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)

        tm.close()
        # scoped enable via the context manager
        with pd.plotting.plot_params.use("x_compat", True):
            ax = df.plot()
            lines = ax.get_lines()
            assert not isinstance(lines[0].get_xdata(), PeriodIndex)

        tm.close()
        # outside the context manager the default behavior is restored
        ax = df.plot()
        lines = ax.get_lines()
        assert not isinstance(lines[0].get_xdata(), PeriodIndex)
        assert isinstance(PeriodIndex(lines[0].get_xdata()), PeriodIndex)
def test_period_compat(self):
# GH 9012
# period-array conversions
df = DataFrame(
np.random.rand(21, 2),
index=bdate_range(datetime(2000, 1, 1), datetime(2000, 1, 31)),
columns=["a", "b"],
)
df.plot()
self.plt.axhline(y=0)
tm.close()
def test_unsorted_index(self):
df = DataFrame(
{"y": np.arange(100)}, index=np.arange(99, -1, -1), dtype=np.int64
)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
tm.assert_series_equal(rs, df.y, check_index_type=False)
tm.close()
df.index = pd.Index(np.arange(99, -1, -1), dtype=np.float64)
ax = df.plot()
lines = ax.get_lines()[0]
rs = lines.get_xydata()
rs = Series(rs[:, 1], rs[:, 0], dtype=np.int64, name="y")
tm.assert_series_equal(rs, df.y)
def test_unsorted_index_lims(self):
df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0]}, index=[1.0, 0.0, 3.0, 2.0])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame(
{"y": [0.0, 1.0, np.nan, 3.0, 4.0, 5.0, 6.0]},
index=[1.0, 0.0, 3.0, 2.0, np.nan, 3.0, 2.0],
)
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
df = DataFrame({"y": [0.0, 1.0, 2.0, 3.0], "z": [91.0, 90.0, 93.0, 92.0]})
ax = df.plot(x="z", y="y")
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= np.nanmin(lines[0].get_data()[0])
assert xmax >= np.nanmax(lines[0].get_data()[0])
    @pytest.mark.slow
    def test_subplots(self):
        # Per-kind subplot behavior: legend per Axes, x-label visibility
        # under sharex, and legend suppression.
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))

        for kind in ["bar", "barh", "line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True, legend=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
            assert axes.shape == (3,)

            # each Axes carries the legend for exactly its own column
            for ax, column in zip(axes, df.columns):
                self._check_legend_labels(ax, labels=[pprint_thing(column)])

            # upper Axes (checked here) hide their x tick labels under sharex
            for ax in axes[:-2]:
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                if not (kind == "bar" and self.mpl_ge_3_1_0):
                    # change https://github.com/pandas-dev/pandas/issues/26714
                    self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())

            # bottom Axes keeps all its x labels
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())

            # sharex=False: every Axes keeps all labels
            axes = df.plot(kind=kind, subplots=True, sharex=False)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())

            # legend=False suppresses per-Axes legends entirely
            axes = df.plot(kind=kind, subplots=True, legend=False)
            for ax in axes:
                assert ax.get_legend() is None
def test_groupby_boxplot_sharey(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharey can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
# behavior without keyword
axes = df.groupby("c").boxplot()
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# set sharey=True should be identical
axes = df.groupby("c").boxplot(sharey=True)
expected = [True, False, True, False]
self._assert_ytickslabels_visibility(axes, expected)
# sharey=False, all yticklabels should be visible
axes = df.groupby("c").boxplot(sharey=False)
expected = [True, True, True, True]
self._assert_ytickslabels_visibility(axes, expected)
def test_groupby_boxplot_sharex(self):
# https://github.com/pandas-dev/pandas/issues/20968
# sharex can now be switched check whether the right
# pair of axes is turned on or off
df = DataFrame(
{
"a": [-1.43, -0.15, -3.70, -1.43, -0.14],
"b": [0.56, 0.84, 0.29, 0.56, 0.85],
"c": [0, 1, 2, 3, 1],
},
index=[0, 1, 2, 3, 4],
)
# behavior without keyword
axes = df.groupby("c").boxplot()
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# set sharex=False should be identical
axes = df.groupby("c").boxplot(sharex=False)
expected = [True, True, True, True]
self._assert_xtickslabels_visibility(axes, expected)
# sharex=True, yticklabels should be visible
# only for bottom plots
axes = df.groupby("c").boxplot(sharex=True)
expected = [False, False, True, True]
self._assert_xtickslabels_visibility(axes, expected)
    @pytest.mark.slow
    def test_subplots_timeseries(self):
        # Subplot label visibility on a datetime-indexed frame, with and
        # without sharex, plus rot/fontsize pass-through.
        idx = date_range(start="2014-07-01", freq="M", periods=10)
        df = DataFrame(np.random.rand(10, 3), index=idx)

        for kind in ["line", "area"]:
            axes = df.plot(kind=kind, subplots=True, sharex=True)
            self._check_axes_shape(axes, axes_num=3, layout=(3, 1))

            for ax in axes[:-2]:
                # GH 7801
                self._check_visible(ax.xaxis)  # xaxis must be visible for grid
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
                self._check_visible(ax.xaxis.get_label(), visible=False)
                self._check_visible(ax.get_yticklabels())

            # bottom Axes keeps its x labels, with horizontal tick text
            self._check_visible(axes[-1].xaxis)
            self._check_visible(axes[-1].get_xticklabels())
            self._check_visible(axes[-1].get_xticklabels(minor=True))
            self._check_visible(axes[-1].xaxis.get_label())
            self._check_visible(axes[-1].get_yticklabels())
            self._check_ticks_props(axes, xrot=0)

            # sharex=False: all labels visible and rot/fontsize applied
            axes = df.plot(kind=kind, subplots=True, sharex=False, rot=45, fontsize=7)
            for ax in axes:
                self._check_visible(ax.xaxis)
                self._check_visible(ax.get_xticklabels())
                self._check_visible(ax.get_xticklabels(minor=True))
                self._check_visible(ax.xaxis.get_label())
                self._check_visible(ax.get_yticklabels())
                self._check_ticks_props(ax, xlabelsize=7, xrot=45, ylabelsize=7)
def test_subplots_timeseries_y_axis(self):
# GH16953
data = {
"numeric": np.array([1, 2, 5]),
"timedelta": [
pd.Timedelta(-10, unit="s"),
pd.Timedelta(10, unit="m"),
pd.Timedelta(10, unit="h"),
],
"datetime_no_tz": [
pd.to_datetime("2017-08-01 00:00:00"),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
"datetime_all_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00", utc=True),
pd.to_datetime("2017-08-02 00:00:00", utc=True),
],
"text": ["This", "should", "fail"],
}
testdata = DataFrame(data)
ax_numeric = testdata.plot(y="numeric")
assert (
ax_numeric.get_lines()[0].get_data()[1] == testdata["numeric"].values
).all()
ax_timedelta = testdata.plot(y="timedelta")
assert (
ax_timedelta.get_lines()[0].get_data()[1] == testdata["timedelta"].values
).all()
ax_datetime_no_tz = testdata.plot(y="datetime_no_tz")
assert (
ax_datetime_no_tz.get_lines()[0].get_data()[1]
== testdata["datetime_no_tz"].values
).all()
ax_datetime_all_tz = testdata.plot(y="datetime_all_tz")
assert (
ax_datetime_all_tz.get_lines()[0].get_data()[1]
== testdata["datetime_all_tz"].values
).all()
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
testdata.plot(y="text")
@pytest.mark.xfail(reason="not support for period, categorical, datetime_mixed_tz")
def test_subplots_timeseries_y_axis_not_supported(self):
data = {
"numeric": np.array([1, 2, 5]),
"period": [
pd.Period("2017-08-01 00:00:00", freq="H"),
pd.Period("2017-08-01 02:00", freq="H"),
pd.Period("2017-08-02 00:00:00", freq="H"),
],
"categorical": pd.Categorical(
["c", "b", "a"], categories=["a", "b", "c"], ordered=False
),
"datetime_mixed_tz": [
pd.to_datetime("2017-08-01 00:00:00", utc=True),
pd.to_datetime("2017-08-01 02:00:00"),
pd.to_datetime("2017-08-02 00:00:00"),
],
}
testdata = pd.DataFrame(data)
ax_period = testdata.plot(x="numeric", y="period")
assert (
ax_period.get_lines()[0].get_data()[1] == testdata["period"].values
).all()
ax_categorical = testdata.plot(x="numeric", y="categorical")
assert (
ax_categorical.get_lines()[0].get_data()[1]
== testdata["categorical"].values
).all()
ax_datetime_mixed_tz = testdata.plot(x="numeric", y="datetime_mixed_tz")
assert (
ax_datetime_mixed_tz.get_lines()[0].get_data()[1]
== testdata["datetime_mixed_tz"].values
).all()
@pytest.mark.slow
def test_subplots_layout(self):
# GH 6667
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True, layout=(2, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(-1, 2))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(2, -1))
self._check_axes_shape(axes, axes_num=3, layout=(2, 2))
assert axes.shape == (2, 2)
axes = df.plot(subplots=True, layout=(1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(-1, 4))
self._check_axes_shape(axes, axes_num=3, layout=(1, 4))
assert axes.shape == (1, 4)
axes = df.plot(subplots=True, layout=(4, -1))
self._check_axes_shape(axes, axes_num=3, layout=(4, 1))
assert axes.shape == (4, 1)
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(1, 1))
with pytest.raises(ValueError):
df.plot(subplots=True, layout=(-1, -1))
# single column
df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))
axes = df.plot(subplots=True)
self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
assert axes.shape == (1,)
axes = df.plot(subplots=True, layout=(3, 3))
self._check_axes_shape(axes, axes_num=1, layout=(3, 3))
assert axes.shape == (3, 3)
    @pytest.mark.slow
    def test_subplots_warnings(self):
        # GH 9464: subplots with an explicit layout must not warn, with
        # either a default RangeIndex or a datetime index.
        with tm.assert_produces_warning(None):
            df = DataFrame(np.random.randn(100, 4))
            df.plot(subplots=True, layout=(3, 2))

            df = DataFrame(
                np.random.randn(100, 4), index=date_range("1/1/2000", periods=100)
            )
            df.plot(subplots=True, layout=(3, 2))
    @pytest.mark.slow
    def test_subplots_multiple_axes(self):
        # GH 5353, 6970, GH 7069
        fig, axes = self.plt.subplots(2, 3)
        df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))

        # draw on the first row of the pre-made 2x3 grid
        returned = df.plot(subplots=True, ax=axes[0], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        # draw on second row
        returned = df.plot(subplots=True, ax=axes[1], sharex=False, sharey=False)
        self._check_axes_shape(returned, axes_num=3, layout=(1, 3))
        assert returned.shape == (3,)
        assert returned[0].figure is fig
        self._check_axes_shape(axes, axes_num=6, layout=(2, 3))
        tm.close()

        with pytest.raises(ValueError):
            fig, axes = self.plt.subplots(2, 3)
            # pass different number of axes from required
            df.plot(subplots=True, ax=axes)

        # pass 2-dim axes and invalid layout
        # invalid lauout should not affect to input and return value
        # (show warning is tested in
        # TestDataFrameGroupByPlots.test_grouped_box_multiple_axes
        fig, axes = self.plt.subplots(2, 2)
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", UserWarning)
            df = DataFrame(np.random.rand(10, 4), index=list(string.ascii_letters[:10]))

            # the 2x2 shape of the provided axes wins over each layout kwarg
            returned = df.plot(
                subplots=True, ax=axes, layout=(2, 1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

            returned = df.plot(
                subplots=True, ax=axes, layout=(2, -1), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

            returned = df.plot(
                subplots=True, ax=axes, layout=(-1, 2), sharex=False, sharey=False
            )
            self._check_axes_shape(returned, axes_num=4, layout=(2, 2))
            assert returned.shape == (4,)

        # single column
        fig, axes = self.plt.subplots(1, 1)
        df = DataFrame(np.random.rand(10, 1), index=list(string.ascii_letters[:10]))

        axes = df.plot(subplots=True, ax=[axes], sharex=False, sharey=False)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
        assert axes.shape == (1,)
    def test_subplots_ts_share_axes(self):
        # GH 3964
        # With a shared 3x3 grid, only the bottom row shows x tick labels
        # and only the first column shows y tick labels.
        fig, axes = self.plt.subplots(3, 3, sharex=True, sharey=True)
        self.plt.subplots_adjust(left=0.05, right=0.95, hspace=0.3, wspace=0.3)
        df = DataFrame(
            np.random.randn(10, 9),
            index=date_range(start="2014-07-01", freq="M", periods=10),
        )
        # one column per Axes, in grid order
        for i, ax in enumerate(axes.ravel()):
            df[i].plot(ax=ax, fontsize=5)

        # Rows other than bottom should not be visible
        for ax in axes[0:-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=False)

        # Bottom row should be visible
        for ax in axes[-1].ravel():
            self._check_visible(ax.get_xticklabels(), visible=True)

        # First column should be visible
        for ax in axes[[0, 1, 2], [0]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=True)

        # Other columns should not be visible
        for ax in axes[[0, 1, 2], [1]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in axes[[0, 1, 2], [2]].ravel():
            self._check_visible(ax.get_yticklabels(), visible=False)
def test_subplots_sharex_axes_existing_axes(self):
# GH 9158
d = {"A": [1.0, 2.0, 3.0, 4.0], "B": [4.0, 3.0, 2.0, 1.0], "C": [5, 1, 3, 4]}
df = DataFrame(d, index=date_range("2014 10 11", "2014 10 14"))
axes = df[["A", "B"]].plot(subplots=True)
df["C"].plot(ax=axes[0], secondary_y=True)
self._check_visible(axes[0].get_xticklabels(), visible=False)
self._check_visible(axes[1].get_xticklabels(), visible=True)
for ax in axes.ravel():
self._check_visible(ax.get_yticklabels(), visible=True)
    @pytest.mark.slow
    def test_subplots_dup_columns(self):
        # GH 10962
        # Duplicated column names: one line and one legend entry per Axes.
        df = DataFrame(np.random.rand(5, 5), columns=list("aaaaa"))
        axes = df.plot(subplots=True)
        for ax in axes:
            self._check_legend_labels(ax, labels=["a"])
            assert len(ax.lines) == 1
        tm.close()

        axes = df.plot(subplots=True, secondary_y="a")
        for ax in axes:
            # (right) is only attached when subplots=False
            self._check_legend_labels(ax, labels=["a"])
            assert len(ax.lines) == 1
        tm.close()

        # without subplots everything moves to the secondary axis and the
        # legend labels gain the "(right)" suffix
        ax = df.plot(secondary_y="a")
        self._check_legend_labels(ax, labels=["a (right)"] * 5)
        assert len(ax.lines) == 0
        assert len(ax.right_ax.lines) == 5
def test_negative_log(self):
df = -DataFrame(
rand(6, 4),
index=list(string.ascii_letters[:6]),
columns=["x", "y", "z", "four"],
)
with pytest.raises(ValueError):
df.plot.area(logy=True)
with pytest.raises(ValueError):
df.plot.area(loglog=True)
def _compare_stacked_y_cood(self, normal_lines, stacked_lines):
base = np.zeros(len(normal_lines[0].get_data()[1]))
for nl, sl in zip(normal_lines, stacked_lines):
base += nl.get_data()[1] # get y coordinates
sy = sl.get_data()[1]
tm.assert_numpy_array_equal(base, sy)
    def test_line_area_stacked(self):
        # Stacked line/area y coordinates must be the running sum of the
        # unstacked ones; mixed-sign columns cannot be stacked at all.
        with tm.RNGContext(42):
            df = DataFrame(rand(6, 4), columns=["w", "x", "y", "z"])
            neg_df = -df
            # each column has either positive or negative value
            sep_df = DataFrame(
                {"w": rand(6), "x": rand(6), "y": -rand(6), "z": -rand(6)}
            )
            # each column has positive-negative mixed value
            mixed_df = DataFrame(
                randn(6, 4),
                index=list(string.ascii_letters[:6]),
                columns=["w", "x", "y", "z"],
            )

            for kind in ["line", "area"]:
                ax1 = _check_plot_works(df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)

                ax1 = _check_plot_works(neg_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(neg_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines, ax2.lines)

                # sign-separated columns stack within each sign group
                ax1 = _check_plot_works(sep_df.plot, kind=kind, stacked=False)
                ax2 = _check_plot_works(sep_df.plot, kind=kind, stacked=True)
                self._compare_stacked_y_cood(ax1.lines[:2], ax2.lines[:2])
                self._compare_stacked_y_cood(ax1.lines[2:], ax2.lines[2:])

                # mixed-sign columns plot unstacked but raise when stacked
                _check_plot_works(mixed_df.plot, stacked=False)
                with pytest.raises(ValueError):
                    mixed_df.plot(stacked=True)

                # Use an index with strictly positive values, preventing
                # matplotlib from warning about ignoring xlim
                df2 = df.set_index(df.index + 1)
                _check_plot_works(df2.plot, kind=kind, logx=True, stacked=True)
    def test_line_area_nan_df(self):
        # NaNs are masked out of plain line data but treated as 0 when
        # stacking lines or drawing area plots.
        values1 = [1, 2, np.nan, 3]
        values2 = [3, np.nan, 2, 1]
        df = DataFrame({"a": values1, "b": values2})
        tdf = DataFrame({"a": values1, "b": values2}, index=tm.makeDateIndex(k=4))

        for d in [df, tdf]:
            ax = _check_plot_works(d.plot)
            masked1 = ax.lines[0].get_ydata()
            masked2 = ax.lines[1].get_ydata()
            # remove nan for comparison purpose
            exp = np.array([1, 2, 3], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked1.data, 2), exp)

            exp = np.array([3, 2, 1], dtype=np.float64)
            tm.assert_numpy_array_equal(np.delete(masked2.data, 1), exp)

            # the NaN positions must be masked
            tm.assert_numpy_array_equal(
                masked1.mask, np.array([False, False, True, False])
            )
            tm.assert_numpy_array_equal(
                masked2.mask, np.array([False, True, False, False])
            )

            # stacked / area plots substitute 0 for NaN
            expected1 = np.array([1, 2, 0, 3], dtype=np.float64)
            expected2 = np.array([3, 0, 2, 1], dtype=np.float64)

            ax = _check_plot_works(d.plot, stacked=True)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)

            ax = _check_plot_works(d.plot.area)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected1 + expected2)

            ax = _check_plot_works(d.plot.area, stacked=False)
            tm.assert_numpy_array_equal(ax.lines[0].get_ydata(), expected1)
            tm.assert_numpy_array_equal(ax.lines[1].get_ydata(), expected2)
def test_line_lim(self):
df = DataFrame(rand(6, 3), columns=["x", "y", "z"])
ax = df.plot()
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
ax = df.plot(secondary_y=True)
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
axes = df.plot(secondary_y=True, subplots=True)
self._check_axes_shape(axes, axes_num=3, layout=(3, 1))
for ax in axes:
assert hasattr(ax, "left_ax")
assert not hasattr(ax, "right_ax")
xmin, xmax = ax.get_xlim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
def test_area_lim(self):
df = DataFrame(rand(6, 4), columns=["x", "y", "z", "four"])
neg_df = -df
for stacked in [True, False]:
ax = _check_plot_works(df.plot.area, stacked=stacked)
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
lines = ax.get_lines()
assert xmin <= lines[0].get_data()[0][0]
assert xmax >= lines[0].get_data()[0][-1]
assert ymin == 0
ax = _check_plot_works(neg_df.plot.area, stacked=stacked)
ymin, ymax = ax.get_ylim()
assert ymax == 0
@pytest.mark.slow
def test_bar_colors(self):
import matplotlib.pyplot as plt
default_colors = self._unpack_cycler(plt.rcParams)
df = DataFrame(randn(5, 5))
ax = df.plot.bar()
self._check_colors(ax.patches[::5], facecolors=default_colors[:5])
tm.close()
custom_colors = "rgcby"
ax = df.plot.bar(color=custom_colors)
self._check_colors(ax.patches[::5], facecolors=custom_colors)
tm.close()
from matplotlib import cm
# Test str -> colormap functionality
ax = df.plot.bar(colormap="jet")
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
# Test colormap functionality
ax = df.plot.bar(colormap=cm.jet)
rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
self._check_colors(ax.patches[::5], facecolors=rgba_colors)
tm.close()
ax = df.loc[:, [0]].plot.bar(color="DodgerBlue")
self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])
tm.close()
ax = df.plot(kind="bar", color="green")
self._check_colors(ax.patches[::5], facecolors=["green"] * 5)
tm.close()
def test_bar_user_colors(self):
df = pd.DataFrame(
{"A": range(4), "B": range(1, 5), "color": ["red", "blue", "blue", "red"]}
)
# This should *only* work when `y` is specified, else
# we use one color per column
ax = df.plot.bar(y="A", color=df["color"])
result = [p.get_facecolor() for p in ax.patches]
expected = [
(1.0, 0.0, 0.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(0.0, 0.0, 1.0, 1.0),
(1.0, 0.0, 0.0, 1.0),
]
assert result == expected
@pytest.mark.slow
def test_bar_linewidth(self):
df = DataFrame(randn(5, 5))
# regular
ax = df.plot.bar(linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# stacked
ax = df.plot.bar(stacked=True, linewidth=2)
for r in ax.patches:
assert r.get_linewidth() == 2
# subplots
axes = df.plot.bar(linewidth=2, subplots=True)
self._check_axes_shape(axes, axes_num=5, layout=(5, 1))
for ax in axes:
for r in ax.patches:
assert r.get_linewidth() == 2
@pytest.mark.slow
def test_bar_barwidth(self):
df = DataFrame(randn(5, 5))
width = 0.9
# regular
ax = df.plot.bar(width=width)
for r in ax.patches:
assert r.get_width() == width / len(df.columns)
# stacked
ax = df.plot.bar(stacked=True, width=width)
for r in ax.patches:
assert r.get_width() == width
# horizontal regular
ax = df.plot.barh(width=width)
for r in ax.patches:
assert r.get_height() == width / len(df.columns)
# horizontal stacked
ax = df.plot.barh(stacked=True, width=width)
for r in ax.patches:
assert r.get_height() == width
# subplots
axes = df.plot.bar(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_width() == width
# horizontal subplots
axes = df.plot.barh(width=width, subplots=True)
for ax in axes:
for r in ax.patches:
assert r.get_height() == width
@pytest.mark.slow
def test_bar_barwidth_position(self):
df = DataFrame(randn(5, 5))
self._check_bar_alignment(
df, kind="bar", stacked=False, width=0.9, position=0.2
)
self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9, position=0.2)
self._check_bar_alignment(
df, kind="barh", stacked=False, width=0.9, position=0.2
)
self._check_bar_alignment(
df, kind="barh", stacked=True, width=0.9, position=0.2
)
self._check_bar_alignment(
df, kind="bar", subplots=True, width=0.9, position=0.2
)
self._check_bar_alignment(
df, kind="barh", subplots=True, width=0.9, position=0.2
)
    @pytest.mark.slow
    def test_bar_barwidth_position_int(self):
        """GH 12979: integer and float ``width=1`` must behave identically —
        ticks on integers, limits at +/-0.75, bars centered on the ticks."""
        df = DataFrame(randn(5, 5))
        for w in [1, 1.0]:
            ax = df.plot.bar(stacked=True, width=w)
            ticks = ax.xaxis.get_ticklocs()
            tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4]))
            assert ax.get_xlim() == (-0.75, 4.75)
            # check left-edge of bars
            assert ax.patches[0].get_x() == -0.5
            assert ax.patches[-1].get_x() == 3.5
        self._check_bar_alignment(df, kind="bar", stacked=True, width=1)
        self._check_bar_alignment(df, kind="barh", stacked=False, width=1)
        self._check_bar_alignment(df, kind="barh", stacked=True, width=1)
        self._check_bar_alignment(df, kind="bar", subplots=True, width=1)
        self._check_bar_alignment(df, kind="barh", subplots=True, width=1)
    @pytest.mark.slow
    def test_bar_bottom_left(self):
        """``bottom`` (for bar) and ``left`` (for barh) offsets — scalar,
        list, and ndarray — must shift the base of each bar accordingly,
        in regular, stacked, and subplot modes."""
        df = DataFrame(rand(5, 5))
        ax = df.plot.bar(stacked=False, bottom=1)
        result = [p.get_y() for p in ax.patches]
        assert result == [1] * 25
        # stacked: only the first series (first 5 patches) sits on `bottom`
        ax = df.plot.bar(stacked=True, bottom=[-1, -2, -3, -4, -5])
        result = [p.get_y() for p in ax.patches[:5]]
        assert result == [-1, -2, -3, -4, -5]
        ax = df.plot.barh(stacked=False, left=np.array([1, 1, 1, 1, 1]))
        result = [p.get_x() for p in ax.patches]
        assert result == [1] * 25
        ax = df.plot.barh(stacked=True, left=[1, 2, 3, 4, 5])
        result = [p.get_x() for p in ax.patches[:5]]
        assert result == [1, 2, 3, 4, 5]
        axes = df.plot.bar(subplots=True, bottom=-1)
        for ax in axes:
            result = [p.get_y() for p in ax.patches]
            assert result == [-1] * 5
        axes = df.plot.barh(subplots=True, left=np.array([1, 1, 1, 1, 1]))
        for ax in axes:
            result = [p.get_x() for p in ax.patches]
            assert result == [1] * 5
    @pytest.mark.slow
    def test_bar_nan(self):
        """NaN values draw as zero-height bars, and stacked bars skip the
        NaN when accumulating the running base (y) of later series."""
        df = DataFrame({"A": [10, np.nan, 20], "B": [5, 10, 20], "C": [1, 2, 3]})
        ax = df.plot.bar()
        expected = [10, 0, 20, 5, 10, 20, 1, 2, 3]
        result = [p.get_height() for p in ax.patches]
        assert result == expected
        ax = df.plot.bar(stacked=True)
        result = [p.get_height() for p in ax.patches]
        assert result == expected
        # bases: NaN contributes 0 to the stack below the next series
        result = [p.get_y() for p in ax.patches]
        expected = [0.0, 0.0, 0.0, 10.0, 0.0, 20.0, 15.0, 10.0, 40.0]
        assert result == expected
    @pytest.mark.slow
    def test_bar_categorical(self):
        """GH 13019: a CategoricalIndex must plot exactly like a regular
        Index — same ticks, limits, and bar edge positions."""
        df1 = pd.DataFrame(
            np.random.randn(6, 5),
            index=pd.Index(list("ABCDEF")),
            columns=pd.Index(list("abcde")),
        )
        # categorical index must behave the same
        df2 = pd.DataFrame(
            np.random.randn(6, 5),
            index=pd.CategoricalIndex(list("ABCDEF")),
            columns=pd.CategoricalIndex(list("abcde")),
        )
        for df in [df1, df2]:
            ax = df.plot.bar()
            ticks = ax.xaxis.get_ticklocs()
            tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
            assert ax.get_xlim() == (-0.5, 5.5)
            # check left-edge of bars
            assert ax.patches[0].get_x() == -0.25
            assert ax.patches[-1].get_x() == 5.15
            ax = df.plot.bar(stacked=True)
            tm.assert_numpy_array_equal(ticks, np.array([0, 1, 2, 3, 4, 5]))
            assert ax.get_xlim() == (-0.5, 5.5)
            assert ax.patches[0].get_x() == -0.25
            assert ax.patches[-1].get_x() == 4.75
    @pytest.mark.slow
    def test_plot_scatter(self):
        """Scatter accepts column labels or positional indices for x/y,
        and requires both axes (TypeError when one is missing)."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["x", "y", "z", "four"],
        )
        _check_plot_works(df.plot.scatter, x="x", y="y")
        _check_plot_works(df.plot.scatter, x=1, y=2)
        with pytest.raises(TypeError):
            df.plot.scatter(x="x")
        with pytest.raises(TypeError):
            df.plot.scatter(y="y")
        # GH 6951
        axes = df.plot(x="x", y="y", kind="scatter", subplots=True)
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
    def test_raise_error_on_datetime_time_data(self):
        """GH 8113: datetime.time columns are unsupported by matplotlib
        scatter and must raise a TypeError with a clear message."""
        # GH 8113, datetime.time type is not supported by matplotlib in scatter
        df = pd.DataFrame(np.random.randn(10), columns=["a"])
        df["dtime"] = pd.date_range(start="2014-01-01", freq="h", periods=10).time
        msg = "must be a string or a number, not 'datetime.time'"
        with pytest.raises(TypeError, match=msg):
            df.plot(kind="scatter", x="dtime", y="a")
    def test_scatterplot_datetime_data(self):
        """GH 30391: scatter must accept datetime64 data on either axis,
        whether referenced by label or by position."""
        # GH 30391
        dates = pd.date_range(start=date(2019, 1, 1), periods=12, freq="W")
        vals = np.random.normal(0, 1, len(dates))
        df = pd.DataFrame({"dates": dates, "vals": vals})
        _check_plot_works(df.plot.scatter, x="dates", y="vals")
        _check_plot_works(df.plot.scatter, x=0, y=1)
    def test_scatterplot_object_data(self):
        """GH 18755: scatter must accept object-dtype (string) data on one
        or both axes, by label or position."""
        # GH 18755
        df = pd.DataFrame(dict(a=["A", "B", "C"], b=[2, 3, 4]))
        _check_plot_works(df.plot.scatter, x="a", y="b")
        _check_plot_works(df.plot.scatter, x=0, y=1)
        df = pd.DataFrame(dict(a=["A", "B", "C"], b=["a", "b", "c"]))
        _check_plot_works(df.plot.scatter, x="a", y="b")
        _check_plot_works(df.plot.scatter, x=0, y=1)
    @pytest.mark.slow
    def test_if_scatterplot_colorbar_affects_xaxis_visibility(self):
        """GH 10611: adding a colorbar (``c=``) must not change the
        visibility of x-axis tick labels or the x-axis label."""
        # addressing issue #10611, to ensure colobar does not
        # interfere with x-axis label and ticklabels with
        # ipython inline backend.
        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
        ax1 = df.plot.scatter(x="A label", y="B label")
        ax2 = df.plot.scatter(x="A label", y="B label", c="C label")
        vis1 = [vis.get_visible() for vis in ax1.xaxis.get_minorticklabels()]
        vis2 = [vis.get_visible() for vis in ax2.xaxis.get_minorticklabels()]
        assert vis1 == vis2
        vis1 = [vis.get_visible() for vis in ax1.xaxis.get_majorticklabels()]
        vis2 = [vis.get_visible() for vis in ax2.xaxis.get_majorticklabels()]
        assert vis1 == vis2
        assert (
            ax1.xaxis.get_label().get_visible() == ax2.xaxis.get_label().get_visible()
        )
    @pytest.mark.slow
    def test_if_hexbin_xaxis_label_is_visible(self):
        """GH 10678: hexbin's colorbar must leave the x-axis label and all
        tick labels visible."""
        # addressing issue #10678, to ensure colobar does not
        # interfere with x-axis label and ticklabels with
        # ipython inline backend.
        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
        ax = df.plot.hexbin("A label", "B label", gridsize=12)
        assert all(vis.get_visible() for vis in ax.xaxis.get_minorticklabels())
        assert all(vis.get_visible() for vis in ax.xaxis.get_majorticklabels())
        assert ax.xaxis.get_label().get_visible()
    @pytest.mark.slow
    def test_if_scatterplot_colorbars_are_next_to_parent_axes(self):
        """Each scatter colorbar must sit beside its own parent axes:
        the x-offset between the two parents equals the x-offset between
        the two colorbars (fig.axes holds [parent0, parent1, cbar0, cbar1])."""
        import matplotlib.pyplot as plt

        random_array = np.random.random((1000, 3))
        df = pd.DataFrame(random_array, columns=["A label", "B label", "C label"])
        fig, axes = plt.subplots(1, 2)
        df.plot.scatter("A label", "B label", c="C label", ax=axes[0])
        df.plot.scatter("A label", "B label", c="C label", ax=axes[1])
        plt.tight_layout()
        points = np.array([ax.get_position().get_points() for ax in fig.axes])
        axes_x_coords = points[:, :, 0]
        parent_distance = axes_x_coords[1, :] - axes_x_coords[0, :]
        colorbar_distance = axes_x_coords[3, :] - axes_x_coords[2, :]
        assert np.isclose(parent_distance, colorbar_distance, atol=1e-7).all()
    @pytest.mark.parametrize("x, y", [("x", "y"), ("y", "x"), ("y", "y")])
    @pytest.mark.slow
    def test_plot_scatter_with_categorical_data(self, x, y):
        """Follow-up to GH 18755: categorical columns must be plottable on
        either (or both) scatter axes."""
        # after fixing GH 18755, should be able to plot categorical data
        df = pd.DataFrame(
            {"x": [1, 2, 3, 4], "y": pd.Categorical(["a", "b", "a", "c"])}
        )
        _check_plot_works(df.plot.scatter, x=x, y=y)
    @pytest.mark.slow
    def test_plot_scatter_with_c(self):
        """``c=`` handling for scatter: column-valued ``c`` defaults to the
        Greys colormap and labels the colorbar; ``colormap=`` overrides it;
        ``colorbar=False`` and solid-color ``c`` suppress the colorbar; and
        an RGBA ndarray passes through to matplotlib untouched."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["x", "y", "z", "four"],
        )
        axes = [df.plot.scatter(x="x", y="y", c="z"), df.plot.scatter(x=0, y=1, c=2)]
        for ax in axes:
            # default to Greys
            assert ax.collections[0].cmap.name == "Greys"
            # n.b. there appears to be no public method
            # to get the colorbar label
            assert ax.collections[0].colorbar._label == "z"
        cm = "cubehelix"
        ax = df.plot.scatter(x="x", y="y", c="z", colormap=cm)
        assert ax.collections[0].cmap.name == cm
        # verify turning off colorbar works
        ax = df.plot.scatter(x="x", y="y", c="z", colorbar=False)
        assert ax.collections[0].colorbar is None
        # verify that we can still plot a solid color
        ax = df.plot.scatter(x=0, y=1, c="red")
        assert ax.collections[0].colorbar is None
        self._check_colors(ax.collections, facecolors=["r"])
        # Ensure that we can pass an np.array straight through to matplotlib,
        # this functionality was accidentally removed previously.
        # See https://github.com/pandas-dev/pandas/issues/8852 for bug report
        #
        # Exercise colormap path and non-colormap path as they are independent
        #
        df = DataFrame({"A": [1, 2], "B": [3, 4]})
        red_rgba = [1.0, 0.0, 0.0, 1.0]
        green_rgba = [0.0, 1.0, 0.0, 1.0]
        rgba_array = np.array([red_rgba, green_rgba])
        ax = df.plot.scatter(x="A", y="B", c=rgba_array)
        # expect the face colors of the points in the non-colormap path to be
        # identical to the values we supplied, normally we'd be on shaky ground
        tm.assert_numpy_array_equal(ax.collections[0].get_facecolor(), rgba_array)
        # The colormap path is only smoke-tested (no value assertions) since
        # its exact colors are dependent on the spring colormap, which may
        # change its colors later.
        float_array = np.array([0.0, 1.0])
        df.plot.scatter(x="A", y="B", c=float_array, cmap="spring")
    def test_scatter_colors(self):
        """Passing both ``c`` and ``color`` is an error; ``c`` alone uses
        the first cycler color; ``color`` alone is honored verbatim."""
        df = DataFrame({"a": [1, 2, 3], "b": [1, 2, 3], "c": [1, 2, 3]})
        with pytest.raises(TypeError):
            df.plot.scatter(x="a", y="b", c="c", color="green")
        default_colors = self._unpack_cycler(self.plt.rcParams)
        ax = df.plot.scatter(x="a", y="b", c="c")
        tm.assert_numpy_array_equal(
            ax.collections[0].get_facecolor()[0],
            np.array(self.colorconverter.to_rgba(default_colors[0])),
        )
        ax = df.plot.scatter(x="a", y="b", color="white")
        tm.assert_numpy_array_equal(
            ax.collections[0].get_facecolor()[0],
            np.array([1, 1, 1, 1], dtype=np.float64),
        )
    @pytest.mark.slow
    def test_plot_bar(self):
        """Smoke-test bar/barh variants (legend, subplots, stacked, many
        columns) and check ``rot``/``fontsize`` propagate to tick labels."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        _check_plot_works(df.plot.bar)
        _check_plot_works(df.plot.bar, legend=False)
        # _check_plot_works adds an ax so catch warning. see GH #13188
        with tm.assert_produces_warning(UserWarning):
            _check_plot_works(df.plot.bar, subplots=True)
        _check_plot_works(df.plot.bar, stacked=True)
        df = DataFrame(
            randn(10, 15), index=list(string.ascii_letters[:10]), columns=range(15)
        )
        _check_plot_works(df.plot.bar)
        df = DataFrame({"a": [0, 1], "b": [1, 0]})
        ax = _check_plot_works(df.plot.bar)
        self._check_ticks_props(ax, xrot=90)
        ax = df.plot.bar(rot=35, fontsize=10)
        self._check_ticks_props(ax, xrot=35, xlabelsize=10, ylabelsize=10)
        ax = _check_plot_works(df.plot.barh)
        self._check_ticks_props(ax, yrot=0)
        ax = df.plot.barh(rot=55, fontsize=11)
        self._check_ticks_props(ax, yrot=55, ylabelsize=11, xlabelsize=11)
    def _check_bar_alignment(
        self,
        df,
        kind="bar",
        stacked=False,
        subplots=False,
        align="center",
        width=0.5,
        position=0.5,
    ):
        """Plot *df* as bars and assert tick/bar geometry on every axes.

        Checks that (a) the axis limits leave a 0.25 margin beyond the
        outermost bar edges (GH 7498), (b) the ticks sit on consecutive
        integers, and (c) the first bar's center (``align='center'``) or
        left/bottom edge (``align='edge'``) lands on the first tick.
        Returns the flattened, visible axes for further inspection.
        """
        axes = df.plot(
            kind=kind,
            stacked=stacked,
            subplots=subplots,
            align=align,
            width=width,
            position=position,
            grid=True,
        )
        axes = self._flatten_visible(axes)
        for ax in axes:
            # pick the value axis and bar-edge accessors for the orientation
            if kind == "bar":
                axis = ax.xaxis
                ax_min, ax_max = ax.get_xlim()
                min_edge = min(p.get_x() for p in ax.patches)
                max_edge = max(p.get_x() + p.get_width() for p in ax.patches)
            elif kind == "barh":
                axis = ax.yaxis
                ax_min, ax_max = ax.get_ylim()
                min_edge = min(p.get_y() for p in ax.patches)
                max_edge = max(p.get_y() + p.get_height() for p in ax.patches)
            else:
                raise ValueError
            # GH 7498
            # compare margins between lim and bar edges
            tm.assert_almost_equal(ax_min, min_edge - 0.25)
            tm.assert_almost_equal(ax_max, max_edge + 0.25)
            p = ax.patches[0]
            # grouped (stacked=False, not subplots) bars span
            # width * n_columns, so the group center scales accordingly
            if kind == "bar" and (stacked is True or subplots is True):
                edge = p.get_x()
                center = edge + p.get_width() * position
            elif kind == "bar" and stacked is False:
                center = p.get_x() + p.get_width() * len(df.columns) * position
                edge = p.get_x()
            elif kind == "barh" and (stacked is True or subplots is True):
                center = p.get_y() + p.get_height() * position
                edge = p.get_y()
            elif kind == "barh" and stacked is False:
                center = p.get_y() + p.get_height() * len(df.columns) * position
                edge = p.get_y()
            else:
                raise ValueError
            # Check the ticks locates on integer
            assert (axis.get_ticklocs() == np.arange(len(df))).all()
            if align == "center":
                # Check whether the bar locates on center
                tm.assert_almost_equal(axis.get_ticklocs()[0], center)
            elif align == "edge":
                # Check whether the bar's edge starts from the tick
                tm.assert_almost_equal(axis.get_ticklocs()[0], edge)
            else:
                raise ValueError
        return axes
    @pytest.mark.slow
    def test_bar_stacked_center(self):
        """Stacked bar/barh with default and wide widths stay centered
        on their ticks (GH 2157 family of alignment checks)."""
        df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
        self._check_bar_alignment(df, kind="bar", stacked=True)
        self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9)
        self._check_bar_alignment(df, kind="barh", stacked=True)
        self._check_bar_alignment(df, kind="barh", stacked=True, width=0.9)
    @pytest.mark.slow
    def test_bar_center(self):
        """Grouped (non-stacked) bar/barh stay centered on their ticks."""
        df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
        self._check_bar_alignment(df, kind="bar", stacked=False)
        self._check_bar_alignment(df, kind="bar", stacked=False, width=0.9)
        self._check_bar_alignment(df, kind="barh", stacked=False)
        self._check_bar_alignment(df, kind="barh", stacked=False, width=0.9)
    @pytest.mark.slow
    def test_bar_subplots_center(self):
        """Subplot bar/barh stay centered on their ticks per axes."""
        df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
        self._check_bar_alignment(df, kind="bar", subplots=True)
        self._check_bar_alignment(df, kind="bar", subplots=True, width=0.9)
        self._check_bar_alignment(df, kind="barh", subplots=True)
        self._check_bar_alignment(df, kind="barh", subplots=True, width=0.9)
    @pytest.mark.slow
    def test_bar_align_single_column(self):
        """A single-column frame must align correctly in every bar mode
        (the grouped/stacked distinction degenerates to one series)."""
        df = DataFrame(randn(5))
        self._check_bar_alignment(df, kind="bar", stacked=False)
        self._check_bar_alignment(df, kind="bar", stacked=True)
        self._check_bar_alignment(df, kind="barh", stacked=False)
        self._check_bar_alignment(df, kind="barh", stacked=True)
        self._check_bar_alignment(df, kind="bar", subplots=True)
        self._check_bar_alignment(df, kind="barh", subplots=True)
    @pytest.mark.slow
    def test_bar_edge(self):
        """``align='edge'`` must place each bar's left/bottom edge on the
        tick for every bar mode and width."""
        df = DataFrame({"A": [3] * 5, "B": list(range(5))}, index=range(5))
        self._check_bar_alignment(df, kind="bar", stacked=True, align="edge")
        self._check_bar_alignment(df, kind="bar", stacked=True, width=0.9, align="edge")
        self._check_bar_alignment(df, kind="barh", stacked=True, align="edge")
        self._check_bar_alignment(
            df, kind="barh", stacked=True, width=0.9, align="edge"
        )
        self._check_bar_alignment(df, kind="bar", stacked=False, align="edge")
        self._check_bar_alignment(
            df, kind="bar", stacked=False, width=0.9, align="edge"
        )
        self._check_bar_alignment(df, kind="barh", stacked=False, align="edge")
        self._check_bar_alignment(
            df, kind="barh", stacked=False, width=0.9, align="edge"
        )
        self._check_bar_alignment(df, kind="bar", subplots=True, align="edge")
        self._check_bar_alignment(
            df, kind="bar", subplots=True, width=0.9, align="edge"
        )
        self._check_bar_alignment(df, kind="barh", subplots=True, align="edge")
        self._check_bar_alignment(
            df, kind="barh", subplots=True, width=0.9, align="edge"
        )
    @pytest.mark.slow
    def test_bar_log_no_subplots(self):
        """``log=True`` on a bar plot produces decade tick locations."""
        expected = np.array([0.1, 1.0, 10.0, 100])
        df = DataFrame({"A": [3] * 5, "B": list(range(1, 6))}, index=range(5))
        ax = df.plot.bar(grid=True, log=True)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), expected)
    @pytest.mark.slow
    def test_bar_log_subplots(self):
        """``log=True`` with subplots applies decade ticks to every axes."""
        expected = np.array([0.1, 1.0, 10.0, 100.0, 1000.0, 1e4])
        ax = DataFrame([Series([200, 300]), Series([300, 500])]).plot.bar(
            log=True, subplots=True
        )
        tm.assert_numpy_array_equal(ax[0].yaxis.get_ticklocs(), expected)
        tm.assert_numpy_array_equal(ax[1].yaxis.get_ticklocs(), expected)
    @pytest.mark.slow
    def test_boxplot(self):
        """Boxplots label ticks with the numeric column names, draw
        ``bp_n_objects`` line artists per box, honor ``rot``, and accept
        explicit ``positions``."""
        df = self.hist_df
        series = df["height"]
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        ax = _check_plot_works(df.plot.box)
        self._check_text_labels(ax.get_xticklabels(), labels)
        tm.assert_numpy_array_equal(
            ax.xaxis.get_ticklocs(), np.arange(1, len(numeric_cols) + 1)
        )
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
        axes = series.plot.box(rot=40)
        self._check_ticks_props(axes, xrot=40, yrot=0)
        tm.close()
        ax = _check_plot_works(series.plot.box)
        positions = np.array([1, 6, 7])
        ax = df.plot.box(positions=positions)
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        self._check_text_labels(ax.get_xticklabels(), labels)
        tm.assert_numpy_array_equal(ax.xaxis.get_ticklocs(), positions)
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
    @pytest.mark.slow
    def test_boxplot_vertical(self):
        """``vert=False`` boxplots label the *y* ticks, support subplot
        layouts with log x-scale, and accept explicit ``positions``."""
        df = self.hist_df
        numeric_cols = df._get_numeric_data().columns
        labels = [pprint_thing(c) for c in numeric_cols]
        # horizontal box: rot applies to the y tick labels
        ax = df.plot.box(rot=50, fontsize=8, vert=False)
        self._check_ticks_props(ax, xrot=0, yrot=50, ylabelsize=8)
        self._check_text_labels(ax.get_yticklabels(), labels)
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.box, subplots=True, vert=False, logx=True)
        self._check_axes_shape(axes, axes_num=3, layout=(1, 3))
        self._check_ax_scales(axes, xaxis="log")
        for ax, label in zip(axes, labels):
            self._check_text_labels(ax.get_yticklabels(), [label])
            assert len(ax.lines) == self.bp_n_objects
        positions = np.array([3, 2, 8])
        ax = df.plot.box(positions=positions, vert=False)
        self._check_text_labels(ax.get_yticklabels(), labels)
        tm.assert_numpy_array_equal(ax.yaxis.get_ticklocs(), positions)
        assert len(ax.lines) == self.bp_n_objects * len(numeric_cols)
    @pytest.mark.slow
    def test_boxplot_return_type(self):
        """``return_type`` accepts 'dict'/'axes'/'both' (default 'axes')
        and rejects anything else with a ValueError."""
        df = DataFrame(
            randn(6, 4),
            index=list(string.ascii_letters[:6]),
            columns=["one", "two", "three", "four"],
        )
        with pytest.raises(ValueError):
            df.plot.box(return_type="NOTATYPE")
        result = df.plot.box(return_type="dict")
        self._check_box_return_type(result, "dict")
        result = df.plot.box(return_type="axes")
        self._check_box_return_type(result, "axes")
        result = df.plot.box()
        self._check_box_return_type(result, "axes")
        result = df.plot.box(return_type="both")
        self._check_box_return_type(result, "both")
    @pytest.mark.slow
    def test_boxplot_subplots_return_type(self):
        """With ``subplots=True`` the boxplot returns a Series keyed by
        column, for the default and each explicit ``return_type``."""
        df = self.hist_df
        result = df.plot.box(subplots=True)
        assert isinstance(result, Series)
        self._check_box_return_type(
            result, None, expected_keys=["height", "weight", "category"]
        )
        for t in ["dict", "axes", "both"]:
            returned = df.plot.box(return_type=t, subplots=True)
            self._check_box_return_type(
                returned,
                t,
                expected_keys=["height", "weight", "category"],
                check_ax_title=False,
            )
    @pytest.mark.slow
    def test_kde_df(self):
        """KDE plots label the legend per column, honor rot/fontsize, and
        support subplots with log y-scale (requires scipy)."""
        df = DataFrame(randn(100, 4))
        ax = _check_plot_works(df.plot, kind="kde")
        expected = [pprint_thing(c) for c in df.columns]
        self._check_legend_labels(ax, labels=expected)
        self._check_ticks_props(ax, xrot=0)
        ax = df.plot(kind="kde", rot=20, fontsize=5)
        self._check_ticks_props(ax, xrot=20, xlabelsize=5, ylabelsize=5)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot, kind="kde", subplots=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        axes = df.plot(kind="kde", logy=True, subplots=True)
        self._check_ax_scales(axes, yaxis="log")
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_missing_vals(self):
        """KDE must not choke on NaN values in the data."""
        df = DataFrame(np.random.uniform(size=(100, 4)))
        df.loc[0, 0] = np.nan
        _check_plot_works(df.plot, kind="kde")
    @pytest.mark.slow
    def test_hist_df(self):
        """Histograms: legend labels per column, subplot/log-y layout,
        cumulative+density topping out at 1.0 (or at the sample count
        without density), and horizontal orientation tick props."""
        from matplotlib.patches import Rectangle

        df = DataFrame(randn(100, 4))
        series = df[0]
        ax = _check_plot_works(df.plot.hist)
        expected = [pprint_thing(c) for c in df.columns]
        self._check_legend_labels(ax, labels=expected)
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.hist, subplots=True, logy=True)
        self._check_axes_shape(axes, axes_num=4, layout=(4, 1))
        self._check_ax_scales(axes, yaxis="log")
        axes = series.plot.hist(rot=40)
        self._check_ticks_props(axes, xrot=40, yrot=0)
        tm.close()
        # cumulative density: final bar reaches 1.0
        ax = series.plot.hist(cumulative=True, bins=4, density=True)
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        tm.assert_almost_equal(rects[-1].get_height(), 1.0)
        tm.close()
        # cumulative counts: last data bar (rects[-1] is the axes patch)
        # reaches the total sample count
        ax = series.plot.hist(cumulative=True, bins=4)
        rects = [x for x in ax.get_children() if isinstance(x, Rectangle)]
        tm.assert_almost_equal(rects[-2].get_height(), 100.0)
        tm.close()
        axes = df.plot.hist(rot=50, fontsize=8, orientation="horizontal")
        self._check_ticks_props(axes, xrot=0, yrot=50, ylabelsize=8)
def _check_box_coord(
self,
patches,
expected_y=None,
expected_h=None,
expected_x=None,
expected_w=None,
):
result_y = np.array([p.get_y() for p in patches])
result_height = np.array([p.get_height() for p in patches])
result_x = np.array([p.get_x() for p in patches])
result_width = np.array([p.get_width() for p in patches])
if expected_y is not None:
tm.assert_numpy_array_equal(result_y, expected_y, check_dtype=False)
if expected_h is not None:
tm.assert_numpy_array_equal(result_height, expected_h, check_dtype=False)
if expected_x is not None:
tm.assert_numpy_array_equal(result_x, expected_x, check_dtype=False)
if expected_w is not None:
tm.assert_numpy_array_equal(result_width, expected_w, check_dtype=False)
    @pytest.mark.slow
    def test_hist_df_coord(self):
        """Histogram patch geometry for vertical and horizontal, plain,
        stacked, and subplot layouts — with and without NaNs (NaNs must not
        shift the per-column bin counts).  Patches are grouped 5 per
        column; stacked plots offset each column's base by the cumulative
        heights of the previous columns."""
        normal_df = DataFrame(
            {
                "A": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([10, 9, 8, 7, 6])),
                "B": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([8, 8, 8, 8, 8])),
                "C": np.repeat(np.array([1, 2, 3, 4, 5]), np.array([6, 7, 8, 9, 10])),
            },
            columns=["A", "B", "C"],
        )
        # same distributions plus NaNs: counts per bin must stay identical
        nan_df = DataFrame(
            {
                "A": np.repeat(
                    np.array([np.nan, 1, 2, 3, 4, 5]), np.array([3, 10, 9, 8, 7, 6])
                ),
                "B": np.repeat(
                    np.array([1, np.nan, 2, 3, 4, 5]), np.array([8, 3, 8, 8, 8, 8])
                ),
                "C": np.repeat(
                    np.array([1, 2, 3, np.nan, 4, 5]), np.array([6, 7, 8, 3, 9, 10])
                ),
            },
            columns=["A", "B", "C"],
        )
        for df in [normal_df, nan_df]:
            # vertical, unstacked: all bars start at y=0
            ax = df.plot.hist(bins=5)
            self._check_box_coord(
                ax.patches[:5],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([6, 7, 8, 9, 10]),
            )
            # vertical, stacked: later columns rest on the running totals
            ax = df.plot.hist(bins=5, stacked=True)
            self._check_box_coord(
                ax.patches[:5],
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_y=np.array([10, 9, 8, 7, 6]),
                expected_h=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_y=np.array([18, 17, 16, 15, 14]),
                expected_h=np.array([6, 7, 8, 9, 10]),
            )
            # subplots: stacking is per-axes, so every base is 0 again
            axes = df.plot.hist(bins=5, stacked=True, subplots=True)
            self._check_box_coord(
                axes[0].patches,
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                axes[1].patches,
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                axes[2].patches,
                expected_y=np.array([0, 0, 0, 0, 0]),
                expected_h=np.array([6, 7, 8, 9, 10]),
            )
            # horizontal: x/width take the role of y/height
            ax = df.plot.hist(bins=5, orientation="horizontal")
            self._check_box_coord(
                ax.patches[:5],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([6, 7, 8, 9, 10]),
            )
            ax = df.plot.hist(bins=5, stacked=True, orientation="horizontal")
            self._check_box_coord(
                ax.patches[:5],
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                ax.patches[5:10],
                expected_x=np.array([10, 9, 8, 7, 6]),
                expected_w=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                ax.patches[10:],
                expected_x=np.array([18, 17, 16, 15, 14]),
                expected_w=np.array([6, 7, 8, 9, 10]),
            )
            axes = df.plot.hist(
                bins=5, stacked=True, subplots=True, orientation="horizontal"
            )
            self._check_box_coord(
                axes[0].patches,
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([10, 9, 8, 7, 6]),
            )
            self._check_box_coord(
                axes[1].patches,
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([8, 8, 8, 8, 8]),
            )
            self._check_box_coord(
                axes[2].patches,
                expected_x=np.array([0, 0, 0, 0, 0]),
                expected_w=np.array([6, 7, 8, 9, 10]),
            )
    @pytest.mark.slow
    def test_plot_int_columns(self):
        """Integer column labels must be plottable with a legend."""
        df = DataFrame(randn(100, 4)).cumsum()
        _check_plot_works(df.plot, legend=True)
    @pytest.mark.slow
    def test_df_legend_labels(self):
        """Legend accumulation across repeated plots onto one axes:
        ``legend=False`` leaves existing entries, ``legend=True`` appends,
        ``legend='reverse'`` appends reversed; ``secondary_y`` columns get
        a '(right)' suffix; scatter ``label`` and ``y=``/``label=`` on an
        indexed frame behave likewise."""
        kinds = ["line", "bar", "barh", "kde", "area", "hist"]
        df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
        df2 = DataFrame(rand(3, 3), columns=["d", "e", "f"])
        df3 = DataFrame(rand(3, 3), columns=["g", "h", "i"])
        df4 = DataFrame(rand(3, 3), columns=["j", "k", "l"])
        for kind in kinds:
            ax = df.plot(kind=kind, legend=True)
            self._check_legend_labels(ax, labels=df.columns)
            # legend=False keeps the existing legend untouched
            ax = df2.plot(kind=kind, legend=False, ax=ax)
            self._check_legend_labels(ax, labels=df.columns)
            ax = df3.plot(kind=kind, legend=True, ax=ax)
            self._check_legend_labels(ax, labels=df.columns.union(df3.columns))
            ax = df4.plot(kind=kind, legend="reverse", ax=ax)
            expected = list(df.columns.union(df3.columns)) + list(reversed(df4.columns))
            self._check_legend_labels(ax, labels=expected)
        # secondary_y columns are suffixed with "(right)"
        ax = df.plot(legend=True, secondary_y="b")
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df2.plot(legend=False, ax=ax)
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df3.plot(kind="bar", legend=True, secondary_y="h", ax=ax)
        self._check_legend_labels(
            ax, labels=["a", "b (right)", "c", "g", "h (right)", "i"]
        )
        # same behavior with a time-series (DatetimeIndex) frame
        ind = date_range("1/1/2014", periods=3)
        df = DataFrame(randn(3, 3), columns=["a", "b", "c"], index=ind)
        df2 = DataFrame(randn(3, 3), columns=["d", "e", "f"], index=ind)
        df3 = DataFrame(randn(3, 3), columns=["g", "h", "i"], index=ind)
        ax = df.plot(legend=True, secondary_y="b")
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df2.plot(legend=False, ax=ax)
        self._check_legend_labels(ax, labels=["a", "b (right)", "c"])
        ax = df3.plot(legend=True, ax=ax)
        self._check_legend_labels(ax, labels=["a", "b (right)", "c", "g", "h", "i"])
        # scatter: explicit `label` controls the legend entry
        ax = df.plot.scatter(x="a", y="b", label="data1")
        self._check_legend_labels(ax, labels=["data1"])
        ax = df2.plot.scatter(x="d", y="e", legend=False, label="data2", ax=ax)
        self._check_legend_labels(ax, labels=["data1"])
        ax = df3.plot.scatter(x="g", y="h", label="data3", ax=ax)
        self._check_legend_labels(ax, labels=["data1", "data3"])
        # `label=` overrides the column name, and the frame is not mutated
        df5 = df.set_index("a")
        ax = df5.plot(y="b")
        self._check_legend_labels(ax, labels=["b"])
        ax = df5.plot(y="b", label="LABEL_b")
        self._check_legend_labels(ax, labels=["LABEL_b"])
        self._check_text_labels(ax.xaxis.get_label(), "a")
        ax = df5.plot(y="c", label="LABEL_c", ax=ax)
        self._check_legend_labels(ax, labels=["LABEL_b", "LABEL_c"])
        assert df5.columns.tolist() == ["b", "c"]
    def test_missing_marker_multi_plots_on_same_ax(self):
        """GH 18222: when several frames are plotted onto one axes, each
        legend entry must keep its own marker regardless of plot order."""
        # GH 18222
        df = pd.DataFrame(
            data=[[1, 1, 1, 1], [2, 2, 4, 8]], columns=["x", "r", "g", "b"]
        )
        fig, ax = self.plt.subplots(nrows=1, ncols=3)
        # Left plot
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[0])
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[0])
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[0])
        self._check_legend_labels(ax[0], labels=["r", "g", "b"])
        self._check_legend_marker(ax[0], expected_markers=["o", "x", "o"])
        # Center plot
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[1])
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[1])
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[1])
        self._check_legend_labels(ax[1], labels=["b", "r", "g"])
        self._check_legend_marker(ax[1], expected_markers=["o", "o", "x"])
        # Right plot
        df.plot(x="x", y="g", linewidth=1, marker="x", color="g", ax=ax[2])
        df.plot(x="x", y="b", linewidth=1, marker="o", color="b", ax=ax[2])
        df.plot(x="x", y="r", linewidth=0, marker="o", color="r", ax=ax[2])
        self._check_legend_labels(ax[2], labels=["g", "b", "r"])
        self._check_legend_marker(ax[2], expected_markers=["x", "o", "o"])
    def test_legend_name(self):
        """The legend title comes from the columns' name(s) (joined with a
        comma for a MultiIndex) and is only refreshed when a plot actually
        draws a legend (``legend=True``)."""
        multi = DataFrame(
            randn(4, 4),
            columns=[np.array(["a", "a", "b", "b"]), np.array(["x", "y", "x", "y"])],
        )
        multi.columns.names = ["group", "individual"]
        ax = multi.plot()
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        df = DataFrame(randn(5, 5))
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        df.columns.name = "new"
        # legend=False: the stale title remains
        ax = df.plot(legend=False, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "group,individual")
        # legend=True: the title picks up the new columns name
        ax = df.plot(legend=True, ax=ax)
        leg_title = ax.legend_.get_title()
        self._check_text_labels(leg_title, "new")
    @pytest.mark.slow
    def test_no_legend(self):
        """``legend=False`` must hide the legend for every plot kind."""
        kinds = ["line", "bar", "barh", "kde", "area", "hist"]
        df = DataFrame(rand(3, 3), columns=["a", "b", "c"])
        for kind in kinds:
            ax = df.plot(kind=kind, legend=False)
            self._check_legend_labels(ax, visible=False)
    @pytest.mark.slow
    def test_style_by_column(self):
        """``style=`` given as a dict or list assigns the marker to the
        corresponding column's line (partial specs style only the columns
        they cover)."""
        import matplotlib.pyplot as plt

        fig = plt.gcf()
        df = DataFrame(randn(100, 3))
        for markers in [
            {0: "^", 1: "+", 2: "o"},
            {0: "^", 1: "+"},
            ["^", "+", "o"],
            ["^", "+"],
        ]:
            fig.clf()
            fig.add_subplot(111)
            ax = df.plot(style=markers)
            for i, l in enumerate(ax.get_lines()[: len(markers)]):
                assert l.get_marker() == markers[i]
    @pytest.mark.slow
    def test_line_label_none(self):
        """An unnamed Series plots with no legend by default; with
        ``legend=True`` its legend entry reads 'None'."""
        s = Series([1, 2])
        ax = s.plot()
        assert ax.get_legend() is None
        ax = s.plot(legend=True)
        assert ax.get_legend().get_texts()[0].get_text() == "None"
    @pytest.mark.slow
    def test_line_colors(self):
        """Line colors: per-column color strings/lists, colormaps by name
        or object, a single color for a 1-column frame or broadcast across
        columns, full hex values (GH 10299), and rejection of 3-digit
        shorthand hex colors."""
        from matplotlib import cm

        custom_colors = "rgcby"
        df = DataFrame(randn(5, 5))
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        # the same color spec must yield the same colors on a fresh plot
        ax2 = df.plot(color=custom_colors)
        lines2 = ax2.get_lines()
        for l1, l2 in zip(ax.get_lines(), lines2):
            assert l1.get_color() == l2.get_color()
        tm.close()
        ax = df.plot(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        ax = df.plot(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        ax = df.loc[:, [0]].plot(color="DodgerBlue")
        self._check_colors(ax.lines, linecolors=["DodgerBlue"])
        ax = df.plot(color="red")
        self._check_colors(ax.get_lines(), linecolors=["red"] * 5)
        tm.close()
        # GH 10299
        custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
        ax = df.plot(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()
        with pytest.raises(ValueError):
            # Color contains shorthand hex value results in ValueError
            custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"]
            # Forced show plot
            _check_plot_works(df.plot, color=custom_colors)
@pytest.mark.slow
def test_dont_modify_colors(self):
colors = ["r", "g", "b"]
pd.DataFrame(np.random.rand(10, 2)).plot(color=colors)
assert len(colors) == 3
    @pytest.mark.slow
    def test_line_colors_and_styles_subplots(self):
        """GH 9894: with ``subplots=True`` each axes' single line takes the
        color (or style-embedded color) for its own column — from the
        default cycler, single colors, color strings/lists, full hex
        values, or a colormap; 3-digit shorthand hex is rejected."""
        # GH 9894
        from matplotlib import cm

        default_colors = self._unpack_cycler(self.plt.rcParams)
        df = DataFrame(randn(5, 5))
        axes = df.plot(subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            c = [c]
            self._check_colors(ax.get_lines(), linecolors=c)
        tm.close()
        # single color char
        axes = df.plot(subplots=True, color="k")
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["k"])
        tm.close()
        # single color str
        axes = df.plot(subplots=True, color="green")
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["green"])
        tm.close()
        custom_colors = "rgcby"
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        axes = df.plot(color=list(custom_colors), subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        # GH 10299
        custom_colors = ["#FF0000", "#0000FF", "#FFFF00", "#000000", "#FFFFFF"]
        axes = df.plot(color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
        with pytest.raises(ValueError):
            # Color contains shorthand hex value results in ValueError
            custom_colors = ["#F00", "#00F", "#FF0", "#000", "#FFF"]
            # Forced show plot
            # _check_plot_works adds an ax so catch warning. see GH #13188
            with tm.assert_produces_warning(UserWarning):
                _check_plot_works(df.plot, color=custom_colors, subplots=True)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        for cmap in ["jet", cm.jet]:
            axes = df.plot(colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()
        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.loc[:, [0]].plot(color="DodgerBlue", subplots=True)
        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])
        # single character style
        axes = df.plot(style="r", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["r"])
        tm.close()
        # list of styles
        styles = list("rgcby")
        axes = df.plot(style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @pytest.mark.slow
    def test_area_colors(self):
        """Area plots color lines, stacked polygons, and legend handles alike."""
        from matplotlib import cm
        from matplotlib.collections import PolyCollection

        # explicit per-column color list
        custom_colors = "rgcby"
        df = DataFrame(rand(5, 5))
        ax = df.plot.area(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        # the stacked areas are drawn as PolyCollection children of the axes
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=custom_colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=custom_colors)
        # stacked (default) areas carry no explicit alpha on the legend handles
        for h in handles:
            assert h.get_alpha() is None
        tm.close()

        # colormap given by name
        ax = df.plot.area(colormap="jet")
        jet_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        self._check_colors(poly, facecolors=jet_colors)
        handles, labels = ax.get_legend_handles_labels()
        self._check_colors(handles, facecolors=jet_colors)
        for h in handles:
            assert h.get_alpha() is None
        tm.close()

        # When stacked=False, alpha is set to 0.5
        ax = df.plot.area(colormap=cm.jet, stacked=False)
        self._check_colors(ax.get_lines(), linecolors=jet_colors)
        poly = [o for o in ax.get_children() if isinstance(o, PolyCollection)]
        jet_with_alpha = [(c[0], c[1], c[2], 0.5) for c in jet_colors]
        self._check_colors(poly, facecolors=jet_with_alpha)
        handles, labels = ax.get_legend_handles_labels()
        linecolors = jet_with_alpha
        self._check_colors(handles[: len(jet_colors)], linecolors=linecolors)
        for h in handles:
            assert h.get_alpha() == 0.5
    @pytest.mark.slow
    def test_hist_colors(self):
        """Histogram patches honor the default cycle, color lists, and colormaps."""
        default_colors = self._unpack_cycler(self.plt.rcParams)

        df = DataFrame(randn(5, 5))
        ax = df.plot.hist()
        # each column contributes 10 bar patches; sample the first of each column
        self._check_colors(ax.patches[::10], facecolors=default_colors[:5])
        tm.close()

        custom_colors = "rgcby"
        ax = df.plot.hist(color=custom_colors)
        self._check_colors(ax.patches[::10], facecolors=custom_colors)
        tm.close()

        from matplotlib import cm
        # Test str -> colormap functionality
        ax = df.plot.hist(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
        tm.close()

        # Test colormap functionality
        ax = df.plot.hist(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, 5)]
        self._check_colors(ax.patches[::10], facecolors=rgba_colors)
        tm.close()

        # single-column frame: a scalar color string is applied to its patches
        ax = df.loc[:, [0]].plot.hist(color="DodgerBlue")
        self._check_colors([ax.patches[0]], facecolors=["DodgerBlue"])

        ax = df.plot(kind="hist", color="green")
        self._check_colors(ax.patches[::10], facecolors=["green"] * 5)
        tm.close()
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_colors(self):
        """KDE line colors follow explicit color lists and colormaps."""
        from matplotlib import cm

        custom_colors = "rgcby"
        df = DataFrame(rand(5, 5))

        ax = df.plot.kde(color=custom_colors)
        self._check_colors(ax.get_lines(), linecolors=custom_colors)
        tm.close()

        # colormap given by name
        ax = df.plot.kde(colormap="jet")
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
        tm.close()

        # colormap given as object
        ax = df.plot.kde(colormap=cm.jet)
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        self._check_colors(ax.get_lines(), linecolors=rgba_colors)
    @pytest.mark.slow
    @td.skip_if_no_scipy
    def test_kde_colors_and_styles_subplots(self):
        """With subplots=True, KDE plots distribute colors/styles across axes."""
        from matplotlib import cm

        default_colors = self._unpack_cycler(self.plt.rcParams)

        df = DataFrame(randn(5, 5))

        # default cycle: one color per subplot
        axes = df.plot(kind="kde", subplots=True)
        for ax, c in zip(axes, list(default_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()

        # single color char
        axes = df.plot(kind="kde", color="k", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["k"])
        tm.close()

        # single color str
        axes = df.plot(kind="kde", color="red", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["red"])
        tm.close()

        custom_colors = "rgcby"
        axes = df.plot(kind="kde", color=custom_colors, subplots=True)
        for ax, c in zip(axes, list(custom_colors)):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()

        # colormap by name and as object behave the same
        rgba_colors = [cm.jet(n) for n in np.linspace(0, 1, len(df))]
        for cmap in ["jet", cm.jet]:
            axes = df.plot(kind="kde", colormap=cmap, subplots=True)
            for ax, c in zip(axes, rgba_colors):
                self._check_colors(ax.get_lines(), linecolors=[c])
            tm.close()

        # make color a list if plotting one column frame
        # handles cases like df.plot(color='DodgerBlue')
        axes = df.loc[:, [0]].plot(kind="kde", color="DodgerBlue", subplots=True)
        self._check_colors(axes[0].lines, linecolors=["DodgerBlue"])

        # single character style
        axes = df.plot(kind="kde", style="r", subplots=True)
        for ax in axes:
            self._check_colors(ax.get_lines(), linecolors=["r"])
        tm.close()

        # list of styles
        styles = list("rgcby")
        axes = df.plot(kind="kde", style=styles, subplots=True)
        for ax, c in zip(axes, styles):
            self._check_colors(ax.get_lines(), linecolors=[c])
        tm.close()
    @pytest.mark.slow
    def test_boxplot_colors(self):
        """Boxplots color boxes/whiskers/medians/caps/fliers per the given spec."""

        def _check_colors(bp, box_c, whiskers_c, medians_c, caps_c="k", fliers_c=None):
            # Assert each artist family in the boxplot dict has the expected color.
            # TODO: outside this func?
            if fliers_c is None:
                fliers_c = "k"
            self._check_colors(bp["boxes"], linecolors=[box_c] * len(bp["boxes"]))
            self._check_colors(
                bp["whiskers"], linecolors=[whiskers_c] * len(bp["whiskers"])
            )
            self._check_colors(
                bp["medians"], linecolors=[medians_c] * len(bp["medians"])
            )
            self._check_colors(bp["fliers"], linecolors=[fliers_c] * len(bp["fliers"]))
            self._check_colors(bp["caps"], linecolors=[caps_c] * len(bp["caps"]))

        default_colors = self._unpack_cycler(self.plt.rcParams)

        df = DataFrame(randn(5, 5))
        # default: boxes/whiskers use the first cycle color, medians the third
        bp = df.plot.box(return_type="dict")
        _check_colors(bp, default_colors[0], default_colors[0], default_colors[2])
        tm.close()

        # full dict of colors plus a flier symbol
        dict_colors = dict(
            boxes="#572923", whiskers="#982042", medians="#804823", caps="#123456"
        )
        bp = df.plot.box(color=dict_colors, sym="r+", return_type="dict")
        _check_colors(
            bp,
            dict_colors["boxes"],
            dict_colors["whiskers"],
            dict_colors["medians"],
            dict_colors["caps"],
            "r",
        )
        tm.close()

        # partial colors
        dict_colors = dict(whiskers="c", medians="m")
        bp = df.plot.box(color=dict_colors, return_type="dict")
        _check_colors(bp, default_colors[0], "c", "m")
        tm.close()

        from matplotlib import cm
        # Test str -> colormap functionality
        bp = df.plot.box(colormap="jet", return_type="dict")
        jet_colors = [cm.jet(n) for n in np.linspace(0, 1, 3)]
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()

        # Test colormap functionality
        bp = df.plot.box(colormap=cm.jet, return_type="dict")
        _check_colors(bp, jet_colors[0], jet_colors[0], jet_colors[2])
        tm.close()

        # string color is applied to all artists except fliers
        bp = df.plot.box(color="DodgerBlue", return_type="dict")
        _check_colors(bp, "DodgerBlue", "DodgerBlue", "DodgerBlue", "DodgerBlue")

        # tuple is also applied to all artists except fliers
        bp = df.plot.box(color=(0, 1, 0), sym="#123456", return_type="dict")
        _check_colors(bp, (0, 1, 0), (0, 1, 0), (0, 1, 0), (0, 1, 0), "#123456")

        with pytest.raises(ValueError):
            # Color contains invalid key results in ValueError
            df.plot.box(color=dict(boxes="red", xxxx="blue"))
def test_default_color_cycle(self):
import matplotlib.pyplot as plt
import cycler
colors = list("rgbk")
plt.rcParams["axes.prop_cycle"] = cycler.cycler("color", colors)
df = DataFrame(randn(5, 3))
ax = df.plot()
expected = self._unpack_cycler(plt.rcParams)[:3]
self._check_colors(ax.get_lines(), linecolors=expected)
def test_unordered_ts(self):
df = DataFrame(
np.array([3.0, 2.0, 1.0]),
index=[date(2012, 10, 1), date(2012, 9, 1), date(2012, 8, 1)],
columns=["test"],
)
ax = df.plot()
xticks = ax.lines[0].get_xdata()
assert xticks[0] < xticks[1]
ydata = ax.lines[0].get_ydata()
tm.assert_numpy_array_equal(ydata, np.array([1.0, 2.0, 3.0]))
@td.skip_if_no_scipy
def test_kind_both_ways(self):
df = DataFrame({"x": [1, 2, 3]})
for kind in plotting.PlotAccessor._common_kinds:
df.plot(kind=kind)
getattr(df.plot, kind)()
for kind in ["scatter", "hexbin"]:
df.plot("x", "x", kind=kind)
getattr(df.plot, kind)("x", "x")
def test_all_invalid_plot_data(self):
df = DataFrame(list("abcd"))
for kind in plotting.PlotAccessor._common_kinds:
msg = "no numeric data to plot"
with pytest.raises(TypeError, match=msg):
df.plot(kind=kind)
    @pytest.mark.slow
    def test_partially_invalid_plot_data(self):
        """Frames mixing numeric and string data raise TypeError when plotted."""
        with tm.RNGContext(42):
            df = DataFrame(randn(10, 2), dtype=object)
            # overwrite roughly half the rows with strings
            df[np.random.rand(df.shape[0]) > 0.5] = "a"
            for kind in plotting.PlotAccessor._common_kinds:
                msg = "no numeric data to plot"
                with pytest.raises(TypeError, match=msg):
                    df.plot(kind=kind)

        with tm.RNGContext(42):
            # area plot doesn't support positive/negative mixed data
            kinds = ["area"]
            df = DataFrame(rand(10, 2), dtype=object)
            df[np.random.rand(df.shape[0]) > 0.5] = "a"
            for kind in kinds:
                with pytest.raises(TypeError):
                    df.plot(kind=kind)
def test_invalid_kind(self):
df = DataFrame(randn(10, 2))
with pytest.raises(ValueError):
df.plot(kind="aasdf")
@pytest.mark.parametrize(
"x,y,lbl",
[
(["B", "C"], "A", "a"),
(["A"], ["B", "C"], ["b", "c"]),
("A", ["B", "C"], "badlabel"),
],
)
def test_invalid_xy_args(self, x, y, lbl):
df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
with pytest.raises(ValueError):
df.plot(x=x, y=y, label=lbl)
@pytest.mark.parametrize("x,y", [("A", "B"), (["A"], "B")])
def test_invalid_xy_args_dup_cols(self, x, y):
df = DataFrame([[1, 3, 5], [2, 4, 6]], columns=list("AAB"))
with pytest.raises(ValueError):
df.plot(x=x, y=y)
    @pytest.mark.parametrize(
        "x,y,lbl,colors",
        [
            ("A", ["B"], ["b"], ["red"]),
            ("A", ["B", "C"], ["b", "c"], ["red", "blue"]),
            (0, [1, 2], ["bokeh", "cython"], ["green", "yellow"]),
        ],
    )
    def test_y_listlike(self, x, y, lbl, colors):
        """List-like y produces one line per entry with matching labels/colors."""
        df = DataFrame({"A": [1, 2], "B": [3, 4], "C": [5, 6]})
        _check_plot_works(df.plot, x="A", y=y, label=lbl)

        ax = df.plot(x=x, y=y, label=lbl, color=colors)
        assert len(ax.lines) == len(y)
        self._check_colors(ax.get_lines(), linecolors=colors)
@pytest.mark.parametrize("x,y,colnames", [(0, 1, ["A", "B"]), (1, 0, [0, 1])])
def test_xy_args_integer(self, x, y, colnames):
df = DataFrame({"A": [1, 2], "B": [3, 4]})
df.columns = colnames
_check_plot_works(df.plot, x=x, y=y)
    @pytest.mark.slow
    def test_hexbin_basic(self):
        """hexbin draws a single collection; subplots=True still yields one axes."""
        df = self.hexbin_df

        ax = df.plot.hexbin(x="A", y="B", gridsize=10)
        assert len(ax.collections) == 1

        # hexbin ignores subplots but still attaches the colorbar axes (2 total)
        axes = df.plot.hexbin(x="A", y="B", subplots=True)
        assert len(axes[0].figure.axes) == 2
        self._check_axes_shape(axes, axes_num=1, layout=(1, 1))
@pytest.mark.slow
def test_hexbin_with_c(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", C="C")
assert len(ax.collections) == 1
ax = df.plot.hexbin(x="A", y="B", C="C", reduce_C_function=np.std)
assert len(ax.collections) == 1
@pytest.mark.slow
def test_hexbin_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B")
assert ax.collections[0].cmap.name == "BuGn"
cm = "cubehelix"
ax = df.plot.hexbin(x="A", y="B", colormap=cm)
assert ax.collections[0].cmap.name == cm
@pytest.mark.slow
def test_no_color_bar(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", colorbar=None)
assert ax.collections[0].colorbar is None
@pytest.mark.slow
def test_allow_cmap(self):
df = self.hexbin_df
ax = df.plot.hexbin(x="A", y="B", cmap="YlGn")
assert ax.collections[0].cmap.name == "YlGn"
with pytest.raises(TypeError):
df.plot.hexbin(x="A", y="B", cmap="YlGn", colormap="BuGn")
    @pytest.mark.slow
    def test_pie_df(self):
        """Pie plots require y (or subplots) and honor labels/colors per wedge."""
        df = DataFrame(
            np.random.rand(5, 3),
            columns=["X", "Y", "Z"],
            index=["a", "b", "c", "d", "e"],
        )
        # y is mandatory for a single pie
        with pytest.raises(ValueError):
            df.plot.pie()

        ax = _check_plot_works(df.plot.pie, y="Y")
        self._check_text_labels(ax.texts, df.index)

        # y may also be given positionally as a column number
        ax = _check_plot_works(df.plot.pie, y=2)
        self._check_text_labels(ax.texts, df.index)

        # subplots: one pie per column, labeled by the column name
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(df.plot.pie, subplots=True)
        assert len(axes) == len(df.columns)
        for ax in axes:
            self._check_text_labels(ax.texts, df.index)
        for ax, ylabel in zip(axes, df.columns):
            assert ax.get_ylabel() == ylabel

        # explicit wedge labels and colors
        labels = ["A", "B", "C", "D", "E"]
        color_args = ["r", "g", "b", "c", "m"]
        with tm.assert_produces_warning(UserWarning):
            axes = _check_plot_works(
                df.plot.pie, subplots=True, labels=labels, colors=color_args
            )
        assert len(axes) == len(df.columns)
        for ax in axes:
            self._check_text_labels(ax.texts, labels)
            self._check_colors(ax.patches, facecolors=color_args)
    def test_pie_df_nan(self):
        """NaN wedges get a blank label but the legend keeps the other entries."""
        df = DataFrame(np.random.rand(4, 4))
        # put one NaN on the diagonal so each column has exactly one missing wedge
        for i in range(4):
            df.iloc[i, i] = np.nan
        fig, axes = self.plt.subplots(ncols=4)
        df.plot.pie(subplots=True, ax=axes, legend=True)

        base_expected = ["0", "1", "2", "3"]
        for i, ax in enumerate(axes):
            expected = list(base_expected)
            # force copy
            expected[i] = ""
            result = [x.get_text() for x in ax.texts]
            assert result == expected

            # legend labels
            # NaN's not included in legend with subplots
            # see https://github.com/pandas-dev/pandas/issues/8390
            assert [x.get_text() for x in ax.get_legend().get_texts()] == base_expected[
                :i
            ] + base_expected[i + 1 :]
    @pytest.mark.slow
    def test_errorbar_plot(self):
        """yerr/xerr are accepted as frames, dicts, arrays, scalars or column names."""
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            df = DataFrame(d)
            d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}
            df_err = DataFrame(d_err)

            # check line plots
            ax = _check_plot_works(df.plot, yerr=df_err, logy=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, logx=True, logy=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)
            ax = _check_plot_works(df.plot, yerr=df_err, loglog=True)
            self._check_has_errorbars(ax, xerr=0, yerr=2)

            kinds = ["line", "bar", "barh"]
            for kind in kinds:
                # yerr as a single Series / dict / DataFrame, plus xerr combos
                ax = _check_plot_works(df.plot, yerr=df_err["x"], kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(df.plot, yerr=d_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(df.plot, yerr=df_err, xerr=df_err, kind=kind)
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                ax = _check_plot_works(
                    df.plot, yerr=df_err["x"], xerr=df_err["x"], kind=kind
                )
                self._check_has_errorbars(ax, xerr=2, yerr=2)
                ax = _check_plot_works(df.plot, xerr=0.2, yerr=0.2, kind=kind)
                self._check_has_errorbars(ax, xerr=2, yerr=2)

                # _check_plot_works adds an ax so catch warning. see GH #13188
                axes = _check_plot_works(
                    df.plot, yerr=df_err, xerr=df_err, subplots=True, kind=kind
                )
                self._check_has_errorbars(axes, xerr=1, yerr=1)

            ax = _check_plot_works(
                (df + 1).plot, yerr=df_err, xerr=df_err, kind="bar", log=True
            )
            self._check_has_errorbars(ax, xerr=2, yerr=2)

            # yerr is raw error values
            ax = _check_plot_works(df["y"].plot, yerr=np.ones(12) * 0.4)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
            ax = _check_plot_works(df.plot, yerr=np.ones((2, 12)) * 0.4)
            self._check_has_errorbars(ax, xerr=0, yerr=2)

            # yerr is column name
            for yerr in ["yerr", "誤差"]:
                s_df = df.copy()
                s_df[yerr] = np.ones(12) * 0.2
                ax = _check_plot_works(s_df.plot, yerr=yerr)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(s_df.plot, y="y", x="x", yerr=yerr)
                self._check_has_errorbars(ax, xerr=0, yerr=1)

            # wrong-length or non-numeric error values are rejected
            with pytest.raises(ValueError):
                df.plot(yerr=np.random.randn(11))

            df_err = DataFrame({"x": ["zzz"] * 12, "y": ["zzz"] * 12})
            with pytest.raises((ValueError, TypeError)):
                df.plot(yerr=df_err)
@pytest.mark.xfail(reason="Iterator is consumed", raises=ValueError)
@pytest.mark.slow
def test_errorbar_plot_iterator(self):
with warnings.catch_warnings():
d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
df = DataFrame(d)
# yerr is iterator
ax = _check_plot_works(df.plot, yerr=itertools.repeat(0.1, len(df)))
self._check_has_errorbars(ax, xerr=0, yerr=2)
@pytest.mark.slow
def test_errorbar_with_integer_column_names(self):
# test with integer column names
df = DataFrame(np.random.randn(10, 2))
df_err = DataFrame(np.random.randn(10, 2))
ax = _check_plot_works(df.plot, yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=2)
ax = _check_plot_works(df.plot, y=0, yerr=1)
self._check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_with_partial_columns(self):
        """Error frames covering only some columns attach bars to those columns."""
        df = DataFrame(np.random.randn(10, 3))
        # errors only for columns 0 and 2
        df_err = DataFrame(np.random.randn(10, 2), columns=[0, 2])
        kinds = ["line", "bar"]
        for kind in kinds:
            ax = _check_plot_works(df.plot, yerr=df_err, kind=kind)
            self._check_has_errorbars(ax, xerr=0, yerr=2)

        # same behavior with a datetime index
        ix = date_range("1/1/2000", periods=10, freq="M")
        df.set_index(ix, inplace=True)
        df_err.set_index(ix, inplace=True)
        ax = _check_plot_works(df.plot, yerr=df_err, kind="line")
        self._check_has_errorbars(ax, xerr=0, yerr=2)

        # error keys that don't match data columns ('z') are ignored
        d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
        df = DataFrame(d)
        d_err = {"x": np.ones(12) * 0.2, "z": np.ones(12) * 0.4}
        df_err = DataFrame(d_err)
        for err in [d_err, df_err]:
            ax = _check_plot_works(df.plot, yerr=err)
            self._check_has_errorbars(ax, xerr=0, yerr=1)
    @pytest.mark.slow
    def test_errorbar_timeseries(self):
        """Error bars work on time-series plots for line/bar/barh kinds."""
        with warnings.catch_warnings():
            d = {"x": np.arange(12), "y": np.arange(12, 0, -1)}
            d_err = {"x": np.ones(12) * 0.2, "y": np.ones(12) * 0.4}

            # check time-series plots
            ix = date_range("1/1/2000", "1/1/2001", freq="M")
            tdf = DataFrame(d, index=ix)
            tdf_err = DataFrame(d_err, index=ix)

            kinds = ["line", "bar", "barh"]
            for kind in kinds:
                # yerr as DataFrame / dict / Series / column name
                ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(tdf.plot, yerr=d_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)
                ax = _check_plot_works(tdf.plot, y="y", yerr=tdf_err["x"], kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
                ax = _check_plot_works(tdf.plot, y="y", yerr="x", kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=1)
                ax = _check_plot_works(tdf.plot, yerr=tdf_err, kind=kind)
                self._check_has_errorbars(ax, xerr=0, yerr=2)

                # _check_plot_works adds an ax so catch warning. see GH #13188
                axes = _check_plot_works(
                    tdf.plot, kind=kind, yerr=tdf_err, subplots=True
                )
                self._check_has_errorbars(axes, xerr=0, yerr=1)
    def test_errorbar_asymmetrical(self):
        """Asymmetrical (2, N)-per-column errors draw correct bars; bad shape raises."""
        np.random.seed(0)
        # shape (ncols, 2, npoints): lower/upper error per column per point
        err = np.random.rand(3, 2, 5)

        # each column is [0, 1, 2, 3, 4], [3, 4, 5, 6, 7]...
        df = DataFrame(np.arange(15).reshape(3, 5)).T

        ax = df.plot(yerr=err, xerr=err / 2)

        # first point of the first column: bar endpoints are value -/+ err
        yerr_0_0 = ax.collections[1].get_paths()[0].vertices[:, 1]
        expected_0_0 = err[0, :, 0] * np.array([-1, 1])
        tm.assert_almost_equal(yerr_0_0, expected_0_0)

        # transposed error array has the wrong shape
        with pytest.raises(ValueError):
            df.plot(yerr=err.T)

        tm.close()
def test_table(self):
df = DataFrame(np.random.rand(10, 3), index=list(string.ascii_letters[:10]))
_check_plot_works(df.plot, table=True)
_check_plot_works(df.plot, table=df)
ax = df.plot()
assert len(ax.tables) == 0
plotting.table(ax, df.T)
assert len(ax.tables) == 1
def test_errorbar_scatter(self):
df = DataFrame(np.random.randn(5, 2), index=range(5), columns=["x", "y"])
df_err = DataFrame(
np.random.randn(5, 2) / 5, index=range(5), columns=["x", "y"]
)
ax = _check_plot_works(df.plot.scatter, x="x", y="y")
self._check_has_errorbars(ax, xerr=0, yerr=0)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=0)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", yerr=df_err)
self._check_has_errorbars(ax, xerr=0, yerr=1)
ax = _check_plot_works(df.plot.scatter, x="x", y="y", xerr=df_err, yerr=df_err)
self._check_has_errorbars(ax, xerr=1, yerr=1)
def _check_errorbar_color(containers, expected, has_err="has_xerr"):
lines = []
errs = [c.lines for c in ax.containers if getattr(c, has_err, False)][0]
for el in errs:
if is_list_like(el):
lines.extend(el)
else:
lines.append(el)
err_lines = [x for x in lines if x in ax.collections]
self._check_colors(
err_lines, linecolors=np.array([expected] * len(err_lines))
)
# GH 8081
df = DataFrame(np.random.randn(10, 5), columns=["a", "b", "c", "d", "e"])
ax = df.plot.scatter(x="a", y="b", xerr="d", yerr="e", c="red")
self._check_has_errorbars(ax, xerr=1, yerr=1)
_check_errorbar_color(ax.containers, "red", has_err="has_xerr")
_check_errorbar_color(ax.containers, "red", has_err="has_yerr")
ax = df.plot.scatter(x="a", y="b", yerr="e", color="green")
self._check_has_errorbars(ax, xerr=0, yerr=1)
_check_errorbar_color(ax.containers, "green", has_err="has_yerr")
    @pytest.mark.slow
    def test_sharex_and_ax(self):
        """sharex=True with user-supplied gridspec axes hides only the right labels."""
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axis in fig.get_axis() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt

        plt.close("all")
        gs, axes = _generate_4_axes_via_gridspec()

        df = DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6],
                "b": [1, 2, 3, 4, 5, 6],
                "c": [1, 2, 3, 4, 5, 6],
                "d": [1, 2, 3, 4, 5, 6],
            }
        )

        def _check(axes):
            # top row (axes 0 and 2) must have hidden x tick labels,
            # bottom row (axes 1 and 3) visible ones
            for ax in axes:
                assert len(ax.lines) == 1
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[0], axes[2]]:
                self._check_visible(ax.get_xticklabels(), visible=False)
                self._check_visible(ax.get_xticklabels(minor=True), visible=False)
            for ax in [axes[1], axes[3]]:
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)

        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharex=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()

        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        _check(axes)
        tm.close()

        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @pytest.mark.slow
    def test_sharey_and_ax(self):
        """sharey=True with user-supplied gridspec axes hides only the right labels."""
        # https://github.com/pandas-dev/pandas/issues/9737 using gridspec,
        # the axis in fig.get_axis() are sorted differently than pandas
        # expected them, so make sure that only the right ones are removed
        import matplotlib.pyplot as plt

        gs, axes = _generate_4_axes_via_gridspec()

        df = DataFrame(
            {
                "a": [1, 2, 3, 4, 5, 6],
                "b": [1, 2, 3, 4, 5, 6],
                "c": [1, 2, 3, 4, 5, 6],
                "d": [1, 2, 3, 4, 5, 6],
            }
        )

        def _check(axes):
            # left column (axes 0 and 1) keeps y tick labels,
            # right column (axes 2 and 3) has them hidden
            for ax in axes:
                assert len(ax.lines) == 1
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            for ax in [axes[0], axes[1]]:
                self._check_visible(ax.get_yticklabels(), visible=True)
            for ax in [axes[2], axes[3]]:
                self._check_visible(ax.get_yticklabels(), visible=False)

        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax, sharey=True)
        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()

        gs, axes = _generate_4_axes_via_gridspec()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharey=True)

        gs.tight_layout(plt.gcf())
        _check(axes)
        tm.close()

        gs, axes = _generate_4_axes_via_gridspec()
        # without sharex, no labels should be touched!
        for ax in axes:
            df.plot(x="a", y="b", title="title", ax=ax)
        gs.tight_layout(plt.gcf())
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
    @td.skip_if_no_scipy
    def test_memory_leak(self):
        """Closing figures must release the plot objects (no lingering references)."""
        import weakref
        import gc

        results = {}
        for kind in plotting.PlotAccessor._all_kinds:

            args = {}
            if kind in ["hexbin", "scatter", "pie"]:
                df = self.hexbin_df
                args = {"x": "A", "y": "B"}
            elif kind == "area":
                df = self.tdf.abs()
            else:
                df = self.tdf

            # Use a weakref so we can see if the object gets collected without
            # also preventing it from being collected
            results[kind] = weakref.proxy(df.plot(kind=kind, **args))

        # have matplotlib delete all the figures
        tm.close()
        # force a garbage collection
        gc.collect()
        for key in results:
            # check that every plot was collected
            with pytest.raises(ReferenceError):
                # need to actually access something to get an error
                results[key].lines
    @pytest.mark.slow
    def test_df_subplots_patterns_minorticks(self):
        """Shared subplots hide the upper x labels; unshared ones show all. GH 10657"""
        # GH 10657
        import matplotlib.pyplot as plt

        df = DataFrame(
            np.random.randn(10, 2),
            index=date_range("1/1/2000", periods=10),
            columns=list("AB"),
        )

        # shared subplots
        fig, axes = plt.subplots(2, 1, sharex=True)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()

        # sharing requested via the plot call instead of plt.subplots
        fig, axes = plt.subplots(2, 1)
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of 1st ax must be hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()

        # not shared
        fig, axes = plt.subplots(2, 1)
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
    @pytest.mark.slow
    def test_df_gridspec_patterns(self):
        """Tick-label visibility with gridspec layouts and shared axes. GH 10819"""
        # GH 10819
        import matplotlib.pyplot as plt
        import matplotlib.gridspec as gridspec

        ts = Series(np.random.randn(10), index=date_range("1/1/2000", periods=10))

        df = DataFrame(np.random.randn(10, 2), index=ts.index, columns=list("AB"))

        def _get_vertical_grid():
            # two axes stacked vertically (2/3 top, 1/3 bottom)
            gs = gridspec.GridSpec(3, 1)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:2, :])
            ax2 = fig.add_subplot(gs[2, :])
            return ax1, ax2

        def _get_horizontal_grid():
            # two axes side by side (2/3 left, 1/3 right)
            gs = gridspec.GridSpec(1, 3)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:, :2])
            ax2 = fig.add_subplot(gs[:, 2])
            return ax1, ax2

        for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
            ax1 = ts.plot(ax=ax1)
            assert len(ax1.lines) == 1
            ax2 = df.plot(ax=ax2)
            assert len(ax2.lines) == 2
            for ax in [ax1, ax2]:
                self._check_visible(ax.get_yticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            tm.close()

        # subplots=True
        for ax1, ax2 in [_get_vertical_grid(), _get_horizontal_grid()]:
            axes = df.plot(subplots=True, ax=[ax1, ax2])
            assert len(ax1.lines) == 1
            assert len(ax2.lines) == 1
            for ax in axes:
                self._check_visible(ax.get_yticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(), visible=True)
                self._check_visible(ax.get_xticklabels(minor=True), visible=True)
            tm.close()

        # vertical / subplots / sharex=True / sharey=True
        ax1, ax2 = _get_vertical_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
        assert len(axes[0].lines) == 1
        assert len(axes[1].lines) == 1
        for ax in [ax1, ax2]:
            # yaxis are visible because there is only one column
            self._check_visible(ax.get_yticklabels(), visible=True)
        # xaxis of axes0 (top) are hidden
        self._check_visible(axes[0].get_xticklabels(), visible=False)
        self._check_visible(axes[0].get_xticklabels(minor=True), visible=False)
        self._check_visible(axes[1].get_xticklabels(), visible=True)
        self._check_visible(axes[1].get_xticklabels(minor=True), visible=True)
        tm.close()

        # horizontal / subplots / sharex=True / sharey=True
        ax1, ax2 = _get_horizontal_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=[ax1, ax2], sharex=True, sharey=True)
        assert len(axes[0].lines) == 1
        assert len(axes[1].lines) == 1
        self._check_visible(axes[0].get_yticklabels(), visible=True)
        # yaxis of axes1 (right) are hidden
        self._check_visible(axes[1].get_yticklabels(), visible=False)
        for ax in [ax1, ax2]:
            # xaxis are visible because there is only one column
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()

        # boxed
        def _get_boxed_grid():
            # 2x2 grid of axes with unequal spans
            gs = gridspec.GridSpec(3, 3)
            fig = plt.figure()
            ax1 = fig.add_subplot(gs[:2, :2])
            ax2 = fig.add_subplot(gs[:2, 2])
            ax3 = fig.add_subplot(gs[2, :2])
            ax4 = fig.add_subplot(gs[2, 2])
            return ax1, ax2, ax3, ax4

        axes = _get_boxed_grid()
        df = DataFrame(np.random.randn(10, 4), index=ts.index, columns=list("ABCD"))
        axes = df.plot(subplots=True, ax=axes)
        for ax in axes:
            assert len(ax.lines) == 1
            # axis are visible because these are not shared
            self._check_visible(ax.get_yticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()

        # subplots / sharex=True / sharey=True
        axes = _get_boxed_grid()
        with tm.assert_produces_warning(UserWarning):
            axes = df.plot(subplots=True, ax=axes, sharex=True, sharey=True)
        for ax in axes:
            assert len(ax.lines) == 1
        for ax in [axes[0], axes[2]]:  # left column
            self._check_visible(ax.get_yticklabels(), visible=True)
        for ax in [axes[1], axes[3]]:  # right column
            self._check_visible(ax.get_yticklabels(), visible=False)
        for ax in [axes[0], axes[1]]:  # top row
            self._check_visible(ax.get_xticklabels(), visible=False)
            self._check_visible(ax.get_xticklabels(minor=True), visible=False)
        for ax in [axes[2], axes[3]]:  # bottom row
            self._check_visible(ax.get_xticklabels(), visible=True)
            self._check_visible(ax.get_xticklabels(minor=True), visible=True)
        tm.close()
@pytest.mark.slow
def test_df_grid_settings(self):
# Make sure plot defaults to rcParams['axes.grid'] setting, GH 9792
self._check_grid_settings(
DataFrame({"a": [1, 2, 3], "b": [2, 3, 4]}),
plotting.PlotAccessor._dataframe_kinds,
kws={"x": "a", "y": "b"},
)
def test_invalid_colormap(self):
df = DataFrame(randn(3, 2), columns=["A", "B"])
with pytest.raises(ValueError):
df.plot(colormap="invalid_colormap")
    def test_plain_axes(self):
        """Plotting works on plain (non-Subplot) Axes and mixed-axes figures."""
        # supplied ax itself is a SubplotAxes, but figure contains also
        # a plain Axes object (GH11556)
        fig, ax = self.plt.subplots()
        fig.add_axes([0.2, 0.2, 0.2, 0.2])
        Series(rand(10)).plot(ax=ax)

        # supplied ax itself is a plain Axes, but because the cmap keyword
        # a new ax is created for the colorbar -> also multiples axes (GH11520)
        df = DataFrame({"a": randn(8), "b": randn(8)})
        fig = self.plt.figure()
        ax = fig.add_axes((0, 0, 1, 1))
        df.plot(kind="scatter", ax=ax, x="a", y="b", c="a", cmap="hsv")

        # other examples
        fig, ax = self.plt.subplots()
        from mpl_toolkits.axes_grid1 import make_axes_locatable

        # plot into both the main axes and a divider-appended colorbar axes
        divider = make_axes_locatable(ax)
        cax = divider.append_axes("right", size="5%", pad=0.05)
        Series(rand(10)).plot(ax=ax)
        Series(rand(10)).plot(ax=cax)

        fig, ax = self.plt.subplots()
        from mpl_toolkits.axes_grid1.inset_locator import inset_axes

        # plot into both the main axes and an inset axes
        iax = inset_axes(ax, width="30%", height=1.0, loc=3)
        Series(rand(10)).plot(ax=ax)
        Series(rand(10)).plot(ax=iax)
def test_passed_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
colormap = mpl.colors.ListedColormap(color_tuples)
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar", cmap=colormap)
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
def test_rcParams_bar_colors(self):
import matplotlib as mpl
color_tuples = [(0.9, 0, 0, 1), (0, 0.9, 0, 1), (0, 0, 0.9, 1)]
with mpl.rc_context(rc={"axes.prop_cycle": mpl.cycler("color", color_tuples)}):
barplot = pd.DataFrame([[1, 2, 3]]).plot(kind="bar")
assert color_tuples == [c.get_facecolor() for c in barplot.patches]
    @pytest.mark.parametrize("method", ["line", "barh", "bar"])
    def test_secondary_axis_font_size(self, method):
        """fontsize applies to the secondary y axis tick labels. GH 12565"""
        # GH: 12565
        df = (
            pd.DataFrame(np.random.randn(15, 2), columns=list("AB"))
            .assign(C=lambda df: df.B.cumsum())
            .assign(D=lambda df: df.C * 1.1)
        )

        fontsize = 20
        sy = ["C", "D"]

        kwargs = dict(secondary_y=sy, fontsize=fontsize, mark_right=True)
        ax = getattr(df.plot, method)(**kwargs)
        self._check_ticks_props(axes=ax.right_ax, ylabelsize=fontsize)
    @pytest.mark.slow
    def test_x_string_values_ticks(self):
        """String x values keep fixed tick positions. GH 7612, GH 22334"""
        # Test if string plot index have a fixed xtick position
        # GH: 7612, GH: 22334
        df = pd.DataFrame(
            {
                "sales": [3, 2, 3],
                "visits": [20, 42, 28],
                "day": ["Monday", "Tuesday", "Wednesday"],
            }
        )
        ax = df.plot.area(x="day")
        # widen the view so the ticks of interest are rendered
        ax.set_xlim(-1, 3)
        xticklabels = [t.get_text() for t in ax.get_xticklabels()]
        labels_position = dict(zip(xticklabels, ax.get_xticks()))
        # Testing if the label stayed at the right position
        assert labels_position["Monday"] == 0.0
        assert labels_position["Tuesday"] == 1.0
        assert labels_position["Wednesday"] == 2.0
@pytest.mark.slow
def test_x_multiindex_values_ticks(self):
# Test if multiindex plot index have a fixed xtick position
# GH: 15912
index = pd.MultiIndex.from_product([[2012, 2013], [1, 2]])
df = pd.DataFrame(np.random.randn(4, 2), columns=["A", "B"], index=index)
ax = df.plot()
ax.set_xlim(-1, 4)
xticklabels = [t.get_text() for t in ax.get_xticklabels()]
labels_position = dict(zip(xticklabels, ax.get_xticks()))
# Testing if the label stayed at the right position
assert labels_position["(2012, 1)"] == 0.0
assert labels_position["(2012, 2)"] == 1.0
assert labels_position["(2013, 1)"] == 2.0
assert labels_position["(2013, 2)"] == 3.0
@pytest.mark.parametrize("kind", ["line", "area"])
def test_xlim_plot_line(self, kind):
# test if xlim is set correctly in plot.line and plot.area
# GH 27686
df = pd.DataFrame([2, 4], index=[1, 2])
ax = df.plot(kind=kind)
xlims = ax.get_xlim()
assert xlims[0] < 1
assert xlims[1] > 2
def test_xlim_plot_line_correctly_in_mixed_plot_type(self):
# test if xlim is set correctly when ax contains multiple different kinds
# of plots, GH 27686
fig, ax = self.plt.subplots()
indexes = ["k1", "k2", "k3", "k4"]
df = pd.DataFrame(
{
"s1": [1000, 2000, 1500, 2000],
"s2": [900, 1400, 2000, 3000],
"s3": [1500, 1500, 1600, 1200],
"secondary_y": [1, 3, 4, 3],
},
index=indexes,
)
df[["s1", "s2", "s3"]].plot.bar(ax=ax, stacked=False)
df[["secondary_y"]].plot(ax=ax, secondary_y=True)
xlims = ax.get_xlim()
assert xlims[0] < 0
assert xlims[1] > 3
# make sure axis labels are plotted correctly as well
xticklabels = [t.get_text() for t in ax.get_xticklabels()]
assert xticklabels == indexes
def test_subplots_sharex_false(self):
# test when sharex is set to False, two plots should have different
# labels, GH 25160
df = pd.DataFrame(np.random.rand(10, 2))
df.iloc[5:, 1] = np.nan
df.iloc[:5, 0] = np.nan
figs, axs = self.plt.subplots(2, 1)
df.plot.line(ax=axs, subplots=True, sharex=False)
expected_ax1 = np.arange(4.5, 10, 0.5)
expected_ax2 = np.arange(-0.5, 5, 0.5)
tm.assert_numpy_array_equal(axs[0].get_xticks(), expected_ax1)
tm.assert_numpy_array_equal(axs[1].get_xticks(), expected_ax2)
def test_plot_no_rows(self):
# GH 27758
df = pd.DataFrame(columns=["foo"], dtype=int)
assert df.empty
ax = df.plot()
assert len(ax.get_lines()) == 1
line = ax.get_lines()[0]
assert len(line.get_xdata()) == 0
assert len(line.get_ydata()) == 0
def test_plot_no_numeric_data(self):
df = pd.DataFrame(["a", "b", "c"])
with pytest.raises(TypeError):
df.plot()
def test_missing_markers_legend(self):
# 14958
df = pd.DataFrame(np.random.randn(8, 3), columns=["A", "B", "C"])
ax = df.plot(y=["A"], marker="x", linestyle="solid")
df.plot(y=["B"], marker="o", linestyle="dotted", ax=ax)
df.plot(y=["C"], marker="<", linestyle="dotted", ax=ax)
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=["x", "o", "<"])
def test_missing_markers_legend_using_style(self):
# 14563
df = pd.DataFrame(
{
"A": [1, 2, 3, 4, 5, 6],
"B": [2, 4, 1, 3, 2, 4],
"C": [3, 3, 2, 6, 4, 2],
"X": [1, 2, 3, 4, 5, 6],
}
)
fig, ax = self.plt.subplots()
for kind in "ABC":
df.plot("X", kind, label=kind, ax=ax, style=".")
self._check_legend_labels(ax, labels=["A", "B", "C"])
self._check_legend_marker(ax, expected_markers=[".", ".", "."])
def _generate_4_axes_via_gridspec():
    # Helper: build a 2x2 GridSpec figure and return (gridspec, axes) with
    # the axes ordered top-left, lower-left, top-right, lower-right.
    import matplotlib.pyplot as plt
    import matplotlib as mpl
    import matplotlib.gridspec  # noqa

    grid = mpl.gridspec.GridSpec(2, 2)
    # Column-major creation order matches the original tl, ll, tr, lr layout.
    axes = [plt.subplot(grid[row, col]) for col in range(2) for row in range(2)]
    return grid, axes
| true
| true
|
1c429be3e6cc511b6861bec09506e19de1a9abb1
| 2,808
|
py
|
Python
|
reinforcement/bpsm.py
|
jguilhermeam/cnn-iets
|
0636fc8803ec46a1874a7c6dda6590cd9d7a7c53
|
[
"MIT"
] | null | null | null |
reinforcement/bpsm.py
|
jguilhermeam/cnn-iets
|
0636fc8803ec46a1874a7c6dda6590cd9d7a7c53
|
[
"MIT"
] | null | null | null |
reinforcement/bpsm.py
|
jguilhermeam/cnn-iets
|
0636fc8803ec46a1874a7c6dda6590cd9d7a7c53
|
[
"MIT"
] | null | null | null |
class BPSM:
    """Begin/Position/Sequence Model.

    Learns three probability matrices from labeled records, where each
    record is a list of blocks and every block exposes an ``attr`` label
    (the label ``'none'`` marks unlabeled blocks and is never counted):

    - ``f_matrix``: forward transition probabilities,
    - ``b_matrix``: backward transition probabilities,
    - ``p_matrix``: label probability per block position.
    """

    def __init__(self, records, attribute_list):
        print("Calculating BPSM matrices...")
        # The virtual boundary labels are only needed while counting
        # transitions; they are removed again before the positional matrix.
        attribute_list.extend(['begin', 'end'])
        self.f_matrix = self.init_f_matrix(records, attribute_list)
        self.b_matrix = self.init_b_matrix(records, attribute_list)
        attribute_list.remove('end')
        attribute_list.remove('begin')
        self.p_matrix = self.init_p_matrix(records, attribute_list)

    def init_f_matrix(self, records, attribute_list):
        """Forward matrix: P(label j follows label i), where 'begin' acts as
        the virtual predecessor of the first block of every record."""
        counts = {src: {dst: 0 for dst in attribute_list} for src in attribute_list}
        totals = dict.fromkeys(attribute_list, 0)
        for blocks in records:
            for pos, block in enumerate(blocks):
                src = 'begin' if pos == 0 else blocks[pos - 1].attr
                dst = block.attr
                # Unlabeled blocks and self-transitions are not counted.
                if src != 'none' and dst != 'none' and src != dst:
                    counts[src][dst] += 1
                    totals[src] += 1
        # Normalize each row by its outgoing-transition total (rows with no
        # transitions are left as zeros).
        for src in attribute_list:
            if totals[src] > 0:
                for dst in attribute_list:
                    counts[src][dst] /= totals[src]
        return counts

    def init_b_matrix(self, records, attribute_list):
        """Backward matrix: P(label j precedes label k), where 'end' acts as
        the virtual successor of the last block of every record."""
        counts = {src: {dst: 0 for dst in attribute_list} for src in attribute_list}
        totals = dict.fromkeys(attribute_list, 0)
        for blocks in records:
            last = len(blocks) - 1
            for pos in range(last, -1, -1):
                src = 'end' if pos == last else blocks[pos + 1].attr
                dst = blocks[pos].attr
                if src != 'none' and dst != 'none' and src != dst:
                    counts[src][dst] += 1
                    totals[src] += 1
        for src in attribute_list:
            if totals[src] > 0:
                for dst in attribute_list:
                    counts[src][dst] /= totals[src]
        return counts

    def init_p_matrix(self, records, attribute_list):
        """Positional matrix: P(label | block position within a record)."""
        width = max(len(blocks) for blocks in records)
        totals = [0] * width
        counts = {attr: [0] * width for attr in attribute_list}
        for blocks in records:
            for pos, block in enumerate(blocks):
                if block.attr != 'none':
                    counts[block.attr][pos] += 1
                    totals[pos] += 1
        # Normalize per position (positions never seen stay at zero).
        for attr in attribute_list:
            for pos in range(width):
                if totals[pos] > 0:
                    counts[attr][pos] /= totals[pos]
        return counts
| 32.275862
| 67
| 0.48433
|
class BPSM:
    """Begin/Position/Sequence Model.

    Learns three probability matrices from labeled records, where each
    record is a list of blocks and every block exposes an ``attr`` label
    (the label ``'none'`` marks unlabeled blocks and is never counted):

    - ``f_matrix``: forward transition probabilities,
    - ``b_matrix``: backward transition probabilities,
    - ``p_matrix``: label probability per block position.
    """

    def __init__(self, records, attribute_list):
        print("Calculating BPSM matrices...")
        # The virtual boundary labels are only needed while counting
        # transitions; they are removed again before the positional matrix.
        attribute_list.extend(['begin', 'end'])
        self.f_matrix = self.init_f_matrix(records, attribute_list)
        self.b_matrix = self.init_b_matrix(records, attribute_list)
        attribute_list.remove('end')
        attribute_list.remove('begin')
        self.p_matrix = self.init_p_matrix(records, attribute_list)

    def init_f_matrix(self, records, attribute_list):
        """Forward matrix: P(label j follows label i), where 'begin' acts as
        the virtual predecessor of the first block of every record."""
        counts = {src: {dst: 0 for dst in attribute_list} for src in attribute_list}
        totals = dict.fromkeys(attribute_list, 0)
        for blocks in records:
            for pos, block in enumerate(blocks):
                src = 'begin' if pos == 0 else blocks[pos - 1].attr
                dst = block.attr
                # Unlabeled blocks and self-transitions are not counted.
                if src != 'none' and dst != 'none' and src != dst:
                    counts[src][dst] += 1
                    totals[src] += 1
        # Normalize each row by its outgoing-transition total (rows with no
        # transitions are left as zeros).
        for src in attribute_list:
            if totals[src] > 0:
                for dst in attribute_list:
                    counts[src][dst] /= totals[src]
        return counts

    def init_b_matrix(self, records, attribute_list):
        """Backward matrix: P(label j precedes label k), where 'end' acts as
        the virtual successor of the last block of every record."""
        counts = {src: {dst: 0 for dst in attribute_list} for src in attribute_list}
        totals = dict.fromkeys(attribute_list, 0)
        for blocks in records:
            last = len(blocks) - 1
            for pos in range(last, -1, -1):
                src = 'end' if pos == last else blocks[pos + 1].attr
                dst = blocks[pos].attr
                if src != 'none' and dst != 'none' and src != dst:
                    counts[src][dst] += 1
                    totals[src] += 1
        for src in attribute_list:
            if totals[src] > 0:
                for dst in attribute_list:
                    counts[src][dst] /= totals[src]
        return counts

    def init_p_matrix(self, records, attribute_list):
        """Positional matrix: P(label | block position within a record)."""
        width = max(len(blocks) for blocks in records)
        totals = [0] * width
        counts = {attr: [0] * width for attr in attribute_list}
        for blocks in records:
            for pos, block in enumerate(blocks):
                if block.attr != 'none':
                    counts[block.attr][pos] += 1
                    totals[pos] += 1
        # Normalize per position (positions never seen stay at zero).
        for attr in attribute_list:
            for pos in range(width):
                if totals[pos] > 0:
                    counts[attr][pos] /= totals[pos]
        return counts
| true
| true
|
1c429cf406f178da536dd142c086cf9ce74ebae9
| 2,840
|
py
|
Python
|
args/regex/test_args.py
|
smalepati99/pytest-bdd-python
|
6fd5b09452403cdd8373c5aea623cc5be1e627ba
|
[
"Apache-2.0"
] | 944
|
2015-02-28T20:13:01.000Z
|
2022-03-29T16:04:58.000Z
|
args/regex/test_args.py
|
smalepati99/pytest-bdd-python
|
6fd5b09452403cdd8373c5aea623cc5be1e627ba
|
[
"Apache-2.0"
] | 393
|
2015-03-02T21:01:12.000Z
|
2022-03-30T13:04:04.000Z
|
args/regex/test_args.py
|
smalepati99/pytest-bdd-python
|
6fd5b09452403cdd8373c5aea623cc5be1e627ba
|
[
"Apache-2.0"
] | 178
|
2015-03-12T21:46:51.000Z
|
2022-03-24T11:46:15.000Z
|
"""Step arguments tests."""
import textwrap
def test_every_steps_takes_param_with_the_same_name(testdir):
    """Every regex step parses a same-named ``euro`` argument (int-converted)
    and receives it as a function parameter.

    Writes a feature file plus a step-definition module into a temporary
    pytest directory, runs pytest there, and expects one passing test.
    """
    # Scenario: five steps, each stating a Euro amount to be captured.
    testdir.makefile(
        ".feature",
        arguments=textwrap.dedent(
            """\
            Feature: Step arguments
                Scenario: Every step takes a parameter with the same name
                    Given I have 1 Euro
                    When I pay 2 Euro
                    And I pay 1 Euro
                    Then I should have 0 Euro
                    And I should have 999999 Euro # In my dream...
            """
        ),
    )
    # Step definitions: ``parsers.re`` with ``converters=dict(euro=int)``;
    # each step pops its expected amount from the shared ``values`` fixture.
    testdir.makepyfile(
        textwrap.dedent(
            r"""
        import pytest
        from pytest_bdd import parsers, given, when, then, scenario
        @scenario("arguments.feature", "Every step takes a parameter with the same name")
        def test_arguments():
            pass
        @pytest.fixture
        def values():
            return [1, 2, 1, 0, 999999]
        @given(parsers.re(r"I have (?P<euro>\d+) Euro"), converters=dict(euro=int))
        def i_have(euro, values):
            assert euro == values.pop(0)
        @when(parsers.re(r"I pay (?P<euro>\d+) Euro"), converters=dict(euro=int))
        def i_pay(euro, values, request):
            assert euro == values.pop(0)
        @then(parsers.re(r"I should have (?P<euro>\d+) Euro"), converters=dict(euro=int))
        def i_should_have(euro, values):
            assert euro == values.pop(0)
        """
        )
    )
    # The generated test module must run green in the sandboxed pytest.
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
def test_argument_in_when(testdir):
    """An argument captured by a When step overwrites the value stored by
    the Given step, and the Then step sees the updated value.

    Note the captured ``arg`` stays a string (no converters are given), so
    the final comparison is string-to-string.
    """
    testdir.makefile(
        ".feature",
        arguments=textwrap.dedent(
            """\
            Feature: Step arguments
                Scenario: Argument in when, step 1
                    Given I have an argument 1
                    When I get argument 5
                    Then My argument should be 5
            """
        ),
    )
    # Steps share state through the ``arguments`` dict fixture.
    testdir.makepyfile(
        textwrap.dedent(
            r"""
        import pytest
        from pytest_bdd import parsers, given, when, then, scenario
        @pytest.fixture
        def arguments():
            return dict()
        @scenario("arguments.feature", "Argument in when, step 1")
        def test_arguments():
            pass
        @given(parsers.re(r"I have an argument (?P<arg>\d+)"))
        def argument(arguments, arg):
            arguments["arg"] = arg
        @when(parsers.re(r"I get argument (?P<arg>\d+)"))
        def get_argument(arguments, arg):
            arguments["arg"] = arg
        @then(parsers.re(r"My argument should be (?P<arg>\d+)"))
        def assert_that_my_argument_is_arg(arguments, arg):
            assert arguments["arg"] == arg
        """
        )
    )
    # The generated test module must run green in the sandboxed pytest.
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
| 26.542056
| 89
| 0.534507
|
import textwrap
def test_every_steps_takes_param_with_the_same_name(testdir):
    """Every regex step parses a same-named ``euro`` argument (int-converted)
    and receives it as a function parameter.

    Writes a feature file plus a step-definition module into a temporary
    pytest directory, runs pytest there, and expects one passing test.
    """
    # Scenario: five steps, each stating a Euro amount to be captured.
    testdir.makefile(
        ".feature",
        arguments=textwrap.dedent(
            """\
            Feature: Step arguments
                Scenario: Every step takes a parameter with the same name
                    Given I have 1 Euro
                    When I pay 2 Euro
                    And I pay 1 Euro
                    Then I should have 0 Euro
                    And I should have 999999 Euro # In my dream...
            """
        ),
    )
    # Step definitions: ``parsers.re`` with ``converters=dict(euro=int)``;
    # each step pops its expected amount from the shared ``values`` fixture.
    testdir.makepyfile(
        textwrap.dedent(
            r"""
        import pytest
        from pytest_bdd import parsers, given, when, then, scenario
        @scenario("arguments.feature", "Every step takes a parameter with the same name")
        def test_arguments():
            pass
        @pytest.fixture
        def values():
            return [1, 2, 1, 0, 999999]
        @given(parsers.re(r"I have (?P<euro>\d+) Euro"), converters=dict(euro=int))
        def i_have(euro, values):
            assert euro == values.pop(0)
        @when(parsers.re(r"I pay (?P<euro>\d+) Euro"), converters=dict(euro=int))
        def i_pay(euro, values, request):
            assert euro == values.pop(0)
        @then(parsers.re(r"I should have (?P<euro>\d+) Euro"), converters=dict(euro=int))
        def i_should_have(euro, values):
            assert euro == values.pop(0)
        """
        )
    )
    # The generated test module must run green in the sandboxed pytest.
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
def test_argument_in_when(testdir):
    """An argument captured by a When step overwrites the value stored by
    the Given step, and the Then step sees the updated value.

    Note the captured ``arg`` stays a string (no converters are given), so
    the final comparison is string-to-string.
    """
    testdir.makefile(
        ".feature",
        arguments=textwrap.dedent(
            """\
            Feature: Step arguments
                Scenario: Argument in when, step 1
                    Given I have an argument 1
                    When I get argument 5
                    Then My argument should be 5
            """
        ),
    )
    # Steps share state through the ``arguments`` dict fixture.
    testdir.makepyfile(
        textwrap.dedent(
            r"""
        import pytest
        from pytest_bdd import parsers, given, when, then, scenario
        @pytest.fixture
        def arguments():
            return dict()
        @scenario("arguments.feature", "Argument in when, step 1")
        def test_arguments():
            pass
        @given(parsers.re(r"I have an argument (?P<arg>\d+)"))
        def argument(arguments, arg):
            arguments["arg"] = arg
        @when(parsers.re(r"I get argument (?P<arg>\d+)"))
        def get_argument(arguments, arg):
            arguments["arg"] = arg
        @then(parsers.re(r"My argument should be (?P<arg>\d+)"))
        def assert_that_my_argument_is_arg(arguments, arg):
            assert arguments["arg"] == arg
        """
        )
    )
    # The generated test module must run green in the sandboxed pytest.
    result = testdir.runpytest()
    result.assert_outcomes(passed=1)
| true
| true
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.