text
stringlengths 4
1.02M
| meta
dict |
|---|---|
"""Parallel workflow execution via IPython controller
"""
from future import standard_library
standard_library.install_aliases()
from pickle import dumps
import sys
IPython_not_loaded = False
try:
from IPython import __version__ as IPyversion
from IPython.parallel.error import TimeoutError
except:
IPython_not_loaded = True
from .base import (DistributedPluginBase, logger, report_crash)
def execute_task(pckld_task, node_config, updatehash):
    """Unpickle and run a nipype node on a remote IPython engine.

    Parameters
    ----------
    pckld_task : bytes
        A pickled nipype node (protocol 2, produced by ``_submit_job``).
    node_config : dict
        Configuration to install via ``config.update_config`` before running.
    updatehash : bool
        Passed through to ``task.run``.

    Returns
    -------
    (result, traceback, hostname) : tuple
        ``traceback`` is None on success; on failure it holds the formatted
        remote traceback and ``result`` is the node's partial result (or None
        if the node could not even be unpickled).
    """
    from socket import gethostname
    from traceback import format_exc
    from nipype import config, logging
    traceback = None
    result = None
    task = None  # so the except-branch never hits an unbound name
    import os
    cwd = os.getcwd()
    try:
        config.update_config(node_config)
        logging.update_logging(config)
        from pickle import loads
        task = loads(pckld_task)
        result = task.run(updatehash=updatehash)
    except Exception:
        # Fixed: the original bare ``except:`` also swallowed SystemExit /
        # KeyboardInterrupt, and referenced ``task.result`` even when
        # unpickling failed, raising NameError and losing the traceback.
        traceback = format_exc()
        if task is not None:
            result = task.result
    finally:
        # Restore the working directory even if the except-branch raises.
        os.chdir(cwd)
    return result, traceback, gethostname()
class IPythonPlugin(DistributedPluginBase):
    """Execute a nipype workflow on IPython.parallel engines."""

    def __init__(self, plugin_args=None):
        # Fail early if IPython.parallel was not importable at module load.
        if IPython_not_loaded:
            raise ImportError('IPython parallel could not be imported')
        super(IPythonPlugin, self).__init__(plugin_args=plugin_args)
        self.iparallel = None
        self.taskclient = None
        self.taskmap = {}
        self._taskid = 0

    def run(self, graph, config, updatehash=False):
        """Run *graph* distributed over an IPython controller's engines."""
        # Re-import the parallel module and connect a fresh client.
        modname = 'IPython.parallel'
        try:
            __import__(modname)
            self.iparallel = sys.modules[modname]
        except ImportError:
            raise ImportError("Ipython kernel not found. Parallel execution "
                              "will be unavailable")
        try:
            self.taskclient = self.iparallel.Client()
        except Exception as e:
            # Translate the common connection failures into readable errors.
            if isinstance(e, TimeoutError):
                raise Exception("No IPython clients found.")
            if isinstance(e, IOError):
                raise Exception("ipcluster/ipcontroller has not been started")
            if isinstance(e, ValueError):
                raise Exception("Ipython kernel not installed")
            raise e
        return super(IPythonPlugin, self).run(graph, config,
                                              updatehash=updatehash)

    def _get_result(self, taskid):
        """Return a result dict for *taskid*, or None while it is running."""
        if taskid not in self.taskmap:
            raise ValueError('Task %d not in pending list' % taskid)
        async_result = self.taskmap[taskid]
        if not async_result.ready():
            return None
        result, traceback, hostname = async_result.get()
        return dict(result=result, traceback=traceback, hostname=hostname)

    def _submit_job(self, node, updatehash=False):
        """Pickle *node* and dispatch it to a load-balanced engine."""
        pckld_node = dumps(node, 2)
        view = self.taskclient.load_balanced_view()
        result_object = view.apply(execute_task, pckld_node, node.config,
                                   updatehash)
        self._taskid += 1
        self.taskmap[self._taskid] = result_object
        return self._taskid

    def _report_crash(self, node, result=None):
        """Record a crashed node, preferring the remote traceback if any."""
        if result and result['traceback']:
            node._result = result['result']
            node._traceback = result['traceback']
            return report_crash(node, traceback=result['traceback'])
        return report_crash(node)

    def _clear_task(self, taskid):
        """Purge a finished task's results from the controller."""
        # NOTE(review): string comparison of version numbers — works for the
        # versions in use here but is not a general version check.
        if IPyversion >= '0.11':
            logger.debug("Clearing id: %d" % taskid)
            self.taskclient.purge_results(self.taskmap[taskid])
            del self.taskmap[taskid]
|
{
"content_hash": "9e3dcaf5daf2fbe0bee53038ebba1fcf",
"timestamp": "",
"source": "github",
"line_count": 115,
"max_line_length": 83,
"avg_line_length": 35.130434782608695,
"alnum_prop": 0.5856435643564356,
"repo_name": "dgellis90/nipype",
"id": "96f47fd6efa0164aee0d7404c56468ac3aa889d5",
"size": "4154",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "nipype/pipeline/plugins/ipython.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "HTML",
"bytes": "9823"
},
{
"name": "KiCad",
"bytes": "3797"
},
{
"name": "Makefile",
"bytes": "2106"
},
{
"name": "Matlab",
"bytes": "1717"
},
{
"name": "Python",
"bytes": "4857096"
},
{
"name": "Shell",
"bytes": "380"
},
{
"name": "Tcl",
"bytes": "43408"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from matplotlib.externals import six
import tempfile
from numpy.testing import assert_allclose, assert_array_equal
import numpy.ma.testutils as matest
import numpy as np
import datetime as datetime
from nose.tools import (assert_equal, assert_almost_equal, assert_not_equal,
assert_true, assert_raises)
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from matplotlib.testing.decorators import knownfailureif, CleanupTestCase
try:
from mpl_toolkits.natgrid import _natgrid
HAS_NATGRID = True
except ImportError:
HAS_NATGRID = False
class general_testcase(CleanupTestCase):
    """Assorted checks of mlab helpers: PCA, prctile and _norm."""

    def test_colinear_pca(self):
        data = mlab.PCA._get_colinear()
        pca = mlab.PCA(data)
        # Colinear input: everything past the first two components is noise.
        assert_allclose(pca.fracs[2:], 0., atol=1e-8)
        assert_allclose(pca.Y[:, 2:], 0., atol=1e-8)

    def test_prctile(self):
        # odd number of elements
        odd = [1, 2, 3]
        assert_equal(mlab.prctile(odd, 50), np.median(odd))
        # even number of elements
        even = [1, 2, 3, 4]
        assert_equal(mlab.prctile(even, 50), np.median(even))
        # derived from email sent by jason-sage to MPL-user on 20090914
        ob1 = [1, 1, 2, 2, 1, 2, 4, 3, 2, 2, 2, 3,
               4, 5, 6, 7, 8, 9, 7, 6, 4, 5, 5]
        percentiles = [0, 75, 100]
        expected = [1, 5.5, 9]
        # vectorized call
        assert_allclose(expected, mlab.prctile(ob1, percentiles))
        # scalar calls
        for pi, expectedi in zip(percentiles, expected):
            assert_allclose(expectedi, mlab.prctile(ob1, pi))

    def test_norm(self):
        np.random.seed(0)
        vec = np.random.standard_normal(1000)
        assert_almost_equal(np.linalg.norm(vec), mlab._norm(vec))
class spacing_testcase(CleanupTestCase):
    """mlab.logspace should agree with np.logspace for the same endpoints."""

    def _logspace_pair(self, xmin, xmax, N):
        # Shared driver: mlab result plus the equivalent numpy reference.
        res = mlab.logspace(xmin, xmax, N)
        targ = np.logspace(np.log10(xmin), np.log10(xmax), N)
        return res, targ

    def test_logspace_tens(self):
        res, targ = self._logspace_pair(.01, 1000., 6)
        assert_allclose(targ, res)

    def test_logspace_primes(self):
        res, targ = self._logspace_pair(.03, 1313., 7)
        assert_allclose(targ, res)

    def test_logspace_none(self):
        res, targ = self._logspace_pair(.03, 1313., 0)
        assert_array_equal(targ, res)
        assert_equal(res.size, 0)

    def test_logspace_single(self):
        res, targ = self._logspace_pair(.03, 1313., 1)
        assert_array_equal(targ, res)
        assert_equal(res.size, 1)
class stride_testcase(CleanupTestCase):
    """Tests for mlab.stride_windows and mlab.stride_repeat."""

    def get_base(self, x):
        # Follow the .base chain down to the array that owns the buffer.
        owner = x
        while owner.base is not None:
            owner = owner.base
        return owner

    def calc_window_target(self, x, NFFT, noverlap=0):
        '''This is an adaptation of the original window extraction
        algorithm.  This is here to test to make sure the new implementation
        has the same result.'''
        step = NFFT - noverlap
        ind = np.arange(0, len(x) - NFFT + 1, step)
        n = len(ind)
        result = np.zeros((NFFT, n))
        # do the ffts of the slices
        for i in range(n):
            result[:, i] = x[ind[i]:ind[i] + NFFT]
        return result

    def test_stride_windows_2D_ValueError(self):
        arr = np.arange(10)[np.newaxis]
        assert_raises(ValueError, mlab.stride_windows, arr, 5)

    def test_stride_windows_0D_ValueError(self):
        arr = np.array(0)
        assert_raises(ValueError, mlab.stride_windows, arr, 5)

    def test_stride_windows_noverlap_gt_n_ValueError(self):
        arr = np.arange(10)
        assert_raises(ValueError, mlab.stride_windows, arr, 2, 3)

    def test_stride_windows_noverlap_eq_n_ValueError(self):
        arr = np.arange(10)
        assert_raises(ValueError, mlab.stride_windows, arr, 2, 2)

    def test_stride_windows_n_gt_lenx_ValueError(self):
        arr = np.arange(10)
        assert_raises(ValueError, mlab.stride_windows, arr, 11)

    def test_stride_windows_n_lt_1_ValueError(self):
        arr = np.arange(10)
        assert_raises(ValueError, mlab.stride_windows, arr, 0)

    def test_stride_repeat_2D_ValueError(self):
        arr = np.arange(10)[np.newaxis]
        assert_raises(ValueError, mlab.stride_repeat, arr, 5)

    def test_stride_repeat_axis_lt_0_ValueError(self):
        arr = np.array(0)
        assert_raises(ValueError, mlab.stride_repeat, arr, 5, axis=-1)

    def test_stride_repeat_axis_gt_1_ValueError(self):
        arr = np.array(0)
        assert_raises(ValueError, mlab.stride_repeat, arr, 5, axis=2)

    def test_stride_repeat_n_lt_1_ValueError(self):
        arr = np.arange(10)
        assert_raises(ValueError, mlab.stride_repeat, arr, 0)

    def test_stride_repeat_n1_axis0(self):
        arr = np.arange(10)
        strided = mlab.stride_repeat(arr, 1)
        assert_equal((1, ) + arr.shape, strided.shape)
        assert_array_equal(arr, strided.flat)
        # must be a view onto the original buffer, not a copy
        assert_true(self.get_base(strided) is arr)

    def test_stride_repeat_n1_axis1(self):
        arr = np.arange(10)
        strided = mlab.stride_repeat(arr, 1, axis=1)
        assert_equal(arr.shape + (1, ), strided.shape)
        assert_array_equal(arr, strided.flat)
        assert_true(self.get_base(strided) is arr)

    def test_stride_repeat_n5_axis0(self):
        arr = np.arange(10)
        strided = mlab.stride_repeat(arr, 5)
        expected = np.repeat(arr[np.newaxis], 5, axis=0)
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((5, ) + arr.shape, strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_repeat_n5_axis1(self):
        arr = np.arange(10)
        strided = mlab.stride_repeat(arr, 5, axis=1)
        expected = np.repeat(arr[np.newaxis], 5, axis=0).T
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal(arr.shape + (5, ), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n1_noverlap0_axis0(self):
        arr = np.arange(10)
        strided = mlab.stride_windows(arr, 1)
        expected = self.calc_window_target(arr, 1)
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((1, ) + arr.shape, strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n1_noverlap0_axis1(self):
        arr = np.arange(10)
        strided = mlab.stride_windows(arr, 1, axis=1)
        expected = self.calc_window_target(arr, 1).T
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal(arr.shape + (1, ), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n5_noverlap0_axis0(self):
        arr = np.arange(100)
        strided = mlab.stride_windows(arr, 5)
        expected = self.calc_window_target(arr, 5)
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((5, 20), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n5_noverlap0_axis1(self):
        arr = np.arange(100)
        strided = mlab.stride_windows(arr, 5, axis=1)
        expected = self.calc_window_target(arr, 5).T
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((20, 5), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n15_noverlap2_axis0(self):
        arr = np.arange(100)
        strided = mlab.stride_windows(arr, 15, 2)
        expected = self.calc_window_target(arr, 15, 2)
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((15, 7), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n15_noverlap2_axis1(self):
        arr = np.arange(100)
        strided = mlab.stride_windows(arr, 15, 2, axis=1)
        expected = self.calc_window_target(arr, 15, 2).T
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((7, 15), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n13_noverlapn3_axis0(self):
        arr = np.arange(100)
        strided = mlab.stride_windows(arr, 13, -3)
        expected = self.calc_window_target(arr, 13, -3)
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((13, 6), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n13_noverlapn3_axis1(self):
        arr = np.arange(100)
        strided = mlab.stride_windows(arr, 13, -3, axis=1)
        expected = self.calc_window_target(arr, 13, -3).T
        assert_equal(expected.shape, strided.shape)
        assert_array_equal(expected, strided)
        assert_equal((6, 13), strided.shape)
        assert_true(self.get_base(strided) is arr)

    def test_stride_windows_n32_noverlap0_axis0_unflatten(self):
        n = 32
        tiled = np.tile(np.arange(n)[np.newaxis], (21, 1))
        flattened = tiled.flatten()
        strided = mlab.stride_windows(flattened, n)
        assert_equal(strided.shape, tiled.T.shape)
        assert_array_equal(strided, tiled.T)

    def test_stride_windows_n32_noverlap0_axis1_unflatten(self):
        n = 32
        tiled = np.tile(np.arange(n)[np.newaxis], (21, 1))
        flattened = tiled.flatten()
        strided = mlab.stride_windows(flattened, n, axis=1)
        assert_equal(strided.shape, tiled.shape)
        assert_array_equal(strided, tiled)

    def test_stride_ensure_integer_type(self):
        N = 100
        x = np.empty(N + 20, dtype='>f4')
        x.fill(np.NaN)
        y = x[10:-10]
        y.fill(0.3)
        # previous to #3845 lead to corrupt access
        y_strided = mlab.stride_windows(y, n=33, noverlap=0.6)
        assert_array_equal(y_strided, 0.3)
        # previous to #3845 lead to corrupt access
        y_strided = mlab.stride_windows(y, n=33.3, noverlap=0)
        assert_array_equal(y_strided, 0.3)
        # even previous to #3845 could not find any problematic
        # configuration however, let's be sure it's not accidentally
        # introduced
        y_strided = mlab.stride_repeat(y, n=33.815)
        assert_array_equal(y_strided, 0.3)
class csv_testcase(CleanupTestCase):
    """Round-trip and parsing tests for mlab.rec2csv / mlab.csv2rec."""

    def setUp(self):
        # Python 3's csv needs a text-mode file with newline translation off.
        if six.PY3:
            self.fd = tempfile.TemporaryFile(suffix='csv', mode="w+",
                                             newline='')
        else:
            self.fd = tempfile.TemporaryFile(suffix='csv', mode="wb+")

    def tearDown(self):
        self.fd.close()

    def test_recarray_csv_roundtrip(self):
        expected = np.recarray((99,),
                               [(str('x'), np.float),
                                (str('y'), np.float),
                                (str('t'), np.float)])
        # initialising all values: uninitialised memory sometimes produces
        # floats that do not round-trip to string and back.
        expected['x'][:] = np.linspace(-1e9, -1, 99)
        expected['y'][:] = np.linspace(1, 1e9, 99)
        expected['t'][:] = np.linspace(0, 0.01, 99)
        mlab.rec2csv(expected, self.fd)
        self.fd.seek(0)
        actual = mlab.csv2rec(self.fd)
        assert_allclose(expected['x'], actual['x'])
        assert_allclose(expected['y'], actual['y'])
        assert_allclose(expected['t'], actual['t'])

    def test_rec2csv_bad_shape_ValueError(self):
        bad = np.recarray((99, 4), [(str('x'), np.float),
                                    (str('y'), np.float)])
        # the bad recarray should trigger a ValueError for having ndim > 1.
        assert_raises(ValueError, mlab.rec2csv, bad, self.fd)

    def test_csv2rec_names_with_comments(self):
        self.fd.write('# comment\n1,2,3\n4,5,6\n')
        self.fd.seek(0)
        array = mlab.csv2rec(self.fd, names='a,b,c')
        assert len(array) == 2
        assert len(array.dtype) == 3

    def _check_csv2rec_dates(self, text, **kwargs):
        # Shared driver for the date-format variants below; all inputs
        # spell the same five timestamps in different orderings.
        self.fd.write(text)
        expected = [datetime.datetime(2014, 1, 11, 0, 0),
                    datetime.datetime(1976, 3, 5, 0, 0, 1),
                    datetime.datetime(1983, 7, 9, 17, 17, 34),
                    datetime.datetime(2054, 6, 20, 14, 31, 45),
                    datetime.datetime(2000, 10, 31, 11, 50, 23)]
        self.fd.seek(0)
        array = mlab.csv2rec(self.fd, names='a', **kwargs)
        assert_array_equal(array['a'].tolist(), expected)

    def test_csv2rec_usdate(self):
        self._check_csv2rec_dates('01/11/14\n'
                                  '03/05/76 12:00:01 AM\n'
                                  '07/09/83 5:17:34 PM\n'
                                  '06/20/2054 2:31:45 PM\n'
                                  '10/31/00 11:50:23 AM\n')

    def test_csv2rec_dayfirst(self):
        self._check_csv2rec_dates('11/01/14\n'
                                  '05/03/76 12:00:01 AM\n'
                                  '09/07/83 5:17:34 PM\n'
                                  '20/06/2054 2:31:45 PM\n'
                                  '31/10/00 11:50:23 AM\n',
                                  dayfirst=True)

    def test_csv2rec_yearfirst(self):
        self._check_csv2rec_dates('14/01/11\n'
                                  '76/03/05 12:00:01 AM\n'
                                  '83/07/09 5:17:34 PM\n'
                                  '2054/06/20 2:31:45 PM\n'
                                  '00/10/31 11:50:23 AM\n',
                                  yearfirst=True)
class rec2txt_testcase(CleanupTestCase):
    """Formatting check for mlab.rec2txt."""

    def test_csv2txt_basic(self):
        # str() calls around field names necessary b/c as of numpy 1.11
        # dtype doesn't like unicode names (caused by unicode_literals import)
        records = np.array([(1.0, 2, 'foo', 'bing'),
                            (2.0, 3, 'bar', 'blah')],
                           dtype=np.dtype([(str('x'), np.float32),
                                           (str('y'), np.int8),
                                           (str('s'), str, 3),
                                           (str('s2'), str, 4)]))
        truth = (' x y s s2\n'
                 ' 1.000 2 foo bing \n'
                 ' 2.000 3 bar blah ').splitlines()
        assert_equal(mlab.rec2txt(records).splitlines(), truth)
class window_testcase(CleanupTestCase):
    """Tests for mlab's window functions and mlab.apply_window."""

    def setUp(self):
        np.random.seed(0)
        self.n = 1000
        self.x = np.arange(0., self.n)
        self.sig_rand = np.random.standard_normal(self.n) + 100.
        self.sig_ones = np.ones_like(self.x)
        self.sig_slope = np.linspace(-10., 90., self.n)

    def check_window_apply_repeat(self, x, window, NFFT, noverlap):
        '''This is an adaptation of the original window application
        algorithm.  This is here to test to make sure the new implementation
        has the same result.'''
        step = NFFT - noverlap
        ind = np.arange(0, len(x) - NFFT + 1, step)
        n = len(ind)
        result = np.zeros((NFFT, n))
        if cbook.iterable(window):
            windowVals = window
        else:
            windowVals = window(np.ones((NFFT,), x.dtype))
        # do the ffts of the slices
        for i in range(n):
            result[:, i] = windowVals * x[ind[i]:ind[i] + NFFT]
        return result

    def test_window_none_rand(self):
        # NOTE(review): despite its name this test feeds the all-ones signal;
        # the companion test below feeds the random one.
        res = mlab.window_none(self.sig_ones)
        assert_array_equal(res, self.sig_ones)

    def test_window_none_ones(self):
        res = mlab.window_none(self.sig_rand)
        assert_array_equal(res, self.sig_rand)

    def test_window_hanning_rand(self):
        expected = np.hanning(len(self.sig_rand)) * self.sig_rand
        res = mlab.window_hanning(self.sig_rand)
        assert_allclose(expected, res, atol=1e-06)

    def test_window_hanning_ones(self):
        expected = np.hanning(len(self.sig_ones))
        res = mlab.window_hanning(self.sig_ones)
        assert_allclose(expected, res, atol=1e-06)

    def test_apply_window_1D_axis1_ValueError(self):
        assert_raises(ValueError, mlab.apply_window, self.sig_rand,
                      mlab.window_hanning, axis=1, return_window=False)

    def test_apply_window_1D_els_wrongsize_ValueError(self):
        sig = self.sig_rand
        win_vals = mlab.window_hanning(np.ones(sig.shape[0]-1))
        assert_raises(ValueError, mlab.apply_window, sig, win_vals)

    def test_apply_window_0D_ValueError(self):
        assert_raises(ValueError, mlab.apply_window, np.array(0),
                      mlab.window_hanning, axis=1, return_window=False)

    def test_apply_window_3D_ValueError(self):
        sig = self.sig_rand[np.newaxis][np.newaxis]
        assert_raises(ValueError, mlab.apply_window, sig,
                      mlab.window_hanning, axis=1, return_window=False)

    def test_apply_window_hanning_1D(self):
        sig = self.sig_rand
        win = mlab.window_hanning
        win_vals = mlab.window_hanning(np.ones(sig.shape[0]))
        out, win_used = mlab.apply_window(sig, win, return_window=True)
        expected = win(sig)
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)
        assert_array_equal(win_vals, win_used)

    def test_apply_window_hanning_1D_axis0(self):
        sig = self.sig_rand
        win = mlab.window_hanning
        out = mlab.apply_window(sig, win, axis=0, return_window=False)
        expected = win(sig)
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_els_1D_axis0(self):
        sig = self.sig_rand
        win_vals = mlab.window_hanning(np.ones(sig.shape[0]))
        out = mlab.apply_window(sig, win_vals, axis=0, return_window=False)
        expected = mlab.window_hanning(sig)
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_2D_axis0(self):
        sig = np.random.standard_normal([1000, 10]) + 100.
        win = mlab.window_hanning
        out = mlab.apply_window(sig, win, axis=0, return_window=False)
        expected = np.zeros_like(sig)
        for i in range(sig.shape[1]):
            expected[:, i] = win(sig[:, i])
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_els1_2D_axis0(self):
        sig = np.random.standard_normal([1000, 10]) + 100.
        win_vals = mlab.window_hanning(np.ones(sig.shape[0]))
        out = mlab.apply_window(sig, win_vals, axis=0, return_window=False)
        expected = np.zeros_like(sig)
        for i in range(sig.shape[1]):
            expected[:, i] = mlab.window_hanning(sig[:, i])
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_els2_2D_axis0(self):
        sig = np.random.standard_normal([1000, 10]) + 100.
        win = mlab.window_hanning
        win_vals = mlab.window_hanning(np.ones(sig.shape[0]))
        out, win_used = mlab.apply_window(sig, win, axis=0,
                                          return_window=True)
        expected = np.zeros_like(sig)
        for i in range(sig.shape[1]):
            expected[:, i] = win_vals * sig[:, i]
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)
        assert_array_equal(win_vals, win_used)

    def test_apply_window_hanning_els3_2D_axis0(self):
        sig = np.random.standard_normal([1000, 10]) + 100.
        win = mlab.window_hanning
        win_vals = mlab.window_hanning(np.ones(sig.shape[0]))
        out, win_used = mlab.apply_window(sig, win, axis=0,
                                          return_window=True)
        expected = mlab.apply_window(sig, win_vals, axis=0,
                                     return_window=False)
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)
        assert_array_equal(win_vals, win_used)

    def test_apply_window_hanning_2D_axis1(self):
        sig = np.random.standard_normal([10, 1000]) + 100.
        win = mlab.window_hanning
        out = mlab.apply_window(sig, win, axis=1, return_window=False)
        expected = np.zeros_like(sig)
        for i in range(sig.shape[0]):
            expected[i, :] = win(sig[i, :])
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_2D__els1_axis1(self):
        sig = np.random.standard_normal([10, 1000]) + 100.
        win_vals = mlab.window_hanning(np.ones(sig.shape[1]))
        out = mlab.apply_window(sig, win_vals, axis=1, return_window=False)
        expected = np.zeros_like(sig)
        for i in range(sig.shape[0]):
            expected[i, :] = mlab.window_hanning(sig[i, :])
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_2D_els2_axis1(self):
        sig = np.random.standard_normal([10, 1000]) + 100.
        win = mlab.window_hanning
        win_vals = mlab.window_hanning(np.ones(sig.shape[1]))
        out, win_used = mlab.apply_window(sig, win, axis=1,
                                          return_window=True)
        expected = np.zeros_like(sig)
        for i in range(sig.shape[0]):
            expected[i, :] = win_vals * sig[i, :]
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)
        assert_array_equal(win_vals, win_used)

    def test_apply_window_hanning_2D_els3_axis1(self):
        sig = np.random.standard_normal([10, 1000]) + 100.
        win = mlab.window_hanning
        win_vals = mlab.window_hanning(np.ones(sig.shape[1]))
        out = mlab.apply_window(sig, win, axis=1, return_window=False)
        expected = mlab.apply_window(sig, win_vals, axis=1,
                                     return_window=False)
        assert_equal(expected.shape, out.shape)
        assert_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_stride_windows_hanning_2D_n13_noverlapn3_axis0(self):
        sig = self.sig_rand
        win = mlab.window_hanning
        strided = mlab.stride_windows(sig, n=13, noverlap=2, axis=0)
        out = mlab.apply_window(strided, win, axis=0, return_window=False)
        expected = self.check_window_apply_repeat(sig, win, 13, 2)
        assert_equal(expected.shape, out.shape)
        assert_not_equal(sig.shape, out.shape)
        assert_allclose(expected, out, atol=1e-06)

    def test_apply_window_hanning_2D_stack_axis1(self):
        ydata = np.arange(32)
        ydata1 = ydata + 5
        ydata2 = ydata + 3.3
        ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
        ycontrol2 = mlab.window_hanning(ydata2)
        ydata = np.vstack([ydata1, ydata2])
        ycontrol = np.vstack([ycontrol1, ycontrol2])
        ydata = np.tile(ydata, (20, 1))
        ycontrol = np.tile(ycontrol, (20, 1))
        result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
                                   return_window=False)
        assert_allclose(ycontrol, result, atol=1e-08)

    def test_apply_window_hanning_2D_stack_windows_axis1(self):
        ydata = np.arange(32)
        ydata1 = ydata + 5
        ydata2 = ydata + 3.3
        ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
        ycontrol2 = mlab.window_hanning(ydata2)
        ydata = np.vstack([ydata1, ydata2])
        ycontrol = np.vstack([ycontrol1, ycontrol2])
        ydata = np.tile(ydata, (20, 1))
        ycontrol = np.tile(ycontrol, (20, 1))
        result = mlab.apply_window(ydata, mlab.window_hanning, axis=1,
                                   return_window=False)
        assert_allclose(ycontrol, result, atol=1e-08)

    def test_apply_window_hanning_2D_stack_windows_axis1_unflatten(self):
        n = 32
        ydata = np.arange(n)
        ydata1 = ydata + 5
        ydata2 = ydata + 3.3
        ycontrol1 = mlab.apply_window(ydata1, mlab.window_hanning)
        ycontrol2 = mlab.window_hanning(ydata2)
        ydata = np.vstack([ydata1, ydata2])
        ycontrol = np.vstack([ycontrol1, ycontrol2])
        ydata = np.tile(ydata, (20, 1))
        ycontrol = np.tile(ycontrol, (20, 1))
        ydata = ydata.flatten()
        ydata1 = mlab.stride_windows(ydata, 32, noverlap=0, axis=0)
        result = mlab.apply_window(ydata1, mlab.window_hanning, axis=0,
                                   return_window=False)
        assert_allclose(ycontrol.T, result, atol=1e-08)
class detrend_testcase(CleanupTestCase):
def setUp(self):
    """Build the reference signals shared by the detrend tests."""
    np.random.seed(0)
    n = 1000
    x = np.linspace(0., 100, n)
    self.sig_zeros = np.zeros(n)
    self.sig_off = self.sig_zeros + 100.
    self.sig_slope = np.linspace(-10., 90., n)
    self.sig_slope_mean = x - x.mean()
    # random + sinusoid base signal, each centred on zero mean
    sig_rand = np.random.standard_normal(n)
    sig_sin = np.sin(x * 2 * np.pi / (n / 100))
    sig_rand -= sig_rand.mean()
    sig_sin -= sig_sin.mean()
    self.sig_base = sig_rand + sig_sin
    self.atol = 1e-08
def test_detrend_none_0D_zeros(self):
    """detrend_none must return a 0D input unchanged."""
    data = 0.
    res = mlab.detrend_none(data)
    # Fixed: the original asserted input == targ (trivially true) and
    # never inspected res.
    assert_equal(res, data)
def test_detrend_none_0D_zeros_axis1(self):
    """detrend_none must ignore axis and return a 0D input unchanged."""
    data = 0.
    res = mlab.detrend_none(data, axis=1)
    # Fixed: the original asserted input == targ (trivially true) and
    # never inspected res.
    assert_equal(res, data)
def test_detrend_str_none_0D_zeros(self):
    """detrend(key='none') must return a 0D input unchanged."""
    data = 0.
    res = mlab.detrend(data, key='none')
    # Fixed: the original asserted input == targ (trivially true) and
    # never inspected res.
    assert_equal(res, data)
def test_detrend_detrend_none_0D_zeros(self):
    """detrend(key=detrend_none) must return a 0D input unchanged."""
    data = 0.
    res = mlab.detrend(data, key=mlab.detrend_none)
    # Fixed: the original asserted input == targ (trivially true) and
    # never inspected res.
    assert_equal(res, data)
def test_detrend_none_0D_off(self):
    """detrend_none must return a non-zero 0D input unchanged."""
    data = 5.5
    res = mlab.detrend_none(data)
    # Fixed: the original asserted input == targ (trivially true) and
    # never inspected res.
    assert_equal(res, data)
def test_detrend_none_1D_off(self):
    # detrend_none passes a constant-offset signal through untouched.
    data = self.sig_off
    assert_array_equal(mlab.detrend_none(data), data)
def test_detrend_none_1D_slope(self):
    # detrend_none passes a sloped signal through untouched.
    data = self.sig_slope
    assert_array_equal(mlab.detrend_none(data), data)
def test_detrend_none_1D_base(self):
    # detrend_none passes the base signal through untouched.
    data = self.sig_base
    assert_array_equal(mlab.detrend_none(data), data)
def test_detrend_none_1D_base_slope_off_list(self):
    # A plain Python list input is also passed through unchanged.
    data = (self.sig_base + self.sig_slope + self.sig_off).tolist()
    assert_equal(mlab.detrend_none(data), data)
def test_detrend_none_2D(self):
    # 2D input is passed through untouched.
    data = np.vstack([self.sig_base,
                      self.sig_base + self.sig_off,
                      self.sig_base + self.sig_slope,
                      self.sig_base + self.sig_off + self.sig_slope])
    assert_array_equal(mlab.detrend_none(data), data)
def test_detrend_none_2D_T(self):
    # Transposed 2D input is likewise passed through untouched.
    data = np.vstack([self.sig_base,
                      self.sig_base + self.sig_off,
                      self.sig_base + self.sig_slope,
                      self.sig_base + self.sig_off + self.sig_slope])
    res = mlab.detrend_none(data.T)
    assert_array_equal(res.T, data)
def test_detrend_mean_0D_zeros(self):
    # Removing the mean of a zero scalar yields zero.
    assert_almost_equal(mlab.detrend_mean(0.), 0.)
def test_detrend_str_mean_0D_zeros(self):
    # key='mean' dispatches to detrend_mean.
    assert_almost_equal(mlab.detrend(0., key='mean'), 0.)
def test_detrend_detrend_mean_0D_zeros(self):
    # Passing the callable works like the string key.
    assert_almost_equal(mlab.detrend(0., key=mlab.detrend_mean), 0.)
def test_detrend_mean_0D_off(self):
    # A scalar's mean is itself, so detrending yields zero.
    assert_almost_equal(mlab.detrend_mean(5.5), 0.)
def test_detrend_str_mean_0D_off(self):
    assert_almost_equal(mlab.detrend(5.5, key='mean'), 0.)
def test_detrend_detrend_mean_0D_off(self):
    assert_almost_equal(mlab.detrend(5.5, key=mlab.detrend_mean), 0.)
def test_detrend_mean_1D_zeros(self):
    res = mlab.detrend_mean(self.sig_zeros)
    assert_allclose(res, self.sig_zeros, atol=self.atol)
def test_detrend_mean_1D_base(self):
    # The base signal is already zero-mean, so it is returned unchanged.
    res = mlab.detrend_mean(self.sig_base)
    assert_allclose(res, self.sig_base, atol=self.atol)
def test_detrend_mean_1D_base_off(self):
    # A constant offset is removed, recovering the base signal.
    res = mlab.detrend_mean(self.sig_base + self.sig_off)
    assert_allclose(res, self.sig_base, atol=self.atol)
def test_detrend_mean_1D_base_slope(self):
    # Only the mean is removed; the slope stays (as a zero-mean ramp).
    res = mlab.detrend_mean(self.sig_base + self.sig_slope)
    assert_allclose(res, self.sig_base + self.sig_slope_mean,
                    atol=self.atol)
def test_detrend_mean_1D_base_slope_off(self):
    res = mlab.detrend_mean(self.sig_base + self.sig_slope + self.sig_off)
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_axis0(self):
    data = self.sig_base + self.sig_slope + self.sig_off
    res = mlab.detrend_mean(data, axis=0)
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list(self):
    # A list input works the same as an ndarray.
    data = self.sig_base + self.sig_slope + self.sig_off
    res = mlab.detrend_mean(data.tolist())
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_detrend_mean_1D_base_slope_off_list_axis0(self):
    data = self.sig_base + self.sig_slope + self.sig_off
    res = mlab.detrend_mean(data.tolist(), axis=0)
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_demean_0D_off(self):
    # demean of a scalar (axis=None) is zero.
    assert_almost_equal(mlab.demean(5.5, axis=None), 0.)
def test_demean_1D_base_slope_off(self):
    res = mlab.demean(self.sig_base + self.sig_slope + self.sig_off)
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_demean_1D_base_slope_off_axis0(self):
    data = self.sig_base + self.sig_slope + self.sig_off
    res = mlab.demean(data, axis=0)
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_demean_1D_base_slope_off_list(self):
    data = self.sig_base + self.sig_slope + self.sig_off
    res = mlab.demean(data.tolist())
    assert_allclose(res, self.sig_base + self.sig_slope_mean, atol=1e-08)
def test_detrend_mean_2D_default(self):
    # Default axis removes each row's offset.
    data = np.vstack([self.sig_off,
                      self.sig_base + self.sig_off])
    expected = np.vstack([self.sig_zeros,
                          self.sig_base])
    res = mlab.detrend_mean(data)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_none(self):
    # NOTE(review): an identical method is re-defined later in this class
    # and shadows this one.
    data = np.vstack([self.sig_off,
                      self.sig_base + self.sig_off])
    expected = np.vstack([self.sig_zeros,
                          self.sig_base])
    res = mlab.detrend_mean(data, axis=None)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_none_T(self):
    # NOTE(review): an identical method is re-defined later in this class
    # and shadows this one.
    data = np.vstack([self.sig_off,
                      self.sig_base + self.sig_off]).T
    expected = np.vstack([self.sig_zeros,
                          self.sig_base])
    res = mlab.detrend_mean(data, axis=None)
    assert_allclose(res.T, expected, atol=1e-08)
def test_detrend_mean_2D_axis0(self):
    # NOTE(review): an identical method is re-defined later in this class
    # and shadows this one.
    data = np.vstack([self.sig_base,
                      self.sig_base + self.sig_off,
                      self.sig_base + self.sig_slope,
                      self.sig_base + self.sig_off + self.sig_slope]).T
    expected = np.vstack([self.sig_base,
                          self.sig_base,
                          self.sig_base + self.sig_slope_mean,
                          self.sig_base + self.sig_slope_mean]).T
    res = mlab.detrend_mean(data, axis=0)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_axis1(self):
    # NOTE(review): an identical method is re-defined later in this class
    # and shadows this one.
    data = np.vstack([self.sig_base,
                      self.sig_base + self.sig_off,
                      self.sig_base + self.sig_slope,
                      self.sig_base + self.sig_off + self.sig_slope])
    expected = np.vstack([self.sig_base,
                          self.sig_base,
                          self.sig_base + self.sig_slope_mean,
                          self.sig_base + self.sig_slope_mean])
    res = mlab.detrend_mean(data, axis=1)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_axism1(self):
    # NOTE(review): an identical method is re-defined later in this class
    # and shadows this one.
    data = np.vstack([self.sig_base,
                      self.sig_base + self.sig_off,
                      self.sig_base + self.sig_slope,
                      self.sig_base + self.sig_off + self.sig_slope])
    expected = np.vstack([self.sig_base,
                          self.sig_base,
                          self.sig_base + self.sig_slope_mean,
                          self.sig_base + self.sig_slope_mean])
    res = mlab.detrend_mean(data, axis=-1)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_none(self):
    # NOTE(review): duplicate definition — shadows the identical method
    # defined earlier in this class.
    data = np.vstack([self.sig_off,
                      self.sig_base + self.sig_off])
    expected = np.vstack([self.sig_zeros,
                          self.sig_base])
    res = mlab.detrend_mean(data, axis=None)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_none_T(self):
    # NOTE(review): duplicate definition — shadows the identical method
    # defined earlier in this class.
    data = np.vstack([self.sig_off,
                      self.sig_base + self.sig_off]).T
    expected = np.vstack([self.sig_zeros,
                          self.sig_base])
    res = mlab.detrend_mean(data, axis=None)
    assert_allclose(res.T, expected, atol=1e-08)
def test_detrend_mean_2D_axis0(self):
    # NOTE(review): duplicate definition — shadows the identical method
    # defined earlier in this class.
    data = np.vstack([self.sig_base,
                      self.sig_base + self.sig_off,
                      self.sig_base + self.sig_slope,
                      self.sig_base + self.sig_off + self.sig_slope]).T
    expected = np.vstack([self.sig_base,
                          self.sig_base,
                          self.sig_base + self.sig_slope_mean,
                          self.sig_base + self.sig_slope_mean]).T
    res = mlab.detrend_mean(data, axis=0)
    assert_allclose(res, expected, atol=1e-08)
def test_detrend_mean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_mean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend_mean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_2D_default(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, axis=None)
assert_allclose(res, targ, atol=1e-08)
def test_detrend_str_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='mean', axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_str_constant_2D_none_T(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri).T
targ = np.vstack(arrt)
res = mlab.detrend(input, key='constant', axis=None)
assert_allclose(res.T, targ,
atol=1e-08)
def test_detrend_str_default_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='default', axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_detrend_mean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_mean, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_default(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.demean(input)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_none(self):
arri = [self.sig_off,
self.sig_base + self.sig_off]
arrt = [self.sig_zeros,
self.sig_base]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=None)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis0(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.demean(input, axis=0)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axis1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=1)
assert_allclose(res, targ,
atol=1e-08)
def test_demean_2D_axism1(self):
arri = [self.sig_base,
self.sig_base + self.sig_off,
self.sig_base + self.sig_slope,
self.sig_base + self.sig_off + self.sig_slope]
arrt = [self.sig_base,
self.sig_base,
self.sig_base + self.sig_slope_mean,
self.sig_base + self.sig_slope_mean]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.demean(input, axis=-1)
assert_allclose(res, targ,
atol=1e-08)
def test_detrend_bad_key_str_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, key='spam')
def test_detrend_bad_key_var_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, key=5)
def test_detrend_mean_0D_d0_ValueError(self):
input = 5.5
assert_raises(ValueError, mlab.detrend_mean, input, axis=0)
def test_detrend_0D_d0_ValueError(self):
input = 5.5
assert_raises(ValueError, mlab.detrend, input, axis=0)
def test_detrend_mean_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.detrend_mean, input, axis=1)
def test_detrend_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.detrend, input, axis=1)
def test_demean_1D_d1_ValueError(self):
input = self.sig_slope
assert_raises(ValueError, mlab.demean, input, axis=1)
def test_detrend_mean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend_mean, input, axis=2)
def test_detrend_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend, input, axis=2)
def test_demean_2D_d2_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.demean, input, axis=2)
def test_detrend_linear_0D_zeros(self):
input = 0.
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend_linear(input)
assert_almost_equal(res, targ)
def test_detrend_str_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key='linear')
assert_almost_equal(res, targ)
def test_detrend_detrend_linear_0D_off(self):
input = 5.5
targ = 0.
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_almost_equal(res, targ)
def test_detrend_linear_1d_off(self):
input = self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope(self):
input = self.sig_slope
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key='linear')
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend(input, key=mlab.detrend_linear)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_1d_slope_off_list(self):
input = self.sig_slope + self.sig_off
targ = self.sig_zeros
res = mlab.detrend_linear(input.tolist())
assert_allclose(res, targ, atol=self.atol)
def test_detrend_linear_2D_ValueError(self):
input = self.sig_slope[np.newaxis]
assert_raises(ValueError, mlab.detrend_linear, input)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key='linear', axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri).T
targ = np.vstack(arrt).T
res = mlab.detrend(input, key=mlab.detrend_linear, axis=0)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_str_linear_2d_slope_off_axis0(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key='linear', axis=1)
assert_allclose(res, targ, atol=self.atol)
def test_detrend_detrend_linear_1d_slope_off_axis1(self):
arri = [self.sig_off,
self.sig_slope,
self.sig_slope + self.sig_off]
arrt = [self.sig_zeros,
self.sig_zeros,
self.sig_zeros]
input = np.vstack(arri)
targ = np.vstack(arrt)
res = mlab.detrend(input, key=mlab.detrend_linear, axis=1)
assert_allclose(res, targ, atol=self.atol)
class spectral_testcase_nosig_real_onesided(CleanupTestCase):
def setUp(self):
self.createStim(fstims=[],
iscomplex=False, sides='onesided', nsides=1)
    def createStim(self, fstims, iscomplex, sides, nsides, len_x=None,
                   NFFT_density=-1, nover_density=-1, pad_to_density=-1,
                   pad_to_spectrum=-1):
        """Create the test signal and the expected spectral-analysis layout.

        Builds ``self.y`` (a sum of sinusoids sampled at Fs=100) plus the
        expected frequency/time axes and the NFFT/noverlap/pad_to values that
        the spectral tests pass to the ``mlab`` functions.

        Parameter sentinel convention (for the four ``*_density``/
        ``*_spectrum`` arguments): a negative value means "use the computed
        default and also pass that default to mlab"; ``None`` means "pass
        None to mlab but use the computed default when building the expected
        results"; any other value is used as-is.

        Parameters
        ----------
        fstims : list of numbers; each entry f is turned into a stimulus
            frequency Fs/f (see the conversion below).
        iscomplex : if True, the signal is cast to complex dtype.
        sides : the ``sides`` string handed to the mlab calls.
        nsides : 1 for one-sided expected frequency axes, otherwise two-sided.
        len_x : optional truncation length for the time base.
        """
        Fs = 100.
        x = np.arange(0, 10, 1/Fs)
        if len_x is not None:
            x = x[:len_x]
        # get the stimulus frequencies, defaulting to None
        # (each input entry is interpreted as a period divisor: f = Fs/fstim)
        fstims = [Fs/fstim for fstim in fstims]
        # get the constants, default to calculated values
        # NFFT for density (psd/csd/specgram) computations
        if NFFT_density is None:
            NFFT_density_real = 256
        elif NFFT_density < 0:
            NFFT_density_real = NFFT_density = 100
        else:
            NFFT_density_real = NFFT_density
        # noverlap for density computations (default: half of NFFT)
        if nover_density is None:
            nover_density_real = 0
        elif nover_density < 0:
            nover_density_real = nover_density = NFFT_density_real//2
        else:
            nover_density_real = nover_density
        # pad_to for density computations (default: next power of two)
        if pad_to_density is None:
            pad_to_density_real = NFFT_density_real
        elif pad_to_density < 0:
            pad_to_density = int(2**np.ceil(np.log2(NFFT_density_real)))
            pad_to_density_real = pad_to_density
        else:
            pad_to_density_real = pad_to_density
        # pad_to for the single-spectrum functions (default: whole signal)
        if pad_to_spectrum is None:
            pad_to_spectrum_real = len(x)
        elif pad_to_spectrum < 0:
            pad_to_spectrum_real = pad_to_spectrum = len(x)
        else:
            pad_to_spectrum_real = pad_to_spectrum
        # the spectrum functions use one segment covering the full signal
        # NOTE(review): both branches evaluate to len(x) when
        # pad_to_spectrum is None (pad_to_spectrum_real was just set to
        # len(x) above) — presumably intentional, but worth confirming.
        if pad_to_spectrum is None:
            NFFT_spectrum_real = NFFT_spectrum = pad_to_spectrum_real
        else:
            NFFT_spectrum_real = NFFT_spectrum = len(x)
        nover_spectrum_real = nover_spectrum = 0
        # specgram shares the density parameters
        NFFT_specgram = NFFT_density
        nover_specgram = nover_density
        pad_to_specgram = pad_to_density
        NFFT_specgram_real = NFFT_density_real
        nover_specgram_real = nover_density_real
        if nsides == 1:
            # one-sided frequency axes: 0 .. Fs/2
            # frequencies for specgram, psd, and csd
            # need to handle even and odd differently
            if pad_to_density_real % 2:
                freqs_density = np.linspace(0, Fs/2,
                                            num=pad_to_density_real,
                                            endpoint=False)[::2]
            else:
                freqs_density = np.linspace(0, Fs/2,
                                            num=pad_to_density_real//2+1)
            # frequencies for complex, magnitude, angle, and phase spectrums
            # need to handle even and odd differently
            if pad_to_spectrum_real % 2:
                freqs_spectrum = np.linspace(0, Fs/2,
                                             num=pad_to_spectrum_real,
                                             endpoint=False)[::2]
            else:
                freqs_spectrum = np.linspace(0, Fs/2,
                                             num=pad_to_spectrum_real//2+1)
        else:
            # two-sided frequency axes: -Fs/2 .. Fs/2
            # frequencies for specgram, psd, and csd
            # need to handle even and odd differentl
            if pad_to_density_real % 2:
                freqs_density = np.linspace(-Fs/2, Fs/2,
                                            num=2*pad_to_density_real,
                                            endpoint=False)[1::2]
            else:
                freqs_density = np.linspace(-Fs/2, Fs/2,
                                            num=pad_to_density_real,
                                            endpoint=False)
            # frequencies for complex, magnitude, angle, and phase spectrums
            # need to handle even and odd differently
            if pad_to_spectrum_real % 2:
                freqs_spectrum = np.linspace(-Fs/2, Fs/2,
                                             num=2*pad_to_spectrum_real,
                                             endpoint=False)[1::2]
            else:
                freqs_spectrum = np.linspace(-Fs/2, Fs/2,
                                             num=pad_to_spectrum_real,
                                             endpoint=False)
        freqs_specgram = freqs_density
        # time points for specgram: centers of the overlapping segments
        t_start = NFFT_specgram_real//2
        t_stop = len(x) - NFFT_specgram_real//2+1
        t_step = NFFT_specgram_real - nover_specgram_real
        t_specgram = x[t_start:t_stop:t_step]
        if NFFT_specgram_real % 2:
            # odd segment length: center falls between two samples
            t_specgram += 1/Fs/2
        if len(t_specgram) == 0:
            # signal shorter than one segment: single centered time point
            t_specgram = np.array([NFFT_specgram_real/(2*Fs)])
        t_spectrum = np.array([NFFT_spectrum_real/(2*Fs)])
        t_density = t_specgram
        # build the signal: a sum of sinusoids with increasing amplitude
        y = np.zeros_like(x)
        for i, fstim in enumerate(fstims):
            y += np.sin(fstim * x * np.pi * 2) * 10**i
        if iscomplex:
            y = y.astype('complex')
        # stash everything the test methods will need
        self.Fs = Fs
        self.sides = sides
        self.fstims = fstims
        self.NFFT_density = NFFT_density
        self.nover_density = nover_density
        self.pad_to_density = pad_to_density
        self.NFFT_spectrum = NFFT_spectrum
        self.nover_spectrum = nover_spectrum
        self.pad_to_spectrum = pad_to_spectrum
        self.NFFT_specgram = NFFT_specgram
        self.nover_specgram = nover_specgram
        self.pad_to_specgram = pad_to_specgram
        self.t_specgram = t_specgram
        self.t_density = t_density
        self.t_spectrum = t_spectrum
        self.y = y
        self.freqs_density = freqs_density
        self.freqs_spectrum = freqs_spectrum
        self.freqs_specgram = freqs_specgram
        self.NFFT_density_real = NFFT_density_real
def check_freqs(self, vals, targfreqs, resfreqs, fstims):
assert_true(resfreqs.argmin() == 0)
assert_true(resfreqs.argmax() == len(resfreqs)-1)
assert_allclose(resfreqs, targfreqs, atol=1e-06)
for fstim in fstims:
i = np.abs(resfreqs - fstim).argmin()
assert_true(vals[i] > vals[i+2])
assert_true(vals[i] > vals[i-2])
    def check_maxfreq(self, spec, fsp, fstims):
        """Check that the strongest peaks in *spec* occur at *fstims*.

        Peaks are matched from strongest to weakest (fstims is ordered with
        the dominant stimulus last, matching the 10**i amplitude scaling in
        createStim).  For a two-sided spectrum, the check recurses on each
        half separately.
        """
        # skip the test if there are no frequencies
        if len(fstims) == 0:
            return
        # if twosided, do the test for each side
        if fsp.min() < 0:
            fspa = np.abs(fsp)
            zeroind = fspa.argmin()
            self.check_maxfreq(spec[:zeroind], fspa[:zeroind], fstims)
            self.check_maxfreq(spec[zeroind:], fspa[zeroind:], fstims)
            return
        # work on copies: both lists are destructively consumed below
        fstimst = fstims[:]
        spect = spec.copy()
        # go through each peak and make sure it is correctly the maximum peak
        while fstimst:
            maxind = spect.argmax()
            maxfreq = fsp[maxind]
            assert_almost_equal(maxfreq, fstimst[-1])
            del fstimst[-1]
            # zero out the neighborhood so the next-largest peak can be found
            spect[maxind-5:maxind+5] = 0
def test_spectral_helper_raises_complex_same_data(self):
# test that mode 'complex' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='complex')
def test_spectral_helper_raises_magnitude_same_data(self):
# test that mode 'magnitude' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='magnitude')
def test_spectral_helper_raises_angle_same_data(self):
# test that mode 'angle' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='angle')
def test_spectral_helper_raises_phase_same_data(self):
# test that mode 'phase' cannot be used if x is not y
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y+1, mode='phase')
def test_spectral_helper_raises_unknown_mode(self):
# test that unknown value for mode cannot be used
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, mode='spam')
def test_spectral_helper_raises_unknown_sides(self):
# test that unknown value for sides cannot be used
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, sides='eggs')
def test_spectral_helper_raises_noverlap_gt_NFFT(self):
# test that noverlap cannot be larger than NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, NFFT=10, noverlap=20)
def test_spectral_helper_raises_noverlap_eq_NFFT(self):
# test that noverlap cannot be equal to NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, NFFT=10, noverlap=10)
def test_spectral_helper_raises_winlen_ne_NFFT(self):
# test that the window length cannot be different from NFFT
assert_raises(ValueError, mlab._spectral_helper,
x=self.y, y=self.y, NFFT=10, window=np.ones(9))
def test_single_spectrum_helper_raises_mode_default(self):
# test that mode 'default' cannot be used with _single_spectrum_helper
assert_raises(ValueError, mlab._single_spectrum_helper,
x=self.y, mode='default')
def test_single_spectrum_helper_raises_mode_psd(self):
# test that mode 'psd' cannot be used with _single_spectrum_helper
assert_raises(ValueError, mlab._single_spectrum_helper,
x=self.y, mode='psd')
def test_spectral_helper_psd(self):
freqs = self.freqs_density
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
mode='psd')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_density, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_spectral_helper_magnitude_specgram(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_spectral_helper_magnitude_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp, t = mlab._spectral_helper(x=self.y, y=self.y,
NFFT=self.NFFT_spectrum,
Fs=self.Fs,
noverlap=self.nover_spectrum,
pad_to=self.pad_to_spectrum,
sides=self.sides,
mode='magnitude')
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_spectrum, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], 1)
def test_csd(self):
freqs = self.freqs_density
spec, fsp = mlab.csd(x=self.y, y=self.y+1,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_psd(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_equal(spec.shape, freqs.shape)
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_psd_detrend_mean_func_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_mean)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_mean_str_offset(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='mean')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_func_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_detrend_linear_str_trend(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ydata = np.vstack([ydata1, ydata2])
ydata = np.tile(ydata, (20, 1))
ydatab = ydata.T.flatten()
ydata = ydata.flatten()
ycontrol = np.zeros_like(ydata)
spec_g, fsp_g = mlab.psd(x=ydata,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend='linear')
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides)
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1, windowVals = mlab.apply_window(ydata1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ydata2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_window_hanning_detrend_linear(self):
if self.NFFT_density is None:
return
freqs = self.freqs_density
ydata = np.arange(self.NFFT_density)
ycontrol = np.zeros(self.NFFT_density)
ydata1 = ydata+5
ydata2 = ydata+3.3
ycontrol1 = ycontrol
ycontrol2 = ycontrol
ycontrol1, windowVals = mlab.apply_window(ycontrol1,
mlab.window_hanning,
return_window=True)
ycontrol2 = mlab.window_hanning(ycontrol2)
ydata = np.vstack([ydata1, ydata2])
ycontrol = np.vstack([ycontrol1, ycontrol2])
ydata = np.tile(ydata, (20, 1))
ycontrol = np.tile(ycontrol, (20, 1))
ydatab = ydata.T.flatten()
ydataf = ydata.flatten()
ycontrol = ycontrol.flatten()
spec_g, fsp_g = mlab.psd(x=ydataf,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_b, fsp_b = mlab.psd(x=ydatab,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
detrend=mlab.detrend_linear,
window=mlab.window_hanning)
spec_c, fsp_c = mlab.psd(x=ycontrol,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=0,
sides=self.sides,
window=mlab.window_none)
spec_c *= len(ycontrol1)/(np.abs(windowVals)**2).sum()
assert_array_equal(fsp_g, fsp_c)
assert_array_equal(fsp_b, fsp_c)
assert_allclose(spec_g, spec_c, atol=1e-08)
# these should not be almost equal
assert_raises(AssertionError,
assert_allclose, spec_b, spec_c, atol=1e-08)
def test_psd_windowarray(self):
freqs = self.freqs_density
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=np.ones(self.NFFT_density_real))
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_psd_windowarray_scale_by_freq(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
spec, fsp = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning)
spec_s, fsp_s = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=True)
spec_n, fsp_n = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=mlab.window_hanning,
scale_by_freq=False)
assert_array_equal(fsp, fsp_s)
assert_array_equal(fsp, fsp_n)
assert_array_equal(spec, spec_s)
assert_allclose(spec_s*(win**2).sum(),
spec_n/self.Fs*win.sum()**2,
atol=1e-08)
def test_complex_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.complex_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_magnitude_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.magnitude_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_equal(spec.shape, freqs.shape)
self.check_maxfreq(spec, fsp, self.fstims)
self.check_freqs(spec, freqs, fsp, self.fstims)
def test_angle_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.angle_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_phase_spectrum(self):
freqs = self.freqs_spectrum
spec, fsp = mlab.phase_spectrum(x=self.y,
Fs=self.Fs,
sides=self.sides,
pad_to=self.pad_to_spectrum)
assert_allclose(fsp, freqs, atol=1e-06)
assert_equal(spec.shape, freqs.shape)
def test_specgram_auto(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_default(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_psd(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_complex(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm = np.mean(np.abs(spec), axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_magnitude(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
# since we are using a single freq, all time slices
# should be about the same
if np.abs(spec.max()) != 0:
assert_allclose(np.diff(spec, axis=1).max()/np.abs(spec.max()), 0,
atol=1e-02)
self.check_freqs(specm, freqs, fsp, self.fstims)
def test_specgram_angle(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_specgram_phase(self):
freqs = self.freqs_specgram
spec, fsp, t = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
specm = np.mean(spec, axis=1)
assert_allclose(fsp, freqs, atol=1e-06)
assert_allclose(t, self.t_specgram, atol=1e-06)
assert_equal(spec.shape[0], freqs.shape[0])
assert_equal(spec.shape[1], self.t_specgram.shape[0])
def test_psd_csd_equal(self):
freqs = self.freqs_density
Pxx, freqsxx = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
Pxy, freqsxy = mlab.csd(x=self.y, y=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(Pxx, Pxy)
assert_array_equal(freqsxx, freqsxy)
def test_specgram_auto_default_equal(self):
'''test that mlab.specgram without mode and with mode 'default' and
'psd' are all the same'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specb, freqspecb, tb = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='default')
assert_array_equal(speca, specb)
assert_array_equal(freqspeca, freqspecb)
assert_array_equal(ta, tb)
def test_specgram_auto_psd_equal(self):
'''test that mlab.specgram without mode and with mode 'default' and
'psd' are all the same'''
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides)
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='psd')
assert_array_equal(speca, specc)
assert_array_equal(freqspeca, freqspecc)
assert_array_equal(ta, tc)
def test_specgram_complex_mag_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specm, freqspecm, tm = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='magnitude')
assert_array_equal(freqspecc, freqspecm)
assert_array_equal(tc, tm)
assert_allclose(np.abs(specc), specm, atol=1e-06)
def test_specgram_complex_angle_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
assert_array_equal(freqspecc, freqspeca)
assert_array_equal(tc, ta)
assert_allclose(np.angle(specc), speca, atol=1e-06)
def test_specgram_complex_phase_equivalent(self):
freqs = self.freqs_specgram
specc, freqspecc, tc = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='complex')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspecc, freqspecp)
assert_array_equal(tc, tp)
assert_allclose(np.unwrap(np.angle(specc), axis=0), specp,
atol=1e-06)
def test_specgram_angle_phase_equivalent(self):
freqs = self.freqs_specgram
speca, freqspeca, ta = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='angle')
specp, freqspecp, tp = mlab.specgram(x=self.y,
NFFT=self.NFFT_specgram,
Fs=self.Fs,
noverlap=self.nover_specgram,
pad_to=self.pad_to_specgram,
sides=self.sides,
mode='phase')
assert_array_equal(freqspeca, freqspecp)
assert_array_equal(ta, tp)
assert_allclose(np.unwrap(speca, axis=0), specp,
atol=1e-06)
def test_psd_windowarray_equal(self):
freqs = self.freqs_density
win = mlab.window_hanning(np.ones(self.NFFT_density_real))
speca, fspa = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides,
window=win)
specb, fspb = mlab.psd(x=self.y,
NFFT=self.NFFT_density,
Fs=self.Fs,
noverlap=self.nover_density,
pad_to=self.pad_to_density,
sides=self.sides)
assert_array_equal(fspa, fspb)
assert_allclose(speca, specb, atol=1e-08)
# ---------------------------------------------------------------------------
# Parametrized variants of spectral_testcase_nosig_real_onesided.
# Each subclass only overrides setUp() to call createStim() with a different
# combination of stimulus frequencies, real/complex input, sides and FFT
# sizing; every inherited test method then runs against that configuration.
# ---------------------------------------------------------------------------

# --- no stimulus frequencies (pure noise-free baseline) --------------------
class spectral_testcase_nosig_real_twosided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        iscomplex=True, sides='default', nsides=2)


# --- a single 4 Hz stimulus ------------------------------------------------
class spectral_testcase_Fs4_real_onesided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4],
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_Fs4_real_twosided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4],
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_Fs4_real_defaultsided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4],
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_Fs4_complex_onesided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4],
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_Fs4_complex_twosided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4],
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_Fs4_complex_defaultsided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4],
                        iscomplex=True, sides='default', nsides=2)


# --- three stimulus frequencies (4, 5 and 10 Hz) ---------------------------
class spectral_testcase_FsAll_real_onesided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4, 5, 10],
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_FsAll_real_twosided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4, 5, 10],
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_FsAll_real_defaultsided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4, 5, 10],
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_FsAll_complex_onesided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4, 5, 10],
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_FsAll_complex_twosided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4, 5, 10],
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_FsAll_complex_defaultsided(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[4, 5, 10],
                        iscomplex=True, sides='default', nsides=2)


# --- NFFT left at its default (NFFT_density=None) --------------------------
class spectral_testcase_nosig_real_onesided_noNFFT(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_noNFFT(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_noNFFT(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_noNFFT(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_noNFFT(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided_noNFFT(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='default', nsides=2)


# --- no zero-padding (pad_to_density=None) ---------------------------------
class spectral_testcase_nosig_real_onesided_nopad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_nopad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_nopad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_nopad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='onesided', nsides=1)


# NOTE(review): the two complex nopad_to cases below also pass
# NFFT_density=None, unlike the real nopad_to cases above — confirm
# whether this asymmetry is intentional.
class spectral_testcase_nosig_complex_twosided_nopad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided_nopad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='default', nsides=2)


# --- neither NFFT nor padding overridden -----------------------------------
class spectral_testcase_nosig_real_onesided_noNFFT_no_pad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_noNFFT_no_pad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_noNFFT_no_pad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_noNFFT_no_pad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_noNFFT_no_pad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided_noNFFT_no_pad_to(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        NFFT_density=None,
                        pad_to_density=None, pad_to_spectrum=None,
                        iscomplex=True, sides='default', nsides=2)


# --- NFFT larger than the signal, spectrum trimmed to 128 bins -------------
class spectral_testcase_nosig_real_onesided_trim(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        NFFT_density=512, pad_to_spectrum=128,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_trim(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        NFFT_density=512, pad_to_spectrum=128,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_trim(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        NFFT_density=512, pad_to_spectrum=128,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_trim(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        NFFT_density=512, pad_to_spectrum=128,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_trim(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        NFFT_density=512, pad_to_spectrum=128,
                        iscomplex=True, sides='twosided', nsides=2)


# NOTE(review): this case uses NFFT_density=128 while its five siblings
# use 512 — confirm whether this is deliberate or a typo.
class spectral_testcase_nosig_complex_defaultsided_trim(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        NFFT_density=128, pad_to_spectrum=128,
                        iscomplex=True, sides='default', nsides=2)


# --- odd padding sizes (33 / 257) ------------------------------------------
class spectral_testcase_nosig_real_onesided_odd(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        pad_to_density=33, pad_to_spectrum=257,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_odd(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        pad_to_density=33, pad_to_spectrum=257,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_odd(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        pad_to_density=33, pad_to_spectrum=257,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_odd(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        pad_to_density=33, pad_to_spectrum=257,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_odd(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        pad_to_density=33, pad_to_spectrum=257,
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided_odd(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=256,
                        pad_to_density=33, pad_to_spectrum=257,
                        iscomplex=True, sides='default', nsides=2)


# --- odd-length signal (255 samples) ---------------------------------------
class spectral_testcase_nosig_real_onesided_oddlen(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=255,
                        NFFT_density=33, pad_to_spectrum=None,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_oddlen(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=255,
                        NFFT_density=33, pad_to_spectrum=None,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_oddlen(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=255,
                        NFFT_density=33, pad_to_spectrum=None,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_oddlen(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=255,
                        NFFT_density=33, pad_to_spectrum=None,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_oddlen(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=255,
                        NFFT_density=33, pad_to_spectrum=None,
                        iscomplex=True, sides='twosided', nsides=2)


# NOTE(review): this case uses NFFT_density=128 while its five siblings
# use 33 — confirm whether this is deliberate or a typo.
class spectral_testcase_nosig_complex_defaultsided_oddlen(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=255,
                        NFFT_density=128, pad_to_spectrum=None,
                        iscomplex=True, sides='default', nsides=2)


# --- padding larger than the signal (stretch from 128 to 256) --------------
class spectral_testcase_nosig_real_onesided_stretch(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=128,
                        NFFT_density=128,
                        pad_to_density=256, pad_to_spectrum=256,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_stretch(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=128,
                        NFFT_density=128,
                        pad_to_density=256, pad_to_spectrum=256,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_stretch(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=128,
                        NFFT_density=128,
                        pad_to_density=256, pad_to_spectrum=256,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_stretch(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=128,
                        NFFT_density=128,
                        pad_to_density=256, pad_to_spectrum=256,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_stretch(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=128,
                        NFFT_density=128,
                        pad_to_density=256, pad_to_spectrum=256,
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided_stretch(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        len_x=128,
                        NFFT_density=128,
                        pad_to_density=256, pad_to_spectrum=256,
                        iscomplex=True, sides='default', nsides=2)


# --- overlapping segments (nover_density=32) -------------------------------
class spectral_testcase_nosig_real_onesided_overlap(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        nover_density=32,
                        iscomplex=False, sides='onesided', nsides=1)


class spectral_testcase_nosig_real_twosided_overlap(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        nover_density=32,
                        iscomplex=False, sides='twosided', nsides=2)


class spectral_testcase_nosig_real_defaultsided_overlap(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        nover_density=32,
                        iscomplex=False, sides='default', nsides=1)


class spectral_testcase_nosig_complex_onesided_overlap(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        nover_density=32,
                        iscomplex=True, sides='onesided', nsides=1)


class spectral_testcase_nosig_complex_twosided_overlap(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        nover_density=32,
                        iscomplex=True, sides='twosided', nsides=2)


class spectral_testcase_nosig_complex_defaultsided_overlap(
        spectral_testcase_nosig_real_onesided):
    def setUp(self):
        self.createStim(fstims=[],
                        nover_density=32,
                        iscomplex=True, sides='default', nsides=2)
def test_griddata_linear():
    """Linear interpolation must exactly reproduce a planar surface."""
    # The reference surface: z is linear in x and y.
    def expected_z(xp, yp):
        return 3.0 * xp - yp

    points_x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
    points_y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
    values = expected_z(points_x, points_y)
    xi = [0.2, 0.4, 0.6, 0.8]
    yi = [0.1, 0.3, 0.7, 0.9]

    # griddata accepts 1D xi/yi arrays...
    zi = mlab.griddata(points_x, points_y, values, xi, yi, interp='linear')
    xi, yi = np.meshgrid(xi, yi)
    np.testing.assert_array_almost_equal(zi, expected_z(xi, yi))

    # ...and 2D (meshgrid-form) xi/yi arrays.
    zi = mlab.griddata(points_x, points_y, values, xi, yi, interp='linear')
    np.testing.assert_array_almost_equal(zi, expected_z(xi, yi))

    # A masked input value must mask the affected output region.
    masked_values = np.ma.array(values,
                                mask=[False, False, False, True, False])
    correct_zi_masked = np.ma.masked_where(xi + yi > 1.0,
                                           expected_z(xi, yi))
    zi = mlab.griddata(points_x, points_y, masked_values, xi, yi,
                       interp='linear')
    matest.assert_array_almost_equal(zi, correct_zi_masked)
    np.testing.assert_array_equal(np.ma.getmask(zi),
                                  np.ma.getmask(correct_zi_masked))
@knownfailureif(not HAS_NATGRID)
def test_griddata_nn():
    """Natural-neighbour interpolation of a linear surface (needs natgrid)."""
    # z is a linear function of x and y.
    def get_z(x, y):
        return 3.0*x - y
    # Passing 1D xi and yi arrays to griddata.
    x = np.asarray([0.0, 1.0, 0.0, 1.0, 0.5])
    y = np.asarray([0.0, 0.0, 1.0, 1.0, 0.5])
    z = get_z(x, y)
    xi = [0.2, 0.4, 0.6, 0.8]
    yi = [0.1, 0.3, 0.7, 0.9]
    # Hard-coded reference output; 'nn' interpolation is only approximately
    # linear, so values are compared to 5 decimal places below.
    correct_zi = [[0.49999252, 1.0999978, 1.7000030, 2.3000080],
                  [0.29999208, 0.8999978, 1.5000029, 2.1000059],
                  [-0.1000099, 0.4999943, 1.0999964, 1.6999979],
                  [-0.3000128, 0.2999894, 0.8999913, 1.4999933]]
    zi = mlab.griddata(x, y, z, xi, yi, interp='nn')
    np.testing.assert_array_almost_equal(zi, correct_zi, 5)
    # Decreasing xi or yi should raise ValueError.
    assert_raises(ValueError, mlab.griddata, x, y, z, xi[::-1], yi,
                  interp='nn')
    assert_raises(ValueError, mlab.griddata, x, y, z, xi, yi[::-1],
                  interp='nn')
    # Passing 2D xi and yi arrays to griddata.
    xi, yi = np.meshgrid(xi, yi)
    zi = mlab.griddata(x, y, z, xi, yi, interp='nn')
    np.testing.assert_array_almost_equal(zi, correct_zi, 5)
    # Masking z array: the masked input point must mask the output region.
    z_masked = np.ma.array(z, mask=[False, False, False, True, False])
    correct_zi_masked = np.ma.masked_where(xi + yi > 1.0, correct_zi)
    zi = mlab.griddata(x, y, z_masked, xi, yi, interp='nn')
    np.testing.assert_array_almost_equal(zi, correct_zi_masked, 5)
    np.testing.assert_array_equal(np.ma.getmask(zi),
                                  np.ma.getmask(correct_zi_masked))
#*****************************************************************
# These tests were taken from SciPy with some minor modifications.
# They can be retrieved from:
# https://github.com/scipy/scipy/blob/master/scipy/stats/tests/test_kdeoth.py
#*****************************************************************
class gaussian_kde_tests():
    """Tests for mlab.GaussianKDE, adapted from scipy's gaussian_kde suite."""

    def test_kde_integer_input(self):
        """Regression test for #1181: integer datasets must be accepted."""
        x1 = np.arange(5)
        kde = mlab.GaussianKDE(x1)
        y_expected = [0.13480721, 0.18222869, 0.19514935, 0.18222869,
                      0.13480721]
        np.testing.assert_array_almost_equal(kde(x1), y_expected, decimal=6)

    def test_gaussian_kde_covariance_caching(self):
        """Evaluate with the default ('scott') bandwidth against scipy 0.10."""
        # BUG FIX: dtype=np.float was deprecated in NumPy 1.20 and later
        # removed; the builtin float is the documented replacement.
        x1 = np.array([-7, -5, 1, 4, 5], dtype=float)
        xs = np.linspace(-10, 10, num=5)
        # These expected values are from scipy 0.10, before some changes to
        # gaussian_kde. They were not compared with any external reference.
        y_expected = [0.02463386, 0.04689208, 0.05395444, 0.05337754,
                      0.01664475]
        # set it to the default bandwidth.
        kde2 = mlab.GaussianKDE(x1, 'scott')
        y2 = kde2(xs)
        np.testing.assert_array_almost_equal(y_expected, y2, decimal=7)

    def test_kde_bandwidth_method(self):
        """Default, named, and scalar bw_method must agree on this data."""
        np.random.seed(8765678)
        n_basesample = 50
        xn = np.random.randn(n_basesample)
        # Default
        gkde = mlab.GaussianKDE(xn)
        # Supply a callable
        gkde2 = mlab.GaussianKDE(xn, 'scott')
        # Supply a scalar
        gkde3 = mlab.GaussianKDE(xn, bw_method=gkde.factor)
        xs = np.linspace(-7, 7, 51)
        kdepdf = gkde.evaluate(xs)
        kdepdf2 = gkde2.evaluate(xs)
        # BUG FIX: the original asserted kdepdf.all() == kdepdf2.all(),
        # which only compares two booleans and would pass for almost any
        # pair of arrays. Compare the full arrays element-wise instead.
        np.testing.assert_array_almost_equal(kdepdf, kdepdf2)
        kdepdf3 = gkde3.evaluate(xs)
        np.testing.assert_array_almost_equal(kdepdf, kdepdf3)
class gaussian_kde_custom_tests(object):
    # Validates GaussianKDE construction: input validation, the built-in
    # 'silverman'/'scott' bandwidth rules, and scalar/callable bw_method.
    def test_no_data(self):
        """Pass no data into the GaussianKDE class."""
        assert_raises(ValueError, mlab.GaussianKDE, [])
    def test_single_dataset_element(self):
        """Pass a single dataset element into the GaussianKDE class."""
        assert_raises(ValueError, mlab.GaussianKDE, [42])
    def test_silverman_multidim_dataset(self):
        """Use a multi-dimensional array as the dataset and test silverman's
        output"""
        # A 3x3 dataset yields a singular covariance -> LinAlgError.
        x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        assert_raises(np.linalg.LinAlgError, mlab.GaussianKDE, x1, "silverman")
    def test_silverman_singledim_dataset(self):
        """Use a single dimension list as the dataset and test silverman's
        output."""
        x1 = np.array([-7, -5, 1, 4, 5])
        mygauss = mlab.GaussianKDE(x1, "silverman")
        y_expected = 0.76770389927475502
        assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
    def test_scott_multidim_dataset(self):
        """Use a multi-dimensional array as the dataset and test scott's output
        """
        # Same singular-covariance failure mode as the silverman case.
        x1 = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
        assert_raises(np.linalg.LinAlgError, mlab.GaussianKDE, x1, "scott")
    def test_scott_singledim_dataset(self):
        """Use a single-dimensional array as the dataset and test scott's
        output"""
        x1 = np.array([-7, -5, 1, 4, 5])
        mygauss = mlab.GaussianKDE(x1, "scott")
        y_expected = 0.72477966367769553
        assert_almost_equal(mygauss.covariance_factor(), y_expected, 7)
    def test_scalar_empty_dataset(self):
        """Use an empty array as the dataset and test the scalar's cov factor
        """
        # Empty data is rejected even when a scalar bandwidth is supplied.
        assert_raises(ValueError, mlab.GaussianKDE, [], bw_method=5)
    def test_scalar_covariance_dataset(self):
        """Use a dataset and test a scalar's cov factor
        """
        np.random.seed(8765678)
        n_basesample = 50
        multidim_data = [np.random.randn(n_basesample) for i in range(5)]
        # A scalar bw_method becomes the covariance factor verbatim.
        kde = mlab.GaussianKDE(multidim_data, bw_method=0.5)
        assert_equal(kde.covariance_factor(), 0.5)
    def test_callable_covariance_dataset(self):
        """Use a multi-dimensional array as the dataset and test the callable's
        cov factor"""
        np.random.seed(8765678)
        n_basesample = 50
        multidim_data = [np.random.randn(n_basesample) for i in range(5)]
        # A callable bw_method is invoked to produce the covariance factor.
        def callable_fun(x):
            return 0.55
        kde = mlab.GaussianKDE(multidim_data, bw_method=callable_fun)
        assert_equal(kde.covariance_factor(), 0.55)
    def test_callable_singledim_dataset(self):
        """Use a single-dimensional array as the dataset and test the
        callable's cov factor"""
        np.random.seed(8765678)
        n_basesample = 50
        multidim_data = np.random.randn(n_basesample)
        kde = mlab.GaussianKDE(multidim_data, bw_method='silverman')
        y_expected = 0.48438841363348911
        assert_almost_equal(kde.covariance_factor(), y_expected, 7)
    def test_wrong_bw_method(self):
        """Test the error message that should be called when bw is invalid."""
        np.random.seed(8765678)
        n_basesample = 50
        data = np.random.randn(n_basesample)
        assert_raises(ValueError, mlab.GaussianKDE, data, bw_method="invalid")
class gaussian_kde_evaluate_tests(object):
    """Exercise GaussianKDE.evaluate with various dataset/point shapes."""

    def test_evaluate_diff_dim(self):
        """Evaluating at more points than the dataset contains is allowed."""
        dataset = np.arange(3, 10, 2)
        points = np.arange(3, 12, 2)
        expected = [
            0.08797252, 0.11774109, 0.11774109, 0.08797252, 0.0370153
        ]
        estimator = mlab.GaussianKDE(dataset)
        np.testing.assert_array_almost_equal(estimator.evaluate(points),
                                             expected, 7)

    def test_evaluate_inv_dim(self):
        """A 1-D dataset evaluated at 2-D points must raise ValueError.

        The dataset has dimension 1 (e.g. [3, 2, 4]) while the points are
        given as [[1], [2], [3]], i.e. with dimension 3.
        """
        np.random.seed(8765678)
        dataset = np.random.randn(50)
        estimator = mlab.GaussianKDE(dataset)
        assert_raises(ValueError, estimator.evaluate, [[1], [2], [3]])

    def test_evaluate_dim_and_num(self):
        """Evaluation against a one-by-one array yields a single density."""
        estimator = mlab.GaussianKDE(np.arange(3, 10, 2))
        result = estimator.evaluate(np.array([3]))
        np.testing.assert_array_almost_equal(result, [0.08797252], 7)

    def test_evaluate_point_dim_not_one(self):
        """Points whose first dimension is not 1 must raise ValueError."""
        estimator = mlab.GaussianKDE(np.arange(3, 10, 2))
        bad_points = [np.arange(3, 10, 2), np.arange(3, 10, 2)]
        assert_raises(ValueError, estimator.evaluate, bad_points)

    def test_evaluate_equal_dim_and_num_lt(self):
        """Evaluating at fewer points than the dataset size still works."""
        estimator = mlab.GaussianKDE(np.arange(3, 10, 2))
        result = estimator.evaluate(np.arange(3, 8, 2))
        np.testing.assert_array_almost_equal(
            result, [0.08797252, 0.11774109, 0.11774109], 7)
def test_contiguous_regions():
    """Check contiguous_regions() against masks built from run lengths."""
    a, b, c = 3, 4, 5
    d, e = 6, 7

    # Mask that starts and ends with a True run.
    mask = [True] * a + [False] * b + [True] * c
    expected = [(0, a), (a + b, a + b + c)]
    assert_equal(mlab.contiguous_regions(mask), expected)

    # Starts True, ends False: a trailing False run adds no region.
    assert_equal(mlab.contiguous_regions(mask + [False] * e), expected)

    # Starts False, ends True: every region shifts right by d.
    mask = [False] * d + mask
    expected = [(d, d + a), (d + a + b, d + a + b + c)]
    assert_equal(mlab.contiguous_regions(mask), expected)

    # Starts and ends with False.
    assert_equal(mlab.contiguous_regions(mask + [False] * e), expected)

    # No True entries at all, and the empty mask, yield no regions.
    assert_equal(mlab.contiguous_regions([False] * 5), [])
    assert_equal(mlab.contiguous_regions([]), [])
def test_psd_onesided_norm():
    """One-sided PSD must carry the full power of the two-sided spectrum."""
    signal = np.array([0, 1, 2, 3, 1, 2, 1])
    dt = 1.0
    # Two-sided raw periodogram computed directly from the DFT.
    Su = np.abs(np.fft.fft(signal) * dt) ** 2 / float(dt * signal.size)
    P, f = mlab.psd(signal, NFFT=signal.size, Fs=1 / dt,
                    window=mlab.window_none,
                    detrend=mlab.detrend_none, noverlap=0, pad_to=None,
                    scale_by_freq=None,
                    sides='onesided')
    # Fold the negative frequencies onto the positive half (DC stays alone).
    Su_1side = np.append([Su[0]], Su[1:4] + Su[4:][::-1])
    assert_allclose(P, Su_1side, atol=1e-06)
if __name__ == '__main__':
    # Allow running this test module directly under nose:
    # '-s' disables output capture, '--with-doctest' also runs doctests.
    import nose
    import sys
    args = ['-s', '--with-doctest']
    argv = sys.argv
    argv = argv[:1] + args + argv[1:]
    nose.runmodule(argv=argv, exit=False)
|
{
"content_hash": "e4c1eb85a917d5bf8c218f123c79f065",
"timestamp": "",
"source": "github",
"line_count": 3082,
"max_line_length": 79,
"avg_line_length": 39.656716417910445,
"alnum_prop": 0.5208309469653581,
"repo_name": "lancezlin/ml_template_py",
"id": "5eca224813c898e2f08a51160468a221c3132ef6",
"size": "122222",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "lib/python2.7/site-packages/matplotlib/tests/test_mlab.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "326933"
},
{
"name": "C++",
"bytes": "14430"
},
{
"name": "CSS",
"bytes": "7806"
},
{
"name": "FORTRAN",
"bytes": "3200"
},
{
"name": "HTML",
"bytes": "596861"
},
{
"name": "JavaScript",
"bytes": "4020233"
},
{
"name": "Jupyter Notebook",
"bytes": "517957"
},
{
"name": "Objective-C",
"bytes": "567"
},
{
"name": "Python",
"bytes": "41191064"
},
{
"name": "Shell",
"bytes": "3373"
},
{
"name": "Smarty",
"bytes": "26298"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    # South schema migration: adds the per-sentiment counter columns
    # (mood_positive / mood_negative / mood_neutral) to social.Post and
    # indexes the existing 'mood' column.
    def forwards(self, orm):
        """Apply the migration: add three SmallInteger columns and an index."""
        # Adding field 'Post.mood_positive'
        db.add_column(u'social_post', 'mood_positive',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Post.mood_negative'
        db.add_column(u'social_post', 'mood_negative',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=0),
                      keep_default=False)
        # Adding field 'Post.mood_neutral'
        db.add_column(u'social_post', 'mood_neutral',
                      self.gf('django.db.models.fields.SmallIntegerField')(default=0),
                      keep_default=False)
        # Adding index on 'Post', fields ['mood']
        db.create_index(u'social_post', ['mood'])
    def backwards(self, orm):
        """Revert the migration: drop the index and the three columns."""
        # Removing index on 'Post', fields ['mood']
        db.delete_index(u'social_post', ['mood'])
        # Deleting field 'Post.mood_positive'
        db.delete_column(u'social_post', 'mood_positive')
        # Deleting field 'Post.mood_negative'
        db.delete_column(u'social_post', 'mood_negative')
        # Deleting field 'Post.mood_neutral'
        db.delete_column(u'social_post', 'mood_neutral')
    # Frozen ORM snapshot generated by South; do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'social.post': {
            'Meta': {'object_name': 'Post'},
            'content': ('django.db.models.fields.TextField', [], {}),
            'created': ('django.db.models.fields.DateTimeField', [], {}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'link': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mood': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
            'mood_negative': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'mood_neutral': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'mood_positive': ('django.db.models.fields.SmallIntegerField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
        }
    }
    complete_apps = ['social']
|
{
"content_hash": "71c6e8f1cf41b3c8cf79538721ee04cc",
"timestamp": "",
"source": "github",
"line_count": 94,
"max_line_length": 187,
"avg_line_length": 58.138297872340424,
"alnum_prop": 0.5586459286367795,
"repo_name": "mindinpanic/codingmood",
"id": "e5ab1ef5f192b85aa7ad3fcc80248b56f5441f3b",
"size": "5489",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "codemood/social/migrations/0002_auto__add_field_post_mood_positive__add_field_post_mood_negative__add_.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "224983"
},
{
"name": "JavaScript",
"bytes": "77160"
},
{
"name": "Python",
"bytes": "85292"
}
],
"symlink_target": ""
}
|
import os
import pytest
from unittest import TestCase
import torch
import torchmetrics
from torch import nn
from bigdl.nano.pytorch.lightning import LightningModule
from bigdl.nano.pytorch import Trainer
from bigdl.nano.common import check_avx512
from bigdl.nano.pytorch.utils import TORCH_VERSION_LESS_1_10
from test.pytorch.utils._train_torch_lightning import create_data_loader, data_transform
from test.pytorch.utils._train_torch_lightning import create_test_data_loader
from test.pytorch.utils._train_ipex_callback import CheckIPEXCallback, CheckIPEXFusedStepCallback
from test.pytorch.tests.test_lightning import ResNet18
# Module-level test configuration shared by the fixtures below.
num_classes = 10      # number of target classes for the classifier head/metrics
batch_size = 32       # mini-batch size for both train and test loaders
dataset_size = 256    # subset size used to keep the tests fast
num_workers = 0       # load data in the main process (safe with spawn/subprocess)
data_dir = os.path.join(os.path.dirname(__file__), "../data")  # local image dataset root
class TestPlugin(TestCase):
    """Distributed-training tests for the Nano Trainer's IPEX integration."""
    # Class-level fixtures are built once, at class-definition time: a small
    # frozen ResNet18 backbone plus train/test loaders over the local dataset.
    model = ResNet18(pretrained=False, include_top=False, freeze=True)
    loss = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
    data_loader = create_data_loader(data_dir, batch_size, num_workers,
                                     data_transform, subset=dataset_size)
    test_data_loader = create_test_data_loader(data_dir, batch_size, num_workers,
                                               data_transform, subset=dataset_size)
    def setUp(self):
        # Export the project test root on PYTHONPATH so worker processes
        # spawned by the distributed backends can import the `test` package.
        test_dir = os.path.dirname(__file__)
        project_test_dir = os.path.abspath(
            os.path.join(os.path.join(os.path.join(test_dir, ".."), ".."), "..")
        )
        os.environ['PYTHONPATH'] = project_test_dir
    def test_trainer_subprocess_plugin(self):
        # Fit/test with 2 processes on the "subprocess" backend; the callback
        # asserts that IPEX optimization was actually applied to the model.
        pl_model = LightningModule(
            self.model, self.loss, self.optimizer,
            metrics=[torchmetrics.F1(num_classes), torchmetrics.Accuracy(num_classes=10)]
        )
        trainer = Trainer(num_processes=2, distributed_backend="subprocess",
                          max_epochs=4, use_ipex=True,
                          callbacks=[CheckIPEXCallback()])
        trainer.fit(pl_model, self.data_loader, self.test_data_loader)
        trainer.test(pl_model, self.test_data_loader)
    def test_trainer_spawn_plugin_bf16(self):
        # IPEX BF16 weight prepack needs the cpu support avx512bw, avx512vl and avx512dq
        # Fresh model/loss/optimizer so BF16 state does not leak from the
        # shared class-level fixtures.
        model = ResNet18(pretrained=False, include_top=False, freeze=True)
        loss = nn.CrossEntropyLoss()
        optimizer = torch.optim.Adam(model.parameters(), lr=0.01)
        pl_model = LightningModule(
            model, loss, optimizer,
            metrics=[torchmetrics.F1(num_classes), torchmetrics.Accuracy(num_classes=10)]
        )
        trainer = Trainer(num_processes=2, distributed_backend="spawn",
                          max_epochs=4, use_ipex=True, precision="bf16",
                          callbacks=[CheckIPEXCallback(), CheckIPEXFusedStepCallback()])
        trainer.fit(pl_model, self.data_loader, self.test_data_loader)
        trainer.test(pl_model, self.test_data_loader)
        if trainer.use_ipex and TORCH_VERSION_LESS_1_10:
            import intel_pytorch_extension as ipex
            # Disable IPEX auto mixed precision so the global setting does
            # not affect other tests in this process.
            ipex.enable_auto_mixed_precision(None)
# Allow running this test module directly (outside the full suite).
if __name__ == '__main__':
    pytest.main([__file__])
|
{
"content_hash": "91557987482b8fb66dea8064571b85fe",
"timestamp": "",
"source": "github",
"line_count": 75,
"max_line_length": 97,
"avg_line_length": 42.76,
"alnum_prop": 0.656688493919551,
"repo_name": "yangw1234/BigDL",
"id": "7d9015aca7d599cb6af1b8cdc0b79b3df95bf3c7",
"size": "3794",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "python/nano/test/pytorch/tests/test_plugin_ipex.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5342"
},
{
"name": "Dockerfile",
"bytes": "138760"
},
{
"name": "Java",
"bytes": "1321348"
},
{
"name": "Jupyter Notebook",
"bytes": "54063856"
},
{
"name": "Lua",
"bytes": "1904"
},
{
"name": "Makefile",
"bytes": "19253"
},
{
"name": "PowerShell",
"bytes": "1137"
},
{
"name": "PureBasic",
"bytes": "593"
},
{
"name": "Python",
"bytes": "8762180"
},
{
"name": "RobotFramework",
"bytes": "16117"
},
{
"name": "Scala",
"bytes": "13216038"
},
{
"name": "Shell",
"bytes": "844916"
}
],
"symlink_target": ""
}
|
def song():
    """Print the full "99 Bottles of Beer" song, counting down from 99 to 0."""
    count = 99
    while count >= 0:
        verse(count)
        count -= 1
def verse(beer_count):
    """Print one verse of "99 Bottles of Beer" for *beer_count* bottles.

    Returns the verse text that was printed (the original returned None,
    so returning it is backward compatible).  For counts below 0 nothing
    is printed and the empty string is returned.

    Bug fix: the original always said "<n> bottles" for the remaining
    count, so the verse for 2 bottles ended with "1 bottles of beer on
    the wall."; it computed a grammatically correct suffix but never
    used it.
    """
    if beer_count > 1:
        remaining = beer_count - 1
        # Use the singular form when exactly one bottle remains.
        if remaining == 1:
            suffix = "1 bottle of beer on the wall.\n"
        else:
            suffix = "%d bottles of beer on the wall.\n" % remaining
        text = ("%d bottles of beer on the wall, %d bottles of beer.\n"
                "\n"
                "Take one down and pass it around, %s"
                % (beer_count, beer_count, suffix))
    elif beer_count == 1:
        text = ("1 bottle of beer on the wall, 1 bottle of beer.\n"
                "\n"
                "Take one down, pass it around, no more beer on the wall!\n")
    elif beer_count == 0:
        text = ("No more bottles of beer on the wall, no more bottles of beer.\n"
                "\n"
                "Go to the store and buy some more, 99 bottles of beer on the wall.\n")
    else:
        text = ""
    if text:
        # Single-argument print(...) is valid in both Python 2 and 3,
        # unlike the original's Python-2-only print statements.
        print(text)
    return text
|
{
"content_hash": "6f83e19f80b4dc5c3c209b4db559f245",
"timestamp": "",
"source": "github",
"line_count": 22,
"max_line_length": 98,
"avg_line_length": 41.59090909090909,
"alnum_prop": 0.5956284153005464,
"repo_name": "amalshehu/exercism-python",
"id": "e136042a406d89a92a617566c0cb8ed1d148afd2",
"size": "1195",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "beer-song/beer_song.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "117526"
}
],
"symlink_target": ""
}
|
"""
relief.schema.meta
~~~~~~~~~~~~~~~~~~
:copyright: 2013 by Daniel Neuhäuser
:license: BSD, see LICENSE.rst for details
"""
from relief.utils import class_cloner
from relief.constants import Unspecified
from relief.schema.core import BaseElement
class Maybe(BaseElement):
    """
    A meta element that represents an element that is optional. The value of
    this element will be `None` unless the contained element's value is not
    `Unspecified`.

    >>> from relief import Unicode
    >>> Maybe.of(Unicode)().value
    None
    >>> Maybe.of(Unicode)(u'foobar').value
    u'foobar'

    .. versionadded:: 2.1.0
    """
    # The element class this Maybe wraps; set by :meth:`of`.
    member_schema = None

    @class_cloner
    def of(cls, schema):
        """Return a clone of this class wrapping *schema* as its member."""
        cls.member_schema = schema
        return cls

    def __init__(self, value=Unspecified):
        # Bug fix: check member_schema *before* instantiating it.  The
        # original called ``self.member_schema()`` first, so a missing
        # schema surfaced as "'NoneType' object is not callable" instead
        # of the intended, explanatory TypeError below.
        if self.member_schema is None:
            raise TypeError('member_schema is unknown')
        self.member = self.member_schema()
        super(Maybe, self).__init__(value)

    @property
    def value(self):
        """`None` while the wrapped member is `Unspecified`, else its value."""
        return None if self.member.value is Unspecified else self.member.value

    @value.setter
    def value(self, new_value):
        # The value is derived from the wrapped member; direct assignment is
        # rejected, except the no-op of "setting" it back to Unspecified.
        if new_value is not Unspecified:
            raise AttributeError("can't set attribute")

    def serialize(self, value):
        """Serialize *value* via the member; `None` maps to `Unspecified`."""
        if value is None:
            return Unspecified
        return self.member.serialize(value)

    def unserialize(self, raw_value):
        """Unserialize via the member; `Unspecified` results map to `None`."""
        value = self.member.unserialize(raw_value)
        return None if value is Unspecified else value

    def set_from_raw(self, raw_value):
        """Set the raw value, feeding the member only when a value is present."""
        self.raw_value = raw_value
        value = self.unserialize(raw_value)
        if value is None:
            self.member.set_from_raw(Unspecified)
        else:
            self.member.set_from_raw(raw_value)
        # Invalidate any previous validation result.
        self.is_valid = None

    def set_from_native(self, value):
        """Set from a native value and mirror the member's raw value."""
        self.member.set_from_native(value)
        self.raw_value = self.member.raw_value
        self.is_valid = None

    def validate(self, context=None):
        """Validate the wrapped member; an absent (`None`) value is valid."""
        if context is None:
            context = {}
        # NOTE(review): `context` is created but not forwarded to the
        # member's validate() -- confirm whether that is intentional.
        self.is_valid = self.member.validate() or self.value is None
        return self.is_valid
|
{
"content_hash": "bbc965975be60db1f6c19f3ada479c22",
"timestamp": "",
"source": "github",
"line_count": 76,
"max_line_length": 78,
"avg_line_length": 28.539473684210527,
"alnum_prop": 0.6251728907330567,
"repo_name": "DasIch/relief",
"id": "3587702365831b464512bf75714095614e3c9290",
"size": "2186",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "relief/schema/meta.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "117437"
},
{
"name": "Shell",
"bytes": "6701"
}
],
"symlink_target": ""
}
|
"""ParenMatch -- for parenthesis matching.
When you hit a right paren, the cursor should move briefly to the left
paren. Paren here is used generically; the matching applies to
parentheses, square brackets, and curly braces.
"""
from idlelib.hyperparser import HyperParser
from idlelib.config import idleConf
# Map each closing character to its matching opening character.
_openers = {')':'(',']':'[','}':'{'}
CHECK_DELAY = 100 # milliseconds
class ParenMatch:
    """Highlight matching openers and closers, (), [], and {}.
    There are three supported styles of paren matching. When a right
    paren (closer) is typed:
    opener -- highlight the matching left paren (opener);
    parens -- highlight the left and right parens (opener and closer);
    expression -- highlight the entire expression from opener to closer.
    (For back compatibility, 'default' is a synonym for 'opener').
    Flash-delay is the maximum milliseconds the highlighting remains.
    Any cursor movement (key press or click) before that removes the
    highlight. If flash-delay is 0, there is no maximum.
    TODO:
    - Augment bell() with mismatch warning in status window.
    - Highlight when cursor is moved to the right of a closer.
    This might be too expensive to check.
    """
    RESTORE_VIRTUAL_EVENT_NAME = "<<parenmatch-check-restore>>"
    # We want the restore event be called before the usual return and
    # backspace events.
    RESTORE_SEQUENCES = ("<KeyPress>", "<ButtonPress>",
                         "<Key-Return>", "<Key-BackSpace>")
    def __init__(self, editwin):
        self.editwin = editwin
        self.text = editwin.text
        # Bind the check-restore event to the function restore_event,
        # so that we can then use activate_restore (which calls event_add)
        # and deactivate_restore (which calls event_delete).
        editwin.text.bind(self.RESTORE_VIRTUAL_EVENT_NAME,
                          self.restore_event)
        # counter invalidates stale timers: each timer remembers the value
        # at creation and fires only if it is still current.
        self.counter = 0
        self.is_restore_active = 0
    @classmethod
    def reload(cls):
        "Load class-level configuration options from the IDLE config."
        cls.STYLE = idleConf.GetOption(
            'extensions','ParenMatch','style', default='opener')
        cls.FLASH_DELAY = idleConf.GetOption(
            'extensions','ParenMatch','flash-delay', type='int',default=500)
        cls.BELL = idleConf.GetOption(
            'extensions','ParenMatch','bell', type='bool', default=1)
        cls.HILITE_CONFIG = idleConf.GetHighlight(idleConf.CurrentTheme(),
                                                  'hilite')
    def activate_restore(self):
        "Activate mechanism to restore text from highlighting."
        if not self.is_restore_active:
            for seq in self.RESTORE_SEQUENCES:
                self.text.event_add(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
            self.is_restore_active = True
    def deactivate_restore(self):
        "Remove restore event bindings."
        if self.is_restore_active:
            for seq in self.RESTORE_SEQUENCES:
                self.text.event_delete(self.RESTORE_VIRTUAL_EVENT_NAME, seq)
            self.is_restore_active = False
    def flash_paren_event(self, event):
        "Handle editor 'show surrounding parens' event (menu or shortcut)."
        indices = (HyperParser(self.editwin, "insert")
                   .get_surrounding_brackets())
        self.finish_paren_event(indices)
        return "break"
    def paren_closed_event(self, event):
        "Handle user input of closer."
        # If user bound non-closer to <<paren-closed>>, quit.
        closer = self.text.get("insert-1c")
        if closer not in _openers:
            return
        hp = HyperParser(self.editwin, "insert-1c")
        if not hp.is_in_code():
            return
        indices = hp.get_surrounding_brackets(_openers[closer], True)
        self.finish_paren_event(indices)
        return # Allow calltips to see ')'
    def finish_paren_event(self, indices):
        # indices is None when no matching opener was found.
        if indices is None and self.BELL:
            self.text.bell()
            return
        self.activate_restore()
        # self.create_tag(indices)
        # Dispatch on the configured style; unknown styles fall back to
        # highlighting the whole expression.
        self.tagfuncs.get(self.STYLE, self.create_tag_expression)(self, indices)
        # self.set_timeout()
        (self.set_timeout_last if self.FLASH_DELAY else
         self.set_timeout_none)()
    def restore_event(self, event=None):
        "Remove effect of doing match."
        self.text.tag_delete("paren")
        self.deactivate_restore()
        self.counter += 1   # disable the last timer, if there is one.
    def handle_restore_timer(self, timer_count):
        # Only act if this timer is still the most recent one.
        if timer_count == self.counter:
            self.restore_event()
    # any one of the create_tag_XXX methods can be used depending on
    # the style
    def create_tag_opener(self, indices):
        """Highlight the single paren that matches"""
        self.text.tag_add("paren", indices[0])
        self.text.tag_config("paren", self.HILITE_CONFIG)
    def create_tag_parens(self, indices):
        """Highlight the left and right parens"""
        if self.text.get(indices[1]) in (')', ']', '}'):
            rightindex = indices[1]+"+1c"
        else:
            rightindex = indices[1]
        self.text.tag_add("paren", indices[0], indices[0]+"+1c", rightindex+"-1c", rightindex)
        self.text.tag_config("paren", self.HILITE_CONFIG)
    def create_tag_expression(self, indices):
        """Highlight the entire expression"""
        if self.text.get(indices[1]) in (')', ']', '}'):
            rightindex = indices[1]+"+1c"
        else:
            rightindex = indices[1]
        self.text.tag_add("paren", indices[0], rightindex)
        self.text.tag_config("paren", self.HILITE_CONFIG)
    # Style-name -> unbound method; looked up in finish_paren_event.
    tagfuncs = {
        'opener': create_tag_opener,
        'default': create_tag_opener,
        'parens': create_tag_parens,
        'expression': create_tag_expression,
        }
    # any one of the set_timeout_XXX methods can be used depending on
    # the style
    def set_timeout_none(self):
        """Highlight will remain until user input turns it off
        or the insert has moved"""
        # After CHECK_DELAY, call a function which disables the "paren" tag
        # if the event is for the most recent timer and the insert has changed,
        # or schedules another call for itself.
        self.counter += 1
        def callme(callme, self=self, c=self.counter,
                   index=self.text.index("insert")):
            if index != self.text.index("insert"):
                self.handle_restore_timer(c)
            else:
                self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
        self.editwin.text_frame.after(CHECK_DELAY, callme, callme)
    def set_timeout_last(self):
        """The last highlight created will be removed after FLASH_DELAY millisecs"""
        # associate a counter with an event; only disable the "paren"
        # tag if the event is for the most recent timer.
        self.counter += 1
        self.editwin.text_frame.after(
            self.FLASH_DELAY,
            lambda self=self, c=self.counter: self.handle_restore_timer(c))
# Load the configurable options once at import time.
ParenMatch.reload()
if __name__ == '__main__':
    import unittest
    unittest.main('idlelib.idle_test.test_parenmatch', verbosity=2)
|
{
"content_hash": "e8a4cb2a108ead00a89f3ca8f6b7fe71",
"timestamp": "",
"source": "github",
"line_count": 183,
"max_line_length": 94,
"avg_line_length": 39.36065573770492,
"alnum_prop": 0.61793697070665,
"repo_name": "Microsoft/PTVS",
"id": "983ca20675af1de3c378fb916a414fe5e14e29a4",
"size": "7203",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/parenmatch.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ASP",
"bytes": "109"
},
{
"name": "Batchfile",
"bytes": "10898"
},
{
"name": "C",
"bytes": "23236"
},
{
"name": "C#",
"bytes": "12235396"
},
{
"name": "C++",
"bytes": "212001"
},
{
"name": "CSS",
"bytes": "7025"
},
{
"name": "HTML",
"bytes": "34251"
},
{
"name": "JavaScript",
"bytes": "87257"
},
{
"name": "PowerShell",
"bytes": "44322"
},
{
"name": "Python",
"bytes": "847130"
},
{
"name": "Rich Text Format",
"bytes": "260880"
},
{
"name": "Smarty",
"bytes": "8156"
},
{
"name": "Tcl",
"bytes": "24968"
}
],
"symlink_target": ""
}
|
from django.db import models
from django.utils.translation import ugettext_lazy as _
from laws.models.vote_action import VoteAction
import logging
logger = logging.getLogger("open-knesset.laws.models")
class CandidateListVotingStatistics(models.Model):
    """Aggregated Knesset-vote statistics for a single candidate list."""
    # TODO: why is this a django model? could be method on CandidateList or a projection class
    class Meta:
        app_label = 'laws'
    candidates_list = models.OneToOneField('polyorg.CandidateList', related_name='voting_statistics')

    def votes_against_party_count(self):
        """Number of vote actions cast against the party line."""
        member_ids = self.candidates_list.member_ids
        against = VoteAction.objects.filter(member__id__in=member_ids,
                                            against_party=True)
        return against.count()

    def votes_count(self):
        """Number of actual votes (abstentions of type 'no-vote' excluded)."""
        member_ids = self.candidates_list.member_ids
        actions = VoteAction.objects.filter(member__id__in=member_ids)
        return actions.exclude(type='no-vote').count()

    def votes_per_seat(self):
        """Average (rounded) votes per member of the list."""
        # NOTE(review): raises ZeroDivisionError if member_ids is empty --
        # presumably lists always have members; confirm with callers.
        member_count = len(self.candidates_list.member_ids)
        return round(float(self.votes_count()) / member_count)

    def discipline(self):
        """Percent of votes cast with the party, to one decimal place."""
        total_votes = self.votes_count()
        if not total_votes:
            return _('N/A')
        votes_against_party = self.votes_against_party_count()
        return round(100.0 * (total_votes - votes_against_party) / total_votes, 1)
|
{
"content_hash": "b2a18cb8c4dec34980726c34e40edf78",
"timestamp": "",
"source": "github",
"line_count": 31,
"max_line_length": 120,
"avg_line_length": 38.516129032258064,
"alnum_prop": 0.7035175879396985,
"repo_name": "daonb/Open-Knesset",
"id": "28b87ba56842b8390da3993abbdf723ca8929126",
"size": "1212",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "laws/models/candidate_list_model_statistics.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "350330"
},
{
"name": "HTML",
"bytes": "752361"
},
{
"name": "JavaScript",
"bytes": "220620"
},
{
"name": "Python",
"bytes": "4517092"
},
{
"name": "Shell",
"bytes": "383"
}
],
"symlink_target": ""
}
|
from flaky import flaky
import numpy
from numpy.testing import assert_array_almost_equal
from keras import backend as K
from keras.initializers import Constant
from keras.layers import Embedding, Input
from keras.models import Model
from keras import activations
from deep_qa.tensors.backend import apply_feed_forward
from deep_qa.layers.tuple_matchers import ThresholdTupleMatcher
from ...common.test_case import DeepQaTestCase
class TestThresholdTupleMatcher(DeepQaTestCase):
    """Tests for ThresholdTupleMatcher: scoring, mask output, and input masks."""
    def setUp(self):
        super(TestThresholdTupleMatcher, self).setUp()
        # Fixture dimensions shared by all tests.
        self.num_slots = 3
        self.num_words = 5
        self.embed_dimension = 4
        self.tuple1_input = Input(shape=(self.num_slots, self.num_words, self.embed_dimension), dtype='float32',
                                  name="input_tuple1")
        self.tuple2_input = Input(shape=(self.num_slots, self.num_words, self.embed_dimension), dtype='float32',
                                  name="input_tuple2")
        self.num_hidden_layers = 1
        self.hidden_layer_width = 2
        self.hidden_layer_activation = 'linear'
        # tuple1 shape: (batch size, num_slots, num_words, embed_dimension)
        self.tuple1 = numpy.random.rand(1, self.num_slots, self.num_words, self.embed_dimension)
        # tuple2 shape: (batch size, num_slots, num_words, embed_dimension)
        self.tuple2 = numpy.random.rand(1, self.num_slots, self.num_words, self.embed_dimension)
        # cause some dimensions to have identical embeddings (i.e. cosine similarity of 1)
        self.tuple1[0, 1, 1, :] = numpy.ones(4)
        self.tuple1[0, 2, 0, :] = numpy.ones(4)
        self.tuple2[0, 1, 0, :] = numpy.ones(4)
        self.tuple2[0, 2, 3, :] = numpy.ones(4)
        self.tuple2[0, 2, 4, :] = numpy.ones(4)
        # This should result in 1 matched word in slot 1, and two in slot 2
        # So the normalized overlaps should be (0, 1/5, 2/5)
    @flaky
    def test_general_case(self):
        # Build a matcher with a near-1.0 constant init so the similarity
        # threshold behaves deterministically, then reproduce the layer's
        # computation by hand and compare against model.predict.
        match_layer = ThresholdTupleMatcher({"type": "cosine_similarity"},
                                            self.num_hidden_layers,
                                            self.hidden_layer_width,
                                            initialization=Constant(.999),
                                            hidden_layer_activation=self.hidden_layer_activation)
        output = match_layer([self.tuple1_input, self.tuple2_input])
        model = Model([self.tuple1_input, self.tuple2_input], output)
        # Get the initial weights for use in testing
        layer_nn = match_layer.hidden_layer_weights
        # Testing general unmasked case.
        desired_overlap = K.variable(numpy.asarray([[0, 1/5, 2/5]]))
        # Desired_overlap gets fed into the inner NN.
        neural_network_feed_forward = apply_feed_forward(desired_overlap, layer_nn,
                                                         activations.get(match_layer.hidden_layer_activation))
        # Apply the final activation function.
        desired_result = activations.get(match_layer.final_activation)(K.dot(neural_network_feed_forward,
                                                                             match_layer.score_layer))
        result = model.predict([self.tuple1, self.tuple2])
        assert_array_almost_equal(result, K.eval(desired_result))
    def test_returns_masks_correctly(self):
        # Test when one tuple is all padding.
        # Here, since tuple2 is all padding, we want to return a mask value of 0 for this pair
        # tuple1 shape: (batch size, num_slots, num_words, embed_dimension)
        tuple1 = K.variable(self.tuple1)
        mask1 = K.variable(numpy.ones((1, self.num_slots, self.num_words)))
        tuple2 = K.variable(self.tuple2)
        mask2 = K.variable(numpy.zeros((1, self.num_slots, self.num_words)))
        calculated_mask_exclude = K.eval(
                ThresholdTupleMatcher({"type": "cosine_similarity"},
                                      self.num_hidden_layers,
                                      self.hidden_layer_width,
                                      hidden_layer_activation=self.hidden_layer_activation)
                .compute_mask([tuple1, tuple2], [mask1, mask2]))
        assert_array_almost_equal(calculated_mask_exclude, numpy.array([[0]], dtype='int32'))
        assert calculated_mask_exclude.shape == (1, 1,)
        # Test when tuple2 is valid.
        # Here, since tuple2 is valid, we want to return a mask value of 1 for this pair
        new_mask = numpy.ones((1, self.num_slots, self.num_words))
        new_mask[:, :, 1] = 0
        mask2 = K.variable(new_mask)
        calculated_mask_include = K.eval(
                ThresholdTupleMatcher({"type": "cosine_similarity"},
                                      self.num_hidden_layers,
                                      self.hidden_layer_width,
                                      hidden_layer_activation=self.hidden_layer_activation)
                .compute_mask([tuple1, tuple2], [mask1, mask2]))
        assert_array_almost_equal(calculated_mask_include, numpy.array([[1]], dtype='int32'))
        assert calculated_mask_include.shape == (1, 1,)
    def test_handles_input_masks_correctly(self):
        # Feed integer word ids through an Embedding with mask_zero=True so
        # padding (id 0) is masked, and verify masked words never "match".
        num_slots = 3
        num_words = 5
        embed_dimension = 4
        tuple1_word_input = Input(shape=(num_slots, num_words), dtype='int32', name="input_tuple1")
        tuple2_word_input = Input(shape=(num_slots, num_words), dtype='int32', name="input_tuple2")
        embedding = Embedding(10, embed_dimension, mask_zero=True)
        embedded_masked_tuple1 = embedding(tuple1_word_input)
        embedded_masked_tuple2 = embedding(tuple2_word_input)
        match_layer = ThresholdTupleMatcher({"type": "cosine_similarity"},
                                            self.num_hidden_layers,
                                            self.hidden_layer_width,
                                            initialization=Constant(.999),
                                            hidden_layer_activation=self.hidden_layer_activation)
        output = match_layer([embedded_masked_tuple1, embedded_masked_tuple2])
        mask_model = Model([tuple1_word_input, tuple2_word_input], output)
        # Assign tuple1 to be all 4's and tuple2 to be all 3's so we can control lexical overlap
        tuple1_words = numpy.ones((1, num_slots, num_words)) * 4
        tuple2_words = numpy.ones((1, num_slots, num_words)) * 3
        # Add a set of matching zeros to slot 1 in each tuple1 -- but shouldn't "match" because it's padding
        tuple1_words[:, 1, :] = numpy.zeros(num_words)
        tuple2_words[:, 1, :] = numpy.zeros(num_words)
        # Get the initial weights for use in testing
        layer_nn = match_layer.hidden_layer_weights
        # Testing general unmasked case.
        desired_overlap = K.variable(numpy.asarray([[0, 0, 0]]))
        # Desired_overlap gets fed into the inner NN.
        neural_network_feed_forward = apply_feed_forward(desired_overlap, layer_nn,
                                                         activations.get(match_layer.hidden_layer_activation))
        # Apply the final activation function.
        desired_result = activations.get(match_layer.final_activation)(K.dot(neural_network_feed_forward,
                                                                             match_layer.score_layer))
        result = mask_model.predict([tuple1_words, tuple2_words])
        assert_array_almost_equal(result, K.eval(desired_result))
|
{
"content_hash": "5985d6736b10c832ccf048b1cc002720",
"timestamp": "",
"source": "github",
"line_count": 137,
"max_line_length": 112,
"avg_line_length": 54.956204379562045,
"alnum_prop": 0.5913135874618143,
"repo_name": "matt-gardner/deep_qa",
"id": "661742192e350a431194cc717c01e6982736cbd0",
"size": "7559",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/layers/tuple_matchers/threshold_tuple_matcher_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1236066"
},
{
"name": "Shell",
"bytes": "5494"
}
],
"symlink_target": ""
}
|
"""
Test cases relating to listVirtualMachine() relating to parameters - id,listall,isrecursive,account and domainid
"""
#Import Local Modules
import marvin
from marvin.cloudstackTestCase import *
from marvin.cloudstackAPI import *
from marvin.lib.utils import *
from marvin.lib.base import *
from marvin.lib.common import *
from nose.plugins.attrib import attr
#Import System modules
import time
# Nose multiprocess flag -- presumably lets workers share this module's
# fixtures; confirm against the nose multiprocess plugin docs.
_multiprocess_shared_ = True
class TestVMList(cloudstackTestCase):
@classmethod
def setUpClass(cls):
"""
Create the following domain tree and accounts that are reqiured for executing listVirtualMachine test cases:
Under ROOT - create 2 domaind D1 and D2
Under D1 - Create 2 subdomain D11 and D12
Under D11 - Create subdimain D111
Under each of the domain create 1 admin user and couple of regular users.
As each of these users , deploy Virtual machines.
"""
cls.testclient = super(TestVMList, cls).getClsTestClient()
cls.apiclient = cls.testclient.getApiClient()
cls.testdata = cls.testClient.getParsedTestDataConfig()
cls.acldata = cls.testdata["acl"]
cls.domain_1 = None
cls.domain_2 = None
cls.cleanup = []
try:
# backup default apikey and secretkey
cls.default_apikey = cls.apiclient.connection.apiKey
cls.default_secretkey = cls.apiclient.connection.securityKey
# Create domains
cls.domain_1 = Domain.create(
cls.apiclient,
cls.acldata["domain1"]
)
cls.domain_11 = Domain.create(
cls.apiclient,
cls.acldata["domain11"],
parentdomainid=cls.domain_1.id
)
cls.domain_111 = Domain.create(
cls.apiclient,
cls.acldata["domain111"],
parentdomainid=cls.domain_11.id,
)
cls.domain_12 = Domain.create(
cls.apiclient,
cls.acldata["domain12"],
parentdomainid=cls.domain_1.id
)
cls.domain_2 = Domain.create(
cls.apiclient,
cls.acldata["domain2"]
)
# Create 1 admin account and 2 user accounts for doamin_1
cls.account_d1 = Account.create(
cls.apiclient,
cls.acldata["accountD1"],
admin=True,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d1)
cls.user_d1_apikey = user.apikey
cls.user_d1_secretkey = user.secretkey
cls.account_d1a = Account.create(
cls.apiclient,
cls.acldata["accountD1A"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d1a)
cls.user_d1a_apikey = user.apikey
cls.user_d1a_secretkey = user.secretkey
cls.account_d1b = Account.create(
cls.apiclient,
cls.acldata["accountD1B"],
admin=False,
domainid=cls.domain_1.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d1b)
cls.user_d1b_apikey = user.apikey
cls.user_d1b_secretkey = user.secretkey
# Create 1 admin and 2 user accounts for doamin_11
cls.account_d11 = Account.create(
cls.apiclient,
cls.acldata["accountD11"],
admin=True,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d11)
cls.user_d11_apikey = user.apikey
cls.user_d11_secretkey = user.secretkey
cls.account_d11a = Account.create(
cls.apiclient,
cls.acldata["accountD11A"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d11a)
cls.user_d11a_apikey = user.apikey
cls.user_d11a_secretkey = user.secretkey
cls.account_d11b = Account.create(
cls.apiclient,
cls.acldata["accountD11B"],
admin=False,
domainid=cls.domain_11.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d11b)
cls.user_d11b_apikey = user.apikey
cls.user_d11b_secretkey = user.secretkey
# Create 1 user account for doamin_111
cls.account_d111a = Account.create(
cls.apiclient,
cls.acldata["accountD111A"],
admin=False,
domainid=cls.domain_111.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d111a)
cls.user_d111a_apikey = user.apikey
cls.user_d111a_secretkey = user.secretkey
# Create 2 user accounts for doamin_12
cls.account_d12a = Account.create(
cls.apiclient,
cls.acldata["accountD12A"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d12a)
cls.user_d12a_apikey = user.apikey
cls.user_d12a_secretkey = user.secretkey
cls.account_d12b = Account.create(
cls.apiclient,
cls.acldata["accountD12B"],
admin=False,
domainid=cls.domain_12.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d12b)
cls.user_d12b_apikey = user.apikey
cls.user_d12b_secretkey = user.secretkey
# Create 1 user account for domain_2
cls.account_d2a = Account.create(
cls.apiclient,
cls.acldata["accountD2"],
admin=False,
domainid=cls.domain_2.id
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_d2a)
cls.user_d2a_apikey = user.apikey
cls.user_d2a_secretkey = user.secretkey
# Create admin user account
cls.account_a = Account.create(
cls.apiclient,
cls.acldata["accountROOTA"],
admin=True,
)
user = cls.generateKeysForUser(cls.apiclient,cls.account_a)
cls.user_a_apikey = user.apikey
cls.user_a_secretkey = user.secretkey
# create service offering
cls.service_offering = ServiceOffering.create(
cls.apiclient,
cls.acldata["service_offering"]["small"]
)
cls.zone = get_zone(cls.apiclient,cls.testclient.getZoneForTests())
cls.acldata['mode'] = cls.zone.networktype
cls.template = get_template(cls.apiclient, cls.zone.id, cls.acldata["ostype"])
# deploy VM
cls.apiclient.connection.apiKey = cls.user_d1_apikey
cls.apiclient.connection.securityKey = cls.user_d1_secretkey
cls.vm_d1 = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD1"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d1a_apikey
cls.apiclient.connection.securityKey = cls.user_d1a_secretkey
cls.vm_d1a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD1A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d1b_apikey
cls.apiclient.connection.securityKey = cls.user_d1b_secretkey
cls.vm_d1b = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD1B"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d11_apikey
cls.apiclient.connection.securityKey = cls.user_d11_secretkey
cls.vm_d11 = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD11"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d11a_apikey
cls.apiclient.connection.securityKey = cls.user_d11a_secretkey
cls.vm_d11a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD11A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d11b_apikey
cls.apiclient.connection.securityKey = cls.user_d11b_secretkey
cls.vm_d11b = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD11B"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d111a_apikey
cls.apiclient.connection.securityKey = cls.user_d111a_secretkey
cls.vm_d111a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD111A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d12a_apikey
cls.apiclient.connection.securityKey = cls.user_d12a_secretkey
cls.vm_d12a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD12A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d12b_apikey
cls.apiclient.connection.securityKey = cls.user_d12b_secretkey
cls.vm_d12b = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD12B"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_d2a_apikey
cls.apiclient.connection.securityKey = cls.user_d2a_secretkey
cls.vm_d2 = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmD2A"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.apiclient.connection.apiKey = cls.user_a_apikey
cls.apiclient.connection.securityKey = cls.user_a_secretkey
cls.vm_a = VirtualMachine.create(
cls.apiclient,
cls.acldata["vmROOTA"],
zoneid=cls.zone.id,
serviceofferingid=cls.service_offering.id,
templateid=cls.template.id
)
cls.cleanup = [
cls.account_a,
cls.service_offering,
]
except Exception as e:
cls.domain_2.delete(cls.apiclient,cleanup="true")
cls.domain_1.delete(cls.apiclient,cleanup="true")
cleanup_resources(cls.apiclient, cls.cleanup)
raise Exception("Failed to create the setup required to execute the test cases: %s" % e)
@classmethod
def tearDownClass(cls):
cls.apiclient = super(TestVMList, cls).getClsTestClient().getApiClient()
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
try:
cls.domain_2.delete(cls.apiclient,cleanup="true")
cls.domain_1.delete(cls.apiclient,cleanup="true")
except: pass
cleanup_resources(cls.apiclient, cls.cleanup)
def setUp(cls):
cls.apiclient = cls.testClient.getApiClient()
cls.dbclient = cls.testClient.getDbConnection()
def tearDown(cls):
# restore back default apikey and secretkey
cls.apiclient.connection.apiKey = cls.default_apikey
cls.apiclient.connection.securityKey = cls.default_secretkey
return
## Domain Admin - Test cases with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_listall_true(self):
"""
# Test listing of Vms by passing listall="true" parameter as domain admin
# Validate that it returns all the Vms that is owned by accounts in this domain and all its subdomain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true")
self.debug ("List as Domain Admin - listall=true - %s" % vmList)
self.assertEqual(len(vmList) == 9,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id),
self.checkForExistenceOfValue(vmList,self.vm_d1a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d1b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_listall_true_rec_true(self):
"""
# Test listing of Vms by passing listall="true"i and isrecusriv="true" parameter as domain admin
# Validate that it returns all the Vms that is owned by accounts in this domain and all its subdomain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true",isrecursive="true")
self.debug ("List as Domain Admin - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 9,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id),
self.checkForExistenceOfValue(vmList,self.vm_d1a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d1b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_listall_true_rec_false(self):
"""
# Test listing of Vms by passing listall="true" and isrecusriv="false" parameter as domain admin
# Validate that it returns all the Vms that is owned by accounts in this domain and all its subdomain
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true",isrecursive="false")
self.debug ("List as Domain Admin - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 9,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id),
self.checkForExistenceOfValue(vmList,self.vm_d1a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d1b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_listall_false(self):
"""
# Test listing of Vms by passing listall="false" parameter as domain admin
# Validate that it returns all the Vms that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false")
self.debug ("List as Domain Admin - listall=false - %s" % vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_listall_false_rec_true(self):
"""
# Test listing of Vms by passing listall="false" and isrecusrive="true" parameter as domain admin
# Validate that it returns all the Vms that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false",isrecursive="true")
self.debug ("List as Domain Admin - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_listall_false_rec_false(self):
"""
# Test listing of Vms by passing listall="false" and isrecusrive="false" parameter as domain admin
# Validate that it returns all the Vms that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false",isrecursive="false")
self.debug ("List as Domain Admin - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases without passing listall paramter
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin(self):
"""
# Test listing of Vms by passing no parameter as domain admin
# Validate that it returns all the Vms that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient)
self.debug ("List as Domain Admin - %s" % vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_rec_true(self):
"""
# Test listing of Vms by passing isrecusrive="true" parameter as domain admin
# Validate that it returns all the Vms that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,isrecursive="true")
self.debug ("List as Domain Admin - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_true_rec_false(self):
"""
# Test listing of Vms by passing isrecusrive="false" parameter as domain admin
# Validate that it returns all the Vms that is owned by the domain admin
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,isrecursive="false")
self.debug ("List as Domain Admin - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when domainId is passed with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_listall_true(self):
"""
# Test listing of Vms by passing domainId and listall="true" parameter as domain admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="true")
self.debug ("List as Domain Admin passing domainId - listall=true %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_listall_true_rec_true(self):
"""
# Test listing of Vms by passing domainId ,listall="true" and isrecursive="true" parameter as domain admin
# Validate that it returns all the Vms in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="true",isrecursive="true")
self.debug ("List as Domain Admin passing domainId - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 4,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_listall_true_rec_false(self):
"""
# Test listing of Vms by passing domainId ,listall="true" and isrecursive="false" parameter as domain admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="true",isrecursive="false")
self.debug ("List as Domain Admin passing domainId - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_listall_false(self):
"""
# Test listing of Vms by passing domainId ,listall="false" parameter as domain admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="false")
self.debug ("List as Domain Admin passing domainId - listall=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_listall_false_rec_true(self):
"""
# Test listing of Vms by passing domainId ,listall="false" and isrecursive="true" parameter as domain admin
# Validate that it returns all the Vms in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="false",isrecursive="true")
self.debug ("List as Domain Admin passing domainId - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 4,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_listall_false_rec_false(self):
"""
# Test listing of Vms by passing domainId ,listall="false" and isrecursive="false" parameter as domain admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="false",isrecursive="false")
self.debug ("List as Domain Admin passing domainId - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when domainId is passed with no listall parameter
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid(self):
"""
# Test listing of Vms by passing domainId parameter as domain admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id)
self.debug ("List as Domain Admin passing domainId - %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_rec_true(self):
"""
# Test listing of Vms by passing domainId and isrecursive="true" parameter as domain admin
# Validate that it returns all the Vms in the subdomain and domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,isrecursive="true")
self.debug ("List as Domain Admin passing domainId - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 4,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_rec_false(self):
"""
# Test listing of Vms by passing domainId and isrecursive="false" parameter as domain admin
# Validate that it returns all the Vms in the subdomain and domain passed
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,isrecursive="false")
self.debug ("List as Domain Admin passing domainId - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when account and domainId is passed with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_listall_true(self):
"""
# Test listing of Vms by passing account ,domainId and listall="true" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="true")
self.debug ("List as Domain Admin passing domainId and accountId - listall=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_listall_true_rec_true(self):
"""
# Test listing of Vms by passing account ,domainId and listall="true" and isrecursive="true" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="true",isrecursive="true")
self.debug ("List as Domain Admin passing domainId and accountId - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_listall_true_rec_false(self):
"""
# Test listing of Vms by passing account ,domainId , listall="true" and isrecursive="false" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="true",isrecursive="false")
self.debug ("List as Domain Admin passing domainId and accountId - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when account and domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_listall_false(self):
"""
# Test listing of Vms by passing account ,domainId and listall="false" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="false")
self.debug ("List as Domain Admin passing domainId and accountId - listall=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_listall_false_rec_true(self):
"""
# Test listing of Vms by passing account ,domainId and listall="false" and isrecursive="true" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="false",isrecursive="true")
self.debug ("List as Domain Admin passing domainId and accountId - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_listall_false_rec_false(self):
"""
# Test listing of Vms by passing account ,domainId , listall="false" and isrecursive="false" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="false",isrecursive="false")
self.debug ("List as Domain Admin passing domainId and accountId - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Domain Admin - Test cases when account and domainId is passed with listall not passed
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid(self):
"""
# Test listing of Vms by passing account ,domainId parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id)
self.debug ("List as Domain Admin passing domainId and accountId - %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_rec_true(self):
"""
# Test listing of Vms by passing account ,domainId and isrecursive="true" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,isrecursive="true")
self.debug ("List as Domain Admin passing domainId and accountId - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_domainadmin_domainid_accountid_rec_false(self):
"""
# Test listing of Vms by passing account ,domainId and isrecursive="false" parameter as domain admin
# Validate that it returns all the Vms owned by the account passed in account parameter
"""
self.apiclient.connection.apiKey = self.user_d1_apikey
self.apiclient.connection.securityKey = self.user_d1_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,isrecursive="false")
self.debug ("List as Domain Admin passing domainId and accountId - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_listall_true(self):
"""
# Test listing of Vms by passing listall="true" parameter as admin
# Validate that it returns all the Vms
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true")
self.debug ("List as ROOT Admin - listall=true %s" %vmList)
self.assertEqual(len(vmList) >= 11,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id),
self.checkForExistenceOfValue(vmList,self.vm_d1a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d1b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d2.id) and
self.checkForExistenceOfValue(vmList,self.vm_a.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_listall_true_rec_true(self):
"""
# Test listing of Vms by passing listall="true" and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true",isrecursive="true")
self.debug ("List as ROOT Admin - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) >= 11,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id),
self.checkForExistenceOfValue(vmList,self.vm_d1a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d1b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d2.id) and
self.checkForExistenceOfValue(vmList,self.vm_a.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_listall_true_rec_false(self):
"""
# Test listing of Vms by passing listall="true" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true",isrecursive="false")
self.debug ("List as ROOT Admin - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) >= 11,
True,
"Number of items in list response check failed!!")
if (self.checkForExistenceOfValue(vmList,self.vm_d1.id),
self.checkForExistenceOfValue(vmList,self.vm_d1a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d1b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d12b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d2.id) and
self.checkForExistenceOfValue(vmList,self.vm_a.id)):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_listall_false(self):
"""
# Test listing of Vms by passing listall="false" parameter as admin
# Validate that it returns all the Vms owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false")
self.debug ("List as ROOT Admin - listall=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_listall_false_rec_true(self):
"""
# Test listing of Vms by passing listall="false" and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false",isrecursive="true")
self.debug ("List as ROOT Admin - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_listall_false_rec_false(self):
"""
# Test listing of Vms by passing listall="false" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false",isrecursive="false")
self.debug ("List as ROOT Admin - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## ROOT Admin - Test cases without passing listall parameter
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin(self):
"""
# Test listing of Vms by passing no parameter as admin
# Validate that it returns all the Vms owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient)
self.debug ("List as ROOT Admin %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_rec_true(self):
"""
# Test listing of Vms by passing isrecusrive="true" parameter as admin
# Validate that it returns all the Vms owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,isrecursive="true")
self.debug ("List as ROOT Admin - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_rec_false(self):
"""
# Test listing of Vms by passing isrecusrive="false" parameter as admin
# Validate that it returns all the Vms owned by admin
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,isrecursive="false")
self.debug ("List as ROOT Admin passing domainId - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when domainId is passed with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_listall_true(self):
"""
# Test listing of Vms by passing domainid and listall="true" parameter as admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="true")
self.debug ("List as ROOT Admin passing domainId - listall=true %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_listall_true_rec_true(self):
"""
# Test listing of Vms by passing domainid , listall="true" and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms in the subdomain and the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="true",isrecursive="true")
self.debug ("List as ROOT Admin passing domainId - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 4,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_listall_true_rec_false(self):
"""
# Test listing of Vms by passing domainid, listall="true" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="true",isrecursive="false")
self.debug ("List as ROOT Admin passing domainId - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_listall_false(self):
"""
# Test listing of Vms by passing domainid, listall="false" parameter as admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="false")
self.debug ("List as ROOT Admin passing domainId - listall=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_listall_false_rec_true(self):
"""
# Test listing of Vms by passing domainid, listall="false" and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms in the subdoamin and domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="false",isrecursive="true")
self.debug ("List as ROOT Admin passing domainId - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 4,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_listall_false_rec_false(self):
"""
# Test listing of Vms by passing domainid, listall="false" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,listall="false",isrecursive="false")
self.debug ("List as ROOT Admin passing domainId - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when domainId is passed with no listall parameter
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid(self):
"""
# Test listing of Vms by passing domainid parameter as admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id)
self.debug ("List as ROOT Admin passing domainId - %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_rec_true(self):
"""
# Test listing of Vms by passing domainid and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms in the subdmain and domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,isrecursive="true")
self.debug ("List as ROOT Admin passing domainId - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 4,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) and
self.checkForExistenceOfValue(vmList,self.vm_d111a.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_rec_false(self):
"""
# Test listing of Vms by passing domainid and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms in the domain passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_11.id,isrecursive="false")
self.debug ("List as ROOT Admin passing domainId - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 3,
True,
"Number of items in list response check failed!!")
if ( self.checkForExistenceOfValue(vmList,self.vm_d11.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11a.id) and
self.checkForExistenceOfValue(vmList,self.vm_d11b.id) ):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when account and domainId is passed with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_listall_true(self):
"""
# Test listing of Vms by passing domainid,account ,listall = "true" parameter as admin
# Validate that it returns all the Vms of account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="true")
self.debug ("List as ROOT Admin passing domainId and accountId - listall=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_listall_true_rec_true(self):
"""
# Test listing of Vms by passing domainid,account ,listall = "true" and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="true",isrecursive="true")
self.debug ("List as ROOT Admin passing domainId and accountId - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_listall_true_rec_false(self):
"""
# Test listing of Vms by passing domainid,account ,listall = "true" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="true",isrecursive="false")
self.debug ("List as ROOT Admin passing domainId and accountId - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when account and domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_listall_false(self):
"""
# Test listing of Vms by passing domainid,account ,listall = "false" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="false")
self.debug ("List as ROOT Admin passing domainId and accountId - listall=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_listall_false_rec_true(self):
"""
# Test listing of Vms by passing domainid,account ,listall = "false" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="false",isrecursive="true")
self.debug ("List as ROOT Admin passing domainId and accountId - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_listall_false_rec_false(self):
"""
# Test listing of Vms by passing domainid,account ,listall = "false" and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,listall="false",isrecursive="false")
self.debug ("List as ROOT Admin passing domainId and accountId - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## ROOT Admin - Test cases when account and domainId is passed with listall not passed
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid(self):
"""
# Test listing of Vms by passing domainid,account parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id)
self.debug ("List as ROOT Admin passing domainId and accountId - %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_rec_true(self):
"""
# Test listing of Vms by passing domainid,account and isrecusrive="true" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,isrecursive="true")
self.debug ("List as ROOT Admin passing domainId and accountId - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_rootadmin_domainid_accountid_rec_false(self):
"""
# Test listing of Vms by passing domainid,account and isrecusrive="false" parameter as admin
# Validate that it returns all the Vms of the account that is passed
"""
self.apiclient.connection.apiKey = self.user_a_apikey
self.apiclient.connection.securityKey = self.user_a_secretkey
vmList = VirtualMachine.list(self.apiclient,account=self.account_d11.user[0].username,domainid=self.domain_11.id,isrecursive="false")
self.debug ("List as ROOT Admin passing domainId and accountId - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d11.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_listall_true(self):
"""
# Test listing of Vms by passing listall="true" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true")
self.debug ("List as Regular User - listall=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_listall_true_rec_true(self):
"""
# Test listing of Vms by passing listall="true" and isrecusrive="true" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true",isrecursive="true")
self.debug ("List as Regular User - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_listall_true_rec_false(self):
"""
# Test listing of Vms by passing listall="true" and isrecusrive="false" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="true",isrecursive="false")
self.debug ("List as Regular User - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases with listall=false
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_listall_false(self):
"""
# Test listing of Vms by passing domainid,account,listall="false" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false")
self.debug ("List as Regular User - listall=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_listall_false_rec_true(self):
"""
# Test listing of Vms by passing listall="false" and isrecusrive="true" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false",isrecursive="true")
self.debug ("List as Regular User - listall=false,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_listall_false_rec_false(self):
"""
# Test listing of Vms by passing listall="false" and isrecusrive="false" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,listall="false",isrecursive="false")
self.debug ("List as Regular User - listall=false,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
    ## Regular User - Test cases without passing listall parameter
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser(self):
"""
# Test listing of Vms by passing no parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient)
self.debug ("List as Regular User %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_rec_true(self):
"""
# Test listing of Vms by passing isrecusrive="true" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,isrecursive="true")
self.debug ("List as Regular User - isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_rec_false(self):
"""
# Test listing of Vms by passing isrecusrive="false" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,isrecursive="false")
self.debug ("List as Regular User passing domainId - isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases when domainId is passed with listall =true
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_domainid_listall_true(self):
"""
# Test listing of Vms by passing domainid,listall="true" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_1.id,listall="true")
self.debug ("List as Regular User passing domainId - listall=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_domainid_listall_true_rec_true(self):
"""
# Test listing of Vms by passing domainid,listall="true" and isrecusrive="true" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_1.id,listall="true",isrecursive="true")
self.debug ("List as Regular User passing domainId - listall=true,isrecursive=true %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
@attr("simulator_only", tags=["advanced"],required_hardware="false")
def test_listVM_as_regularuser_domainid_listall_true_rec_false(self):
"""
# Test listing of Vms by passing domainid,listall="true" and isrecusrive="false" parameter as regular user
# Validate that it returns all the Vms of the account the user belongs to
"""
self.apiclient.connection.apiKey = self.user_d1a_apikey
self.apiclient.connection.securityKey = self.user_d1a_secretkey
vmList = VirtualMachine.list(self.apiclient,domainid=self.domain_1.id,listall="true",isrecursive="false")
self.debug ("List as Regular User passing domainId - listall=true,isrecursive=false %s" %vmList)
self.assertEqual(len(vmList) == 1,
True,
"Number of items in list response check failed!!")
if self.checkForExistenceOfValue(vmList,self.vm_d1a.id):
accountAccess = True
else:
accountAccess = False
self.assertEqual(accountAccess,
True,
"Account access check failed!!")
## Regular User - Test cases when domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_listall_false(self):
    """
    # List VMs as a regular user passing domainid and listall="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        domainid=self.domain_1.id,
        listall="false")
    self.debug("List as Regular User passing domainId - listall=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_listall_false_rec_true(self):
    """
    # List VMs as a regular user passing domainid, listall="false" and
    # isrecursive="true"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        domainid=self.domain_1.id,
        listall="false",
        isrecursive="true")
    self.debug("List as Regular User passing domainId - listall=false,isrecursive=true %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_listall_false_rec_false(self):
    """
    # List VMs as a regular user passing domainid, listall="false" and
    # isrecursive="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        domainid=self.domain_1.id,
        listall="false",
        isrecursive="false")
    self.debug("List as Regular User passing domainId - listall=false,isrecursive=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
## Regular User - Test cases when domainId is passed with no listall parameter
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid(self):
    """
    # List VMs as a regular user passing only the domainid parameter
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(self.apiclient, domainid=self.domain_1.id)
    self.debug("List as Regular User passing domainId %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_true_rec_true(self):
    """
    # List VMs as a regular user passing domainid and isrecursive="true"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        domainid=self.domain_1.id,
        isrecursive="true")
    self.debug("List as Regular User passing domainId - isrecursive=true %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid__rec_false(self):
    """
    # List VMs as a regular user passing domainid and isrecursive="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        domainid=self.domain_1.id,
        isrecursive="false")
    self.debug("List as Regular User passing domainId - isrecursive=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
## Regular User - Test cases when account and domainId is passed with listall =true
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_listall_true(self):
    """
    # List VMs as a regular user passing domainid, account and listall="true"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        listall="true")
    self.debug("List as Regular User passing domainId and accountId - listall=true %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_listall_true_rec_true(self):
    """
    # List VMs as a regular user passing domainid, account, listall="true"
    # and isrecursive="true"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        listall="true",
        isrecursive="true")
    self.debug("List as Regular User passing domainId and accountId - listall=true,isrecursive=true %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_listall_true_rec_false(self):
    """
    # List VMs as a regular user passing domainid, account, listall="true"
    # and isrecursive="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        listall="true",
        isrecursive="false")
    self.debug("List as Regular User passing domainId and accountId - listall=true,isrecursive=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
## Regular User - Test cases when account and domainId is passed with listall=false
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_listall_false(self):
    """
    # List VMs as a regular user passing domainid, account and listall="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        listall="false")
    self.debug("List as Regular User passing domainId and accountId - listall=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_listall_false_rec_true(self):
    """
    # List VMs as a regular user passing domainid, account, listall="false"
    # and isrecursive="true"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        listall="false",
        isrecursive="true")
    self.debug("List as Regular User passing domainId and accountId - listall=false,isrecursive=true %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_listall_false_rec_false(self):
    """
    # List VMs as a regular user passing domainid, account, listall="false"
    # and isrecursive="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        listall="false",
        isrecursive="false")
    self.debug("List as Regular User passing domainId and accountId - listall=false,isrecursive=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
## Regular User - Test cases when account and domainId is passed with listall not passed
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid(self):
    """
    # List VMs as a regular user passing domainid and account parameters
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id)
    self.debug("List as Regular User passing domainId and accountId %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_rec_true(self):
    """
    # List VMs as a regular user passing domainid, account and
    # isrecursive="true"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        isrecursive="true")
    self.debug("List as Regular User passing domainId and accountId - isrecursive=true %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_domainid_accountid_rec_false(self):
    """
    # List VMs as a regular user passing domainid, account and
    # isrecursive="false"
    # Validate that only the VMs of the caller's own account are returned
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    vm_list = VirtualMachine.list(
        self.apiclient,
        account=self.account_d1a.user[0].username,
        domainid=self.domain_1.id,
        isrecursive="false")
    self.debug("List as Regular User passing domainId and accountId - isrecursive=false %s" % vm_list)
    self.assertEqual(len(vm_list) == 1,
                     True,
                     "Number of items in list response check failed!!")
    self.assertEqual(self.checkForExistenceOfValue(vm_list, self.vm_d1a.id),
                     True,
                     "Account access check failed!!")
## Cross Domain access check
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_cross_domainid_accountid(self):
    """
    # Regular User should not be allowed to list Vms of other accounts in the same domain
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    try:
        VirtualMachine.list(self.apiclient,
                            account=self.account_d1b.user[0].username,
                            domainid=self.domain_1.id)
    except Exception as e:
        # Expected path: the API must reject cross-account access.
        self.debug("List as Regular User passing domainId and accountId of another account %s" % e)
    else:
        # BUG FIX: the original called self.fail() inside the try block;
        # fail() raises AssertionError (an Exception subclass), so the
        # broad "except Exception" swallowed it and the test could never
        # report a failure. Moving fail() to the else clause lets it
        # propagate when the API call unexpectedly succeeds.
        self.fail("Regular User is able to use another account with in the same domain in listVirtualMachine call")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_regularuser_cross_domainid(self):
    """
    # Regular User should not be allowed to list Vms of other accounts in other domains
    """
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    try:
        VirtualMachine.list(self.apiclient, domainid=self.domain_2.id)
    except Exception as e:
        # Expected path: the API must reject the foreign domain.
        self.debug("List as Regular User passing domainId of a domain that user does not belong to %s" % e)
    else:
        # BUG FIX: self.fail() was inside the try block, so its
        # AssertionError was caught by "except Exception" and the test
        # could never fail. The else clause lets the failure propagate.
        self.fail("Regular User is able to use another domain in listVirtualMachine call")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_domainadmin_cross_domainid_accountid(self):
    """
    # Domain admin should not be allowed to list Vms of accounts in other domains
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    try:
        VirtualMachine.list(self.apiclient,
                            account=self.account_d2a.user[0].username,
                            domainid=self.domain_2.id)
    except Exception as e:
        # Expected path: the API must reject the foreign domain/account.
        self.debug("List as domain admin passing domainId and accountId of another account %s" % e)
    else:
        # BUG FIX: self.fail() was inside the try block, so its
        # AssertionError was caught by "except Exception" and the test
        # could never fail. The else clause lets the failure propagate.
        self.fail("Domain admin user is able to use another domain in listVirtualMachine call")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_as_domainadmin_cross_domainid(self):
    """
    # Domain admin should not be allowed to list Vms from other domains
    """
    # NOTE(review): despite the test name this uses the d1a (regular user)
    # key pair, matching the original code — confirm against test intent.
    self.apiclient.connection.apiKey = self.user_d1a_apikey
    self.apiclient.connection.securityKey = self.user_d1a_secretkey
    try:
        VirtualMachine.list(self.apiclient, domainid=self.domain_2.id)
    except Exception as e:
        # Expected path: the API must reject the foreign domain.
        self.debug("List as domain admin passing domainId of a domain that user does not belong to %s" % e)
    else:
        # BUG FIX: self.fail() was inside the try block, so its
        # AssertionError was caught by "except Exception" and the test
        # could never fail. The else clause lets the failure propagate.
        self.fail("Domain admin User is able to use another domain in listVirtualMachine call")
## List test cases relating to filter - id
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_domainadmin_owns(self):
    """
    # Domain admin should be able to list a VM he owns by passing its uuid
    # in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d1.id)
    self.assertNotEqual(listed,
                        None,
                        "Domain Admin is not able to list Vms that he owns")
    self.assertEqual(len(listed),
                     1,
                     "Domain Admin is not able to list Vms that belongs to him")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_domainadmin_ownedbyusersindomain(self):
    """
    # Domain admin should be able to list a VM owned by any account in his
    # domain by passing its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d1a.id)
    self.assertNotEqual(listed,
                        None,
                        "Domain Admin is not able to list Vms from his domain")
    self.assertEqual(len(listed),
                     1,
                     "Domain Admin is not able to list Vms from his domain")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain(self):
    """
    # Domain admin should be able to list a VM owned by any account in his
    # sub-domain by passing its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d12b.id)
    self.assertNotEqual(listed,
                        None,
                        "Domain Admin is not able to list Vms from his sub domain")
    self.assertEqual(len(listed),
                     1,
                     "Domain Admin is not able to list Vms from his sub domain")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_domainadmin_ownedbyusersnotindomain(self):
    """
    # Domain admin should NOT be able to list a VM owned by an account
    # outside his domain tree when passing its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d2.id)
    self.assertEqual(listed,
                     None,
                     "Domain Admin is able to list Vms from other domains!!!")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_domainadmin_ownedbyusersinsubdomain2(self):
    """
    # Domain admin should be able to list a VM owned by an account in a
    # deeper sub-domain by passing its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d1_apikey
    self.apiclient.connection.securityKey = self.user_d1_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d111a.id)
    self.assertNotEqual(listed,
                        None,
                        "Domain Admin is not able to list Vms from his subdomain")
    self.assertEqual(len(listed),
                     1,
                     "Domain Admin is not able to list Vms from his sub domains")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_rootadmin_owns(self):
    """
    # ROOT-domain account should be able to list a VM it owns by passing
    # its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_a_apikey
    self.apiclient.connection.securityKey = self.user_a_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_a.id)
    self.assertNotEqual(listed,
                        None,
                        "ROOT Admin not able to list Vms that he owns")
    self.assertEqual(len(listed),
                     1,
                     "ROOT Admin not able to list Vms that he owns")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_rootadmin_Vmsownedbyothers(self):
    """
    # ROOT admin should be able to list any account's VM by passing its
    # uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.default_apikey
    self.apiclient.connection.securityKey = self.default_secretkey
    results = [
        VirtualMachine.list(self.apiclient, id=self.vm_d2.id),
        VirtualMachine.list(self.apiclient, id=self.vm_d11a.id),
    ]
    # Same assertion order as before: both non-None checks first,
    # then both length checks.
    for listed in results:
        self.assertNotEqual(listed,
                            None,
                            "ROOT Admin not able to list Vms from other domains")
    for listed in results:
        self.assertEqual(len(listed),
                         1,
                         "ROOT Admin not able to list Vms from other domains")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_user_own(self):
    """
    # Regular user should be able to list a VM he owns by passing its uuid
    # in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d11a_apikey
    self.apiclient.connection.securityKey = self.user_d11a_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d11a.id)
    self.assertNotEqual(listed,
                        None,
                        "Regular User is not able to list Vms that he owns")
    self.assertEqual(len(listed),
                     1,
                     "Regular User is not able to list Vms that he owns")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_user_vmfromsamedomaindifferentaccount(self):
    """
    # Regular user should NOT be able to list a VM owned by another account
    # in the same domain when passing its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d11a_apikey
    self.apiclient.connection.securityKey = self.user_d11a_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d11b.id)
    self.assertEqual(listed,
                     None,
                     "Regular User is able to list Vms from other accounts")
@attr("simulator_only", tags=["advanced"], required_hardware="false")
def test_listVM_by_id_as_user_vmsfromotherdomain(self):
    """
    # Regular user should NOT be able to list a VM owned by an account in
    # a different domain when passing its uuid in the "id" parameter
    """
    self.apiclient.connection.apiKey = self.user_d11a_apikey
    self.apiclient.connection.securityKey = self.user_d11a_secretkey
    listed = VirtualMachine.list(self.apiclient, id=self.vm_d2.id)
    self.assertEqual(listed,
                     None,
                     "Regular User is able to list Vms from other domains")
@staticmethod
def generateKeysForUser(apiclient, account):
    """Register and return API/secret keys for the first user of *account*."""
    account_users = User.list(
        apiclient,
        account=account.name,
        domainid=account.domainid
    )
    return User.registerUserKeys(apiclient, account_users[0].id)
@staticmethod
def checkForExistenceOfValue(list,attributeValue):
if list is None:
return False
rowCount=len(list)
for num in range (rowCount):
if list[num].id == attributeValue:
return True
return False
|
{
"content_hash": "22ffbe3802b3cabab3df60acc2a905b4",
"timestamp": "",
"source": "github",
"line_count": 2815,
"max_line_length": 150,
"avg_line_length": 39.07246891651865,
"alnum_prop": 0.6887052341597796,
"repo_name": "DaanHoogland/cloudstack",
"id": "5ed937f2d4aa37394f95687ee81f97964912101e",
"size": "110776",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "test/integration/component/test_acl_listvm.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "9979"
},
{
"name": "C#",
"bytes": "2356211"
},
{
"name": "CSS",
"bytes": "343148"
},
{
"name": "Dockerfile",
"bytes": "2375"
},
{
"name": "FreeMarker",
"bytes": "4887"
},
{
"name": "Groovy",
"bytes": "146420"
},
{
"name": "HTML",
"bytes": "153560"
},
{
"name": "Java",
"bytes": "36818077"
},
{
"name": "JavaScript",
"bytes": "8264908"
},
{
"name": "Python",
"bytes": "12533840"
},
{
"name": "Ruby",
"bytes": "22732"
},
{
"name": "SCSS",
"bytes": "362625"
},
{
"name": "Shell",
"bytes": "708848"
},
{
"name": "XSLT",
"bytes": "57835"
}
],
"symlink_target": ""
}
|
import six
from pytest import mark, raises
from sqlalchemy_utils import Currency, i18n
@mark.skipif('i18n.babel is None')
class TestCurrency(object):
    """Behavioural tests for the Currency primitive type."""

    def setup_method(self, method):
        # Pin the locale so localized names/symbols are deterministic.
        i18n.get_locale = lambda: i18n.babel.Locale('en')

    def test_init(self):
        # Constructing from an existing Currency equals constructing
        # from its code.
        assert Currency(Currency('USD')) == Currency('USD')

    def test_hashability(self):
        assert len({Currency('USD'), Currency('USD')}) == 1

    def test_invalid_currency_code(self):
        with raises(ValueError):
            Currency('Unknown code')

    def test_invalid_currency_code_type(self):
        with raises(TypeError):
            Currency(None)

    @mark.parametrize(
        ('code', 'name'),
        [
            ('USD', 'US Dollar'),
            ('EUR', 'Euro'),
        ]
    )
    def test_name_property(self, code, name):
        assert Currency(code).name == name

    @mark.parametrize(
        ('code', 'symbol'),
        [
            ('USD', u'$'),
            ('EUR', u'€'),
        ]
    )
    def test_symbol_property(self, code, symbol):
        assert Currency(code).symbol == symbol

    def test_equality_operator(self):
        usd = Currency('USD')
        assert usd == 'USD'
        assert 'USD' == usd
        assert usd == Currency('USD')

    def test_non_equality_operator(self):
        assert Currency('USD') != 'EUR'
        assert not (Currency('USD') != 'USD')

    def test_unicode(self):
        assert six.text_type(Currency('USD')) == u'USD'

    def test_str(self):
        assert str(Currency('USD')) == 'USD'

    def test_representation(self):
        assert repr(Currency('USD')) == "Currency('USD')"
|
{
"content_hash": "a44a27b86a7fd58d00fc0e03169c4cec",
"timestamp": "",
"source": "github",
"line_count": 65,
"max_line_length": 64,
"avg_line_length": 26.723076923076924,
"alnum_prop": 0.5630397236614854,
"repo_name": "rmoorman/sqlalchemy-utils",
"id": "a6c4876ee91d95ba6dc7843f8ef8360a372a4a14",
"size": "1763",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/primitives/test_currency.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "492041"
}
],
"symlink_target": ""
}
|
import re
from sqlalchemy import __version__
from sqlalchemy.schema import ForeignKeyConstraint, CheckConstraint, Column
from sqlalchemy import types as sqltypes
from sqlalchemy import schema, sql
from sqlalchemy.sql.visitors import traverse
from sqlalchemy.ext.compiler import compiles
from sqlalchemy.sql.expression import _BindParamClause
from . import compat
def _safe_int(value):
try:
return int(value)
except:
return value
# Parse the running SQLAlchemy version into a comparable tuple; numeric
# tokens become ints, pre-release tags (e.g. "b2") remain strings.
_vers = tuple(
    [_safe_int(x) for x in re.findall(r'(\d+|[abc]\d)', __version__)])
# Feature flags keyed to the minimum SQLAlchemy release supporting them.
sqla_07 = _vers > (0, 7, 2)
sqla_079 = _vers >= (0, 7, 9)
sqla_08 = _vers >= (0, 8, 0)
sqla_083 = _vers >= (0, 8, 3)
sqla_084 = _vers >= (0, 8, 4)
sqla_09 = _vers >= (0, 9, 0)
sqla_092 = _vers >= (0, 9, 2)
# Deduplicated: this assignment appeared twice in the original.
sqla_094 = _vers >= (0, 9, 4)
sqla_099 = _vers >= (0, 9, 9)
sqla_100 = _vers >= (1, 0, 0)
sqla_105 = _vers >= (1, 0, 5)
# TextClause was a private name (_TextClause) before SQLAlchemy 0.8;
# alias it so the rest of this module can use one spelling.
if sqla_08:
    from sqlalchemy.sql.expression import TextClause
else:
    from sqlalchemy.sql.expression import _TextClause as TextClause
def _table_for_constraint(constraint):
    """Return the Table that owns the given constraint."""
    # ForeignKeyConstraint stores its table on .parent; other constraint
    # types expose it via .table.
    if isinstance(constraint, ForeignKeyConstraint):
        return constraint.parent
    return constraint.table
def _columns_for_constraint(constraint):
    """Return the source Column objects involved in *constraint*."""
    if isinstance(constraint, ForeignKeyConstraint):
        return [fk.parent for fk in constraint.elements]
    if isinstance(constraint, CheckConstraint):
        # CHECK constraints embed columns inside an SQL expression.
        return _find_columns(constraint.sqltext)
    return list(constraint.columns)
def _fk_spec(constraint):
    """Decompose a ForeignKeyConstraint into a flat spec tuple.

    Returns (source_schema, source_table, source_columns, target_schema,
    target_table, target_columns, onupdate, ondelete, deferrable,
    initially).
    """
    if sqla_100:
        source_columns = [
            constraint.columns[key].name for key in constraint.column_keys]
    else:
        source_columns = [
            elem.parent.name for elem in constraint.elements]
    # All elements reference the same target table; inspect the first.
    target_table = constraint.elements[0].column.table
    return (
        constraint.parent.schema,
        constraint.parent.name,
        source_columns,
        target_table.schema,
        target_table.name,
        [elem.column.name for elem in constraint.elements],
        constraint.onupdate,
        constraint.ondelete,
        constraint.deferrable,
        constraint.initially,
    )
def _fk_is_self_referential(constraint):
spec = constraint.elements[0]._get_colspec()
tokens = spec.split(".")
tokens.pop(-1) # colname
tablekey = ".".join(tokens)
return tablekey == constraint.parent.key
def _is_type_bound(constraint):
    """True for CHECK constraints generated by a column type itself.

    Deals with SQLAlchemy #3260: such constraints (e.g. from Boolean or
    Enum) must not be copied, since the type will re-emit them.
    """
    if sqla_100:
        # 1.0 exposes an explicit flag for this (#3260).
        return constraint._type_bound
    # Pre-1.0: infer it from the create-rule target used by SchemaType.
    rule = constraint._create_rule
    return rule is not None and isinstance(
        getattr(rule, "target", None), sqltypes.SchemaType)
def _find_columns(clause):
    """Locate Column objects within the given expression."""
    found = set()
    # Visit every node of the clause tree, collecting 'column' nodes.
    traverse(clause, {}, {'column': found.add})
    return found
def _textual_index_column(table, text_):
    """a workaround for the Index construct's severe lack of flexibility"""
    # The two isinstance branches are mutually exclusive, so checking the
    # TextClause case first is equivalent to the original order.
    if isinstance(text_, TextClause):
        return _textual_index_element(table, text_)
    if isinstance(text_, compat.string_types):
        col = Column(text_, sqltypes.NULLTYPE)
        table.append_column(col)
        return col
    raise ValueError("String or text() construct expected")
class _textual_index_element(sql.ColumnElement):
    """Wrap around a sqlalchemy text() construct in such a way that
    we appear like a column-oriented SQL expression to an Index
    construct.

    The issue here is that currently the Postgresql dialect, the biggest
    recipient of functional indexes, keys all the index expressions to
    the corresponding column expressions when rendering CREATE INDEX,
    so the Index we create here needs to have a .columns collection that
    is the same length as the .expressions collection.  Ultimately
    SQLAlchemy should support text() expressions in indexes.

    See https://bitbucket.org/zzzeek/sqlalchemy/issue/3174/\
    support-text-sent-to-indexes
    """

    __visit_name__ = '_textual_idx_element'

    def __init__(self, table, text):
        # table: the Table the index targets; text: the text() clause to
        # present as an index expression.
        self.table = table
        self.text = text
        self.key = text.text
        # Placeholder Column keeps the Index's .columns collection the
        # same length as its .expressions collection (see docstring).
        self.fake_column = schema.Column(self.text.text, sqltypes.NULLTYPE)
        table.append_column(self.fake_column)

    def get_children(self):
        # Traversal should visit the placeholder column.
        return [self.fake_column]
@compiles(_textual_index_element)
def _render_textual_index_column(element, compiler, **kw):
    # Render only the wrapped text() clause; the placeholder column
    # created in __init__ is never emitted.
    return compiler.process(element.text, **kw)
class _literal_bindparam(_BindParamClause):
    # Marker subclass: a bind parameter that should be rendered as an
    # inline literal value rather than a placeholder.
    pass
@compiles(_literal_bindparam)
def _render_literal_bindparam(element, compiler, **kw):
    # Inline the bound value into the SQL text instead of emitting a
    # placeholder.
    return compiler.render_literal_bindparam(element, **kw)
def _get_index_expressions(idx):
    """Return the Index's expression list across SQLAlchemy versions."""
    # .expressions exists from 0.8 onward; older releases only have
    # .columns.
    source = idx.expressions if sqla_08 else idx.columns
    return list(source)
def _get_index_column_names(idx):
    """Names of the index's expressions; None for nameless (textual) ones."""
    return [
        getattr(expression, "name", None)
        for expression in _get_index_expressions(idx)
    ]
|
{
"content_hash": "e40a8cf4baec6d0b7ef7c6e5832c39a6",
"timestamp": "",
"source": "github",
"line_count": 172,
"max_line_length": 78,
"avg_line_length": 31.174418604651162,
"alnum_prop": 0.6743752331219695,
"repo_name": "hexlism/css_platform",
"id": "88bfa3b2f538cb44d6abd2a2e30b76900c786d4d",
"size": "5362",
"binary": false,
"copies": "9",
"ref": "refs/heads/master",
"path": "sleepyenv/lib/python2.7/site-packages/alembic-0.8.4-py2.7.egg/alembic/util/sqla_compat.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "21598"
},
{
"name": "CSS",
"bytes": "6267"
},
{
"name": "HTML",
"bytes": "158697"
},
{
"name": "Java",
"bytes": "186035"
},
{
"name": "JavaScript",
"bytes": "10854505"
},
{
"name": "Mako",
"bytes": "9299"
},
{
"name": "Python",
"bytes": "10688288"
},
{
"name": "Shell",
"bytes": "3771"
}
],
"symlink_target": ""
}
|
from responder import responder
from view import view
def http_error(status):
    """
    Create an HTTP error response.
    Arguments:
        status: string HTTP status including code number and description.
    Returns: HTTP response
    """
    # Render the error template, then wrap it in an HTML response carrying
    # the same status line.
    body = view('error.htm', {'error': status})
    return responder(body, 'text/html', status)
|
{
"content_hash": "70f39031ae75bf38244b68f35483fc10",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 73,
"avg_line_length": 22.470588235294116,
"alnum_prop": 0.675392670157068,
"repo_name": "samalba/image-spider",
"id": "071939b446e5ec614f9fb387006407bea63007b4",
"size": "407",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "interface/http_error.py",
"mode": "33188",
"license": "mit",
"language": [],
"symlink_target": ""
}
|
"""
An extension of scipy.stats.stats to support masked arrays
"""
# Original author (2007): Pierre GF Gerard-Marchant
# TODO : f_value_wilks_lambda looks botched... what are dfnum & dfden for ?
# TODO : ttest_rel looks botched: what are x1,x2,v1,v2 for ?
# TODO : reimplement ksonesamp
from __future__ import division, print_function, absolute_import
__all__ = ['argstoarray',
'betai',
'chisquare','count_tied_groups',
'describe',
'f_oneway','f_value_wilks_lambda','find_repeats','friedmanchisquare',
'kendalltau','kendalltau_seasonal','kruskal','kruskalwallis',
'ks_twosamp','ks_2samp','kurtosis','kurtosistest',
'linregress',
'mannwhitneyu', 'meppf','mode','moment','mquantiles','msign',
'normaltest',
'obrientransform',
'pearsonr','plotting_positions','pointbiserialr',
'rankdata',
'scoreatpercentile','sem',
'sen_seasonal_slopes','signaltonoise','skew','skewtest','spearmanr',
'theilslopes','threshold','tmax','tmean','tmin','trim','trimboth',
'trimtail','trima','trimr','trimmed_mean','trimmed_std',
'trimmed_stde','trimmed_var','tsem','ttest_1samp','ttest_onesamp',
'ttest_ind','ttest_rel','tvar',
'variation',
'winsorize',
'zmap', 'zscore'
]
import numpy as np
from numpy import ndarray
import numpy.ma as ma
from numpy.ma import masked, nomask
from scipy._lib.six import iteritems
import itertools
import warnings
from collections import namedtuple
from . import stats
from . import distributions
import scipy.special as special
from . import futil
genmissingvaldoc = """
Notes
-----
Missing values are considered pair-wise: if a value is missing in x,
the corresponding value in y is masked.
"""
def _chk_asarray(a, axis):
# Always returns a masked array, raveled for axis=None
a = ma.asanyarray(a)
if axis is None:
a = ma.ravel(a)
outaxis = 0
else:
outaxis = axis
return a, outaxis
def _chk2_asarray(a, b, axis):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
if axis is None:
a = ma.ravel(a)
b = ma.ravel(b)
outaxis = 0
else:
outaxis = axis
return a, b, outaxis
def _chk_size(a,b):
a = ma.asanyarray(a)
b = ma.asanyarray(b)
(na, nb) = (a.size, b.size)
if na != nb:
raise ValueError("The size of the input array should match!"
" (%s <> %s)" % (na, nb))
return (a, b, na)
def argstoarray(*args):
    """
    Constructs a 2D array from a group of sequences.
    Sequences are filled with missing values to match the length of the longest
    sequence.
    Parameters
    ----------
    args : sequences
        Group of sequences.
    Returns
    -------
    argstoarray : MaskedArray
        A ( `m` x `n` ) masked array, where `m` is the number of arguments and
        `n` the length of the longest argument.
    Notes
    -----
    `numpy.ma.row_stack` has identical behavior, but is called with a sequence
    of sequences.
    """
    if len(args) == 1 and not isinstance(args[0], ndarray):
        # A single non-ndarray argument must already be 2D.
        output = ma.asarray(args[0])
        if output.ndim != 2:
            raise ValueError("The input should be 2D")
    else:
        # One row per sequence, padded (masked) to the longest length.
        nrows = len(args)
        ncols = max(len(seq) for seq in args)
        output = ma.array(np.empty((nrows, ncols), dtype=float), mask=True)
        for row, seq in enumerate(args):
            output[row, :len(seq)] = seq
    # Mask non-finite entries as missing as well.
    output[np.logical_not(np.isfinite(output._data))] = masked
    return output
def find_repeats(arr):
    """Find repeats in arr and return a tuple (repeats, repeat_count).
    Masked values are discarded.
    Parameters
    ----------
    arr : sequence
        Input array. The array is flattened if it is not 1D.
    Returns
    -------
    repeats : ndarray
        Array of repeated values.
    counts : ndarray
        Array of counts.
    """
    # Work on unmasked data only.
    marr = ma.compressed(arr)
    if not marr.size:
        return (np.array(0), np.array(0))
    # Fix: the original compressed `arr` a second time here; reuse `marr`.
    # A fresh copy is still required because futil.dfreps overwrites its
    # input in place.
    (v1, v2, n) = futil.dfreps(ma.array(marr, copy=True))
    return (v1[:n], v2[:n])
def count_tied_groups(x, use_missing=False):
    """
    Counts the number of tied values.
    Parameters
    ----------
    x : sequence
        Sequence of data on which to counts the ties
    use_missing : bool, optional
        Whether to consider missing values as tied.
    Returns
    -------
    count_tied_groups : dict
        Returns a dictionary (nb of ties: nb of groups).
    Examples
    --------
    >>> from scipy.stats import mstats
    >>> z = [0, 0, 0, 2, 2, 2, 3, 3, 4, 5, 6]
    >>> mstats.count_tied_groups(z)
    {2: 1, 3: 2}
    In the above example, the ties were 0 (3x), 2 (3x) and 3 (2x).
    >>> z = np.ma.array([0, 0, 1, 2, 2, 2, 3, 3, 4, 5, 6])
    >>> mstats.count_tied_groups(z)
    {2: 2, 3: 1}
    >>> z[[1,-1]] = np.ma.masked
    >>> mstats.count_tied_groups(z, use_missing=True)
    {2: 2, 3: 1}
    """
    nmasked = ma.getmask(x).sum()
    # Copy required: find_repeats overwrites the data it is given.
    data = ma.compressed(x).copy()
    (ties, counts) = find_repeats(data)
    nties = {}
    if len(ties):
        # Start every distinct tie size at one group ...
        for size in np.unique(counts):
            nties[size] = 1
        # ... then overwrite with the actual group count for repeated sizes.
        for size, ngroups in zip(*find_repeats(counts)):
            nties[size] = ngroups
    if use_missing and nmasked:
        # The masked entries form one extra tied group of size `nmasked`.
        nties[nmasked] = nties.get(nmasked, 0) + 1
    return nties
def rankdata(data, axis=None, use_missing=False):
    """Returns the rank (also known as order statistics) of each data point
    along the given axis.
    If some values are tied, their rank is averaged.
    If some values are masked, their rank is set to 0 if use_missing is False,
    or set to the average rank of the unmasked values if use_missing is True.
    Parameters
    ----------
    data : sequence
        Input data. The data is transformed to a masked array
    axis : {None,int}, optional
        Axis along which to perform the ranking.
        If None, the array is first flattened. An exception is raised if
        the axis is specified for arrays with a dimension larger than 2
    use_missing : bool, optional
        Whether the masked values have a rank of 0 (False) or equal to the
        average rank of the unmasked values (True).
    """
    def _rank1d(data, use_missing=False):
        n = data.count()
        rk = np.empty(data.size, dtype=float)
        idx = data.argsort()
        # Masked entries sort to the end, so the first n sorted positions
        # are the unmasked values: assign them ranks 1..n.
        rk[idx[:n]] = np.arange(1,n+1)
        if use_missing:
            # Masked entries get the mean of all unmasked ranks, (n+1)/2.
            rk[idx[n:]] = (n+1)/2.
        else:
            rk[idx[n:]] = 0
        # Average the ranks of tied values. A copy is passed because
        # find_repeats overwrites its input.
        repeats = find_repeats(data.copy())
        for r in repeats[0]:
            condition = (data == r).filled(False)
            rk[condition] = rk[condition].mean()
        return rk
    data = ma.array(data, copy=False)
    if axis is None:
        if data.ndim > 1:
            # Rank the flattened data, then restore the original shape.
            return _rank1d(data.ravel(), use_missing).reshape(data.shape)
        else:
            return _rank1d(data, use_missing)
    else:
        return ma.apply_along_axis(_rank1d,axis,data,use_missing).view(ndarray)
def mode(a, axis=0):
    # Docstring is copied from the unmasked stats.mode below.
    a, axis = _chk_asarray(a, axis)
    def _mode1D(a):
        (rep,cnt) = find_repeats(a)
        if not cnt.ndim:
            # find_repeats returned 0-d scalars: input had no unmasked data.
            return (0, 0)
        elif cnt.size:
            # Most frequent repeated value and its count.
            return (rep[cnt.argmax()], cnt.max())
        else:
            # No repeated value at all: fall back to the first unmasked
            # entry with a count of 1.
            not_masked_indices = ma.flatnotmasked_edges(a)
            first_not_masked_index = not_masked_indices[0]
            return (a[first_not_masked_index], 1)
    if axis is None:
        output = _mode1D(ma.ravel(a))
        output = (ma.array(output[0]), ma.array(output[1]))
    else:
        # apply_along_axis stacks the (mode, count) pairs along `axis`;
        # slice index 0/1 on that axis to split them back apart.
        output = ma.apply_along_axis(_mode1D, axis, a)
        newshape = list(a.shape)
        newshape[axis] = 1
        slices = [slice(None)] * output.ndim
        slices[axis] = 0
        modes = output[tuple(slices)].reshape(newshape)
        slices[axis] = 1
        counts = output[tuple(slices)].reshape(newshape)
        output = (modes, counts)
    ModeResult = namedtuple('ModeResult', ('mode', 'count'))
    return ModeResult(*output)
mode.__doc__ = stats.mode.__doc__
def betai(a, b, x):
    # Incomplete beta function; x values above 1 are clipped to 1 so the
    # call to betainc stays in its valid domain.
    x = np.asanyarray(x)
    capped = ma.where(x < 1.0, x, 1.0)
    return special.betainc(a, b, capped)
betai.__doc__ = stats.betai.__doc__
def msign(x):
    """Returns the sign of x, or 0 if x is masked."""
    signs = np.sign(x)
    return ma.filled(signs, 0)
def pearsonr(x,y):
    """
    Calculates a Pearson correlation coefficient and the p-value for testing
    non-correlation.
    The Pearson correlation coefficient measures the linear relationship
    between two datasets. Strictly speaking, Pearson's correlation requires
    that each dataset be normally distributed. Like other correlation
    coefficients, this one varies between -1 and +1 with 0 implying no
    correlation. Correlations of -1 or +1 imply an exact linear
    relationship. Positive correlations imply that as `x` increases, so does
    `y`. Negative correlations imply that as `x` increases, `y` decreases.
    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Pearson correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.
    Parameters
    ----------
    x : 1-D array_like
        Input
    y : 1-D array_like
        Input
    Returns
    -------
    pearsonr : float
        Pearson's correlation coefficient, 2-tailed p-value.
    References
    ----------
    http://www.statsoft.com/textbook/glosp.html#Pearson%20Correlation
    """
    (x, y, n) = _chk_size(x, y)
    (x, y) = (x.ravel(), y.ravel())
    # Get the common mask and the total nb of unmasked elements
    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
    n -= m.sum()
    df = n-2
    if df < 0:
        return (masked, masked)
    # NOTE(review): the means/products below use each array's own mask,
    # not the pair-wise common mask `m` — verify this is intended.
    (mx, my) = (x.mean(), y.mean())
    (xm, ym) = (x-mx, y-my)
    r_num = ma.add.reduce(xm*ym)
    r_den = ma.sqrt(ma.dot(xm,xm) * ma.dot(ym,ym))
    r = r_num / r_den
    # Presumably, if r > 1, then it is only some small artifact of floating
    # point arithmetic.
    r = min(r, 1.0)
    r = max(r, -1.0)
    # df is recomputed here with the same value as above.
    df = n - 2
    if r is masked or abs(r) == 1.0:
        prob = 0.
    else:
        # Two-tailed p-value via the incomplete beta function of the
        # t statistic implied by r.
        t_squared = (df / ((1.0 - r) * (1.0 + r))) * r * r
        prob = betai(0.5*df, 0.5, df/(df + t_squared))
    return r, prob
def spearmanr(x, y, use_ties=True):
    """
    Calculates a Spearman rank-order correlation coefficient and the p-value
    to test for non-correlation.
    The Spearman correlation is a nonparametric measure of the linear
    relationship between two datasets. Unlike the Pearson correlation, the
    Spearman correlation does not assume that both datasets are normally
    distributed. Like other correlation coefficients, this one varies
    between -1 and +1 with 0 implying no correlation. Correlations of -1 or
    +1 imply an exact linear relationship. Positive correlations imply that
    as `x` increases, so does `y`. Negative correlations imply that as `x`
    increases, `y` decreases.
    Missing values are discarded pair-wise: if a value is missing in `x`, the
    corresponding value in `y` is masked.
    The p-value roughly indicates the probability of an uncorrelated system
    producing datasets that have a Spearman correlation at least as extreme
    as the one computed from these datasets. The p-values are not entirely
    reliable but are probably reasonable for datasets larger than 500 or so.
    Parameters
    ----------
    x : array_like
        The length of `x` must be > 2.
    y : array_like
        The length of `y` must be > 2.
    use_ties : bool, optional
        Whether the correction for ties should be computed.
    Returns
    -------
    correlation : float
        Spearman correlation coefficient
    pvalue : float
        2-tailed p-value.
    References
    ----------
    [CRCProbStat2000] section 14.7
    """
    (x, y, n) = _chk_size(x, y)
    (x, y) = (x.ravel(), y.ravel())
    # Discard missing values pair-wise.
    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
    n -= m.sum()
    if m is not nomask:
        x = ma.array(x, mask=m, copy=True)
        y = ma.array(y, mask=m, copy=True)
    df = n-2
    if df < 0:
        raise ValueError("The input must have at least 3 entries!")
    # Gets the ranks and rank differences
    rankx = rankdata(x)
    ranky = rankdata(y)
    dsq = np.add.reduce((rankx-ranky)**2)
    # Tie correction
    if use_ties:
        xties = count_tied_groups(x)
        yties = count_tied_groups(y)
        # Fix: use the builtin sum — calling np.sum on a generator is
        # deprecated in NumPy (and removed in recent versions).
        corr_x = sum(v*k*(k**2-1) for (k, v) in iteritems(xties))/12.
        corr_y = sum(v*k*(k**2-1) for (k, v) in iteritems(yties))/12.
    else:
        corr_x = corr_y = 0
    denom = n*(n**2 - 1)/6.
    if corr_x != 0 or corr_y != 0:
        rho = denom - dsq - corr_x - corr_y
        rho /= ma.sqrt((denom-2*corr_x)*(denom-2*corr_y))
    else:
        rho = 1. - dsq/denom
    # t statistic of rho, then a two-tailed p-value via the incomplete beta.
    t = ma.sqrt(ma.divide(df,(rho+1.0)*(1.0-rho))) * rho
    if t is masked:
        prob = 0.
    else:
        prob = betai(0.5*df,0.5,df/(df+t*t))
    SpearmanrResult = namedtuple('SpearmanrResult', ('correlation', 'pvalue'))
    return SpearmanrResult(rho, prob)
def kendalltau(x, y, use_ties=True, use_missing=False):
    """
    Computes Kendall's rank correlation tau on two variables *x* and *y*.
    Parameters
    ----------
    x : sequence
        First data list (for example, time).
    y : sequence
        Second data list.
    use_ties : {True, False}, optional
        Whether ties correction should be performed.
    use_missing : {False, True}, optional
        Whether missing data should be allocated a rank of 0 (False) or the
        average rank (True)
    Returns
    -------
    correlation : float
        Kendall tau
    pvalue : float
        Approximate 2-side p-value.
    """
    (x, y, n) = _chk_size(x, y)
    (x, y) = (x.flatten(), y.flatten())
    # Discard missing values pair-wise.
    m = ma.mask_or(ma.getmask(x), ma.getmask(y))
    if m is not nomask:
        x = ma.array(x, mask=m, copy=True)
        y = ma.array(y, mask=m, copy=True)
        n -= m.sum()
    KendalltauResult = namedtuple('KendalltauResult', ('correlation', 'pvalue'))
    if n < 2:
        return KendalltauResult(np.nan, np.nan)
    rx = ma.masked_equal(rankdata(x, use_missing=use_missing), 0)
    ry = ma.masked_equal(rankdata(y, use_missing=use_missing), 0)
    idx = rx.argsort()
    (rx, ry) = (rx[idx], ry[idx])
    # Concordant (C) and discordant (D) pair counts.
    C = np.sum([((ry[i+1:] > ry[i]) * (rx[i+1:] > rx[i])).filled(0).sum()
                for i in range(len(ry)-1)], dtype=float)
    D = np.sum([((ry[i+1:] < ry[i])*(rx[i+1:] > rx[i])).filled(0).sum()
                for i in range(len(ry)-1)], dtype=float)
    if use_ties:
        xties = count_tied_groups(x)
        yties = count_tied_groups(y)
        corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float)
        corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float)
        denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.)
    else:
        denom = n*(n-1)/2.
    tau = (C-D) / denom
    # Variance of the S statistic, with the tie corrections of Kendall's
    # formula when use_ties is set.
    var_s = n*(n-1)*(2*n+5)
    if use_ties:
        # Fix: use the builtin sum — calling np.sum on a generator is
        # deprecated in NumPy (and removed in recent versions).
        var_s -= sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties))
        var_s -= sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties))
        v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\
             np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float)
        v1 /= 2.*n*(n-1)
        if n > 2:
            v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)],
                        dtype=float) * \
                 np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)],
                        dtype=float)
            v2 /= 9.*n*(n-1)*(n-2)
        else:
            v2 = 0
    else:
        v1 = v2 = 0
    var_s /= 18.
    var_s += (v1 + v2)
    # Normal approximation of the p-value.
    z = (C-D)/np.sqrt(var_s)
    prob = special.erfc(abs(z)/np.sqrt(2))
    return KendalltauResult(tau, prob)
def kendalltau_seasonal(x):
    """
    Computes a multivariate Kendall's rank correlation tau, for seasonal data.
    Parameters
    ----------
    x : 2-D ndarray
        Array of seasonal data, with seasons in columns.
    """
    x = ma.array(x, subok=True, copy=False, ndmin=2)
    (n,m) = x.shape
    n_p = x.count(0)
    # Fix applied throughout this function: use the builtin sum for
    # generator arguments — np.sum on a generator is deprecated in NumPy
    # (and removed in recent versions). The builtin sum reproduces the old
    # delegation behavior exactly (elementwise for arrays, scalar otherwise).
    S_szn = sum(msign(x[i:]-x[i]).sum(0) for i in range(n))
    S_tot = S_szn.sum()
    n_tot = x.count()
    ties = count_tied_groups(x.compressed())
    corr_ties = sum(v*k*(k-1) for (k,v) in iteritems(ties))
    denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2.
    R = rankdata(x, axis=0, use_missing=True)
    K = ma.empty((m,m), dtype=int)
    covmat = ma.empty((m,m), dtype=float)
    denom_szn = ma.empty(m, dtype=float)
    for j in range(m):
        ties_j = count_tied_groups(x[:,j].compressed())
        corr_j = sum(v*k*(k-1) for (k,v) in iteritems(ties_j))
        cmb = n_p[j]*(n_p[j]-1)
        for k in range(j,m,1):
            K[j,k] = sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum()
                         for i in range(n))
            covmat[j,k] = (K[j,k] + 4*(R[:,j]*R[:,k]).sum() -
                           n*(n_p[j]+1)*(n_p[k]+1))/3.
            K[k,j] = K[j,k]
            covmat[k,j] = covmat[j,k]
        denom_szn[j] = ma.sqrt(cmb*(cmb-corr_j)) / 2.
    var_szn = covmat.diagonal()
    # Continuity-corrected z statistics, per season and globally.
    z_szn = msign(S_szn) * (abs(S_szn)-1) / ma.sqrt(var_szn)
    z_tot_ind = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(var_szn.sum())
    z_tot_dep = msign(S_tot) * (abs(S_tot)-1) / ma.sqrt(covmat.sum())
    prob_szn = special.erfc(abs(z_szn)/np.sqrt(2))
    prob_tot_ind = special.erfc(abs(z_tot_ind)/np.sqrt(2))
    prob_tot_dep = special.erfc(abs(z_tot_dep)/np.sqrt(2))
    chi2_tot = (z_szn*z_szn).sum()
    chi2_trd = m * z_szn.mean()**2
    output = {'seasonal tau': S_szn/denom_szn,
              'global tau': S_tot/denom_tot,
              'global tau (alt)': S_tot/denom_szn.sum(),
              'seasonal p-value': prob_szn,
              'global p-value (indep)': prob_tot_ind,
              'global p-value (dep)': prob_tot_dep,
              'chi2 total': chi2_tot,
              'chi2 trend': chi2_trd,
              }
    return output
def pointbiserialr(x, y):
    # Docstring is copied from the unmasked version below (when available).
    x = ma.fix_invalid(x, copy=True).astype(bool)
    y = ma.fix_invalid(y, copy=True).astype(float)
    # Get rid of the missing data
    common_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    if common_mask is not nomask:
        keep = np.logical_not(common_mask)
        x = x[keep]
        y = y[keep]
    n = len(x)
    # phat is the fraction of x values that are True
    phat = x.sum() / float(n)
    y0 = y[~x]  # y-values where x is False
    y1 = y[x]   # y-values where x is True
    rpb = (y1.mean() - y0.mean()) * np.sqrt(phat * (1 - phat)) / y.std()
    df = n - 2
    t = rpb * ma.sqrt(df / (1.0 - rpb ** 2))
    prob = betai(0.5 * df, 0.5, df / (df + t * t))
    PointbiserialrResult = namedtuple('PointbiserialrResult', ('correlation',
                                                               'pvalue'))
    return PointbiserialrResult(rpb, prob)
if stats.pointbiserialr.__doc__:
    pointbiserialr.__doc__ = stats.pointbiserialr.__doc__ + genmissingvaldoc
def linregress(*args):
    """
    Linear regression calculation
    Note that the non-masked version is used, and that this docstring is
    replaced by the non-masked docstring + some info on missing data.
    """
    if len(args) == 1:
        # Input is a single 2-D array containing x and y
        args = ma.array(args[0], copy=True)
        if len(args) == 2:
            # Shape (2, N): rows are x and y.
            x = args[0]
            y = args[1]
        else:
            # Shape (N, 2): columns are x and y.
            x = args[:, 0]
            y = args[:, 1]
    else:
        # Input is two 1-D arrays
        x = ma.array(args[0]).flatten()
        y = ma.array(args[1]).flatten()
    # shrink=False keeps a full boolean mask so `~m` below is well-defined
    # even when nothing is masked.
    m = ma.mask_or(ma.getmask(x), ma.getmask(y), shrink=False)
    if m is not nomask:
        x = ma.array(x, mask=m)
        y = ma.array(y, mask=m)
        if np.any(~m):
            # Delegate to the unmasked implementation on the valid entries.
            slope, intercept, r, prob, sterrest = stats.linregress(x.data[~m],
                                                                   y.data[~m])
        else:
            # All data is masked
            return None, None, None, None, None
    else:
        slope, intercept, r, prob, sterrest = stats.linregress(x.data, y.data)
    LinregressResult = namedtuple('LinregressResult', ('slope', 'intercept',
                                                       'rvalue', 'pvalue',
                                                       'stderr'))
    return LinregressResult(slope, intercept, r, prob, sterrest)
if stats.linregress.__doc__:
    linregress.__doc__ = stats.linregress.__doc__ + genmissingvaldoc
def theilslopes(y, x=None, alpha=0.95):
    # Docstring is copied from the unmasked version below.
    y = ma.asarray(y).flatten()
    if x is None:
        x = ma.arange(len(y), dtype=float)
    else:
        x = ma.asarray(x).flatten()
        if len(x) != len(y):
            raise ValueError("Incompatible lengths ! (%s<>%s)" % (len(y),len(x)))
    # Mask both arrays wherever either one is masked.
    shared_mask = ma.mask_or(ma.getmask(x), ma.getmask(y))
    y._mask = x._mask = shared_mask
    # Disregard any masked elements of x or y
    y = y.compressed()
    x = x.compressed().astype(float)
    # We now have unmasked arrays so can use `stats.theilslopes`
    return stats.theilslopes(y, x, alpha=alpha)
theilslopes.__doc__ = stats.theilslopes.__doc__
def sen_seasonal_slopes(x):
    """Seasonal Sen slope estimator.

    Computes all pairwise slopes (x[j]-x[i])/(j-i), j > i, within each
    season (column), and returns (per-season medians, overall median).
    """
    x = ma.array(x, subok=True, copy=False, ndmin=2)
    n = x.shape[0]
    slope_blocks = []
    for i in range(n):
        gaps = np.arange(1, n - i)[:, None]
        slope_blocks.append((x[i + 1:] - x[i]) / gaps)
    szn_slopes = ma.vstack(slope_blocks)
    szn_medslopes = ma.median(szn_slopes, axis=0)
    medslope = ma.median(szn_slopes, axis=None)
    return szn_medslopes, medslope
def ttest_1samp(a, popmean, axis=0):
    # Docstring is copied from the unmasked stats.ttest_1samp below.
    a, axis = _chk_asarray(a, axis)
    if a.size == 0:
        return (np.nan, np.nan)
    sample_mean = a.mean(axis=axis)
    sample_var = a.var(axis=axis, ddof=1)
    n = a.count(axis=axis)
    df = n - 1.
    # Sample variance scaled by its degrees of freedom.
    svar = ((n - 1) * sample_var) / df
    t = (sample_mean - popmean) / ma.sqrt(svar / n)
    prob = betai(0.5 * df, 0.5, df / (df + t * t))
    Ttest_1sampResult = namedtuple('Ttest_1sampResult', ('statistic', 'pvalue'))
    return Ttest_1sampResult(t, prob)
ttest_1samp.__doc__ = stats.ttest_1samp.__doc__
ttest_onesamp = ttest_1samp
def ttest_ind(a, b, axis=0):
    # Docstring is copied from the unmasked stats.ttest_ind below.
    a, b, axis = _chk2_asarray(a, b, axis)
    Ttest_indResult = namedtuple('Ttest_indResult', ('statistic', 'pvalue'))
    if a.size == 0 or b.size == 0:
        return Ttest_indResult(np.nan, np.nan)
    mean1, mean2 = a.mean(axis), b.mean(axis)
    var1, var2 = a.var(axis=axis, ddof=1), b.var(axis=axis, ddof=1)
    n1, n2 = a.count(axis), b.count(axis)
    df = n1 + n2 - 2.
    # Pooled variance of the two samples.
    svar = ((n1-1)*var1+(n2-1)*var2) / df
    t = (mean1-mean2)/ma.sqrt(svar*(1.0/n1 + 1.0/n2))  # n-D computation here!
    t = ma.filled(t, 1)  # replace NaN t-values with 1.0
    probs = betai(0.5 * df, 0.5, df/(df + t*t)).reshape(t.shape)
    return Ttest_indResult(t, probs.squeeze())
ttest_ind.__doc__ = stats.ttest_ind.__doc__
def ttest_rel(a, b, axis=0):
    # Docstring is copied from the unmasked stats.ttest_rel below.
    a, b, axis = _chk2_asarray(a, b, axis)
    if len(a) != len(b):
        raise ValueError('unequal length arrays')
    Ttest_relResult = namedtuple('Ttest_relResult', ('statistic', 'pvalue'))
    if a.size == 0 or b.size == 0:
        return Ttest_relResult(np.nan, np.nan)
    n = a.count(axis)
    df = (n-1.0)
    diff = (a-b).astype('d')
    # Standard error of the mean paired difference.
    denom = ma.sqrt((n*ma.add.reduce(diff*diff,axis) -
                     ma.add.reduce(diff,axis)**2) / df)
    t = ma.add.reduce(diff, axis) / denom
    t = ma.filled(t, 1)
    probs = betai(0.5*df,0.5,df/(df+t*t)).reshape(t.shape).squeeze()
    return Ttest_relResult(t, probs)
ttest_rel.__doc__ = stats.ttest_rel.__doc__
# stats.chisquare works with masked arrays, so we don't need to
# implement it here.
# For backwards compatibilty, stats.chisquare is included in
# the stats.mstats namespace.
chisquare = stats.chisquare
def mannwhitneyu(x,y, use_continuity=True):
    """
    Computes the Mann-Whitney statistic
    Missing values in `x` and/or `y` are discarded.
    Parameters
    ----------
    x : sequence
        Input
    y : sequence
        Input
    use_continuity : {True, False}, optional
        Whether a continuity correction (1/2.) should be taken into account.
    Returns
    -------
    statistic : float
        The Mann-Whitney statistics
    pvalue : float
        Approximate p-value assuming a normal distribution.
    """
    x = ma.asarray(x).compressed().view(ndarray)
    y = ma.asarray(y).compressed().view(ndarray)
    # Rank the pooled samples.
    ranks = rankdata(np.concatenate([x,y]))
    (nx, ny) = (len(x), len(y))
    nt = nx + ny
    U = ranks[:nx].sum() - nx*(nx+1)/2.
    U = max(U, nx*ny - U)
    u = nx*ny - U
    # Normal approximation with tie correction.
    mu = (nx*ny)/2.
    sigsq = (nt**3 - nt)/12.
    ties = count_tied_groups(ranks)
    # Fix: use the builtin sum — calling np.sum on a generator is
    # deprecated in NumPy (and removed in recent versions).
    sigsq -= sum(v*(k**3-k) for (k,v) in iteritems(ties))/12.
    sigsq *= nx*ny/float(nt*(nt-1))
    if use_continuity:
        z = (U - 1/2. - mu) / ma.sqrt(sigsq)
    else:
        z = (U - mu) / ma.sqrt(sigsq)
    prob = special.erfc(abs(z)/np.sqrt(2))
    MannwhitneyuResult = namedtuple('MannwhitneyuResult', ('statistic',
                                                           'pvalue'))
    return MannwhitneyuResult(u, prob)
def kruskalwallis(*args):
    # Docstring is copied from the unmasked stats.kruskal below.
    output = argstoarray(*args)
    # Ranks of all values pooled across groups; missing values get rank 0
    # and are masked out.
    ranks = ma.masked_equal(rankdata(output, use_missing=False), 0)
    sumrk = ranks.sum(-1)
    ngrp = ranks.count(-1)
    ntot = ranks.count()
    H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1)
    # Tie correction
    ties = count_tied_groups(ranks)
    # Fix: use the builtin sum — calling np.sum on a generator is
    # deprecated in NumPy (and removed in recent versions).
    T = 1. - sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot)
    if T == 0:
        raise ValueError('All numbers are identical in kruskal')
    H /= T
    df = len(output) - 1
    prob = stats.chisqprob(H,df)
    KruskalResult = namedtuple('KruskalResult', ('statistic', 'pvalue'))
    return KruskalResult(H, prob)
kruskal = kruskalwallis
kruskalwallis.__doc__ = stats.kruskal.__doc__
def ks_twosamp(data1, data2, alternative="two-sided"):
"""
Computes the Kolmogorov-Smirnov test on two samples.
Missing values are discarded.
Parameters
----------
data1 : array_like
First data set
data2 : array_like
Second data set
alternative : {'two-sided', 'less', 'greater'}, optional
Indicates the alternative hypothesis. Default is 'two-sided'.
Returns
-------
d : float
Value of the Kolmogorov Smirnov test
p : float
Corresponding p-value.
"""
(data1, data2) = (ma.asarray(data1), ma.asarray(data2))
(n1, n2) = (data1.count(), data2.count())
n = (n1*n2/float(n1+n2))
mix = ma.concatenate((data1.compressed(), data2.compressed()))
mixsort = mix.argsort(kind='mergesort')
csum = np.where(mixsort < n1, 1./n1, -1./n2).cumsum()
# Check for ties
if len(np.unique(mix)) < (n1+n2):
csum = csum[np.r_[np.diff(mix[mixsort]).nonzero()[0],-1]]
alternative = str(alternative).lower()[0]
if alternative == 't':
d = ma.abs(csum).max()
prob = special.kolmogorov(np.sqrt(n)*d)
elif alternative == 'l':
d = -csum.min()
prob = np.exp(-2*n*d**2)
elif alternative == 'g':
d = csum.max()
prob = np.exp(-2*n*d**2)
else:
raise ValueError("Invalid value for the alternative hypothesis: "
"should be in 'two-sided', 'less' or 'greater'")
return (d, prob)
ks_2samp = ks_twosamp
def ks_twosamp_old(data1, data2):
    """ Computes the Kolmogorov-Smirnov statistic on 2 samples.
    Returns
    -------
    KS D-value, p-value
    """
    # Drop masked values and delegate to the unmasked implementation.
    data1 = ma.asarray(data1).compressed()
    data2 = ma.asarray(data2).compressed()
    return stats.ks_2samp(data1, data2)
def threshold(a, threshmin=None, threshmax=None, newval=0):
    """
    Clip array to a given value.
    Similar to numpy.clip(), except that values less than `threshmin` or
    greater than `threshmax` are replaced by `newval`, instead of by
    `threshmin` and `threshmax` respectively.
    Parameters
    ----------
    a : ndarray
        Input data
    threshmin : {None, float}, optional
        Lower threshold. If None, set to the minimum value.
    threshmax : {None, float}, optional
        Upper threshold. If None, set to the maximum value.
    newval : {0, float}, optional
        Value outside the thresholds.
    Returns
    -------
    threshold : ndarray
        Returns `a`, with values less then `threshmin` and values greater
        `threshmax` replaced with `newval`.
    """
    a = ma.array(a, copy=True)
    # Boolean mask of the positions to overwrite with `newval`.
    out_of_range = np.zeros(a.shape, dtype=bool)
    if threshmin is not None:
        out_of_range |= (a < threshmin).filled(False)
    if threshmax is not None:
        out_of_range |= (a > threshmax).filled(False)
    a[out_of_range] = newval
    return a
def trima(a, limits=None, inclusive=(True,True)):
    """
    Trims an array by masking the data outside some given limits.
    Returns a masked version of the input array.
    Parameters
    ----------
    a : array_like
        Input array.
    limits : {None, tuple}, optional
        Tuple of (lower limit, upper limit) in absolute values.
        Values of the input array lower (greater) than the lower (upper) limit
        will be masked. A limit is None indicates an open interval.
    inclusive : (bool, bool) tuple, optional
        Tuple of (lower flag, upper flag), indicating whether values exactly
        equal to the lower (upper) limit are allowed.
    """
    a = ma.asarray(a)
    a.unshare_mask()
    if (limits is None) or (limits == (None, None)):
        return a
    lower_lim, upper_lim = limits
    lower_in, upper_in = inclusive
    # Build the boolean condition marking values outside the limits;
    # strict vs non-strict comparison depends on the inclusive flags.
    out_of_bounds = False
    if lower_lim is not None:
        out_of_bounds = out_of_bounds | (
            (a < lower_lim) if lower_in else (a <= lower_lim))
    if upper_lim is not None:
        out_of_bounds = out_of_bounds | (
            (a > upper_lim) if upper_in else (a >= upper_lim))
    a[out_of_bounds.filled(True)] = masked
    return a
def trimr(a, limits=None, inclusive=(True, True), axis=None):
    """
    Trims an array by masking some proportion of the data on each end.
    Returns a masked version of the input array.
    Parameters
    ----------
    a : sequence
        Input array.
    limits : {None, tuple}, optional
        Tuple of the percentages to cut on each side of the array, with respect
        to the number of unmasked data, as floats between 0. and 1.
        Noting n the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming is
        n*(1.-sum(limits)). The value of one limit can be set to None to
        indicate an open interval.
    inclusive : {(True,True) tuple}, optional
        Tuple of flags indicating whether the number of data being masked on
        the left (right) end should be truncated (True) or rounded (False) to
        integers.
    axis : {None,int}, optional
        Axis along which to trim. If None, the whole array is trimmed, but its
        shape is maintained.
    """
    def _trim1d(data, low_limit, up_limit, low_inclusive, up_inclusive):
        # Mask the requested fraction of smallest / largest unmasked values.
        n = data.count()
        order = data.argsort()
        if low_limit:
            cut = int(low_limit*n) if low_inclusive else np.round(low_limit*n)
            data[order[:cut]] = masked
        if up_limit is not None:
            keep = n - (int(n*up_limit) if up_inclusive else np.round(n*up_limit))
            data[order[keep:]] = masked
        return data
    a = ma.asarray(a)
    a.unshare_mask()
    if limits is None:
        return a
    # Check the limits
    (lolim, uplim) = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lolim is not None:
        if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
    if uplim is not None:
        if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)
    (loinc, upinc) = inclusive
    if axis is None:
        shp = a.shape
        return _trim1d(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
    return ma.apply_along_axis(_trim1d, axis, a, lolim, uplim, loinc, upinc)
trimdoc = """
Parameters
----------
a : sequence
Input array
limits : {None, tuple}, optional
If `relative` is False, tuple (lower limit, upper limit) in absolute values.
Values of the input array lower (greater) than the lower (upper) limit are
masked.
If `relative` is True, tuple (lower percentage, upper percentage) to cut
on each side of the array, with respect to the number of unmasked data.
Noting n the number of unmasked data before trimming, the (n*limits[0])th
smallest data and the (n*limits[1])th largest data are masked, and the
total number of unmasked data after trimming is n*(1.-sum(limits))
In each case, the value of one limit can be set to None to indicate an
open interval.
If limits is None, no trimming is performed
inclusive : {(bool, bool) tuple}, optional
If `relative` is False, tuple indicating whether values exactly equal
to the absolute limits are allowed.
If `relative` is True, tuple indicating whether the number of data
being masked on each side should be rounded (True) or truncated
(False).
relative : bool, optional
Whether to consider the limits as absolute values (False) or proportions
to cut (True).
axis : int, optional
Axis along which to trim.
"""
def trim(a, limits=None, inclusive=(True,True), relative=False, axis=None):
    """
    Trims an array by masking the data outside some given limits.
    Returns a masked version of the input array.
    %s
    Examples
    --------
    >>> z = [ 1, 2, 3, 4, 5, 6, 7, 8, 9,10]
    >>> trim(z,(3,8))
    [--,--, 3, 4, 5, 6, 7, 8,--,--]
    >>> trim(z,(0.1,0.2),relative=True)
    [--, 2, 3, 4, 5, 6, 7, 8,--,--]
    """
    # Dispatch on `relative`: proportional trimming vs absolute limits.
    if relative:
        return trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    return trima(a, limits=limits, inclusive=inclusive)
if trim.__doc__ is not None:
    trim.__doc__ = trim.__doc__ % trimdoc
def trimboth(data, proportiontocut=0.2, inclusive=(True,True), axis=None):
    """
    Trims the smallest and largest data values.
    Trims the `data` by masking the ``int(proportiontocut * n)`` smallest and
    ``int(proportiontocut * n)`` largest values of data along the given axis,
    where n is the number of unmasked values before trimming.
    Parameters
    ----------
    data : ndarray
        Data to trim.
    proportiontocut : float, optional
        Percentage of trimming (as a float between 0 and 1).
        If n is the number of unmasked values before trimming, the number of
        values after trimming is ``(1 - 2*proportiontocut) * n``.
        Default is 0.2.
    inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False).
    axis : int, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened.
    """
    # Same proportion cut from both tails.
    symmetric_limits = (proportiontocut, proportiontocut)
    return trimr(data, limits=symmetric_limits, inclusive=inclusive, axis=axis)
def trimtail(data, proportiontocut=0.2, tail='left', inclusive=(True,True),
             axis=None):
    """
    Trims the data by masking values from one tail.
    Parameters
    ----------
    data : array_like
        Data to trim.
    proportiontocut : float, optional
        Percentage of trimming. If n is the number of unmasked values
        before trimming, the number of values after trimming is
        ``(1 - proportiontocut) * n``.  Default is 0.2.
    tail : {'left','right'}, optional
        If 'left' the `proportiontocut` lowest values will be masked.
        If 'right' the `proportiontocut` highest values will be masked.
        Default is 'left'.
    inclusive : {(bool, bool) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False).  Default is
        (True, True).
    axis : int, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened.  Default is None.
    Returns
    -------
    trimtail : ndarray
        Returned array of same shape as `data` with masked tail values.
    """
    # Only the first letter of `tail` matters, case-insensitively.
    key = str(tail).lower()[0]
    tail_limits = {'l': (proportiontocut, None),
                   'r': (None, proportiontocut)}
    if key not in tail_limits:
        raise TypeError("The tail argument should be in ('left','right')")
    return trimr(data, limits=tail_limits[key], axis=axis, inclusive=inclusive)
trim1 = trimtail
def trimmed_mean(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                 axis=None):
    """Returns the trimmed mean of the data along the given axis.
    %s
    """ % trimdoc
    # A bare float means "trim the same proportion on both sides".
    if isinstance(limits, float) and not isinstance(limits, tuple):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.mean(axis=axis)
def trimmed_var(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                axis=None, ddof=0):
    """Returns the trimmed variance of the data along the given axis.
    %s
    ddof : {0,integer}, optional
        Delta Degrees of Freedom: the denominator used in the computation
        is (n - ddof).  ddof=0 gives a biased estimate of the variance,
        ddof=1 an unbiased one.
    """ % trimdoc
    # A bare float means "trim the same proportion on both sides".
    if isinstance(limits, float) and not isinstance(limits, tuple):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.var(axis=axis, ddof=ddof)
def trimmed_std(a, limits=(0.1,0.1), inclusive=(1,1), relative=True,
                axis=None, ddof=0):
    """Returns the trimmed standard deviation of the data along the given axis.
    %s
    ddof : {0,integer}, optional
        Delta Degrees of Freedom: the denominator used in the computation
        is (n - ddof).  ddof=0 gives a biased estimate of the variance,
        ddof=1 an unbiased one.
    """ % trimdoc
    # A bare float means "trim the same proportion on both sides".
    if isinstance(limits, float) and not isinstance(limits, tuple):
        limits = (limits, limits)
    if relative:
        trimmed = trimr(a, limits=limits, inclusive=inclusive, axis=axis)
    else:
        trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.std(axis=axis, ddof=ddof)
def trimmed_stde(a, limits=(0.1,0.1), inclusive=(1,1), axis=None):
    """
    Returns the standard error of the trimmed mean along the given axis.

    Parameters
    ----------
    a : sequence
        Input array
    limits : {(0.1,0.1), tuple of float}, optional
        tuple (lower percentage, upper percentage) to cut on each side of the
        array, with respect to the number of unmasked data.  If n is the
        number of unmasked data before trimming, the values smaller than
        ``n * limits[0]`` and the values larger than ``n * limits[1]`` are
        masked, and the total number of unmasked data after trimming is
        ``n * (1.-sum(limits))``.  The value of one limit can be set to None
        to indicate an open interval.  If `limits` is None, no trimming is
        performed.
    inclusive : {(bool, bool) tuple} optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False).
    axis : int, optional
        Axis along which to trim.

    Returns
    -------
    trimmed_stde : scalar or ndarray
    """
    def _trimmed_stde_1D(a, low_limit, up_limit, low_inclusive, up_inclusive):
        "Returns the standard error of the trimmed mean for a 1D input data."
        n = a.count()
        idx = a.argsort()
        # Default to "no trimming" on each side so that the winsorizing step
        # below is always well defined.  (The previous code left lowidx/upidx
        # unbound when a limit was None or 0, raising NameError, and used the
        # None limits directly in the final scaling, raising TypeError.)
        lowidx = 0
        upidx = n
        if low_limit:
            if low_inclusive:
                lowidx = int(low_limit*n)
            else:
                # int(): np.round returns a float, which is not a valid index.
                lowidx = int(np.round(low_limit*n))
            a[idx[:lowidx]] = masked
        if up_limit is not None:
            if up_inclusive:
                upidx = n - int(n*up_limit)
            else:
                upidx = n - int(np.round(n*up_limit))
            a[idx[upidx:]] = masked
        # Winsorize: replace each trimmed value by the nearest kept value
        # (both assignments are no-ops when the corresponding side is open).
        a[idx[:lowidx]] = a[idx[lowidx]]
        a[idx[upidx:]] = a[idx[upidx-1]]
        winstd = a.std(ddof=1)
        # An open (None) limit trims a proportion of 0.
        lo_prop = low_limit if low_limit is not None else 0.
        up_prop = up_limit if up_limit is not None else 0.
        return winstd / ((1-lo_prop-up_prop)*np.sqrt(len(a)))

    a = ma.array(a, copy=True, subok=True)
    a.unshare_mask()
    if limits is None:
        return a.std(axis=axis,ddof=1)/ma.sqrt(a.count(axis))
    if (not isinstance(limits,tuple)) and isinstance(limits,float):
        limits = (limits, limits)

    # Check the limits
    (lolim, uplim) = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lolim is not None:
        if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
    if uplim is not None:
        if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)

    (loinc, upinc) = inclusive
    if (axis is None):
        return _trimmed_stde_1D(a.ravel(),lolim,uplim,loinc,upinc)
    else:
        if a.ndim > 2:
            raise ValueError("Array 'a' must be at most two dimensional, but got a.ndim = %d" % a.ndim)
        return ma.apply_along_axis(_trimmed_stde_1D, axis, a,
                                   lolim,uplim,loinc,upinc)
def tmean(a, limits=None, inclusive=(True,True)):
    # Mask values outside `limits`, then average the remaining data.
    trimmed = trima(a, limits=limits, inclusive=inclusive)
    return trimmed.mean()
tmean.__doc__ = stats.tmean.__doc__
def tvar(a, limits=None, inclusive=(True,True)):
    a = a.astype(float).ravel()
    if limits is not None:
        raise ValueError('mstats.tvar() with limits not implemented yet so far')
    # trima().var() is the biased (ddof=0) variance; rescale by n/(n-1)
    # to make the estimate unbiased.
    n = (~a.mask).sum()  # todo: better way to do that?
    return trima(a, limits=limits, inclusive=inclusive).var() * (n/(n-1.))
tvar.__doc__ = stats.tvar.__doc__
def tmin(a, lowerlimit=None, axis=0, inclusive=True):
    a, axis = _chk_asarray(a, axis)
    # Mask everything below `lowerlimit`, then take the minimum of the rest.
    below_masked = trima(a, (lowerlimit, None), (inclusive, False))
    return ma.minimum.reduce(below_masked, axis)
tmin.__doc__ = stats.tmin.__doc__
def tmax(a, upperlimit, axis=0, inclusive=True):
    a, axis = _chk_asarray(a, axis)
    # Mask everything above `upperlimit`, then take the maximum of the rest.
    above_masked = trima(a, (None, upperlimit), (False, inclusive))
    return ma.maximum.reduce(above_masked, axis)
tmax.__doc__ = stats.tmax.__doc__
def tsem(a, limits=None, inclusive=(True,True)):
    a = ma.asarray(a).ravel()
    if limits is None:
        # No trimming requested: plain standard error of the mean.
        return a.std(ddof=1) / ma.sqrt(float(a.count()))
    trimmed = trima(a.ravel(), limits, inclusive)
    return np.sqrt(trimmed.var(ddof=1)) / np.sqrt(trimmed.count())
tsem.__doc__ = stats.tsem.__doc__
def winsorize(a, limits=None, inclusive=(True, True), inplace=False,
              axis=None):
    """Returns a Winsorized version of the input array.

    The (limits[0])th lowest values are set to the (limits[0])th percentile,
    and the (limits[1])th highest values are set to the (1 - limits[1])th
    percentile.
    Masked values are skipped.

    Parameters
    ----------
    a : sequence
        Input array.
    limits : {None, tuple of float}, optional
        Tuple of the percentages to cut on each side of the array, with respect
        to the number of unmasked data, as floats between 0. and 1.
        Noting n the number of unmasked data before trimming, the
        (n*limits[0])th smallest data and the (n*limits[1])th largest data are
        masked, and the total number of unmasked data after trimming
        is n*(1.-sum(limits)) The value of one limit can be set to None to
        indicate an open interval.
    inclusive : {(True, True) tuple}, optional
        Tuple indicating whether the number of data being masked on each side
        should be rounded (True) or truncated (False).
    inplace : {False, True}, optional
        Whether to winsorize in place (True) or to use a copy (False)
    axis : {None, int}, optional
        Axis along which to trim. If None, the whole array is trimmed, but its
        shape is maintained.

    Notes
    -----
    This function is applied to reduce the effect of possibly spurious outliers
    by limiting the extreme values.
    """
    def _winsorize1D(a, low_limit, up_limit, low_include, up_include):
        n = a.count()
        idx = a.argsort()
        if low_limit:
            if low_include:
                lowidx = int(low_limit * n)
            else:
                # int(): np.round returns a float, which cannot be used as
                # a slice index (TypeError on modern numpy).
                lowidx = int(np.round(low_limit * n))
            # Clamp the lowest values to the first kept value.
            a[idx[:lowidx]] = a[idx[lowidx]]
        if up_limit is not None:
            if up_include:
                upidx = n - int(n * up_limit)
            else:
                upidx = n - int(np.round(n * up_limit))
            # Clamp the highest values to the last kept value.
            a[idx[upidx:]] = a[idx[upidx - 1]]
        return a

    # We are going to modify a: better make a copy
    a = ma.array(a, copy=np.logical_not(inplace))
    if limits is None:
        return a
    if (not isinstance(limits, tuple)) and isinstance(limits, float):
        limits = (limits, limits)

    # Check the limits
    (lolim, uplim) = limits
    errmsg = "The proportion to cut from the %s should be between 0. and 1."
    if lolim is not None:
        if lolim > 1. or lolim < 0:
            raise ValueError(errmsg % 'beginning' + "(got %s)" % lolim)
    if uplim is not None:
        if uplim > 1. or uplim < 0:
            raise ValueError(errmsg % 'end' + "(got %s)" % uplim)

    (loinc, upinc) = inclusive
    if axis is None:
        # Flatten, winsorize, then restore the original shape.
        shp = a.shape
        return _winsorize1D(a.ravel(), lolim, uplim, loinc, upinc).reshape(shp)
    else:
        return ma.apply_along_axis(_winsorize1D, axis, a, lolim, uplim, loinc,
                                   upinc)
def moment(a, moment=1, axis=0):
    # Masked-array k-th central moment along `axis` (docstring is copied
    # from stats.moment below).
    a, axis = _chk_asarray(a, axis)
    if moment == 1:
        # By definition the first moment about the mean is 0.
        shape = list(a.shape)
        del shape[axis]
        if shape:
            # return an actual array of the appropriate shape
            return np.zeros(shape, dtype=float)
        else:
            # the input was 1D, so return a scalar instead of a rank-0 array
            return np.float64(0.0)
    else:
        # Exponentiation by squares: form exponent sequence.
        # Use floor division so the sequence stays integral under Python 3
        # true-division semantics (plain "/" would turn the exponents into
        # floats).
        n_list = [moment]
        current_n = moment
        while current_n > 2:
            if current_n % 2:
                current_n = (current_n-1)//2
            else:
                current_n //= 2
            n_list.append(current_n)

        # Starting point for exponentiation by squares
        a_zero_mean = a - ma.expand_dims(a.mean(axis), axis)
        if n_list[-1] == 1:
            s = a_zero_mean.copy()
        else:
            s = a_zero_mean**2

        # Perform multiplications
        for n in n_list[-2::-1]:
            s = s**2
            if n % 2:
                s *= a_zero_mean
        return s.mean(axis)
moment.__doc__ = stats.moment.__doc__
def variation(a, axis=0):
    a, axis = _chk_asarray(a, axis)
    # Coefficient of variation: standard deviation scaled by the mean.
    std = a.std(axis)
    mean = a.mean(axis)
    return std / mean
variation.__doc__ = stats.variation.__doc__
def skew(a, axis=0, bias=True):
    # Masked-array skewness: m3 / m2**1.5 built from the 2nd and 3rd
    # central moments (docstring is copied from stats.skew below).
    a, axis = _chk_asarray(a,axis)
    n = a.count(axis)
    m2 = moment(a, 2, axis)
    m3 = moment(a, 3, axis)
    # Silence divide-by-zero warnings; the m2 == 0 entries are mapped to 0
    # by the ma.where below.
    olderr = np.seterr(all='ignore')
    try:
        vals = ma.where(m2 == 0, 0, m3 / m2**1.5)
    finally:
        np.seterr(**olderr)
    if not bias:
        # The bias correction is only applied where n > 2 and the variance
        # is strictly positive.
        can_correct = (n > 2) & (m2 > 0)
        if can_correct.any():
            m2 = np.extract(can_correct, m2)
            m3 = np.extract(can_correct, m3)
            nval = ma.sqrt((n-1.0)*n)/(n-2.0)*m3/m2**1.5
            # Overwrite, in place, only the entries where the correction
            # applies.
            np.place(vals, can_correct, nval)
    return vals
skew.__doc__ = stats.skew.__doc__
def kurtosis(a, axis=0, fisher=True, bias=True):
    # Masked-array kurtosis: m4 / m2**2 built from the 2nd and 4th central
    # moments (docstring is copied from stats.kurtosis below).
    a, axis = _chk_asarray(a, axis)
    m2 = moment(a, 2, axis)
    m4 = moment(a, 4, axis)
    # Silence divide-by-zero warnings; the m2 == 0 entries are mapped to 0
    # by the ma.where below.
    olderr = np.seterr(all='ignore')
    try:
        vals = ma.where(m2 == 0, 0, m4 / m2**2.0)
    finally:
        np.seterr(**olderr)
    if not bias:
        n = a.count(axis)
        # NOTE(review): the right operand mixes a Python `and` with an
        # elementwise comparison; `m2 is not ma.masked` is an identity check
        # on the whole array, not elementwise — confirm this is intended.
        can_correct = (n > 3) & (m2 is not ma.masked and m2 > 0)
        if can_correct.any():
            n = np.extract(can_correct, n)
            m2 = np.extract(can_correct, m2)
            m4 = np.extract(can_correct, m4)
            # Unbiased estimate, shifted back by +3.0 to the Pearson scale
            # before the optional Fisher conversion below.
            nval = 1.0/(n-2)/(n-3)*((n*n-1.0)*m4/m2**2.0-3*(n-1)**2.0)
            np.place(vals, can_correct, nval+3.0)
    if fisher:
        # Fisher definition: excess kurtosis (Pearson value minus 3).
        return vals - 3
    else:
        return vals
kurtosis.__doc__ = stats.kurtosis.__doc__
def describe(a, axis=0, ddof=0):
    """
    Computes several descriptive statistics of the passed array.

    Parameters
    ----------
    a : array_like
        Data array
    axis : int or None, optional
        Axis along which to calculate statistics. Default 0. If None,
        compute over the whole array `a`.
    ddof : int, optional
        Degree of freedom (default 0); note that this default differs
        from the one used by the same routine in stats.describe.

    Returns
    -------
    DescribeResult : namedtuple
        Fields: ``nobs`` (number of unmasked values), ``minmax``, ``mean``,
        ``variance``, ``skewness`` (biased), ``kurtosis`` (biased).

    Examples
    --------
    >>> ma = np.ma.array(range(6), mask=[0, 0, 0, 1, 1, 1])
    >>> describe(ma)
    (array(3),
     (0, 2),
     1.0,
     1.0,
     masked_array(data = 0.0,
                  mask = False,
            fill_value = 1e+20)
    ,
     -1.5)
    """
    a, axis = _chk_asarray(a, axis)
    DescribeResult = namedtuple('DescribeResult', ('nobs', 'minmax', 'mean',
                                                   'variance', 'skewness',
                                                   'kurtosis'))
    return DescribeResult(nobs=a.count(axis),
                          minmax=(ma.minimum.reduce(a), ma.maximum.reduce(a)),
                          mean=a.mean(axis),
                          variance=a.var(axis, ddof=ddof),
                          skewness=skew(a, axis),
                          kurtosis=kurtosis(a, axis))
def stde_median(data, axis=None):
    """Returns the McKean-Schrader estimate of the standard error of the
    sample median along the given axis.  Masked values are discarded.

    Parameters
    ----------
    data : ndarray
        Data to trim.
    axis : {None,int}, optional
        Axis along which to perform the trimming.
        If None, the input array is first flattened.
    """
    def _stdemed_1D(sample):
        sorted_vals = np.sort(sample.compressed())
        n = len(sorted_vals)
        # z is the 0.995 standard-normal quantile used by the estimator.
        z = 2.5758293035489004
        k = int(np.round((n+1)/2. - z * np.sqrt(n/4.),0))
        return (sorted_vals[n-k] - sorted_vals[k-1]) / (2.*z)

    data = ma.array(data, copy=False, subok=True)
    if axis is None:
        return _stdemed_1D(data)
    if data.ndim > 2:
        raise ValueError("Array 'data' must be at most two dimensional, "
                         "but got data.ndim = %d" % data.ndim)
    return ma.apply_along_axis(_stdemed_1D, axis, data)
def skewtest(a, axis=0):
    # Masked-array test of whether the sample skewness is consistent with a
    # normal distribution (docstring is copied from stats.skewtest below).
    a, axis = _chk_asarray(a, axis)
    if axis is None:
        a = a.ravel()
        axis = 0
    b2 = skew(a,axis)
    n = a.count(axis)
    if np.min(n) < 8:
        raise ValueError(
            "skewtest is not valid with less than 8 samples; %i samples"
            " were given." % np.min(n))
    # y: sample skewness scaled by a function of n.
    y = b2 * ma.sqrt(((n+1)*(n+3)) / (6.0*(n-2)))
    beta2 = (3.0*(n*n+27*n-70)*(n+1)*(n+3)) / ((n-2.0)*(n+5)*(n+7)*(n+9))
    W2 = -1 + ma.sqrt(2*(beta2-1))
    delta = 1/ma.sqrt(0.5*ma.log(W2))
    alpha = ma.sqrt(2.0/(W2-1))
    # Replace y == 0 by 1 to keep the logarithmic transform below defined.
    y = ma.where(y == 0, 1, y)
    # Z: the transformed statistic (an inverse-sinh transform of y/alpha).
    Z = delta*ma.log(y/alpha + ma.sqrt((y/alpha)**2+1))
    SkewtestResult = namedtuple('SkewtestResult', ('statistic', 'pvalue'))
    # Two-sided p-value from the standard-normal survival function.
    return SkewtestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
skewtest.__doc__ = stats.skewtest.__doc__
def kurtosistest(a, axis=0):
    # Masked-array test of whether the sample kurtosis is consistent with a
    # normal distribution (docstring is copied from stats.kurtosistest below).
    a, axis = _chk_asarray(a, axis)
    n = a.count(axis=axis)
    if np.min(n) < 5:
        raise ValueError(
            "kurtosistest requires at least 5 observations; %i observations"
            " were given." % np.min(n))
    if np.min(n) < 20:
        warnings.warn(
            "kurtosistest only valid for n>=20 ... continuing anyway, n=%i" %
            np.min(n))
    # b2: sample (Pearson) kurtosis; E and varb2 are its mean and variance
    # under normality; x is the standardized statistic.
    b2 = kurtosis(a, axis, fisher=False)
    E = 3.0*(n-1) / (n+1)
    varb2 = 24.0*n*(n-2.)*(n-3) / ((n+1)*(n+1.)*(n+3)*(n+5))
    x = (b2-E)/ma.sqrt(varb2)
    sqrtbeta1 = 6.0*(n*n-5*n+2)/((n+7)*(n+9)) * np.sqrt((6.0*(n+3)*(n+5)) /
                                                        (n*(n-2)*(n-3)))
    A = 6.0 + 8.0/sqrtbeta1 * (2.0/sqrtbeta1 + np.sqrt(1+4.0/(sqrtbeta1**2)))
    term1 = 1 - 2./(9.0*A)
    denom = 1 + x*ma.sqrt(2/(A-4.0))
    # A negative denominator would make the cube root below undefined, so
    # mask those entries instead of computing them.
    if np.ma.isMaskedArray(denom):
        # For multi-dimensional array input
        denom[denom < 0] = masked
    elif denom < 0:
        denom = masked
    term2 = ma.power((1-2.0/A)/denom,1/3.0)
    Z = (term1 - term2) / np.sqrt(2/(9.0*A))
    KurtosistestResult = namedtuple('KurtosistestResult', ('statistic',
                                                           'pvalue'))
    # Two-sided p-value from the standard-normal survival function.
    return KurtosistestResult(Z, 2 * distributions.norm.sf(np.abs(Z)))
kurtosistest.__doc__ = stats.kurtosistest.__doc__
def normaltest(a, axis=0):
    a, axis = _chk_asarray(a, axis)
    # Combine the skewness and kurtosis z-scores into the k^2 statistic,
    # which is chi-square distributed with 2 degrees of freedom.
    skew_z, _ = skewtest(a, axis)
    kurt_z, _ = kurtosistest(a, axis)
    statistic = skew_z*skew_z + kurt_z*kurt_z
    NormaltestResult = namedtuple('NormaltestResult', ('statistic', 'pvalue'))
    return NormaltestResult(statistic, stats.chisqprob(statistic, 2))
normaltest.__doc__ = stats.normaltest.__doc__
def mquantiles(a, prob=list([.25,.5,.75]), alphap=.4, betap=.4, axis=None,
               limit=()):
    """
    Computes empirical quantiles for a data array.

    Sample quantiles are defined by ``Q(p) = (1-gamma)*x[j] + gamma*x[j+1]``,
    where ``x[j]`` is the j-th order statistic, ``j = floor(n*p + m)``,
    ``m = alphap + p*(1 - alphap - betap)`` and ``g = n*p + m - j``.
    The (alphap, betap) pair selects the plotting-position convention; with
    ``p(k) = (k - alphap)/(n + 1 - alphap - betap)`` the usual choices are:

    - (0,1)     : linear interpolation of cdf (R type 4)
    - (.5,.5)   : piecewise linear function (R type 5)
    - (0,0)     : R type 6
    - (1,1)     : p(k) = mode[F(x[k])] (R type 7, R default)
    - (1/3,1/3) : approximately median-unbiased (R type 8)
    - (3/8,3/8) : Blom, approximately unbiased for normal data (R type 9)
    - (.4,.4)   : approximately quantile unbiased (Cunnane)
    - (.35,.35) : APL, used with PWM

    Parameters
    ----------
    a : array_like
        Input data, as a sequence or array of dimension at most 2.
    prob : array_like, optional
        List of quantiles to compute.
    alphap : float, optional
        Plotting positions parameter, default is 0.4.
    betap : float, optional
        Plotting positions parameter, default is 0.4.
    axis : int, optional
        Axis along which to perform the trimming.
        If None (default), the input array is first flattened.
    limit : tuple, optional
        Tuple of (lower, upper) values.
        Values of `a` outside this open interval are ignored.

    Returns
    -------
    mquantiles : MaskedArray
        An array containing the calculated quantiles.

    References
    ----------
    .. [1] *R* statistical software: http://www.r-project.org/
    .. [2] *R* ``quantile`` function:
            http://stat.ethz.ch/R-manual/R-devel/library/stats/html/quantile.html

    Examples
    --------
    >>> from scipy.stats.mstats import mquantiles
    >>> a = np.array([6., 47., 49., 15., 42., 41., 7., 39., 43., 40., 36.])
    >>> mquantiles(a)
    array([ 19.2,  40. ,  42.8])
    """
    def _quantiles1D(data, m, p):
        # Work on the sorted, unmasked values only.
        x = np.sort(data.compressed())
        n = len(x)
        if n == 0:
            # Everything masked: return fully-masked quantiles.
            return ma.array(np.empty(len(p), dtype=float), mask=True)
        if n == 1:
            # A single value is every quantile at once.
            return ma.array(np.resize(x, p.shape), mask=nomask)
        aleph = (n*p + m)
        k = np.floor(aleph.clip(1, n-1)).astype(int)
        gamma = (aleph - k).clip(0, 1)
        # Linear interpolation between consecutive order statistics.
        return (1. - gamma)*x[(k-1).tolist()] + gamma*x[k.tolist()]

    data = ma.array(a, copy=False)
    if data.ndim > 2:
        raise TypeError("Array should be 2D at most !")

    if limit:
        # Mask out everything outside the open interval (limit[0], limit[1]).
        inside = (limit[0] < data) & (data < limit[1])
        data[~inside.filled(True)] = masked

    p = np.array(prob, copy=False, ndmin=1)
    m = alphap + p*(1. - alphap - betap)
    # Computes quantiles along axis (or globally)
    if axis is None:
        return _quantiles1D(data, m, p)
    return ma.apply_along_axis(_quantiles1D, axis, data, m, p)
def scoreatpercentile(data, per, limit=(), alphap=.4, betap=.4):
    """Calculate the score at the given 'per' percentile of the
    sequence a. For example, the score at per=50 is the median.

    This function is a shortcut to mquantile
    """
    # Reject percentiles outside the [0, 100] range.
    if per < 0 or per > 100.:
        raise ValueError("The percentile should be between 0. and 100. !"
                         " (got %s)" % per)
    # Delegate to mquantiles with the percentile expressed as a fraction.
    return mquantiles(data, prob=[per/100.], alphap=alphap, betap=betap,
                      limit=limit, axis=0).squeeze()
def plotting_positions(data, alpha=0.4, beta=0.4):
    """
    Returns plotting positions (or empirical percentile points) for the data.

    Plotting positions are defined as ``(i-alpha)/(n+1-alpha-beta)``, where:
        - i is the rank order statistics
        - n is the number of unmasked values along the given axis
        - `alpha` and `beta` are two parameters.

    Typical (alpha, beta) choices include (0,1) (R type 4), (.5,.5) (R
    type 5), (0,0) (Weibull, R type 6), (1,1) (R default, type 7),
    (1/3,1/3) (approximately median-unbiased, R type 8), (3/8,3/8) (Blom,
    R type 9), (.4,.4) (Cunnane, approximately quantile unbiased),
    (.35,.35) (APL, used with PWM) and (.3175,.3175) (used in
    scipy.stats.probplot).

    Parameters
    ----------
    data : array_like
        Input data, as a sequence or array of dimension at most 2.
    alpha : float, optional
        Plotting positions parameter. Default is 0.4.
    beta : float, optional
        Plotting positions parameter. Default is 0.4.

    Returns
    -------
    positions : MaskedArray
        The calculated plotting positions.
    """
    data = ma.array(data, copy=False).reshape(1, -1)
    n = data.count()
    positions = np.zeros(data.size, dtype=float)
    # argsort puts the n unmasked entries first; assign each of them its
    # plotting position according to its rank.
    ranks = data.argsort()[:n]
    positions[ranks] = (np.arange(1, n + 1) - alpha) / (n + 1.0 - alpha - beta)
    return ma.array(positions, mask=data._mask)


meppf = plotting_positions
def obrientransform(*args):
    """
    Computes a transform on input data (any number of columns). Used to
    test for homogeneity of variance prior to running one-way stats. Each
    array in ``*args`` is one level of a factor. If an `f_oneway()` run on
    the transformed data and found significant, variances are unequal. From
    Maxwell and Delaney, p.112.

    Returns: transformed data for use in an ANOVA
    """
    # One column per group; v, m and n are the per-group variance, mean
    # and (float) count.
    data = argstoarray(*args).T
    v = data.var(axis=0,ddof=1)
    m = data.mean(0)
    n = data.count(0).astype(float)
    # result = ((N-1.5)*N*(a-m)**2 - 0.5*v*(n-1))/((n-1)*(n-2))
    # Evaluate the formula above in place, one step at a time.
    data -= m
    data **= 2
    data *= (n-1.5)*n
    data -= 0.5*v*(n-1)
    data /= (n-1.)*(n-2.)
    # Sanity check: the column means of the transformed data must reproduce
    # the original group variances; otherwise the transform went wrong.
    if not ma.allclose(v,data.mean(0)):
        raise ValueError("Lack of convergence in obrientransform.")
    return data
@np.deprecate(message="mstats.signaltonoise is deprecated in scipy 0.16.0")
def signaltonoise(data, axis=0):
    """Calculates the signal-to-noise ratio, as the ratio of the mean over
    standard deviation along the given axis.

    Parameters
    ----------
    data : sequence
        Input data
    axis : {0, int}, optional
        Axis along which to compute. If None, the computation is performed
        on a flat version of the array.
    """
    data = ma.array(data, copy=False)
    mean = data.mean(axis)
    # ddof=0: population standard deviation.
    std = data.std(axis, ddof=0)
    return mean / std
def sem(a, axis=0, ddof=1):
    """
    Calculates the standard error of the mean of the input array.

    Also sometimes called standard error of measurement.

    Parameters
    ----------
    a : array_like
        An array containing the values for which the standard error is
        returned.
    axis : int or None, optional
        If axis is None, ravel `a` first. If axis is an integer, this will be
        the axis over which to operate. Defaults to 0.
    ddof : int, optional
        Delta degrees-of-freedom. How many degrees of freedom to adjust
        for bias in limited samples relative to the population estimate
        of variance. Defaults to 1.

    Returns
    -------
    s : ndarray or float
        The standard error of the mean in the sample(s), along the input axis.

    Notes
    -----
    The default value for `ddof` changed in scipy 0.15.0 to be consistent with
    `stats.sem` as well as with the most common definition used (like in the R
    documentation).

    Examples
    --------
    Find standard error along the first axis:

    >>> from scipy import stats
    >>> a = np.arange(20).reshape(5,4)
    >>> stats.sem(a)
    array([ 2.8284,  2.8284,  2.8284,  2.8284])

    Find standard error across the whole array, using n degrees of freedom:

    >>> stats.sem(a, axis=None, ddof=0)
    1.2893796958227628
    """
    a, axis = _chk_asarray(a, axis)
    n = a.count(axis=axis)
    # Standard error = sample std / sqrt(number of unmasked values).
    return a.std(axis=axis, ddof=ddof) / ma.sqrt(n)
# zmap and zscore are re-exported unchanged from the plain `stats` module.
zmap = stats.zmap
zscore = stats.zscore
def f_oneway(*args):
    """
    Performs a 1-way ANOVA, returning an F-value and probability given
    any number of groups. From Heiman, pp.394-7.

    Usage: ``f_oneway(*args)``, where ``*args`` is 2 or more arrays,
    one per treatment group.

    Returns
    -------
    statistic : float
        The computed F-value of the test.
    pvalue : float
        The associated p-value from the F-distribution.
    """
    # Construct a single array of arguments: each row is a group
    data = argstoarray(*args)
    n_groups = len(data)
    n_total = data.count()
    # Decompose the total sum of squares into between- and within-group parts.
    ss_total = (data**2).sum() - (data.sum())**2 / float(n_total)
    ss_between = (data.count(-1) * (data.mean(-1) - data.mean())**2).sum()
    ss_within = ss_total - ss_between
    df_between = n_groups - 1
    df_within = n_total - n_groups
    f = (ss_between / float(df_between)) / (ss_within / float(df_within))
    prob = special.fdtrc(df_between, df_within, f)  # equivalent to stats.f.sf
    F_onewayResult = namedtuple('F_onewayResult', ('statistic', 'pvalue'))
    return F_onewayResult(f, prob)
def f_value_wilks_lambda(ER, EF, dfnum, dfden, a, b):
    """Calculation of Wilks lambda F-statistic for multivariate data, per
    Maxwell & Delaney p.657.
    """
    error_reduced = ma.array(ER, copy=False, ndmin=2)
    error_full = ma.array(EF, copy=False, ndmin=2)
    if ma.getmask(error_reduced).any() or ma.getmask(error_full).any():
        raise NotImplementedError("Not implemented when the inputs "
                                  "have missing data")

    lmbda = np.linalg.det(error_full) / np.linalg.det(error_reduced)
    # ma.sqrt masks a non-positive radicand; filled(..., 1) then falls
    # back to q = 1.
    q = ma.filled(ma.sqrt(((a-1)**2*(b-1)**2 - 2) /
                          ((a-1)**2 + (b-1)**2 - 5)), 1)
    lmbda_root = lmbda**(1.0/q)
    numerator = (1 - lmbda_root)*(a-1)*(b-1)
    denominator = lmbda_root / (numerator*q - 0.5*(a-1)*(b-1) + 1)
    return numerator / denominator
def friedmanchisquare(*args):
    """Friedman Chi-Square is a non-parametric, one-way within-subjects ANOVA.
    This function calculates the Friedman Chi-square test for repeated measures
    and returns the result, along with the associated probability value.

    Each input is considered a given group. Ideally, the number of treatments
    among each group should be equal. If this is not the case, only the first
    n treatments are taken into account, where n is the number of treatments
    of the smallest group.
    If a group has some missing values, the corresponding treatments are masked
    in the other groups.
    The test statistic is corrected for ties.

    Masked values in one group are propagated to the other groups.

    Returns
    -------
    statistic : float
        the test statistic.
    pvalue : float
        the associated p-value.
    """
    data = argstoarray(*args).astype(float)
    k = len(data)
    if k < 3:
        raise ValueError("Less than 3 groups (%i): " % k +
                         "the Friedman test is NOT appropriate.")

    # Rank within each treatment (column).  NOTE(review): masked cells appear
    # to come back as rank 0 from rankdata, which masked_values(..., 0) turns
    # into masked entries again — confirm against rankdata's implementation.
    ranked = ma.masked_values(rankdata(data, axis=0), 0)
    if ranked._mask is not nomask:
        # Propagate any masked value to the whole column (treatment), then
        # drop the masked columns entirely.
        ranked = ma.mask_cols(ranked)
        ranked = ranked.compressed().reshape(k,-1).view(ndarray)
    else:
        ranked = ranked._data

    (k,n) = ranked.shape
    # Ties correction
    repeats = np.array([find_repeats(_) for _ in ranked.T], dtype=object)
    ties = repeats[repeats.nonzero()].reshape(-1,2)[:,-1].astype(int)
    tie_correction = 1 - (ties**3-ties).sum()/float(n*(k**3-k))

    # Between-group sum of squares of the rank sums, scaled into the
    # chi-square statistic and corrected for ties.
    ssbg = np.sum((ranked.sum(-1) - n*(k+1)/2.)**2)
    chisq = ssbg * 12./(n*k*(k+1)) * 1./tie_correction
    FriedmanchisquareResult = namedtuple('FriedmanchisquareResult',
                                         ('statistic', 'pvalue'))
    return FriedmanchisquareResult(chisq, stats.chisqprob(chisq, k-1))
|
{
"content_hash": "94d35864e690c3cf66bc6c89c54473d4",
"timestamp": "",
"source": "github",
"line_count": 2106,
"max_line_length": 103,
"avg_line_length": 32.383190883190885,
"alnum_prop": 0.5727943224973974,
"repo_name": "uglyboxer/linear_neuron",
"id": "e9d3fa6bc7d7f35c9db1b93ff1bb25527cab8135",
"size": "68199",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "net-p3/lib/python3.5/site-packages/scipy/stats/mstats_basic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "317983"
},
{
"name": "C++",
"bytes": "5695"
},
{
"name": "CSS",
"bytes": "5382"
},
{
"name": "FORTRAN",
"bytes": "10375"
},
{
"name": "Groff",
"bytes": "17679"
},
{
"name": "HTML",
"bytes": "4000"
},
{
"name": "JavaScript",
"bytes": "24260"
},
{
"name": "Makefile",
"bytes": "214"
},
{
"name": "Matlab",
"bytes": "4346"
},
{
"name": "Python",
"bytes": "24433064"
},
{
"name": "Shell",
"bytes": "3791"
}
],
"symlink_target": ""
}
|
import logging
import uuid
import json
import esgfpid.utils
import esgfpid.assistant.messages
from esgfpid.utils import loginfo, logdebug, logtrace, logerror, logwarn
# Module-level logger.  The NullHandler keeps this library silent unless the
# embedding application configures its own logging handlers.
LOGGER = logging.getLogger(__name__)
LOGGER.addHandler(logging.NullHandler())
class ErrataAssistant(object):
    """Assembles and dispatches messages that add or remove errata ids
    on a dataset handle."""

    def __init__(self, **args):
        '''
        :param prefix: The handle prefix used to build dataset handles.
        :param coupler: The coupler object (for sending the message to the queue).
        '''
        mandatory_args = ['prefix', 'coupler']
        esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
        esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
        self.__prefix = args['prefix']
        self.__coupler = args['coupler']

    def add_errata_ids(self, **args):
        '''
        Request that errata ids be added to a dataset handle.

        :param drs_id: DRS id of the dataset.
        :param version_number: Version number of the dataset.
        :param errata_ids: A single errata id or a list of errata ids.
        '''
        logdebug(LOGGER, 'Adding errata ids...')
        mandatory_args = ['drs_id', 'version_number', 'errata_ids']
        esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
        esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
        dataset_handle = self.__get_dataset_handle(args)
        errata_ids = self.__get_errata_ids_as_list(args)
        message = self.__make_add_message(errata_ids, dataset_handle, args['drs_id'], args['version_number'])
        self.__send_message_to_queue(message)
        loginfo(LOGGER, 'Requesting to add errata ids "%s" to dataset "%s".', ', '.join(errata_ids), dataset_handle)
        logdebug(LOGGER, 'Adding errata ids... done')

    def remove_errata_ids(self, **args):
        '''
        Request that errata ids be removed from a dataset handle.

        :param drs_id: DRS id of the dataset.
        :param version_number: Version number of the dataset.
        :param errata_ids: A single errata id or a list of errata ids.
        '''
        logdebug(LOGGER, 'Removing errata ids...')
        mandatory_args = ['drs_id', 'version_number', 'errata_ids']
        esgfpid.utils.check_presence_of_mandatory_args(args, mandatory_args)
        esgfpid.utils.check_noneness_of_mandatory_args(args, mandatory_args)
        dataset_handle = self.__get_dataset_handle(args)
        errata_ids = self.__get_errata_ids_as_list(args)
        message = self.__make_remove_message(errata_ids, dataset_handle, args['drs_id'], args['version_number'])
        self.__send_message_to_queue(message)
        loginfo(LOGGER, 'Requesting to remove errata ids "%s" from dataset "%s".', ', '.join(errata_ids), dataset_handle)
        logdebug(LOGGER, 'Removing errata ids... done')

    def __get_errata_ids_as_list(self, args):
        # Normalize the caller's input: accept a single errata id or a list
        # of ids, always return a list.  isinstance() replaces the former
        # "type(x) == type([])" comparison (idiomatic, and also accepts
        # list subclasses).
        errata_ids = args['errata_ids']
        if isinstance(errata_ids, list):
            return errata_ids
        return [errata_ids]

    def __get_dataset_handle(self, args):
        # Build the full handle string from the prefix, drs id and version.
        return esgfpid.utils.make_handle_from_drsid_and_versionnumber(
            drs_id=args['drs_id'],
            version_number=args['version_number'],
            prefix=self.__prefix
        )

    def __make_add_message(self, errata_ids, dataset_handle, drs_id, vers_number):
        # Assemble the timestamped "add errata ids" queue message.
        message_timestamp = esgfpid.utils.get_now_utc_as_formatted_string()
        return esgfpid.assistant.messages.add_errata_ids_message(
            dataset_handle = dataset_handle,
            timestamp = message_timestamp,
            errata_ids = errata_ids,
            drs_id=drs_id,
            version_number=vers_number
        )

    def __make_remove_message(self, errata_ids, dataset_handle, drs_id, vers_number):
        # Assemble the timestamped "remove errata ids" queue message.
        message_timestamp = esgfpid.utils.get_now_utc_as_formatted_string()
        return esgfpid.assistant.messages.remove_errata_ids_message(
            dataset_handle = dataset_handle,
            timestamp = message_timestamp,
            errata_ids = errata_ids,
            drs_id=drs_id,
            version_number=vers_number
        )

    def __send_message_to_queue(self, message):
        # Delegate actual delivery to the coupler.
        self.__coupler.send_message_to_queue(message)
|
{
"content_hash": "ca3ad25c06d7fe129d0c043950f259ee",
"timestamp": "",
"source": "github",
"line_count": 95,
"max_line_length": 121,
"avg_line_length": 39.4,
"alnum_prop": 0.6329147742452578,
"repo_name": "IS-ENES-Data/esgf-pid",
"id": "9a18a058c7c2ad3972ddedb61a560ef38b8d3591",
"size": "3743",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "esgfpid/assistant/errata.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "774918"
}
],
"symlink_target": ""
}
|
import argparse
from aws_services import cloudtrail
from aws_services import cloudwatch
from aws_services import ec2
from aws_services import iam
from aws_services import s3
from section import one, two, three, four
# from section.two import section_two
# from section.three import section_three
# from section.four import section_four
def main():
    """Run the CIS checks for every section number listed in the global
    ``args.section`` (populated by the argparse block below)."""
    # One client wrapper per AWS service used by the checks.
    cloudtrail_svc = cloudtrail.CloudTrailService()
    cloudwatch_svc = cloudwatch.CloudWatchService()
    ec2_svc = ec2.EC2Service()
    iam_svc = iam.IAMService()
    s3_svc = s3.S3Service()

    # Map each CIS section number to a factory building its checker; the
    # checker is only instantiated when the section was requested.
    section_factories = {
        1: lambda: one.SectionOne(iam_svc),
        2: lambda: two.SectionTwo(cloudtrail_svc, s3_svc),
        3: lambda: three.SectionThree(cloudtrail_svc, cloudwatch_svc),
        4: lambda: four.SectionFour(ec2_svc),
    }
    for number, factory in section_factories.items():
        if number in args.section:
            factory().cis_check()
if __name__ == '__main__':
    # Build the CLI.  Note: `args` is deliberately a module-level global,
    # because main() reads it directly.
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "-d",
        "--debug",
        help="output debug messages.",
        action="store_true"
    )
    # One or more CIS section numbers (e.g. "1 3"), collected as ints.
    parser.add_argument(
        "section",
        help="select which section to test.",
        nargs="+",
        type=int
    )
    args = parser.parse_args()
    main()
|
{
"content_hash": "34d88744213096d01b1ce6ea40c67bf2",
"timestamp": "",
"source": "github",
"line_count": 53,
"max_line_length": 50,
"avg_line_length": 24.867924528301888,
"alnum_prop": 0.6365705614567526,
"repo_name": "SpoonBoy/python-aws-foundation",
"id": "a57daa0507aa952de81519606e5c3dc0495d2d41",
"size": "1318",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "aws-foundation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "23313"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import unittest
import responses
import requests
from tapioca.exceptions import (
ClientError, ServerError, ResponseProcessException,
TapiocaException)
from tapioca.tapioca import TapiocaClient
from tests.client import TesterClient, TesterClientAdapter
class TestTapiocaException(unittest.TestCase):
    """Checks the metadata carried by TapiocaException on HTTP errors."""

    def setUp(self):
        self.wrapper = TesterClient()

    def _mock_400_response(self):
        # Register a canned HTTP 400 response for the tester resource URL.
        responses.add(responses.GET, self.wrapper.test().data,
                      body='{"data": {"key": "value"}}',
                      status=400,
                      content_type='application/json')

    @responses.activate
    def test_exception_contain_tapioca_client(self):
        self._mock_400_response()
        # assertRaises guarantees a clean failure (not a NameError on an
        # unbound local) if the call unexpectedly does not raise.
        with self.assertRaises(TapiocaException) as context:
            self.wrapper.test().get()
        self.assertIs(context.exception.client.__class__, TapiocaClient)

    @responses.activate
    def test_exception_contain_status_code(self):
        self._mock_400_response()
        with self.assertRaises(TapiocaException) as context:
            self.wrapper.test().get()
        self.assertIs(context.exception.status_code, 400)

    @responses.activate
    def test_exception_message(self):
        self._mock_400_response()
        with self.assertRaises(TapiocaException) as context:
            self.wrapper.test().get()
        self.assertEqual(str(context.exception), 'response status code: 400')
class TestExceptions(unittest.TestCase):
    """Covers translation of HTTP error statuses into tapioca exceptions."""

    def setUp(self):
        self.wrapper = TesterClient()

    def _register(self, status, body=None):
        # Register a canned JSON response for the tester resource URL.
        kwargs = {'status': status, 'content_type': 'application/json'}
        if body is not None:
            kwargs['body'] = body
        responses.add(responses.GET, self.wrapper.test().data, **kwargs)

    @responses.activate
    def test_adapter_raises_response_process_exception_on_400s(self):
        self._register(400, '{"erros": "Server Error"}')
        response = requests.get(self.wrapper.test().data)
        with self.assertRaises(ResponseProcessException):
            TesterClientAdapter().process_response(response)

    @responses.activate
    def test_adapter_raises_response_process_exception_on_500s(self):
        self._register(500, '{"erros": "Server Error"}')
        response = requests.get(self.wrapper.test().data)
        with self.assertRaises(ResponseProcessException):
            TesterClientAdapter().process_response(response)

    @responses.activate
    def test_raises_request_error(self):
        self._register(400, '{"data": {"key": "value"}}')
        with self.assertRaises(ClientError):
            self.wrapper.test().get()

    @responses.activate
    def test_raises_server_error(self):
        self._register(500)
        with self.assertRaises(ServerError):
            self.wrapper.test().get()
|
{
"content_hash": "3dc63a1b95d171d96b201e49ea07f1cd",
"timestamp": "",
"source": "github",
"line_count": 110,
"max_line_length": 69,
"avg_line_length": 32,
"alnum_prop": 0.5900568181818182,
"repo_name": "romulocollopy/tapioca-wrapper",
"id": "7a91e6f420a116b612579d4c1a8aabfbdd159469",
"size": "3537",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "tests/test_exceptions.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "1499"
},
{
"name": "Python",
"bytes": "44283"
}
],
"symlink_target": ""
}
|
from django import forms
from django.contrib.auth.models import User, Group
from django.contrib.auth.forms import UserCreationForm
from django.utils.translation import ugettext_lazy as _
from mks.models import Party
from models import NOTIFICATION_PERIOD_CHOICES
from persons.models import GENDER_CHOICES
class RegistrationForm(UserCreationForm):
    """Sign-up form extending Django's UserCreationForm.

    Adds notification-period and party fields; on commit, saving also
    fills the user's related profile.
    """
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^(?u)[ \w.@+-]{4,}$',
        help_text=_("Required. 4-30 characters (only letters, numbers spaces and @/./+/-/_ characters)."),
        error_message=_("Required. 4-30 characters (only letters, numbers spaces and @/./+/-/_ characters)."))
    email_notification = forms.ChoiceField(choices=NOTIFICATION_PERIOD_CHOICES, initial="W",
        label=_('E-Mail Notifications'),
        help_text=_('Should we send you e-mail notification about updates to things you follow on the site?'))
    party = forms.ModelChoiceField(Party.objects.all(),
        required=False,
        label=_('(citizen) party member?'),
        help_text=_('Are you a member of any party?'))

    class Meta:
        model = User
        fields = ('username', 'email')

    def save(self, commit=True):
        """Save the user; when committing, also persist the profile fields."""
        user = super(RegistrationForm, self).save(commit=False)
        user.email = self.cleaned_data['email']
        if commit:
            user.save()
            profile = user.get_profile()
            profile.email_notification = self.cleaned_data['email_notification']
            profile.party = self.cleaned_data['party']
            profile.save()
        return user

    def clean_email(self):
        """Reject e-mail addresses that are already registered."""
        email = self.cleaned_data['email']
        # exists() is cheaper than count() when only a yes/no is needed.
        if User.objects.filter(email=email).exists():
            raise forms.ValidationError(_('This email is already taken'))
        return email
class EditProfileForm(forms.Form):
    """Profile-editing form for an existing user.

    Updates both the ``User`` row (email, username) and the related
    profile (gender, description, notifications, party). Changing the
    e-mail revokes the 'Valid Email' group until re-validation.
    """
    email = forms.EmailField(required=False, label=_(u'email address'),
        help_text=_("We don't spam, and don't show your email to anyone")
    )
    username = forms.RegexField(label=_("Username"), max_length=30, regex=r'^(?u)[ \w.@+-]{4,}$',
        help_text=_("Required. 4-30 characters (only letters, numbers spaces and @/./+/-/_ characters)."),
        error_message=_("Required. 4-30 characters (only letters, numbers spaces and @/./+/-/_ characters)."))
    public_profile = forms.BooleanField(label=_('Public profile'),
        help_text=_('Allow other users to view your profile on the site'),
        required=False)
    gender = forms.ChoiceField(choices=GENDER_CHOICES,
        label=_('Gender'))
    description = forms.CharField(required=False,
        label=_('Tell us and other users bit about yourself'),
        widget=forms.Textarea(attrs={'rows': 3}))
    email_notification = forms.ChoiceField(choices=NOTIFICATION_PERIOD_CHOICES,
        label=_('E-Mail Notifications'),
        help_text=_('Should we send you e-mail notification about updates to things you follow on the site?'))
    party = forms.ModelChoiceField(Party.objects.all(),
        required=False,
        label=_('(citizen) party member?'),
        help_text=_('Are you a member of any party?'))

    def __init__(self, user=None, *args, **kwargs):
        super(EditProfileForm, self).__init__(*args, **kwargs)
        self.user = user
        # NOTE(review): despite the user=None default, a real user is
        # required here -- None would crash on get_profile().
        self.userprofile = user.get_profile()
        if self.user:
            self.initial = {'username': user.username,
                            'first_name': user.first_name,
                            'last_name': user.last_name,
                            'email': user.email,
                            'public_profile': self.userprofile.public_profile,
                            'gender': self.userprofile.gender,
                            'description': self.userprofile.description,
                            'email_notification': self.userprofile.email_notification,
                            'party': self.userprofile.party,
                            }
            self.has_email = bool(user.email)
            g, created = Group.objects.get_or_create(name='Valid Email')
            self.valid_email = g in self.user.groups.all()

    def clean_username(self):
        """Allow keeping one's own username; otherwise require uniqueness."""
        data = self.cleaned_data['username']
        if data == self.user.username:
            return data
        try:
            User.objects.get(username=data)
            raise forms.ValidationError("This username is already taken.")
        except User.DoesNotExist:
            return data

    def clean(self):
        cleaned_data = self.cleaned_data
        return cleaned_data

    def save(self, commit=True):
        """Apply cleaned data to the user and profile; save when commit."""
        user = self.user
        if self.cleaned_data['email'] is not None:
            # Email changed: user loses comment permissions until the new
            # address is validated again.
            if user.email != self.cleaned_data['email']:
                g = Group.objects.get(name='Valid Email')
                user.groups.remove(g)
            user.email = self.cleaned_data['email']
        user.username = self.cleaned_data['username']
        self.userprofile.gender = self.cleaned_data['gender']
        self.userprofile.public_profile = self.cleaned_data['public_profile']
        self.userprofile.description = self.cleaned_data['description']
        self.userprofile.email_notification = self.cleaned_data['email_notification']
        self.userprofile.party = self.cleaned_data['party']
        if commit:
            user.save()
            self.userprofile.save()
        return user
|
{
"content_hash": "b9ea4284e48583bfe886ab734542d672",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 147,
"avg_line_length": 49.02439024390244,
"alnum_prop": 0.5651741293532339,
"repo_name": "jspan/Open-Knesset",
"id": "ec766d5c7447c10367f2274800e58e7f0839ea68",
"size": "6030",
"binary": false,
"copies": "8",
"ref": "refs/heads/master",
"path": "user/forms.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "345576"
},
{
"name": "HTML",
"bytes": "688858"
},
{
"name": "JavaScript",
"bytes": "214459"
},
{
"name": "Python",
"bytes": "3920008"
},
{
"name": "Shell",
"bytes": "203"
}
],
"symlink_target": ""
}
|
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
from collections import defaultdict
from pants.engine.nodes import Noop, Return, SelectNode, TaskNode
class GraphValidator(object):
  """A concrete object that implements validation of a completed product graph.
  TODO: The name "literal" here is overloaded with SelectLiteral, which is a better fit
  for the name. The values here are more "user-specified/configured" than "literal".
  This class offers the abstraction to allow plugin implementers to install their own.
  TODO: The current implementation is slow, and ideally validation _during_ graph
  should run while subgraphs are completing. This would limit their performance
  impact, and allow for better locality of errors.
  """
  def __init__(self, symbol_table_cls):
    # Invert the symbol table: map each product class to its configured name.
    self._literal_types = dict()
    for name, cls in symbol_table_cls.table().items():
      self._literal_types[cls] = name
  def _collect_consumed_inputs(self, product_graph, root):
    """Walks successful nodes under the root for its subject, and returns all products used."""
    consumed_inputs = set()
    # Walk into successful nodes for the same subject under this root.
    def predicate(node, state):
      return root.subject == node.subject and type(state) is Return
    # If a product was successfully selected, record it.
    for node, _ in product_graph.walk([root], predicate=predicate):
      if type(node) is SelectNode:
        consumed_inputs.add(node.product)
    return consumed_inputs
  def _collect_partially_consumed_inputs(self, product_graph, consumed_inputs, root):
    """Walks below a failed node and collects cases where additional literal products could be used.
    Returns:
      dict(subject, dict(tuple(input_product, output_product), list(tuple(task, missing_products))))
    """
    partials = defaultdict(lambda: defaultdict(list))
    # Walk all nodes for the same subject under this root.
    def predicate(node, state):
      return root.subject == node.subject
    for node, state in product_graph.walk([root], predicate=predicate):
      # Look for unsatisfied TaskNodes with at least one unsatisfied dependency.
      if type(node) is not TaskNode:
        continue
      if type(state) is not Noop:
        continue
      # Dependencies of this task for the same subject, and those that Noop'ed.
      sub_deps = [d for d in product_graph.dependencies_of(node) if d.subject == root.subject]
      missing_products = {d.product for d in sub_deps if type(product_graph.state(d)) is Noop}
      if not missing_products:
        continue
      # If all unattainable products could have been specified as literal...
      if any(product not in self._literal_types for product in missing_products):
        continue
      # There was at least one dep successfully (recursively) satisfied via a literal.
      # TODO: this does multiple walks.
      used_literal_deps = set()
      for dep in sub_deps:
        for product in self._collect_consumed_inputs(product_graph, dep):
          if product in self._literal_types:
            used_literal_deps.add(product)
      if not used_literal_deps:
        continue
      # The partially consumed products were not fully consumed elsewhere.
      if not (used_literal_deps - consumed_inputs):
        continue
      # Found a partially consumed input: record (task, missing) per (input, output) pair.
      for used_literal_dep in used_literal_deps:
        partials[node.subject][(used_literal_dep, node.product)].append((node.func, missing_products))
    return partials
  def validate(self, product_graph):
    """Finds 'subject roots' in the product graph and invokes validation on each of them."""
    # Locate roots: those who do not have any dependents for the same subject.
    roots = set()
    for node, dependents in product_graph.dependents():
      if any(d.subject == node.subject for d in dependents):
        # Node had a dependent for its subject: was not a root.
        continue
      roots.add(node)
    # Raise if there were any partially consumed inputs.
    for root in roots:
      consumed = self._collect_consumed_inputs(product_graph, root)
      partials = self._collect_partially_consumed_inputs(product_graph, consumed, root)
      if partials:
        raise PartiallyConsumedInputsError.create(self._literal_types, partials)
class PartiallyConsumedInputsError(Exception):
  """No task was able to consume a particular literal product for a subject, although some tried.
  In particular, this error allows for safe composition of configuration on targets (ie,
  ThriftSources AND ThriftConfig), because if a task requires multiple inputs for a subject
  but cannot find them, a useful error is raised.
  TODO: Improve the error message in the presence of failures due to mismatched variants.
  """
  @classmethod
  def _msg(cls, inverted_symbol_table, partially_consumed_inputs):
    # Accumulate the report lines eagerly (create() joins them with newlines).
    lines = []
    for subject, tasks_and_inputs in partially_consumed_inputs.items():
      lines.append('\nSome products were partially specified for `{}`:'.format(subject))
      for (input_product, output_product), tasks in tasks_and_inputs.items():
        lines.append('  To consume `{}` and produce `{}`:'.format(
            inverted_symbol_table[input_product], inverted_symbol_table[output_product]))
        for task, additional_inputs in tasks:
          needed = ' AND '.join('`{}`'.format(inverted_symbol_table[i]) for i in additional_inputs)
          lines.append('    {} also needed ({})'.format(task.__name__, needed))
    return lines
  @classmethod
  def create(cls, inverted_symbol_table, partially_consumed_inputs):
    # Build the full message and wrap it in an exception instance.
    return cls('\n'.join(cls._msg(inverted_symbol_table, partially_consumed_inputs)))
|
{
"content_hash": "bbbee4f040675c7d3d8dd71c60e0719b",
"timestamp": "",
"source": "github",
"line_count": 125,
"max_line_length": 102,
"avg_line_length": 45.352,
"alnum_prop": 0.701887458105486,
"repo_name": "ity/pants",
"id": "57912c0f40dd75e9118e948b68be70bf7ae6cb32",
"size": "5816",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/engine/examples/graph_validator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C++",
"bytes": "781"
},
{
"name": "CSS",
"bytes": "9444"
},
{
"name": "GAP",
"bytes": "2459"
},
{
"name": "Gherkin",
"bytes": "919"
},
{
"name": "Go",
"bytes": "1526"
},
{
"name": "HTML",
"bytes": "75140"
},
{
"name": "Java",
"bytes": "402667"
},
{
"name": "JavaScript",
"bytes": "29992"
},
{
"name": "Protocol Buffer",
"bytes": "3783"
},
{
"name": "Python",
"bytes": "4960888"
},
{
"name": "Scala",
"bytes": "85556"
},
{
"name": "Shell",
"bytes": "58420"
},
{
"name": "Thrift",
"bytes": "2919"
}
],
"symlink_target": ""
}
|
from gym.envs.classic_control import PendulumEnv, CartPoleEnv
import numpy as np
# MuJoCo may not be installed.
# Fall back to None so the wrapper classes below can subclass `object`
# (via `HalfCheetahEnv or object`) when gym's MuJoCo envs are unavailable.
HalfCheetahEnv = HopperEnv = None
try:
    from gym.envs.mujoco import HalfCheetahEnv, HopperEnv
except Exception:
    # Any import failure (missing mujoco binaries, license, ...) is tolerated.
    pass
class CartPoleWrapper(CartPoleEnv):
    """CartPole-v1 with a batched `reward` method for model-based RL
    algorithms (e.g. MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        # obs_next rows: [pos, vel, angle, rotation_rate]
        pos = obs_next[:, 0]
        angle = obs_next[:, 2]
        # An episode terminates once either the cart position or the pole
        # angle leaves its allowed band; reward is 1.0 while alive, else 0.0.
        terminated = np.logical_or(
            np.abs(pos) > self.x_threshold,
            np.abs(angle) > self.theta_threshold_radians,
        )
        return 1.0 - terminated.astype(np.float32)
class PendulumWrapper(PendulumEnv):
    """Pendulum-v1 with a batched `reward` method for model-based RL
    algorithms (e.g. MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        # obs rows: [cos(theta), sin(theta), dtheta/dt]; recover the angle
        # with atan2(sin, cos), clipping to guard against values outside [-1, 1].
        cos_theta = np.clip(obs[:, 0], -1.0, 1.0)
        sin_theta = np.clip(obs[:, 1], -1.0, 1.0)
        theta = np.arctan2(sin_theta, cos_theta)
        # Single scalar torque per step, bounded by the env's max torque.
        torque = np.clip(action, -self.max_torque, self.max_torque)[0]
        angle_cost = self.angle_normalize(theta) ** 2
        velocity_cost = 0.1 * obs[:, 2] ** 2
        effort_cost = 0.001 * (torque ** 2)
        return -(angle_cost + velocity_cost + effort_cost)

    @staticmethod
    def angle_normalize(x):
        # Wrap an angle into [-pi, pi).
        return ((x + np.pi) % (2.0 * np.pi)) - np.pi
class HalfCheetahWrapper(HalfCheetahEnv or object):
    """MuJoCo HalfCheetah-v2 with a `reward` method for model-based RL
    algorithms (e.g. MB-MPO). Handles batched and single observations.
    """

    def reward(self, obs, action, obs_next):
        if obs.ndim == 2 and action.ndim == 2:
            # Batched case: one reward per row.
            assert obs.shape == obs_next.shape
            forward_vel = obs_next[:, 8]
            ctrl_cost = 0.1 * np.sum(np.square(action), axis=1)
        else:
            # Single-observation case.
            forward_vel = obs_next[8]
            ctrl_cost = 0.1 * np.square(action).sum()
        # Reward forward velocity minus control effort, bounded to +/-1000.
        return np.clip(forward_vel - ctrl_cost, -1000.0, 1000.0)
class HopperWrapper(HopperEnv or object):
    """MuJoCo Hopper-v2 with a batched `reward` method for model-based RL
    algorithms (e.g. MB-MPO).
    """

    def reward(self, obs, action, obs_next):
        alive_bonus = 1.0
        # Batched inputs only: matching row counts, matching obs shapes.
        assert obs.ndim == 2 and action.ndim == 2
        assert obs.shape == obs_next.shape and action.shape[0] == obs.shape[0]
        forward_vel = obs_next[:, 5]
        ctrl_cost = 1e-3 * np.sum(np.square(action), axis=1)
        # Velocity plus survival bonus minus control effort, bounded to +/-1000.
        return np.clip(forward_vel + alive_bonus - ctrl_cost, -1000.0, 1000.0)
if __name__ == "__main__":
    # Smoke test: roll out a random policy in the wrapped Pendulum env.
    env = PendulumWrapper()
    env.reset()
    for _ in range(100):
        env.step(env.action_space.sample())
        env.render()
|
{
"content_hash": "be2ecd54e299b03d2b05aaa4c03ab9eb",
"timestamp": "",
"source": "github",
"line_count": 104,
"max_line_length": 88,
"avg_line_length": 32.00961538461539,
"alnum_prop": 0.585761489936918,
"repo_name": "ray-project/ray",
"id": "845ddffc58a66fbdb73ebae4d8cc8fb7374cb7c9",
"size": "3329",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rllib/examples/env/mbmpo_env.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "37490"
},
{
"name": "C++",
"bytes": "5972422"
},
{
"name": "CSS",
"bytes": "10912"
},
{
"name": "Cython",
"bytes": "227477"
},
{
"name": "Dockerfile",
"bytes": "20210"
},
{
"name": "HTML",
"bytes": "30382"
},
{
"name": "Java",
"bytes": "1160849"
},
{
"name": "JavaScript",
"bytes": "1128"
},
{
"name": "Jinja",
"bytes": "6371"
},
{
"name": "Jupyter Notebook",
"bytes": "1615"
},
{
"name": "Makefile",
"bytes": "234"
},
{
"name": "PowerShell",
"bytes": "1114"
},
{
"name": "Python",
"bytes": "19539109"
},
{
"name": "Shell",
"bytes": "134583"
},
{
"name": "Starlark",
"bytes": "334862"
},
{
"name": "TypeScript",
"bytes": "190599"
}
],
"symlink_target": ""
}
|
import json
from django.test import TestCase
from django.core.exceptions import ObjectDoesNotExist
from .models import User
class TestLogin(TestCase):
    """Exercises POST /accounts/login with valid and invalid credentials."""

    def setUp(self):
        user = User(username='testuser')
        user.set_password('rightpassword')
        user.save()
        self.user_id = user.id

    def test_login_invalid_password(self):
        response = self.client.post('/accounts/login', {
            'username': 'testuser',
            'password': 'wrongpassword'
        })
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 400)

    def test_login_invalid_username(self):
        response = self.client.post('/accounts/login', {
            'username': 'nottestuser',
            'password': 'rightpassword'
        })
        self.assertEqual(response.status_code, 400)

    def test_login_valid_username_and_password_status_code(self):
        response = self.client.post('/accounts/login', {
            'username': 'testuser',
            'password': 'rightpassword'
        })
        # Successful login redirects.
        self.assertEqual(response.status_code, 302)

    def test_login_valid_username_and_password_updates_session(self):
        self.client.post('/accounts/login', {
            'username': 'testuser',
            'password': 'rightpassword'
        })
        self.assertEqual(int(self.client.session['_auth_user_id']), self.user_id)
class TestRegistration(TestCase):
    """Validation and side-effect tests for POST /accounts/register."""

    def setUp(self):
        # Requests are made as a logged-in admin, who may register users.
        user = User(username="test")
        user.set_password("testpassword")
        user.is_admin = True
        user.save()
        self.user = user
        self.client.login(username='test', password='testpassword')

    def test_missing_username(self):
        response = self.client.post('/accounts/register', {
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 400)

    def test_missing_password(self):
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'email': 'valid@email.com',
        })
        self.assertEqual(response.status_code, 400)

    def test_missing_email(self):
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
        })
        self.assertEqual(response.status_code, 400)

    def test_invalid_email(self):
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'invalidemail',
        })
        self.assertEqual(response.status_code, 400)

    def test_unique_username(self):
        user = User(username='validuser')
        user.set_password('rightpassword')
        user.save()
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        self.assertEqual(response.status_code, 400)

    def test_unique_email(self):
        user = User(username='anothervaliduser', email='valid@email.com')
        user.set_password('rightpassword')
        user.save()
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        self.assertEqual(response.status_code, 400)

    def test_valid(self):
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        self.assertEqual(response.status_code, 201)

    def test_valid_creates_user(self):
        self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        user = User.objects.get(username='validuser')
        self.assertIsNotNone(user)

    def test_defaults(self):
        params = {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        }
        self.client.post('/accounts/register', params)
        user = User.objects.get(username='validuser')
        self.assertEqual(user.email, params['email'])
        self.assertEqual(user.first_name, '')
        self.assertEqual(user.last_name, '')
        self.assertEqual(user.is_admin, False)

    def test_fields_set_correctly(self):
        # NOTE(review): duplicates test_defaults except for the is_admin check.
        params = {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        }
        self.client.post('/accounts/register', params)
        user = User.objects.get(username='validuser')
        self.assertEqual(user.email, params['email'])
        self.assertEqual(user.first_name, '')
        self.assertEqual(user.last_name, '')

    def test_user_manager(self):
        # The registering admin becomes the new user's manager.
        self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        user = User.objects.get(username='validuser')
        self.assertEqual(user.manager, self.user)

    def test_adding_admin_not_valid(self):
        # A plain admin may not create another admin.
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
            'is_admin': True,
        })
        self.assertEqual(response.status_code, 400)

    def test_adding_admin_valid(self):
        # A super admin may create admins.
        user = User(username="super_test")
        user.set_password("testpassword")
        user.is_super_admin = True
        user.save()
        self.client.login(username='super_test', password='testpassword')
        self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
            'is_admin': True,
        })
        user = User.objects.get(username='validuser')
        self.assertEqual(user.is_admin, True)
class RegistrationPermissionsTest(TestCase):
    """Ensures registration is forbidden for anonymous and non-admin users."""

    def test_create_not_logged_in(self):
        response = self.client.post('/accounts/register', {
            'username': 'validuser',
            'password': 'validpassword',
            'email': 'valid@email.com',
        })
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 403)

    def test_user_is_not_admin(self):
        user = User(username='validuser')
        user.set_password('validpassword')
        user.save()
        self.client.login(username='validuser', password='validpassword')
        response = self.client.post('/accounts/register', {
            'username': 'newuser',
            'password': 'newpassword',
            'email': 'new@email.com',
        })
        self.assertEqual(response.status_code, 403)
class UserListView(TestCase):
    """Scoping tests for GET /api/users/ (a TestCase despite the name)."""

    def setUp(self):
        users = []
        users.append(User(username='superadmin', is_super_admin=True))
        users.append(User(username='admin1', is_admin=True))
        users.append(User(username='admin2', is_admin=True))
        # Save admins first so they can be referenced as `manager` FKs below.
        for user in users:
            user.set_password('password')
            user.save()
        users.append(User(username='user1', manager=users[1]))
        users.append(User(username='user2', manager=users[1]))
        # Only the two newly appended users still need a password and save
        # (the first loop already handled the admins).
        for user in users[3:]:
            user.set_password('password')
            user.save()

    def test_super_admin(self):
        self.client.login(username='superadmin', password='password')
        response = self.client.get('/api/users/')
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 200)
        users = json.loads(response.content)
        # Super admin sees all five users.
        self.assertEqual(len(users), 5)

    def test_admin(self):
        self.client.login(username='admin1', password='password')
        response = self.client.get('/api/users/')
        self.assertEqual(response.status_code, 200)
        users = json.loads(response.content)
        # Admin sees himself plus his two managed users.
        self.assertEqual(len(users), 3)
class UserDetailView(TestCase):
    """Permission and validation tests for GET/DELETE /api/users/&lt;id&gt;."""

    def setUp(self):
        users = []
        users.append(User(username='superadmin', is_super_admin=True))
        users.append(User(username='admin1', is_admin=True))
        users.append(User(username='admin2', is_admin=True))
        # Save admins first so they can be referenced as `manager` FKs below.
        for user in users:
            user.set_password('password')
            user.save()
        users.append(User(username='user1', manager=users[1]))
        users.append(User(username='user2', manager=users[1]))
        # Only the two newly appended users still need a password and save.
        for user in users[3:]:
            user.set_password('password')
            user.save()
        self.super_admin = users[0]
        self.admin1 = users[1]
        self.user1 = users[3]

    def test_get_account(self):
        self.client.login(username='superadmin', password='password')
        response = self.client.get('/api/users/%s' % self.super_admin.id)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 200)
        user = json.loads(response.content)
        self.assertEqual(user['username'], self.super_admin.username)

    def test_get_account_superadmin_logged_in_as_admin(self):
        self.client.login(username='user1', password='password')
        response = self.client.get('/api/users/%s' % self.super_admin.id)
        self.assertEqual(response.status_code, 403)

    def test_get_account_superadmin_direct_manager(self):
        self.client.login(username='admin1', password='password')
        response = self.client.get('/api/users/%s' % self.user1.id)
        self.assertEqual(response.status_code, 200)

    def test_cant_delete_myself(self):
        self.client.login(username='admin1', password='password')
        response = self.client.delete('/api/users/%s' % self.admin1.id)
        self.assertEqual(response.status_code, 400)

    def test_cant_delete_admin_with_children(self):
        self.client.login(username='superadmin', password='password')
        response = self.client.delete('/api/users/%s' % self.admin1.id)
        self.assertEqual(response.status_code, 400)
class UserEditPassword(TestCase):
    """Checks POST /accounts/edit re-hashes a changed password."""

    def setUp(self):
        self.user = User(username='test')
        self.user.set_password('test_password')
        self.user.save()
        self.client.login(username='test', password='test_password')

    def test_user_can_change_his_password(self):
        old_password = self.user.password
        response = self.client.post('/accounts/edit', {
            'id': self.user.id,
            'password': 'new_test_password'
        })
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 200)
        # Reload from DB: the stored hash must have changed.
        self.user = User.objects.get(id=self.user.id)
        self.assertNotEqual(self.user.password, old_password)
class UserAuthToken(TestCase):
    """Checks auth-token creation and token-authenticated API access."""

    def _make_super_admin(self):
        # Create and return a saved super-admin user.
        user = User(username="super_test")
        user.set_password("testpassword")
        user.is_super_admin = True
        user.save()
        return user

    def test_user_has_token(self):
        user = self._make_super_admin()
        try:
            token = user.auth_token
            self.assertIsNotNone(token.key)
        except ObjectDoesNotExist as e:
            # `except X, e` is Python-2-only syntax; `as` works on 2.6+ and 3.
            self.fail(str(e))

    def test_user_with_token(self):
        user = self._make_super_admin()
        token = user.auth_token.key
        header = {
            'HTTP_AUTHORIZATION': 'Token ' + token
        }
        response = self.client.get('/api/accounts/', {}, **header)
        # assertEqual: assertEquals is a deprecated alias.
        self.assertEqual(response.status_code, 200)
|
{
"content_hash": "bd0d379d47ff21718a8bc9f3a9a59bad",
"timestamp": "",
"source": "github",
"line_count": 342,
"max_line_length": 82,
"avg_line_length": 34.39181286549707,
"alnum_prop": 0.588930454004421,
"repo_name": "arrayexpress/ae_auto",
"id": "fc7ccee4a889229a8d6ad36ed7f2482e97b38727",
"size": "11762",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ae_web/accounts/tests.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "203"
},
{
"name": "CSS",
"bytes": "596162"
},
{
"name": "HTML",
"bytes": "62396"
},
{
"name": "JavaScript",
"bytes": "605485"
},
{
"name": "Makefile",
"bytes": "1574"
},
{
"name": "PowerShell",
"bytes": "939"
},
{
"name": "Python",
"bytes": "2169498"
},
{
"name": "Ruby",
"bytes": "1030"
}
],
"symlink_target": ""
}
|
import ctypes
import msvcrt
import os
import sys
import code
import windows
from .. import winproxy
from ..generated_def import windef
from ..generated_def.winstructs import *
# Function resolution !
def get_func_addr(dll_name, func_name):
    """Resolve the address of a function exported by *dll_name* in the
    current process, loading the DLL first if necessary.

    Raises IndexError if the module cannot be found after loading.
    """
    # Make sure the DLL is mapped into our address space.
    ctypes.WinDLL(dll_name)
    if not dll_name.lower().endswith(".dll"):
        dll_name += ".dll"
    matching = [m for m in windows.current_process.peb.modules if m.name == dll_name]
    return matching[0].pe.exports[func_name]
def get_remote_func_addr(target, dll_name, func_name):
    """Resolve the address of *func_name* exported by *dll_name* inside
    another process *target*.

    Raises ValueError if the module is not loaded in the target.
    """
    # Return the export from the first module with a matching name.
    for module in target.peb.modules:
        if module.name == dll_name:
            return module.pe.exports[func_name]
    raise ValueError("Module <{0}> not loaded in target <{1}>".format(dll_name, target))
def is_wow_64(hProcess):
    """Return True if *hProcess* runs under WOW64.

    Returns False when kernel32 does not export IsWow64Process
    (systems without WOW64); raises WinError on API failure.
    """
    try:
        fnIsWow64Process = get_func_addr("kernel32.dll", "IsWow64Process")
    except winproxy.Kernel32Error:
        # Export missing: no WOW64 on this system.
        return False
    # Build a callable from the raw export address.
    IsWow64Process = ctypes.WINFUNCTYPE(BOOL, HANDLE, ctypes.POINTER(BOOL))(fnIsWow64Process)
    Wow64Process = BOOL()
    res = IsWow64Process(hProcess, ctypes.byref(Wow64Process))
    if res:
        return bool(Wow64Process)
    raise ctypes.WinError()
def create_file_from_handle(handle, mode="r"):
    """Return a Python :class:`file` around a windows HANDLE"""
    # Wrap the HANDLE in a CRT fd, then in an unbuffered (Python 2) file object.
    fd = msvcrt.open_osfhandle(handle, os.O_TEXT)
    return os.fdopen(fd, mode, 0)
def get_handle_from_file(f):
    """Get the windows HANDLE of a python :class:`file`"""
    # fileno() -> CRT fd -> underlying OS HANDLE.
    return msvcrt.get_osfhandle(f.fileno())
def create_console():
    """Create a new console displaying STDOUT

    Useful in injection of GUI process"""
    winproxy.AllocConsole()
    # Rebind each standard stream (stdout, stdin, stderr -- in that order)
    # to the freshly allocated console.
    for std_const, open_mode, stream_name in (
            (windef.STD_OUTPUT_HANDLE, "w", "stdout"),
            (windef.STD_INPUT_HANDLE, "r+", "stdin"),
            (windef.STD_ERROR_HANDLE, "w", "stderr")):
        handle = winproxy.GetStdHandle(std_const)
        setattr(sys, stream_name, create_file_from_handle(handle, open_mode))
def create_process(path, show_windows=False):
    """Launch *path* as a new process and return the matching process
    object from ``windows.system.processes``.
    """
    proc_info = PROCESS_INFORMATION()
    lpStartupInfo = None
    if show_windows:
        # Pass an (empty) STARTUPINFO structure when a window is wanted.
        StartupInfo = STARTUPINFOA()
        StartupInfo.cb = ctypes.sizeof(StartupInfo)
        StartupInfo.dwFlags = 0
        lpStartupInfo = ctypes.byref(StartupInfo)
    windows.winproxy.CreateProcessA(path, lpProcessInformation=ctypes.byref(proc_info), lpStartupInfo=lpStartupInfo)
    # Resolve the new PID back to a process object.
    proc = [p for p in windows.system.processes if p.pid == proc_info.dwProcessId][0]
    return proc
def enable_privilege(lpszPrivilege, bEnablePrivilege):
    """Enable or disable a privilege: enable_privilege(SE_DEBUG_NAME, True)

    Raises ValueError if the privilege could not be assigned.
    """
    tp = TOKEN_PRIVILEGES()
    luid = LUID()
    hToken = HANDLE()
    # Open our own token, resolve the privilege name to a LUID, adjust it.
    winproxy.OpenProcessToken(winproxy.GetCurrentProcess(), TOKEN_ALL_ACCESS, byref(hToken))
    winproxy.LookupPrivilegeValueA(None, lpszPrivilege, byref(luid))
    tp.PrivilegeCount = 1
    tp.Privileges[0].Luid = luid
    if bEnablePrivilege:
        tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED
    else:
        tp.Privileges[0].Attributes = 0
    winproxy.AdjustTokenPrivileges(hToken, False, byref(tp), sizeof(TOKEN_PRIVILEGES))
    winproxy.CloseHandle(hToken)
    # AdjustTokenPrivileges can "succeed" without assigning everything;
    # the real status is reported through GetLastError.
    if winproxy.GetLastError() == windef.ERROR_NOT_ALL_ASSIGNED:
        raise ValueError("Failed to get privilege {0}".format(lpszPrivilege))
    return True
def check_is_elevated():
    """Return True if process is Admin"""
    hToken = HANDLE()
    elevation = TOKEN_ELEVATION()
    cbsize = DWORD()
    # Query the elevation flag of our own access token.
    winproxy.OpenProcessToken(winproxy.GetCurrentProcess(), TOKEN_ALL_ACCESS, byref(hToken))
    winproxy.GetTokenInformation(hToken, TokenElevation, byref(elevation), sizeof(elevation), byref(cbsize))
    winproxy.CloseHandle(hToken)
    return elevation.TokenIsElevated
def check_debug():
    """Check that kernel is in debug mode
    beware of NOUMEX (https://msdn.microsoft.com/en-us/library/windows/hardware/ff556253(v=vs.85).aspx#_______noumex______)"""
    key_handle = HKEY()
    value_size = DWORD(1024)
    value_buffer = (c_char * value_size.value)()
    # Read HKLM\System\CurrentControlSet\Control -> SystemStartOptions.
    winproxy.RegOpenKeyExA(HKEY_LOCAL_MACHINE, "System\\CurrentControlSet\\Control", 0, KEY_READ, byref(key_handle))
    winproxy.RegGetValueA(key_handle, None, "SystemStartOptions", RRF_RT_REG_SZ, None, byref(value_buffer), byref(value_size))
    winproxy.RegCloseKey(key_handle)
    start_options = value_buffer[:]
    if "DEBUG" not in start_options:
        # Kernel debugging is not enabled at all (bcdedit /debug on).
        return False
    if "DEBUG=NOUMEX" not in start_options:
        # Debugging is on but NOUMEX is not set; kept as a silent warning spot.
        pass
    return True
class FixedInteractiveConsole(code.InteractiveConsole):
    # InteractiveConsole variant that writes its prompt to sys.stdout, so the
    # prompt appears on a console whose stdout was re-bound (see create_console).
    def raw_input(self, prompt=">>>"):
        """Write the prompt to sys.stdout, then read a line via Python 2 raw_input."""
        sys.stdout.write(prompt)
        return raw_input("")
def pop_shell():
    """Pop a console with an InteractiveConsole"""
    # Make sure a real console with usable stdio exists before interacting.
    create_console()
    # NOTE: locals() is pop_shell's own (essentially empty) scope, so the
    # shell starts with a clean namespace.
    FixedInteractiveConsole(locals()).interact()
def get_kernel_modules():
    """Return the list of loaded kernel modules as SYSTEM_MODULE entries."""
    cbsize = DWORD()
    # First call with a NULL buffer: it fails but writes the required buffer
    # size into cbsize.
    winproxy.NtQuerySystemInformation(SystemModuleInformation, None, 0, byref(cbsize))
    raw_buffer = (cbsize.value * c_char)()
    # Overlay the SYSTEM_MODULE_INFORMATION header on the raw byte buffer.
    buffer = SYSTEM_MODULE_INFORMATION.from_address(ctypes.addressof(raw_buffer))
    # Second call fills raw_buffer with the actual module records.
    winproxy.NtQuerySystemInformation(SystemModuleInformation, byref(raw_buffer), sizeof(raw_buffer), byref(cbsize))
    # The variable-length module array starts at the Modules field offset.
    modules = (SYSTEM_MODULE * buffer.ModulesCount).from_address(addressof(buffer) + SYSTEM_MODULE_INFORMATION.Modules.offset)
    return list(modules)
class VirtualProtected(object):
    """A context manager usable like `VirtualProtect` that will restore the old protection at exit

    Example::

        with utils.VirtualProtected(IATentry.addr, ctypes.sizeof(PVOID), windef.PAGE_EXECUTE_READWRITE):
            IATentry.value = 0x42424242
    """
    def __init__(self, addr, size, new_protect):
        # Round the address down to its 0x1000-aligned page base; protection
        # is applied per page.
        self.addr = addr - (addr % 0x1000)
        self.size = size
        self.new_protect = new_protect

    def __enter__(self):
        # Apply the requested protection, keeping the previous one around.
        self.old_protect = DWORD()
        winproxy.VirtualProtect(self.addr, self.size, self.new_protect, ctypes.byref(self.old_protect))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Restore whatever protection was in place before __enter__.
        winproxy.VirtualProtect(self.addr, self.size, self.old_protect.value, ctypes.byref(self.old_protect))
        # Never swallow exceptions from the managed block.
        return False
class DisableWow64FsRedirection(object):
    """A context manager that disable the Wow64 Fs Redirection"""
    def __enter__(self):
        # Save the previous redirection state so __exit__ can restore it.
        self.OldValue = PVOID()
        winproxy.Wow64DisableWow64FsRedirection(ctypes.byref(self.OldValue))
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        winproxy.Wow64RevertWow64FsRedirection(self.OldValue)
        # Returning False propagates any exception raised inside the block.
        return False
|
{
"content_hash": "c82d575e0dd2da903aa795e46b8868c3",
"timestamp": "",
"source": "github",
"line_count": 199,
"max_line_length": 129,
"avg_line_length": 35.2713567839196,
"alnum_prop": 0.684285510756518,
"repo_name": "sogeti-esec-lab/LKD",
"id": "1a688412a3db94e5aa0c2e4f45550052b3d82fe6",
"size": "7019",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "windows/utils/winutils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "763807"
}
],
"symlink_target": ""
}
|
"""Tests for tensor2tensor.utils.bleu_hook."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import six
from tensor2tensor.data_generators import text_encoder
from tensor2tensor.utils import bleu_hook
import tensorflow as tf
class BleuHookTest(tf.test.TestCase):
  """Unit tests for bleu_hook: compute_bleu, bleu_tokenize and bleu_wrapper."""

  def testComputeBleuEqual(self):
    """Identical corpora score a perfect BLEU of 1.0."""
    translation_corpus = [[1, 2, 3]]
    reference_corpus = [[1, 2, 3]]
    bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
    actual_bleu = 1.0
    self.assertEqual(bleu, actual_bleu)

  def testComputeNotEqual(self):
    """Fully disjoint corpora still score above 0 due to smoothing."""
    translation_corpus = [[1, 2, 3, 4]]
    reference_corpus = [[5, 6, 7, 8]]
    bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
    # The smoothing prevents 0 for small corpora
    actual_bleu = 0.0798679
    self.assertAllClose(bleu, actual_bleu, atol=1e-03)

  def testComputeMultipleBatch(self):
    """BLEU over two sentence pairs, one with a single-token mismatch."""
    translation_corpus = [[1, 2, 3, 4], [5, 6, 7, 0]]
    reference_corpus = [[1, 2, 3, 4], [5, 6, 7, 10]]
    bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
    actual_bleu = 0.7231
    self.assertAllClose(bleu, actual_bleu, atol=1e-03)

  def testComputeMultipleNgrams(self):
    """Partial n-gram overlap across sentences of different lengths."""
    reference_corpus = [[1, 2, 1, 13], [12, 6, 7, 4, 8, 9, 10]]
    translation_corpus = [[1, 2, 1, 3], [5, 6, 7, 4]]
    bleu = bleu_hook.compute_bleu(reference_corpus, translation_corpus)
    actual_bleu = 0.3436
    self.assertAllClose(bleu, actual_bleu, atol=1e-03)

  def testBleuTokenize(self):
    """Punctuation (including unicode quotes) is split into its own tokens."""
    self.assertEqual(bleu_hook.bleu_tokenize(u"hi, “there”"),
                     [u"hi", u",", u"“", u"there", u"”"])

  def _generate_test_data(self, name, hyps, refs):
    """Writes test data to temporary files.

    Args:
      name: str, used for making temp files unique across tests
      hyps: list of unicode strings serving as translation hypotheses
      refs: list of unicode strings serving as references

    Returns:
      hyp_file: path to temporary file containing the hypotheses
      refs_file: path to temporary file containing the references
    """
    assert len(hyps) == len(refs)
    hyp_file = os.path.join(tempfile.gettempdir(), "{}.hyps".format(name))
    refs_file = os.path.join(tempfile.gettempdir(), "{}.refs".format(name))
    for filename, items in zip([hyp_file, refs_file], [hyps, refs]):
      # PY2 writes bytes; PY3 writes text with explicit utf-8 encoding.
      with (open(filename, "wb")
            if six.PY2 else open(filename, "w", encoding="utf-8")) as out:
        content = text_encoder.unicode_to_native(u"\n".join(items))
        out.write(content)
    return hyp_file, refs_file

  def testBleuWrapper(self):
    """End-to-end BLEU from hypothesis/reference files on disk."""
    hyp_filename, ref_filename = self._generate_test_data(
        "standard", [u"a b a c", u"e f g d"], [u"a b a z", u"y f g d k l m"])
    bleu = bleu_hook.bleu_wrapper(ref_filename, hyp_filename)
    actual_bleu = 0.3436
    self.assertAllClose(bleu, actual_bleu, atol=1e-03)

  def testBleuWrapperWithUnicodeLineSeparator(self):
    """U+2028 line separators inside a sentence must not split lines."""
    hyp_filename, ref_filename = self._generate_test_data(
        "unicode-linesep", [u"a b a c", u"e f \u2028 d"],
        [u"a b a z", u"y f g d k l m"])
    bleu = bleu_hook.bleu_wrapper(ref_filename, hyp_filename)
    actual_bleu = 0.2638
    self.assertAllClose(bleu, actual_bleu, atol=1e-03)
# Run the test suite when the module is executed directly.
if __name__ == "__main__":
  tf.test.main()
|
{
"content_hash": "64425cc7c3aed4f53000d21ee6146c43",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 77,
"avg_line_length": 36.505494505494504,
"alnum_prop": 0.658639373871162,
"repo_name": "mlperf/training_results_v0.5",
"id": "999a240f8101596543f2ca04e6219b1741604f70",
"size": "3951",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "v0.5.0/google/cloud_v2.8/gnmt-tpuv2-8/code/gnmt/model/t2t/tensor2tensor/utils/bleu_hook_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "5720"
},
{
"name": "C++",
"bytes": "1288180"
},
{
"name": "CMake",
"bytes": "40880"
},
{
"name": "CSS",
"bytes": "32420"
},
{
"name": "Cuda",
"bytes": "1362093"
},
{
"name": "Dockerfile",
"bytes": "19488"
},
{
"name": "Go",
"bytes": "1088660"
},
{
"name": "HTML",
"bytes": "19756888"
},
{
"name": "Java",
"bytes": "45405"
},
{
"name": "JavaScript",
"bytes": "302838"
},
{
"name": "Jupyter Notebook",
"bytes": "9104667"
},
{
"name": "Lua",
"bytes": "4430"
},
{
"name": "Makefile",
"bytes": "3652"
},
{
"name": "Python",
"bytes": "31508548"
},
{
"name": "Scala",
"bytes": "106211"
},
{
"name": "Shell",
"bytes": "409745"
}
],
"symlink_target": ""
}
|
"""Constants for the ezviz integration."""
DOMAIN = "ezviz"
MANUFACTURER = "Ezviz"

# Configuration
ATTR_SERIAL = "serial"
CONF_CAMERAS = "cameras"
CONF_FFMPEG_ARGUMENTS = "ffmpeg_arguments"
ATTR_HOME = "HOME_MODE"
ATTR_AWAY = "AWAY_MODE"
ATTR_TYPE_CLOUD = "EZVIZ_CLOUD_ACCOUNT"
ATTR_TYPE_CAMERA = "CAMERA_ACCOUNT"

# Services data
DIR_UP = "up"
DIR_DOWN = "down"
DIR_LEFT = "left"
DIR_RIGHT = "right"
ATTR_ENABLE = "enable"
ATTR_DIRECTION = "direction"
ATTR_SPEED = "speed"
ATTR_LEVEL = "level"
ATTR_TYPE = "type_value"

# Service names
SERVICE_PTZ = "ptz"
# NOTE(review): "TRIGER" is a typo, but this constant name is imported
# elsewhere; renaming it would be a breaking change.
SERVICE_ALARM_TRIGER = "sound_alarm"
SERVICE_WAKE_DEVICE = "wake_device"
SERVICE_ALARM_SOUND = "alarm_sound"
SERVICE_DETECTION_SENSITIVITY = "set_alarm_detection_sensibility"

# Defaults
EU_URL = "apiieu.ezvizlife.com"
RUSSIA_URL = "apirus.ezvizru.com"
DEFAULT_CAMERA_USERNAME = "admin"
DEFAULT_RTSP_PORT = 554
DEFAULT_TIMEOUT = 25
DEFAULT_FFMPEG_ARGUMENTS = ""

# Data
DATA_COORDINATOR = "coordinator"
DATA_UNDO_UPDATE_LISTENER = "undo_update_listener"
|
{
"content_hash": "1ee8e3a08d820ca7ffb4dc4f9aac2428",
"timestamp": "",
"source": "github",
"line_count": 43,
"max_line_length": 65,
"avg_line_length": 23.558139534883722,
"alnum_prop": 0.7265547877591313,
"repo_name": "Danielhiversen/home-assistant",
"id": "ec1471d8bc4cd1cdc28c082006b8046879968ea9",
"size": "1013",
"binary": false,
"copies": "7",
"ref": "refs/heads/dev",
"path": "homeassistant/components/ezviz/const.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2443"
},
{
"name": "Python",
"bytes": "36870185"
},
{
"name": "Shell",
"bytes": "4910"
}
],
"symlink_target": ""
}
|
"""
Support for LiteJet switch.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/switch.litejet/
"""
import logging
import homeassistant.components.litejet as litejet
from homeassistant.components.switch import SwitchDevice
# Home Assistant component dependency: the litejet hub must be set up first.
DEPENDENCIES = ['litejet']

# State-attribute key exposing the LiteJet switch number.
ATTR_NUMBER = 'number'

_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_devices, discovery_info=None):
    """Set up the LiteJet switch platform."""
    system = hass.data['litejet_system']
    switches = []
    for index in system.button_switches():
        switch_name = system.get_switch_name(index)
        # Skip switches the user configured to be ignored.
        if litejet.is_ignored(hass, switch_name):
            continue
        switches.append(LiteJetSwitch(hass, system, index, switch_name))
    add_devices(switches, True)
class LiteJetSwitch(SwitchDevice):
    """Representation of a single LiteJet switch."""

    def __init__(self, hass, lj, i, name):
        """Initialize a LiteJet switch."""
        self._hass = hass
        self._lj = lj
        self._index = i
        self._name = name
        self._state = False
        # Mirror physical press/release events into Home Assistant state.
        lj.on_switch_pressed(i, self._on_switch_pressed)
        lj.on_switch_released(i, self._on_switch_released)

    def _on_switch_pressed(self):
        _LOGGER.debug("Updating pressed for %s", self._name)
        self._state = True
        self.schedule_update_ha_state()

    def _on_switch_released(self):
        _LOGGER.debug("Updating released for %s", self._name)
        self._state = False
        self.schedule_update_ha_state()

    @property
    def name(self):
        """Return the name of the switch."""
        return self._name

    @property
    def is_on(self):
        """Return if the switch is pressed."""
        return self._state

    @property
    def should_poll(self):
        """Return that polling is not necessary."""
        # State is pushed by the press/release callbacks above.
        return False

    @property
    def device_state_attributes(self):
        """Return the device-specific state attributes."""
        return {ATTR_NUMBER: self._index}

    def turn_on(self, **kwargs):
        """Press the switch."""
        self._lj.press_switch(self._index)

    def turn_off(self, **kwargs):
        """Release the switch."""
        self._lj.release_switch(self._index)
|
{
"content_hash": "477fdd051e1e633fa69c1b7ece0e5778",
"timestamp": "",
"source": "github",
"line_count": 83,
"max_line_length": 74,
"avg_line_length": 27.301204819277107,
"alnum_prop": 0.6213592233009708,
"repo_name": "LinuxChristian/home-assistant",
"id": "1e7c46733ad431cf90c2f9015d474e10f1ca869b",
"size": "2266",
"binary": false,
"copies": "12",
"ref": "refs/heads/dev",
"path": "homeassistant/components/switch/litejet.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "13788"
},
{
"name": "HTML",
"bytes": "1733802"
},
{
"name": "JavaScript",
"bytes": "15192"
},
{
"name": "Python",
"bytes": "7415265"
},
{
"name": "Ruby",
"bytes": "517"
},
{
"name": "Shell",
"bytes": "15154"
}
],
"symlink_target": ""
}
|
from setuptools import setup
# Build the long description from the README plus the changelog.
# BUGFIX: the originals used open(...).read() without closing, leaking the
# file handles (ResourceWarning); context managers close them deterministically.
with open('README.rst') as readme_file:
    readme = readme_file.read()
with open('CHANGES.rst') as history_file:
    history = history_file.read().replace('.. :changelog:', '')

setup(
    name='sqlalchemy-redshift',
    version='0.2.1.dev0',
    description='Amazon Redshift Dialect for sqlalchemy',
    long_description=readme + '\n\n' + history,
    author='Matt George',
    author_email='mgeorge@gmail.com',
    maintainer='Thomas Grainger',
    maintainer_email='sqlalchemy-redshift@graingert.co.uk',
    license="MIT",
    url='https://github.com/graingert/redshift_sqlalchemy',
    packages=['redshift_sqlalchemy'],
    # Ship the Redshift SSL CA bundle alongside the code.
    package_data={'redshift_sqlalchemy': ['redshift-ssl-ca-cert.pem']},
    install_requires=[
        'psycopg2>=2.5',
        # requires sqlalchemy.sql.base.DialectKWArgs.dialect_options, new in
        # version 0.9.2
        'SQLAlchemy>=0.9.2',
    ],
    classifiers=[
        "Development Status :: 4 - Beta",
        "Environment :: Console",
        "Intended Audience :: Developers",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
    ],
    # Register the dialect entry points so SQLAlchemy can discover it by URL.
    entry_points={
        'sqlalchemy.dialects': [
            'redshift = redshift_sqlalchemy.dialect:RedshiftDialect',
            'redshift.psycopg2 = redshift_sqlalchemy.dialect:RedshiftDialect',
        ]
    },
)
|
{
"content_hash": "c46f560198c929729254dd14aec49c2b",
"timestamp": "",
"source": "github",
"line_count": 39,
"max_line_length": 78,
"avg_line_length": 34.35897435897436,
"alnum_prop": 0.6283582089552239,
"repo_name": "jklukas/redshift_sqlalchemy",
"id": "df88571634b742173d2143e8cc6126ef011e1672",
"size": "1340",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "80348"
}
],
"symlink_target": ""
}
|
import goal
import challenge
import res_users
import badge
|
{
"content_hash": "547745e36b86e718c6fe4f5ad77591eb",
"timestamp": "",
"source": "github",
"line_count": 4,
"max_line_length": 16,
"avg_line_length": 14.75,
"alnum_prop": 0.847457627118644,
"repo_name": "vileopratama/vitech",
"id": "0b9350c82755d0a85e5fea6640557e1b23384f36",
"size": "159",
"binary": false,
"copies": "48",
"ref": "refs/heads/master",
"path": "src/addons/gamification/models/__init__.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "9611"
},
{
"name": "CSS",
"bytes": "2125999"
},
{
"name": "HTML",
"bytes": "252393"
},
{
"name": "Java",
"bytes": "1840167"
},
{
"name": "JavaScript",
"bytes": "6176224"
},
{
"name": "Makefile",
"bytes": "19072"
},
{
"name": "Mako",
"bytes": "7659"
},
{
"name": "NSIS",
"bytes": "16782"
},
{
"name": "Python",
"bytes": "9438805"
},
{
"name": "Ruby",
"bytes": "220"
},
{
"name": "Shell",
"bytes": "22312"
},
{
"name": "Vim script",
"bytes": "406"
},
{
"name": "XSLT",
"bytes": "11489"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from moto.core.responses import BaseResponse
from moto.ec2.utils import filters_from_querystring
def process_rules_from_querystring(querystring):
    """Extract security-group rule parameters from an EC2 query string.

    Args:
        querystring: mapping of parameter name -> list of values, as parsed
            from the request.

    Returns:
        Tuple of (group_name_or_id, ip_protocol, from_port, to_port,
        ip_ranges, source_groups, source_group_ids).
    """
    try:
        group_name_or_id = querystring.get('GroupName')[0]
    except (TypeError, IndexError):
        # BUGFIX: was a bare `except:` that swallowed every exception.
        # 'GroupName' absent -> get() returns None -> TypeError on [0];
        # present but empty -> IndexError. Fall back to 'GroupId'.
        group_name_or_id = querystring.get('GroupId')[0]
    ip_protocol = querystring.get('IpPermissions.1.IpProtocol', [None])[0]
    from_port = querystring.get('IpPermissions.1.FromPort', [None])[0]
    to_port = querystring.get('IpPermissions.1.ToPort', [None])[0]
    ip_ranges = []
    for key, value in querystring.items():
        if 'IpPermissions.1.IpRanges' in key:
            ip_ranges.append(value[0])
    source_groups = []
    source_group_ids = []
    for key, value in querystring.items():
        # Check the more specific GroupId key first; its name also contains
        # the generic 'IpPermissions.1.Groups' prefix.
        if 'IpPermissions.1.Groups.1.GroupId' in key:
            source_group_ids.append(value[0])
        elif 'IpPermissions.1.Groups' in key:
            source_groups.append(value[0])
    return (group_name_or_id, ip_protocol, from_port, to_port, ip_ranges, source_groups, source_group_ids)
class SecurityGroups(BaseResponse):
    """EC2 query-API handlers for security-group actions.

    Each handler reads parameters from ``self.querystring``, delegates to
    ``self.ec2_backend`` and returns an XML response body.
    """

    def authorize_security_group_egress(self):
        raise NotImplementedError('SecurityGroups.authorize_security_group_egress is not yet implemented')

    def authorize_security_group_ingress(self):
        """Add the ingress rule described by the query string to the group."""
        self.ec2_backend.authorize_security_group_ingress(*process_rules_from_querystring(self.querystring))
        return AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE

    def create_security_group(self):
        """Create a security group, optionally inside a VPC."""
        name = self.querystring.get('GroupName')[0]
        description = self.querystring.get('GroupDescription', [None])[0]
        vpc_id = self.querystring.get("VpcId", [None])[0]
        group = self.ec2_backend.create_security_group(name, description, vpc_id=vpc_id)
        template = self.response_template(CREATE_SECURITY_GROUP_RESPONSE)
        return template.render(group=group)

    def delete_security_group(self):
        """Delete a group referenced either by name or by id."""
        # TODO this should raise an error if there are instances in the group. See http://docs.aws.amazon.com/AWSEC2/latest/APIReference/ApiReference-query-DeleteSecurityGroup.html
        name = self.querystring.get('GroupName')
        sg_id = self.querystring.get('GroupId')
        if name:
            self.ec2_backend.delete_security_group(name[0])
        elif sg_id:
            self.ec2_backend.delete_security_group(group_id=sg_id[0])
        return DELETE_GROUP_RESPONSE

    def describe_security_groups(self):
        """List groups, filtered by names, ids and/or generic EC2 filters."""
        groupnames = self._get_multi_param("GroupName")
        group_ids = self._get_multi_param("GroupId")
        filters = filters_from_querystring(self.querystring)
        groups = self.ec2_backend.describe_security_groups(
            group_ids=group_ids,
            groupnames=groupnames,
            filters=filters
        )
        template = self.response_template(DESCRIBE_SECURITY_GROUPS_RESPONSE)
        return template.render(groups=groups)

    def revoke_security_group_egress(self):
        raise NotImplementedError('SecurityGroups.revoke_security_group_egress is not yet implemented')

    def revoke_security_group_ingress(self):
        """Remove the ingress rule described by the query string."""
        self.ec2_backend.revoke_security_group_ingress(*process_rules_from_querystring(self.querystring))
        return REVOKE_SECURITY_GROUP_INGRESS_REPONSE
# Jinja2 XML templates mirroring the EC2 2012-12-01 API responses.
# NOTE(review): the *_REPONSE names are misspelled but referenced by the
# SecurityGroups handlers above; renaming would require a coordinated change.

CREATE_SECURITY_GROUP_RESPONSE = """<CreateSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
<groupId>{{ group.id }}</groupId>
</CreateSecurityGroupResponse>"""

DELETE_GROUP_RESPONSE = """<DeleteSecurityGroupResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</DeleteSecurityGroupResponse>"""

DESCRIBE_SECURITY_GROUPS_RESPONSE = """<DescribeSecurityGroupsResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<securityGroupInfo>
{% for group in groups %}
<item>
<ownerId>111122223333</ownerId>
<groupId>{{ group.id }}</groupId>
<groupName>{{ group.name }}</groupName>
<groupDescription>{{ group.description }}</groupDescription>
{% if group.vpc_id %}
<vpcId>{{ group.vpc_id }}</vpcId>
{% endif %}
<ipPermissions>
{% for rule in group.ingress_rules %}
<item>
<ipProtocol>{{ rule.ip_protocol }}</ipProtocol>
<fromPort>{{ rule.from_port }}</fromPort>
<toPort>{{ rule.to_port }}</toPort>
<groups>
{% for source_group in rule.source_groups %}
<item>
<userId>111122223333</userId>
<groupId>{{ source_group.id }}</groupId>
<groupName>{{ source_group.name }}</groupName>
</item>
{% endfor %}
</groups>
<ipRanges>
{% for ip_range in rule.ip_ranges %}
<item>
<cidrIp>{{ ip_range }}</cidrIp>
</item>
{% endfor %}
</ipRanges>
</item>
{% endfor %}
</ipPermissions>
<ipPermissionsEgress/>
<tagSet>
{% for tag in group.get_tags() %}
<item>
<resourceId>{{ tag.resource_id }}</resourceId>
<resourceType>{{ tag.resource_type }}</resourceType>
<key>{{ tag.key }}</key>
<value>{{ tag.value }}</value>
</item>
{% endfor %}
</tagSet>
</item>
{% endfor %}
</securityGroupInfo>
</DescribeSecurityGroupsResponse>"""

AUTHORIZE_SECURITY_GROUP_INGRESS_REPONSE = """<AuthorizeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</AuthorizeSecurityGroupIngressResponse>"""

REVOKE_SECURITY_GROUP_INGRESS_REPONSE = """<RevokeSecurityGroupIngressResponse xmlns="http://ec2.amazonaws.com/doc/2012-12-01/">
<requestId>59dbff89-35bd-4eac-99ed-be587EXAMPLE</requestId>
<return>true</return>
</RevokeSecurityGroupIngressResponse>"""
|
{
"content_hash": "830cfea49afa2e5da716ab24ca7c3647",
"timestamp": "",
"source": "github",
"line_count": 155,
"max_line_length": 180,
"avg_line_length": 42.619354838709675,
"alnum_prop": 0.6023312140478353,
"repo_name": "EarthmanT/moto",
"id": "9a9aaafd9fa9f68e40bd86cc35f44bafaeb1a8ef",
"size": "6606",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "moto/ec2/responses/security_groups.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "203"
},
{
"name": "Python",
"bytes": "1772707"
}
],
"symlink_target": ""
}
|
"""
Plugin responsible for post-installation configuration
"""
from packstack.installer import utils
from packstack.installer import basedefs
# ------------- Postscript Packstack Plugin Initialization --------------
# Plugin display name, plain and colorized for console output.
PLUGIN_NAME = "Postscript"
PLUGIN_NAME_COLORED = utils.color_text(PLUGIN_NAME, 'blue')
def initConfig(controller):
    """Register the POSTSCRIPT parameter group (no parameters of its own)."""
    group = {
        "GROUP_NAME": "POSTSCRIPT",
        "DESCRIPTION": "POSTSCRIPT Config parameters",
        # The group is unconditionally active: the precondition always matches.
        "PRE_CONDITION": lambda x: 'yes',
        "PRE_CONDITION_MATCH": "yes",
        "POST_CONDITION": False,
        "POST_CONDITION_MATCH": True,
    }
    controller.addGroup(group, [])
def initSequences(controller):
    """Register the post-install sequence.

    The Tempest step is included only when Tempest was both provisioned and
    requested to run.
    """
    config = controller.CONF
    tempest_enabled = (config['CONFIG_PROVISION_TEMPEST'] == "y"
                       and config['CONFIG_RUN_TEMPEST'] == "y")
    steps = []
    if tempest_enabled:
        steps.append({'title': 'Running Tempest',
                      'functions': [run_tempest]})
    controller.addSequence("Running post install scripts", [], [], steps)
# -------------------------- step functions --------------------------
def run_tempest(config, messages):
    """Run the Tempest suite on the configured Tempest host over ScriptRunner.

    Remote test output is redirected into <DIR_LOG>/tempest.log.
    """
    logfile = basedefs.DIR_LOG + "/tempest.log"
    print("Running Tempest on %s" % config['CONFIG_TEMPEST_HOST'])
    server = utils.ScriptRunner(config['CONFIG_TEMPEST_HOST'])
    server.append('pushd /var/lib/tempest')
    # Run the selected tests via tox with 2 workers, capturing output to the log.
    server.append('tox -eall -- --concurrency=2 %s > %s'
                  % (config['CONFIG_RUN_TEMPEST_TESTS'], logfile))
    server.append('popd')
    server.execute()
|
{
"content_hash": "c99abc5ea9d407dfe2f578f480de81c4",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 73,
"avg_line_length": 32.8125,
"alnum_prop": 0.594920634920635,
"repo_name": "imcsk8/packstack",
"id": "eb0e88a2ee20ca635bc58fdd904e1d9acc879efb",
"size": "2145",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packstack/plugins/postscript_951.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Puppet",
"bytes": "54266"
},
{
"name": "Python",
"bytes": "392836"
},
{
"name": "Ruby",
"bytes": "15291"
},
{
"name": "Shell",
"bytes": "551"
}
],
"symlink_target": ""
}
|
from __future__ import (nested_scopes, generators, division, absolute_import, with_statement,
print_function, unicode_literals)
import os
import shutil
import tempfile
import unittest2 as unittest
from twitter.common.collections import OrderedSet
from pants.base.build_file import BuildFile
from pants.util.dirutil import safe_mkdir, touch
class BuildFileTest(unittest.TestCase):
  """Tests BuildFile discovery (siblings/family/ancestors/descendants) against
  a scratch directory tree created in setUp."""

  @classmethod
  def makedirs(cls, path):
    # Create a directory under the scratch build root.
    safe_mkdir(os.path.join(BuildFileTest.root_dir, path))

  @classmethod
  def touch(cls, path):
    # Create an empty file under the scratch build root.
    touch(os.path.join(BuildFileTest.root_dir, path))

  @classmethod
  def buildfile(cls, path):
    # Construct a BuildFile rooted at the scratch build root.
    return BuildFile(BuildFileTest.root_dir, path)

  def setUp(self):
    BuildFileTest.base_dir = tempfile.mkdtemp()
    # Seed a BUILD outside the build root that should not be detected
    touch(os.path.join(BuildFileTest.base_dir, 'BUILD'))
    BuildFileTest.root_dir = os.path.join(BuildFileTest.base_dir, 'root')
    BuildFileTest.touch('grandparent/parent/BUILD')
    BuildFileTest.touch('grandparent/parent/BUILD.twitter')
    # Tricky! This is a directory
    BuildFileTest.makedirs('grandparent/parent/BUILD.dir')
    BuildFileTest.makedirs('grandparent/BUILD')
    BuildFileTest.touch('BUILD')
    BuildFileTest.touch('BUILD.twitter')
    BuildFileTest.touch('grandparent/parent/child1/BUILD')
    BuildFileTest.touch('grandparent/parent/child1/BUILD.twitter')
    BuildFileTest.touch('grandparent/parent/child2/child3/BUILD')
    BuildFileTest.makedirs('grandparent/parent/child2/BUILD')
    BuildFileTest.makedirs('grandparent/parent/child4')
    BuildFileTest.touch('grandparent/parent/child5/BUILD')
    BuildFileTest.makedirs('path-that-does-exist')
    BuildFileTest.touch('path-that-does-exist/BUILD.invalid.suffix')
    self.buildfile = BuildFileTest.buildfile('grandparent/parent/BUILD')

  @classmethod
  def tearDownClass(cls):
    shutil.rmtree(BuildFileTest.root_dir)

  def testSiblings(self):
    """Siblings are the other BUILD* files in the same directory."""
    buildfile = BuildFileTest.buildfile('grandparent/parent/BUILD.twitter')
    self.assertEquals(OrderedSet([buildfile]), OrderedSet(self.buildfile.siblings()))
    self.assertEquals(OrderedSet([self.buildfile]), OrderedSet(buildfile.siblings()))
    buildfile = BuildFileTest.buildfile('grandparent/parent/child2/child3/BUILD')
    self.assertEquals(OrderedSet(), OrderedSet(buildfile.siblings()))

  def testFamily(self):
    """Family includes the file itself plus its siblings."""
    self.assertEquals(OrderedSet([
        BuildFileTest.buildfile('grandparent/parent/BUILD'),
        BuildFileTest.buildfile('grandparent/parent/BUILD.twitter'),
    ]), self.buildfile.family())
    buildfile = BuildFileTest.buildfile('grandparent/parent/child2/child3/BUILD')
    self.assertEquals(OrderedSet([buildfile]), buildfile.family())

  def testAncestors(self):
    self.assertEquals(OrderedSet([
        BuildFileTest.buildfile('BUILD'),
        BuildFileTest.buildfile('BUILD.twitter'),
    ]), self.buildfile.ancestors())

  def testDescendants(self):
    self.assertEquals(OrderedSet([
        BuildFileTest.buildfile('grandparent/parent/child1/BUILD'),
        BuildFileTest.buildfile('grandparent/parent/child1/BUILD.twitter'),
        BuildFileTest.buildfile('grandparent/parent/child2/child3/BUILD'),
        BuildFileTest.buildfile('grandparent/parent/child5'),
    ]), self.buildfile.descendants())

  def testMustExistFalse(self):
    buildfile = BuildFile(BuildFileTest.root_dir, "path-that-does-not-exist/BUILD", must_exist=False)
    self.assertEquals(OrderedSet([buildfile]), buildfile.family())

  def testMustExistTrue(self):
    with self.assertRaises(BuildFile.MissingBuildFileError):
      BuildFile(BuildFileTest.root_dir, "path-that-does-not-exist/BUILD", must_exist=True)
    with self.assertRaises(BuildFile.MissingBuildFileError):
      BuildFile(BuildFileTest.root_dir, "path-that-does-exist/BUILD", must_exist=True)
    with self.assertRaises(BuildFile.MissingBuildFileError):
      BuildFile(BuildFileTest.root_dir, "path-that-does-exist/BUILD.invalid.suffix", must_exist=True)

  def testSuffixOnly(self):
    """Directories containing only suffixed BUILD.* files still work."""
    BuildFileTest.makedirs('suffix-test')
    BuildFileTest.touch('suffix-test/BUILD.suffix')
    BuildFileTest.touch('suffix-test/BUILD.suffix2')
    BuildFileTest.makedirs('suffix-test/child')
    BuildFileTest.touch('suffix-test/child/BUILD.suffix3')
    buildfile = BuildFileTest.buildfile('suffix-test/BUILD.suffix')
    self.assertEquals(OrderedSet([BuildFileTest.buildfile('suffix-test/BUILD.suffix2')]),
                      OrderedSet(buildfile.siblings()))
    self.assertEquals(OrderedSet([BuildFileTest.buildfile('suffix-test/BUILD.suffix'),
                                  BuildFileTest.buildfile('suffix-test/BUILD.suffix2')]),
                      buildfile.family())
    self.assertEquals(OrderedSet([BuildFileTest.buildfile('suffix-test/child/BUILD.suffix3')]),
                      buildfile.descendants())

  def testAncestorsSuffix1(self):
    BuildFileTest.makedirs('suffix-test1/parent')
    BuildFileTest.touch('suffix-test1/parent/BUILD.suffix')
    BuildFileTest.touch('suffix-test1/BUILD')
    buildfile = BuildFileTest.buildfile('suffix-test1/parent/BUILD.suffix')
    self.assertEquals(OrderedSet([
        BuildFileTest.buildfile('suffix-test1/BUILD'),
        BuildFileTest.buildfile('BUILD'),
        BuildFileTest.buildfile('BUILD.twitter')]),
        buildfile.ancestors())

  def testAncestorsSuffix2(self):
    BuildFileTest.makedirs('suffix-test2')
    BuildFileTest.makedirs('suffix-test2/subdir')
    BuildFileTest.touch('suffix-test2/subdir/BUILD.foo')
    BuildFileTest.touch('suffix-test2/BUILD.bar')
    buildfile = BuildFileTest.buildfile('suffix-test2/subdir/BUILD.foo')
    self.assertEquals(OrderedSet([
        BuildFileTest.buildfile('suffix-test2/BUILD.bar'),
        BuildFileTest.buildfile('BUILD'),
        BuildFileTest.buildfile('BUILD.twitter')]),
        buildfile.ancestors())

  def test_buildfile_with_dir_must_exist_false(self):
    # We should be able to create a BuildFile against a dir called BUILD if must_exist is false.
    # This is used in what_changed for example.
    buildfile = BuildFile(BuildFileTest.root_dir, 'grandparent/BUILD', must_exist=False)
    self.assertFalse(buildfile.exists())

  def test_buildfile_with_dir_must_exist_true(self):
    # We should NOT be able to create a BuildFile instance against a dir called BUILD
    # in the default case.
    with self.assertRaises(BuildFile.MissingBuildFileError):
      BuildFile(BuildFileTest.root_dir, 'grandparent/BUILD')

  def test_directory_called_build_skipped(self):
    # Ensure the buildfiles found do not include grandparent/BUILD since it is a dir.
    buildfiles = BuildFile.scan_buildfiles(os.path.join(BuildFileTest.root_dir, 'grandparent'))
    self.assertEquals(OrderedSet([
        BuildFileTest.buildfile('grandparent/parent/BUILD'),
        BuildFileTest.buildfile('grandparent/parent/BUILD.twitter'),
        BuildFileTest.buildfile('grandparent/parent/child1/BUILD'),
        BuildFileTest.buildfile('grandparent/parent/child1/BUILD.twitter'),
        BuildFileTest.buildfile('grandparent/parent/child2/child3/BUILD'),
        BuildFileTest.buildfile('grandparent/parent/child5/BUILD'),
    ]), buildfiles)

  def test_scan_buildfiles_exclude(self):
    buildfiles = BuildFile.scan_buildfiles(
        BuildFileTest.root_dir, '', spec_excludes=[
            os.path.join(BuildFileTest.root_dir, 'grandparent/parent/child1'),
            os.path.join(BuildFileTest.root_dir, 'grandparent/parent/child2')
        ])
    self.assertEquals([BuildFileTest.buildfile('BUILD'),
                       BuildFileTest.buildfile('/BUILD.twitter'),
                       BuildFileTest.buildfile('/grandparent/parent/BUILD'),
                       BuildFileTest.buildfile('/grandparent/parent/BUILD.twitter'),
                       BuildFileTest.buildfile('/grandparent/parent/child5/BUILD'),
                       ],
                      buildfiles)

  def test_invalid_root_dir_error(self):
    BuildFileTest.touch('BUILD')
    with self.assertRaises(BuildFile.InvalidRootDirError):
      BuildFile('tmp', 'grandparent/BUILD')

  def test_exception_class_hierarchy(self):
    """Exception handling code depends on the fact that all exceptions from BuildFile are
    subclassed from the BuildFileError base class.
    """
    self.assertIsInstance(BuildFile.InvalidRootDirError(), BuildFile.BuildFileError)
    self.assertIsInstance(BuildFile.MissingBuildFileError(), BuildFile.BuildFileError)
|
{
"content_hash": "7fbcc2337da204381957a415b38e7673",
"timestamp": "",
"source": "github",
"line_count": 192,
"max_line_length": 101,
"avg_line_length": 43.911458333333336,
"alnum_prop": 0.7289763966314791,
"repo_name": "square/pants",
"id": "70556127f5e26c4be919587e493a9c0778214cbe",
"size": "8578",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tests/python/pants_test/base/test_build_file.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "AGS Script",
"bytes": "273"
},
{
"name": "CSS",
"bytes": "9347"
},
{
"name": "GAP",
"bytes": "4684"
},
{
"name": "Java",
"bytes": "46389"
},
{
"name": "JavaScript",
"bytes": "9523"
},
{
"name": "Python",
"bytes": "2250380"
},
{
"name": "Scala",
"bytes": "5517"
},
{
"name": "Shell",
"bytes": "29381"
},
{
"name": "Thrift",
"bytes": "1674"
}
],
"symlink_target": ""
}
|
from django.contrib import admin
from geotrek.core.models import (Datasource, Stake, Usage, Network, Trail,
Comfort,)
class DatasourceAdmin(admin.ModelAdmin):
    # Datasource admin: list/search by source and structure, filter by structure.
    list_display = ('source', 'structure')
    search_fields = ('source', 'structure')
    list_filter = ('structure',)
class StakeAdmin(admin.ModelAdmin):
    # Stake admin: list/search by stake and structure, filter by structure.
    list_display = ('stake', 'structure')
    search_fields = ('stake', 'structure')
    list_filter = ('structure',)
class UsageAdmin(admin.ModelAdmin):
    # Usage admin: list/search by usage and structure, filter by structure.
    list_display = ('usage', 'structure')
    search_fields = ('usage', 'structure')
    list_filter = ('structure',)
class NetworkAdmin(admin.ModelAdmin):
    # Network admin: list/search by network and structure, filter by structure.
    list_display = ('network', 'structure')
    search_fields = ('network', 'structure')
    list_filter = ('structure',)
class ComfortAdmin(admin.ModelAdmin):
    """Admin for Comfort: list/search by comfort and structure, filter by structure."""
    list_display = ('comfort', 'structure')
    search_fields = ('comfort', 'structure')
    list_filter = ('structure',)
# Register each core model with its dedicated ModelAdmin so they appear in
# the Django admin site.
admin.site.register(Datasource, DatasourceAdmin)
admin.site.register(Stake, StakeAdmin)
admin.site.register(Usage, UsageAdmin)
admin.site.register(Network, NetworkAdmin)
admin.site.register(Comfort, ComfortAdmin)
|
{
"content_hash": "23f3d8b0c3fb4da7af16c39a3ef31ec5",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 74,
"avg_line_length": 28.4390243902439,
"alnum_prop": 0.6783876500857633,
"repo_name": "camillemonchicourt/Geotrek",
"id": "00c443fe89f10cee5744185437d6556488920459",
"size": "1166",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "geotrek/core/admin.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "CSS",
"bytes": "5624"
},
{
"name": "JavaScript",
"bytes": "109098"
},
{
"name": "Makefile",
"bytes": "9360"
},
{
"name": "Python",
"bytes": "1109322"
},
{
"name": "Shell",
"bytes": "15170"
}
],
"symlink_target": ""
}
|
import rethinkdb as r
import os
# Connection and API credentials come from the environment so secrets stay
# out of source control.
conn = r.connect(host = 'localhost',
    port = int(os.environ['RETHINKDB_PORT']),
    db = 'hackathon',
    auth_key = str(os.environ['RETHINKDB_AUTH_KEY']))
from AfricasTalkingGateway import AfricasTalkingGateway, AfricasTalkingGatewayException
import logging
# Every send attempt is recorded in SMS.log.
logging.basicConfig(filename='SMS.log', level=logging.DEBUG)
username = str(os.environ['SMS_API_USER'])
apikey = str(os.environ['SMS_API_KEY'])
def sendText(to):
    """Send the hackathon reminder SMS to a single phone number.

    :param to: recipient phone number string as stored in the Attendee table.

    Gateway failures are logged rather than raised so one bad number does
    not abort the whole broadcast loop in the caller.
    """
    gateway = AfricasTalkingGateway(username, apikey)
    # Previous reminder text, kept for reference:
    #   "We are a day away from the USIU-hackathon; the very first that takes
    #   place this Saturday on the 14th of March. We hope to see you there!"
    message = "The USIU Hackathon is finally here :D. Registration starts at 8:00 AM. Happy Pi Day, Happy hacking!"
    try:
        # sendMessage performs the network call and is what raises the
        # gateway exception, so it must be inside the try block (previously
        # it was outside, letting failures propagate uncaught).
        recipients = gateway.sendMessage(to, message)
        for recipient in recipients:
            logging.info('number=%s;status=%s;messageId=%s;cost=%s'
                         % (recipient['number'], recipient['status'],
                            recipient['messageId'], recipient['cost']))
    except AfricasTalkingGatewayException as e:
        # Fixed copy-pasted log text: this records a send failure, not
        # database setup.
        logging.warning('Failed to send SMS to %s: %s' % (to, str(e)))
# Broadcast the reminder to every registered attendee's phone number.
for doc in r.table('Attendee').run(conn):
    sendText(doc['phone_number'])
|
{
"content_hash": "f521076a5a3cce28b5089f05a21c2f2f",
"timestamp": "",
"source": "github",
"line_count": 41,
"max_line_length": 115,
"avg_line_length": 32.21951219512195,
"alnum_prop": 0.6555639666919001,
"repo_name": "USIU-Hackathon/ss2015-hackathon",
"id": "b32b0ddbda6f02ac6480ec18e2e962b6879766ce",
"size": "1345",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "mailing/sendSms.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "37046"
},
{
"name": "Handlebars",
"bytes": "40430"
},
{
"name": "JavaScript",
"bytes": "30420"
},
{
"name": "Makefile",
"bytes": "48"
},
{
"name": "Python",
"bytes": "16572"
}
],
"symlink_target": ""
}
|
from django.conf.urls import patterns, include, url
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Project URL configuration. All routes are still the commented-out
# placeholders from the default Django project template, so no URLs are
# currently exposed.
urlpatterns = patterns('',
    # Examples:
    # url(r'^$', 'myweb.views.home', name='home'),
    # url(r'^myweb/', include('myweb.foo.urls')),
    # Uncomment the admin/doc line below to enable admin documentation:
    # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Uncomment the next line to enable the admin:
    # url(r'^admin/', include(admin.site.urls)),
)
|
{
"content_hash": "9c88d47137190960fb85e41b1eb37244",
"timestamp": "",
"source": "github",
"line_count": 17,
"max_line_length": 71,
"avg_line_length": 32.529411764705884,
"alnum_prop": 0.674502712477396,
"repo_name": "lz1988/django-web2015",
"id": "edd67be3979ffefa0b15d603562f3a0c7082ebe4",
"size": "553",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "myweb/myweb/urls.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "C",
"bytes": "797682"
},
{
"name": "CSS",
"bytes": "527578"
},
{
"name": "Emacs Lisp",
"bytes": "152779"
},
{
"name": "Groff",
"bytes": "61139"
},
{
"name": "HTML",
"bytes": "3184026"
},
{
"name": "JavaScript",
"bytes": "760809"
},
{
"name": "Python",
"bytes": "13157847"
},
{
"name": "Ruby",
"bytes": "1758"
},
{
"name": "Shell",
"bytes": "154036"
},
{
"name": "Smarty",
"bytes": "130"
},
{
"name": "Tcl",
"bytes": "2476"
},
{
"name": "Yacc",
"bytes": "7550"
}
],
"symlink_target": ""
}
|
import math
import sys
import unittest
from cupy import testing
@testing.gpu
class TestRanges(unittest.TestCase):
    """Checks cupy.arange / cupy.linspace against numpy.

    The numpy_cupy_* decorators run each test body twice (xp=numpy and
    xp=cupy) and compare the returned arrays.
    """
    _multiprocess_can_split_ = True
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_arange(self, xp, dtype):
        return xp.arange(10, dtype=dtype)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_arange2(self, xp, dtype):
        return xp.arange(5, 10, dtype=dtype)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_arange3(self, xp, dtype):
        return xp.arange(1, 11, 2, dtype=dtype)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_arange4(self, xp, dtype):
        # Negative step.
        return xp.arange(20, 2, -3, dtype=dtype)
    @testing.numpy_cupy_array_equal()
    def test_arange_no_dtype_int(self, xp):
        return xp.arange(1, 11, 2)
    @testing.numpy_cupy_array_equal()
    def test_arange_no_dtype_float(self, xp):
        return xp.arange(1.0, 11.0, 2.0)
    @testing.numpy_cupy_array_equal()
    def test_arange_negative_size(self, xp):
        # start > stop with positive step must yield an empty array.
        return xp.arange(3, 1)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace(self, xp, dtype):
        return xp.linspace(0, 10, 5, dtype=dtype)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace2(self, xp, dtype):
        # Descending interval.
        return xp.linspace(10, 0, 5, dtype=dtype)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace_zero_num(self, xp, dtype):
        return xp.linspace(0, 10, 0, dtype=dtype)
    @testing.with_requires('numpy>=1.10')
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace_zero_num_no_endopoint_with_retstep(self, xp, dtype):
        # With num=0 the step is undefined, so retstep must return NaN.
        x, step = xp.linspace(0, 10, 0, dtype=dtype, endpoint=False,
                              retstep=True)
        self.assertTrue(math.isnan(step))
        return x
    @testing.with_requires('numpy>=1.10')
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace_one_num_no_endopoint_with_retstep(self, xp, dtype):
        x, step = xp.linspace(0, 10, 1, dtype=dtype, endpoint=False,
                              retstep=True)
        self.assertTrue(math.isnan(step))
        return x
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace_one_num(self, xp, dtype):
        return xp.linspace(0, 10, 1, dtype=dtype)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace_no_endpoint(self, xp, dtype):
        return xp.linspace(0, 10, 5, dtype=dtype, endpoint=False)
    @testing.for_all_dtypes(no_bool=True)
    @testing.numpy_cupy_array_equal()
    def test_linspace_with_retstep(self, xp, dtype):
        x, step = xp.linspace(0, 10, 5, dtype=dtype, retstep=True)
        self.assertEqual(step, 2.5)
        return x
    @testing.numpy_cupy_allclose()
    def test_linspace_no_dtype_int(self, xp):
        return xp.linspace(0, 10)
    @testing.numpy_cupy_allclose()
    def test_linspace_no_dtype_float(self, xp):
        return xp.linspace(0.0, 10.0)
    @testing.numpy_cupy_allclose()
    def test_linspace_float_args_with_int_dtype(self, xp):
        # Float endpoints truncated into an int dtype.
        return xp.linspace(0.1, 9.1, 11, dtype=int)
    @testing.with_requires('numpy>=1.10')
    @testing.numpy_cupy_raises()
    def test_linspace_neg_num(self, xp):
        return xp.linspace(0, 10, -1)
    @testing.numpy_cupy_allclose()
    def test_linspace_float_overflow(self, xp):
        return xp.linspace(0., sys.float_info.max / 5, 10, dtype=float)
    @testing.with_requires('numpy>=1.10')
    @testing.numpy_cupy_array_equal()
    def test_linspace_float_underflow(self, xp):
        # find minimum subnormal number
        x = sys.float_info.min
        while x / 2 > 0:
            x /= 2
        return xp.linspace(0., x, 10, dtype=float)
|
{
"content_hash": "b66d3ef567ee61c9a56c47d63a9a4b00",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 74,
"avg_line_length": 33.02439024390244,
"alnum_prop": 0.6368783850320039,
"repo_name": "truongdq/chainer",
"id": "4bf60224d25aee3cc448a6ae270c12b5cbcda76a",
"size": "4062",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "tests/cupy_tests/creation_tests/test_ranges.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "3366"
},
{
"name": "C",
"bytes": "18613"
},
{
"name": "Cuda",
"bytes": "6118"
},
{
"name": "PowerShell",
"bytes": "7195"
},
{
"name": "Python",
"bytes": "1259487"
}
],
"symlink_target": ""
}
|
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: redefines the choices/max_length of
    # Membership.variant (includes the 'alacarte' option).
    dependencies = [
        ('workshops', '0248_membership_rolled_to_membership'),
    ]
    operations = [
        migrations.AlterField(
            model_name='membership',
            name='variant',
            field=models.CharField(choices=[('partner', 'Partner'), ('affiliate', 'Affiliate'), ('sponsor', 'Sponsor'), ('bronze', 'Bronze'), ('silver', 'Silver'), ('gold', 'Gold'), ('platinum', 'Platinum'), ('alacarte', 'A la carte')], max_length=40),
        ),
    ]
|
{
"content_hash": "8611fe430cd930c7c07096a1f0a1fced",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 252,
"avg_line_length": 34.9375,
"alnum_prop": 0.5867620751341681,
"repo_name": "pbanaszkiewicz/amy",
"id": "a5825fa995e86ed3c3fe387fc5ec0c5ae33b5ccf",
"size": "609",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "amy/workshops/migrations/0249_auto_20210709_1542.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "5850"
},
{
"name": "Dockerfile",
"bytes": "1034"
},
{
"name": "HTML",
"bytes": "313293"
},
{
"name": "JavaScript",
"bytes": "39427"
},
{
"name": "Makefile",
"bytes": "1780"
},
{
"name": "Python",
"bytes": "2707815"
}
],
"symlink_target": ""
}
|
"""
Pagination fields
"""
# pylint: disable=no-init, too-few-public-methods, no-self-use
from rest_framework_3 import serializers
from rest_framework_3 import pagination
from rest_framework_3.views import Response
from rest_framework_3.templatetags.rest_framework_3 import replace_query_param
# DRF 2.4.X compatibility.
# Older DRF releases have no serializers.ReadOnlyField; fall back to the
# generic serializers.Field in that case.
ReadOnlyField = getattr(serializers, 'ReadOnlyField', serializers.Field)
class NextPageLinkField(ReadOnlyField):
    """
    Field that returns a link to the next page in paginated results.
    """
    page_field = 'page'

    def to_representation(self, value):
        """Return an absolute URL for the next page, or None on the last page.

        *value* is expected to provide has_next()/next_page_number().
        """
        if not value.has_next():
            return None
        page = value.next_page_number()
        request = self.context.get('request')
        # Conditional expression instead of the fragile `a and b or c` idiom
        # (which would misfire if build_absolute_uri() returned a falsy value).
        url = request.build_absolute_uri() if request else ''
        return replace_query_param(url, self.page_field, page)
class NextPageField(ReadOnlyField):
    """
    Field that returns the number of the next page in paginated results,
    or None when on the last page.
    """
    page_field = 'page'
    def to_representation(self, value):
        if not value.has_next():
            return None
        return value.next_page_number()
class PreviousPageLinkField(ReadOnlyField):
    """
    Field that returns a link to the previous page in paginated results.
    """
    page_field = 'page'

    def to_representation(self, value):
        """Return an absolute URL for the previous page, or None on page one.

        *value* is expected to provide has_previous()/previous_page_number().
        """
        if not value.has_previous():
            return None
        page = value.previous_page_number()
        request = self.context.get('request')
        # Conditional expression instead of the fragile `a and b or c` idiom
        # (which would misfire if build_absolute_uri() returned a falsy value).
        url = request.build_absolute_uri() if request else ''
        return replace_query_param(url, self.page_field, page)
class PreviousPageField(ReadOnlyField):
    """
    Field that returns the number of the previous page in paginated results,
    or None when on the first page.
    """
    page_field = 'page'
    def to_representation(self, value):
        if not value.has_previous():
            return None
        return value.previous_page_number()
class PageField(ReadOnlyField):
    """
    Field that returns the current page number in paginated results.
    """
    page_field = 'page'
    def to_representation(self, value):
        # *value* is presumably a paginator page object exposing `.number`
        # (the 1-based page index) -- confirm against callers.
        return value.number
# compatibility for DRF 3.0 and older
try:
    BasePagination = pagination.PageNumberPagination
except AttributeError:
    # Attribute lookup is what fails on older DRF releases, so catch only
    # AttributeError instead of a bare except that hides real errors.
    BasePagination = pagination.BasePaginationSerializer
class PaginationSerializer(BasePagination):
    """
    Pagination serializer.

    Serializes a paginator page into next/previous page numbers and links
    plus the total item count and total page count.
    """
    next = NextPageField(source='*')
    next_link = NextPageLinkField(source='*')
    page = PageField(source='*')
    previous = PreviousPageField(source='*')
    previous_link = PreviousPageLinkField(source='*')
    count = ReadOnlyField(source='paginator.count')
    total = ReadOnlyField(source='paginator.num_pages')
class EmberPaginationSerializer(PaginationSerializer):
    """
    Backwards compatibility for name change
    """
    # Deprecated alias kept so existing imports keep working.
    pass
class PageNumberPagination(BasePagination):
    """
    An Ember (soon to be json-api) compatible pagination format
    """
    def get_paginated_response(self, data):
        """Wrap *data* in the flat Ember-style pagination envelope.

        Returns a Response containing the results plus next/previous page
        numbers, absolute links, item count and total page count.
        """
        # Locals renamed: `next`/`previous` shadowed Python builtins.
        # The response keys are unchanged.
        previous_page = None
        next_page = None
        if self.page.has_previous():
            previous_page = self.page.previous_page_number()
        if self.page.has_next():
            next_page = self.page.next_page_number()
        return Response({
            'results': data,
            'next': next_page,
            'next_link': self.get_next_link(),
            'page': self.page.number,
            'previous': previous_page,
            'previous_link': self.get_previous_link(),
            'count': self.page.paginator.count,
            'total': self.page.paginator.num_pages,
        })
|
{
"content_hash": "845d4fea7df0a759f9dd7e26ad06f491",
"timestamp": "",
"source": "github",
"line_count": 129,
"max_line_length": 78,
"avg_line_length": 27.82170542635659,
"alnum_prop": 0.6486486486486487,
"repo_name": "coUrbanize/rest_framework_ember",
"id": "a0cfc7d3fdfd597c124e035910bf88cd09c19726",
"size": "3589",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "rest_framework_ember_3/pagination.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "29620"
}
],
"symlink_target": ""
}
|
import uuid
from . url import sanitize_url_fragment
from . exception import *
class BaseEndpoint(object):
    """Common plumbing for API endpoints: stores the request object and a
    sanitized endpoint path, and builds sub-resource URLs from it."""

    def __init__(self, request, endpoint):
        self.request = request
        self.endpoint = sanitize_url_fragment(endpoint)

    def _url_with(self, *params):
        """Return self.endpoint with each param appended as a '/'-separated
        path segment, e.g. _url_with('a', 1) -> '<endpoint>/a/1'.

        Uses a plain loop instead of reduce(): reduce is a builtin only on
        Python 2, so the original code breaks on Python 3 without an import.
        Behavior is otherwise identical (no params -> endpoint unchanged).
        """
        url = self.endpoint
        for param in params:
            url += "/" + str(param)
        return url
class Endpoint(BaseEndpoint):
    """Generic CRUD endpoint built on top of BaseEndpoint."""

    def list(self):
        """Fetch every resource at this endpoint."""
        return self.request.get(self.endpoint)

    def find(self, id):
        """Fetch a single resource by its id."""
        return self.find_by({"id": id})

    def find_by(self, params):
        """Fetch the resource matching *params*, or None when not found."""
        try:
            return self.request.get(self.endpoint, payload=params)
        except RequestError:  # If we can't find the resource
            return None

    def create(self, params):
        """Create a new resource from *params*."""
        return self.request.post(self.endpoint, payload=params)

    def update(self, id, params):
        """Update the resource identified by *id* with *params*."""
        target = self._url_with(id)
        return self.request.put(target, payload=params)

    def remove(self, id):
        """Delete the resource identified by *id*."""
        target = self._url_with(id)
        return self.request.delete(target)
class CartEndpoint(BaseEndpoint):
    """Endpoint for a single shopping cart.

    The cart is addressed by *cart_id*; when none is supplied a fresh
    uuid4 is generated so a brand-new cart can be started.
    """
    def __init__(self, request, endpoint, cart_id=None):
        super(CartEndpoint, self).__init__(request, endpoint)
        self.id = cart_id or uuid.uuid4()
        # Bug fix: build the cart URL from self.id rather than the raw
        # cart_id argument. With cart_id=None the old code produced
        # '<endpoint>/None' even though self.id held a freshly generated
        # uuid.
        self.endpoint = self._url_with(self.id)
    def add_item(self, params):
        """Add an item to the cart."""
        return self.request.post(self.endpoint, params)
    def add_variation(self, params):
        """Alias of add_item, used for product variations."""
        return self.add_item(params)
    def update_item(self, item_id, params):
        """Update an item already in the cart."""
        # NOTE(review): self.endpoint already embeds the cart id, so this
        # URL repeats it ('<base>/<id>/<id>/items/<item_id>'). Preserved
        # as-is; confirm against the remote API before changing.
        return self.request.put(
            self._url_with(self.id, "items", item_id),
            params
        )
    def contents(self):
        """Return the full contents of the cart."""
        return self.request.get(self.endpoint)
    def has_item(self, item_id):
        """Return True if the cart contains *item_id*."""
        return bool(self.request.get(self._url_with("has", item_id))["status"])
    def get_item(self, item_id):
        """Return a single cart item."""
        return self.request.get(self._url_with("item", item_id))
    def delete_item(self, item_id):
        """Remove a single item from the cart."""
        return self.request.delete(self._url_with("item", item_id))
    def checkout_options(self):
        """Return the available checkout options for this cart."""
        return self.request.get(self._url_with("checkout"))
    def checkout(self, params):
        """Submit the cart for checkout."""
        return self.request.post(self._url_with("checkout"), params)
    def delete(self):
        """Delete the whole cart."""
        return self.request.delete(self.endpoint)
class CheckoutEndpoint(BaseEndpoint):
    """Endpoint for submitting payments against an order."""
    def __init__(self, request, endpoint):
        super(CheckoutEndpoint, self).__init__(request, endpoint)
    def payment(self, method, order_id, params):
        # POSTs to '<endpoint>/payment/<method>/<order_id>' with the payment params.
        return self.request.post(self._url_with("payment", method, order_id), params)
|
{
"content_hash": "4c909c502ce219baf9143f7e171b732d",
"timestamp": "",
"source": "github",
"line_count": 91,
"max_line_length": 85,
"avg_line_length": 28.46153846153846,
"alnum_prop": 0.6351351351351351,
"repo_name": "moltin/python-sdk",
"id": "e6e0d6ed903f38cbbffcb627f8c9cfed1b925942",
"size": "2590",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "moltin/endpoints.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "HTML",
"bytes": "7952"
},
{
"name": "Python",
"bytes": "22858"
}
],
"symlink_target": ""
}
|
"""`dict` automatically saving it's state to a file on disk."""
import json
import locale
import os
from typing import Generic, MutableMapping, MutableSequence, TypeVar, Union
class Error(Exception):
    """Base class for all exceptions of this module."""
class InvalidFileError(Error):
    """Error raised if the dict cannot be deserialized from disk."""
class UnknownError(Error):
    """An unexpected error occurred."""
# Mutable containers get wrapped by _PersistentDictWrapper so their mutations
# trigger a save; plain scalars are returned as-is.
WrappableTypes = Union[MutableSequence, MutableMapping]
NonWrappableTypes = Union[float, int, str]
WrappableTypeVar = TypeVar('WrappableTypeVar',
                           bound=WrappableTypes)
NonWrappableTypeVar = TypeVar('NonWrappableTypeVar',
                              bound=NonWrappableTypes)
def _maybe_wrap(
    persistent_dict: 'PersistentDict',
    item: Union[WrappableTypeVar, NonWrappableTypeVar]
) -> Union['_PersistentDictWrapper[WrappableTypeVar]', NonWrappableTypeVar]:
    """Wrap *item* in a _PersistentDictWrapper when it is a mutable container.

    Non-container values pass through untouched.
    """
    if not isinstance(item, (MutableSequence, MutableMapping)):
        return item
    return _PersistentDictWrapper(persistent_dict, item)
class _PersistentDictWrapper(Generic[WrappableTypeVar]):
    """Wraps objects held in PersistentDict propagating the auto-save logic."""
    def __init__(self,
                 persistent_dict: 'PersistentDict',
                 value: WrappableTypeVar) -> None:
        # persistent_dict: owner whose save_to_disk() is called on mutation.
        # value: the wrapped mutable container.
        self._persistent_dict = persistent_dict
        self._value = value
    def __getattr__(self, name: str):
        # Delegate attribute access to the wrapped value. Any delegated
        # method call (mutating or not) triggers a save, and its result is
        # re-wrapped so nested containers keep propagating auto-save.
        attribute = self._value.__getattribute__(name)
        if hasattr(attribute, '__call__'):
            def wrapped_function(*args, **kwargs):
                result = attribute(*args, **kwargs)
                self._persistent_dict.save_to_disk()
                return _maybe_wrap(self._persistent_dict, result)
            return wrapped_function
        return _maybe_wrap(self._persistent_dict, attribute)
    def __delitem__(self, key):
        self._value.__delitem__(key)
        self._persistent_dict.save_to_disk()
    def __setitem__(self, key, value):
        self._value.__setitem__(key, value)
        self._persistent_dict.save_to_disk()
    def __getitem__(self, key):
        # Reads do not save, but the returned value is wrapped.
        value = self._value.__getitem__(key)
        return _maybe_wrap(self._persistent_dict, value)
    def __len__(self):
        return self._value.__len__()
    def __contains__(self, item):
        return self._value.__contains__(item)
    def __iter__(self):
        return self._value.__iter__()
class PersistentDict():
    """`dict` that automatically saves its content to disk when updated."""
    def __init__(self, path: str):
        # path: JSON file backing this dict; loaded eagerly at construction.
        self._path = path
        self._dict = self._read_from_disk()
    def _read_from_disk(self):
        # Returns the parsed JSON content, or {} when the file can't be read.
        try:
            with open(self._path, encoding=locale.getpreferredencoding()) as file:
                return json.load(file)
        except IOError:
            # Couldn't read file. Default to empty dict.
            return {}
        except ValueError as exc:
            raise InvalidFileError('Invalid config file.') from exc
        except Exception as exc:
            raise UnknownError(
                'An unknown error occurred reading config file.') from exc
    def save_to_disk(self):
        """Save this `PersistentDict` to disk"""
        # An empty dict is represented by the absence of the file on disk.
        if not self._dict:
            try:
                os.remove(self._path)
            except OSError:
                pass
            return
        with open(self._path, 'w', encoding=locale.getpreferredencoding()) as file:
            json.dump(self._dict, file, sort_keys=True, indent=2,
                      separators=(',', ': '))
    def __getattr__(self, name: str):
        # Delegate unknown attributes to the underlying dict. Any delegated
        # method call (mutating or not) triggers a save, and results are
        # wrapped so nested containers also propagate auto-save.
        attribute = self._dict.__getattribute__(name)
        if hasattr(attribute, '__call__'):
            def wrapped_function(*args, **kwargs):
                result = attribute(*args, **kwargs)
                self.save_to_disk()
                return _maybe_wrap(self, result)
            return wrapped_function
        return _maybe_wrap(self, attribute)
    def __delitem__(self, key):
        self._dict.__delitem__(key)
        self.save_to_disk()
    def __setitem__(self, key, value):
        self._dict.__setitem__(key, value)
        self.save_to_disk()
    def __getitem__(self, key: str):
        # Reads do not save, but the returned value is wrapped.
        value = self._dict.__getitem__(key)
        return _maybe_wrap(self, value)
    def __len__(self):
        return self._dict.__len__()
    def __contains__(self, item):
        return self._dict.__contains__(item)
    def __iter__(self):
        return self._dict.__iter__()
    def __str__(self):
        return str(self._dict)
    def __repr__(self):
        return str(self._dict)
    def __eq__(self, other):
        # Compares against the raw dict; defining __eq__ without __hash__
        # makes instances unhashable, which matches mutable-dict semantics.
        return self._dict == other
    def __ne__(self, other):
        return self._dict != other
|
{
"content_hash": "30898836c93e6e450816cddf55f4e9d7",
"timestamp": "",
"source": "github",
"line_count": 153,
"max_line_length": 79,
"avg_line_length": 28.627450980392158,
"alnum_prop": 0.6406392694063927,
"repo_name": "graveljp/smugcli",
"id": "2cd11ed4fd626916dcb11bd6af4aecdd1a22d4b9",
"size": "4380",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "smugcli/persistent_dict.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "223689"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
import unittest
import mock
from dashboard import ttest
class TTestTest(unittest.TestCase):
  """Tests for the t-test functions."""
  def setUp(self):
    """Sets the t-table values for the tests below."""
    # Patch in a small, known t-table so results don't depend on the real
    # module-level table.
    table_patch = mock.patch.object(ttest, '_TABLE', [
        (1, [0, 6.314, 12.71, 31.82, 63.66, 318.31]),
        (2, [0, 2.920, 4.303, 6.965, 9.925, 22.327]),
        (3, [0, 2.353, 3.182, 4.541, 5.841, 10.215]),
        (4, [0, 2.132, 2.776, 3.747, 4.604, 7.173]),
        (10, [0, 1.372, 1.812, 2.228, 2.764, 3.169]),
        (100, [0, 1.290, 1.660, 1.984, 2.364, 2.626]),
    ])
    table_patch.start()
    self.addCleanup(table_patch.stop)
    two_tail_patch = mock.patch.object(ttest, '_TWO_TAIL',
                                       [1, 0.2, 0.1, 0.05, 0.02, 0.01])
    two_tail_patch.start()
    self.addCleanup(two_tail_patch.stop)
  def testWelchsTTest(self):
    """Tests the t value and degrees of freedom output of Welch's t-test."""
    # The t-value can be checked with scipy.stats.ttest_ind(equal_var=False).
    # However the t-value output by scipy.stats.ttest_ind is -6.32455532034.
    # This implementation produces slightly different results.
    result = ttest.WelchsTTest([2, 3, 2, 3, 2, 3], [4, 5, 4, 5, 4, 5])
    self.assertAlmostEqual(10.0, result.df)
    self.assertAlmostEqual(-6.325, result.t, delta=1.0)
  def testWelchsTTest_EmptySample_RaisesError(self):
    """An error should be raised when an empty sample is passed in."""
    with self.assertRaises(RuntimeError):
      ttest.WelchsTTest([], [])
    with self.assertRaises(RuntimeError):
      ttest.WelchsTTest([], [1, 2, 3])
    with self.assertRaises(RuntimeError):
      ttest.WelchsTTest([1, 2, 3], [])
  def testTTest_EqualSamples_PValueIsOne(self):
    """Checks that t = 0 and p = 1 when the samples are the same."""
    result = ttest.WelchsTTest([1, 2, 3], [1, 2, 3])
    self.assertEqual(0, result.t)
    self.assertEqual(1, result.p)
  def testTTest_VeryDifferentSamples_PValueIsLow(self):
    """Checks that p is very low when the samples are clearly different."""
    # NOTE(review): assertLessEqual(a, b) asserts a <= b, so these check
    # t >= 250 and p >= 0.01. With the mocked table 0.01 is the smallest
    # possible p, so equality holds -- confirm the second assertion isn't
    # meant to be assertGreaterEqual.
    result = ttest.WelchsTTest([100, 101, 100, 101, 100],
                               [1, 2, 1, 2, 1, 2, 1, 2])
    self.assertLessEqual(250, result.t)
    self.assertLessEqual(0.01, result.p)
  def testTTest_DifferentVariance(self):
    """Verifies that higher variance -> higher p value."""
    result_low_var = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5])
    result_high_var = ttest.WelchsTTest([1, 4, 1, 4], [3, 6, 3, 6])
    self.assertLess(result_low_var.p, result_high_var.p)
  def testTTest_DifferentSampleSize(self):
    """Verifies that smaller sample size -> higher p value."""
    result_larger_sample = ttest.WelchsTTest([2, 3, 2, 3], [4, 5, 4, 5])
    result_smaller_sample = ttest.WelchsTTest([2, 3, 2, 3], [4, 5])
    self.assertLess(result_larger_sample.p, result_smaller_sample.p)
  def testTTest_DifferentMeanDifference(self):
    """Verifies that smaller difference between means -> higher p value."""
    result_far_means = ttest.WelchsTTest([2, 3, 2, 3], [5, 6, 5, 6])
    result_near_means = ttest.WelchsTTest([2, 3, 2, 3], [3, 4, 3, 4])
    self.assertLess(result_far_means.p, result_near_means.p)
  def testTValue(self):
    """Tests calculation of the t-value using Welch's formula."""
    # Results can be verified by directly plugging variables into Welch's
    # equation (e.g. using a calculator or the Python interpreter).
    stats1 = ttest.SampleStats(mean=0.299, var=0.05, size=150)
    stats2 = ttest.SampleStats(mean=0.307, var=0.08, size=165)
    # Note that a negative t-value is obtained when the first sample has a
    # smaller mean than the second, otherwise a positive value is returned.
    self.assertAlmostEqual(-0.27968236,
                           ttest._TValue(stats1=stats1, stats2=stats2))
    self.assertAlmostEqual(0.27968236,
                           ttest._TValue(stats1=stats2, stats2=stats1))
  def testTValue_ConstantSamples_ResultIsInfinity(self):
    """If there is no variation, infinity is used as the t-statistic value."""
    stats = ttest.SampleStats(mean=1.0, var=0, size=10)
    self.assertEqual(float('inf'), ttest._TValue(stats, stats))
  def testDegreesOfFreedom(self):
    """Tests calculation of estimated degrees of freedom."""
    # The formula used to estimate degrees of freedom for independent-samples
    # t-test is called the Welch-Satterthwaite equation. Note that since the
    # Welch-Satterthwaite equation gives an estimate of degrees of freedom,
    # the result is a floating-point number and not an integer.
    stats1 = ttest.SampleStats(mean=0.299, var=0.05, size=150)
    stats2 = ttest.SampleStats(mean=0.307, var=0.08, size=165)
    self.assertAlmostEqual(307.19879975,
                           ttest._DegreesOfFreedom(stats1, stats2))
  def testDegreesOfFreedom_ZeroVariance_ResultIsOne(self):
    """The lowest possible value is returned for df if variance is zero."""
    stats = ttest.SampleStats(mean=1.0, var=0, size=10)
    self.assertEqual(1.0, ttest._DegreesOfFreedom(stats, stats))
  def testDegreesOfFreedom_SmallSample_RaisesError(self):
    """Degrees of freedom can't be calculated if sample size is too small."""
    size_0 = ttest.SampleStats(mean=0, var=0, size=0)
    size_1 = ttest.SampleStats(mean=1.0, var=0, size=1)
    size_5 = ttest.SampleStats(mean=2.0, var=0.5, size=5)
    # An error is raised if the size of one of the samples is too small.
    with self.assertRaises(RuntimeError):
      ttest._DegreesOfFreedom(size_0, size_5)
    with self.assertRaises(RuntimeError):
      ttest._DegreesOfFreedom(size_1, size_5)
    with self.assertRaises(RuntimeError):
      ttest._DegreesOfFreedom(size_5, size_0)
    with self.assertRaises(RuntimeError):
      ttest._DegreesOfFreedom(size_5, size_1)
    # If both of the samples have a variance of 0, no error is raised.
    self.assertEqual(1.0, ttest._DegreesOfFreedom(size_1, size_1))
class LookupPValueTest(unittest.TestCase):
  """Tests for ttest._LookupPValue against a mocked t-table."""
  def setUp(self):
    """Sets the t-table values for the tests below."""
    table_patch = mock.patch.object(ttest, '_TABLE', [
        (1, [0, 6.314, 12.71, 31.82, 63.66, 318.31]),
        (2, [0, 2.920, 4.303, 6.965, 9.925, 22.327]),
        (3, [0, 2.353, 3.182, 4.541, 5.841, 10.215]),
        (4, [0, 2.132, 2.776, 3.747, 4.604, 7.173]),
        (10, [0, 1.372, 1.812, 2.228, 2.764, 3.169]),
        (100, [0, 1.290, 1.660, 1.984, 2.364, 2.626]),
    ])
    table_patch.start()
    self.addCleanup(table_patch.stop)
    two_tail_patch = mock.patch.object(ttest, '_TWO_TAIL',
                                       [1, 0.2, 0.1, 0.05, 0.02, 0.01])
    two_tail_patch.start()
    self.addCleanup(two_tail_patch.stop)
  def testLookupPValue_ExactMatchInTable(self):
    """Tests looking up an entry that is in the table."""
    # Lookup is symmetric in the sign of the t-value.
    self.assertEqual(0.1, ttest._LookupPValue(3.182, 3.0))
    self.assertEqual(0.1, ttest._LookupPValue(-3.182, 3.0))
  def testLookupPValue_TValueBetweenTwoValues_SmallerColumnIsUsed(self):
    # The second column is used because 3.1 is below 4.303,
    # so the next-lowest t-value, 2.920, is used.
    self.assertEqual(0.2, ttest._LookupPValue(3.1, 2.0))
    self.assertEqual(0.2, ttest._LookupPValue(-3.1, 2.0))
  def testLookup_DFBetweenTwoValues_SmallerRowIsUsed(self):
    self.assertEqual(0.05, ttest._LookupPValue(2.228, 45.0))
    self.assertEqual(0.05, ttest._LookupPValue(-2.228, 45.0))
  def testLookup_DFAndTValueBetweenTwoValues_SmallerRowAndColumnIsUsed(self):
    self.assertEqual(0.1, ttest._LookupPValue(2.0, 45.0))
    self.assertEqual(0.1, ttest._LookupPValue(-2.0, 45.0))
  def testLookupPValue_LargeTValue_LastColumnIsUsed(self):
    # The smallest possible p-value will be used when t is large.
    self.assertEqual(0.01, ttest._LookupPValue(500.0, 1.0))
    self.assertEqual(0.01, ttest._LookupPValue(-500.0, 1.0))
  def testLookupPValue_ZeroTValue_FirstColumnIsUsed(self):
    # The largest possible p-value will be used when t is zero.
    self.assertEqual(1.0, ttest._LookupPValue(0.0, 1.0))
    self.assertEqual(1.0, ttest._LookupPValue(0.0, 2.0))
  def testLookupPValue_SmallTValue_FirstColumnIsUsed(self):
    # The largest possible p-value will be used when t is almost zero.
    self.assertEqual(1.0, ttest._LookupPValue(0.1, 2.0))
    self.assertEqual(1.0, ttest._LookupPValue(-0.1, 2.0))
  def testLookupPValue_LargeDegreesOfFreedom_LastRowIsUsed(self):
    # The last row of the table should be used.
    self.assertEqual(0.02, ttest._LookupPValue(2.365, 100.0))
# Standard unittest entry point for running this file directly.
if __name__ == '__main__':
  unittest.main()
|
{
"content_hash": "9a54bd243c98cdcc296439028c3a609f",
"timestamp": "",
"source": "github",
"line_count": 194,
"max_line_length": 78,
"avg_line_length": 44.83505154639175,
"alnum_prop": 0.6616463554840193,
"repo_name": "catapult-project/catapult",
"id": "47be39a90fc2d86e7be7ad6ef8b9e07c8fe1cfc9",
"size": "8861",
"binary": false,
"copies": "6",
"ref": "refs/heads/main",
"path": "dashboard/dashboard/ttest_test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "C",
"bytes": "1324"
},
{
"name": "C++",
"bytes": "46069"
},
{
"name": "CSS",
"bytes": "23376"
},
{
"name": "Dockerfile",
"bytes": "1541"
},
{
"name": "Go",
"bytes": "114396"
},
{
"name": "HTML",
"bytes": "12394298"
},
{
"name": "JavaScript",
"bytes": "1559584"
},
{
"name": "Makefile",
"bytes": "1774"
},
{
"name": "Python",
"bytes": "6778695"
},
{
"name": "Shell",
"bytes": "2288"
}
],
"symlink_target": ""
}
|
from snakebite.client import Client

# Connect to the HDFS namenode on localhost:9000.
client = Client('localhost', 9000)
# create_parent=True behaves like `mkdir -p`; mkdir yields one result per
# created path.
for p in client.mkdir(['/foo/bar', '/input'], create_parent=True):
    # print() call form works on both Python 2 and 3 for a single argument
    # (the original `print p` statement was Python-2-only syntax).
    print(p)
|
{
"content_hash": "2805295ae45c540e451b649cf52a7a5c",
"timestamp": "",
"source": "github",
"line_count": 5,
"max_line_length": 66,
"avg_line_length": 29.8,
"alnum_prop": 0.7114093959731543,
"repo_name": "MinerKasch/HadoopWithPython",
"id": "11eca31cadb6d85ad01578f53f60dda99648c84d",
"size": "149",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "python/HDFS/mkdir.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "PigLatin",
"bytes": "2240"
},
{
"name": "Python",
"bytes": "10790"
}
],
"symlink_target": ""
}
|
"""
ADF Composite Output
#########################
This module provides the primary (user facing) output parser.
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from collections import defaultdict
import re
import six
import numpy as np
import pandas as pd
from io import StringIO
from exa.util.units import Length
from exa import TypedMeta
from exatomic.base import sym2z
from exatomic.algorithms.basis import lmap, enum_cartesian
from exatomic.algorithms.numerical import dfac21
from exatomic.core.atom import Atom, Frequency
from exatomic.core.gradient import Gradient
from exatomic.core.basis import BasisSet, BasisSetOrder
from exatomic.core.orbital import Orbital, Excitation, MOMatrix
from exatomic.core.tensor import NMRShielding, JCoupling
from .editor import Editor
class OutMeta(TypedMeta):
    # Maps each Output attribute name to its expected type.
    # NOTE(review): TypedMeta (from exa) presumably turns these class
    # attributes into typed properties on Output -- confirm against exa docs.
    atom = Atom
    basis_set = BasisSet
    basis_set_order = BasisSetOrder
    orbital = Orbital
    contribution = pd.DataFrame
    excitation = Excitation
    momatrix = MOMatrix
    sphr_momatrix = MOMatrix
    gradient = Gradient
    frequency = Frequency
    nmr_shielding = NMRShielding
    j_coupling = JCoupling
class Output(six.with_metaclass(OutMeta, Editor)):
    """The ADF output parser."""
    def parse_atom(self):
        """Parse nuclear coordinates into the ``atom`` table.

        Three table layouts are recognized, tried in this order:
        geometry-optimization frames ("Coordinates (Cartesian)", values
        already in bohr), a single-point ATOMS table (Angstrom), and the
        NUCLEAR COORDINATES tables of NMR/CPL calculations (Angstrom).
        """
        # TODO : only supports single frame, gets last atomic positions
        # this will actually get the very first coordinates
        #_re_atom_00 = 'Atoms in this Fragment Cart. coord.s (Angstrom)'
        _re_atom_00 = 'ATOMS'
        found1 = self.find(_re_atom_00, keys_only=True)
        # use the regex instead of find because we have a similar search string in an nmr and
        # cpl calculation for the nuclear coordinates
        _reatom = "(?i)NUCLEAR COORDINATES"
        found2 = self.regex(_reatom, keys_only=True)
        # to find the optimized frames
        _reopt = "Coordinates (Cartesian)"
        found_opt = self.find(_reopt, keys_only=True)
        if found_opt:
            # table body starts six lines below each header
            starts = np.array(found_opt) + 6
            stop = starts[0]
            # scan to the dashed terminator; all frames share the same length
            while '------' not in self[stop]: stop += 1
            stops = starts + stop - starts[0]
            dfs = []
            for idx, (start, stop) in enumerate(zip(starts, stops)):
                # parse everything as they may be useful in the future
                df = self.pandas_dataframe(start, stop, ncol=11)
                # drop everything but index, symbol and x/y/z
                df.drop(list(range(5, 11)), axis='columns', inplace=True)
                # we read the coordinates in bohr so no need to convert
                df.columns = ['set', 'symbol', 'x', 'y', 'z']
                df['set'] = df['set'].astype(int)
                df['Z'] = df['symbol'].map(sym2z)
                df['frame'] = idx
                df['set'] -= 1
                dfs.append(df)
            atom = pd.concat(dfs, ignore_index=True)
        elif found1:
            start = stop = found1[-1] + 4
            while self[stop].strip(): stop += 1
            atom = self.pandas_dataframe(start, stop, ncol=8)
            atom.drop(list(range(5,8)), axis='columns', inplace=True)
            atom.columns = ['set', 'symbol', 'x', 'y', 'z']
            # this table is printed in Angstrom; convert to atomic units
            for c in ['x', 'y', 'z']: atom[c] *= Length['Angstrom', 'au']
            atom['Z'] = atom['symbol'].map(sym2z)
            atom['set'] -= 1
            atom['frame'] = 0
        elif found2:
            #if len(found) > 1:
            #    raise NotImplementedError("We can only parse outputs from a single NMR calculation")
            atom = []
            for idx, val in enumerate(found2):
                start = val + 3
                stop = start
                while self[stop].strip(): stop += 1
                # a bit of a hack to make sure that there is no formatting change depending on the
                # number of atoms in the molecule as the index is right justified so if there are
                # more than 100 atoms it will fill the allotted space for the atom index and change the
                # delimiter and therefore the number of columns
                self[start:stop] = map(lambda x: x.replace('(', ''), self[start:stop])
                df = self.pandas_dataframe(start, stop, ncol=5)
                df.columns = ['symbol', 'set', 'x', 'y', 'z']
                # printed in Angstrom; convert to atomic units
                for c in ['x', 'y', 'z']: df[c] *= Length['Angstrom', 'au']
                df['Z'] = df['symbol'].map(sym2z)
                df['frame'] = idx
                # remove the trailing characters from the index
                df['set'] = list(map(lambda x: x.replace('):', ''), df['set']))
                df['set'] = df['set'].astype(int) - 1
                atom.append(df)
            atom = pd.concat(atom)
        else:
            raise NotImplementedError("We could not find the atom table in this output. Please submit "+ \
                                      "an issue ticket so we can add it in.")
        self.atom = atom
    def parse_basis_set(self):
        """Parse the Slater-type basis functions into the ``basis_set`` table.

        Reads the fixed-width function listing printed per atom type, assigns
        set/shell indices, and records the symbol->set mapping onto
        ``self.atom``.  Also marks the basis as cartesian
        (``meta['spherical'] = False``).
        """
        # Find the basis set
        _re_bas_00 = '(Slater-type) F U N C T I O N S'
        _re_bas_01 = 'Atom Type'
        start = self.find(_re_bas_00, keys_only=True)[-1] + 3
        starts = self.find(_re_bas_01, start=start, keys_only=True)
        lines = []
        for ext in starts:
            # four header lines per atom type, then the function rows
            for i in range(4):
                lines.append(start + ext + i)
            stop = start + ext + 4
            while self[stop].strip():
                lines.append(stop)
                stop += 1
        df = pd.read_fwf(StringIO('\n'.join([self[i] for i in lines])),
                         widths=[4, 2, 12, 4],
                         names=['n', 'L', 'alpha', 'symbol'])
        # Where atom types change ('---' separator rows act as boundaries)
        idxs = [0] + df['n'][df['n'] == '---'].index.tolist() + [df.shape[0]]
        sets, shells = [], []
        for i, (start, stop) in enumerate(zip(idxs, idxs[1:])):
            sets.append(np.repeat(i - 1, stop - start))
            shells.append(np.arange(-1, stop - start - 1))
        df['set'] = np.concatenate(sets)
        df['shell'] = np.concatenate(shells)
        # Atom table basis set map (symbols are printed like "C)")
        basmap = df['symbol'].dropna()
        basmap = basmap[basmap.str.endswith(')')].str.strip(')')
        basmap = {val: df['set'][key] + 1 for
                  key, val in basmap.to_dict().items()}
        # Discard the garbage (rows whose 'n' is not numeric are headers)
        drop = df['n'].str.strip().str.isnumeric().fillna(False)
        df.drop(drop[drop == False].index, inplace=True)
        df.drop('symbol', axis=1, inplace=True)
        # Clean up the series
        df['alpha'] = df['alpha'].astype(np.float64)
        df['n'] = df['n'].astype(np.int64)
        df['L'] = df['L'].str.lower().map(lmap)
        # angular normalization prefactor
        df['d'] = np.sqrt((2 * df['L'] + 1) / (4 * np.pi))
        df['r'] = df['n'] - (df['L'] + 1)
        df['frame'] = 0
        self.basis_set = BasisSet(df)
        self.meta['spherical'] = False
        self.atom['set'] = self.atom['symbol'].map(basmap)
    def parse_basis_set_order(self):
        """Build the ``basis_set_order`` table.

        Enumerates, per atom and per angular momentum, every cartesian
        component (l, m, n) of every shell, in the order ADF stores basis
        functions.  Requires ``self.basis_set`` and ``self.atom``.
        """
        # All the columns we need
        data = defaultdict(list)
        sets = self.basis_set.groupby('set')
        # Iterate over atoms
        for center, symbol, seht in zip(self.atom.index,
                                        self.atom['symbol'],
                                        self.atom['set']):
            # Per basis set
            bas = sets.get_group(seht).groupby('L')
            for L, grp in bas:
                # Iterate over cartesians
                for l, m, n in enum_cartesian[L]:
                    for shell, r in zip(grp['shell'], grp['r']):
                        data['center'].append(center)
                        data['symbol'].append(symbol)
                        data['shell'].append(shell)
                        data['seht'].append(seht)
                        data['L'].append(L)
                        data['l'].append(l)
                        data['m'].append(m)
                        data['n'].append(n)
                        data['r'].append(r)
        data['set'] = data.pop('seht')
        data['frame'] = 0
        self.basis_set_order = pd.DataFrame.from_dict(data)
        # NOTE(review): dfac21 presumably computes a double-factorial-type
        # term so this is the cartesian normalization prefactor — confirm
        # against exatomic.algorithms.numerical.dfac21.
        self.basis_set_order['prefac'] = (self.basis_set_order['L'].apply(dfac21) /
                                          (self.basis_set_order['l'].apply(dfac21) *
                                           self.basis_set_order['m'].apply(dfac21) *
                                           self.basis_set_order['n'].apply(dfac21))
                                          ).apply(np.sqrt)
    def parse_orbital(self):
        """Parse orbital energies/occupations into the ``orbital`` table.

        Handles both the open-shell ("both Spins") and closed-shell
        ("per Irrep and Spin") layouts; rows are grouped by irrep label.
        """
        _re_orb_00 = 'Orbital Energies, both Spins'
        _re_orb_01 = 'Orbital Energies, per Irrep and Spin'
        found = self.find(_re_orb_00, _re_orb_01, keys_only=True)
        # Open shell vs. closed shell
        cols = {
            _re_orb_00: ['symmetry', 'vector', 'spin', 'occupation', 'energy', 'eV'],
            _re_orb_01: ['vector', 'occupation', 'energy', 'eV', 'dE']}
        key = _re_orb_00 if found[_re_orb_00] else _re_orb_01
        ldx = found[key][-1] + 4
        starts = []
        stops = []
        irreps = []
        # walk the table line by line: numeric lines are data rows,
        # non-numeric lines are irrep headers that split sub-tables
        while self[ldx].strip() != '':
            # error catching for when we have a symmetry label
            try:
                _ = int(self[ldx].strip()[0])
                ldx += 1
            except ValueError:
                stops.append(ldx)
                irreps.append(self[ldx])
                # to ensure that we do not skip over the blank line
                # and execute an infinite while loop
                if not (self[ldx].strip() == ''):
                    ldx += 1
                    starts.append(ldx)
                else:
                    break
        else:
            # to get the bottom of the table (loop ended on a blank line)
            stops.append(ldx)
        # the first entry is actually the very beginning of the table
        stops = stops[1:]
        # put everything together
        dfs = []
        for start, stop, irrep in zip(starts, stops, irreps):
            df = self.pandas_dataframe(start, stop, cols[key])
            df['irrep'] = irrep.strip()
            dfs.append(df)
        df = pd.concat(dfs, ignore_index=True)
        # zero-based orbital indices
        df['vector'] -= 1
        if 'spin' in cols[key]:
            df['spin'] = df.spin.map({'A': 0, 'B': 1})
            df.sort_values(by=['spin', 'energy'], inplace=True)
        else:
            df.sort_values(by='energy', inplace=True)
            df['spin'] = 0
        df.reset_index(drop=True, inplace=True)
        df['frame'] = df['group'] = 0
        self.orbital = df
    def parse_contribution(self):
        """Parse per-MO SFO contribution percentages into ``contribution``.

        The fixed-width table is printed once per spin channel; the spin
        index is attached from the order of appearance.
        """
        _re_con_00 = ('E(eV) Occ MO % '
                      'SFO (first member) E(eV) Occ Fragment')
        # MO contribution by percentage
        found = self.find(_re_con_00, keys_only=True)
        starts = [i + 3 for i in found]
        widths = [12, 6, 6, 6, 11, 6, 10, 12, 6, 6, 3]
        names = ['eV', 'occupation', 'vector', 'sym', '%', 'SFO',
                 'angmom', 'eV(sfo)', 'occ(sfo)', 'atom', 'symbol']
        dfs = []
        # Prints for both spins
        for i, start in enumerate(starts):
            stop = start
            while self[stop].strip(): stop += 1
            dfs.append(pd.read_fwf(StringIO('\n'.join(self[start:stop])),
                                   delim_whitespace=True, widths=widths,
                                   names=names))
            dfs[-1]['spin'] = i
        dfs = pd.concat(dfs).reset_index(drop=True)
        # blank (whitespace-only) cells become NaN so they can be ffilled
        dfs = dfs.applymap(lambda x: np.nan if (isinstance(x, six.string_types)
                                                and x.isspace()) else x)
        dfs.fillna(method='ffill', inplace=True)
        # Clean up
        dfs['symbol'] = dfs['symbol'].str.strip()
        dfs['angmom'] = dfs['angmom'].str.strip()
        # bare 'S' entries carry no ml part; normalize so the split works
        dfs['angmom'].update(dfs['angmom'].map({'S': 'S:'}))
        dfs[['L', 'ml']] = dfs['angmom'].str.extract('(.*):(.*)', expand=True)
        dfs['%'] = dfs['%'].str.replace('%', '')
        # overflowed percentage fields are printed as asterisks
        dfs['%'].update(dfs['%'].map({" ******": np.inf}))
        dfs['%'] = dfs['%'].astype(np.float64)
        dfs['occupation'] = dfs['occupation'].astype(np.float64)
        dfs['vector'] = dfs['vector'].astype(np.int64) - 1
        dfs['eV'] = dfs['eV'].astype(np.float64)
        dfs['atom'] -= 1
        self.contribution = dfs
    def parse_excitation(self):
        """Parse TDDFT excitation data into the ``excitation`` table.

        Combines the transition-dipole/weight table with the subsequent
        energy/oscillator-strength summary table, mapping the latter onto
        the former by excitation index.
        """
        # Excitation
        _re_exc_00 = '(sum=1) transition dipole moment'
        _re_exc_01 = ' no. E/a.u. E/eV f Symmetry'
        found = self.find_next(_re_exc_00, keys_only=True)
        if not found: return
        # First table of interest here
        start = found + 4
        stop = self.find_next(_re_exc_01, keys_only=True) - 3
        # NOTE(review): local name 'os' means open-shell here (9 columns
        # implies a spin column) and shadows the os module name — confirm.
        os = len(self[start].split()) == 9
        todrop = ['occ:', 'virt:']
        cols = ['excitation', 'occ', 'drop', 'virt', 'weight', 'TDMx', 'TDMy', 'TDMz']
        if os: cols.insert(1, 'spin')
        if os: todrop = ['occ', 'virt']
        adf = self.pandas_dataframe(start, stop, cols)
        adf.drop('drop', axis=1, inplace=True)
        # discard NTO rows and header-continuation rows
        s1 = set(adf[cols[1]][adf[cols[1]] == 'NTO'].index)
        s2 = set(adf['excitation'][adf['excitation'].isin(todrop)].index)
        adf.drop(s1 | s2, axis=0, inplace=True)
        adf['excitation'] = adf['excitation'].str[:-1].astype(np.int64) - 1
        if os: adf['spin'] = adf['spin'].map({'Alph': 0, 'Beta': 1})
        # split "12A" style labels into numeric index and symmetry suffix
        adf[['occ', 'occsym']] = adf['occ'].str.extract('([0-9]*)(.*)', expand=True)
        adf[['virt', 'virtsym']] = adf['virt'].str.extract('([0-9]*)(.*)', expand=True)
        adf['occ'] = adf['occ'].astype(np.int64) - 1
        adf['virt'] = adf['virt'].astype(np.int64) - 1
        # Second one here
        start = stop + 5
        stop = start
        while self[stop].strip(): stop += 1
        cols = _re_exc_01.split()
        df = self.pandas_dataframe(start, stop + 1, cols)
        df.drop(cols[0], axis=1, inplace=True)
        df.columns = ['energy', 'eV', 'osc', 'symmetry']
        # Expand the second table to fit the original
        for col in df.columns: adf[col] = adf.excitation.map(df[col])
        adf['frame'] = adf['group'] = 0
        self.excitation = adf
    def parse_momatrix(self):
        """Parse the MO coefficient matrix (BAS representation).

        Only unsymmetrized ('nosym') outputs are supported; open-shell
        outputs yield a second coefficient column ('coef1').
        """
        _re_mo_00 = 'Eigenvectors .* in BAS representation'
        _re_mo_01 = 'row '
        _re_mo_02 = 'nosym'
        found = self.regex(_re_mo_00, _re_mo_01, _re_mo_02,
                           flags=re.IGNORECASE, keys_only=True)
        if not found[_re_mo_00] or not found[_re_mo_01]: return
        if found[_re_mo_02]:
            thresh = found[_re_mo_00][0]
            rowmajor = 'rows' in self[thresh]
            # only 'row' markers after the eigenvector header belong to us
            starts = np.array([i for i in found[_re_mo_01] if i > thresh]) + 1
            nchi = starts[1] - starts[0] - 3
            ncol = len(self[starts[0] + 1].split()) - 1
            # an even number of printed blocks may indicate two spin channels
            if len(starts) % 2: os = False
            else:
                anchor = starts[len(starts)//2 - 1] + nchi
                sail = starts[len(starts)//2]
                os = True if self.find('SPIN 2', start=anchor, stop=sail) else False
            blocks = [starts] if not os else [starts[:len(starts)//2],
                                              starts[len(starts)//2:]]
            data = pd.DataFrame()
            for i, block in enumerate(blocks):
                stop = block[-1] + nchi
                # skip the 3 separator lines between consecutive sub-blocks
                skips = [k + j for k in list(block[1:] - block[0] - 3) for j in range(3)]
                name = 'coef' if not i else 'coef{}'.format(i)
                col = self.pandas_dataframe(block[0], stop, ncol + 1,
                                            skiprows=skips).drop(0, axis=1,
                                            ).unstack().dropna().reset_index(drop=True)
                data[name] = col
            norb = len(data.index) // nchi
            data['orbital'] = np.concatenate([np.repeat(range(i, norb, ncol), nchi)
                                             for i in range(ncol)])
            data['chi'] = np.tile(range(nchi), norb)
            data['frame'] = 0
            if rowmajor:
                # row-major printing swaps the roles of orbital and chi
                data.rename(columns={'orbital': 'chi', 'chi': 'orbital'}, inplace=True)
                data.sort_values(by=['orbital', 'chi'], inplace=True)
            self.momatrix = data
        else:
            print('Symmetrized calcs not supported yet.')
    def parse_sphr_momatrix(self, verbose=False):
        """
        Parse localized momatrix (if present).

        If the ``locorb`` keyword is used in ADF, an additional momatrix is
        printed after localization is performed. Parsing this table allows
        for visualization of these orbitals.

        Args:
            verbose (bool): print a message when no localization was found

        Note:
            The attr :attr:`~exatomic.adf.output._re_loc_mo` is used for parsing this
            section.
        """
        _re_loc_mo = ("Localized MOs expanded in CFs+SFOs",
                      "SFO contributions (%) per Localized Orbital")
        found = self.find(*_re_loc_mo)
        if len(found[_re_loc_mo[0]]) == 0:
            if verbose:
                print("No localization performed.")
            return # Nothing to parse
        start = found[_re_loc_mo[0]][0][0] + 8
        stop = found[_re_loc_mo[1]][0][0] - 4
        # Parse the localized momatrix as a whole block of text
        df = pd.read_fwf(StringIO("\n".join(self[start:stop])),
                         widths=(16, 9, 9, 9, 9, 9, 9, 9, 9), header=None)
        del df[0]
        # Identify the eigenvectors and (un)stack them correctly
        n = df[df[1].isnull()].index[0] # number of basis functions
        m = np.ceil(df.shape[0]/n).astype(int) # number of printed blocks of text
        # idx - indexes of "lines" (rows) that don't contain coefficients
        idx = [(n+5)*j + i - 5 for j in range(1, m) for i in range(0, 5)]
        df = df[~df.index.isin(idx)]
        coefs = []
        for i in range(0, df.shape[0]//n+1):
            d = df.iloc[n*(i-1):n*i, :]
            coefs.append(d.unstack().dropna().values.astype(float))
        coefs = np.concatenate(coefs)
        m = coefs.shape[0]//n # Number of localized MOs
        momatrix = pd.DataFrame.from_dict({'coef': coefs,
                                           'orbital': [i for i in range(m) for _ in range(n)],
                                           'chi': [j for _ in range(m) for j in range(n)]})
        # attach to the last parsed frame of the atom table
        momatrix['frame'] = self.atom['frame'].unique()[-1]
        self.sphr_momatrix = momatrix
def parse_gradient(self):
_regrad = "Energy gradients wrt nuclear displacements"
found = self.find(_regrad, keys_only=True)
if not found:
return
starts = np.array(found) + 6
stop = starts[0]
while '----' not in self[stop]: stop += 1
stops = starts + (stop - starts[0])
dfs = []
for i, (start, stop) in enumerate(zip(starts, stops)):
df = self.pandas_dataframe(start, stop, ncol=5)
df.columns = ['atom', 'symbol', 'fx', 'fy', 'fz']
df['frame'] = i
df['atom'] -= 1
dfs.append(df)
grad = pd.concat(dfs, ignore_index=True)
grad['Z'] = grad['symbol'].map(sym2z)
grad = grad[['atom', 'Z', 'fx', 'fy', 'fz', 'symbol', 'frame']]
for u in ['fx', 'fy', 'fz']: grad[u] *= 1./Length['Angstrom', 'au']
self.gradient = grad
def parse_frequency(self):
_renorm = "Vibrations and Normal Modes"
_refreq = "List of All Frequencies:"
found = self.find(_refreq, keys_only=True)
if not found:
return
elif len(found) > 1:
raise NotImplementedError("We cannot parse more than one frequency calculation in a single output")
found = self.find(_refreq, _renorm, keys_only=True)
start = found[_refreq][0] + 9
stop = start
while self[stop]: stop += 1
df = self.pandas_dataframe(start, stop, ncol=3)
freqs = df[0].values
n = int(np.ceil(freqs.shape[0]/3))
start = found[_renorm][0] + 9
stop = start
while self[stop]: stop += 1
natoms = stop - start
dfs = []
fdx = 0
for i in range(n):
if i == 0:
start = found[_renorm][0] + 9
else:
start = stop + 4
stop = start + natoms
freqs = list(map(lambda x: float(x), self[start-2].split()))
ncol = len(freqs)
df = self.pandas_dataframe(start, stop, ncol=1+3*ncol)
tmp = list(map(lambda x: x.split('.'), df[0]))
index, symbol = list(map(list, zip(*tmp)))
slices = [list(range(1+i, 1+3*ncol, 3)) for i in range(ncol)]
dx, dy, dz = [df[i].unstack().values for i in slices]
freqdx = np.repeat(list(range(fdx, ncol+fdx)), natoms)
zs = pd.Series(symbol).map(sym2z)
freqs = np.repeat(freqs, natoms)
stacked = pd.DataFrame.from_dict({'Z': np.tile(zs, ncol), 'label': np.tile(index, ncol), 'dx': dx,
'dy': dy, 'dz': dz, 'frequency': freqs, 'freqdx': freqdx})
stacked['ir_int'] = 0.0
stacked['symbol'] = np.tile(symbol, ncol)
dfs.append(stacked)
fdx += ncol
frequency = pd.concat(dfs, ignore_index=True)
frequency['frame'] = 0
# TODO: check units of the normal modes
self.frequency = frequency
    def parse_nmr_shielding(self):
        """Parse NMR total shielding tensors into ``nmr_shielding``.

        One row per nucleus with the 3x3 tensor flattened into
        xx..zz columns plus the isotropic value; the ``frame`` column
        indexes which NMR calculation the nucleus belongs to.
        """
        _reatom = "N U C L E U S :"
        _reshield = "==== total shielding tensor"
        _renatom = "NUCLEAR COORDINATES (ANGSTROMS)"
        found = self.find(_reatom, keys_only=True)
        if not found:
            #raise NotImplementedError("Could not find {} in output".format(_reatom))
            return
        # boundaries between separate NMR calculations in the same output
        ncalc = self.find(_renatom, keys_only=True)
        ncalc.append(len(self))
        ndx = 0
        dfs = []
        for start in found:
            try:
                # advance the calculation index when this nucleus falls
                # outside the current calculation's line range
                ndx = ndx if start > ncalc[ndx] and start < ncalc[ndx+1] else ndx+1
            except IndexError:
                raise IndexError("It seems that there was an issue with determining which NMR calculation we are in")
            start_shield = self.find(_reshield, keys_only=True, start=start)[0] + start + 2
            end_shield = start_shield + 3
            # header line looks like "N U C L E U S : C(1)"
            symbol, index = self[start].split()[-1].split('(')
            index = int(index.replace(')', ''))
            isotropic = float(self[start_shield+4].split()[-1])
            df = self.pandas_dataframe(start_shield, end_shield, ncol=3)
            cols = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz']
            df = pd.DataFrame(df.unstack().values.reshape(1,9), columns=cols)
            df['isotropic'] = isotropic
            df['atom'] = index - 1
            df['symbol'] = symbol
            df['label'] = 'nmr shielding'
            df['frame'] = ndx
            dfs.append(df)
        shielding = pd.concat(dfs, ignore_index=True)
        self.nmr_shielding = shielding
    def parse_j_coupling(self):
        """Parse spin-spin (J) coupling tensors into ``j_coupling``.

        One row per perturbing/responding nucleus pair, with the 3x3 tensor
        (principal axis representation, in Hz) flattened into xx..zz columns.
        """
        _recoupl = "total calculated spin-spin coupling:"
        _reatom = "Internal CPL numbering of atoms:"
        found = self.find(_reatom, keys_only=True)
        if not found:
            return
        found = self.find(_reatom, _recoupl, keys_only=True)
        # we grab the tensors inside the principal axis representation
        # for the cartesian axis representation we start the list at 0 and grab every other instance
        start_coupl = found[_recoupl][1::2]
        start_pert = np.array(found[_reatom]) - 3
        dfs = []
        # grab atoms
        cols = ['xx', 'xy', 'xz', 'yx', 'yy', 'yz', 'zx', 'zy', 'zz']
        for ln, start in zip(start_pert, start_coupl):
            line = self[ln].split()
            # we just replace all of the () in the strings
            pert_nucl = list(map(lambda x: x.replace('(', '').replace(')', ''), line[5:]))
            nucl = list(map(lambda x: x.replace('(', '').replace(')', ''), line[1:3]))
            # grab both tensors
            df = self.pandas_dataframe(start+2, start+5, ncol=6)
            # this will grab the iso value and tensor elements for the j coupling in hz
            df.drop(range(3), axis='columns', inplace=True)
            df = pd.DataFrame(df.unstack().values.reshape(1,9), columns=cols)
            iso = self[start+1].split()[-1]
            # place all of the dataframe columns
            df['isotropic'] = float(iso)
            df['atom'] = int(nucl[0])
            df['symbol'] = nucl[1]
            df['pt_atom'] = int(pert_nucl[0])
            df['pt_symbol'] = pert_nucl[1]
            df['label'] = 'j coupling'
            df['frame'] = 0
            dfs.append(df)
        # put everything together
        j_coupling = pd.concat(dfs, ignore_index=True)
        # zero-based atom indices
        j_coupling['atom'] -= 1
        j_coupling['pt_atom'] -= 1
        self.j_coupling = j_coupling
    def __init__(self, *args, **kwargs):
        # No parser-specific state; defer entirely to the Editor constructor.
        super(Output, self).__init__(*args, **kwargs)
|
{
"content_hash": "0e3bcf3e16877f5c82bae042eaa1cae7",
"timestamp": "",
"source": "github",
"line_count": 551,
"max_line_length": 117,
"avg_line_length": 45.3448275862069,
"alnum_prop": 0.5119871923153893,
"repo_name": "exa-analytics/atomic",
"id": "978d52e0933f119970455a4df53044fccaf50c5d",
"size": "25123",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "exatomic/adf/output.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "JavaScript",
"bytes": "76185"
},
{
"name": "Python",
"bytes": "82565"
}
],
"symlink_target": ""
}
|
from ohno import config
from ohno.client.telnet import Telnet
from ohno.client.pty import Pty
class Client(object):
    """Wraps around a specific NetHack client (telnet, /usr/bin/nethack, ..)"""
    def __init__(self, ohno):
        self.ohno = ohno
        kind = config.CLIENT
        if kind == 'telnet':
            ohno.logger.client('Initializing telnet client')
            self._client = Telnet(ohno)
        elif kind == 'pty':
            ohno.logger.client('Initializing nethack client')
            self._client = Pty(ohno)
        else:
            raise Exception('config.CLIENT?')

    def start_resume_game(self):
        """
        Starts or resumes a nethack game and leaves the game in a state
        such that we can start running ohno.senses.update()
        """
        log = self.ohno.logger.client
        log('Starting/resuming game..')
        self._client.start_resume_game()
        log('Game should be started/resumed.')

    def send(self, data, **kwargs):
        """Log outgoing data, then forward it to the wrapped client."""
        self.ohno.logger.client('> ' + repr(data))
        return self._client.send(data, **kwargs)

    def receive(self, **kwargs):
        """Read from the wrapped client, logging whatever came back."""
        received = self._client.receive(**kwargs)
        self.ohno.logger.client('< ' + repr(received))
        return received
|
{
"content_hash": "4e0fb1615999211ae2d5db0cbd20b6ed",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 79,
"avg_line_length": 35.542857142857144,
"alnum_prop": 0.6093247588424437,
"repo_name": "helgefmi/ohno",
"id": "6050c80e750fc07cfc3c38de0653814ea6390cc8",
"size": "1244",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ohno/client/client.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "335981"
}
],
"symlink_target": ""
}
|
import ROOT
from array import array
class Marker(ROOT.TMarker):
    """A TMarker with an optional color applied at construction time."""
    def __init__(self, x, y, markerColor=None, markerStyle=5):
        ROOT.TMarker.__init__(self, x, y, markerStyle)
        if markerColor:
            self.SetMarkerColor(markerColor)
class CrossMarker( ROOT.TMarker ):
    """ A special class just for "cross" markers where the line-width is adjustable. """

    # holds drawn TLines for memory management (keeps them alive after Draw)
    container = []

    def __init__( self, x,y, markerColor=ROOT.kBlack, markerSize=1.0, lineWidth=3 ):
        ROOT.TMarker.__init__( self, x,y, 5 )
        self.SetMarkerSize( markerSize )
        self.x = x
        self.y = y
        self.markerSize = markerSize
        self.markerColor = markerColor
        self.lineWidth = lineWidth

    def _draw_stroke(self, u, v, sv):
        """Draw one diagonal stroke of the cross centered at pixel (u, v).

        ``sv`` is +1 or -1 and selects the direction of the vertical offset,
        giving the two diagonals. Returns the drawn TLine.
        """
        d = 4 * self.markerSize
        line = ROOT.TLine()
        line.SetLineWidth( self.lineWidth )
        line.SetLineColor( self.markerColor )
        line.DrawLine(
            ROOT.gPad.AbsPixeltoX( int(u - d) ),
            ROOT.gPad.AbsPixeltoY( int(v - sv*d) ),
            ROOT.gPad.AbsPixeltoX( int(u + d) ),
            ROOT.gPad.AbsPixeltoY( int(v + sv*d) ),
        )
        return line

    def Draw(self):
        """
        Standard markers have size 8px (my guess is (8px,8px) but don't know for sure).
        Convert self.x and self.y to AbsPixel coordinates and draw in pixels.
        """
        # convert x and y to AbsPixel coordinates
        u = ROOT.gPad.XtoAbsPixel( self.x )
        v = ROOT.gPad.YtoAbsPixel( self.y )
        # two diagonal strokes form the cross; originally this code was
        # duplicated inline — factored into _draw_stroke (same behavior)
        t1 = self._draw_stroke(u, v, 1)
        t2 = self._draw_stroke(u, v, -1)
        self.container.append( (t1,t2) )
|
{
"content_hash": "3d206cc5f28bbf8ea48bf91da6f5622a",
"timestamp": "",
"source": "github",
"line_count": 57,
"max_line_length": 87,
"avg_line_length": 33.40350877192982,
"alnum_prop": 0.618172268907563,
"repo_name": "svenkreiss/PyROOTUtils",
"id": "2346330509fc3f1dec9bfca5f6193877696691e7",
"size": "1904",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "PyROOTUtils/Marker.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "31969"
}
],
"symlink_target": ""
}
|
"""
Handle upload and retrieval of files from iRODS. This method requires a preconfigured connection
to an iRODS repository through the `iinit` command
config options:
upload:
dir: ../final
method: irods
folder: absolute parent path in iRODS repository
resource: (optional) iRODS resource name, if other than default
"""
import os
from bcbio.provenance import do
from bcbio.upload import filesystem
def _check_create_collection(irods_fname, isdir=False):
    """Ensure the iRODS collection for a path exists, creating parents as needed.

    If ``isdir`` is True, ``irods_fname`` is itself the collection; otherwise
    its dirname is used.  Runs ``imkdir -p`` so existing collections are fine.
    """
    irods_dir = irods_fname if isdir else os.path.dirname(irods_fname)
    cmd = ["imkdir", "-p", irods_dir]
    do.run(cmd, "iRODS: create collection %s" % (irods_dir))
def update_file(finfo, sample_info, config):
    """
    Update the file to an iRODS repository.
    """
    # Stage the file into the final local directory first; the returned path
    # was previously bound to an unused variable — the whole directory is
    # synced below, so only the side effect matters here.
    filesystem.update_file(finfo, sample_info, config, pass_uptodate=True)
    _upload_dir_icommands_cli(config.get("dir"), config.get("folder"), config)
def _upload_dir_icommands_cli(local_dir, irods_dir, config=None, metadata=None):
    """
    Upload directory recursively via the standard icommands CLI.

    example: irsync -Kvar -R $resource $local_dir i:$irods_dir
    go to https://docs.irods.org/4.2.0/icommands/user/#irsync for more info
    """
    # -K verify checksums, -v verbose, -a archive, -r recursive
    sync_args = ["-K", "-v", "-a", "-r"]
    if config and config.get("resource"):
        sync_args += ["-R", config.get("resource")]
    _check_create_collection(irods_dir, isdir=True)
    cmd = ["irsync"] + sync_args + [local_dir, "i:" + irods_dir]
    do.run(cmd, "Uploading to iRODS")
|
{
"content_hash": "0e9ebfb123cc78d368089e04eb7b8718",
"timestamp": "",
"source": "github",
"line_count": 49,
"max_line_length": 100,
"avg_line_length": 32.59183673469388,
"alnum_prop": 0.6455854727614276,
"repo_name": "a113n/bcbio-nextgen",
"id": "822513145020cfa688ed31d73e52ef391e70b635",
"size": "1597",
"binary": false,
"copies": "5",
"ref": "refs/heads/master",
"path": "bcbio/upload/irods.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "1150780"
},
{
"name": "Ruby",
"bytes": "624"
},
{
"name": "Shell",
"bytes": "13596"
}
],
"symlink_target": ""
}
|
from django.contrib.auth import get_user_model
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext as _
from arctic.generics import (CreateView, ListView, UpdateView)
from arctic.loading import get_user_role_model
from .forms import (UserChangeMultiForm, UserCreationMultiForm)
# Resolve the (swappable) user and role models once at import time.
User = get_user_model()
UserRole = get_user_role_model()
# Name of the unique identifying field on the user model (e.g. "username").
username_field = User.USERNAME_FIELD
class UserListView(ListView):
    """Paginated, searchable listing of user/role assignments."""
    page_title = _('Users')
    paginate_by = 20
    model = UserRole
    # (field lookup, column header) pairs; lookups traverse the user FK.
    fields = [('user__{}'.format(username_field), 'Username'),
              'role__name', 'user__is_active', 'user__last_login']
    ordering_fields = ['user__{}'.format(username_field),
                       'role__name', 'user__last_login']
    search_fields = ['user__{}'.format(username_field)]
    filter_fields = ['user__is_active']
    # action_links = [
    #     ('delete', 'users:delete', 'fa-trash'),
    # ]
    field_links = {
        'user__{}'.format(username_field): 'users:detail',
    }
    tool_links = [
        (_('Create Users'), 'users:create', 'fa-plus'),
    ]
    # NOTE(review): empty string presumably disables the permission check —
    # confirm against arctic's permission handling.
    permission_required = ""
class UserCreateView(CreateView):
    """Create a user together with its role via a multi-form."""
    page_title = _('Create User')
    model = UserRole
    success_url = reverse_lazy('users:list')
    form_class = UserCreationMultiForm
    permission_required = ""
    def get_success_message(self, cleaned_data):
        # self.object is the multi-form result; 'user' keys the user instance.
        return _('User {} was successfully created').format(
            self.object['user'])
class UserUpdateView(UpdateView):
    """Edit an existing user and its role via a multi-form."""
    page_title = _('Change User')
    model = UserRole
    success_url = reverse_lazy('users:list')
    form_class = UserChangeMultiForm
    permission_required = ""
    def get_form_kwargs(self):
        # The multi-form needs one instance per sub-form: the user object
        # and the UserRole row itself.
        kwargs = super(UserUpdateView, self).get_form_kwargs()
        kwargs.update(instance={
            'user': self.object.user,
            'role': self.object
        })
        return kwargs
    def get_success_message(self, cleaned_data):
        return _('User {} was successfully updated').format(
            self.object['user'])
|
{
"content_hash": "7a7c88fc6396464ecb4d5bc827b81e79",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 66,
"avg_line_length": 30.36764705882353,
"alnum_prop": 0.6300242130750605,
"repo_name": "ddaan/django-arctic",
"id": "72c70866761a006c959fcb055a4e6c6de7e9245c",
"size": "2065",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "arctic/users/views.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "189003"
},
{
"name": "HTML",
"bytes": "62483"
},
{
"name": "JavaScript",
"bytes": "38489"
},
{
"name": "Python",
"bytes": "109791"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import
from django.db import models
from django.utils.translation import ugettext_lazy as _
from sentry.db.models import BoundedPositiveIntegerField, FlexibleForeignKey, Model
class GroupHash(Model):
    """Associates a 32-character hash with a project and, optionally, a group."""
    __core__ = False

    class State:
        # Sentinel values for the ``state`` column below.
        UNLOCKED = None
        LOCKED_IN_MIGRATION = 1

    project = FlexibleForeignKey("sentry.Project", null=True)
    hash = models.CharField(max_length=32)
    group = FlexibleForeignKey("sentry.Group", null=True)
    # NOTE(review): presumably references a GroupTombstone id when the hash
    # has been discarded — confirm against the tombstone model.
    group_tombstone_id = BoundedPositiveIntegerField(db_index=True, null=True)
    state = BoundedPositiveIntegerField(
        choices=[(State.LOCKED_IN_MIGRATION, _("Locked (Migration in Progress)"))], null=True
    )

    class Meta:
        app_label = "sentry"
        db_table = "sentry_grouphash"
        unique_together = (("project", "hash"),)
|
{
"content_hash": "582a87e3d93c385871cc6d3618f55ea9",
"timestamp": "",
"source": "github",
"line_count": 27,
"max_line_length": 93,
"avg_line_length": 31.40740740740741,
"alnum_prop": 0.6910377358490566,
"repo_name": "mvaled/sentry",
"id": "577ff1ec1a4ff19c7474cd90d74c2d7acfef27f2",
"size": "848",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "src/sentry/models/grouphash.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "226439"
},
{
"name": "Dockerfile",
"bytes": "6431"
},
{
"name": "HTML",
"bytes": "173429"
},
{
"name": "JavaScript",
"bytes": "9314175"
},
{
"name": "Lua",
"bytes": "65885"
},
{
"name": "Makefile",
"bytes": "9225"
},
{
"name": "Python",
"bytes": "50385401"
},
{
"name": "Ruby",
"bytes": "168"
},
{
"name": "Shell",
"bytes": "5685"
},
{
"name": "TypeScript",
"bytes": "773664"
}
],
"symlink_target": ""
}
|
from __future__ import absolute_import, unicode_literals
import sys
from django.apps.registry import Apps, apps
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation
)
from django.contrib.contenttypes import management
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.db import connections, models, router
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_str
from django.utils.six import StringIO
from .models import Author, Article, SchemeIncludedURL
@override_settings(ROOT_URLCONF='contenttypes_tests.urls')
class ContentTypesViewsTests(TestCase):
    """Tests for the contenttypes 'shortcut' redirect view."""
    fixtures = ['testdata.json']
    def test_shortcut_with_absolute_url(self):
        "Can view a shortcut for an Author object that has a get_absolute_url method"
        for obj in Author.objects.all():
            short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, obj.pk)
            response = self.client.get(short_url)
            # target 404s because the test urlconf doesn't serve the object
            self.assertRedirects(response, 'http://testserver%s' % obj.get_absolute_url(),
                                 status_code=302, target_status_code=404)
    def test_shortcut_with_absolute_url_including_scheme(self):
        """
        Can view a shortcut when object's get_absolute_url returns a full URL
        the tested URLs are in fixtures/testdata.json :
        "http://...", "https://..." and "//..."
        """
        for obj in SchemeIncludedURL.objects.all():
            short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(SchemeIncludedURL).id, obj.pk)
            response = self.client.get(short_url)
            self.assertRedirects(response, obj.get_absolute_url(),
                                 status_code=302,
                                 fetch_redirect_response=False)
    def test_shortcut_no_absolute_url(self):
        "Shortcuts for an object that has no get_absolute_url method raises 404"
        for obj in Article.objects.all():
            short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Article).id, obj.pk)
            response = self.client.get(short_url)
            self.assertEqual(response.status_code, 404)
    def test_wrong_type_pk(self):
        # a non-integer pk must 404, not raise
        short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, 'nobody/expects')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_shortcut_bad_pk(self):
        # an integer pk with no matching row must 404
        short_url = '/shortcut/%s/%s/' % (ContentType.objects.get_for_model(Author).id, '42424242')
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_nonint_content_type(self):
        # a non-integer content type id must 404
        an_author = Author.objects.all()[0]
        short_url = '/shortcut/%s/%s/' % ('spam', an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_bad_content_type(self):
        # an unknown content type id must 404
        an_author = Author.objects.all()[0]
        short_url = '/shortcut/%s/%s/' % (42424242, an_author.pk)
        response = self.client.get(short_url)
        self.assertEqual(response.status_code, 404)
    def test_create_contenttype_on_the_spot(self):
        """
        Make sure ContentTypeManager.get_for_model creates the corresponding
        content type if it doesn't exist in the database (for some reason).
        """
        class ModelCreatedOnTheFly(models.Model):
            name = models.CharField()
            class Meta:
                verbose_name = 'a model created on the fly'
                app_label = 'my_great_app'
                # NOTE(review): this Apps() instance is unused — presumably
                # intended to isolate the ad-hoc model registration; confirm.
                apps = Apps()
        ct = ContentType.objects.get_for_model(ModelCreatedOnTheFly)
        self.assertEqual(ct.app_label, 'my_great_app')
        self.assertEqual(ct.model, 'modelcreatedonthefly')
        self.assertEqual(ct.name, 'a model created on the fly')
class IsolatedModelsTestCase(TestCase):
    """Base class that restores the app registry after each test.

    Tests deriving from this define throwaway model classes; snapshotting
    and restoring the registered models keeps them from leaking into other
    tests' flush operations.
    """
    def setUp(self):
        # The unmanaged models need to be removed after the test in order to
        # prevent bad interactions with the flush operation in other tests.
        self._old_models = apps.app_configs['contenttypes_tests'].models.copy()
    def tearDown(self):
        apps.app_configs['contenttypes_tests'].models = self._old_models
        apps.all_models['contenttypes_tests'] = self._old_models
        apps.clear_cache()
class GenericForeignKeyTests(IsolatedModelsTestCase):
    """System-check tests for GenericForeignKey field configuration."""
    def test_str(self):
        class Model(models.Model):
            field = GenericForeignKey()
        expected = "contenttypes_tests.Model.field"
        actual = force_str(Model.field)
        self.assertEqual(expected, actual)
    def test_missing_content_type_field(self):
        # a GFK without its content_type FK must produce check error E002
        class TaggedItem(models.Model):
            # no content_type field
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
        errors = TaggedItem.content_object.check()
        expected = [
            checks.Error(
                "The GenericForeignKey content type references the non-existent field 'TaggedItem.content_type'.",
                hint=None,
                obj=TaggedItem.content_object,
                id='contenttypes.E002',
            )
        ]
        self.assertEqual(errors, expected)
    def test_invalid_content_type_field(self):
        # content_type of the wrong field class must produce check error E003
        class Model(models.Model):
            content_type = models.IntegerField()  # should be ForeignKey
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'content_type', 'object_id')
        errors = Model.content_object.check()
        expected = [
            checks.Error(
                "'Model.content_type' is not a ForeignKey.",
                hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
                obj=Model.content_object,
                id='contenttypes.E003',
            )
        ]
        self.assertEqual(errors, expected)
    def test_content_type_field_pointing_to_wrong_model(self):
        # content_type FK to a model other than ContentType must produce E004
        class Model(models.Model):
            content_type = models.ForeignKey('self')  # should point to ContentType
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'content_type', 'object_id')
        errors = Model.content_object.check()
        expected = [
            checks.Error(
                "'Model.content_type' is not a ForeignKey to 'contenttypes.ContentType'.",
                hint="GenericForeignKeys must use a ForeignKey to 'contenttypes.ContentType' as the 'content_type' field.",
                obj=Model.content_object,
                id='contenttypes.E004',
            )
        ]
        self.assertEqual(errors, expected)
def test_missing_object_id_field(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType)
# missing object_id field
content_object = GenericForeignKey()
errors = TaggedItem.content_object.check()
expected = [
checks.Error(
"The GenericForeignKey object ID references the non-existent field 'object_id'.",
hint=None,
obj=TaggedItem.content_object,
id='contenttypes.E001',
)
]
self.assertEqual(errors, expected)
def test_field_name_ending_with_underscore(self):
class Model(models.Model):
content_type = models.ForeignKey(ContentType)
object_id = models.PositiveIntegerField()
content_object_ = GenericForeignKey(
'content_type', 'object_id')
errors = Model.content_object_.check()
expected = [
checks.Error(
'Field names must not end with an underscore.',
hint=None,
obj=Model.content_object_,
id='fields.E001',
)
]
self.assertEqual(errors, expected)
def test_generic_foreign_key_checks_are_performed(self):
class MyGenericForeignKey(GenericForeignKey):
def check(self, **kwargs):
return ['performed!']
class Model(models.Model):
content_object = MyGenericForeignKey()
errors = checks.run_checks()
self.assertEqual(errors, ['performed!'])
def test_unsaved_instance_on_generic_foreign_key(self):
"""
#10811 -- Assigning an unsaved object to GenericForeignKey
should raise an exception.
"""
class Model(models.Model):
content_type = models.ForeignKey(ContentType, null=True)
object_id = models.PositiveIntegerField(null=True)
content_object = GenericForeignKey('content_type', 'object_id')
author = Author(name='Author')
model = Model()
model.content_object = None # no error here as content_type allows None
with self.assertRaisesMessage(ValueError,
'Cannot assign "%r": "%s" instance isn\'t saved in the database.'
% (author, author._meta.object_name)):
model.content_object = author # raised ValueError here as author is unsaved
author.save()
model.content_object = author # no error because the instance is saved
class GenericRelationshipTests(IsolatedModelsTestCase):
    """System-check output of GenericRelation in valid and invalid setups."""
    def test_valid_generic_relationship(self):
        # A relation to a model with a default-named GFK produces no errors.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
        class Bookmark(models.Model):
            tags = GenericRelation('TaggedItem')
        errors = Bookmark.tags.field.check()
        self.assertEqual(errors, [])
    def test_valid_generic_relationship_with_explicit_fields(self):
        # Custom content_type/object_id field names are accepted when the
        # relation names them explicitly.
        class TaggedItem(models.Model):
            custom_content_type = models.ForeignKey(ContentType)
            custom_object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'custom_content_type', 'custom_object_id')
        class Bookmark(models.Model):
            tags = GenericRelation('TaggedItem',
                content_type_field='custom_content_type',
                object_id_field='custom_object_id')
        errors = Bookmark.tags.field.check()
        self.assertEqual(errors, [])
    def test_pointing_to_missing_model(self):
        # A relation target that is not installed triggers fields.E300.
        class Model(models.Model):
            rel = GenericRelation('MissingModel')
        errors = Model.rel.field.check()
        expected = [
            checks.Error(
                ("Field defines a relation with model 'MissingModel', "
                 "which is either not installed, or is abstract."),
                hint=None,
                obj=Model.rel.field,
                id='fields.E300',
            )
        ]
        self.assertEqual(errors, expected)
    def test_valid_self_referential_generic_relationship(self):
        # A model may hold a GenericRelation to itself without errors.
        class Model(models.Model):
            rel = GenericRelation('Model')
            content_type = models.ForeignKey(ContentType)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey(
                'content_type', 'object_id')
        errors = Model.rel.field.check()
        self.assertEqual(errors, [])
    def test_missing_generic_foreign_key(self):
        # A relation to a model lacking a GenericForeignKey -> contenttypes.E004.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType)
            object_id = models.PositiveIntegerField()
        class Bookmark(models.Model):
            tags = GenericRelation('TaggedItem')
        errors = Bookmark.tags.field.check()
        expected = [
            checks.Error(
                ("The GenericRelation defines a relation with the model "
                 "'contenttypes_tests.TaggedItem', but that model does not have a "
                 "GenericForeignKey."),
                hint=None,
                obj=Bookmark.tags.field,
                id='contenttypes.E004',
            )
        ]
        self.assertEqual(errors, expected)
    @override_settings(TEST_SWAPPED_MODEL='contenttypes_tests.Replacement')
    def test_pointing_to_swapped_model(self):
        # A relation to a swapped-out model triggers fields.E301 with a hint
        # pointing at the swap setting.
        class Replacement(models.Model):
            pass
        class SwappedModel(models.Model):
            content_type = models.ForeignKey(ContentType)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
            class Meta:
                swappable = 'TEST_SWAPPED_MODEL'
        class Model(models.Model):
            rel = GenericRelation('SwappedModel')
        errors = Model.rel.field.check()
        expected = [
            checks.Error(
                ("Field defines a relation with the model "
                 "'contenttypes_tests.SwappedModel', "
                 "which has been swapped out."),
                hint="Update the relation to point at 'settings.TEST_SWAPPED_MODEL'.",
                obj=Model.rel.field,
                id='fields.E301',
            )
        ]
        self.assertEqual(errors, expected)
    def test_field_name_ending_with_underscore(self):
        # A trailing underscore in the relation name triggers fields.E001.
        class TaggedItem(models.Model):
            content_type = models.ForeignKey(ContentType)
            object_id = models.PositiveIntegerField()
            content_object = GenericForeignKey()
        class InvalidBookmark(models.Model):
            tags_ = GenericRelation('TaggedItem')
        errors = InvalidBookmark.tags_.field.check()
        expected = [
            checks.Error(
                'Field names must not end with an underscore.',
                hint=None,
                obj=InvalidBookmark.tags_.field,
                id='fields.E001',
            )
        ]
        self.assertEqual(errors, expected)
class UpdateContentTypesTests(TestCase):
    """update_contenttypes() handling of stale ContentType rows."""
    def setUp(self):
        # Plant one stale content type (no matching model) and capture stdout
        # so the command's output can be asserted on.
        self.before_count = ContentType.objects.count()
        ContentType.objects.create(name='fake', app_label='contenttypes_tests', model='Fake')
        self.app_config = apps.get_app_config('contenttypes_tests')
        self.old_stdout = sys.stdout
        sys.stdout = StringIO()
    def tearDown(self):
        sys.stdout = self.old_stdout
    def test_interactive_true(self):
        """
        interactive mode of update_contenttypes() (the default) should delete
        stale contenttypes.
        """
        # Simulate the user answering "yes" to the deletion prompt.
        # NOTE(review): this patch of management.input is never restored —
        # confirm no later test depends on the real input().
        management.input = lambda x: force_str("yes")
        management.update_contenttypes(self.app_config)
        self.assertIn("Deleting stale content type", sys.stdout.getvalue())
        self.assertEqual(ContentType.objects.count(), self.before_count)
    def test_interactive_false(self):
        """
        non-interactive mode of update_contenttypes() shouldn't delete stale
        content types.
        """
        management.update_contenttypes(self.app_config, interactive=False)
        self.assertIn("Stale content types remain.", sys.stdout.getvalue())
        self.assertEqual(ContentType.objects.count(), self.before_count + 1)
class TestRouter(object):
    """Database router that sends all reads to 'other' and all writes to 'default'."""
    def db_for_read(self, model, **hints):
        """Every read, regardless of model, goes to the 'other' alias."""
        return 'other'
    def db_for_write(self, model, **hints):
        """Every write, regardless of model, goes to the 'default' alias."""
        return 'default'
class ContentTypesMultidbTestCase(TestCase):
    """ContentType lookups must honor the router's db_for_read in multi-db setups."""
    def setUp(self):
        # Install a router that splits reads ('other') from writes ('default').
        self.old_routers = router.routers
        router.routers = [TestRouter()]
        # Whenever a test starts executing, only the "default" database is
        # connected. We explicitly connect to the "other" database here. If we
        # don't do it, then it will be implicitly connected later when we query
        # it, but in that case some database backends may automatically perform
        # extra queries upon connecting (notably mysql executes
        # "SET SQL_AUTO_IS_NULL = 0"), which will affect assertNumQueries().
        connections['other'].ensure_connection()
    def tearDown(self):
        router.routers = self.old_routers
    def test_multidb(self):
        """
        Test that, when using multiple databases, we use the db_for_read (see
        #20401).
        """
        # Clear the manager cache so get_for_model() must hit the database.
        ContentType.objects.clear_cache()
        with self.assertNumQueries(0, using='default'), \
                self.assertNumQueries(1, using='other'):
            ContentType.objects.get_for_model(Author)
|
{
"content_hash": "b975860ecebbb5c743bb5f5d8befb6f6",
"timestamp": "",
"source": "github",
"line_count": 429,
"max_line_length": 123,
"avg_line_length": 38.438228438228435,
"alnum_prop": 0.6110369921164343,
"repo_name": "YYWen0o0/python-frame-django",
"id": "e89d3dcf0d7a2e0787d4894fab2bafba240db677",
"size": "16514",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "tests/contenttypes_tests/tests.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "CSS",
"bytes": "53353"
},
{
"name": "JavaScript",
"bytes": "102434"
},
{
"name": "Python",
"bytes": "9808771"
},
{
"name": "Shell",
"bytes": "10452"
}
],
"symlink_target": ""
}
|
# Jupyter-notebook export: inspect your Twitter friends/followers with tweepy
# and rank the accounts you follow that do not follow you back.
print("Hello World")
# In[7]:
import tweepy
import pandas as pd
#Then create new twitter app: https://apps.twitter.com/
#Then set up Oauth below (filling in the empty double quotes)
# In[5]:
# == OAuth Authentication ==
#
# This mode of authentication is the new preferred way
# of authenticating with Twitter.
# Source: https://github.com/tweepy/tweepy/blob/master/examples/oauth.py
# The consumer keys can be found on your application's Details
# page located at https://dev.twitter.com/apps (under "OAuth settings")
consumer_key = ""
consumer_secret = ""
# The access tokens can be found on your applications's Details
# page located at https://dev.twitter.com/apps (located
# under "Your access token")
access_token = ""
access_token_secret = ""
auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.secure = True
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)
# If the authentication was successful, you should
# see the name of the account print out
print(api.me().name)
# In[6]:
#If you set up to write as well as read, you can send this message
api.update_status(status='This tweet just came from my local Jupyter notebook. Thanks project Jupyter and Tweepy for the helpful docs!')
# In[17]:
#put your twitter handle below instead of mine to get list of who you follow
friend_ids_all = api.friends_ids("richmanmax")
len(friend_ids_all)
# In[104]:
#put your twitter handle below instead of mine to get list of who follows you
followers_ids_all = api.followers_ids("richmanmax")
len(followers_ids_all)
# In[72]:
#choose how many friends (people who follow you) you want to analyze
#note: in the next section you will hit rate limit of 180 calls
#so pick a number under 180 or be prepared to wait
friend_ids_some = friend_ids_all[0:10]
friend_ids_some
# In[78]:
#check how many user API calls we have left
api.rate_limit_status()['resources']['users']
# In[79]:
# One api.get_user call per friend; each counts against the rate limit above.
user_json = []
for i in friend_ids_some:
    # NOTE(review): __getstate__()['_json'] reaches into tweepy internals to
    # grab the raw API payload — confirm it still works on your tweepy version.
    user_json += [api.get_user(i).__getstate__()['_json']]
len(user_json)
# In[80]:
#check how many user API calls we have left
api.rate_limit_status()['resources']['users']
# In[81]:
user_json
# In[111]:
# json_normalize moved to the top-level pandas namespace in pandas 1.0;
# fall back to the legacy location for older installs.
try:
    from pandas import json_normalize
except ImportError:
    from pandas.io.json import json_normalize
df_user_data = json_normalize(user_json)
df_user_data
# In[112]:
df_user_data.columns
# In[113]:
#example frequency of a variable
df_user_data['time_zone'].value_counts(dropna=False)
# In[120]:
#merge in information if they follow you
# Build one "TRUE" flag per follower (replaces the original append loop).
followback = ["TRUE"] * len(followers_ids_all)
# Materialize the zip: pandas' DataFrame constructor rejects bare iterators
# on some versions, and a list is required for repeated use anyway.
user_data_followback = list(zip(followers_ids_all, followback))
df_user_data_followback = pd.DataFrame(user_data_followback)
df_user_data_followback.columns = ["id", "follow_back"]
df_user_data_followback
df_user_data_merge = pd.DataFrame.merge(df_user_data, df_user_data_followback, how='left', on='id')
df_user_data_merge
# In[126]:
#simplify data frame to just the key variables of interest
#then sort by those following with most tweets and fewest followers (likely for me to still follow)
df_user_simple = df_user_data_merge[['id', 'name', 'screen_name', 'follow_back', 'statuses_count', 'followers_count', 'friends_count']]
df_user_simple.sort_values(["follow_back", "statuses_count", "followers_count"], ascending=[False, False, True])
# In[127]:
#DANGER WILL ROBINSON: This is the write command that will unfollow people
#it is appropriately called "destroy friendship" so use with care.
#designed currently for using one ID from above at a time for #####
# FIX: the original line was `api.destroy_friendship(######)`, where the `#`
# placeholder starts a comment and leaves the call unclosed — a SyntaxError
# that prevented the whole file from running. Fill in a real user id and
# uncomment to use:
# api.destroy_friendship(user_id_to_unfollow)
# In[ ]:
|
{
"content_hash": "47d0d544a2a50eda2389b43289fe223f",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 136,
"avg_line_length": 23.859060402684563,
"alnum_prop": 0.7279887482419128,
"repo_name": "mjrich/trimming-twitter-friends",
"id": "f10bae869397842090ccaf09c70480d051589338",
"size": "3583",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "trimming-twitter-friends.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "3583"
}
],
"symlink_target": ""
}
|
import os
import pytest
from mapproxy.client.http import HTTPClient
from mapproxy.script.wms_capabilities import wms_capabilities_command
from mapproxy.test.http import mock_httpd
from mapproxy.test.helper import capture
# Address/URL of the mocked HTTP server the tests stand up via mock_httpd.
TESTSERVER_ADDRESS = ("127.0.0.1", 56413)
TESTSERVER_URL = "http://%s:%s" % TESTSERVER_ADDRESS
# Canned WMS capabilities/exception documents served as fixture responses.
CAPABILITIES111_FILE = os.path.join(
    os.path.dirname(__file__), "fixture", "util_wms_capabilities111.xml"
)
CAPABILITIES130_FILE = os.path.join(
    os.path.dirname(__file__), "fixture", "util_wms_capabilities130.xml"
)
SERVICE_EXCEPTION_FILE = os.path.join(
    os.path.dirname(__file__), "fixture", "util_wms_capabilities_service_exception.xml"
)
class TestUtilWMSCapabilities(object):
    """End-to-end tests of the wms-capabilities CLI against a mocked WMS server."""
    # NOTE(review): nose-style `setup` — presumably fine for the pytest version
    # pinned by this project; newer pytest wants `setup_method`. Confirm.
    def setup(self):
        self.client = HTTPClient()
        self.args = ["command_dummy", "--host", TESTSERVER_URL + "/service"]
    def test_http_error(self):
        # Both an unresolvable host and a non-URL argument must exit with an
        # ERROR message on stderr.
        self.args = ["command_dummy", "--host", "http://foo.doesnotexist"]
        with capture() as (out, err):
            with pytest.raises(SystemExit):
                wms_capabilities_command(self.args)
        assert err.getvalue().startswith("ERROR:")
        self.args[2] = "/no/valid/url"
        with capture() as (out, err):
            with pytest.raises(SystemExit):
                wms_capabilities_command(self.args)
        assert err.getvalue().startswith("ERROR:")
    def test_request_not_parsable(self):
        # An empty response body is not valid XML; the command must report a
        # parse failure after the "-"*80 separator and exit.
        with mock_httpd(
            TESTSERVER_ADDRESS,
            [
                (
                    {
                        "path": "/service?request=GetCapabilities&version=1.1.1&service=WMS",
                        "method": "GET",
                    },
                    {"status": "200", "body": ""},
                )
            ],
        ):
            with capture() as (out, err):
                with pytest.raises(SystemExit):
                    wms_capabilities_command(self.args)
            error_msg = err.getvalue().rsplit("-" * 80, 1)[1].strip()
            assert error_msg.startswith("Could not parse the document")
    def test_service_exception(self):
        # A WMS ServiceException document is parseable XML but not a
        # capabilities document; the command must say so and exit.
        self.args = [
            "command_dummy",
            "--host",
            TESTSERVER_URL + "/service?request=GetCapabilities",
        ]
        with open(SERVICE_EXCEPTION_FILE, "rb") as fp:
            capabilities_doc = fp.read()
        with mock_httpd(
            TESTSERVER_ADDRESS,
            [
                (
                    {
                        "path": "/service?request=GetCapabilities&version=1.1.1&service=WMS",
                        "method": "GET",
                    },
                    {"status": "200", "body": capabilities_doc},
                )
            ],
        ):
            with capture() as (out, err):
                with pytest.raises(SystemExit):
                    wms_capabilities_command(self.args)
            error_msg = err.getvalue().rsplit("-" * 80, 1)[1].strip()
            assert "Not a capabilities document" in error_msg
    def test_parse_capabilities(self):
        # A valid 1.1.1 capabilities document is parsed and echoed with its
        # version on the second output line.
        self.args = [
            "command_dummy",
            "--host",
            TESTSERVER_URL + "/service?request=GetCapabilities",
            "--version",
            "1.1.1",
        ]
        with open(CAPABILITIES111_FILE, "rb") as fp:
            capabilities_doc = fp.read()
        with mock_httpd(
            TESTSERVER_ADDRESS,
            [
                (
                    {
                        "path": "/service?request=GetCapabilities&version=1.1.1&service=WMS",
                        "method": "GET",
                    },
                    {"status": "200", "body": capabilities_doc},
                )
            ],
        ):
            with capture() as (out, err):
                wms_capabilities_command(self.args)
            lines = out.getvalue().split("\n")
            assert lines[1].startswith("Capabilities Document Version 1.1.1")
    def test_parse_130capabilities(self):
        # Same as above for a WMS 1.3.0 document; note the request path also
        # carries version=1.3.0.
        self.args = [
            "command_dummy",
            "--host",
            TESTSERVER_URL + "/service?request=GetCapabilities",
            "--version",
            "1.3.0",
        ]
        with open(CAPABILITIES130_FILE, "rb") as fp:
            capabilities_doc = fp.read()
        with mock_httpd(
            TESTSERVER_ADDRESS,
            [
                (
                    {
                        "path": "/service?request=GetCapabilities&version=1.3.0&service=WMS",
                        "method": "GET",
                    },
                    {"status": "200", "body": capabilities_doc},
                )
            ],
        ):
            with capture() as (out, err):
                wms_capabilities_command(self.args)
            lines = out.getvalue().split("\n")
            assert lines[1].startswith("Capabilities Document Version 1.3.0")
    def test_key_error(self):
        # Corrupting a required attribute name (minx -> foo) must surface as a
        # missing-attribute error on stderr.
        self.args = [
            "command_dummy",
            "--host",
            TESTSERVER_URL + "/service?request=GetCapabilities",
        ]
        with open(CAPABILITIES111_FILE, "rb") as fp:
            capabilities_doc = fp.read()
        capabilities_doc = capabilities_doc.replace(b"minx", b"foo")
        with mock_httpd(
            TESTSERVER_ADDRESS,
            [
                (
                    {
                        "path": "/service?request=GetCapabilities&version=1.1.1&service=WMS",
                        "method": "GET",
                    },
                    {"status": "200", "body": capabilities_doc},
                )
            ],
        ):
            with capture() as (out, err):
                with pytest.raises(SystemExit):
                    wms_capabilities_command(self.args)
            assert err.getvalue().startswith("XML-Element has no such attribute")
|
{
"content_hash": "94d5d0ad208b62de30ff3a3737de614a",
"timestamp": "",
"source": "github",
"line_count": 167,
"max_line_length": 97,
"avg_line_length": 36.31137724550898,
"alnum_prop": 0.470811345646438,
"repo_name": "camptocamp/mapproxy",
"id": "7e63322468c99855365db68c4ccb2f1995ed1527",
"size": "6708",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "mapproxy/test/system/test_util_wms_capabilities.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1326087"
},
{
"name": "Shell",
"bytes": "324"
}
],
"symlink_target": ""
}
|
'''
Plugin that answers questions from people
'''
from adaptors import google_answers
import logging
# Module-level logger for the answers plugin.
logger = logging.getLogger('answers')
def GetFirstName(full_name):
    """Return the first whitespace-delimited token of *full_name*.

    Raises IndexError when *full_name* contains no non-whitespace characters.
    """
    return full_name.split(None, 1)[0]
def GetLastQuestion(messages):
    """Return the most recent message whose text contains a '?', or None."""
    for candidate in reversed(messages):
        text = candidate.text
        if text and '?' in text:
            return candidate
    return None
class AnswersPlugin(object):
    """Plugin that replies to the most recent question in a conversation."""
    def HandleMessages(self, conversation, new_messages):
        """Answer the latest question found among the last five new messages.

        Looks up an answer via google_answers; posts nothing when no question
        or no answer is found. In group conversations (more than two members)
        the reply is prefixed with the asker's first name.
        """
        # TODO(fortuna): Save timestamp of last processed message to avoid re-processing.
        last_question = GetLastQuestion(new_messages[-5:])
        if not last_question:
            return
        # Fix: log through the module-level 'answers' logger instead of the
        # root logger (the original called logging.info, bypassing `logger`).
        logger.info("Answering: %s", last_question.text)
        answer = google_answers.GetAnswer(last_question.text)
        if answer is None:
            logger.info("No answer found.")
            return
        logger.info("Answering with %s", answer)
        reply = ""
        if last_question.user and last_question.user.name and len(conversation.Members()) > 2:
            reply = u"@%s: " % GetFirstName(last_question.user.name)
        reply += answer
        conversation.PostMessage(reply)
|
{
"content_hash": "2cc531298bea0cae4ea8ced81134d3b6",
"timestamp": "",
"source": "github",
"line_count": 38,
"max_line_length": 92,
"avg_line_length": 29.44736842105263,
"alnum_prop": 0.6907953529937444,
"repo_name": "andreimatei/congratboto",
"id": "b42b06aaa7d8a7d22cc2d8cde4c5c51f3be58a3c",
"size": "1119",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "app_engine/plugins/answers.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "29114"
}
],
"symlink_target": ""
}
|
"""Minimal UDP echo server bound to localhost:12345.

Fix: the original used Python-2-only print statements and sent a str payload,
so it failed to run under Python 3; prints use the function form and the
reply payload is bytes, preserving the same console output and wire bytes.
"""
import socket

# Create a UDP socket object (IPv4 datagram).
ipv4 = socket.AF_INET
udp = socket.SOCK_DGRAM
sock = socket.socket(ipv4, udp)
print("Socket successfully created")

# Port on which to accept datagrams.
port = 12345

# Bind the server socket to the port.
sock.bind(('127.0.0.1', port))
print("Socket binded to %s" % (port))

while True:
    # Receive a datagram (up to 1024 bytes) and the client's address.
    data, addr = sock.recvfrom(1024)
    print('Got connection from', addr)
    print('Data recv: ', data)
    # Send a reply to the client (bytes required on Python 3).
    sock.sendto(b'Hi Client', addr)
|
{
"content_hash": "1d8f82e59e84643fbaf2331b386e407d",
"timestamp": "",
"source": "github",
"line_count": 23,
"max_line_length": 50,
"avg_line_length": 24,
"alnum_prop": 0.6956521739130435,
"repo_name": "lab-inf/2017.1-cc-rede1",
"id": "e6c7de6ab0f103c7c2edaf6cdad7e1a61a98d374",
"size": "696",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "lab04/03/server.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "49228"
},
{
"name": "Shell",
"bytes": "2282"
}
],
"symlink_target": ""
}
|
import os
# try/except added for compatibility with python < 3.8
try:
from unittest import mock
from unittest.mock import AsyncMock # pragma: NO COVER
except ImportError: # pragma: NO COVER
import mock
import math
from google.api_core import gapic_v1, grpc_helpers, grpc_helpers_async, path_template
from google.api_core import client_options
from google.api_core import exceptions as core_exceptions
import google.auth
from google.auth import credentials as ga_credentials
from google.auth.exceptions import MutualTLSChannelError
from google.iam.v1 import iam_policy_pb2 # type: ignore
from google.iam.v1 import options_pb2 # type: ignore
from google.iam.v1 import policy_pb2 # type: ignore
from google.oauth2 import service_account
from google.protobuf import field_mask_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.type import expr_pb2 # type: ignore
import grpc
from grpc.experimental import aio
from proto.marshal.rules import wrappers
from proto.marshal.rules.dates import DurationRule, TimestampRule
import pytest
from google.cloud.datacatalog_v1.services.data_catalog import (
DataCatalogAsyncClient,
DataCatalogClient,
pagers,
transports,
)
from google.cloud.datacatalog_v1.types import (
bigquery,
common,
data_source,
datacatalog,
dataplex_spec,
gcs_fileset_spec,
physical_schema,
schema,
search,
table_spec,
tags,
timestamps,
usage,
)
def client_cert_source_callback():
    """Dummy client-certificate provider used by the mTLS tests below."""
    return (b"cert bytes", b"key bytes")
# If default endpoint is localhost, then default mtls endpoint will be the same.
# This method modifies the default endpoint so the client can produce a different
# mtls endpoint for endpoint testing purposes.
def modify_default_endpoint(client):
    """Return a distinct endpoint when the client's default is a localhost one."""
    default = client.DEFAULT_ENDPOINT
    if "localhost" in default:
        return "foo.googleapis.com"
    return default
def test__get_default_mtls_endpoint():
    """_get_default_mtls_endpoint maps *.googleapis.com hosts to their mtls twin."""
    cases = [
        (None, None),
        ("example.googleapis.com", "example.mtls.googleapis.com"),
        ("example.mtls.googleapis.com", "example.mtls.googleapis.com"),
        ("example.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        ("example.mtls.sandbox.googleapis.com", "example.mtls.sandbox.googleapis.com"),
        # Non-Google endpoints pass through untouched.
        ("api.example.com", "api.example.com"),
    ]
    for endpoint, expected in cases:
        assert DataCatalogClient._get_default_mtls_endpoint(endpoint) == expected
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (DataCatalogClient, "grpc"),
        (DataCatalogAsyncClient, "grpc_asyncio"),
    ],
)
def test_data_catalog_client_from_service_account_info(client_class, transport_name):
    """from_service_account_info builds credentials from the parsed dict and
    hands them to the transport with the default API host."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_info"
    ) as factory:
        factory.return_value = creds
        info = {"valid": True}
        client = client_class.from_service_account_info(info, transport=transport_name)
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("datacatalog.googleapis.com:443")
@pytest.mark.parametrize(
    "transport_class,transport_name",
    [
        (transports.DataCatalogGrpcTransport, "grpc"),
        (transports.DataCatalogGrpcAsyncIOTransport, "grpc_asyncio"),
    ],
)
def test_data_catalog_client_service_account_always_use_jwt(
    transport_class, transport_name
):
    """The transport toggles self-signed JWT on the credentials only when
    always_use_jwt_access=True is requested."""
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=True)
        use_jwt.assert_called_once_with(True)
    with mock.patch.object(
        service_account.Credentials, "with_always_use_jwt_access", create=True
    ) as use_jwt:
        creds = service_account.Credentials(None, None, None)
        transport = transport_class(credentials=creds, always_use_jwt_access=False)
        use_jwt.assert_not_called()
@pytest.mark.parametrize(
    "client_class,transport_name",
    [
        (DataCatalogClient, "grpc"),
        (DataCatalogAsyncClient, "grpc_asyncio"),
    ],
)
def test_data_catalog_client_from_service_account_file(client_class, transport_name):
    """Both from_service_account_file and its from_service_account_json alias
    load credentials from the file path and pass them to the transport."""
    creds = ga_credentials.AnonymousCredentials()
    with mock.patch.object(
        service_account.Credentials, "from_service_account_file"
    ) as factory:
        factory.return_value = creds
        client = client_class.from_service_account_file(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        client = client_class.from_service_account_json(
            "dummy/file/path.json", transport=transport_name
        )
        assert client.transport._credentials == creds
        assert isinstance(client, client_class)
        assert client.transport._host == ("datacatalog.googleapis.com:443")
def test_data_catalog_client_get_transport_class():
    """get_transport_class returns a known transport by default and resolves
    the 'grpc' name to the gRPC transport."""
    transport = DataCatalogClient.get_transport_class()
    available_transports = [
        transports.DataCatalogGrpcTransport,
    ]
    assert transport in available_transports
    transport = DataCatalogClient.get_transport_class("grpc")
    assert transport == transports.DataCatalogGrpcTransport
@pytest.mark.parametrize(
    "client_class,transport_class,transport_name",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc"),
        (
            DataCatalogAsyncClient,
            transports.DataCatalogGrpcAsyncIOTransport,
            "grpc_asyncio",
        ),
    ],
)
@mock.patch.object(
    DataCatalogClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DataCatalogClient)
)
@mock.patch.object(
    DataCatalogAsyncClient,
    "DEFAULT_ENDPOINT",
    modify_default_endpoint(DataCatalogAsyncClient),
)
def test_data_catalog_client_client_options(
    client_class, transport_class, transport_name
):
    """ClientOptions and the GOOGLE_API_USE_MTLS_ENDPOINT /
    GOOGLE_API_USE_CLIENT_CERTIFICATE environment variables control which
    endpoint and settings are handed to the transport constructor."""
    # Check that if channel is provided we won't create a new one.
    with mock.patch.object(DataCatalogClient, "get_transport_class") as gtc:
        transport = transport_class(credentials=ga_credentials.AnonymousCredentials())
        client = client_class(transport=transport)
        gtc.assert_not_called()
    # Check that if channel is provided via str we will create a new one.
    with mock.patch.object(DataCatalogClient, "get_transport_class") as gtc:
        client = client_class(transport=transport_name)
        gtc.assert_called()
    # Check the case api_endpoint is provided.
    options = client_options.ClientOptions(api_endpoint="squid.clam.whelk")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(transport=transport_name, client_options=options)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host="squid.clam.whelk",
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "never".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT is
    # "always".
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(transport=transport_name)
            patched.assert_called_once_with(
                credentials=None,
                credentials_file=None,
                host=client.DEFAULT_MTLS_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
    # Check the case api_endpoint is not provided and GOOGLE_API_USE_MTLS_ENDPOINT has
    # unsupported value.
    with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "Unsupported"}):
        with pytest.raises(MutualTLSChannelError):
            client = client_class(transport=transport_name)
    # Check the case GOOGLE_API_USE_CLIENT_CERTIFICATE has unsupported value.
    with mock.patch.dict(
        os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "Unsupported"}
    ):
        with pytest.raises(ValueError):
            client = client_class(transport=transport_name)
    # Check the case quota_project_id is provided
    options = client_options.ClientOptions(quota_project_id="octopus")
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id="octopus",
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience=None,
        )
    # Check the case api_endpoint is provided
    options = client_options.ClientOptions(
        api_audience="https://language.googleapis.com"
    )
    with mock.patch.object(transport_class, "__init__") as patched:
        patched.return_value = None
        client = client_class(client_options=options, transport=transport_name)
        patched.assert_called_once_with(
            credentials=None,
            credentials_file=None,
            host=client.DEFAULT_ENDPOINT,
            scopes=None,
            client_cert_source_for_mtls=None,
            quota_project_id=None,
            client_info=transports.base.DEFAULT_CLIENT_INFO,
            always_use_jwt_access=True,
            api_audience="https://language.googleapis.com",
        )
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,use_client_cert_env",
[
(DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", "true"),
(
DataCatalogAsyncClient,
transports.DataCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
"true",
),
(DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", "false"),
(
DataCatalogAsyncClient,
transports.DataCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
"false",
),
],
)
@mock.patch.object(
DataCatalogClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DataCatalogClient)
)
@mock.patch.object(
DataCatalogAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataCatalogAsyncClient),
)
@mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "auto"})
def test_data_catalog_client_mtls_env_auto(
client_class, transport_class, transport_name, use_client_cert_env
):
# This tests the endpoint autoswitch behavior. Endpoint is autoswitched to the default
# mtls endpoint, if GOOGLE_API_USE_CLIENT_CERTIFICATE is "true" and client cert exists.
# Check the case client_cert_source is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
options = client_options.ClientOptions(
client_cert_source=client_cert_source_callback
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
if use_client_cert_env == "false":
expected_client_cert_source = None
expected_host = client.DEFAULT_ENDPOINT
else:
expected_client_cert_source = client_cert_source_callback
expected_host = client.DEFAULT_MTLS_ENDPOINT
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
# Check the case ADC client cert is provided. Whether client cert is used depends on
# GOOGLE_API_USE_CLIENT_CERTIFICATE value.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=client_cert_source_callback,
):
if use_client_cert_env == "false":
expected_host = client.DEFAULT_ENDPOINT
expected_client_cert_source = None
else:
expected_host = client.DEFAULT_MTLS_ENDPOINT
expected_client_cert_source = client_cert_source_callback
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=expected_host,
scopes=None,
client_cert_source_for_mtls=expected_client_cert_source,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
# Check the case client_cert_source and ADC client cert are not provided.
with mock.patch.dict(
os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert_env}
):
with mock.patch.object(transport_class, "__init__") as patched:
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
patched.return_value = None
client = client_class(transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
@pytest.mark.parametrize("client_class", [DataCatalogClient, DataCatalogAsyncClient])
@mock.patch.object(
DataCatalogClient, "DEFAULT_ENDPOINT", modify_default_endpoint(DataCatalogClient)
)
@mock.patch.object(
DataCatalogAsyncClient,
"DEFAULT_ENDPOINT",
modify_default_endpoint(DataCatalogAsyncClient),
)
def test_data_catalog_client_get_mtls_endpoint_and_cert_source(client_class):
mock_client_cert_source = mock.Mock()
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "true".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source == mock_client_cert_source
# Test the case GOOGLE_API_USE_CLIENT_CERTIFICATE is "false".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "false"}):
mock_client_cert_source = mock.Mock()
mock_api_endpoint = "foo"
options = client_options.ClientOptions(
client_cert_source=mock_client_cert_source, api_endpoint=mock_api_endpoint
)
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source(
options
)
assert api_endpoint == mock_api_endpoint
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "never".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "never"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "always".
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_MTLS_ENDPOINT": "always"}):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert doesn't exist.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=False,
):
api_endpoint, cert_source = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_ENDPOINT
assert cert_source is None
# Test the case GOOGLE_API_USE_MTLS_ENDPOINT is "auto" and default cert exists.
with mock.patch.dict(os.environ, {"GOOGLE_API_USE_CLIENT_CERTIFICATE": "true"}):
with mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source",
return_value=True,
):
with mock.patch(
"google.auth.transport.mtls.default_client_cert_source",
return_value=mock_client_cert_source,
):
(
api_endpoint,
cert_source,
) = client_class.get_mtls_endpoint_and_cert_source()
assert api_endpoint == client_class.DEFAULT_MTLS_ENDPOINT
assert cert_source == mock_client_cert_source
@pytest.mark.parametrize(
"client_class,transport_class,transport_name",
[
(DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc"),
(
DataCatalogAsyncClient,
transports.DataCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
),
],
)
def test_data_catalog_client_client_options_scopes(
client_class, transport_class, transport_name
):
# Check the case scopes are provided.
options = client_options.ClientOptions(
scopes=["1", "2"],
)
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file=None,
host=client.DEFAULT_ENDPOINT,
scopes=["1", "2"],
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", grpc_helpers),
(
DataCatalogAsyncClient,
transports.DataCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_data_catalog_client_client_options_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
def test_data_catalog_client_client_options_from_dict():
with mock.patch(
"google.cloud.datacatalog_v1.services.data_catalog.transports.DataCatalogGrpcTransport.__init__"
) as grpc_transport:
grpc_transport.return_value = None
client = DataCatalogClient(client_options={"api_endpoint": "squid.clam.whelk"})
grpc_transport.assert_called_once_with(
credentials=None,
credentials_file=None,
host="squid.clam.whelk",
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
@pytest.mark.parametrize(
"client_class,transport_class,transport_name,grpc_helpers",
[
(DataCatalogClient, transports.DataCatalogGrpcTransport, "grpc", grpc_helpers),
(
DataCatalogAsyncClient,
transports.DataCatalogGrpcAsyncIOTransport,
"grpc_asyncio",
grpc_helpers_async,
),
],
)
def test_data_catalog_client_create_channel_credentials_file(
client_class, transport_class, transport_name, grpc_helpers
):
# Check the case credentials file is provided.
options = client_options.ClientOptions(credentials_file="credentials.json")
with mock.patch.object(transport_class, "__init__") as patched:
patched.return_value = None
client = client_class(client_options=options, transport=transport_name)
patched.assert_called_once_with(
credentials=None,
credentials_file="credentials.json",
host=client.DEFAULT_ENDPOINT,
scopes=None,
client_cert_source_for_mtls=None,
quota_project_id=None,
client_info=transports.base.DEFAULT_CLIENT_INFO,
always_use_jwt_access=True,
api_audience=None,
)
# test that the credentials from file are saved and used as the credentials.
with mock.patch.object(
google.auth, "load_credentials_from_file", autospec=True
) as load_creds, mock.patch.object(
google.auth, "default", autospec=True
) as adc, mock.patch.object(
grpc_helpers, "create_channel"
) as create_channel:
creds = ga_credentials.AnonymousCredentials()
file_creds = ga_credentials.AnonymousCredentials()
load_creds.return_value = (file_creds, None)
adc.return_value = (creds, None)
client = client_class(client_options=options, transport=transport_name)
create_channel.assert_called_with(
"datacatalog.googleapis.com:443",
credentials=file_creds,
credentials_file=None,
quota_project_id=None,
default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
scopes=None,
default_host="datacatalog.googleapis.com",
ssl_credentials=None,
options=[
("grpc.max_send_message_length", -1),
("grpc.max_receive_message_length", -1),
],
)
@pytest.mark.parametrize(
"request_type",
[
datacatalog.SearchCatalogRequest,
dict,
],
)
def test_search_catalog(request_type, transport: str = "grpc"):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.SearchCatalogResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
response = client.search_catalog(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.SearchCatalogRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchCatalogPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
def test_search_catalog_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
client.search_catalog()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.SearchCatalogRequest()
@pytest.mark.asyncio
async def test_search_catalog_async(
transport: str = "grpc_asyncio", request_type=datacatalog.SearchCatalogRequest
):
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datacatalog.SearchCatalogResponse(
next_page_token="next_page_token_value",
unreachable=["unreachable_value"],
)
)
response = await client.search_catalog(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.SearchCatalogRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, pagers.SearchCatalogAsyncPager)
assert response.next_page_token == "next_page_token_value"
assert response.unreachable == ["unreachable_value"]
@pytest.mark.asyncio
async def test_search_catalog_async_from_dict():
await test_search_catalog_async(request_type=dict)
def test_search_catalog_flattened():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.SearchCatalogResponse()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.search_catalog(
scope=datacatalog.SearchCatalogRequest.Scope(
include_org_ids=["include_org_ids_value"]
),
query="query_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].scope
mock_val = datacatalog.SearchCatalogRequest.Scope(
include_org_ids=["include_org_ids_value"]
)
assert arg == mock_val
arg = args[0].query
mock_val = "query_value"
assert arg == mock_val
def test_search_catalog_flattened_error():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.search_catalog(
datacatalog.SearchCatalogRequest(),
scope=datacatalog.SearchCatalogRequest.Scope(
include_org_ids=["include_org_ids_value"]
),
query="query_value",
)
@pytest.mark.asyncio
async def test_search_catalog_flattened_async():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.SearchCatalogResponse()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datacatalog.SearchCatalogResponse()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.search_catalog(
scope=datacatalog.SearchCatalogRequest.Scope(
include_org_ids=["include_org_ids_value"]
),
query="query_value",
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].scope
mock_val = datacatalog.SearchCatalogRequest.Scope(
include_org_ids=["include_org_ids_value"]
)
assert arg == mock_val
arg = args[0].query
mock_val = "query_value"
assert arg == mock_val
@pytest.mark.asyncio
async def test_search_catalog_flattened_error_async():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.search_catalog(
datacatalog.SearchCatalogRequest(),
scope=datacatalog.SearchCatalogRequest.Scope(
include_org_ids=["include_org_ids_value"]
),
query="query_value",
)
def test_search_catalog_pager(transport_name: str = "grpc"):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials,
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
next_page_token="abc",
),
datacatalog.SearchCatalogResponse(
results=[],
next_page_token="def",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
],
next_page_token="ghi",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
),
RuntimeError,
)
metadata = ()
pager = client.search_catalog(request={})
assert pager._metadata == metadata
results = list(pager)
assert len(results) == 6
assert all(isinstance(i, search.SearchCatalogResult) for i in results)
def test_search_catalog_pages(transport_name: str = "grpc"):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials,
transport=transport_name,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.search_catalog), "__call__") as call:
# Set the response to a series of pages.
call.side_effect = (
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
next_page_token="abc",
),
datacatalog.SearchCatalogResponse(
results=[],
next_page_token="def",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
],
next_page_token="ghi",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
),
RuntimeError,
)
pages = list(client.search_catalog(request={}).pages)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_search_catalog_async_pager():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_catalog), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
next_page_token="abc",
),
datacatalog.SearchCatalogResponse(
results=[],
next_page_token="def",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
],
next_page_token="ghi",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
),
RuntimeError,
)
async_pager = await client.search_catalog(
request={},
)
assert async_pager.next_page_token == "abc"
responses = []
async for response in async_pager: # pragma: no branch
responses.append(response)
assert len(responses) == 6
assert all(isinstance(i, search.SearchCatalogResult) for i in responses)
@pytest.mark.asyncio
async def test_search_catalog_async_pages():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials,
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.search_catalog), "__call__", new_callable=mock.AsyncMock
) as call:
# Set the response to a series of pages.
call.side_effect = (
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
next_page_token="abc",
),
datacatalog.SearchCatalogResponse(
results=[],
next_page_token="def",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
],
next_page_token="ghi",
),
datacatalog.SearchCatalogResponse(
results=[
search.SearchCatalogResult(),
search.SearchCatalogResult(),
],
),
RuntimeError,
)
pages = []
async for page_ in (
await client.search_catalog(request={})
).pages: # pragma: no branch
pages.append(page_)
for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
"request_type",
[
datacatalog.CreateEntryGroupRequest,
dict,
],
)
def test_create_entry_group(request_type, transport: str = "grpc"):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.EntryGroup(
name="name_value",
display_name="display_name_value",
description="description_value",
)
response = client.create_entry_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.CreateEntryGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datacatalog.EntryGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
def test_create_entry_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
client.create_entry_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.CreateEntryGroupRequest()
@pytest.mark.asyncio
async def test_create_entry_group_async(
transport: str = "grpc_asyncio", request_type=datacatalog.CreateEntryGroupRequest
):
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datacatalog.EntryGroup(
name="name_value",
display_name="display_name_value",
description="description_value",
)
)
response = await client.create_entry_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.CreateEntryGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datacatalog.EntryGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_entry_group_async_from_dict():
await test_create_entry_group_async(request_type=dict)
def test_create_entry_group_field_headers():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datacatalog.CreateEntryGroupRequest()
request.parent = "parent_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
call.return_value = datacatalog.EntryGroup()
client.create_entry_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent_value",
) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_entry_group_field_headers_async():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Any value that is part of the HTTP/1.1 URI should be sent as
# a field header. Set these to a non-empty value.
request = datacatalog.CreateEntryGroupRequest()
request.parent = "parent_value"
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datacatalog.EntryGroup()
)
await client.create_entry_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == request
# Establish that the field header was sent.
_, _, kw = call.mock_calls[0]
assert (
"x-goog-request-params",
"parent=parent_value",
) in kw["metadata"]
def test_create_entry_group_flattened():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.EntryGroup()
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
client.create_entry_group(
parent="parent_value",
entry_group_id="entry_group_id_value",
entry_group=datacatalog.EntryGroup(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].entry_group_id
mock_val = "entry_group_id_value"
assert arg == mock_val
arg = args[0].entry_group
mock_val = datacatalog.EntryGroup(name="name_value")
assert arg == mock_val
def test_create_entry_group_flattened_error():
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
client.create_entry_group(
datacatalog.CreateEntryGroupRequest(),
parent="parent_value",
entry_group_id="entry_group_id_value",
entry_group=datacatalog.EntryGroup(name="name_value"),
)
@pytest.mark.asyncio
async def test_create_entry_group_flattened_async():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(
type(client.transport.create_entry_group), "__call__"
) as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.EntryGroup()
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datacatalog.EntryGroup()
)
# Call the method with a truthy value for each flattened field,
# using the keyword arguments to the method.
response = await client.create_entry_group(
parent="parent_value",
entry_group_id="entry_group_id_value",
entry_group=datacatalog.EntryGroup(name="name_value"),
)
# Establish that the underlying call was made with the expected
# request object values.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
arg = args[0].parent
mock_val = "parent_value"
assert arg == mock_val
arg = args[0].entry_group_id
mock_val = "entry_group_id_value"
assert arg == mock_val
arg = args[0].entry_group
mock_val = datacatalog.EntryGroup(name="name_value")
assert arg == mock_val
@pytest.mark.asyncio
async def test_create_entry_group_flattened_error_async():
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
)
# Attempting to call a method with both a request object and flattened
# fields is an error.
with pytest.raises(ValueError):
await client.create_entry_group(
datacatalog.CreateEntryGroupRequest(),
parent="parent_value",
entry_group_id="entry_group_id_value",
entry_group=datacatalog.EntryGroup(name="name_value"),
)
@pytest.mark.parametrize(
"request_type",
[
datacatalog.GetEntryGroupRequest,
dict,
],
)
def test_get_entry_group(request_type, transport: str = "grpc"):
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = datacatalog.EntryGroup(
name="name_value",
display_name="display_name_value",
description="description_value",
)
response = client.get_entry_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls) == 1
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.GetEntryGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datacatalog.EntryGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
def test_get_entry_group_empty_call():
# This test is a coverage failsafe to make sure that totally empty calls,
# i.e. request == None and no flattened fields passed, work.
client = DataCatalogClient(
credentials=ga_credentials.AnonymousCredentials(),
transport="grpc",
)
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
client.get_entry_group()
call.assert_called()
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.GetEntryGroupRequest()
@pytest.mark.asyncio
async def test_get_entry_group_async(
transport: str = "grpc_asyncio", request_type=datacatalog.GetEntryGroupRequest
):
client = DataCatalogAsyncClient(
credentials=ga_credentials.AnonymousCredentials(),
transport=transport,
)
# Everything is optional in proto3 as far as the runtime is concerned,
# and we are mocking out the actual API, so just send an empty request.
request = request_type()
# Mock the actual call within the gRPC stub, and fake the request.
with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
# Designate an appropriate return value for the call.
call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
datacatalog.EntryGroup(
name="name_value",
display_name="display_name_value",
description="description_value",
)
)
response = await client.get_entry_group(request)
# Establish that the underlying gRPC stub method was called.
assert len(call.mock_calls)
_, args, _ = call.mock_calls[0]
assert args[0] == datacatalog.GetEntryGroupRequest()
# Establish that the response is the type that we expect.
assert isinstance(response, datacatalog.EntryGroup)
assert response.name == "name_value"
assert response.display_name == "display_name_value"
assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_entry_group_async_from_dict():
    # Same coverage as the async test, driven by a plain-dict request.
    await test_get_entry_group_async(request_type=dict)
def test_get_entry_group_field_headers():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.GetEntryGroupRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
        call.return_value = datacatalog.EntryGroup()
        client.get_entry_group(request)
    # Exactly one stub invocation, carrying our request verbatim.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_entry_group_field_headers_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.GetEntryGroupRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        await client.get_entry_group(request)
    # The stub must have been invoked with our request verbatim.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in call_kwargs["metadata"]
def test_get_entry_group_flattened():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
        # Canned response for the intercepted stub.
        call.return_value = datacatalog.EntryGroup()
        # Invoke with a truthy value for every flattened field.
        client.get_entry_group(
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # The flattened keyword arguments must land on the request object.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    sent = call_args[0]
    assert sent.name == "name_value"
    assert sent.read_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_get_entry_group_flattened_error():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        client.get_entry_group(
            datacatalog.GetEntryGroupRequest(),
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_get_entry_group_flattened_async():
    """Flattened kwargs must populate the request for the async client."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry_group), "__call__") as call:
        # Designate an appropriate return value for the call.  (The redundant
        # plain-message assignment that preceded this — and was immediately
        # overwritten — has been removed as dead code.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_entry_group(
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    arg = args[0].name
    mock_val = "name_value"
    assert arg == mock_val
    arg = args[0].read_mask
    mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
    assert arg == mock_val
@pytest.mark.asyncio
async def test_get_entry_group_flattened_error_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        await client.get_entry_group(
            datacatalog.GetEntryGroupRequest(),
            name="name_value",
            read_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.UpdateEntryGroupRequest,
        dict,
    ],
)
def test_update_entry_group(request_type, transport: str = "grpc"):
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub and hand back a canned response.
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        call.return_value = datacatalog.EntryGroup(
            name="name_value",
            display_name="display_name_value",
            description="description_value",
        )
        response = client.update_entry_group(request)
    # Exactly one stub invocation, with a default request on the wire.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.UpdateEntryGroupRequest()
    # The canned payload must round-trip through the client unchanged.
    assert isinstance(response, datacatalog.EntryGroup)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_update_entry_group_empty_call():
    # Coverage failsafe: invoking the method with neither a request object
    # nor flattened fields must still send a well-formed default request.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Intercept the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        client.update_entry_group()
        call.assert_called()
        _name, call_args, _kwargs = call.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateEntryGroupRequest()
@pytest.mark.asyncio
async def test_update_entry_group_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateEntryGroupRequest
):
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub and hand back a canned response.
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup(
                name="name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.update_entry_group(request)
    # The stub must have been invoked with a default request on the wire.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.UpdateEntryGroupRequest()
    # The canned payload must round-trip through the client unchanged.
    assert isinstance(response, datacatalog.EntryGroup)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_entry_group_async_from_dict():
    # Same coverage as the async test, driven by a plain-dict request.
    await test_update_entry_group_async(request_type=dict)
def test_update_entry_group_field_headers():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.UpdateEntryGroupRequest()
    request.entry_group.name = "name_value"
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        call.return_value = datacatalog.EntryGroup()
        client.update_entry_group(request)
    # Exactly one stub invocation, carrying our request verbatim.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "entry_group.name=name_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_entry_group_field_headers_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.UpdateEntryGroupRequest()
    request.entry_group.name = "name_value"
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        await client.update_entry_group(request)
    # The stub must have been invoked with our request verbatim.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "entry_group.name=name_value",
    ) in call_kwargs["metadata"]
def test_update_entry_group_flattened():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        # Canned response for the intercepted stub.
        call.return_value = datacatalog.EntryGroup()
        # Invoke with a truthy value for every flattened field.
        client.update_entry_group(
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # The flattened keyword arguments must land on the request object.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    sent = call_args[0]
    assert sent.entry_group == datacatalog.EntryGroup(name="name_value")
    assert sent.update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_entry_group_flattened_error():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        client.update_entry_group(
            datacatalog.UpdateEntryGroupRequest(),
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_entry_group_flattened_async():
    """Flattened kwargs must populate the request for the async client."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (The redundant
        # plain-message assignment that preceded this — and was immediately
        # overwritten — has been removed as dead code.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryGroup()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_entry_group(
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    arg = args[0].entry_group
    mock_val = datacatalog.EntryGroup(name="name_value")
    assert arg == mock_val
    arg = args[0].update_mask
    mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
    assert arg == mock_val
@pytest.mark.asyncio
async def test_update_entry_group_flattened_error_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        await client.update_entry_group(
            datacatalog.UpdateEntryGroupRequest(),
            entry_group=datacatalog.EntryGroup(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.DeleteEntryGroupRequest,
        dict,
    ],
)
def test_delete_entry_group(request_type, transport: str = "grpc"):
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub; a delete RPC yields no payload.
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        call.return_value = None
        response = client.delete_entry_group(request)
    # Exactly one stub invocation, with a default request on the wire.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.DeleteEntryGroupRequest()
    # Deletes return nothing.
    assert response is None
def test_delete_entry_group_empty_call():
    # Coverage failsafe: invoking the method with neither a request object
    # nor flattened fields must still send a well-formed default request.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Intercept the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        client.delete_entry_group()
        call.assert_called()
        _name, call_args, _kwargs = call.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteEntryGroupRequest()
@pytest.mark.asyncio
async def test_delete_entry_group_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteEntryGroupRequest
):
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub; a delete RPC yields no payload.
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_entry_group(request)
    # The stub must have been invoked with a default request on the wire.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.DeleteEntryGroupRequest()
    # Deletes return nothing.
    assert response is None
@pytest.mark.asyncio
async def test_delete_entry_group_async_from_dict():
    # Same coverage as the async test, driven by a plain-dict request.
    await test_delete_entry_group_async(request_type=dict)
def test_delete_entry_group_field_headers():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.DeleteEntryGroupRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        call.return_value = None
        client.delete_entry_group(request)
    # Exactly one stub invocation, carrying our request verbatim.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_entry_group_field_headers_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.DeleteEntryGroupRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_entry_group(request)
    # The stub must have been invoked with our request verbatim.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in call_kwargs["metadata"]
def test_delete_entry_group_flattened():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        # Delete RPCs return no payload.
        call.return_value = None
        # Invoke with a truthy value for every flattened field.
        client.delete_entry_group(
            name="name_value",
        )
    # The flattened keyword argument must land on the request object.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0].name == "name_value"
def test_delete_entry_group_flattened_error():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        client.delete_entry_group(
            datacatalog.DeleteEntryGroupRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_entry_group_flattened_async():
    """Flattened kwargs must populate the request for the async client."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_entry_group), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (The redundant
        # `call.return_value = None` assignment that preceded this — and was
        # immediately overwritten — has been removed as dead code.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_entry_group(
            name="name_value",
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    arg = args[0].name
    mock_val = "name_value"
    assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_entry_group_flattened_error_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        await client.delete_entry_group(
            datacatalog.DeleteEntryGroupRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.ListEntryGroupsRequest,
        dict,
    ],
)
def test_list_entry_groups(request_type, transport: str = "grpc"):
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub and hand back a canned response.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        call.return_value = datacatalog.ListEntryGroupsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_entry_groups(request)
    # Exactly one stub invocation, with a default request on the wire.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.ListEntryGroupsRequest()
    # List calls are wrapped in a pager exposing the raw page token.
    assert isinstance(response, pagers.ListEntryGroupsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_entry_groups_empty_call():
    # Coverage failsafe: invoking the method with neither a request object
    # nor flattened fields must still send a well-formed default request.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Intercept the gRPC stub so no network traffic occurs.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        client.list_entry_groups()
        call.assert_called()
        _name, call_args, _kwargs = call.mock_calls[0]
        assert call_args[0] == datacatalog.ListEntryGroupsRequest()
@pytest.mark.asyncio
async def test_list_entry_groups_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ListEntryGroupsRequest
):
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub and hand back a canned response.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntryGroupsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_entry_groups(request)
    # The stub must have been invoked with a default request on the wire.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.ListEntryGroupsRequest()
    # List calls are wrapped in an async pager exposing the raw page token.
    assert isinstance(response, pagers.ListEntryGroupsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_entry_groups_async_from_dict():
    # Same coverage as the async test, driven by a plain-dict request.
    await test_list_entry_groups_async(request_type=dict)
def test_list_entry_groups_field_headers():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.ListEntryGroupsRequest()
    request.parent = "parent_value"
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        call.return_value = datacatalog.ListEntryGroupsResponse()
        client.list_entry_groups(request)
    # Exactly one stub invocation, carrying our request verbatim.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_entry_groups_field_headers_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value routed into the HTTP/1.1 URI must also travel as a
    # request-params field header; use a non-empty value to verify.
    request = datacatalog.ListEntryGroupsRequest()
    request.parent = "parent_value"
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntryGroupsResponse()
        )
        await client.list_entry_groups(request)
    # The stub must have been invoked with our request verbatim.
    assert len(call.mock_calls)
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == request
    # The routing header must be present in the call metadata.
    _name, _args, call_kwargs = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in call_kwargs["metadata"]
def test_list_entry_groups_flattened():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Canned response for the intercepted stub.
        call.return_value = datacatalog.ListEntryGroupsResponse()
        # Invoke with a truthy value for every flattened field.
        client.list_entry_groups(
            parent="parent_value",
        )
    # The flattened keyword argument must land on the request object.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0].parent == "parent_value"
def test_list_entry_groups_flattened_error():
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        client.list_entry_groups(
            datacatalog.ListEntryGroupsRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_entry_groups_flattened_async():
    """Flattened kwargs must populate the request for the async client."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.  (The redundant
        # plain-message assignment that preceded this — and was immediately
        # overwritten — has been removed as dead code.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntryGroupsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_entry_groups(
            parent="parent_value",
        )
    # Establish that the underlying call was made with the expected
    # request object values.
    assert len(call.mock_calls)
    _, args, _ = call.mock_calls[0]
    arg = args[0].parent
    mock_val = "parent_value"
    assert arg == mock_val
@pytest.mark.asyncio
async def test_list_entry_groups_flattened_error_async():
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Supplying a request object together with flattened fields is invalid.
    with pytest.raises(ValueError):
        await client.list_entry_groups(
            datacatalog.ListEntryGroupsRequest(),
            parent="parent_value",
        )
def test_list_entry_groups_pager(transport_name: str = "grpc"):
    """The sync pager must iterate items across all fake pages."""
    client = DataCatalogClient(
        # Instantiate the credentials — the bare class was passed here
        # (missing "()"), inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[],
                next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_entry_groups(request={})
        # The pager carries the routing metadata for subsequent page fetches.
        assert pager._metadata == metadata
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, datacatalog.EntryGroup) for i in results)
def test_list_entry_groups_pages(transport_name: str = "grpc"):
    """The sync pager must expose pages with their raw page tokens."""
    client = DataCatalogClient(
        # Instantiate the credentials — the bare class was passed here
        # (missing "()"), inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups), "__call__"
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[],
                next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_entry_groups(request={}).pages)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_entry_groups_async_pager():
    """The async pager must iterate items across all fake pages."""
    client = DataCatalogAsyncClient(
        # Instantiate the credentials — the bare class was passed here
        # (missing "()"), inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[],
                next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_entry_groups(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, datacatalog.EntryGroup) for i in responses)
@pytest.mark.asyncio
async def test_list_entry_groups_async_pages():
    """The async pager must expose pages with their raw page tokens."""
    client = DataCatalogAsyncClient(
        # Instantiate the credentials — the bare class was passed here
        # (missing "()"), inconsistent with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entry_groups),
        "__call__",
        new_callable=mock.AsyncMock,
    ) as call:
        # Set the response to a series of pages; the trailing RuntimeError
        # guards against the pager requesting more pages than provided.
        call.side_effect = (
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[],
                next_page_token="def",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntryGroupsResponse(
                entry_groups=[
                    datacatalog.EntryGroup(),
                    datacatalog.EntryGroup(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_entry_groups(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.CreateEntryRequest,
        dict,
    ],
)
def test_create_entry(request_type, transport: str = "grpc"):
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # proto3 fields are all optional at runtime and the API is mocked out,
    # so an empty request is sufficient.
    request = request_type()
    # Intercept the gRPC stub and hand back a fully-populated Entry.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            fully_qualified_name="fully_qualified_name_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
            database_table_spec=datacatalog.DatabaseTableSpec(
                type_=datacatalog.DatabaseTableSpec.TableType.NATIVE
            ),
        )
        response = client.create_entry(request)
    # Exactly one stub invocation, with a default request on the wire.
    assert len(call.mock_calls) == 1
    _name, call_args, _kwargs = call.mock_calls[0]
    assert call_args[0] == datacatalog.CreateEntryRequest()
    # The canned scalar fields must round-trip through the client unchanged.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_create_entry_empty_call():
    """A no-argument create_entry() still issues a default CreateEntryRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        client.create_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryRequest()
@pytest.mark.asyncio
async def test_create_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateEntryRequest
):
    """Async create_entry sends the request and returns the parsed Entry."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                fully_qualified_name="fully_qualified_name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.create_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateEntryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_create_entry_async_from_dict():
    """Exercise the async create_entry path with a plain dict request."""
    await test_create_entry_async(request_type=dict)
def test_create_entry_field_headers():
    """create_entry forwards request.parent as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateEntryRequest()

    request.parent = "parent_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        call.return_value = datacatalog.Entry()
        client.create_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_entry_field_headers_async():
    """Async create_entry forwards request.parent as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateEntryRequest()

    request.parent = "parent_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        await client.create_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_create_entry_flattened():
    """Flattened kwargs to create_entry populate the request object fields."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_entry(
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].entry_id
        mock_val = "entry_id_value"
        assert arg == mock_val
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
def test_create_entry_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.create_entry(
            datacatalog.CreateEntryRequest(),
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_entry_flattened_async():
    """Flattened kwargs on the async client populate the request fields."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        # Fix: removed the dead sync-style assignment that was immediately
        # overwritten by this async FakeUnaryUnaryCall wrapper.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_entry(
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].entry_id
        mock_val = "entry_id_value"
        assert arg == mock_val
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_entry_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.create_entry(
            datacatalog.CreateEntryRequest(),
            parent="parent_value",
            entry_id="entry_id_value",
            entry=datacatalog.Entry(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.UpdateEntryRequest,
        dict,
    ],
)
def test_update_entry(request_type, transport: str = "grpc"):
    """update_entry sends an UpdateEntryRequest and returns the parsed Entry."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            fully_qualified_name="fully_qualified_name_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
            database_table_spec=datacatalog.DatabaseTableSpec(
                type_=datacatalog.DatabaseTableSpec.TableType.NATIVE
            ),
        )
        response = client.update_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateEntryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_update_entry_empty_call():
    """A no-argument update_entry() still issues a default UpdateEntryRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        client.update_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateEntryRequest()
@pytest.mark.asyncio
async def test_update_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateEntryRequest
):
    """Async update_entry sends the request and returns the parsed Entry."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                fully_qualified_name="fully_qualified_name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.update_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateEntryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_update_entry_async_from_dict():
    """Exercise the async update_entry path with a plain dict request."""
    await test_update_entry_async(request_type=dict)
def test_update_entry_field_headers():
    """update_entry forwards entry.name as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateEntryRequest()

    request.entry.name = "name_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        call.return_value = datacatalog.Entry()
        client.update_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "entry.name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_entry_field_headers_async():
    """Async update_entry forwards entry.name as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateEntryRequest()

    request.entry.name = "name_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        await client.update_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "entry.name=name_value",
    ) in kw["metadata"]
def test_update_entry_flattened():
    """Flattened kwargs to update_entry populate the request object fields."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_entry(
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_entry_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.update_entry(
            datacatalog.UpdateEntryRequest(),
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_entry_flattened_async():
    """Flattened kwargs on the async client populate the request fields."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        # Fix: removed the dead sync-style assignment that was immediately
        # overwritten by this async FakeUnaryUnaryCall wrapper.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_entry(
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].entry
        mock_val = datacatalog.Entry(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_entry_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.update_entry(
            datacatalog.UpdateEntryRequest(),
            entry=datacatalog.Entry(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.DeleteEntryRequest,
        dict,
    ],
)
def test_delete_entry(request_type, transport: str = "grpc"):
    """delete_entry sends a DeleteEntryRequest and returns None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteEntryRequest()

    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_entry_empty_call():
    """A no-argument delete_entry() still issues a default DeleteEntryRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        client.delete_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteEntryRequest()
@pytest.mark.asyncio
async def test_delete_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteEntryRequest
):
    """Async delete_entry sends the request and returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteEntryRequest()

    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_entry_async_from_dict():
    """Exercise the async delete_entry path with a plain dict request."""
    await test_delete_entry_async(request_type=dict)
def test_delete_entry_field_headers():
    """delete_entry forwards request.name as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteEntryRequest()

    request.name = "name_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        call.return_value = None
        client.delete_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_entry_field_headers_async():
    """Async delete_entry forwards request.name as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteEntryRequest()

    request.name = "name_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
def test_delete_entry_flattened():
    """Flattened name kwarg to delete_entry populates the request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_entry(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_entry_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.delete_entry(
            datacatalog.DeleteEntryRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_entry_flattened_async():
    """Flattened name kwarg on the async client populates the request."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        # Fix: removed the dead sync-style `call.return_value = None` that
        # was immediately overwritten by this async FakeUnaryUnaryCall wrapper.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_entry(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_entry_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.delete_entry(
            datacatalog.DeleteEntryRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.GetEntryRequest,
        dict,
    ],
)
def test_get_entry(request_type, transport: str = "grpc"):
    """get_entry sends a GetEntryRequest and returns the parsed Entry."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            fully_qualified_name="fully_qualified_name_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
            database_table_spec=datacatalog.DatabaseTableSpec(
                type_=datacatalog.DatabaseTableSpec.TableType.NATIVE
            ),
        )
        response = client.get_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.GetEntryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_get_entry_empty_call():
    """A no-argument get_entry() still issues a default GetEntryRequest."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        client.get_entry()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.GetEntryRequest()
@pytest.mark.asyncio
async def test_get_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.GetEntryRequest
):
    """Async get_entry sends the request and returns the parsed Entry."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                fully_qualified_name="fully_qualified_name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.get_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.GetEntryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_get_entry_async_from_dict():
    """Exercise the async get_entry path with a plain dict request."""
    await test_get_entry_async(request_type=dict)
def test_get_entry_field_headers():
    """get_entry forwards request.name as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.GetEntryRequest()

    request.name = "name_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        call.return_value = datacatalog.Entry()
        client.get_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_get_entry_field_headers_async():
    """Async get_entry forwards request.name as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.GetEntryRequest()

    request.name = "name_value"

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        await client.get_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request

    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
def test_get_entry_flattened():
    """Flattened name kwarg to get_entry populates the request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.get_entry(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_get_entry_flattened_error():
    """Passing both a request object and flattened fields raises ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        client.get_entry(
            datacatalog.GetEntryRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_get_entry_flattened_async():
    """Flattened name kwarg on the async client populates the request."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        # Fix: removed the dead sync-style `call.return_value = datacatalog.Entry()`
        # that was immediately overwritten by this async FakeUnaryUnaryCall wrapper.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(datacatalog.Entry())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_entry(
            name="name_value",
        )

        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_entry_flattened_error_async():
    """Async: mixing a request object with flattened fields raises ValueError."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )

    # Attempting to call a method with both a request object and flattened
    # fields is an error.
    with pytest.raises(ValueError):
        await client.get_entry(
            datacatalog.GetEntryRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.LookupEntryRequest,
        dict,
    ],
)
def test_lookup_entry(request_type, transport: str = "grpc"):
    """lookup_entry sends a LookupEntryRequest and returns the parsed Entry."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )

    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()

    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.lookup_entry), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.Entry(
            name="name_value",
            linked_resource="linked_resource_value",
            fully_qualified_name="fully_qualified_name_value",
            display_name="display_name_value",
            description="description_value",
            type_=datacatalog.EntryType.TABLE,
            integrated_system=common.IntegratedSystem.BIGQUERY,
            gcs_fileset_spec=gcs_fileset_spec.GcsFilesetSpec(
                file_patterns=["file_patterns_value"]
            ),
            database_table_spec=datacatalog.DatabaseTableSpec(
                type_=datacatalog.DatabaseTableSpec.TableType.NATIVE
            ),
        )
        response = client.lookup_entry(request)

        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.LookupEntryRequest()

    # Establish that the response is the type that we expect.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
def test_lookup_entry_empty_call():
    """Calling lookup_entry with no arguments still issues an RPC."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.lookup_entry), "__call__") as stub:
        client.lookup_entry()
        stub.assert_called()
        # A default (empty) request proto is synthesized and sent.
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.LookupEntryRequest()
@pytest.mark.asyncio
async def test_lookup_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.LookupEntryRequest
):
    """lookup_entry (async) forwards the request and surfaces the Entry."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Every proto3 field is optional and the API is mocked; send empty request.
    request = request_type()
    with mock.patch.object(type(client.transport.lookup_entry), "__call__") as stub:
        # The async transport expects an awaitable call object.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Entry(
                name="name_value",
                linked_resource="linked_resource_value",
                fully_qualified_name="fully_qualified_name_value",
                display_name="display_name_value",
                description="description_value",
            )
        )
        response = await client.lookup_entry(request)
        # The stub was invoked with the expected request proto.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.LookupEntryRequest()
    # The stubbed fields survive the round trip unchanged.
    assert isinstance(response, datacatalog.Entry)
    assert response.name == "name_value"
    assert response.linked_resource == "linked_resource_value"
    assert response.fully_qualified_name == "fully_qualified_name_value"
    assert response.display_name == "display_name_value"
    assert response.description == "description_value"
@pytest.mark.asyncio
async def test_lookup_entry_async_from_dict():
    """Re-run the async lookup_entry test with the request as a plain dict."""
    await test_lookup_entry_async(request_type=dict)
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.ListEntriesRequest,
        dict,
    ],
)
def test_list_entries(request_type, transport: str = "grpc"):
    """list_entries issues the RPC and wraps the response in a pager."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(type(client.transport.list_entries), "__call__") as stub:
        stub.return_value = datacatalog.ListEntriesResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_entries(request)
        # Exactly one RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ListEntriesRequest()
    # The client wraps the raw response in a pager.
    assert isinstance(response, pagers.ListEntriesPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_entries_empty_call():
    """Calling list_entries with no arguments still issues an RPC."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.list_entries), "__call__") as stub:
        client.list_entries()
        stub.assert_called()
        # A default (empty) request proto is synthesized and sent.
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ListEntriesRequest()
@pytest.mark.asyncio
async def test_list_entries_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ListEntriesRequest
):
    """list_entries (async) issues the RPC and returns an async pager."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(type(client.transport.list_entries), "__call__") as stub:
        # The async transport expects an awaitable call object.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntriesResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_entries(request)
        # The stub was invoked with the expected request proto.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ListEntriesRequest()
    # The client wraps the raw response in an async pager.
    assert isinstance(response, pagers.ListEntriesAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_entries_async_from_dict():
    """Re-run the async list_entries test with the request as a plain dict."""
    await test_list_entries_async(request_type=dict)
def test_list_entries_field_headers():
    """URI-bound request fields are mirrored into routing metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI; it must be copied
    # into the x-goog-request-params header.
    request = datacatalog.ListEntriesRequest()
    request.parent = "parent_value"
    with mock.patch.object(type(client.transport.list_entries), "__call__") as stub:
        stub.return_value = datacatalog.ListEntriesResponse()
        client.list_entries(request)
        # One RPC, carrying the exact request object.
        assert len(stub.mock_calls) == 1
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_list_entries_field_headers_async():
    """URI-bound request fields are mirrored into routing metadata (async)."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.ListEntriesRequest()
    request.parent = "parent_value"
    with mock.patch.object(type(client.transport.list_entries), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntriesResponse()
        )
        await client.list_entries(request)
        # The stub received the exact request object.
        assert len(stub.mock_calls)
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in sent_kwargs["metadata"]
def test_list_entries_flattened():
    """Flattened kwargs are copied into the outgoing request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.list_entries), "__call__") as stub:
        stub.return_value = datacatalog.ListEntriesResponse()
        # Invoke with a truthy value for each flattened field.
        client.list_entries(
            parent="parent_value",
        )
        # Exactly one RPC; the flattened value landed on the request proto.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
def test_list_entries_flattened_error():
    """list_entries rejects a request object combined with flattened kwargs."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing an explicit request proto with flattened fields must raise.
    with pytest.raises(ValueError):
        client.list_entries(
            datacatalog.ListEntriesRequest(),
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_entries_flattened_async():
    """list_entries (async) copies flattened kwargs into the request proto."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Removed a dead plain-response assignment that was immediately
        # overwritten by the FakeUnaryUnaryCall below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListEntriesResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_entries(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_entries_flattened_error_async():
    """list_entries (async) rejects a request object plus flattened kwargs."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing an explicit request proto with flattened fields must raise.
    with pytest.raises(ValueError):
        await client.list_entries(
            datacatalog.ListEntriesRequest(),
            parent="parent_value",
        )
def test_list_entries_pager(transport_name: str = "grpc"):
    """The sync pager iterates all entries across the mocked pages.

    Also verifies the pager carries the routing metadata for ``parent``.
    """
    client = DataCatalogClient(
        # Fix: pass a credentials *instance*, not the class object, for
        # consistency with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 entries).
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(
                entries=[],
                next_page_token="def",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_entries(request={})
        assert pager._metadata == metadata
        # Iterating the pager walks 3 + 0 + 1 + 2 = 6 entries.
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, datacatalog.Entry) for i in results)
def test_list_entries_pages(transport_name: str = "grpc"):
    """Each page surfaced by the sync pager exposes its raw next_page_token."""
    client = DataCatalogClient(
        # Fix: pass a credentials *instance*, not the class object, for
        # consistency with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_entries), "__call__") as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(
                entries=[],
                next_page_token="def",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_entries(request={}).pages)
        # The final page has an empty token, terminating the iteration.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_entries_async_pager():
    """The async pager iterates all entries across the mocked pages."""
    client = DataCatalogAsyncClient(
        # Fix: pass a credentials *instance*, not the class object, for
        # consistency with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entries), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages (3 + 0 + 1 + 2 entries).
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(
                entries=[],
                next_page_token="def",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_entries(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        # 3 + 0 + 1 + 2 entries across the four pages.
        assert len(responses) == 6
        assert all(isinstance(i, datacatalog.Entry) for i in responses)
@pytest.mark.asyncio
async def test_list_entries_async_pages():
    """Each page surfaced by the async pager exposes its raw next_page_token."""
    client = DataCatalogAsyncClient(
        # Fix: pass a credentials *instance*, not the class object, for
        # consistency with every other test in this file.
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_entries), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        call.side_effect = (
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListEntriesResponse(
                entries=[],
                next_page_token="def",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListEntriesResponse(
                entries=[
                    datacatalog.Entry(),
                    datacatalog.Entry(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_entries(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        # The final page has an empty token, terminating the iteration.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.ModifyEntryOverviewRequest,
        dict,
    ],
)
def test_modify_entry_overview(request_type, transport: str = "grpc"):
    """modify_entry_overview issues the RPC and returns the EntryOverview."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.modify_entry_overview), "__call__"
    ) as stub:
        stub.return_value = datacatalog.EntryOverview(
            overview="overview_value",
        )
        response = client.modify_entry_overview(request)
        # Exactly one RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ModifyEntryOverviewRequest()
    # The stubbed field survives the round trip.
    assert isinstance(response, datacatalog.EntryOverview)
    assert response.overview == "overview_value"
def test_modify_entry_overview_empty_call():
    """Calling modify_entry_overview with no arguments still issues an RPC."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.modify_entry_overview), "__call__"
    ) as stub:
        client.modify_entry_overview()
        stub.assert_called()
        # A default (empty) request proto is synthesized and sent.
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ModifyEntryOverviewRequest()
@pytest.mark.asyncio
async def test_modify_entry_overview_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ModifyEntryOverviewRequest
):
    """modify_entry_overview (async) issues the RPC and returns the overview."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.modify_entry_overview), "__call__"
    ) as stub:
        # The async transport expects an awaitable call object.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryOverview(
                overview="overview_value",
            )
        )
        response = await client.modify_entry_overview(request)
        # The stub was invoked with the expected request proto.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ModifyEntryOverviewRequest()
    # The stubbed field survives the round trip.
    assert isinstance(response, datacatalog.EntryOverview)
    assert response.overview == "overview_value"
@pytest.mark.asyncio
async def test_modify_entry_overview_async_from_dict():
    """Re-run the async modify_entry_overview test with a plain dict request."""
    await test_modify_entry_overview_async(request_type=dict)
def test_modify_entry_overview_field_headers():
    """URI-bound request fields are mirrored into routing metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.ModifyEntryOverviewRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.modify_entry_overview), "__call__"
    ) as stub:
        stub.return_value = datacatalog.EntryOverview()
        client.modify_entry_overview(request)
        # One RPC, carrying the exact request object.
        assert len(stub.mock_calls) == 1
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_modify_entry_overview_field_headers_async():
    """URI-bound request fields are mirrored into routing metadata (async)."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.ModifyEntryOverviewRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.modify_entry_overview), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.EntryOverview()
        )
        await client.modify_entry_overview(request)
        # The stub received the exact request object.
        assert len(stub.mock_calls)
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in sent_kwargs["metadata"]
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.ModifyEntryContactsRequest,
        dict,
    ],
)
def test_modify_entry_contacts(request_type, transport: str = "grpc"):
    """modify_entry_contacts issues the RPC and returns the Contacts proto."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.modify_entry_contacts), "__call__"
    ) as stub:
        stub.return_value = datacatalog.Contacts()
        response = client.modify_entry_contacts(request)
        # Exactly one RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ModifyEntryContactsRequest()
    assert isinstance(response, datacatalog.Contacts)
def test_modify_entry_contacts_empty_call():
    """Calling modify_entry_contacts with no arguments still issues an RPC."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.modify_entry_contacts), "__call__"
    ) as stub:
        client.modify_entry_contacts()
        stub.assert_called()
        # A default (empty) request proto is synthesized and sent.
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ModifyEntryContactsRequest()
@pytest.mark.asyncio
async def test_modify_entry_contacts_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ModifyEntryContactsRequest
):
    """modify_entry_contacts (async) issues the RPC and returns Contacts."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.modify_entry_contacts), "__call__"
    ) as stub:
        # The async transport expects an awaitable call object.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Contacts()
        )
        response = await client.modify_entry_contacts(request)
        # The stub was invoked with the expected request proto.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.ModifyEntryContactsRequest()
    assert isinstance(response, datacatalog.Contacts)
@pytest.mark.asyncio
async def test_modify_entry_contacts_async_from_dict():
    """Re-run the async modify_entry_contacts test with a plain dict request."""
    await test_modify_entry_contacts_async(request_type=dict)
def test_modify_entry_contacts_field_headers():
    """URI-bound request fields are mirrored into routing metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.ModifyEntryContactsRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.modify_entry_contacts), "__call__"
    ) as stub:
        stub.return_value = datacatalog.Contacts()
        client.modify_entry_contacts(request)
        # One RPC, carrying the exact request object.
        assert len(stub.mock_calls) == 1
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_modify_entry_contacts_field_headers_async():
    """URI-bound request fields are mirrored into routing metadata (async)."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.ModifyEntryContactsRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.modify_entry_contacts), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.Contacts()
        )
        await client.modify_entry_contacts(request)
        # The stub received the exact request object.
        assert len(stub.mock_calls)
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "name=name_value",
        ) in sent_kwargs["metadata"]
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.CreateTagTemplateRequest,
        dict,
    ],
)
def test_create_tag_template(request_type, transport: str = "grpc"):
    """create_tag_template issues the RPC and returns the TagTemplate."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as stub:
        # Canned response the fake transport hands back.
        stub.return_value = tags.TagTemplate(
            name="name_value",
            display_name="display_name_value",
            is_publicly_readable=True,
        )
        response = client.create_tag_template(request)
        # Exactly one RPC, carrying the expected request proto.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.CreateTagTemplateRequest()
    # The stubbed fields survive the round trip unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_publicly_readable is True
def test_create_tag_template_empty_call():
    """Calling create_tag_template with no arguments still issues an RPC."""
    # Coverage failsafe: request == None and no flattened fields must work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as stub:
        client.create_tag_template()
        stub.assert_called()
        # A default (empty) request proto is synthesized and sent.
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.CreateTagTemplateRequest()
@pytest.mark.asyncio
async def test_create_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateTagTemplateRequest
):
    """create_tag_template (async) issues the RPC and returns the template."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Empty request is fine: proto3 fields are optional, the API is mocked.
    request = request_type()
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as stub:
        # The async transport expects an awaitable call object.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplate(
                name="name_value",
                display_name="display_name_value",
                is_publicly_readable=True,
            )
        )
        response = await client.create_tag_template(request)
        # The stub was invoked with the expected request proto.
        assert len(stub.mock_calls)
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0] == datacatalog.CreateTagTemplateRequest()
    # The stubbed fields survive the round trip unchanged.
    assert isinstance(response, tags.TagTemplate)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_publicly_readable is True
@pytest.mark.asyncio
async def test_create_tag_template_async_from_dict():
    """Re-run the async create_tag_template test with a plain dict request."""
    await test_create_tag_template_async(request_type=dict)
def test_create_tag_template_field_headers():
    """URI-bound request fields are mirrored into routing metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.CreateTagTemplateRequest()
    request.parent = "parent_value"
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as stub:
        stub.return_value = tags.TagTemplate()
        client.create_tag_template(request)
        # One RPC, carrying the exact request object.
        assert len(stub.mock_calls) == 1
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in sent_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_tag_template_field_headers_async():
    """URI-bound request fields are mirrored into routing metadata (async)."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that is part of the HTTP/1.1 URI.
    request = datacatalog.CreateTagTemplateRequest()
    request.parent = "parent_value"
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        await client.create_tag_template(request)
        # The stub received the exact request object.
        assert len(stub.mock_calls)
        _, sent_args, sent_kwargs = stub.mock_calls[0]
        assert sent_args[0] == request
        # The routing header was attached to the call metadata.
        assert (
            "x-goog-request-params",
            "parent=parent_value",
        ) in sent_kwargs["metadata"]
def test_create_tag_template_flattened():
    """Flattened kwargs are copied into the outgoing request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as stub:
        stub.return_value = tags.TagTemplate()
        # Invoke with a truthy value for each flattened field.
        client.create_tag_template(
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
        # Exactly one RPC; each flattened value landed on the request proto.
        assert len(stub.mock_calls) == 1
        _, sent_args, _ = stub.mock_calls[0]
        assert sent_args[0].parent == "parent_value"
        assert sent_args[0].tag_template_id == "tag_template_id_value"
        assert sent_args[0].tag_template == tags.TagTemplate(name="name_value")
def test_create_tag_template_flattened_error():
    """create_tag_template rejects a request object plus flattened kwargs."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing an explicit request proto with flattened fields must raise.
    with pytest.raises(ValueError):
        client.create_tag_template(
            datacatalog.CreateTagTemplateRequest(),
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_template_flattened_async():
    """create_tag_template (async) copies flattened kwargs into the request."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        # (Removed a dead `call.return_value = tags.TagTemplate()` assignment
        # that was immediately overwritten by the line below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tag_template(
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag_template_id
        mock_val = "tag_template_id_value"
        assert arg == mock_val
        arg = args[0].tag_template
        mock_val = tags.TagTemplate(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_template_flattened_error_async():
    """create_tag_template (async) rejects a request object plus kwargs."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing an explicit request proto with flattened fields must raise.
    with pytest.raises(ValueError):
        await client.create_tag_template(
            datacatalog.CreateTagTemplateRequest(),
            parent="parent_value",
            tag_template_id="tag_template_id_value",
            tag_template=tags.TagTemplate(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.GetTagTemplateRequest,
        dict,
    ],
)
def test_get_tag_template(request_type, transport: str = "grpc"):
    """GetTagTemplate round-trips the mocked payload into a typed response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request (proto or dict) suffices.
    request = request_type()
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as stub:
        # Canned payload returned by the faked transport.
        stub.return_value = tags.TagTemplate(
            name="name_value",
            display_name="display_name_value",
            is_publicly_readable=True,
        )
        response = client.get_tag_template(request)
        # Exactly one RPC, carrying an equivalent (empty) request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.GetTagTemplateRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplate)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_publicly_readable is True
def test_get_tag_template_empty_call():
    """A bare call (no request, no flattened fields) must still go through."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as stub:
        client.get_tag_template()
        stub.assert_called()
        # A default-constructed request proto is what goes on the wire.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.GetTagTemplateRequest()
@pytest.mark.asyncio
async def test_get_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.GetTagTemplateRequest
):
    """Async GetTagTemplate round-trips the mocked payload."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as stub:
        # Wrap the canned payload so the client can await it.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplate(
                name="name_value",
                display_name="display_name_value",
                is_publicly_readable=True,
            )
        )
        response = await client.get_tag_template(request)
        # The stub was invoked with an equivalent (empty) request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.GetTagTemplateRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplate)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_publicly_readable is True
@pytest.mark.asyncio
async def test_get_tag_template_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_get_tag_template_async(request_type=dict)
def test_get_tag_template_field_headers():
    """Routing fields must be sent as x-goog-request-params metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that feeds the HTTP/1.1 URI so a routing header is
    # generated.
    request = datacatalog.GetTagTemplateRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as stub:
        stub.return_value = tags.TagTemplate()
        client.get_tag_template(request)
        # One RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from request.name was attached.
        expected_header = (
            "x-goog-request-params",
            "name=name_value",
        )
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_tag_template_field_headers_async():
    """Async surface: routing fields become x-goog-request-params metadata."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that feeds the HTTP/1.1 URI so a routing header is
    # generated.
    request = datacatalog.GetTagTemplateRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        await client.get_tag_template(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from request.name was attached.
        expected_header = (
            "x-goog-request-params",
            "name=name_value",
        )
        assert expected_header in call_kwargs["metadata"]
def test_get_tag_template_flattened():
    """Flattened kwargs must be packed into the outgoing request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as stub:
        # Fake the transport response.
        stub.return_value = tags.TagTemplate()
        # Invoke using flattened keyword arguments only.
        client.get_tag_template(
            name="name_value",
        )
        # Exactly one RPC; the flattened value landed in the request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.name == "name_value"
def test_get_tag_template_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        client.get_tag_template(
            datacatalog.GetTagTemplateRequest(),
            name="name_value",
        )
@pytest.mark.asyncio
async def test_get_tag_template_flattened_async():
    """Verify flattened kwargs are copied into the outgoing request (async).

    Fix: the generated code assigned ``call.return_value`` twice — a bare
    ``tags.TagTemplate()`` that was immediately overwritten by the awaitable
    ``FakeUnaryUnaryCall`` wrapper. The dead assignment is removed, and the
    call-count assertion is tightened to ``== 1`` to match the sync variant.
    """
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_tag_template), "__call__") as call:
        # Designate an awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_tag_template(
            name="name_value",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_tag_template_flattened_error_async():
    """Async surface: request object plus flattened fields must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        await client.get_tag_template(
            datacatalog.GetTagTemplateRequest(),
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.UpdateTagTemplateRequest,
        dict,
    ],
)
def test_update_tag_template(request_type, transport: str = "grpc"):
    """UpdateTagTemplate round-trips the mocked payload into a response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request (proto or dict) suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as stub:
        # Canned payload returned by the faked transport.
        stub.return_value = tags.TagTemplate(
            name="name_value",
            display_name="display_name_value",
            is_publicly_readable=True,
        )
        response = client.update_tag_template(request)
        # Exactly one RPC, carrying an equivalent (empty) request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagTemplateRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplate)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_publicly_readable is True
def test_update_tag_template_empty_call():
    """A bare call (no request, no flattened fields) must still go through."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as stub:
        client.update_tag_template()
        stub.assert_called()
        # A default-constructed request proto is what goes on the wire.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagTemplateRequest()
@pytest.mark.asyncio
async def test_update_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateTagTemplateRequest
):
    """Async UpdateTagTemplate round-trips the mocked payload."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as stub:
        # Wrap the canned payload so the client can await it.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplate(
                name="name_value",
                display_name="display_name_value",
                is_publicly_readable=True,
            )
        )
        response = await client.update_tag_template(request)
        # The stub was invoked with an equivalent (empty) request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagTemplateRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplate)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_publicly_readable is True
@pytest.mark.asyncio
async def test_update_tag_template_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_update_tag_template_async(request_type=dict)
def test_update_tag_template_field_headers():
    """Nested routing fields must be sent as x-goog-request-params metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate the nested field that feeds the HTTP/1.1 URI so a routing
    # header is generated.
    request = datacatalog.UpdateTagTemplateRequest()
    request.tag_template.name = "name_value"
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as stub:
        stub.return_value = tags.TagTemplate()
        client.update_tag_template(request)
        # One RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from tag_template.name was attached.
        expected_header = (
            "x-goog-request-params",
            "tag_template.name=name_value",
        )
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_tag_template_field_headers_async():
    """Async surface: nested routing fields become request-params metadata."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate the nested field that feeds the HTTP/1.1 URI so a routing
    # header is generated.
    request = datacatalog.UpdateTagTemplateRequest()
    request.tag_template.name = "name_value"
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        await client.update_tag_template(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from tag_template.name was attached.
        expected_header = (
            "x-goog-request-params",
            "tag_template.name=name_value",
        )
        assert expected_header in call_kwargs["metadata"]
def test_update_tag_template_flattened():
    """Flattened kwargs must be packed into the outgoing request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as stub:
        # Fake the transport response.
        stub.return_value = tags.TagTemplate()
        # Invoke using flattened keyword arguments only.
        client.update_tag_template(
            tag_template=tags.TagTemplate(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Exactly one RPC; each flattened value landed in the request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.tag_template == tags.TagTemplate(name="name_value")
        assert sent_request.update_mask == field_mask_pb2.FieldMask(
            paths=["paths_value"]
        )
def test_update_tag_template_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        client.update_tag_template(
            datacatalog.UpdateTagTemplateRequest(),
            tag_template=tags.TagTemplate(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tag_template_flattened_async():
    """Verify flattened kwargs are copied into the outgoing request (async).

    Fix: the generated code assigned ``call.return_value`` twice — a bare
    ``tags.TagTemplate()`` that was immediately overwritten by the awaitable
    ``FakeUnaryUnaryCall`` wrapper. The dead assignment is removed, and the
    call-count assertion is tightened to ``== 1`` to match the sync variant.
    """
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template), "__call__"
    ) as call:
        # Designate an awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.TagTemplate())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tag_template(
            tag_template=tags.TagTemplate(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag_template
        mock_val = tags.TagTemplate(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_template_flattened_error_async():
    """Async surface: request object plus flattened fields must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        await client.update_tag_template(
            datacatalog.UpdateTagTemplateRequest(),
            tag_template=tags.TagTemplate(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.DeleteTagTemplateRequest,
        dict,
    ],
)
def test_delete_tag_template(request_type, transport: str = "grpc"):
    """DeleteTagTemplate issues the RPC and returns None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request (proto or dict) suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as stub:
        # Delete has an empty response.
        stub.return_value = None
        response = client.delete_tag_template(request)
        # Exactly one RPC, carrying an equivalent (empty) request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagTemplateRequest()
        # An empty RPC response maps to None.
        assert response is None
def test_delete_tag_template_empty_call():
    """A bare call (no request, no flattened fields) must still go through."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as stub:
        client.delete_tag_template()
        stub.assert_called()
        # A default-constructed request proto is what goes on the wire.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagTemplateRequest()
@pytest.mark.asyncio
async def test_delete_tag_template_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteTagTemplateRequest
):
    """Async DeleteTagTemplate issues the RPC and returns None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as stub:
        # Wrap the empty response so the client can await it.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tag_template(request)
        # The stub was invoked with an equivalent (empty) request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.DeleteTagTemplateRequest()
        # An empty RPC response maps to None.
        assert response is None
@pytest.mark.asyncio
async def test_delete_tag_template_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_delete_tag_template_async(request_type=dict)
def test_delete_tag_template_field_headers():
    """Routing fields must be sent as x-goog-request-params metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that feeds the HTTP/1.1 URI so a routing header is
    # generated.
    request = datacatalog.DeleteTagTemplateRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as stub:
        stub.return_value = None
        client.delete_tag_template(request)
        # One RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from request.name was attached.
        expected_header = (
            "x-goog-request-params",
            "name=name_value",
        )
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_template_field_headers_async():
    """Async surface: routing fields become x-goog-request-params metadata."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that feeds the HTTP/1.1 URI so a routing header is
    # generated.
    request = datacatalog.DeleteTagTemplateRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tag_template(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from request.name was attached.
        expected_header = (
            "x-goog-request-params",
            "name=name_value",
        )
        assert expected_header in call_kwargs["metadata"]
def test_delete_tag_template_flattened():
    """Flattened kwargs must be packed into the outgoing request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as stub:
        # Delete has an empty response.
        stub.return_value = None
        # Invoke using flattened keyword arguments only.
        client.delete_tag_template(
            name="name_value",
            force=True,
        )
        # Exactly one RPC; each flattened value landed in the request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.name == "name_value"
        assert sent_request.force == True
def test_delete_tag_template_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        client.delete_tag_template(
            datacatalog.DeleteTagTemplateRequest(),
            name="name_value",
            force=True,
        )
@pytest.mark.asyncio
async def test_delete_tag_template_flattened_async():
    """Verify flattened kwargs are copied into the outgoing request (async).

    Fix: the generated code assigned ``call.return_value`` twice — a bare
    ``None`` that was immediately overwritten by the awaitable
    ``FakeUnaryUnaryCall`` wrapper. The dead assignment is removed, and the
    call-count assertion is tightened to ``== 1`` to match the sync variant.
    """
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template), "__call__"
    ) as call:
        # Designate an awaitable (empty) return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tag_template(
            name="name_value",
            force=True,
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].force
        mock_val = True
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_template_flattened_error_async():
    """Async surface: request object plus flattened fields must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        await client.delete_tag_template(
            datacatalog.DeleteTagTemplateRequest(),
            name="name_value",
            force=True,
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.CreateTagTemplateFieldRequest,
        dict,
    ],
)
def test_create_tag_template_field(request_type, transport: str = "grpc"):
    """CreateTagTemplateField round-trips the mocked payload."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request (proto or dict) suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as stub:
        # Canned payload returned by the faked transport.
        stub.return_value = tags.TagTemplateField(
            name="name_value",
            display_name="display_name_value",
            is_required=True,
            description="description_value",
            order=540,
        )
        response = client.create_tag_template_field(request)
        # Exactly one RPC, carrying an equivalent (empty) request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagTemplateFieldRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplateField)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_required is True
        assert response.description == "description_value"
        assert response.order == 540
def test_create_tag_template_field_empty_call():
    """A bare call (no request, no flattened fields) must still go through."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport="grpc"
    )
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as stub:
        client.create_tag_template_field()
        stub.assert_called()
        # A default-constructed request proto is what goes on the wire.
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_create_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.CreateTagTemplateFieldRequest,
):
    """Async CreateTagTemplateField round-trips the mocked payload."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional, so an empty request suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as stub:
        # Wrap the canned payload so the client can await it.
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                description="description_value",
                order=540,
            )
        )
        response = await client.create_tag_template_field(request)
        # The stub was invoked with an equivalent (empty) request proto.
        assert len(stub.mock_calls)
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.CreateTagTemplateFieldRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplateField)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_required is True
        assert response.description == "description_value"
        assert response.order == 540
@pytest.mark.asyncio
async def test_create_tag_template_field_async_from_dict():
    # Re-run the async test with a plain dict request to cover dict coercion.
    await test_create_tag_template_field_async(request_type=dict)
def test_create_tag_template_field_field_headers():
    """Routing fields must be sent as x-goog-request-params metadata."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that feeds the HTTP/1.1 URI so a routing header is
    # generated.
    request = datacatalog.CreateTagTemplateFieldRequest()
    request.parent = "parent_value"
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = tags.TagTemplateField()
        client.create_tag_template_field(request)
        # One RPC, carrying the original request object.
        assert len(stub.mock_calls) == 1
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from request.parent was attached.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_create_tag_template_field_field_headers_async():
    """Async surface: routing fields become x-goog-request-params metadata."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Populate a field that feeds the HTTP/1.1 URI so a routing header is
    # generated.
    request = datacatalog.CreateTagTemplateFieldRequest()
    request.parent = "parent_value"
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as stub:
        stub.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.create_tag_template_field(request)
        # The stub received the original request object.
        assert len(stub.mock_calls)
        _, call_args, call_kwargs = stub.mock_calls[0]
        assert call_args[0] == request
        # The routing header derived from request.parent was attached.
        expected_header = (
            "x-goog-request-params",
            "parent=parent_value",
        )
        assert expected_header in call_kwargs["metadata"]
def test_create_tag_template_field_flattened():
    """Flattened kwargs must be packed into the outgoing request proto."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as stub:
        # Fake the transport response.
        stub.return_value = tags.TagTemplateField()
        # Invoke using flattened keyword arguments only.
        client.create_tag_template_field(
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
        # Exactly one RPC; each flattened value landed in the request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        sent_request = call_args[0]
        assert sent_request.parent == "parent_value"
        assert sent_request.tag_template_field_id == "tag_template_field_id_value"
        assert sent_request.tag_template_field == tags.TagTemplateField(
            name="name_value"
        )
def test_create_tag_template_field_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        client.create_tag_template_field(
            datacatalog.CreateTagTemplateFieldRequest(),
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_template_field_flattened_async():
    """Verify flattened kwargs are copied into the outgoing request (async).

    Fix: the generated code assigned ``call.return_value`` twice — a bare
    ``tags.TagTemplateField()`` that was immediately overwritten by the
    awaitable ``FakeUnaryUnaryCall`` wrapper. The dead assignment is removed,
    and the call-count assertion is tightened to ``== 1`` to match the sync
    variant.
    """
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.create_tag_template_field), "__call__"
    ) as call:
        # Designate an awaitable return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tag_template_field(
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag_template_field_id
        mock_val = "tag_template_field_id_value"
        assert arg == mock_val
        arg = args[0].tag_template_field
        mock_val = tags.TagTemplateField(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_template_field_flattened_error_async():
    """Async surface: request object plus flattened fields must raise."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying both forms at once is ambiguous; the client refuses it.
    with pytest.raises(ValueError):
        await client.create_tag_template_field(
            datacatalog.CreateTagTemplateFieldRequest(),
            parent="parent_value",
            tag_template_field_id="tag_template_field_id_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.UpdateTagTemplateFieldRequest,
        dict,
    ],
)
def test_update_tag_template_field(request_type, transport: str = "grpc"):
    """UpdateTagTemplateField round-trips the mocked payload."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(), transport=transport
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request (proto or dict) suffices.
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as stub:
        # Canned payload returned by the faked transport.
        stub.return_value = tags.TagTemplateField(
            name="name_value",
            display_name="display_name_value",
            is_required=True,
            description="description_value",
            order=540,
        )
        response = client.update_tag_template_field(request)
        # Exactly one RPC, carrying an equivalent (empty) request proto.
        assert len(stub.mock_calls) == 1
        _, call_args, _ = stub.mock_calls[0]
        assert call_args[0] == datacatalog.UpdateTagTemplateFieldRequest()
        # The canned payload surfaces unchanged in the typed response.
        assert isinstance(response, tags.TagTemplateField)
        assert response.name == "name_value"
        assert response.display_name == "display_name_value"
        assert response.is_required is True
        assert response.description == "description_value"
        assert response.order == 540
def test_update_tag_template_field_empty_call():
    """A call with no request and no flattened fields must still reach the stub."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as stubbed_rpc:
        client.update_tag_template_field()
    # A default (empty) request must have been synthesized and sent.
    stubbed_rpc.assert_called()
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.UpdateTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_update_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.UpdateTagTemplateFieldRequest,
):
    """Async update_tag_template_field must forward the request and unpack the response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as stubbed_rpc:
        # The fake response is wrapped so that it can be awaited.
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                description="description_value",
                order=540,
            )
        )
        response = await client.update_tag_template_field(request)
    # The stub must have been invoked with an equivalent request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.UpdateTagTemplateFieldRequest()
    # Every populated field must round-trip onto the response object.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.description == "description_value"
    assert response.order == 540
@pytest.mark.asyncio
async def test_update_tag_template_field_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_update_tag_template_field_async(request_type=dict)
def test_update_tag_template_field_field_headers():
    """URI-path fields must be propagated as x-goog-request-params metadata."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI; it must be
    # mirrored in the request-params header.
    request = datacatalog.UpdateTagTemplateFieldRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = tags.TagTemplateField()
        client.update_tag_template_field(request)
    # The stub must have been invoked exactly once with our request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_update_tag_template_field_field_headers_async():
    """URI-path fields must be propagated as request-params metadata (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.UpdateTagTemplateFieldRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.update_tag_template_field(request)
    # The stub must have been invoked with our request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_update_tag_template_field_flattened():
    """Flattened kwargs must be copied onto the UpdateTagTemplateFieldRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = tags.TagTemplateField()
        # Invoke with keyword (flattened) arguments only — the client is
        # responsible for assembling the request proto.
        client.update_tag_template_field(
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
    # Exactly one underlying call, whose request carries each kwarg.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    sent_request = call_args[0]
    assert sent_request.name == "name_value"
    assert sent_request.tag_template_field == tags.TagTemplateField(name="name_value")
    assert sent_request.update_mask == field_mask_pb2.FieldMask(paths=["paths_value"])
def test_update_tag_template_field_flattened_error():
    """Passing a request object together with flattened kwargs must raise."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.UpdateTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        client.update_tag_template_field(
            request,
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tag_template_field_flattened_async():
    """Flattened kwargs must be packed into an UpdateTagTemplateFieldRequest (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.update_tag_template_field), "__call__"
    ) as call:
        # Designate the awaitable return value for the call.
        # (A redundant plain `tags.TagTemplateField()` assignment that was
        # immediately overwritten by the FakeUnaryUnaryCall has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tag_template_field(
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].tag_template_field
        mock_val = tags.TagTemplateField(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_template_field_flattened_error_async():
    """Passing a request object together with flattened kwargs must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.UpdateTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        await client.update_tag_template_field(
            request,
            name="name_value",
            tag_template_field=tags.TagTemplateField(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.RenameTagTemplateFieldRequest,
        dict,
    ],
)
def test_rename_tag_template_field(request_type, transport: str = "grpc"):
    """rename_tag_template_field must forward the request and unpack the response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stubbed_rpc:
        # Fake a fully-populated response from the service.
        stubbed_rpc.return_value = tags.TagTemplateField(
            name="name_value",
            display_name="display_name_value",
            is_required=True,
            description="description_value",
            order=540,
        )
        response = client.rename_tag_template_field(request)
    # The stub must have been invoked exactly once with an equivalent request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.RenameTagTemplateFieldRequest()
    # Every populated field must round-trip onto the response object.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.description == "description_value"
    assert response.order == 540
def test_rename_tag_template_field_empty_call():
    """A call with no request and no flattened fields must still reach the stub."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stubbed_rpc:
        client.rename_tag_template_field()
    # A default (empty) request must have been synthesized and sent.
    stubbed_rpc.assert_called()
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.RenameTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_rename_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.RenameTagTemplateFieldRequest,
):
    """Async rename_tag_template_field must forward the request and unpack the response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stubbed_rpc:
        # The fake response is wrapped so that it can be awaited.
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                description="description_value",
                order=540,
            )
        )
        response = await client.rename_tag_template_field(request)
    # The stub must have been invoked with an equivalent request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.RenameTagTemplateFieldRequest()
    # Every populated field must round-trip onto the response object.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.description == "description_value"
    assert response.order == 540
@pytest.mark.asyncio
async def test_rename_tag_template_field_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_rename_tag_template_field_async(request_type=dict)
def test_rename_tag_template_field_field_headers():
    """URI-path fields must be propagated as x-goog-request-params metadata."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.RenameTagTemplateFieldRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = tags.TagTemplateField()
        client.rename_tag_template_field(request)
    # The stub must have been invoked exactly once with our request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_rename_tag_template_field_field_headers_async():
    """URI-path fields must be propagated as request-params metadata (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.RenameTagTemplateFieldRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.rename_tag_template_field(request)
    # The stub must have been invoked with our request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_rename_tag_template_field_flattened():
    """Flattened kwargs must be copied onto the RenameTagTemplateFieldRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = tags.TagTemplateField()
        # Invoke with keyword (flattened) arguments only.
        client.rename_tag_template_field(
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
    # Exactly one underlying call, whose request carries each kwarg.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    sent_request = call_args[0]
    assert sent_request.name == "name_value"
    assert sent_request.new_tag_template_field_id == "new_tag_template_field_id_value"
def test_rename_tag_template_field_flattened_error():
    """Passing a request object together with flattened kwargs must raise."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.RenameTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        client.rename_tag_template_field(
            request,
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
@pytest.mark.asyncio
async def test_rename_tag_template_field_flattened_async():
    """Flattened kwargs must be packed into a RenameTagTemplateFieldRequest (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field), "__call__"
    ) as call:
        # Designate the awaitable return value for the call.
        # (A redundant plain `tags.TagTemplateField()` assignment that was
        # immediately overwritten by the FakeUnaryUnaryCall has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.rename_tag_template_field(
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].new_tag_template_field_id
        mock_val = "new_tag_template_field_id_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_rename_tag_template_field_flattened_error_async():
    """Passing a request object together with flattened kwargs must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.RenameTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        await client.rename_tag_template_field(
            request,
            name="name_value",
            new_tag_template_field_id="new_tag_template_field_id_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.RenameTagTemplateFieldEnumValueRequest,
        dict,
    ],
)
def test_rename_tag_template_field_enum_value(request_type, transport: str = "grpc"):
    """rename_tag_template_field_enum_value must forward the request and unpack the response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as stubbed_rpc:
        # Fake a fully-populated response from the service.
        stubbed_rpc.return_value = tags.TagTemplateField(
            name="name_value",
            display_name="display_name_value",
            is_required=True,
            description="description_value",
            order=540,
        )
        response = client.rename_tag_template_field_enum_value(request)
    # The stub must have been invoked exactly once with an equivalent request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.RenameTagTemplateFieldEnumValueRequest()
    # Every populated field must round-trip onto the response object.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.description == "description_value"
    assert response.order == 540
def test_rename_tag_template_field_enum_value_empty_call():
    """A call with no request and no flattened fields must still reach the stub."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as stubbed_rpc:
        client.rename_tag_template_field_enum_value()
    # A default (empty) request must have been synthesized and sent.
    stubbed_rpc.assert_called()
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.RenameTagTemplateFieldEnumValueRequest()
@pytest.mark.asyncio
async def test_rename_tag_template_field_enum_value_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.RenameTagTemplateFieldEnumValueRequest,
):
    """Async rename_tag_template_field_enum_value must forward the request and unpack the response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as stubbed_rpc:
        # The fake response is wrapped so that it can be awaited.
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField(
                name="name_value",
                display_name="display_name_value",
                is_required=True,
                description="description_value",
                order=540,
            )
        )
        response = await client.rename_tag_template_field_enum_value(request)
    # The stub must have been invoked with an equivalent request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.RenameTagTemplateFieldEnumValueRequest()
    # Every populated field must round-trip onto the response object.
    assert isinstance(response, tags.TagTemplateField)
    assert response.name == "name_value"
    assert response.display_name == "display_name_value"
    assert response.is_required is True
    assert response.description == "description_value"
    assert response.order == 540
@pytest.mark.asyncio
async def test_rename_tag_template_field_enum_value_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_rename_tag_template_field_enum_value_async(request_type=dict)
def test_rename_tag_template_field_enum_value_field_headers():
    """URI-path fields must be propagated as x-goog-request-params metadata."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.RenameTagTemplateFieldEnumValueRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = tags.TagTemplateField()
        client.rename_tag_template_field_enum_value(request)
    # The stub must have been invoked exactly once with our request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_rename_tag_template_field_enum_value_field_headers_async():
    """URI-path fields must be propagated as request-params metadata (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.RenameTagTemplateFieldEnumValueRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        await client.rename_tag_template_field_enum_value(request)
    # The stub must have been invoked with our request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_rename_tag_template_field_enum_value_flattened():
    """Flattened kwargs must be copied onto the RenameTagTemplateFieldEnumValueRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = tags.TagTemplateField()
        # Invoke with keyword (flattened) arguments only.
        client.rename_tag_template_field_enum_value(
            name="name_value",
            new_enum_value_display_name="new_enum_value_display_name_value",
        )
    # Exactly one underlying call, whose request carries each kwarg.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    sent_request = call_args[0]
    assert sent_request.name == "name_value"
    assert sent_request.new_enum_value_display_name == "new_enum_value_display_name_value"
def test_rename_tag_template_field_enum_value_flattened_error():
    """Passing a request object together with flattened kwargs must raise."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.RenameTagTemplateFieldEnumValueRequest()
    with pytest.raises(ValueError):
        client.rename_tag_template_field_enum_value(
            request,
            name="name_value",
            new_enum_value_display_name="new_enum_value_display_name_value",
        )
@pytest.mark.asyncio
async def test_rename_tag_template_field_enum_value_flattened_async():
    """Flattened kwargs must be packed into a RenameTagTemplateFieldEnumValueRequest (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.rename_tag_template_field_enum_value), "__call__"
    ) as call:
        # Designate the awaitable return value for the call.
        # (A redundant plain `tags.TagTemplateField()` assignment that was
        # immediately overwritten by the FakeUnaryUnaryCall has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.TagTemplateField()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.rename_tag_template_field_enum_value(
            name="name_value",
            new_enum_value_display_name="new_enum_value_display_name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].new_enum_value_display_name
        mock_val = "new_enum_value_display_name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_rename_tag_template_field_enum_value_flattened_error_async():
    """Passing a request object together with flattened kwargs must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.RenameTagTemplateFieldEnumValueRequest()
    with pytest.raises(ValueError):
        await client.rename_tag_template_field_enum_value(
            request,
            name="name_value",
            new_enum_value_display_name="new_enum_value_display_name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.DeleteTagTemplateFieldRequest,
        dict,
    ],
)
def test_delete_tag_template_field(request_type, transport: str = "grpc"):
    """delete_tag_template_field must forward the request and return None."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stubbed_rpc:
        # The delete RPC yields an empty response.
        stubbed_rpc.return_value = None
        response = client.delete_tag_template_field(request)
    # The stub must have been invoked exactly once with an equivalent request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.DeleteTagTemplateFieldRequest()
    # Empty-response RPCs surface as None.
    assert response is None
def test_delete_tag_template_field_empty_call():
    """A call with no request and no flattened fields must still reach the stub."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stubbed_rpc:
        client.delete_tag_template_field()
    # A default (empty) request must have been synthesized and sent.
    stubbed_rpc.assert_called()
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.DeleteTagTemplateFieldRequest()
@pytest.mark.asyncio
async def test_delete_tag_template_field_async(
    transport: str = "grpc_asyncio",
    request_type=datacatalog.DeleteTagTemplateFieldRequest,
):
    """Async delete_tag_template_field must forward the request and return None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional at runtime and the stub is mocked,
    # so an empty request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stubbed_rpc:
        # The empty response is wrapped so that it can be awaited.
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tag_template_field(request)
    # The stub must have been invoked with an equivalent request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    assert call_args[0] == datacatalog.DeleteTagTemplateFieldRequest()
    # Empty-response RPCs surface as None.
    assert response is None
@pytest.mark.asyncio
async def test_delete_tag_template_field_async_from_dict():
    # Re-run the async test with a plain dict standing in for the request proto.
    await test_delete_tag_template_field_async(request_type=dict)
def test_delete_tag_template_field_field_headers():
    """URI-path fields must be propagated as x-goog-request-params metadata."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = None
        client.delete_tag_template_field(request)
    # The stub must have been invoked exactly once with our request.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_template_field_field_headers_async():
    """URI-path fields must be propagated as request-params metadata (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Populate a field that is routed into the HTTP/1.1 URI.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    request.name = "name_value"
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tag_template_field(request)
    # The stub must have been invoked with our request.
    assert len(stubbed_rpc.mock_calls)
    _, call_args, call_kwargs = stubbed_rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header must carry the populated field.
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_delete_tag_template_field_flattened():
    """Flattened kwargs must be copied onto the DeleteTagTemplateFieldRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as stubbed_rpc:
        stubbed_rpc.return_value = None
        # Invoke with keyword (flattened) arguments only.
        client.delete_tag_template_field(
            name="name_value",
            force=True,
        )
    # Exactly one underlying call, whose request carries each kwarg.
    assert len(stubbed_rpc.mock_calls) == 1
    _, call_args, _ = stubbed_rpc.mock_calls[0]
    sent_request = call_args[0]
    assert sent_request.name == "name_value"
    assert sent_request.force == True
def test_delete_tag_template_field_flattened_error():
    """Passing a request object together with flattened kwargs must raise."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mixing an explicit request object with flattened fields is rejected.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        client.delete_tag_template_field(
            request,
            name="name_value",
            force=True,
        )
@pytest.mark.asyncio
async def test_delete_tag_template_field_flattened_async():
    """Flattened kwargs must be packed into a DeleteTagTemplateFieldRequest (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.delete_tag_template_field), "__call__"
    ) as call:
        # Designate the awaitable return value for the call.
        # (A redundant plain `call.return_value = None` that was immediately
        # overwritten by the FakeUnaryUnaryCall has been removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tag_template_field(
            name="name_value",
            force=True,
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
        arg = args[0].force
        mock_val = True
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_template_field_flattened_error_async():
    """Async client must also reject a request object mixed with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing a request object with flattened keyword fields is ambiguous and
    # is expected to raise.
    request = datacatalog.DeleteTagTemplateFieldRequest()
    with pytest.raises(ValueError):
        await client.delete_tag_template_field(
            request,
            name="name_value",
            force=True,
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.CreateTagRequest,
        dict,
    ],
)
def test_create_tag(request_type, transport: str = "grpc"):
    """create_tag sends a CreateTagRequest and returns the stubbed Tag."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.Tag(
            name="name_value",
            template="template_value",
            template_display_name="template_display_name_value",
            column="column_value",
        )
        response = client.create_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateTagRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
    # Previously missing: the stub sets `column`, so verify it round-trips.
    assert response.column == "column_value"
def test_create_tag_empty_call():
    """Calling create_tag with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        client.create_tag()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty invocation must send a default-constructed request.
        assert args[0] == datacatalog.CreateTagRequest()
@pytest.mark.asyncio
async def test_create_tag_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.CreateTagRequest
):
    """Async create_tag sends the request and unwraps the awaited Tag response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.Tag(
                name="name_value",
                template="template_value",
                template_display_name="template_display_name_value",
            )
        )
        response = await client.create_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.CreateTagRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
@pytest.mark.asyncio
async def test_create_tag_async_from_dict():
    """Re-run the async create_tag test with a plain dict as the request type."""
    await test_create_tag_async(request_type=dict)
def test_create_tag_field_headers():
    """create_tag forwards routing info as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateTagRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        call.return_value = tags.Tag()
        client.create_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_create_tag_field_headers_async():
    """Async create_tag forwards routing info as an x-goog-request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.CreateTagRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        await client.create_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_create_tag_flattened():
    """Flattened keyword args are copied into the outgoing CreateTagRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.Tag()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.create_tag(
            parent="parent_value",
            tag=tags.Tag(name="name_value"),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag
        mock_val = tags.Tag(name="name_value")
        assert arg == mock_val
def test_create_tag_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying a request object alongside flattened keyword fields is
    # ambiguous, so the client is expected to reject the combination.
    request = datacatalog.CreateTagRequest()
    with pytest.raises(ValueError):
        client.create_tag(
            request,
            parent="parent_value",
            tag=tags.Tag(name="name_value"),
        )
@pytest.mark.asyncio
async def test_create_tag_flattened_async():
    """Async flattened-argument call populates the request's parent/tag fields."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.create_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Removed a redundant `call.return_value = tags.Tag()` that was
        # immediately overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.create_tag(
            parent="parent_value",
            tag=tags.Tag(name="name_value"),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
        arg = args[0].tag
        mock_val = tags.Tag(name="name_value")
        assert arg == mock_val
@pytest.mark.asyncio
async def test_create_tag_flattened_error_async():
    """Async client must also reject a request object mixed with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing a request object with flattened keyword fields is ambiguous and
    # is expected to raise.
    request = datacatalog.CreateTagRequest()
    with pytest.raises(ValueError):
        await client.create_tag(
            request,
            parent="parent_value",
            tag=tags.Tag(name="name_value"),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.UpdateTagRequest,
        dict,
    ],
)
def test_update_tag(request_type, transport: str = "grpc"):
    """update_tag sends an UpdateTagRequest and returns the stubbed Tag."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.Tag(
            name="name_value",
            template="template_value",
            template_display_name="template_display_name_value",
            column="column_value",
        )
        response = client.update_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateTagRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
    # Previously missing: the stub sets `column`, so verify it round-trips.
    assert response.column == "column_value"
def test_update_tag_empty_call():
    """Calling update_tag with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        client.update_tag()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty invocation must send a default-constructed request.
        assert args[0] == datacatalog.UpdateTagRequest()
@pytest.mark.asyncio
async def test_update_tag_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UpdateTagRequest
):
    """Async update_tag sends the request and unwraps the awaited Tag response."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            tags.Tag(
                name="name_value",
                template="template_value",
                template_display_name="template_display_name_value",
            )
        )
        response = await client.update_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.UpdateTagRequest()
    # Establish that the response is the type that we expect.
    assert isinstance(response, tags.Tag)
    assert response.name == "name_value"
    assert response.template == "template_value"
    assert response.template_display_name == "template_display_name_value"
@pytest.mark.asyncio
async def test_update_tag_async_from_dict():
    """Re-run the async update_tag test with a plain dict as the request type."""
    await test_update_tag_async(request_type=dict)
def test_update_tag_field_headers():
    """update_tag forwards routing info (tag.name) as a request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateTagRequest()
    request.tag.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        call.return_value = tags.Tag()
        client.update_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "tag.name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_update_tag_field_headers_async():
    """Async update_tag forwards routing info (tag.name) as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.UpdateTagRequest()
    request.tag.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        await client.update_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "tag.name=name_value",
    ) in kw["metadata"]
def test_update_tag_flattened():
    """Flattened keyword args are copied into the outgoing UpdateTagRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = tags.Tag()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.update_tag(
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag
        mock_val = tags.Tag(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
def test_update_tag_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying a request object alongside flattened keyword fields is
    # ambiguous, so the client is expected to reject the combination.
    request = datacatalog.UpdateTagRequest()
    with pytest.raises(ValueError):
        client.update_tag(
            request,
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.asyncio
async def test_update_tag_flattened_async():
    """Async flattened-argument call populates the request's tag/update_mask."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.update_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Removed a redundant `call.return_value = tags.Tag()` that was
        # immediately overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(tags.Tag())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.update_tag(
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].tag
        mock_val = tags.Tag(name="name_value")
        assert arg == mock_val
        arg = args[0].update_mask
        mock_val = field_mask_pb2.FieldMask(paths=["paths_value"])
        assert arg == mock_val
@pytest.mark.asyncio
async def test_update_tag_flattened_error_async():
    """Async client must also reject a request object mixed with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing a request object with flattened keyword fields is ambiguous and
    # is expected to raise.
    request = datacatalog.UpdateTagRequest()
    with pytest.raises(ValueError):
        await client.update_tag(
            request,
            tag=tags.Tag(name="name_value"),
            update_mask=field_mask_pb2.FieldMask(paths=["paths_value"]),
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.DeleteTagRequest,
        dict,
    ],
)
def test_delete_tag(request_type, transport: str = "grpc"):
    """delete_tag sends a DeleteTagRequest and returns None (google.protobuf.Empty)."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        response = client.delete_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteTagRequest()
    # Establish that the response is the type that we expect.
    assert response is None
def test_delete_tag_empty_call():
    """Calling delete_tag with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        client.delete_tag()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty invocation must send a default-constructed request.
        assert args[0] == datacatalog.DeleteTagRequest()
@pytest.mark.asyncio
async def test_delete_tag_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.DeleteTagRequest
):
    """Async delete_tag sends the request and resolves to None."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        response = await client.delete_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.DeleteTagRequest()
    # Establish that the response is the type that we expect.
    assert response is None
@pytest.mark.asyncio
async def test_delete_tag_async_from_dict():
    """Re-run the async delete_tag test with a plain dict as the request type."""
    await test_delete_tag_async(request_type=dict)
def test_delete_tag_field_headers():
    """delete_tag forwards routing info (name) as a request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteTagRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        call.return_value = None
        client.delete_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_delete_tag_field_headers_async():
    """Async delete_tag forwards routing info (name) as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.DeleteTagRequest()
    request.name = "name_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        await client.delete_tag(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "name=name_value",
    ) in kw["metadata"]
def test_delete_tag_flattened():
    """Flattened keyword args are copied into the outgoing DeleteTagRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = None
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.delete_tag(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
def test_delete_tag_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying a request object alongside flattened keyword fields is
    # ambiguous, so the client is expected to reject the combination.
    request = datacatalog.DeleteTagRequest()
    with pytest.raises(ValueError):
        client.delete_tag(
            request,
            name="name_value",
        )
@pytest.mark.asyncio
async def test_delete_tag_flattened_async():
    """Async flattened-argument call populates the request's name field."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.delete_tag), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Removed a redundant `call.return_value = None` that was immediately
        # overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(None)
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.delete_tag(
            name="name_value",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_delete_tag_flattened_error_async():
    """Async client must also reject a request object mixed with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing a request object with flattened keyword fields is ambiguous and
    # is expected to raise.
    request = datacatalog.DeleteTagRequest()
    with pytest.raises(ValueError):
        await client.delete_tag(
            request,
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.ListTagsRequest,
        dict,
    ],
)
def test_list_tags(request_type, transport: str = "grpc"):
    """list_tags sends a ListTagsRequest and wraps the response in a pager."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.ListTagsResponse(
            next_page_token="next_page_token_value",
        )
        response = client.list_tags(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.ListTagsRequest()
    # Establish that the response is the type that we expect.
    # The raw response is wrapped in a pager that proxies its fields.
    assert isinstance(response, pagers.ListTagsPager)
    assert response.next_page_token == "next_page_token_value"
def test_list_tags_empty_call():
    """Calling list_tags with no arguments still issues a default request."""
    # This test is a coverage failsafe to make sure that totally empty calls,
    # i.e. request == None and no flattened fields passed, work.
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        client.list_tags()
        call.assert_called()
        _, args, _ = call.mock_calls[0]
        # An empty invocation must send a default-constructed request.
        assert args[0] == datacatalog.ListTagsRequest()
@pytest.mark.asyncio
async def test_list_tags_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.ListTagsRequest
):
    """Async list_tags sends the request and wraps the response in an async pager."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Everything is optional in proto3 as far as the runtime is concerned,
    # and we are mocking out the actual API, so just send an empty request.
    request = request_type()
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListTagsResponse(
                next_page_token="next_page_token_value",
            )
        )
        response = await client.list_tags(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == datacatalog.ListTagsRequest()
    # Establish that the response is the type that we expect.
    # The raw response is wrapped in an async pager that proxies its fields.
    assert isinstance(response, pagers.ListTagsAsyncPager)
    assert response.next_page_token == "next_page_token_value"
@pytest.mark.asyncio
async def test_list_tags_async_from_dict():
    """Re-run the async list_tags test with a plain dict as the request type."""
    await test_list_tags_async(request_type=dict)
def test_list_tags_field_headers():
    """list_tags forwards routing info (parent) as a request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.ListTagsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        call.return_value = datacatalog.ListTagsResponse()
        client.list_tags(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_list_tags_field_headers_async():
    """Async list_tags forwards routing info (parent) as a request-params header."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = datacatalog.ListTagsRequest()
    request.parent = "parent_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListTagsResponse()
        )
        await client.list_tags(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "parent=parent_value",
    ) in kw["metadata"]
def test_list_tags_flattened():
    """Flattened keyword args are copied into the outgoing ListTagsRequest."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        call.return_value = datacatalog.ListTagsResponse()
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        client.list_tags(
            parent="parent_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
def test_list_tags_flattened_error():
    """Passing both a request object and flattened fields must raise ValueError."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    # Supplying a request object alongside flattened keyword fields is
    # ambiguous, so the client is expected to reject the combination.
    request = datacatalog.ListTagsRequest()
    with pytest.raises(ValueError):
        client.list_tags(
            request,
            parent="parent_value",
        )
@pytest.mark.asyncio
async def test_list_tags_flattened_async():
    """Async flattened-argument call populates the request's parent field."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Designate an appropriate return value for the call.
        # (Removed a redundant `call.return_value = datacatalog.ListTagsResponse()`
        # that was immediately overwritten by the awaitable fake below.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.ListTagsResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.list_tags(
            parent="parent_value",
        )
        # Establish that the underlying call was made exactly once with the
        # expected request object values.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        arg = args[0].parent
        mock_val = "parent_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_list_tags_flattened_error_async():
    """Async client must also reject a request object mixed with flattened fields."""
    client = DataCatalogAsyncClient(credentials=ga_credentials.AnonymousCredentials())
    # Mixing a request object with flattened keyword fields is ambiguous and
    # is expected to raise.
    request = datacatalog.ListTagsRequest()
    with pytest.raises(ValueError):
        await client.list_tags(
            request,
            parent="parent_value",
        )
def test_list_tags_pager(transport_name: str = "grpc"):
    """The sync pager transparently iterates items across multiple pages."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials,
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Set the response to a series of pages.
        # The trailing RuntimeError guards against the pager requesting a page
        # beyond the last one (its empty next_page_token should stop iteration).
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                    tags.Tag(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(
                tags=[],
                next_page_token="def",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                ],
            ),
            RuntimeError,
        )
        metadata = ()
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", ""),)),
        )
        pager = client.list_tags(request={})
        # The routing metadata must be threaded through to the pager.
        assert pager._metadata == metadata
        results = list(pager)
        assert len(results) == 6
        assert all(isinstance(i, tags.Tag) for i in results)
def test_list_tags_pages(transport_name: str = "grpc"):
    """The sync pager's .pages view yields raw pages with their tokens."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials,
        transport=transport_name,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.list_tags), "__call__") as call:
        # Set the response to a series of pages.
        # The trailing RuntimeError guards against the pager requesting a page
        # beyond the last one (its empty next_page_token should stop iteration).
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                    tags.Tag(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(
                tags=[],
                next_page_token="def",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                ],
            ),
            RuntimeError,
        )
        pages = list(client.list_tags(request={}).pages)
        # Each raw page must carry the token that was stubbed for it; the last
        # page's token is empty, which terminates pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.asyncio
async def test_list_tags_async_pager():
    """The async pager transparently iterates items across multiple pages."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tags), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        # The trailing RuntimeError guards against the pager requesting a page
        # beyond the last one (its empty next_page_token should stop iteration).
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                    tags.Tag(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(
                tags=[],
                next_page_token="def",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                ],
            ),
            RuntimeError,
        )
        async_pager = await client.list_tags(
            request={},
        )
        assert async_pager.next_page_token == "abc"
        responses = []
        async for response in async_pager:  # pragma: no branch
            responses.append(response)
        assert len(responses) == 6
        assert all(isinstance(i, tags.Tag) for i in responses)
@pytest.mark.asyncio
async def test_list_tags_async_pages():
    """The async pager's .pages view yields raw pages with their tokens."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials,
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.list_tags), "__call__", new_callable=mock.AsyncMock
    ) as call:
        # Set the response to a series of pages.
        # The trailing RuntimeError guards against the pager requesting a page
        # beyond the last one (its empty next_page_token should stop iteration).
        call.side_effect = (
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                    tags.Tag(),
                ],
                next_page_token="abc",
            ),
            datacatalog.ListTagsResponse(
                tags=[],
                next_page_token="def",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                ],
                next_page_token="ghi",
            ),
            datacatalog.ListTagsResponse(
                tags=[
                    tags.Tag(),
                    tags.Tag(),
                ],
            ),
            RuntimeError,
        )
        pages = []
        async for page_ in (
            await client.list_tags(request={})
        ).pages:  # pragma: no branch
            pages.append(page_)
        # Each raw page must carry the token that was stubbed for it; the last
        # page's token is empty, which terminates pagination.
        for page_, token in zip(pages, ["abc", "def", "ghi", ""]):
            assert page_.raw_page.next_page_token == token
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.StarEntryRequest,
        dict,
    ],
)
def test_star_entry(request_type, transport: str = "grpc"):
    """star_entry forwards the request and surfaces the stub's response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.star_entry), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = datacatalog.StarEntryResponse()
        response = client.star_entry(request)
    # Exactly one stub invocation, carrying the canonical request type.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == datacatalog.StarEntryRequest()
    # The client surfaces the expected response type.
    assert isinstance(response, datacatalog.StarEntryResponse)
def test_star_entry_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.star_entry), "__call__") as rpc:
        client.star_entry()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.StarEntryRequest()
@pytest.mark.asyncio
async def test_star_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.StarEntryRequest
):
    """star_entry (async): request is forwarded and the response type surfaced."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API
    # itself is mocked out.
    request = request_type()
    with mock.patch.object(type(client.transport.star_entry), "__call__") as rpc:
        # Fake the awaitable gRPC response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.StarEntryResponse()
        )
        response = await client.star_entry(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.StarEntryRequest()
    # The client surfaces the expected response type.
    assert isinstance(response, datacatalog.StarEntryResponse)
@pytest.mark.asyncio
async def test_star_entry_async_from_dict():
    # Re-run the async test with a plain dict as the request body.
    await test_star_entry_async(request_type=dict)
def test_star_entry_field_headers():
    """URI-bound fields must be sent as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = datacatalog.StarEntryRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.star_entry), "__call__") as rpc:
        rpc.return_value = datacatalog.StarEntryResponse()
        client.star_entry(request)
    # The stub received the very request object we passed in.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_star_entry_field_headers_async():
    """URI-bound fields must be sent as an x-goog-request-params header (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = datacatalog.StarEntryRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.star_entry), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.StarEntryResponse()
        )
        await client.star_entry(request)
        # The stub received the very request object we passed in.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_star_entry_flattened():
    """Flattened keyword arguments are copied into the request proto."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.star_entry), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = datacatalog.StarEntryResponse()
        # Invoke via flattened keyword arguments only.
        client.star_entry(
            name="name_value",
        )
        # Exactly one call; the flattened value landed on the request.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_star_entry_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = datacatalog.StarEntryRequest()
    with pytest.raises(ValueError):
        client.star_entry(
            request,
            name="name_value",
        )
@pytest.mark.asyncio
async def test_star_entry_flattened_async():
    """Flattened kwargs are copied into the request proto (async client)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.star_entry), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-response assignment, immediately overwritten, was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.StarEntryResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.star_entry(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_star_entry_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = datacatalog.StarEntryRequest()
    with pytest.raises(ValueError):
        await client.star_entry(
            request,
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        datacatalog.UnstarEntryRequest,
        dict,
    ],
)
def test_unstar_entry(request_type, transport: str = "grpc"):
    """unstar_entry forwards the request and surfaces the stub's response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = datacatalog.UnstarEntryResponse()
        response = client.unstar_entry(request)
    # Exactly one stub invocation, carrying the canonical request type.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == datacatalog.UnstarEntryRequest()
    # The client surfaces the expected response type.
    assert isinstance(response, datacatalog.UnstarEntryResponse)
def test_unstar_entry_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as rpc:
        client.unstar_entry()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.UnstarEntryRequest()
@pytest.mark.asyncio
async def test_unstar_entry_async(
    transport: str = "grpc_asyncio", request_type=datacatalog.UnstarEntryRequest
):
    """unstar_entry (async): request is forwarded and the response type surfaced."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API
    # itself is mocked out.
    request = request_type()
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as rpc:
        # Fake the awaitable gRPC response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.UnstarEntryResponse()
        )
        response = await client.unstar_entry(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == datacatalog.UnstarEntryRequest()
    # The client surfaces the expected response type.
    assert isinstance(response, datacatalog.UnstarEntryResponse)
@pytest.mark.asyncio
async def test_unstar_entry_async_from_dict():
    # Re-run the async test with a plain dict as the request body.
    await test_unstar_entry_async(request_type=dict)
def test_unstar_entry_field_headers():
    """URI-bound fields must be sent as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = datacatalog.UnstarEntryRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as rpc:
        rpc.return_value = datacatalog.UnstarEntryResponse()
        client.unstar_entry(request)
    # The stub received the very request object we passed in.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_unstar_entry_field_headers_async():
    """URI-bound fields must be sent as an x-goog-request-params header (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = datacatalog.UnstarEntryRequest()
    request.name = "name_value"
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.UnstarEntryResponse()
        )
        await client.unstar_entry(request)
        # The stub received the very request object we passed in.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "name=name_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_unstar_entry_flattened():
    """Flattened keyword arguments are copied into the request proto."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = datacatalog.UnstarEntryResponse()
        # Invoke via flattened keyword arguments only.
        client.unstar_entry(
            name="name_value",
        )
        # Exactly one call; the flattened value landed on the request.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].name == "name_value"
def test_unstar_entry_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = datacatalog.UnstarEntryRequest()
    with pytest.raises(ValueError):
        client.unstar_entry(
            request,
            name="name_value",
        )
@pytest.mark.asyncio
async def test_unstar_entry_flattened_async():
    """Flattened kwargs are copied into the request proto (async client)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.unstar_entry), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-response assignment, immediately overwritten, was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            datacatalog.UnstarEntryResponse()
        )
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.unstar_entry(
            name="name_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].name
        mock_val = "name_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_unstar_entry_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = datacatalog.UnstarEntryRequest()
    with pytest.raises(ValueError):
        await client.unstar_entry(
            request,
            name="name_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        iam_policy_pb2.SetIamPolicyRequest,
        dict,
    ],
)
def test_set_iam_policy(request_type, transport: str = "grpc"):
    """set_iam_policy forwards the request and surfaces the faked Policy."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.set_iam_policy(request)
    # Exactly one stub invocation, carrying the canonical request type.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # The faked fields round-trip through the client unchanged.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_set_iam_policy_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        client.set_iam_policy()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.SetIamPolicyRequest()
@pytest.mark.asyncio
async def test_set_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.SetIamPolicyRequest
):
    """set_iam_policy (async): request forwarded, faked Policy surfaced."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API
    # itself is mocked out.
    request = request_type()
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        # Fake the awaitable gRPC response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(
                version=774,
                etag=b"etag_blob",
            )
        )
        response = await client.set_iam_policy(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.SetIamPolicyRequest()
    # The faked fields round-trip through the client unchanged.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_set_iam_policy_async_from_dict():
    # Re-run the async test with a plain dict as the request body.
    await test_set_iam_policy_async(request_type=dict)
def test_set_iam_policy_field_headers():
    """URI-bound fields must be sent as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource_value"
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        client.set_iam_policy(request)
    # The stub received the very request object we passed in.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "resource=resource_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_set_iam_policy_field_headers_async():
    """URI-bound fields must be sent as an x-goog-request-params header (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = iam_policy_pb2.SetIamPolicyRequest()
    request.resource = "resource_value"
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.set_iam_policy(request)
        # The stub received the very request object we passed in.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "resource=resource_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_set_iam_policy_from_dict_foreign():
    """A plain dict using foreign (well-known) message types is accepted."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = policy_pb2.Policy()
        response = client.set_iam_policy(
            request={
                "resource": "resource_value",
                "policy": policy_pb2.Policy(version=774),
                "update_mask": field_mask_pb2.FieldMask(paths=["paths_value"]),
            }
        )
        rpc.assert_called()
def test_set_iam_policy_flattened():
    """Flattened keyword arguments are copied into the request proto."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = policy_pb2.Policy()
        # Invoke via flattened keyword arguments only.
        client.set_iam_policy(
            resource="resource_value",
        )
        # Exactly one call; the flattened value landed on the request.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].resource == "resource_value"
def test_set_iam_policy_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = iam_policy_pb2.SetIamPolicyRequest()
    with pytest.raises(ValueError):
        client.set_iam_policy(
            request,
            resource="resource_value",
        )
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_async():
    """Flattened kwargs are copied into the request proto (async client)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.set_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-Policy assignment, immediately overwritten, was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.set_iam_policy(
            resource="resource_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_set_iam_policy_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = iam_policy_pb2.SetIamPolicyRequest()
    with pytest.raises(ValueError):
        await client.set_iam_policy(
            request,
            resource="resource_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        iam_policy_pb2.GetIamPolicyRequest,
        dict,
    ],
)
def test_get_iam_policy(request_type, transport: str = "grpc"):
    """get_iam_policy forwards the request and surfaces the faked Policy."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = policy_pb2.Policy(
            version=774,
            etag=b"etag_blob",
        )
        response = client.get_iam_policy(request)
    # Exactly one stub invocation, carrying the canonical request type.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # The faked fields round-trip through the client unchanged.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
def test_get_iam_policy_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        client.get_iam_policy()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.GetIamPolicyRequest()
@pytest.mark.asyncio
async def test_get_iam_policy_async(
    transport: str = "grpc_asyncio", request_type=iam_policy_pb2.GetIamPolicyRequest
):
    """get_iam_policy (async): request forwarded, faked Policy surfaced."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API
    # itself is mocked out.
    request = request_type()
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        # Fake the awaitable gRPC response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            policy_pb2.Policy(
                version=774,
                etag=b"etag_blob",
            )
        )
        response = await client.get_iam_policy(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.GetIamPolicyRequest()
    # The faked fields round-trip through the client unchanged.
    assert isinstance(response, policy_pb2.Policy)
    assert response.version == 774
    assert response.etag == b"etag_blob"
@pytest.mark.asyncio
async def test_get_iam_policy_async_from_dict():
    # Re-run the async test with a plain dict as the request body.
    await test_get_iam_policy_async(request_type=dict)
def test_get_iam_policy_field_headers():
    """URI-bound fields must be sent as an x-goog-request-params header."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource_value"
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = policy_pb2.Policy()
        client.get_iam_policy(request)
    # The stub received the very request object we passed in.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "resource=resource_value",
    )
    assert expected_header in call_kwargs["metadata"]
@pytest.mark.asyncio
async def test_get_iam_policy_field_headers_async():
    """URI-bound fields must be sent as an x-goog-request-params header (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Give the routing field a non-empty value so the header is populated.
    request = iam_policy_pb2.GetIamPolicyRequest()
    request.resource = "resource_value"
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        await client.get_iam_policy(request)
        # The stub received the very request object we passed in.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == request
    # The routing header was attached to the call metadata.
    _, _, call_kwargs = rpc.mock_calls[0]
    expected_header = (
        "x-goog-request-params",
        "resource=resource_value",
    )
    assert expected_header in call_kwargs["metadata"]
def test_get_iam_policy_from_dict_foreign():
    """A plain dict using foreign (well-known) message types is accepted."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = policy_pb2.Policy()
        response = client.get_iam_policy(
            request={
                "resource": "resource_value",
                "options": options_pb2.GetPolicyOptions(requested_policy_version=2598),
            }
        )
        rpc.assert_called()
def test_get_iam_policy_flattened():
    """Flattened keyword arguments are copied into the request proto."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as rpc:
        # Fake the server response.
        rpc.return_value = policy_pb2.Policy()
        # Invoke via flattened keyword arguments only.
        client.get_iam_policy(
            resource="resource_value",
        )
        # Exactly one call; the flattened value landed on the request.
        assert len(rpc.mock_calls) == 1
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0].resource == "resource_value"
def test_get_iam_policy_flattened_error():
    """Mixing a request object with flattened fields must raise ValueError."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = iam_policy_pb2.GetIamPolicyRequest()
    with pytest.raises(ValueError):
        client.get_iam_policy(
            request,
            resource="resource_value",
        )
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_async():
    """Flattened kwargs are copied into the request proto (async client)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(type(client.transport.get_iam_policy), "__call__") as call:
        # Designate an appropriate return value for the call.  (A redundant
        # plain-Policy assignment, immediately overwritten, was removed.)
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(policy_pb2.Policy())
        # Call the method with a truthy value for each flattened field,
        # using the keyword arguments to the method.
        response = await client.get_iam_policy(
            resource="resource_value",
        )
        # Establish that the underlying call was made with the expected
        # request object values.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        arg = args[0].resource
        mock_val = "resource_value"
        assert arg == mock_val
@pytest.mark.asyncio
async def test_get_iam_policy_flattened_error_async():
    """Mixing a request object with flattened fields must raise (async)."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    request = iam_policy_pb2.GetIamPolicyRequest()
    with pytest.raises(ValueError):
        await client.get_iam_policy(
            request,
            resource="resource_value",
        )
@pytest.mark.parametrize(
    "request_type",
    [
        iam_policy_pb2.TestIamPermissionsRequest,
        dict,
    ],
)
def test_test_iam_permissions(request_type, transport: str = "grpc"):
    """test_iam_permissions forwards the request and surfaces the response."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # Proto3 fields are all optional and the API is mocked, so an empty
    # request is sufficient.
    request = request_type()
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        # Fake the server response.
        rpc.return_value = iam_policy_pb2.TestIamPermissionsResponse(
            permissions=["permissions_value"],
        )
        response = client.test_iam_permissions(request)
    # Exactly one stub invocation, carrying the canonical request type.
    assert len(rpc.mock_calls) == 1
    _, call_args, _ = rpc.mock_calls[0]
    assert call_args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # The faked fields round-trip through the client unchanged.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
def test_test_iam_permissions_empty_call():
    """Coverage failsafe: a call with no request and no flattened fields
    must still reach the stub with a default request object."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc",
    )
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        client.test_iam_permissions()
        rpc.assert_called()
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.TestIamPermissionsRequest()
@pytest.mark.asyncio
async def test_test_iam_permissions_async(
    transport: str = "grpc_asyncio",
    request_type=iam_policy_pb2.TestIamPermissionsRequest,
):
    """test_iam_permissions (async): request forwarded, response surfaced."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport=transport,
    )
    # An empty request suffices: proto3 fields are optional and the API
    # itself is mocked out.
    request = request_type()
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as rpc:
        # Fake the awaitable gRPC response.
        rpc.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse(
                permissions=["permissions_value"],
            )
        )
        response = await client.test_iam_permissions(request)
        # The stub was invoked with the canonical request type.
        assert len(rpc.mock_calls)
        _, call_args, _ = rpc.mock_calls[0]
        assert call_args[0] == iam_policy_pb2.TestIamPermissionsRequest()
    # The faked fields round-trip through the client unchanged.
    assert isinstance(response, iam_policy_pb2.TestIamPermissionsResponse)
    assert response.permissions == ["permissions_value"]
@pytest.mark.asyncio
async def test_test_iam_permissions_async_from_dict():
    # Re-run the async test with a plain dict as the request body.
    await test_test_iam_permissions_async(request_type=dict)
def test_test_iam_permissions_field_headers():
    """The request's `resource` field is routed into request-params metadata."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls) == 1
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource_value",
    ) in kw["metadata"]
@pytest.mark.asyncio
async def test_test_iam_permissions_field_headers_async():
    """Async variant: `resource` field is routed into request-params metadata."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Any value that is part of the HTTP/1.1 URI should be sent as
    # a field header. Set these to a non-empty value.
    request = iam_policy_pb2.TestIamPermissionsRequest()
    request.resource = "resource_value"
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        call.return_value = grpc_helpers_async.FakeUnaryUnaryCall(
            iam_policy_pb2.TestIamPermissionsResponse()
        )
        await client.test_iam_permissions(request)
        # Establish that the underlying gRPC stub method was called.
        assert len(call.mock_calls)
        _, args, _ = call.mock_calls[0]
        assert args[0] == request
    # Establish that the field header was sent.
    _, _, kw = call.mock_calls[0]
    assert (
        "x-goog-request-params",
        "resource=resource_value",
    ) in kw["metadata"]
def test_test_iam_permissions_from_dict_foreign():
    """A plain-dict request for a foreign (iam_policy) message is accepted."""
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    # Mock the actual call within the gRPC stub, and fake the request.
    with mock.patch.object(
        type(client.transport.test_iam_permissions), "__call__"
    ) as call:
        # Designate an appropriate return value for the call.
        call.return_value = iam_policy_pb2.TestIamPermissionsResponse()
        response = client.test_iam_permissions(
            request={
                "resource": "resource_value",
                "permissions": ["permissions_value"],
            }
        )
        call.assert_called()
def test_credentials_transport_error():
    """Mutually exclusive client options with a transport instance raise ValueError."""
    # It is an error to provide credentials and a transport instance.
    transport = transports.DataCatalogGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DataCatalogClient(
            credentials=ga_credentials.AnonymousCredentials(),
            transport=transport,
        )
    # It is an error to provide a credentials file and a transport instance.
    transport = transports.DataCatalogGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DataCatalogClient(
            client_options={"credentials_file": "credentials.json"},
            transport=transport,
        )
    # It is an error to provide an api_key and a transport instance.
    transport = transports.DataCatalogGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    options = client_options.ClientOptions()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = DataCatalogClient(
            client_options=options,
            transport=transport,
        )
    # It is an error to provide an api_key and a credential.
    options = mock.Mock()
    options.api_key = "api_key"
    with pytest.raises(ValueError):
        client = DataCatalogClient(
            client_options=options, credentials=ga_credentials.AnonymousCredentials()
        )
    # It is an error to provide scopes and a transport instance.
    transport = transports.DataCatalogGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    with pytest.raises(ValueError):
        client = DataCatalogClient(
            client_options={"scopes": ["1", "2"]},
            transport=transport,
        )
def test_transport_instance():
    """A pre-built transport instance is adopted by the client unchanged."""
    grpc_transport = transports.DataCatalogGrpcTransport(
        credentials=ga_credentials.AnonymousCredentials(),
    )
    client = DataCatalogClient(transport=grpc_transport)
    # The client must hold the exact same object, not a copy.
    assert client.transport is grpc_transport
def test_transport_get_channel():
    """Both sync and async custom transports expose a truthy gRPC channel."""
    for transport_cls in (
        transports.DataCatalogGrpcTransport,
        transports.DataCatalogGrpcAsyncIOTransport,
    ):
        transport = transport_cls(
            credentials=ga_credentials.AnonymousCredentials(),
        )
        assert transport.grpc_channel
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DataCatalogGrpcTransport,
        transports.DataCatalogGrpcAsyncIOTransport,
    ],
)
def test_transport_adc(transport_class):
    """Transports fall back to application default credentials (ADC)."""
    # Test default credentials are used if not provided.
    with mock.patch.object(google.auth, "default") as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class()
        adc.assert_called_once()
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
    ],
)
def test_transport_kind(transport_name):
    """A transport's `kind` matches the name it was looked up under."""
    transport_cls = DataCatalogClient.get_transport_class(transport_name)
    transport = transport_cls(credentials=ga_credentials.AnonymousCredentials())
    assert transport.kind == transport_name
def test_transport_grpc_default():
    """Clients built without an explicit transport default to gRPC."""
    client = DataCatalogClient(credentials=ga_credentials.AnonymousCredentials())
    assert isinstance(client.transport, transports.DataCatalogGrpcTransport)
def test_data_catalog_base_transport_error():
    """Supplying both credentials and credentials_file is rejected."""
    # Passing both a credentials object and credentials_file should raise an error
    with pytest.raises(core_exceptions.DuplicateCredentialArgs):
        transport = transports.DataCatalogTransport(
            credentials=ga_credentials.AnonymousCredentials(),
            credentials_file="credentials.json",
        )
def test_data_catalog_base_transport():
    """Every RPC on the abstract base transport raises NotImplementedError."""
    # Instantiate the base transport.
    with mock.patch(
        "google.cloud.datacatalog_v1.services.data_catalog.transports.DataCatalogTransport.__init__"
    ) as Transport:
        Transport.return_value = None
        transport = transports.DataCatalogTransport(
            credentials=ga_credentials.AnonymousCredentials(),
        )
    # Every method on the transport should just blindly
    # raise NotImplementedError.
    methods = (
        "search_catalog",
        "create_entry_group",
        "get_entry_group",
        "update_entry_group",
        "delete_entry_group",
        "list_entry_groups",
        "create_entry",
        "update_entry",
        "delete_entry",
        "get_entry",
        "lookup_entry",
        "list_entries",
        "modify_entry_overview",
        "modify_entry_contacts",
        "create_tag_template",
        "get_tag_template",
        "update_tag_template",
        "delete_tag_template",
        "create_tag_template_field",
        "update_tag_template_field",
        "rename_tag_template_field",
        "rename_tag_template_field_enum_value",
        "delete_tag_template_field",
        "create_tag",
        "update_tag",
        "delete_tag",
        "list_tags",
        "star_entry",
        "unstar_entry",
        "set_iam_policy",
        "get_iam_policy",
        "test_iam_permissions",
    )
    for method in methods:
        with pytest.raises(NotImplementedError):
            getattr(transport, method)(request=object())
    with pytest.raises(NotImplementedError):
        transport.close()
    # Catch all for all remaining methods and properties
    remainder = [
        "kind",
    ]
    for r in remainder:
        with pytest.raises(NotImplementedError):
            getattr(transport, r)()
def test_data_catalog_base_transport_with_credentials_file():
    """A credentials file is loaded with the expected scopes and quota project."""
    # Instantiate the base transport with a credentials file
    with mock.patch.object(
        google.auth, "load_credentials_from_file", autospec=True
    ) as load_creds, mock.patch(
        "google.cloud.datacatalog_v1.services.data_catalog.transports.DataCatalogTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        load_creds.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.DataCatalogTransport(
            credentials_file="credentials.json",
            quota_project_id="octopus",
        )
        load_creds.assert_called_once_with(
            "credentials.json",
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
def test_data_catalog_base_transport_with_adc():
    """With neither credentials nor a file, the base transport uses ADC."""
    # Test the default credentials are used if credentials and credentials_file are None.
    with mock.patch.object(google.auth, "default", autospec=True) as adc, mock.patch(
        "google.cloud.datacatalog_v1.services.data_catalog.transports.DataCatalogTransport._prep_wrapped_messages"
    ) as Transport:
        Transport.return_value = None
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport = transports.DataCatalogTransport()
        adc.assert_called_once()
def test_data_catalog_auth_adc():
    """The client requests ADC with the cloud-platform default scope."""
    # If no credentials are provided, we should use ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        DataCatalogClient()
        adc.assert_called_once_with(
            scopes=None,
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id=None,
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DataCatalogGrpcTransport,
        transports.DataCatalogGrpcAsyncIOTransport,
    ],
)
def test_data_catalog_transport_auth_adc(transport_class):
    """Transports pass explicit scopes and quota project through to ADC."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(google.auth, "default", autospec=True) as adc:
        adc.return_value = (ga_credentials.AnonymousCredentials(), None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        adc.assert_called_once_with(
            scopes=["1", "2"],
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            quota_project_id="octopus",
        )
@pytest.mark.parametrize(
    "transport_class",
    [
        transports.DataCatalogGrpcTransport,
        transports.DataCatalogGrpcAsyncIOTransport,
    ],
)
def test_data_catalog_transport_auth_gdch_credentials(transport_class):
    """GDC-H credentials get the api_audience (or host) applied as audience."""
    host = "https://language.com"
    api_audience_tests = [None, "https://language2.com"]
    api_audience_expect = [host, "https://language2.com"]
    # When api_audience is None the host itself is used as the audience.
    for t, e in zip(api_audience_tests, api_audience_expect):
        with mock.patch.object(google.auth, "default", autospec=True) as adc:
            gdch_mock = mock.MagicMock()
            type(gdch_mock).with_gdch_audience = mock.PropertyMock(
                return_value=gdch_mock
            )
            adc.return_value = (gdch_mock, None)
            transport_class(host=host, api_audience=t)
            gdch_mock.with_gdch_audience.assert_called_once_with(e)
@pytest.mark.parametrize(
    "transport_class,grpc_helpers",
    [
        (transports.DataCatalogGrpcTransport, grpc_helpers),
        (transports.DataCatalogGrpcAsyncIOTransport, grpc_helpers_async),
    ],
)
def test_data_catalog_transport_create_channel(transport_class, grpc_helpers):
    """The transport creates its channel with the expected host and options."""
    # If credentials and host are not provided, the transport class should use
    # ADC credentials.
    with mock.patch.object(
        google.auth, "default", autospec=True
    ) as adc, mock.patch.object(
        grpc_helpers, "create_channel", autospec=True
    ) as create_channel:
        creds = ga_credentials.AnonymousCredentials()
        adc.return_value = (creds, None)
        transport_class(quota_project_id="octopus", scopes=["1", "2"])
        create_channel.assert_called_with(
            "datacatalog.googleapis.com:443",
            credentials=creds,
            credentials_file=None,
            quota_project_id="octopus",
            default_scopes=("https://www.googleapis.com/auth/cloud-platform",),
            scopes=["1", "2"],
            default_host="datacatalog.googleapis.com",
            ssl_credentials=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
@pytest.mark.parametrize(
    "transport_class",
    [transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport],
)
def test_data_catalog_grpc_transport_client_cert_source_for_mtls(transport_class):
    """mTLS uses ssl_channel_credentials when given, else client_cert_source."""
    cred = ga_credentials.AnonymousCredentials()
    # Check ssl_channel_credentials is used if provided.
    with mock.patch.object(transport_class, "create_channel") as mock_create_channel:
        mock_ssl_channel_creds = mock.Mock()
        transport_class(
            host="squid.clam.whelk",
            credentials=cred,
            ssl_channel_credentials=mock_ssl_channel_creds,
        )
        mock_create_channel.assert_called_once_with(
            "squid.clam.whelk:443",
            credentials=cred,
            credentials_file=None,
            scopes=None,
            ssl_credentials=mock_ssl_channel_creds,
            quota_project_id=None,
            options=[
                ("grpc.max_send_message_length", -1),
                ("grpc.max_receive_message_length", -1),
            ],
        )
    # Check if ssl_channel_credentials is not provided, then client_cert_source_for_mtls
    # is used.
    with mock.patch.object(transport_class, "create_channel", return_value=mock.Mock()):
        with mock.patch("grpc.ssl_channel_credentials") as mock_ssl_cred:
            transport_class(
                credentials=cred,
                client_cert_source_for_mtls=client_cert_source_callback,
            )
            expected_cert, expected_key = client_cert_source_callback()
            mock_ssl_cred.assert_called_once_with(
                certificate_chain=expected_cert, private_key=expected_key
            )
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_data_catalog_host_no_port(transport_name):
    """An endpoint given without a port gets the default :443 appended."""
    options = client_options.ClientOptions(api_endpoint="datacatalog.googleapis.com")
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
        transport=transport_name,
    )
    assert client.transport._host == "datacatalog.googleapis.com:443"
@pytest.mark.parametrize(
    "transport_name",
    [
        "grpc",
        "grpc_asyncio",
    ],
)
def test_data_catalog_host_with_port(transport_name):
    """An endpoint with an explicit port is used verbatim."""
    options = client_options.ClientOptions(
        api_endpoint="datacatalog.googleapis.com:8000"
    )
    client = DataCatalogClient(
        credentials=ga_credentials.AnonymousCredentials(),
        client_options=options,
        transport=transport_name,
    )
    assert client.transport._host == "datacatalog.googleapis.com:8000"
def test_data_catalog_grpc_transport_channel():
    """A caller-supplied gRPC channel is adopted verbatim by the sync transport."""
    channel = grpc.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DataCatalogGrpcTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare against None with `is` (PEP 8 / pycodestyle E711), not `==`.
    assert transport._ssl_channel_credentials is None
def test_data_catalog_grpc_asyncio_transport_channel():
    """A caller-supplied gRPC channel is adopted verbatim by the asyncio transport."""
    channel = aio.secure_channel("http://localhost/", grpc.local_channel_credentials())
    # Check that channel is used if provided.
    transport = transports.DataCatalogGrpcAsyncIOTransport(
        host="squid.clam.whelk",
        channel=channel,
    )
    assert transport.grpc_channel == channel
    assert transport._host == "squid.clam.whelk:443"
    # Fix: compare against None with `is` (PEP 8 / pycodestyle E711), not `==`.
    assert transport._ssl_channel_credentials is None
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport],
)
def test_data_catalog_transport_channel_mtls_with_client_cert_source(transport_class):
    """Deprecated mTLS args build an mTLS channel from the client cert source."""
    with mock.patch(
        "grpc.ssl_channel_credentials", autospec=True
    ) as grpc_ssl_channel_cred:
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_ssl_cred = mock.Mock()
            grpc_ssl_channel_cred.return_value = mock_ssl_cred
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            cred = ga_credentials.AnonymousCredentials()
            # The deprecated arguments must still work, but warn.
            with pytest.warns(DeprecationWarning):
                with mock.patch.object(google.auth, "default") as adc:
                    adc.return_value = (cred, None)
                    transport = transport_class(
                        host="squid.clam.whelk",
                        api_mtls_endpoint="mtls.squid.clam.whelk",
                        client_cert_source=client_cert_source_callback,
                    )
                    adc.assert_called_once()
            grpc_ssl_channel_cred.assert_called_once_with(
                certificate_chain=b"cert bytes", private_key=b"key bytes"
            )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
            assert transport._ssl_channel_credentials == mock_ssl_cred
# Remove this test when deprecated arguments (api_mtls_endpoint, client_cert_source) are
# removed from grpc/grpc_asyncio transport constructor.
@pytest.mark.parametrize(
    "transport_class",
    [transports.DataCatalogGrpcTransport, transports.DataCatalogGrpcAsyncIOTransport],
)
def test_data_catalog_transport_channel_mtls_with_adc(transport_class):
    """Without a cert source, deprecated mTLS args fall back to ADC SSL creds."""
    mock_ssl_cred = mock.Mock()
    with mock.patch.multiple(
        "google.auth.transport.grpc.SslCredentials",
        __init__=mock.Mock(return_value=None),
        ssl_credentials=mock.PropertyMock(return_value=mock_ssl_cred),
    ):
        with mock.patch.object(
            transport_class, "create_channel"
        ) as grpc_create_channel:
            mock_grpc_channel = mock.Mock()
            grpc_create_channel.return_value = mock_grpc_channel
            mock_cred = mock.Mock()
            # The deprecated arguments must still work, but warn.
            with pytest.warns(DeprecationWarning):
                transport = transport_class(
                    host="squid.clam.whelk",
                    credentials=mock_cred,
                    api_mtls_endpoint="mtls.squid.clam.whelk",
                    client_cert_source=None,
                )
            grpc_create_channel.assert_called_once_with(
                "mtls.squid.clam.whelk:443",
                credentials=mock_cred,
                credentials_file=None,
                scopes=None,
                ssl_credentials=mock_ssl_cred,
                quota_project_id=None,
                options=[
                    ("grpc.max_send_message_length", -1),
                    ("grpc.max_receive_message_length", -1),
                ],
            )
            assert transport.grpc_channel == mock_grpc_channel
def test_entry_path():
    """entry_path builds the fully-qualified entry resource name."""
    fields = {
        "project": "squid",
        "location": "clam",
        "entry_group": "whelk",
        "entry": "octopus",
    }
    expected = (
        "projects/{project}/locations/{location}"
        "/entryGroups/{entry_group}/entries/{entry}"
    ).format(**fields)
    assert DataCatalogClient.entry_path(**fields) == expected
def test_parse_entry_path():
    """parse_entry_path round-trips entry_path output back to its parts."""
    fields = {
        "project": "oyster",
        "location": "nudibranch",
        "entry_group": "cuttlefish",
        "entry": "mussel",
    }
    path = DataCatalogClient.entry_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_entry_path(path) == fields
def test_entry_group_path():
    """entry_group_path builds the fully-qualified entry-group resource name."""
    fields = {
        "project": "winkle",
        "location": "nautilus",
        "entry_group": "scallop",
    }
    expected = "projects/{project}/locations/{location}/entryGroups/{entry_group}".format(
        **fields
    )
    assert DataCatalogClient.entry_group_path(**fields) == expected
def test_parse_entry_group_path():
    """parse_entry_group_path round-trips entry_group_path output."""
    fields = {
        "project": "abalone",
        "location": "squid",
        "entry_group": "clam",
    }
    path = DataCatalogClient.entry_group_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_entry_group_path(path) == fields
def test_tag_path():
    """tag_path builds the fully-qualified tag resource name."""
    fields = {
        "project": "whelk",
        "location": "octopus",
        "entry_group": "oyster",
        "entry": "nudibranch",
        "tag": "cuttlefish",
    }
    expected = (
        "projects/{project}/locations/{location}"
        "/entryGroups/{entry_group}/entries/{entry}/tags/{tag}"
    ).format(**fields)
    assert DataCatalogClient.tag_path(**fields) == expected
def test_parse_tag_path():
    """parse_tag_path round-trips tag_path output back to its parts."""
    fields = {
        "project": "mussel",
        "location": "winkle",
        "entry_group": "nautilus",
        "entry": "scallop",
        "tag": "abalone",
    }
    path = DataCatalogClient.tag_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_tag_path(path) == fields
def test_tag_template_path():
    """tag_template_path builds the fully-qualified tag-template resource name."""
    fields = {
        "project": "squid",
        "location": "clam",
        "tag_template": "whelk",
    }
    expected = "projects/{project}/locations/{location}/tagTemplates/{tag_template}".format(
        **fields
    )
    assert DataCatalogClient.tag_template_path(**fields) == expected
def test_parse_tag_template_path():
    """parse_tag_template_path round-trips tag_template_path output."""
    fields = {
        "project": "octopus",
        "location": "oyster",
        "tag_template": "nudibranch",
    }
    path = DataCatalogClient.tag_template_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_tag_template_path(path) == fields
def test_tag_template_field_path():
    """tag_template_field_path builds the fully-qualified field resource name."""
    fields = {
        "project": "cuttlefish",
        "location": "mussel",
        "tag_template": "winkle",
        "field": "nautilus",
    }
    expected = (
        "projects/{project}/locations/{location}"
        "/tagTemplates/{tag_template}/fields/{field}"
    ).format(**fields)
    assert DataCatalogClient.tag_template_field_path(**fields) == expected
def test_parse_tag_template_field_path():
    """parse_tag_template_field_path round-trips tag_template_field_path output."""
    fields = {
        "project": "scallop",
        "location": "abalone",
        "tag_template": "squid",
        "field": "clam",
    }
    path = DataCatalogClient.tag_template_field_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_tag_template_field_path(path) == fields
def test_tag_template_field_enum_value_path():
    """tag_template_field_enum_value_path builds the enum-value resource name."""
    fields = {
        "project": "whelk",
        "location": "octopus",
        "tag_template": "oyster",
        "tag_template_field_id": "nudibranch",
        "enum_value_display_name": "cuttlefish",
    }
    expected = (
        "projects/{project}/locations/{location}/tagTemplates/{tag_template}"
        "/fields/{tag_template_field_id}/enumValues/{enum_value_display_name}"
    ).format(**fields)
    assert DataCatalogClient.tag_template_field_enum_value_path(**fields) == expected
def test_parse_tag_template_field_enum_value_path():
    """parse_tag_template_field_enum_value_path round-trips its builder."""
    fields = {
        "project": "mussel",
        "location": "winkle",
        "tag_template": "nautilus",
        "tag_template_field_id": "scallop",
        "enum_value_display_name": "abalone",
    }
    path = DataCatalogClient.tag_template_field_enum_value_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_tag_template_field_enum_value_path(path) == fields
def test_common_billing_account_path():
    """common_billing_account_path renders billingAccounts/{billing_account}."""
    billing_account = "squid"
    actual = DataCatalogClient.common_billing_account_path(billing_account)
    assert actual == "billingAccounts/%s" % billing_account
def test_parse_common_billing_account_path():
    """parse_common_billing_account_path round-trips its builder."""
    fields = {"billing_account": "clam"}
    path = DataCatalogClient.common_billing_account_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_common_billing_account_path(path) == fields
def test_common_folder_path():
    """common_folder_path renders folders/{folder}."""
    folder = "whelk"
    assert DataCatalogClient.common_folder_path(folder) == "folders/%s" % folder
def test_parse_common_folder_path():
    """parse_common_folder_path round-trips common_folder_path output."""
    fields = {"folder": "octopus"}
    path = DataCatalogClient.common_folder_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_common_folder_path(path) == fields
def test_common_organization_path():
    """common_organization_path renders organizations/{organization}."""
    organization = "oyster"
    actual = DataCatalogClient.common_organization_path(organization)
    assert actual == "organizations/%s" % organization
def test_parse_common_organization_path():
    """parse_common_organization_path round-trips its builder."""
    fields = {"organization": "nudibranch"}
    path = DataCatalogClient.common_organization_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_common_organization_path(path) == fields
def test_common_project_path():
    """common_project_path renders projects/{project}."""
    project = "cuttlefish"
    assert DataCatalogClient.common_project_path(project) == "projects/%s" % project
def test_parse_common_project_path():
    """parse_common_project_path round-trips common_project_path output."""
    fields = {"project": "mussel"}
    path = DataCatalogClient.common_project_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_common_project_path(path) == fields
def test_common_location_path():
    """common_location_path renders projects/{project}/locations/{location}."""
    fields = {
        "project": "winkle",
        "location": "nautilus",
    }
    expected = "projects/{project}/locations/{location}".format(**fields)
    assert DataCatalogClient.common_location_path(**fields) == expected
def test_parse_common_location_path():
    """parse_common_location_path round-trips common_location_path output."""
    fields = {
        "project": "scallop",
        "location": "abalone",
    }
    path = DataCatalogClient.common_location_path(**fields)
    # Check that the path construction is reversible.
    assert DataCatalogClient.parse_common_location_path(path) == fields
def test_client_with_default_client_info():
    """Custom client_info is forwarded to _prep_wrapped_messages for both paths."""
    client_info = gapic_v1.client_info.ClientInfo()
    # Via the client constructor.
    with mock.patch.object(
        transports.DataCatalogTransport, "_prep_wrapped_messages"
    ) as prep:
        client = DataCatalogClient(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
    # Via direct transport construction.
    with mock.patch.object(
        transports.DataCatalogTransport, "_prep_wrapped_messages"
    ) as prep:
        transport_class = DataCatalogClient.get_transport_class()
        transport = transport_class(
            credentials=ga_credentials.AnonymousCredentials(),
            client_info=client_info,
        )
        prep.assert_called_once_with(client_info)
@pytest.mark.asyncio
async def test_transport_close_async():
    """`async with client` closes the underlying gRPC channel on exit."""
    client = DataCatalogAsyncClient(
        credentials=ga_credentials.AnonymousCredentials(),
        transport="grpc_asyncio",
    )
    with mock.patch.object(
        type(getattr(client.transport, "grpc_channel")), "close"
    ) as close:
        async with client:
            close.assert_not_called()
        close.assert_called_once()
def test_transport_close():
    """`with client` closes the underlying channel of each transport kind."""
    # Fix: renamed the local dict (was `transports`) so it no longer shadows
    # the module-level `transports` import used throughout this file.
    close_names = {
        "grpc": "_grpc_channel",
    }
    for transport_name, close_name in close_names.items():
        client = DataCatalogClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        with mock.patch.object(
            type(getattr(client.transport, close_name)), "close"
        ) as close:
            with client:
                close.assert_not_called()
            close.assert_called_once()
def test_client_ctx():
    """The client context manager closes its transport exactly on exit."""
    # Fix: renamed the local list (was `transports`) so it no longer shadows
    # the module-level `transports` import used throughout this file.
    transport_names = [
        "grpc",
    ]
    for transport_name in transport_names:
        client = DataCatalogClient(
            credentials=ga_credentials.AnonymousCredentials(), transport=transport_name
        )
        # Test client calls underlying transport.
        with mock.patch.object(type(client.transport), "close") as close:
            close.assert_not_called()
            with client:
                pass
            close.assert_called()
@pytest.mark.parametrize(
    "client_class,transport_class",
    [
        (DataCatalogClient, transports.DataCatalogGrpcTransport),
        (DataCatalogAsyncClient, transports.DataCatalogGrpcAsyncIOTransport),
    ],
)
def test_api_key_credentials(client_class, transport_class):
    """An api_key client option is exchanged for credentials on the transport."""
    with mock.patch.object(
        google.auth._default, "get_api_key_credentials", create=True
    ) as get_api_key_credentials:
        mock_cred = mock.Mock()
        get_api_key_credentials.return_value = mock_cred
        options = client_options.ClientOptions()
        options.api_key = "api_key"
        with mock.patch.object(transport_class, "__init__") as patched:
            patched.return_value = None
            client = client_class(client_options=options)
            # The transport must receive the exchanged credentials plus defaults.
            patched.assert_called_once_with(
                credentials=mock_cred,
                credentials_file=None,
                host=client.DEFAULT_ENDPOINT,
                scopes=None,
                client_cert_source_for_mtls=None,
                quota_project_id=None,
                client_info=transports.base.DEFAULT_CLIENT_INFO,
                always_use_jwt_access=True,
                api_audience=None,
            )
|
{
"content_hash": "a4cb458edbdf3e4d6ffb220aa247d0b0",
"timestamp": "",
"source": "github",
"line_count": 9882,
"max_line_length": 160,
"avg_line_length": 35.35448289819875,
"alnum_prop": 0.641194368196741,
"repo_name": "googleapis/python-datacatalog",
"id": "dcec63959dc542724bb929c799eff5ea1e38534d",
"size": "349973",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "tests/unit/gapic/datacatalog_v1/test_data_catalog.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2050"
},
{
"name": "Python",
"bytes": "3073442"
},
{
"name": "Shell",
"bytes": "30675"
}
],
"symlink_target": ""
}
|
import copy
import inspect
import logging
from django.core import urlresolvers
from django import forms
from django.forms.forms import NON_FIELD_ERRORS # noqa
from django import template
from django.template.defaultfilters import linebreaks # noqa
from django.template.defaultfilters import safe # noqa
from django.template.defaultfilters import slugify # noqa
from django.utils.encoding import force_unicode
from django.utils.importlib import import_module # noqa
from django.utils.translation import ugettext_lazy as _
from horizon import base
from horizon import exceptions
from horizon.templatetags.horizon import has_permissions # noqa
from horizon.utils import html
LOG = logging.getLogger(__name__)
class WorkflowContext(dict):
    """Dict subclass holding a workflow's shared context data.

    Every write is reported back to the owning workflow via
    ``_trigger_handlers`` so that dependent steps can react to changes.
    """

    def __init__(self, workflow, *args, **kwargs):
        super(WorkflowContext, self).__init__(*args, **kwargs)
        self._workflow = workflow

    def __setitem__(self, key, value):
        super(WorkflowContext, self).__setitem__(key, value)
        # Propagate the change; the workflow's handler result is returned.
        return self._workflow._trigger_handlers(key)

    def __delitem__(self, key):
        # "Deletion" resets the key to None instead of removing it, so the
        # key stays present and handlers still fire.
        return self.__setitem__(key, None)

    def set(self, key, value):
        """Explicit alias for ``ctx[key] = value``."""
        return self.__setitem__(key, value)

    def unset(self, key):
        """Explicit alias for ``del ctx[key]`` (resets the value to None)."""
        return self.__delitem__(key)
class ActionMetaclass(forms.forms.DeclarativeFieldsMetaclass):
    """Metaclass that lifts ``Meta`` options onto ``Action`` classes."""

    def __new__(mcs, name, bases, attrs):
        # Remove Meta before Django's field collection runs, then copy its
        # options (with defaults) onto the created class.
        meta = attrs.pop("Meta", None)
        new_class = super(ActionMetaclass, mcs).__new__(mcs, name, bases, attrs)
        new_class.name = getattr(meta, "name", name)
        new_class.slug = getattr(meta, "slug", slugify(name))
        new_class.permissions = getattr(meta, "permissions", ())
        new_class.progress_message = getattr(meta,
                                             "progress_message",
                                             _("Processing..."))
        new_class.help_text = getattr(meta, "help_text", "")
        new_class.help_text_template = getattr(meta, "help_text_template", None)
        return new_class
class Action(forms.Form):
"""An ``Action`` represents an atomic logical interaction you can have with
the system. This is easier to understand with a conceptual example: in the
context of a "launch instance" workflow, actions would include "naming
the instance", "selecting an image", and ultimately "launching the
instance".
Because ``Actions`` are always interactive, they always provide form
controls, and thus inherit from Django's ``Form`` class. However, they
have some additional intelligence added to them:
* ``Actions`` are aware of the permissions required to complete them.
* ``Actions`` have a meta-level concept of "help text" which is meant to be
displayed in such a way as to give context to the action regardless of
where the action is presented in a site or workflow.
* ``Actions`` understand how to handle their inputs and produce outputs,
much like :class:`~horizon.forms.SelfHandlingForm` does now.
``Action`` classes may define the following attributes in a ``Meta``
class within them:
.. attribute:: name
The verbose name for this action. Defaults to the name of the class.
.. attribute:: slug
A semi-unique slug for this action. Defaults to the "slugified" name
of the class.
.. attribute:: permissions
A list of permission names which this action requires in order to be
completed. Defaults to an empty list (``[]``).
.. attribute:: help_text
A string of simple help text to be displayed alongside the Action's
fields.
.. attribute:: help_text_template
A path to a template which contains more complex help text to be
displayed alongside the Action's fields. In conjunction with
:meth:`~horizon.workflows.Action.get_help_text` method you can
customize your help text template to display practically anything.
"""
__metaclass__ = ActionMetaclass
def __init__(self, request, context, *args, **kwargs):
if request.method == "POST":
super(Action, self).__init__(request.POST, initial=context)
else:
super(Action, self).__init__(initial=context)
if not hasattr(self, "handle"):
raise AttributeError("The action %s must define a handle method."
% self.__class__.__name__)
self.request = request
self._populate_choices(request, context)
self.required_css_class = 'required'
def __unicode__(self):
return force_unicode(self.name)
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self.slug)
def _populate_choices(self, request, context):
for field_name, bound_field in self.fields.items():
meth = getattr(self, "populate_%s_choices" % field_name, None)
if meth is not None and callable(meth):
bound_field.choices = meth(request, context)
def get_help_text(self, extra_context=None):
"""Returns the help text for this step."""
text = ""
extra_context = extra_context or {}
if self.help_text_template:
tmpl = template.loader.get_template(self.help_text_template)
context = template.RequestContext(self.request, extra_context)
text += tmpl.render(context)
else:
text += linebreaks(force_unicode(self.help_text))
return safe(text)
    def add_error(self, message):
        """Adds an error to the Action's Step based on API issues."""
        # Stored as a non-field (form-wide) error so it renders with the
        # form as a whole rather than next to any individual field.
        self.errors[NON_FIELD_ERRORS] = self.error_class([message])
    def handle(self, request, context):
        """Handles any requisite processing for this action.

        The method should return either ``None`` or a dictionary of data
        to be passed to :meth:`~horizon.workflows.Step.contribute`.

        Returns ``None`` by default, effectively making it a no-op.
        """
        return None
class MembershipAction(Action):
    """An action that allows a user to add/remove members from a group.

    Extend the Action class with additional helper method for membership
    management.
    """

    def get_default_role_field_name(self):
        # Name of the field holding the role granted to newly added members.
        return "".join(("default_", self.slug, "_role"))

    def get_member_field_name(self, role_id):
        # Name of the field listing the members holding the given role.
        return "".join((self.slug, "_role_", role_id))
class Step(object):
    """A step is a wrapper around an action which defines its context in a
    workflow. It knows about details such as:

    * The workflow's context data (data passed from step to step).

    * The data which must be present in the context to begin this step (the
      step's dependencies).

    * The keys which will be added to the context data upon completion of the
      step.

    * The connections between this step's fields and changes in the context
      data (e.g. if that piece of data changes, what needs to be updated in
      this step).

    A ``Step`` class has the following attributes:

    .. attribute:: action

        The :class:`~horizon.workflows.Action` class which this step wraps.

    .. attribute:: depends_on

        A list of context data keys which this step requires in order to
        begin interaction.

    .. attribute:: contributes

        A list of keys which this step will contribute to the workflow's
        context data. Optional keys should still be listed, even if their
        values may be set to ``None``.

    .. attribute:: connections

        A dictionary which maps context data key names to lists of callbacks.
        The callbacks may be functions, dotted python paths to functions
        which may be imported, or dotted strings beginning with ``"self"``
        to indicate methods on the current ``Step`` instance.

    .. attribute:: before

        Another ``Step`` class. This optional attribute is used to provide
        control over workflow ordering when steps are dynamically added to
        workflows. The workflow mechanism will attempt to place the current
        step before the step specified in the attribute.

    .. attribute:: after

        Another ``Step`` class. This attribute has the same purpose as
        :meth:`~horizon.workflows.Step.before` except that it will instead
        attempt to place the current step after the given step.

    .. attribute:: help_text

        A string of simple help text which will be prepended to the ``Action``
        class' help text if desired.

    .. attribute:: template_name

        A path to a template which will be used to render this step. In
        general the default common template should be used. Default:
        ``"horizon/common/_workflow_step.html"``.

    .. attribute:: has_errors

        A boolean value which indicates whether or not this step has any
        errors on the action within it or in the scope of the workflow. This
        attribute will only accurately reflect this status after validation
        has occurred.

    .. attribute:: slug

        Inherited from the ``Action`` class.

    .. attribute:: name

        Inherited from the ``Action`` class.

    .. attribute:: permissions

        Inherited from the ``Action`` class.
    """
    action_class = None
    depends_on = ()
    contributes = ()
    connections = None
    before = None
    after = None
    help_text = ""
    template_name = "horizon/common/_workflow_step.html"

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def __unicode__(self):
        return force_unicode(self.name)

    def __init__(self, workflow):
        super(Step, self).__init__()
        self.workflow = workflow

        cls = self.__class__.__name__
        if not (self.action_class and issubclass(self.action_class, Action)):
            raise AttributeError("You must specify an action for %s." % cls)

        self.slug = self.action_class.slug
        self.name = self.action_class.name
        self.permissions = self.action_class.permissions
        self.has_errors = False
        self._handlers = {}

        if self.connections is None:
            # We want a dict, but don't want to declare a mutable type on the
            # class directly.
            self.connections = {}

        # Gather our connection handlers and make sure they exist.
        for key, handlers in self.connections.items():
            self._handlers[key] = []
            # TODO(gabriel): This is a poor substitute for broader handling
            if not isinstance(handlers, (list, tuple)):
                raise TypeError("The connection handlers for %s must be a "
                                "list or tuple." % cls)
            for possible_handler in handlers:
                if callable(possible_handler):
                    # If it's callable we know the function exists and is valid
                    self._handlers[key].append(possible_handler)
                    continue
                elif not isinstance(possible_handler, basestring):
                    # BUGFIX: this previously *returned* a TypeError instance
                    # from __init__, which caused Python to raise an unrelated
                    # "__init__() should return None" error instead of the
                    # intended message.
                    raise TypeError("Connection handlers must be either "
                                    "callables or strings.")
                bits = possible_handler.split(".")
                if bits[0] == "self":
                    # Resolve a dotted "self.foo.bar" path on this instance.
                    root = self
                    for bit in bits[1:]:
                        try:
                            root = getattr(root, bit)
                        except AttributeError:
                            raise AttributeError("The connection handler %s "
                                                 "could not be found on %s."
                                                 % (possible_handler, cls))
                    handler = root
                elif len(bits) == 1:
                    # Import by name from local module not supported
                    raise ValueError("Importing a local function as a string "
                                     "is not supported for the connection "
                                     "handler %s on %s."
                                     % (possible_handler, cls))
                else:
                    # Try a general import
                    module_name = ".".join(bits[:-1])
                    try:
                        mod = import_module(module_name)
                        handler = getattr(mod, bits[-1])
                    except ImportError:
                        raise ImportError("Could not import %s from the "
                                          "module %s as a connection "
                                          "handler on %s."
                                          % (bits[-1], module_name, cls))
                    except AttributeError:
                        raise AttributeError("Could not import %s from the "
                                             "module %s as a connection "
                                             "handler on %s."
                                             % (bits[-1], module_name, cls))
                self._handlers[key].append(handler)

    @property
    def action(self):
        # Lazily instantiate (and cache) the wrapped Action.
        if not getattr(self, "_action", None):
            try:
                # Hook in the action context customization.
                workflow_context = dict(self.workflow.context)
                context = self.prepare_action_context(self.workflow.request,
                                                      workflow_context)
                self._action = self.action_class(self.workflow.request,
                                                 context)
            except Exception:
                LOG.exception("Problem instantiating action class.")
                raise
        return self._action

    def prepare_action_context(self, request, context):
        """Allows for customization of how the workflow context is passed to
        the action; this is the reverse of what "contribute" does to make the
        action outputs sane for the workflow. Changes to the context are not
        saved globally here. They are localized to the action.

        Simply returns the unaltered context by default.
        """
        return context

    def get_id(self):
        """Returns the ID for this step. Suitable for use in HTML markup."""
        return "%s__%s" % (self.workflow.slug, self.slug)

    def _verify_contributions(self, context):
        for key in self.contributes:
            # Make sure we don't skip steps based on weird behavior of
            # POST query dicts.
            field = self.action.fields.get(key, None)
            if field and field.required and not context.get(key):
                context.pop(key, None)
        failed_to_contribute = set(self.contributes)
        failed_to_contribute -= set(context.keys())
        if failed_to_contribute:
            raise exceptions.WorkflowError("The following expected data was "
                                           "not added to the workflow context "
                                           "by the step %s: %s."
                                           % (self.__class__,
                                              failed_to_contribute))
        return True

    def contribute(self, data, context):
        """Adds the data listed in ``contributes`` to the workflow's shared
        context. By default, the context is simply updated with all the data
        returned by the action.

        Note that even if the value of one of the ``contributes`` keys is
        not present (e.g. optional) the key should still be added to the
        context with a value of ``None``.
        """
        if data:
            for key in self.contributes:
                context[key] = data.get(key, None)
        return context

    def render(self):
        """Renders the step."""
        step_template = template.loader.get_template(self.template_name)
        extra_context = {"form": self.action,
                         "step": self}
        context = template.RequestContext(self.workflow.request, extra_context)
        return step_template.render(context)

    def get_help_text(self):
        """Returns the help text for this step."""
        # The step's own help text precedes the wrapped action's.
        text = linebreaks(force_unicode(self.help_text))
        text += self.action.get_help_text()
        return safe(text)

    def add_error(self, message):
        """Adds an error to the Step based on API issues."""
        self.action.add_error(message)

    def has_required_fields(self):
        """Returns True if action contains any required fields."""
        return any(field.required for field in self.action.fields.values())
class WorkflowMetaclass(type):
    """Metaclass giving each Workflow class its own step registry.

    Every class created through this metaclass receives a fresh
    ``_cls_registry`` set, so steps registered on one workflow class do
    not leak onto siblings or subclasses.
    """
    def __new__(mcs, name, bases, attrs):
        # Inject the per-class registry before creating the class, and
        # create the class exactly once. The previous implementation called
        # ``super().__new__`` and discarded the result, needlessly
        # constructing a throwaway class object first.
        attrs["_cls_registry"] = set([])
        return super(WorkflowMetaclass, mcs).__new__(mcs, name, bases, attrs)
class UpdateMembersStep(Step):
    """A step that allows a user to add/remove members from a group.

    .. attribute:: show_roles

        Set to False to disable the display of the roles dropdown.

    .. attribute:: available_list_title

        The title used for the available list column.

    .. attribute:: members_list_title

        The title used for the members list column.

    .. attribute:: no_available_text

        The placeholder text used when the available list is empty.

    .. attribute:: no_members_text

        The placeholder text used when the members list is empty.
    """
    template_name = "horizon/common/_workflow_step_update_members.html"
    show_roles = True
    available_list_title = _("All available")
    members_list_title = _("Members")
    no_available_text = _("None available.")
    no_members_text = _("No members.")

    def get_member_field_name(self, role_id):
        # Delegate to the action when it knows how to build the field name;
        # otherwise fall back to the step's own slug-based naming.
        if not issubclass(self.action_class, MembershipAction):
            return self.slug + "_role_" + role_id
        return self.action.get_member_field_name(role_id)
class Workflow(html.HTMLElement):
    """A Workflow is a collection of Steps. Its interface is very
    straightforward, but it is responsible for handling some very
    important tasks such as:

    * Handling the injection, removal, and ordering of arbitrary steps.

    * Determining if the workflow can be completed by a given user at runtime
      based on all available information.

    * Dispatching connections between steps to ensure that when context data
      changes all the applicable callback functions are executed.

    * Verifying/validating the overall data integrity and subsequently
      triggering the final method to complete the workflow.

    The ``Workflow`` class has the following attributes:

    .. attribute:: name

        The verbose name for this workflow which will be displayed to the user.
        Defaults to the class name.

    .. attribute:: slug

        The unique slug for this workflow. Required.

    .. attribute:: steps

        Read-only access to the final ordered set of step instances for
        this workflow.

    .. attribute:: default_steps

        A list of :class:`~horizon.workflows.Step` classes which serve as the
        starting point for this workflow's ordered steps. Defaults to an empty
        list (``[]``).

    .. attribute:: finalize_button_name

        The name which will appear on the submit button for the workflow's
        form. Defaults to ``"Save"``.

    .. attribute:: success_message

        A string which will be displayed to the user upon successful completion
        of the workflow. Defaults to
        ``"{{ workflow.name }} completed successfully."``

    .. attribute:: failure_message

        A string which will be displayed to the user upon failure to complete
        the workflow. Defaults to ``"{{ workflow.name }} did not complete."``

    .. attribute:: depends_on

        A roll-up list of all the ``depends_on`` values compiled from the
        workflow's steps.

    .. attribute:: contributions

        A roll-up list of all the ``contributes`` values compiled from the
        workflow's steps.

    .. attribute:: template_name

        Path to the template which should be used to render this workflow.
        In general the default common template should be used. Default:
        ``"horizon/common/_workflow.html"``.

    .. attribute:: entry_point

        The slug of the step which should initially be active when the
        workflow is rendered. This can be passed in upon initialization of
        the workflow, or set anytime after initialization but before calling
        either ``get_entry_point`` or ``render``.

    .. attribute:: redirect_param_name

        The name of a parameter used for tracking the URL to redirect to upon
        completion of the workflow. Defaults to ``"next"``.

    .. attribute:: object

        The object (if any) which this workflow relates to. In the case of
        a workflow which creates a new resource the object would be the created
        resource after the relevant creation steps have been undertaken. In
        the case of a workflow which updates a resource it would be the
        resource being updated after it has been retrieved.

    .. attribute:: wizard

        Whether to present the workflow as a wizard, with "prev" and "next"
        buttons and validation after every step.

    .. attribute:: fullscreen

        If the workflow is presented in a modal, and this attribute is
        set to True, then the ``fullscreen`` css class will be added so
        the modal can take advantage of the available screen estate.
        Defaults to ``False``.
    """
    __metaclass__ = WorkflowMetaclass
    slug = None
    default_steps = ()
    template_name = "horizon/common/_workflow.html"
    finalize_button_name = _("Save")
    success_message = _("%s completed successfully.")
    failure_message = _("%s did not complete.")
    redirect_param_name = "next"
    multipart = False
    wizard = False
    fullscreen = False
    _registerable_class = Step

    def __unicode__(self):
        return self.name

    def __repr__(self):
        return "<%s: %s>" % (self.__class__.__name__, self.slug)

    def __init__(self, request=None, context_seed=None, entry_point=None,
                 *args, **kwargs):
        super(Workflow, self).__init__(*args, **kwargs)
        if self.slug is None:
            raise AttributeError("The workflow %s must have a slug."
                                 % self.__class__.__name__)
        self.name = getattr(self, "name", self.__class__.__name__)
        self.request = request
        self.depends_on = set([])
        self.contributions = set([])
        self.entry_point = entry_point
        self.object = None

        # Put together our steps in order. Note that we pre-register
        # non-default steps so that we can identify them and subsequently
        # insert them in order correctly.
        self._registry = dict([(step_class, step_class(self)) for step_class
                               in self.__class__._cls_registry
                               if step_class not in self.default_steps])
        self._gather_steps()

        # Determine all the context data we need to end up with.
        for step in self.steps:
            self.depends_on = self.depends_on | set(step.depends_on)
            self.contributions = self.contributions | set(step.contributes)

        # Initialize our context. For ease we can preseed it with a
        # regular dictionary. This should happen after steps have been
        # registered and ordered.
        self.context = WorkflowContext(self)
        context_seed = context_seed or {}
        # Drop seed keys no step consumes or produces.
        clean_seed = dict([(key, val)
                           for key, val in context_seed.items()
                           if key in self.contributions | self.depends_on])
        self.context_seed = clean_seed
        self.context.update(clean_seed)

        if request and request.method == "POST":
            for step in self.steps:
                valid = step.action.is_valid()
                # Be sure to use the CLEANED data if the workflow is valid.
                if valid:
                    data = step.action.cleaned_data
                else:
                    data = request.POST
                self.context = step.contribute(data, self.context)

    @property
    def steps(self):
        # Lazily computed, cached ordered list of step instances.
        if getattr(self, "_ordered_steps", None) is None:
            self._gather_steps()
        return self._ordered_steps

    def get_step(self, slug):
        """Returns the instantiated step matching the given slug."""
        for step in self.steps:
            if step.slug == slug:
                return step

    def _gather_steps(self):
        # Instantiate default steps, then keep only those the current user
        # has permission to interact with.
        ordered_step_classes = self._order_steps()
        for default_step in self.default_steps:
            self.register(default_step)
            self._registry[default_step] = default_step(self)
        self._ordered_steps = [self._registry[step_class]
                               for step_class in ordered_step_classes
                               if has_permissions(self.request.user,
                                                  self._registry[step_class])]

    def _order_steps(self):
        # Insert each dynamically registered step between the positions
        # dictated by its ``after`` and ``before`` attributes.
        steps = list(copy.copy(self.default_steps))
        additional = self._registry.keys()
        for step in additional:
            try:
                min_pos = steps.index(step.after)
            except ValueError:
                min_pos = 0
            try:
                max_pos = steps.index(step.before)
            except ValueError:
                max_pos = len(steps)
            if min_pos > max_pos:
                # BUGFIX: the message previously interpolated ``additional``
                # (the whole list of registered step classes) for "new";
                # it should name the single step being placed.
                raise exceptions.WorkflowError("The step %(new)s can't be "
                                               "placed between the steps "
                                               "%(after)s and %(before)s; the "
                                               "step %(before)s comes before "
                                               "%(after)s."
                                               % {"new": step,
                                                  "after": step.after,
                                                  "before": step.before})
            steps.insert(max_pos, step)
        return steps

    def get_entry_point(self):
        """Returns the slug of the step which the workflow should begin on.

        This method takes into account both already-available data and errors
        within the steps.
        """
        # If we have a valid specified entry point, use it.
        if self.entry_point:
            if self.get_step(self.entry_point):
                return self.entry_point
        # Otherwise fall back to calculating the appropriate entry point.
        for step in self.steps:
            if step.has_errors:
                return step.slug
            try:
                step._verify_contributions(self.context)
            except exceptions.WorkflowError:
                return step.slug
        # If nothing else, just return the first step.
        return self.steps[0].slug

    def _trigger_handlers(self, key):
        # Run every step's registered handlers for the changed context key,
        # collecting (step_slug, handler_result) pairs.
        responses = []
        handlers = [(step.slug, f) for step in self.steps
                    for f in step._handlers.get(key, [])]
        for slug, handler in handlers:
            responses.append((slug, handler(self.request, self.context)))
        return responses

    @classmethod
    def register(cls, step_class):
        """Registers a :class:`~horizon.workflows.Step` with the workflow."""
        if not inspect.isclass(step_class):
            raise ValueError('Only classes may be registered.')
        elif not issubclass(step_class, cls._registerable_class):
            raise ValueError('Only %s classes or subclasses may be registered.'
                             % cls._registerable_class.__name__)
        if step_class in cls._cls_registry:
            return False
        else:
            cls._cls_registry.add(step_class)
            return True

    @classmethod
    def unregister(cls, step_class):
        """Unregisters a :class:`~horizon.workflows.Step` from the workflow.
        """
        try:
            cls._cls_registry.remove(step_class)
        except KeyError:
            raise base.NotRegistered('%s is not registered' % cls)
        # NOTE(review): ``_unregister`` is not defined on this class and is
        # presumably provided by a base class -- verify it exists before
        # relying on this return value.
        return cls._unregister(step_class)

    def validate(self, context):
        """Hook for custom context data validation. Should return a boolean
        value or raise :class:`~horizon.exceptions.WorkflowValidationError`.
        """
        return True

    def is_valid(self):
        """Verified that all required data is present in the context and
        calls the ``validate`` method to allow for finer-grained checks
        on the context data.
        """
        missing = self.depends_on - set(self.context.keys())
        if missing:
            raise exceptions.WorkflowValidationError(
                "Unable to complete the workflow. The values %s are "
                "required but not present." % ", ".join(missing))

        # Validate each step. Cycle through all of them to catch all errors
        # in one pass before returning.
        steps_valid = True
        for step in self.steps:
            if not step.action.is_valid():
                steps_valid = False
                step.has_errors = True
        if not steps_valid:
            return steps_valid
        return self.validate(self.context)

    def finalize(self):
        """Finalizes a workflow by running through all the actions in order
        and calling their ``handle`` methods. Returns ``True`` on full success,
        or ``False`` for a partial success, e.g. there were non-critical
        errors. (If it failed completely the function wouldn't return.)
        """
        partial = False
        for step in self.steps:
            try:
                data = step.action.handle(self.request, self.context)
                if data is True or data is None:
                    continue
                elif data is False:
                    partial = True
                else:
                    self.context = step.contribute(data or {}, self.context)
            except Exception:
                partial = True
                exceptions.handle(self.request)
        if not self.handle(self.request, self.context):
            partial = True
        return not partial

    def handle(self, request, context):
        """Handles any final processing for this workflow. Should return a
        boolean value indicating success.
        """
        return True

    def get_success_url(self):
        """Returns a URL to redirect the user to upon completion. By default it
        will attempt to parse a ``success_url`` attribute on the workflow,
        which can take the form of a reversible URL pattern name, or a
        standard HTTP URL.
        """
        try:
            return urlresolvers.reverse(self.success_url)
        except urlresolvers.NoReverseMatch:
            return self.success_url

    def format_status_message(self, message):
        """Hook to allow customization of the message returned to the user
        upon successful or unsuccessful completion of the workflow.

        By default it simply inserts the workflow's name into the message
        string.
        """
        if "%s" in message:
            return message % self.name
        else:
            return message

    def render(self):
        """Renders the workflow."""
        workflow_template = template.loader.get_template(self.template_name)
        extra_context = {"workflow": self}
        if self.request.is_ajax():
            extra_context['modal'] = True
        context = template.RequestContext(self.request, extra_context)
        return workflow_template.render(context)

    def get_absolute_url(self):
        """Returns the canonical URL for this workflow.

        This is used for the POST action attribute on the form element
        wrapping the workflow.

        For convenience it defaults to the value of
        ``request.get_full_path()`` with any query string stripped off,
        e.g. the path at which the workflow was requested.
        """
        return self.request.get_full_path().partition('?')[0]

    def add_error_to_step(self, message, slug):
        """Adds an error to the workflow's Step with the
        specified slug based on API issues. This is useful
        when you wish for API errors to appear as errors on
        the form rather than using the messages framework.
        """
        step = self.get_step(slug)
        if step:
            step.add_error(message)
|
{
"content_hash": "891ecac63ad3a54f09b56313b974ebd9",
"timestamp": "",
"source": "github",
"line_count": 850,
"max_line_length": 79,
"avg_line_length": 38.451764705882354,
"alnum_prop": 0.5922775670052625,
"repo_name": "tanglei528/horizon",
"id": "45f4df80e9e268854bbf455702e96031f39d87b8",
"size": "33334",
"binary": false,
"copies": "6",
"ref": "refs/heads/master",
"path": "horizon/workflows/base.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "181644"
},
{
"name": "JavaScript",
"bytes": "693540"
},
{
"name": "Python",
"bytes": "3066105"
},
{
"name": "Shell",
"bytes": "14657"
}
],
"symlink_target": ""
}
|
"""Definition of mali operator strategy."""
# pylint: disable=invalid-name,unused-argument,wildcard-import,unused-wildcard-import
import re
from tvm import topi
from .generic import *
from .. import op as _op
@conv2d_strategy.register("mali")
def conv2d_strategy_mali(attrs, inputs, out_type, target):
    """conv2d mali strategy"""
    op_strategy = _op.OpStrategy()
    data, kernel = inputs
    dilation_h, dilation_w = attrs.get_int_tuple("dilation")
    stride_h, stride_w = attrs.get_int_tuple("strides")
    groups = attrs.groups
    layout = attrs.data_layout
    kernel_layout = attrs.kernel_layout

    if dilation_h < 1 or dilation_w < 1:
        raise ValueError("dilation should be positive value")

    if groups == 1:
        if layout != "NCHW":
            raise RuntimeError("Unsupported conv2d layout {} for mali".format(layout))
        if kernel_layout == "OIHW":
            op_strategy.add_implementation(
                wrap_compute_conv2d(topi.mali.conv2d_nchw_spatial_pack),
                wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_spatial_pack),
                name="conv2d_nchw_spatial_pack.mali",
            )
            # Winograd only applies to 3x3, stride-1, non-dilated kernels;
            # register it at a higher plevel so it is preferred when valid.
            _, _, kh, kw = get_const_tuple(kernel.shape)
            winograd_applicable = (
                (kh, kw) == (3, 3)
                and (stride_h, stride_w) == (1, 1)
                and (dilation_h, dilation_w) == (1, 1)
            )
            if winograd_applicable:
                op_strategy.add_implementation(
                    wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd),
                    wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd),
                    name="conv2d_nchw_winograd.mali",
                    plevel=5,
                )
        elif re.match(r"OIHW\d*o", kernel_layout):
            # Pre-packed weight layout is handled by the same spatial-pack
            # implementation.
            op_strategy.add_implementation(
                wrap_compute_conv2d(topi.mali.conv2d_nchw_spatial_pack),
                wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_spatial_pack),
                name="conv2d_nchw_spatial_pack.mali",
            )
        else:
            raise RuntimeError(
                "Unsupported weight layout {} for conv2d NCHW".format(kernel_layout)
            )
    elif is_depthwise_conv2d(data.shape, layout, kernel.shape, kernel_layout, groups):
        if layout != "NCHW":
            raise RuntimeError("Unsupported depthwise_conv2d layout {} for mali".format(layout))
        assert kernel_layout == "OIHW"
        op_strategy.add_implementation(
            wrap_compute_conv2d(topi.mali.depthwise_conv2d_nchw),
            wrap_topi_schedule(topi.mali.schedule_depthwise_conv2d_nchw),
            name="depthwise_conv2d_nchw.mali",
        )
    else:  # group_conv2d
        raise RuntimeError("group_conv2d is not supported for mali")
    return op_strategy
@conv2d_winograd_without_weight_transfrom_strategy.register("mali")
def conv2d_winograd_without_weight_transfrom_strategy_mali(attrs, inputs, out_type, target):
    """conv2d_winograd_without_weight_transfrom mali strategy

    NOTE: "transfrom" is a historical typo, kept because the strategy
    registry in ``generic`` uses this name; renaming would break callers.
    """
    dilation = attrs.get_int_tuple("dilation")
    groups = attrs.get_int("groups")
    layout = attrs.data_layout
    strides = attrs.get_int_tuple("strides")
    kernel = inputs[1]
    assert dilation == (1, 1), "Do not support dilate now"
    assert strides == (1, 1), "Do not support strides now"
    # Fixed typo in the assertion message ("supoort" -> "support").
    assert groups == 1, "Do not support arbitrary group number"
    strategy = _op.OpStrategy()
    if layout == "NCHW":
        # The weight has already been transformed, so it must arrive
        # pre-packed as a 5-D tensor.
        assert len(kernel.shape) == 5, "Kernel must be packed into 5-dim"
        strategy.add_implementation(
            wrap_compute_conv2d(topi.mali.conv2d_nchw_winograd),
            wrap_topi_schedule(topi.mali.schedule_conv2d_nchw_winograd),
            name="conv2d_nchw_winograd.mali",
        )
    else:
        raise RuntimeError(
            "Unsupported conv2d_winograd_without_weight_transfrom layout {}".format(layout)
        )
    return strategy
@dense_strategy.register("mali")
def dense_strategy_mali(attrs, inputs, out_type, target):
    """dense mali strategy"""
    # Mali has a single dense implementation; register it directly.
    mali_strategy = _op.OpStrategy()
    impl_compute = wrap_compute_dense(topi.mali.dense)
    impl_schedule = wrap_topi_schedule(topi.mali.schedule_dense)
    mali_strategy.add_implementation(
        impl_compute,
        impl_schedule,
        name="dense.mali",
    )
    return mali_strategy
|
{
"content_hash": "bf3a1d1262e675a2dbcc2904354b9596",
"timestamp": "",
"source": "github",
"line_count": 108,
"max_line_length": 96,
"avg_line_length": 41.47222222222222,
"alnum_prop": 0.5912033936146461,
"repo_name": "sxjscience/tvm",
"id": "f6ea911a15bf598e599e8e073031b6255d93cca8",
"size": "5264",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "python/tvm/relay/op/strategy/mali.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ANTLR",
"bytes": "6056"
},
{
"name": "C",
"bytes": "95567"
},
{
"name": "C++",
"bytes": "5565032"
},
{
"name": "CMake",
"bytes": "67305"
},
{
"name": "Go",
"bytes": "112376"
},
{
"name": "HTML",
"bytes": "8625"
},
{
"name": "Java",
"bytes": "173219"
},
{
"name": "JavaScript",
"bytes": "49801"
},
{
"name": "Makefile",
"bytes": "50818"
},
{
"name": "Objective-C",
"bytes": "15264"
},
{
"name": "Objective-C++",
"bytes": "46673"
},
{
"name": "Python",
"bytes": "6763729"
},
{
"name": "Rust",
"bytes": "182027"
},
{
"name": "Scala",
"bytes": "184105"
},
{
"name": "Shell",
"bytes": "96967"
},
{
"name": "Tcl",
"bytes": "53645"
},
{
"name": "Verilog",
"bytes": "30605"
}
],
"symlink_target": ""
}
|
"""
Write Prismatic (http://prism-em.com/) input files.
"""
class Prismatic:
    """
    Class to write Prismatic (http://prism-em.com/) input files.
    This is designed for STEM image simulation.
    """

    def __init__(self, structure, comment="Generated by pymatgen"):
        """
        Args:
            structure: pymatgen Structure
            comment (str): header comment written as the first line
        """
        self.structure = structure
        self.comment = comment

    def to_string(self):
        """
        Returns: Prismatic XYZ file contents as a string. This is similar
        to the XYZ format but has specific requirements for extra fields,
        headers, and the trailing "-1" terminator line.
        """
        lattice = self.structure.lattice
        # Header: comment line, then the cell dimensions.
        lines = [self.comment, "{} {} {}".format(lattice.a, lattice.b, lattice.c)]
        for site in self.structure:
            coords = site.coords
            # Debye-Waller / thermal displacement; 0 when not provided.
            sigma = site.properties.get("thermal_sigma", 0)
            # One line per species on the site: Z, x, y, z, occupancy, sigma.
            for species, occupancy in site.species.items():
                lines.append(
                    "{} {} {} {} {} {}".format(
                        species.Z, coords[0], coords[1], coords[2], occupancy, sigma
                    )
                )
        lines.append("-1")
        return "\n".join(lines)
|
{
"content_hash": "4b823f5b5225367f0fdfa17ae6c37fad",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 69,
"avg_line_length": 28.568181818181817,
"alnum_prop": 0.4701670644391408,
"repo_name": "gmatteo/pymatgen",
"id": "06d5eb7e649abb1650daafecb08ac1bd3577325f",
"size": "1367",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "pymatgen/io/prismatic.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "277"
},
{
"name": "Python",
"bytes": "7840569"
},
{
"name": "Shell",
"bytes": "711"
}
],
"symlink_target": ""
}
|
"""Tests for metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import metrics
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
import tensorflow.python.ops.data_flow_grad # pylint: disable=unused-import
import tensorflow.python.ops.nn_grad # pylint: disable=unused-import
from tensorflow.python.platform import test
NAN = float('nan')
def _enqueue_vector(sess, queue, values, shape=None):
  """Enqueues `values` onto `queue` as a single constant tensor.

  When no shape is given, the values are enqueued as a row vector of
  shape (1, len(values)).
  """
  if not shape:
    shape = (1, len(values))
  tensor = constant_op.constant(values, dtype=queue.dtypes[0], shape=shape)
  sess.run(queue.enqueue(tensor))
def _binary_2d_label_to_2d_sparse_value(labels):
  """Convert dense 2D binary indicator to sparse ID.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator, shape [batch_size, num_classes].

  Returns:
    `SparseTensorValue` of shape [batch_size, num_classes], where num_classes
    is the number of `1` values in each row of `labels`. Values are indices
    of `1` values along the last dimension of `labels`.
  """
  indices = []
  values = []
  for batch, row in enumerate(labels):
    # Column position within the sparse row: how many 1s seen so far.
    ones_seen = 0
    for class_id, flag in enumerate(row):
      if flag == 1:
        indices.append([batch, ones_seen])
        values.append(class_id)
        ones_seen += 1
      else:
        assert flag == 0
  shape = [len(labels), len(labels[0])]
  return sparse_tensor.SparseTensorValue(
      np.array(indices, np.int64),
      np.array(values, np.int64), np.array(shape, np.int64))
def _binary_2d_label_to_1d_sparse_value(labels):
  """Convert dense 2D binary indicator to sparse ID.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator, shape [batch_size, num_classes]. Each
      row must contain exactly 1 `1` value.

  Returns:
    `SparseTensorValue` of shape [batch_size]. Values are indices of `1` values
    along the last dimension of `labels`.

  Raises:
    ValueError: if there is not exactly 1 `1` value per row of `labels`.
  """
  indices = []
  values = []
  for batch, row in enumerate(labels):
    for class_id, flag in enumerate(row):
      if flag == 1:
        indices.append([batch])
        values.append(class_id)
      else:
        assert flag == 0
  # Exactly one 1 per row implies one index entry per batch element.
  if indices != [[i] for i in range(len(labels))]:
    raise ValueError('Expected 1 label/example, got %s.' % indices)
  shape = [len(labels)]
  return sparse_tensor.SparseTensorValue(
      np.array(indices, np.int64),
      np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse_value(labels):
  """Convert dense 3D binary indicator tensor to sparse tensor.

  Only 1 values in `labels` are included in result.

  Args:
    labels: Dense 2D binary indicator tensor.

  Returns:
    `SparseTensorValue` whose values are indices along the last dimension of
    `labels`.
  """
  indices = []
  values = []
  for i, plane in enumerate(labels):
    for j, row in enumerate(plane):
      # Last sparse index: position among the 1s within this row.
      ones_seen = 0
      for class_id, flag in enumerate(row):
        if flag == 1:
          values.append(class_id)
          indices.append([i, j, ones_seen])
          ones_seen += 1
        else:
          assert flag == 0
  shape = [len(labels), len(labels[0]), len(labels[0][0])]
  return sparse_tensor.SparseTensorValue(
      np.array(indices, np.int64),
      np.array(values, np.int64), np.array(shape, np.int64))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_local_variables(test_case, expected):
  """Asserts that the graph's local variables are exactly `expected` names."""
  actual = set(v.name for v in variables.local_variables())
  test_case.assertEquals(set(expected), actual)
def _test_values(shape):
return np.reshape(np.cumsum(np.ones(shape)), newshape=shape)
class MeanTest(test.TestCase):
  def setUp(self):
    # Start each test from a clean graph so metric variables created by one
    # test do not leak into the next.
    ops.reset_default_graph()
  def testVars(self):
    # mean() should create exactly the two local variables that hold the
    # running total and the running count.
    metrics.mean(array_ops.ones([4, 3]))
    _assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
  def testMetricsCollection(self):
    # The value tensor should be added to the requested metrics collection.
    my_collection_name = '__metrics__'
    mean, _ = metrics.mean(
        array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUnweighted(self):
values = _test_values((3, 2, 4))
mean_results = (
metrics.mean(values),
metrics.mean(values, weights=1.0),
metrics.mean(values, weights=np.ones((1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 1, 1))),
metrics.mean(values, weights=np.ones((1, 1, 4))),
metrics.mean(values, weights=np.ones((1, 2, 1))),
metrics.mean(values, weights=np.ones((1, 2, 4))),
metrics.mean(values, weights=np.ones((3, 1, 1))),
metrics.mean(values, weights=np.ones((3, 1, 4))),
metrics.mean(values, weights=np.ones((3, 2, 1))),
metrics.mean(values, weights=np.ones((3, 2, 4))),
metrics.mean(values, weights=np.ones((3, 2, 4, 1))),)
expected = np.mean(values)
with self.test_session():
variables.local_variables_initializer().run()
for mean_result in mean_results:
mean, update_op = mean_result
self.assertAlmostEqual(expected, update_op.eval())
self.assertAlmostEqual(expected, mean.eval())
def _test_3d_weighted(self, values, weights):
expected = (
np.sum(np.multiply(weights, values)) /
np.sum(np.multiply(weights, np.ones_like(values)))
)
mean, update_op = metrics.mean(values, weights=weights)
with self.test_session():
variables.local_variables_initializer().run()
self.assertAlmostEqual(expected, update_op.eval(), places=5)
self.assertAlmostEqual(expected, mean.eval(), places=5)
def test1x1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5,)).reshape((1, 1, 1)))
def test1x1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3)).reshape((1, 1, 4)))
def test1xNx1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 11)).reshape((1, 2, 1)))
def test1xNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11, 3, 2, 13, 7, 5)).reshape((1, 2, 4)))
def testNx1x1Weighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((5, 7, 11)).reshape((3, 1, 1)))
def testNx1xNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3)).reshape((3, 1, 4)))
def testNxNxNWeighted(self):
self._test_3d_weighted(
_test_values((3, 2, 4)),
weights=np.asarray((
5, 7, 11, 3, 2, 12, 7, 5, 2, 17, 11, 3,
2, 17, 11, 3, 5, 7, 11, 3, 2, 12, 7, 5)).reshape((3, 2, 4)))
def testInvalidWeights(self):
values_placeholder = array_ops.placeholder(dtype=dtypes_lib.float32)
values = _test_values((3, 2, 4, 1))
invalid_weights = (
(1,),
(1, 1),
(1, 1, 1),
(3, 2),
(3, 2, 4),
(2, 4, 1),
(4, 2, 4, 1),
(3, 3, 4, 1),
(3, 2, 5, 1),
(3, 2, 4, 2),
(1, 1, 1, 1, 1))
expected_error_msg = 'weights can not be broadcast to values'
for invalid_weight in invalid_weights:
# Static shapes.
with self.assertRaisesRegexp(ValueError, expected_error_msg):
metrics.mean(values, invalid_weight)
# Dynamic shapes.
with self.assertRaisesRegexp(errors_impl.OpError, expected_error_msg):
with self.test_session():
_, update_op = metrics.mean(values_placeholder, invalid_weight)
variables.local_variables_initializer().run()
update_op.eval(feed_dict={values_placeholder: values})
class MeanTensorTest(test.TestCase):
  """Tests for `metrics.mean_tensor` (elementwise streaming mean)."""

  def setUp(self):
    # Each test builds its own graph; reset so variable names don't collide.
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers total/count tensor local variables."""
    metrics.mean_tensor(array_ops.ones([4, 3]))
    _assert_local_variables(self, ('mean/total_tensor:0',
                                   'mean/count_tensor:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.mean_tensor(
        array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.mean_tensor(
        array_ops.ones([4, 3]), updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testBasic(self):
    """Elementwise mean over 4 enqueued (1, 2) batches."""
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.mean_tensor(values)

      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      # Per-element means over the 4 batches.
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))

  def testMultiDimensional(self):
    """The mean is computed elementwise for rank-3 inputs as well."""
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
      _enqueue_vector(
          sess,
          values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
          shape=(2, 2, 2))
      _enqueue_vector(
          sess,
          values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
          shape=(2, 2, 2))
      values = values_queue.dequeue()

      mean, update_op = metrics.mean_tensor(values)

      sess.run(variables.local_variables_initializer())
      for _ in range(2):
        sess.run(update_op)
      self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))

  def testUpdateOpsReturnsCurrentValue(self):
    """Each update_op evaluation returns the running elementwise mean."""
    with self.test_session() as sess:
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      mean, update_op = metrics.mean_tensor(values)

      sess.run(variables.local_variables_initializer())

      self.assertAllClose([[0, 1]], sess.run(update_op), 5)
      self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
      self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)

      self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)

  def testWeighted1d(self):
    """Per-batch scalar weights: only batches 1 and 3 contribute."""
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      _enqueue_vector(sess, weights_queue, [[1]])
      _enqueue_vector(sess, weights_queue, [[0]])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.mean_tensor(values, weights)

      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)

  def testWeighted2d_1(self):
    """Per-element weights select different elements per batch."""
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [1, 1])
      _enqueue_vector(sess, weights_queue, [1, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.mean_tensor(values, weights)

      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)

  def testWeighted2d_2(self):
    """An element whose weight is always zero keeps a zero mean."""
    with self.test_session() as sess:
      # Create the queue that populates the values.
      values_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, values_queue, [0, 1])
      _enqueue_vector(sess, values_queue, [-4.2, 9.1])
      _enqueue_vector(sess, values_queue, [6.5, 0])
      _enqueue_vector(sess, values_queue, [-3.2, 4.0])
      values = values_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 2))
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      _enqueue_vector(sess, weights_queue, [0, 1])
      _enqueue_vector(sess, weights_queue, [0, 0])
      weights = weights_queue.dequeue()

      mean, update_op = metrics.mean_tensor(values, weights)

      sess.run(variables.local_variables_initializer())
      for _ in range(4):
        sess.run(update_op)
      self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class AccuracyTest(test.TestCase):
  """Tests for the streaming `metrics.accuracy` metric."""

  def setUp(self):
    # Each test builds its own graph; reset so variable names don't collide.
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers `total` and `count` local variables."""
    metrics.accuracy(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        name='my_accuracy')
    _assert_local_variables(self, ('my_accuracy/count:0',
                                   'my_accuracy/total:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.accuracy(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.accuracy(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
    """Shape mismatch between labels and predictions fails at graph time."""
    predictions = array_ops.ones((10, 3))
    labels = array_ops.ones((10, 4))
    with self.assertRaises(ValueError):
      metrics.accuracy(labels, predictions)

  def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
    """Shape mismatch between predictions and weights fails at graph time."""
    predictions = array_ops.ones((10, 3))
    labels = array_ops.ones((10, 3))
    weights = array_ops.ones((9, 3))
    with self.assertRaises(ValueError):
      metrics.accuracy(labels, predictions, weights)

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change the accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
    accuracy, update_op = metrics.accuracy(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_accuracy = accuracy.eval()
      for _ in range(10):
        self.assertEqual(initial_accuracy, accuracy.eval())

  def testMultipleUpdates(self):
    """Streaming accuracy over 4 batches: 2 of 4 predictions are correct."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()

      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      labels = labels_queue.dequeue()

      accuracy, update_op = metrics.accuracy(labels, predictions)

      sess.run(variables.local_variables_initializer())
      for _ in xrange(3):
        sess.run(update_op)
      self.assertEqual(0.5, sess.run(update_op))
      self.assertEqual(0.5, accuracy.eval())

  def testEffectivelyEquivalentSizes(self):
    """(40, 1) predictions vs (40,) labels are treated as equivalent."""
    predictions = array_ops.ones((40, 1))
    labels = array_ops.ones((40,))
    with self.test_session() as sess:
      accuracy, update_op = metrics.accuracy(labels, predictions)

      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.0, update_op.eval())
      self.assertEqual(1.0, accuracy.eval())

  def testEffectivelyEquivalentSizesWithScalarWeight(self):
    """A scalar weight scales total and count equally, leaving accuracy."""
    predictions = array_ops.ones((40, 1))
    labels = array_ops.ones((40,))
    with self.test_session() as sess:
      accuracy, update_op = metrics.accuracy(labels, predictions, weights=2.0)

      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.0, update_op.eval())
      self.assertEqual(1.0, accuracy.eval())

  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
    """Statically shaped (3, 1) weights are flattened, not broadcast."""
    predictions = ops.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
                                   1)  # shape 3, 1
    weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
                                    1)  # shape 3, 1

    with self.test_session() as sess:
      accuracy, update_op = metrics.accuracy(labels, predictions, weights)

      sess.run(variables.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(), .95)
      self.assertGreater(accuracy.eval(), .95)

  def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
    """Same flattening behavior when weights come through a placeholder."""
    predictions = ops.convert_to_tensor([1, 1, 1])  # shape 3,
    labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
                                   1)  # shape 3, 1

    weights = [[100], [1], [1]]  # shape 3, 1
    weights_placeholder = array_ops.placeholder(
        dtype=dtypes_lib.int32, name='weights')
    feed_dict = {weights_placeholder: weights}

    with self.test_session() as sess:
      accuracy, update_op = metrics.accuracy(labels, predictions,
                                             weights_placeholder)

      sess.run(variables.local_variables_initializer())
      # if streaming_accuracy does not flatten the weight, accuracy would be
      # 0.33333334 due to an intended broadcast of weight. Due to flattening,
      # it will be higher than .95
      self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
      self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)

  def testMultipleUpdatesWithWeightedValues(self):
    """Zero-weighted batches are excluded; the two weighted ones are correct."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()

      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      labels = labels_queue.dequeue()

      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          4, dtypes=dtypes_lib.int64, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [1])
      _enqueue_vector(sess, weights_queue, [0])
      _enqueue_vector(sess, weights_queue, [0])
      weights = weights_queue.dequeue()

      accuracy, update_op = metrics.accuracy(labels, predictions, weights)

      sess.run(variables.local_variables_initializer())
      for _ in xrange(3):
        sess.run(update_op)
      self.assertEqual(1.0, sess.run(update_op))
      self.assertEqual(1.0, accuracy.eval())
class PrecisionTest(test.TestCase):
  """Tests for the streaming `metrics.precision` metric."""

  def setUp(self):
    # Seed NumPy for reproducible random fixtures; reset the graph per test.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers TP/FP count local variables."""
    metrics.precision(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_local_variables(self, ('precision/false_positives/count:0',
                                   'precision/true_positives/count:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.precision(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change the accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    precision, update_op = metrics.precision(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_precision = precision.eval()
      for _ in range(10):
        self.assertEqual(initial_precision, precision.eval())

  def testAllCorrect(self):
    """Identical predictions and labels give precision 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs)
    labels = constant_op.constant(inputs)
    precision, update_op = metrics.precision(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1, sess.run(update_op))
      self.assertAlmostEqual(1, precision.eval())

  def testSomeCorrect_multipleInputDtypes(self):
    """Precision is dtype-agnostic: bool, int32, and float32 inputs agree."""
    for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions = math_ops.cast(
          constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
      labels = math_ops.cast(
          constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
      precision, update_op = metrics.precision(labels, predictions)

      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertAlmostEqual(0.5, update_op.eval())
        self.assertAlmostEqual(0.5, precision.eval())

  def testWeighted1d(self):
    """Per-row weights scale both true positives and predicted positives."""
    predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    precision, update_op = metrics.precision(
        labels, predictions, weights=constant_op.constant([[2], [5]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, precision.eval())

  def testWeightedScalar_placeholders(self):
    """A scalar weight cancels out of the precision ratio."""
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.precision(labels, predictions, weights=2)

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 2.0 + 2.0
      weighted_positives = (2.0 + 2.0) + (2.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testWeighted1d_placeholders(self):
    """Same as testWeighted1d but with placeholder-fed inputs."""
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.precision(
        labels, predictions, weights=constant_op.constant([[2], [5]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 2.0 + 5.0
      weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testWeighted2d(self):
    """Elementwise weights: TPs weigh 3 and 4; predicted positives 1,3,4,2."""
    predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
    labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    precision, update_op = metrics.precision(
        labels,
        predictions,
        weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, precision.eval())

  def testWeighted2d_placeholders(self):
    """Same as testWeighted2d but with placeholder-fed inputs."""
    predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
    labels = array_ops.placeholder(dtype=dtypes_lib.float32)
    feed_dict = {
        predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
        labels: ((0, 1, 1, 0), (1, 0, 0, 1))
    }
    precision, update_op = metrics.precision(
        labels,
        predictions,
        weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))

    with self.test_session():
      variables.local_variables_initializer().run()
      weighted_tp = 3.0 + 4.0
      weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
      expected_precision = weighted_tp / weighted_positives
      self.assertAlmostEqual(
          expected_precision, update_op.eval(feed_dict=feed_dict))
      self.assertAlmostEqual(
          expected_precision, precision.eval(feed_dict=feed_dict))

  def testAllIncorrect(self):
    """Complementary predictions and labels give precision 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs)
    labels = constant_op.constant(1 - inputs)
    precision, update_op = metrics.precision(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertAlmostEqual(0, precision.eval())

  def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
    """With no predicted positives at all, precision is defined as 0."""
    predictions = constant_op.constant([0, 0, 0, 0])
    labels = constant_op.constant([0, 0, 0, 0])
    precision, update_op = metrics.precision(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0.0, precision.eval())
class RecallTest(test.TestCase):
  """Tests for the streaming `metrics.recall` metric."""

  def setUp(self):
    # Seed NumPy for reproducible random fixtures; reset the graph per test.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """Creating the metric registers TP/FN count local variables."""
    metrics.recall(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_local_variables(self, ('recall/false_negatives/count:0',
                                   'recall/true_positives/count:0'))

  def testMetricsCollection(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.recall(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.recall(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change the accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    recall, update_op = metrics.recall(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_recall = recall.eval()
      for _ in range(10):
        self.assertEqual(initial_recall, recall.eval())

  def testAllCorrect(self):
    """Identical predictions and labels give recall 1."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(np_inputs)
    labels = constant_op.constant(np_inputs)
    recall, update_op = metrics.recall(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(1, recall.eval())

  def testSomeCorrect_multipleInputDtypes(self):
    """Recall is dtype-agnostic: bool, int32, and float32 inputs agree."""
    for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions = math_ops.cast(
          constant_op.constant([1, 0, 1, 0], shape=(1, 4)), dtype=dtype)
      labels = math_ops.cast(
          constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=dtype)
      recall, update_op = metrics.recall(labels, predictions)

      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertAlmostEqual(0.5, update_op.eval())
        self.assertAlmostEqual(0.5, recall.eval())

  def testWeighted1d(self):
    """Per-row weights scale both true positives and actual positives."""
    predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    weights = constant_op.constant([[2], [5]])
    recall, update_op = metrics.recall(labels, predictions, weights=weights)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      weighted_tp = 2.0 + 5.0
      weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_precision = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, recall.eval())

  def testWeighted2d(self):
    """Elementwise weights: TPs weigh 3 and 1; actual positives 2,3,4,1."""
    predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
    labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
    weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
    recall, update_op = metrics.recall(labels, predictions, weights=weights)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      weighted_tp = 3.0 + 1.0
      weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_precision = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_precision, update_op.eval())
      self.assertAlmostEqual(expected_precision, recall.eval())

  def testAllIncorrect(self):
    """Complementary predictions and labels give recall 0."""
    np_inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(np_inputs)
    labels = constant_op.constant(1 - np_inputs)
    recall, update_op = metrics.recall(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0, recall.eval())

  def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
    """With no actual positives at all, recall is defined as 0."""
    predictions = array_ops.zeros((1, 4))
    labels = array_ops.zeros((1, 4))
    recall, update_op = metrics.recall(labels, predictions)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      sess.run(update_op)
      self.assertEqual(0, recall.eval())
class AUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)))
_assert_local_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.auc(predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
auc, update_op = metrics.auc(labels, predictions)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.auc(labels, predictions, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect_multipleLabelDtypes(self):
with self.test_session() as sess:
for label_dtype in (
dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = math_ops.cast(
constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.auc(labels, predictions)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.auc(labels, predictions, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitely using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
  def testWithMultipleUpdates(self):
    """Streams batches through queues and compares to the NumPy reference."""
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)

    # Create the labels and data.
    labels = np.random.randint(0, 2, size=num_samples)
    noise = np.random.normal(0.0, scale=0.2, size=num_samples)
    # Predictions correlate with labels but are noisy, then clipped to [0, 1].
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0

    def _enqueue_as_batches(x, enqueue_ops):
      # Splits `x` into `num_batches` rows, records one enqueue op per batch
      # in `enqueue_ops`, and returns the queue's dequeue tensor.
      x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
      x_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      for i in range(num_batches):
        enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
      return x_queue.dequeue()

    # Exercise unweighted, uniformly-weighted, and randomly-weighted cases.
    for weights in (None, np.ones(num_samples), np.random.exponential(
        scale=1.0, size=num_samples)):
      expected_auc = self.np_auc(predictions, labels, weights)
      with self.test_session() as sess:
        enqueue_ops = [[] for i in range(num_batches)]
        tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
        tf_labels = _enqueue_as_batches(labels, enqueue_ops)
        tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
                      weights is not None else None)

        for i in range(num_batches):
          sess.run(enqueue_ops[i])

        auc, update_op = metrics.auc(tf_labels,
                                     tf_predictions,
                                     curve='ROC',
                                     num_thresholds=500,
                                     weights=tf_weights)

        sess.run(variables.local_variables_initializer())
        for i in range(num_batches):
          sess.run(update_op)

        # Since this is only approximate, we can't expect a 6 digits match.
        # Although with higher number of samples/thresholds we should see the
        # accuracy improving
        self.assertAlmostEqual(expected_auc, auc.eval(), 2)
class SpecificityAtSensitivityTest(test.TestCase):
  """Tests for metrics.specificity_at_sensitivity."""

  def setUp(self):
    # Fixed seed so the tests that draw random inputs are reproducible.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates exactly the four local confusion-count variables."""
    metrics.specificity_at_sensitivity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        sensitivity=0.7)
    _assert_local_variables(self,
                            ('specificity_at_sensitivity/true_positives:0',
                             'specificity_at_sensitivity/false_negatives:0',
                             'specificity_at_sensitivity/false_positives:0',
                             'specificity_at_sensitivity/true_negatives:0'))

  def testMetricsCollection(self):
    """The value tensor is added to `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.specificity_at_sensitivity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        sensitivity=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.specificity_at_sensitivity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        sensitivity=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
    specificity, update_op = metrics.specificity_at_sensitivity(
        labels, predictions, sensitivity=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_specificity = specificity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)

  def testAllCorrect(self):
    """Perfect predictions give a specificity of 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
    labels = constant_op.constant(inputs)
    specificity, update_op = metrics.specificity_at_sensitivity(
        labels, predictions, sensitivity=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, specificity.eval())

  def testSomeCorrectHighSensitivity(self):
    """Pins specificity at sensitivity=0.8 on a fixed example."""
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    specificity, update_op = metrics.specificity_at_sensitivity(
        labels, predictions, sensitivity=0.8)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1.0, sess.run(update_op))
      self.assertAlmostEqual(1.0, specificity.eval())

  def testSomeCorrectLowSensitivity(self):
    """Pins specificity at sensitivity=0.4 on a fixed example."""
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    specificity, update_op = metrics.specificity_at_sensitivity(
        labels, predictions, sensitivity=0.4)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, specificity.eval())

  def testWeighted1d_multipleLabelDtypes(self):
    """A scalar-like 1-D weight and all supported label dtypes."""
    for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
      labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
      # A single broadcast weight should not change the unweighted result.
      weights_values = [3]

      predictions = constant_op.constant(
          predictions_values, dtype=dtypes_lib.float32)
      labels = math_ops.cast(labels_values, dtype=label_dtype)
      weights = constant_op.constant(weights_values)
      specificity, update_op = metrics.specificity_at_sensitivity(
          labels, predictions, weights=weights, sensitivity=0.4)

      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())

        self.assertAlmostEqual(0.6, sess.run(update_op))
        self.assertAlmostEqual(0.6, specificity.eval())

  def testWeighted2d(self):
    """Per-example weights change the pinned specificity."""
    predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
    weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    weights = constant_op.constant(weights_values)
    specificity, update_op = metrics.specificity_at_sensitivity(
        labels, predictions, weights=weights, sensitivity=0.4)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
      self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class SensitivityAtSpecificityTest(test.TestCase):
  """Tests for metrics.sensitivity_at_specificity."""

  def setUp(self):
    # Fixed seed so the tests that draw random inputs are reproducible.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates exactly the four local confusion-count variables."""
    metrics.sensitivity_at_specificity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        specificity=0.7)
    _assert_local_variables(self,
                            ('sensitivity_at_specificity/true_positives:0',
                             'sensitivity_at_specificity/false_negatives:0',
                             'sensitivity_at_specificity/false_positives:0',
                             'sensitivity_at_specificity/true_negatives:0'))

  def testMetricsCollection(self):
    """The value tensor is added to `metrics_collections`."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.sensitivity_at_specificity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        specificity=0.7,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])

  def testUpdatesCollection(self):
    """The update op is added to `updates_collections`."""
    my_collection_name = '__updates__'
    _, update_op = metrics.sensitivity_at_specificity(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        specificity=0.7,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=2, dtype=dtypes_lib.int64, seed=1)
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates.
      for _ in range(10):
        sess.run(update_op)

      # Then verify idempotency.
      initial_sensitivity = sensitivity.eval()
      for _ in range(10):
        self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)

  def testAllCorrect(self):
    """Perfect predictions give a sensitivity of 1."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
    labels = constant_op.constant(inputs)
    # This metric returns sensitivity; name the local accordingly.
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.7)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, sensitivity.eval())

  def testSomeCorrectHighSpecificity(self):
    """Pins sensitivity at specificity=0.8 on a fixed example."""
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.8)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())

  def testSomeCorrectLowSpecificity(self):
    """Pins sensitivity at specificity=0.4 on a fixed example."""
    predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
    labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]

    predictions = constant_op.constant(
        predictions_values, dtype=dtypes_lib.float32)
    labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.sensitivity_at_specificity(
        labels, predictions, specificity=0.4)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())

  def testWeighted_multipleLabelDtypes(self):
    """Per-example weights with all supported label dtypes."""
    for label_dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
      predictions_values = [
          0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
      labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
      weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]

      predictions = constant_op.constant(
          predictions_values, dtype=dtypes_lib.float32)
      labels = math_ops.cast(labels_values, dtype=label_dtype)
      weights = constant_op.constant(weights_values)
      sensitivity, update_op = metrics.sensitivity_at_specificity(
          labels, predictions, weights=weights, specificity=0.4)

      with self.test_session() as sess:
        sess.run(variables.local_variables_initializer())
        self.assertAlmostEqual(0.675, sess.run(update_op))
        self.assertAlmostEqual(0.675, sensitivity.eval())
# TODO(nsilberman): Break this up into two sets of tests.
class PrecisionRecallThresholdsTest(test.TestCase):
  """Tests for metrics.precision_at_thresholds and recall_at_thresholds."""

  def setUp(self):
    # Fixed seed so the tests that draw random inputs are reproducible.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """precision_at_thresholds creates the two local count variables."""
    metrics.precision_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0])
    _assert_local_variables(self, (
        'precision_at_thresholds/true_positives:0',
        'precision_at_thresholds/false_positives:0',))

  def testMetricsCollection(self):
    """Both metrics' value tensors land in `metrics_collections`."""
    my_collection_name = '__metrics__'
    prec, _ = metrics.precision_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    rec, _ = metrics.recall_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])

  def testUpdatesCollection(self):
    """Both metrics' update ops land in `updates_collections`."""
    my_collection_name = '__updates__'
    _, precision_op = metrics.precision_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    _, recall_op = metrics.recall_at_thresholds(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        thresholds=[0, 0.5, 1.0],
        updates_collections=[my_collection_name])
    self.assertListEqual(
        ops.get_collection(my_collection_name), [precision_op, recall_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensors must not change accumulated state."""
    predictions = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
    labels = random_ops.random_uniform(
        (10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
    thresholds = [0, 0.5, 1.0]
    prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
                                                    thresholds)
    rec, rec_op = metrics.recall_at_thresholds(labels, predictions, thresholds)

    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())

      # Run several updates, then verify idempotency.
      sess.run([prec_op, rec_op])
      initial_prec = prec.eval()
      initial_rec = rec.eval()
      for _ in range(10):
        sess.run([prec_op, rec_op])
        self.assertAllClose(initial_prec, prec.eval())
        self.assertAllClose(initial_rec, rec.eval())

  # TODO(nsilberman): fix tests (passing but incorrect).
  def testAllCorrect(self):
    """Perfect predictions give precision and recall of 1 at 0.5."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(inputs)
      thresholds = [0.5]
      prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
                                                      thresholds)
      rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
                                                 thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertEqual(1, prec.eval())
      self.assertEqual(1, rec.eval())

  def testSomeCorrect_multipleLabelDtypes(self):
    """Half-correct predictions, across all supported label dtypes."""
    with self.test_session() as sess:
      for label_dtype in (
          dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
        predictions = constant_op.constant(
            [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
        labels = math_ops.cast(
            constant_op.constant([0, 1, 1, 0], shape=(1, 4)), dtype=label_dtype)
        thresholds = [0.5]
        prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
                                                        thresholds)
        rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
                                                   thresholds)

        sess.run(variables.local_variables_initializer())
        sess.run([prec_op, rec_op])

        self.assertAlmostEqual(0.5, prec.eval())
        self.assertAlmostEqual(0.5, rec.eval())

  def testAllIncorrect(self):
    """Fully wrong predictions give precision and recall of 0."""
    inputs = np.random.randint(0, 2, size=(100, 1))

    with self.test_session() as sess:
      predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
      labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
      thresholds = [0.5]
      prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
                                                      thresholds)
      rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
                                                 thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0, prec.eval())
      self.assertAlmostEqual(0, rec.eval())

  def testWeights1d(self):
    """1-D weights broadcast across the second dimension."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      # Zero-weight the first row entirely.
      weights = constant_op.constant(
          [[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
      # 1.1 exceeds every prediction, so the second threshold sees no
      # positives at all.
      thresholds = [0.5, 1.1]
      prec, prec_op = metrics.precision_at_thresholds(
          labels, predictions, thresholds, weights=weights)
      rec, rec_op = metrics.recall_at_thresholds(
          labels, predictions, thresholds, weights=weights)

      # Split the per-threshold vectors into scalars for the assertions.
      [prec_low, prec_high] = array_ops.split(
          value=prec, num_or_size_splits=2, axis=0)
      prec_low = array_ops.reshape(prec_low, shape=())
      prec_high = array_ops.reshape(prec_high, shape=())
      [rec_low, rec_high] = array_ops.split(
          value=rec, num_or_size_splits=2, axis=0)
      rec_low = array_ops.reshape(rec_low, shape=())
      rec_high = array_ops.reshape(rec_high, shape=())

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
      self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, rec_high.eval(), places=5)

  def testWeights2d(self):
    """2-D weights matching the input shape element-wise."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
      labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
      # Zero-weight the first row entirely.
      weights = constant_op.constant(
          [[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
      # 1.1 exceeds every prediction, so the second threshold sees no
      # positives at all.
      thresholds = [0.5, 1.1]
      prec, prec_op = metrics.precision_at_thresholds(
          labels, predictions, thresholds, weights=weights)
      rec, rec_op = metrics.recall_at_thresholds(
          labels, predictions, thresholds, weights=weights)

      # Split the per-threshold vectors into scalars for the assertions.
      [prec_low, prec_high] = array_ops.split(
          value=prec, num_or_size_splits=2, axis=0)
      prec_low = array_ops.reshape(prec_low, shape=())
      prec_high = array_ops.reshape(prec_high, shape=())
      [rec_low, rec_high] = array_ops.split(
          value=rec, num_or_size_splits=2, axis=0)
      rec_low = array_ops.reshape(rec_low, shape=())
      rec_high = array_ops.reshape(rec_high, shape=())

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
      self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
      self.assertAlmostEqual(0.0, rec_high.eval(), places=5)

  def testExtremeThresholds(self):
    """Thresholds below/above every prediction value."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
      thresholds = [-1.0, 2.0]  # lower/higher than any values
      prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
                                                      thresholds)
      rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
                                                 thresholds)

      [prec_low, prec_high] = array_ops.split(
          value=prec, num_or_size_splits=2, axis=0)
      [rec_low, rec_high] = array_ops.split(
          value=rec, num_or_size_splits=2, axis=0)

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0.75, prec_low.eval())
      self.assertAlmostEqual(0.0, prec_high.eval())
      self.assertAlmostEqual(1.0, rec_low.eval())
      self.assertAlmostEqual(0.0, rec_high.eval())

  def testZeroLabelsPredictions(self):
    """All-zero inputs give 0 precision and 0 recall."""
    with self.test_session() as sess:
      predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
      labels = array_ops.zeros([4])
      thresholds = [0.5]
      prec, prec_op = metrics.precision_at_thresholds(labels, predictions,
                                                      thresholds)
      rec, rec_op = metrics.recall_at_thresholds(labels, predictions,
                                                 thresholds)

      sess.run(variables.local_variables_initializer())
      sess.run([prec_op, rec_op])

      self.assertAlmostEqual(0, prec.eval(), 6)
      self.assertAlmostEqual(0, rec.eval(), 6)

  def testWithMultipleUpdates(self):
    """Streams batches through queues and compares to hand counts."""
    num_samples = 1000
    batch_size = 10
    num_batches = int(num_samples / batch_size)

    # Create the labels and data.
    labels = np.random.randint(0, 2, size=(num_samples, 1))
    noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
    # Predictions correlate with labels but are noisy, then clipped to [0, 1].
    predictions = 0.4 + 0.2 * labels + noise
    predictions[predictions > 1] = 1
    predictions[predictions < 0] = 0
    thresholds = [0.3]

    # Compute the expected confusion counts directly in Python.
    tp = 0
    fp = 0
    fn = 0
    tn = 0
    for i in range(num_samples):
      if predictions[i] > thresholds[0]:
        if labels[i] == 1:
          tp += 1
        else:
          fp += 1
      else:
        if labels[i] == 1:
          fn += 1
        else:
          tn += 1
    epsilon = 1e-7
    expected_prec = tp / (epsilon + tp + fp)
    expected_rec = tp / (epsilon + tp + fn)

    labels = labels.astype(np.float32)
    predictions = predictions.astype(np.float32)

    with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
      # NOTE(review): column i of this reshape interleaves samples rather than
      # taking a contiguous batch; all samples are still consumed exactly once,
      # so the totals match the hand counts above.
      predictions_batches = predictions.reshape((batch_size, num_batches))
      labels_batches = labels.reshape((batch_size, num_batches))

      # Enqueue the data:
      predictions_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
      labels_queue = data_flow_ops.FIFOQueue(
          num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))

      for i in range(int(num_batches)):
        tf_prediction = constant_op.constant(predictions_batches[:, i])
        tf_label = constant_op.constant(labels_batches[:, i])
        sess.run([
            predictions_queue.enqueue(tf_prediction),
            labels_queue.enqueue(tf_label)
        ])

      tf_predictions = predictions_queue.dequeue()
      tf_labels = labels_queue.dequeue()

      prec, prec_op = metrics.precision_at_thresholds(tf_labels, tf_predictions,
                                                      thresholds)
      rec, rec_op = metrics.recall_at_thresholds(tf_labels, tf_predictions,
                                                 thresholds)

      sess.run(variables.local_variables_initializer())
      for _ in range(int(num_samples / batch_size)):
        sess.run([prec_op, rec_op])

      # Since this is only approximate, we can't expect a 6 digits match.
      # Although with higher number of samples/thresholds we should see the
      # accuracy improving
      self.assertAlmostEqual(expected_prec, prec.eval(), 2)
      self.assertAlmostEqual(expected_rec, rec.eval(), 2)
def _test_sparse_precision_at_k(predictions,
                                labels,
                                k,
                                expected,
                                class_id=None,
                                weights=None,
                                test_case=None):
  """Runs metrics.sparse_precision_at_k in a fresh graph and checks the value.

  Args:
    predictions: prediction values; converted to a float32 constant.
    labels: labels in any form sparse_precision_at_k accepts.
    k: top-k cutoff.
    expected: expected metric value, or NaN to assert a NaN result.
    class_id: optional class to restrict the metric to.
    weights: optional weights; converted to a float32 constant.
    test_case: the tf.test.TestCase providing the session and assertions.
  """
  with ops.Graph().as_default() as g, test_case.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    metric, update = metrics.sparse_precision_at_k(
        predictions=constant_op.constant(predictions, dtypes_lib.float32),
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)

    # Fails without initialized vars.
    test_case.assertRaises(errors_impl.OpError, metric.eval)
    test_case.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # Run per-step op and assert expected values.
    if math.isnan(expected):
      _assert_nan(test_case, update.eval())
      _assert_nan(test_case, metric.eval())
    else:
      test_case.assertEqual(expected, update.eval())
      test_case.assertEqual(expected, metric.eval())
def _test_sparse_average_precision_at_k(predictions,
                                        labels,
                                        k,
                                        expected,
                                        weights=None,
                                        test_case=None):
  """Runs metrics.sparse_average_precision_at_k and checks the value.

  Args:
    predictions: prediction values; converted to a float32 constant.
    labels: labels in any form sparse_average_precision_at_k accepts.
    k: top-k cutoff.
    expected: expected metric value, or NaN to assert a NaN result.
    weights: optional weights; converted to a float32 constant.
    test_case: the tf.test.TestCase providing the session and assertions.
  """
  with ops.Graph().as_default() as g, test_case.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    predictions = constant_op.constant(predictions, dtypes_lib.float32)
    metric, update = metrics.sparse_average_precision_at_k(
        labels, predictions, k, weights=weights)

    # Fails without initialized vars.
    test_case.assertRaises(errors_impl.OpError, metric.eval)
    test_case.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()

    # Run per-step op and assert expected values.
    if math.isnan(expected):
      _assert_nan(test_case, update.eval())
      _assert_nan(test_case, metric.eval())
    else:
      # Floating-point metric, so compare approximately (unlike the
      # exact-equality check in _test_sparse_precision_at_k).
      test_case.assertAlmostEqual(expected, update.eval())
      test_case.assertAlmostEqual(expected, metric.eval())
class SingleLabelSparsePrecisionTest(test.TestCase):
  """sparse_precision_at_k tests where each example has exactly one label."""

  def setUp(self):
    self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
    indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
    class_labels = (3, 2)
    # Sparse vs dense, and 1d vs 2d labels should all be handled the same.
    self._labels = (
        _binary_2d_label_to_1d_sparse_value(indicator_labels),
        _binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
            class_labels, dtype=np.int64), np.array(
                [[class_id] for class_id in class_labels], dtype=np.int64))
    # Bind the module-level helpers to this test case so tests can call them
    # without passing `test_case` explicitly.
    self._test_sparse_precision_at_k = functools.partial(
        _test_sparse_precision_at_k, test_case=self)
    self._test_sparse_average_precision_at_k = functools.partial(
        _test_sparse_average_precision_at_k, test_case=self)

  def test_at_k1_nan(self):
    """Precision@1 is NaN for classes that receive no predictions."""
    for labels in self._labels:
      # Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
      for class_id in (-1, 0, 1, 2, 4):
        self._test_sparse_precision_at_k(
            self._predictions, labels, k=1, expected=NAN, class_id=class_id)

  def test_at_k1(self):
    """Precision@1 per class and across all classes."""
    for labels in self._labels:
      # Class 3: 1 label, 2 predictions, 1 correct.
      self._test_sparse_precision_at_k(
          self._predictions, labels, k=1, expected=1.0 / 2, class_id=3)

      # All classes: 2 labels, 2 predictions, 1 correct.
      self._test_sparse_precision_at_k(
          self._predictions, labels, k=1, expected=1.0 / 2)
class MultiLabelSparsePrecisionTest(test.TestCase):
  def setUp(self):
    # Bind the module-level helpers to this test case so each test can call
    # them without passing `test_case` explicitly.
    self._test_sparse_precision_at_k = functools.partial(
        _test_sparse_precision_at_k, test_case=self)
    self._test_sparse_average_precision_at_k = functools.partial(
        _test_sparse_average_precision_at_k, test_case=self)
  def test_average_precision(self):
    """Precision@k and average-precision@k on two worked examples."""
    # Example 1.
    # Matches example here:
    # fastml.com/what-you-wanted-to-know-about-mean-average-precision
    labels_ex1 = (0, 1, 2, 3, 4)
    labels = np.array([labels_ex1], dtype=np.int64)
    predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
    predictions = (predictions_ex1,)
    # Hand-computed precision@k for k = 1..4, and the running average.
    precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
    avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
                         (precision_ex1[1] + precision_ex1[3]) / 4)
    for i in xrange(4):
      k = i + 1
      self._test_sparse_precision_at_k(
          predictions, labels, k, expected=precision_ex1[i])
      self._test_sparse_average_precision_at_k(
          predictions, labels, k, expected=avg_precision_ex1[i])

    # Example 2.
    labels_ex2 = (0, 2, 4, 5, 6)
    labels = np.array([labels_ex2], dtype=np.int64)
    predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
    predictions = (predictions_ex2,)
    precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
    avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
                         (precision_ex2[2] + precision_ex2[3]) / 4)
    for i in xrange(4):
      k = i + 1
      self._test_sparse_precision_at_k(
          predictions, labels, k, expected=precision_ex2[i])
      self._test_sparse_average_precision_at_k(
          predictions, labels, k, expected=avg_precision_ex2[i])

    # Both examples, we expect both precision and average precision to be the
    # average of the 2 examples.
    labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
    predictions = (predictions_ex1, predictions_ex2)
    streaming_precision = [(ex1 + ex2) / 2
                           for ex1, ex2 in zip(precision_ex1, precision_ex2)]
    streaming_average_precision = [
        (ex1 + ex2) / 2
        for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
    ]
    for i in xrange(4):
      k = i + 1
      self._test_sparse_precision_at_k(
          predictions, labels, k, expected=streaming_precision[i])
      self._test_sparse_average_precision_at_k(
          predictions, labels, k, expected=streaming_average_precision[i])

    # Weighted examples, we expect streaming average precision to be the
    # weighted average of the 2 examples.
    weights = (0.3, 0.6)
    streaming_average_precision = [
        (weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
        for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
    ]
    for i in xrange(4):
      k = i + 1
      self._test_sparse_average_precision_at_k(
          predictions,
          labels,
          k,
          expected=streaming_average_precision[i],
          weights=weights)
  def test_average_precision_some_labels_out_of_range(self):
    """Tests that labels outside the [0, n_classes) range are ignored."""
    # Same as example 1 in test_average_precision, plus out-of-range labels
    # -1 and 7 that must not change any of the expected values.
    labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
    labels = np.array([labels_ex1], dtype=np.int64)
    predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
    predictions = (predictions_ex1,)
    precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
    avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
                         (precision_ex1[1] + precision_ex1[3]) / 4)
    for i in xrange(4):
      k = i + 1
      self._test_sparse_precision_at_k(
          predictions, labels, k, expected=precision_ex1[i])
      self._test_sparse_average_precision_at_k(
          predictions, labels, k, expected=avg_precision_ex1[i])
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
sparse_labels = _binary_2d_label_to_2d_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
  def test_three_labels_at_k5(self):
    """Precision@5 per class and overall, for sparse and dense labels."""
    predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                   [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    sparse_labels = _binary_2d_label_to_2d_sparse_value(
        [[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
    dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)

    for labels in (sparse_labels, dense_labels):
      # Class 2: 2 labels, 2 correct predictions.
      self._test_sparse_precision_at_k(
          predictions, labels, k=5, expected=2.0 / 2, class_id=2)

      # Class 5: 1 label, 1 correct prediction.
      self._test_sparse_precision_at_k(
          predictions, labels, k=5, expected=1.0 / 1, class_id=5)

      # Class 7: 1 label, 1 incorrect prediction.
      self._test_sparse_precision_at_k(
          predictions, labels, k=5, expected=0.0 / 1, class_id=7)

      # All classes: 10 predictions, 3 correct.
      self._test_sparse_precision_at_k(
          predictions, labels, k=5, expected=3.0 / 10)
  def test_three_labels_at_k5_some_out_of_range(self):
    """Tests that labels outside the [0, n_classes) range are ignored."""
    predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
                   [0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
    sp_labels = sparse_tensor.SparseTensorValue(
        indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
                 [1, 3]],
        # values -1 and 10 are outside the [0, n_classes) range and are ignored.
        values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
        dense_shape=[2, 4])

    # Expected values are identical to test_three_labels_at_k5: the
    # out-of-range labels must have no effect.
    # Class 2: 2 labels, 2 correct predictions.
    self._test_sparse_precision_at_k(
        predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)

    # Class 5: 1 label, 1 correct prediction.
    self._test_sparse_precision_at_k(
        predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)

    # Class 7: 1 label, 1 incorrect prediction.
    self._test_sparse_precision_at_k(
        predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)

    # All classes: 10 predictions, 3 correct.
    self._test_sparse_precision_at_k(
        predictions, sp_labels, k=5, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 predictions, all correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def _test_recall_at_k(predictions,
                      labels,
                      k,
                      expected,
                      class_id=None,
                      weights=None,
                      test_case=None):
  """Builds metrics.recall_at_k in a fresh graph and verifies both ops.

  Checks that evaluating either op before local-variable initialization
  raises, then that a single update produces `expected` (or NaN) from both
  the update op and the value op.
  """
  with ops.Graph().as_default() as g, test_case.test_session(g):
    if weights is not None:
      weights = constant_op.constant(weights, dtypes_lib.float32)
    prediction_tensor = constant_op.constant(predictions, dtypes_lib.float32)
    metric, update = metrics.recall_at_k(
        predictions=prediction_tensor,
        labels=labels,
        k=k,
        class_id=class_id,
        weights=weights)
    # Both ops must fail while the metric's local variables are uninitialized.
    test_case.assertRaises(errors_impl.OpError, metric.eval)
    test_case.assertRaises(errors_impl.OpError, update.eval)
    variables.variables_initializer(variables.local_variables()).run()
    # A single update should yield the expected value from both ops.
    if math.isnan(expected):
      _assert_nan(test_case, update.eval())
      _assert_nan(test_case, metric.eval())
    else:
      test_case.assertEqual(expected, update.eval())
      test_case.assertEqual(expected, metric.eval())
class SingleLabelRecallAtKTest(test.TestCase):
  """Tests metrics.recall_at_k where each example carries exactly one label."""
  def setUp(self):
    # Two examples over 4 classes; top-1 predictions are classes 3 and 3.
    self._predictions = ((0.1, 0.3, 0.2, 0.4), (0.1, 0.2, 0.3, 0.4))
    indicator_labels = ((0, 0, 0, 1), (0, 0, 1, 0))
    class_labels = (3, 2)
    # Sparse vs dense, and 1d vs 2d labels should all be handled the same.
    self._labels = (
        _binary_2d_label_to_1d_sparse_value(indicator_labels),
        _binary_2d_label_to_2d_sparse_value(indicator_labels), np.array(
            class_labels, dtype=np.int64), np.array(
                [[class_id] for class_id in class_labels], dtype=np.int64))
    # Bind this test case into the module-level helper.
    self._test_recall_at_k = functools.partial(
        _test_recall_at_k, test_case=self)
  def test_at_k1_nan(self):
    """Recall is NaN when a class has no labels (or is out of range)."""
    # Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
    # range.
    for labels in self._labels:
      for class_id in (-1, 0, 1, 4):
        self._test_recall_at_k(
            self._predictions, labels, k=1, expected=NAN, class_id=class_id)
  def test_at_k1_no_predictions(self):
    """Recall is 0 for a labeled class that receives no predictions."""
    for labels in self._labels:
      # Class 2: 0 predictions.
      self._test_recall_at_k(
          self._predictions, labels, k=1, expected=0.0, class_id=2)
  def test_one_label_at_k1(self):
    for labels in self._labels:
      # Class 3: 1 label, 2 predictions, 1 correct.
      self._test_recall_at_k(
          self._predictions, labels, k=1, expected=1.0 / 1, class_id=3)
      # All classes: 2 labels, 2 predictions, 1 correct.
      self._test_recall_at_k(self._predictions, labels, k=1, expected=1.0 / 2)
  def test_one_label_at_k1_weighted(self):
    """Per-example weights scale (or NaN out) the recall denominator."""
    predictions = self._predictions
    for labels in self._labels:
      # Class 3: 1 label, 2 predictions, 1 correct.
      # Zero total weight makes recall undefined (NaN).
      self._test_recall_at_k(
          predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=1.0 / 1,
          class_id=3,
          weights=(1.0,))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=1.0 / 1,
          class_id=3,
          weights=(2.0,))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=NAN,
          class_id=3,
          weights=(0.0, 0.0))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=NAN,
          class_id=3,
          weights=(0.0, 1.0))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=1.0 / 1,
          class_id=3,
          weights=(1.0, 0.0))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=1.0 / 1,
          class_id=3,
          weights=(1.0, 1.0))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=2.0 / 2,
          class_id=3,
          weights=(2.0, 3.0))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=3.0 / 3,
          class_id=3,
          weights=(3.0, 2.0))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=0.3 / 0.3,
          class_id=3,
          weights=(0.3, 0.6))
      self._test_recall_at_k(
          predictions,
          labels,
          k=1,
          expected=0.6 / 0.6,
          class_id=3,
          weights=(0.6, 0.3))
      # All classes: 2 labels, 2 predictions, 1 correct.
      self._test_recall_at_k(
          predictions, labels, k=1, expected=NAN, weights=(0.0,))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
      self._test_recall_at_k(
          predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
class MultiLabel2dRecallAtKTest(test.TestCase):
  """Tests metrics.recall_at_k with multiple labels per 2-D example."""
  def setUp(self):
    self._predictions = ((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
                         (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6))
    indicator_labels = ((0, 0, 1, 0, 0, 0, 0, 1, 1, 0),
                        (0, 1, 1, 0, 0, 1, 0, 0, 0, 0))
    class_labels = ((2, 7, 8), (1, 2, 5))
    # Sparse vs dense labels should be handled the same.
    self._labels = (_binary_2d_label_to_2d_sparse_value(indicator_labels),
                    np.array(
                        class_labels, dtype=np.int64))
    # Bind this test case into the module-level helper.
    self._test_recall_at_k = functools.partial(
        _test_recall_at_k, test_case=self)
  def test_at_k5_nan(self):
    """Recall is NaN when a class has no labels (or is out of range)."""
    for labels in self._labels:
      # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
      for class_id in (0, 3, 4, 6, 9, 10):
        self._test_recall_at_k(
            self._predictions, labels, k=5, expected=NAN, class_id=class_id)
  def test_at_k5_no_predictions(self):
    for labels in self._labels:
      # Class 8: 1 label, no predictions.
      self._test_recall_at_k(
          self._predictions, labels, k=5, expected=0.0 / 1, class_id=8)
  def test_at_k5(self):
    for labels in self._labels:
      # Class 2: 2 labels, both correct.
      self._test_recall_at_k(
          self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correctly predicted.
      self._test_recall_at_k(
          self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
      # Class 7: 1 label, incorrect.
      self._test_recall_at_k(
          self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
      # All classes: 6 labels, 3 correct.
      self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 6)
  def test_at_k5_some_out_of_range(self):
    """Tests that labels outside the [0, n_classes) count in denominator."""
    labels = sparse_tensor.SparseTensorValue(
        indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
                 [1, 3]],
        # values -1 and 10 are outside the [0, n_classes) range.
        values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
        dense_shape=[2, 4])
    # Class 2: 2 labels, both correct.
    self._test_recall_at_k(
        self._predictions, labels, k=5, expected=2.0 / 2, class_id=2)
    # Class 5: 1 label, correctly predicted.
    self._test_recall_at_k(
        self._predictions, labels, k=5, expected=1.0 / 1, class_id=5)
    # Class 7: 1 label, incorrect.
    self._test_recall_at_k(
        self._predictions, labels, k=5, expected=0.0 / 1, class_id=7)
    # All classes: 8 labels, 3 correct.
    self._test_recall_at_k(self._predictions, labels, k=5, expected=3.0 / 8)
class MultiLabel3dRecallAtKTest(test.TestCase):
  """Tests metrics.recall_at_k on 3-D (batch x timestep) sparse labels."""
  def setUp(self):
    self._predictions = (((0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9),
                          (0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6)),
                         ((0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6),
                          (0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9)))
    # Note: We don't test dense labels here, since examples have different
    # numbers of labels.
    self._labels = _binary_3d_label_to_sparse_value(((
        (0, 0, 1, 0, 0, 0, 0, 1, 1, 0), (0, 1, 1, 0, 0, 1, 0, 0, 0, 0)), (
            (0, 1, 1, 0, 0, 1, 0, 1, 0, 0), (0, 0, 1, 0, 0, 0, 0, 0, 1, 0))))
    # Bind this test case into the module-level helper.
    self._test_recall_at_k = functools.partial(
        _test_recall_at_k, test_case=self)
  def test_3d_nan(self):
    """Recall is NaN for unlabeled or out-of-range classes."""
    # Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
    for class_id in (0, 3, 4, 6, 9, 10):
      self._test_recall_at_k(
          self._predictions, self._labels, k=5, expected=NAN, class_id=class_id)
  def test_3d_no_predictions(self):
    """Recall is 0 for labeled classes that receive no predictions."""
    # Classes 1,8 have 0 predictions, >=1 label.
    for class_id in (1, 8):
      self._test_recall_at_k(
          self._predictions, self._labels, k=5, expected=0.0, class_id=class_id)
  def test_3d(self):
    # Class 2: 4 labels, all correct.
    self._test_recall_at_k(
        self._predictions, self._labels, k=5, expected=4.0 / 4, class_id=2)
    # Class 5: 2 labels, both correct.
    self._test_recall_at_k(
        self._predictions, self._labels, k=5, expected=2.0 / 2, class_id=5)
    # Class 7: 2 labels, 1 incorrect.
    self._test_recall_at_k(
        self._predictions, self._labels, k=5, expected=1.0 / 2, class_id=7)
    # All classes: 12 labels, 7 correct.
    self._test_recall_at_k(
        self._predictions, self._labels, k=5, expected=7.0 / 12)
  def test_3d_ignore_all(self):
    """All-zero weights make recall undefined (NaN) for every class."""
    for class_id in xrange(10):
      self._test_recall_at_k(
          self._predictions,
          self._labels,
          k=5,
          expected=NAN,
          class_id=class_id,
          weights=[[0], [0]])
      self._test_recall_at_k(
          self._predictions,
          self._labels,
          k=5,
          expected=NAN,
          class_id=class_id,
          weights=[[0, 0], [0, 0]])
    self._test_recall_at_k(
        self._predictions, self._labels, k=5, expected=NAN, weights=[[0], [0]])
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=NAN,
        weights=[[0, 0], [0, 0]])
  def test_3d_ignore_some(self):
    """Weights mask out individual examples and timesteps."""
    # Class 2: 2 labels, both correct.
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=2.0 / 2.0,
        class_id=2,
        weights=[[1], [0]])
    # Class 2: 2 labels, both correct.
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=2.0 / 2.0,
        class_id=2,
        weights=[[0], [1]])
    # Class 7: 1 label, correct.
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=1.0 / 1.0,
        class_id=7,
        weights=[[0], [1]])
    # Class 7: 1 label, incorrect.
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=0.0 / 1.0,
        class_id=7,
        weights=[[1], [0]])
    # Class 7: 2 labels, 1 correct.
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=1.0 / 2.0,
        class_id=7,
        weights=[[1, 0], [1, 0]])
    # Class 7: No labels.
    self._test_recall_at_k(
        self._predictions,
        self._labels,
        k=5,
        expected=NAN,
        class_id=7,
        weights=[[0, 1], [0, 1]])
class MeanAbsoluteErrorTest(test.TestCase):
  """Tests for the metrics.mean_absolute_error streaming metric."""

  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()

  def testVars(self):
    """The metric should create its total/count local variables."""
    metrics.mean_absolute_error(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_local_variables(self, ('mean_absolute_error/count:0',
                                   'mean_absolute_error/total:0'))

  def testMetricsCollection(self):
    """The value op should land in the requested metrics collection."""
    collection_key = '__metrics__'
    value_op, _ = metrics.mean_absolute_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [value_op])

  def testUpdatesCollection(self):
    """The update op should land in the requested updates collection."""
    collection_key = '__updates__'
    _, update_op = metrics.mean_absolute_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value op must not change the accumulated state."""
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.mean_absolute_error(labels, predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several batches.
      for _ in range(10):
        sess.run(update_op)
      # Repeated evaluations of the value op must all agree.
      stable_error = error.eval()
      for _ in range(10):
        self.assertEqual(stable_error, error.eval())

  def testSingleUpdateWithErrorAndWeights(self):
    """Weighted absolute error over a single batch."""
    predictions = constant_op.constant(
        [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
    weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
    error, update_op = metrics.mean_absolute_error(labels, predictions, weights)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Only elements 1 and 3 carry weight: |4-3| = 1 and |8-3| = 5, mean 3.
      self.assertEqual(3, sess.run(update_op))
      self.assertEqual(3, error.eval())
class MeanRelativeErrorTest(test.TestCase):
  """Tests for the metrics.mean_relative_error streaming metric."""

  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()

  def testVars(self):
    """The metric should create its total/count local variables."""
    metrics.mean_relative_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        normalizer=array_ops.ones((10, 1)))
    _assert_local_variables(self, ('mean_relative_error/count:0',
                                   'mean_relative_error/total:0'))

  def testMetricsCollection(self):
    """The value op should land in the requested metrics collection."""
    collection_key = '__metrics__'
    value_op, _ = metrics.mean_relative_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        normalizer=array_ops.ones((10, 1)),
        metrics_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [value_op])

  def testUpdatesCollection(self):
    """The update op should land in the requested updates collection."""
    collection_key = '__updates__'
    _, update_op = metrics.mean_relative_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        normalizer=array_ops.ones((10, 1)),
        updates_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value op must not change the accumulated state."""
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    normalizer = random_ops.random_normal((10, 3), seed=3)
    error, update_op = metrics.mean_relative_error(labels, predictions,
                                                   normalizer)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several batches.
      for _ in range(10):
        sess.run(update_op)
      # Repeated evaluations of the value op must all agree.
      stable_error = error.eval()
      for _ in range(10):
        self.assertEqual(stable_error, error.eval())

  def testSingleUpdateNormalizedByLabels(self):
    """Relative error normalized by the label values themselves."""
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
    # Reference value computed with numpy: mean(|p - l| / l).
    expected_error = np.mean(
        np.absolute(np_predictions - np_labels) / np_labels)
    predictions = constant_op.constant(
        np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(np_labels, shape=(1, 4))
    error, update_op = metrics.mean_relative_error(
        labels, predictions, normalizer=labels)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(expected_error, sess.run(update_op))
      self.assertEqual(expected_error, error.eval())

  def testSingleUpdateNormalizedByZeros(self):
    """A zero normalizer must yield zero error rather than NaN/inf."""
    np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
    predictions = constant_op.constant(
        np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_relative_error(
        labels, predictions, normalizer=array_ops.zeros_like(labels))
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0.0, update_op.eval())
      self.assertEqual(0.0, error.eval())
class MeanSquaredErrorTest(test.TestCase):
  """Tests for the metrics.mean_squared_error streaming metric."""
  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()
  def testVars(self):
    """The metric should create its total/count local variables."""
    metrics.mean_squared_error(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_local_variables(self, ('mean_squared_error/count:0',
                                   'mean_squared_error/total:0'))
  def testMetricsCollection(self):
    """The value op should land in the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op should land in the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value op must not change the accumulated state."""
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.mean_squared_error(labels, predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
  def testSingleUpdateZeroError(self):
    """Identical predictions and labels give zero error."""
    predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
    labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_squared_error(labels, predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())
  def testSingleUpdateWithError(self):
    # Squared errors are 1, 1 and 16; mean is 6.
    predictions = constant_op.constant(
        [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_squared_error(labels, predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(6, sess.run(update_op))
      self.assertEqual(6, error.eval())
  def testSingleUpdateWithErrorAndWeights(self):
    # Only elements 1 and 3 carry weight: (4-3)^2 = 1 and (8-3)^2 = 25,
    # mean is 13.
    predictions = constant_op.constant(
        [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
    weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
    error, update_op = metrics.mean_squared_error(labels, predictions, weights)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(13, sess.run(update_op))
      self.assertEqual(13, error.eval())
  def testMultipleBatchesOfSizeOne(self):
    """The metric accumulates across successive dequeued batches."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue, [10, 8, 6])
      _enqueue_vector(sess, preds_queue, [-4, 3, -1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue, [1, 3, 2])
      _enqueue_vector(sess, labels_queue, [2, 4, 6])
      labels = labels_queue.dequeue()
      error, update_op = metrics.mean_squared_error(labels, predictions)
      sess.run(variables.local_variables_initializer())
      # First run consumes batch 1; the asserted second run folds in batch 2,
      # so 208/6 is the mean over all 6 squared errors.
      sess.run(update_op)
      self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
      self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
  def testMetricsComputedConcurrently(self):
    """Two independently-named metrics keep separate accumulator state."""
    with self.test_session() as sess:
      # Create the queue that populates one set of predictions.
      preds_queue0 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue0, [10, 8, 6])
      _enqueue_vector(sess, preds_queue0, [-4, 3, -1])
      predictions0 = preds_queue0.dequeue()
      # Create the queue that populates one set of predictions.
      preds_queue1 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue1, [0, 1, 1])
      _enqueue_vector(sess, preds_queue1, [1, 1, 0])
      predictions1 = preds_queue1.dequeue()
      # Create the queue that populates one set of labels.
      labels_queue0 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue0, [1, 3, 2])
      _enqueue_vector(sess, labels_queue0, [2, 4, 6])
      labels0 = labels_queue0.dequeue()
      # Create the queue that populates another set of labels.
      labels_queue1 = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue1, [-5, -3, -1])
      _enqueue_vector(sess, labels_queue1, [5, 4, 3])
      labels1 = labels_queue1.dequeue()
      mse0, update_op0 = metrics.mean_squared_error(
          labels0, predictions0, name='msd0')
      mse1, update_op1 = metrics.mean_squared_error(
          labels1, predictions1, name='msd1')
      sess.run(variables.local_variables_initializer())
      sess.run([update_op0, update_op1])
      sess.run([update_op0, update_op1])
      mse0, mse1 = sess.run([mse0, mse1])
      self.assertAlmostEqual(208.0 / 6, mse0, 5)
      self.assertAlmostEqual(79.0 / 6, mse1, 5)
  def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
    """MAE and MSE can share the same dequeued inputs."""
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, preds_queue, [10, 8, 6])
      _enqueue_vector(sess, preds_queue, [-4, 3, -1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          2, dtypes=dtypes_lib.float32, shapes=(1, 3))
      _enqueue_vector(sess, labels_queue, [1, 3, 2])
      _enqueue_vector(sess, labels_queue, [2, 4, 6])
      labels = labels_queue.dequeue()
      mae, ma_update_op = metrics.mean_absolute_error(labels, predictions)
      mse, ms_update_op = metrics.mean_squared_error(labels, predictions)
      sess.run(variables.local_variables_initializer())
      sess.run([ma_update_op, ms_update_op])
      sess.run([ma_update_op, ms_update_op])
      self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
      self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)
class RootMeanSquaredErrorTest(test.TestCase):
  """Tests for the metrics.root_mean_squared_error streaming metric."""

  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()

  def testVars(self):
    """The metric should create its total/count local variables."""
    metrics.root_mean_squared_error(
        predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
    _assert_local_variables(self, ('root_mean_squared_error/count:0',
                                   'root_mean_squared_error/total:0'))

  def testMetricsCollection(self):
    """The value op should land in the requested metrics collection."""
    collection_key = '__metrics__'
    value_op, _ = metrics.root_mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        metrics_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [value_op])

  def testUpdatesCollection(self):
    """The update op should land in the requested updates collection."""
    collection_key = '__updates__'
    _, update_op = metrics.root_mean_squared_error(
        predictions=array_ops.ones((10, 1)),
        labels=array_ops.ones((10, 1)),
        updates_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [update_op])

  def testValueTensorIsIdempotent(self):
    """Evaluating the value op must not change the accumulated state."""
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.root_mean_squared_error(labels, predictions)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Accumulate several batches.
      for _ in range(10):
        sess.run(update_op)
      # Repeated evaluations of the value op must all agree.
      stable_error = error.eval()
      for _ in range(10):
        self.assertEqual(stable_error, error.eval())

  def testSingleUpdateZeroError(self):
    """Identical predictions and labels give an RMSE of exactly zero."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          0.0, shape=(1, 3), dtype=dtypes_lib.float32)
      labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
      error, update_op = metrics.root_mean_squared_error(labels, predictions)
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, update_op.eval())
      self.assertEqual(0, error.eval())

  def testSingleUpdateWithError(self):
    """Squared errors are 1, 1 and 16; mean 6, so RMSE is sqrt(6)."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
      labels = constant_op.constant(
          [1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
      error, update_op = metrics.root_mean_squared_error(labels, predictions)
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(math.sqrt(6), sess.run(update_op), 5)
      self.assertAlmostEqual(math.sqrt(6), error.eval(), 5)

  def testSingleUpdateWithErrorAndWeights(self):
    """Weighted squared errors are 1 and 25; mean 13, RMSE sqrt(13)."""
    with self.test_session() as sess:
      predictions = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
      labels = constant_op.constant(
          [1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
      weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
      error, update_op = metrics.root_mean_squared_error(labels, predictions,
                                                         weights)
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(math.sqrt(13), update_op.eval())
      self.assertAlmostEqual(math.sqrt(13), error.eval(), 5)
def _reweight(predictions, labels, weights):
return (np.concatenate([[p] * int(w) for p, w in zip(predictions, weights)]),
np.concatenate([[l] * int(w) for l, w in zip(labels, weights)]))
class MeanCosineDistanceTest(test.TestCase):
  """Tests for the metrics.mean_cosine_distance streaming metric."""
  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()
  def testVars(self):
    """The metric should create its total/count local variables."""
    metrics.mean_cosine_distance(
        predictions=array_ops.ones((10, 3)),
        labels=array_ops.ones((10, 3)),
        dim=1)
    _assert_local_variables(self, (
        'mean_cosine_distance/count:0',
        'mean_cosine_distance/total:0',))
  def testMetricsCollection(self):
    """The value op should land in the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean, _ = metrics.mean_cosine_distance(
        predictions=array_ops.ones((10, 3)),
        labels=array_ops.ones((10, 3)),
        dim=1,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean])
  def testUpdatesCollection(self):
    """The update op should land in the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.mean_cosine_distance(
        predictions=array_ops.ones((10, 3)),
        labels=array_ops.ones((10, 3)),
        dim=1,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
  def testValueTensorIsIdempotent(self):
    """Evaluating the value op must not change the accumulated state."""
    predictions = random_ops.random_normal((10, 3), seed=1)
    labels = random_ops.random_normal((10, 3), seed=2)
    error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=1)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_error = error.eval()
      for _ in range(10):
        self.assertEqual(initial_error, error.eval())
  def testSingleUpdateZeroError(self):
    """Identical unit vectors have zero cosine distance."""
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
    predictions = constant_op.constant(
        np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())
  def testSingleUpdateWithError1(self):
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
    np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Asserted mean distance across the three rows is 1.
      self.assertAlmostEqual(1, sess.run(update_op), 5)
      self.assertAlmostEqual(1, error.eval(), 5)
  def testSingleUpdateWithError2(self):
    np_predictions = np.matrix(
        ('0.819031913261206 0.567041924552012 0.087465312324590;'
         '-0.665139432070255 -0.739487441769973 -0.103671883216994;'
         '0.707106781186548 -0.707106781186548 0'))
    np_labels = np.matrix(
        ('0.819031913261206 0.567041924552012 0.087465312324590;'
         '0.665139432070255 0.739487441769973 0.103671883216994;'
         '0.707106781186548 0.707106781186548 0'))
    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_cosine_distance(labels, predictions, dim=2)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(1.0, sess.run(update_op), 5)
      self.assertAlmostEqual(1.0, error.eval(), 5)
  def testSingleUpdateWithErrorAndWeights1(self):
    """Only the first (matching) row carries weight, so distance is 0."""
    np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    weights = constant_op.constant(
        [1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_cosine_distance(
        labels, predictions, dim=2, weights=weights)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(0, sess.run(update_op))
      self.assertEqual(0, error.eval())
  def testSingleUpdateWithErrorAndWeights2(self):
    """Only the two mismatching rows carry weight."""
    np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
    np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
    predictions = constant_op.constant(
        np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    labels = constant_op.constant(
        np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
    weights = constant_op.constant(
        [0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
    error, update_op = metrics.mean_cosine_distance(
        labels, predictions, dim=2, weights=weights)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1.5, update_op.eval())
      self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
  """Tests for the metrics.percentage_below streaming metric."""

  def setUp(self):
    # Start every test from an empty default graph.
    ops.reset_default_graph()

  def testVars(self):
    """The metric should create its total/count local variables."""
    metrics.percentage_below(values=array_ops.ones((10,)), threshold=2)
    _assert_local_variables(self, (
        'percentage_below_threshold/count:0',
        'percentage_below_threshold/total:0',))

  def testMetricsCollection(self):
    """The value op should land in the requested metrics collection."""
    collection_key = '__metrics__'
    value_op, _ = metrics.percentage_below(
        values=array_ops.ones((10,)),
        threshold=2,
        metrics_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [value_op])

  def testUpdatesCollection(self):
    """The update op should land in the requested updates collection."""
    collection_key = '__updates__'
    _, update_op = metrics.percentage_below(
        values=array_ops.ones((10,)),
        threshold=2,
        updates_collections=[collection_key])
    self.assertListEqual(ops.get_collection(collection_key), [update_op])

  def testOneUpdate(self):
    """Thresholds admitting all, some and none of the values."""
    with self.test_session() as sess:
      values = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
      pcnt0, update_op0 = metrics.percentage_below(values, 100, name='high')
      pcnt1, update_op1 = metrics.percentage_below(values, 7, name='medium')
      pcnt2, update_op2 = metrics.percentage_below(values, 1, name='low')
      sess.run(variables.local_variables_initializer())
      sess.run([update_op0, update_op1, update_op2])
      results = sess.run([pcnt0, pcnt1, pcnt2])
      # All 4, 3 of 4, and none of the values fall below the thresholds.
      self.assertAlmostEqual(1.0, results[0], 5)
      self.assertAlmostEqual(0.75, results[1], 5)
      self.assertAlmostEqual(0.0, results[2], 5)

  def testSomePresentOneUpdate(self):
    """Only weighted values contribute to the percentage."""
    with self.test_session() as sess:
      values = constant_op.constant(
          [2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
      # Only the first and last value carry weight.
      weights = constant_op.constant(
          [1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
      pcnt0, update_op0 = metrics.percentage_below(
          values, 100, weights=weights, name='high')
      pcnt1, update_op1 = metrics.percentage_below(
          values, 7, weights=weights, name='medium')
      pcnt2, update_op2 = metrics.percentage_below(
          values, 1, weights=weights, name='low')
      sess.run(variables.local_variables_initializer())
      self.assertListEqual([1.0, 0.5, 0.0],
                           sess.run([update_op0, update_op1, update_op2]))
      results = sess.run([pcnt0, pcnt1, pcnt2])
      self.assertAlmostEqual(1.0, results[0], 5)
      self.assertAlmostEqual(0.5, results[1], 5)
      self.assertAlmostEqual(0.0, results[2], 5)
class MeanIOUTest(test.TestCase):
  """Tests for metrics.mean_iou (mean intersection-over-union per class)."""

  def setUp(self):
    # Fixed numpy seed for reproducibility; fresh graph per test.
    np.random.seed(1)
    ops.reset_default_graph()

  def testVars(self):
    """The metric creates the accumulated confusion-matrix local variable."""
    metrics.mean_iou(
        predictions=array_ops.ones([10, 1]),
        labels=array_ops.ones([10, 1]),
        num_classes=2)
    _assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))

  def testMetricsCollections(self):
    """The value tensor is added to the requested metrics collection."""
    my_collection_name = '__metrics__'
    mean_iou, _ = metrics.mean_iou(
        predictions=array_ops.ones([10, 1]),
        labels=array_ops.ones([10, 1]),
        num_classes=2,
        metrics_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])

  def testUpdatesCollection(self):
    """The update op is added to the requested updates collection."""
    my_collection_name = '__updates__'
    _, update_op = metrics.mean_iou(
        predictions=array_ops.ones([10, 1]),
        labels=array_ops.ones([10, 1]),
        num_classes=2,
        updates_collections=[my_collection_name])
    self.assertListEqual(ops.get_collection(my_collection_name), [update_op])

  def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
    """Mismatched prediction/label shapes are rejected at graph-build time."""
    predictions = array_ops.ones([10, 3])
    labels = array_ops.ones([10, 4])
    with self.assertRaises(ValueError):
      metrics.mean_iou(labels, predictions, num_classes=2)

  def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
    """Weights that cannot broadcast to the labels are rejected."""
    predictions = array_ops.ones([10])
    labels = array_ops.ones([10])
    weights = array_ops.zeros([9])
    with self.assertRaises(ValueError):
      metrics.mean_iou(labels, predictions, num_classes=2, weights=weights)

  def testValueTensorIsIdempotent(self):
    """Evaluating the value tensor must not change the accumulated state."""
    num_classes = 3
    predictions = random_ops.random_uniform(
        [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
    labels = random_ops.random_uniform(
        [10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
    mean_iou, update_op = metrics.mean_iou(
        labels, predictions, num_classes=num_classes)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      # Run several updates.
      for _ in range(10):
        sess.run(update_op)
      # Then verify idempotency.
      initial_mean_iou = mean_iou.eval()
      for _ in range(10):
        self.assertEqual(initial_mean_iou, mean_iou.eval())

  def testMultipleUpdates(self):
    """Accumulate five (prediction, label) pairs and check the mean IOU."""
    num_classes = 3
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [2])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [2])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
      sess.run(variables.local_variables_initializer())
      for _ in range(5):
        sess.run(update_op)
      # Per-class IOU: class 0 -> 1/2, class 1 -> 1/4, class 2 -> 0.
      desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
      self.assertEqual(desired_output, miou.eval())

  def testMultipleUpdatesWithWeights(self):
    """Zero-weighted examples are excluded from the confusion matrix."""
    num_classes = 2
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      preds_queue = data_flow_ops.FIFOQueue(
          6, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      labels_queue = data_flow_ops.FIFOQueue(
          6, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      # Create the queue that populates the weights.
      weights_queue = data_flow_ops.FIFOQueue(
          6, dtypes=dtypes_lib.float32, shapes=(1, 1))
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [0.0])
      _enqueue_vector(sess, weights_queue, [1.0])
      _enqueue_vector(sess, weights_queue, [0.0])
      weights = weights_queue.dequeue()
      mean_iou, update_op = metrics.mean_iou(
          labels, predictions, num_classes, weights=weights)
      variables.local_variables_initializer().run()
      for _ in range(6):
        sess.run(update_op)
      desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
      self.assertAlmostEqual(desired_output, mean_iou.eval())

  def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
    # one class, and thus there is one row and one column with
    # zero entries in the confusion matrix.
    num_classes = 3
    with self.test_session() as sess:
      # Create the queue that populates the predictions.
      # There is no prediction for class 2.
      preds_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, preds_queue, [0])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [1])
      _enqueue_vector(sess, preds_queue, [0])
      predictions = preds_queue.dequeue()
      # Create the queue that populates the labels.
      # There is no label for class 2.
      labels_queue = data_flow_ops.FIFOQueue(
          5, dtypes=dtypes_lib.int32, shapes=(1, 1))
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [1])
      _enqueue_vector(sess, labels_queue, [0])
      _enqueue_vector(sess, labels_queue, [1])
      labels = labels_queue.dequeue()
      miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
      sess.run(variables.local_variables_initializer())
      for _ in range(5):
        sess.run(update_op)
      # The empty class-2 row/column contributes an IOU of 0.
      desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
      self.assertAlmostEqual(desired_output, miou.eval())

  def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
    """The update op itself evaluates to the accumulated confusion matrix."""
    predictions = array_ops.concat(
        [
            constant_op.constant(
                0, shape=[5]), constant_op.constant(
                    1, shape=[5])
        ],
        0)
    labels = array_ops.concat(
        [
            constant_op.constant(
                0, shape=[3]), constant_op.constant(
                    1, shape=[7])
        ],
        0)
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
      sess.run(variables.local_variables_initializer())
      confusion_matrix = update_op.eval()
      self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
      desired_miou = np.mean([3. / 5., 5. / 7.])
      self.assertAlmostEqual(desired_miou, miou.eval())

  def testAllCorrect(self):
    """Perfect predictions over a single class give an IOU of 1."""
    predictions = array_ops.zeros([40])
    labels = array_ops.zeros([40])
    num_classes = 1
    with self.test_session() as sess:
      miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertEqual(40, update_op.eval()[0])
      self.assertEqual(1.0, miou.eval())

  def testAllWrong(self):
    """Entirely wrong predictions give an IOU of 0."""
    predictions = array_ops.zeros([40])
    labels = array_ops.ones([40])
    num_classes = 2
    with self.test_session() as sess:
      miou, update_op = metrics.mean_iou(labels, predictions, num_classes)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
      self.assertEqual(0., miou.eval())

  def testResultsWithSomeMissing(self):
    """Zero weights drop the first and last examples from the accumulation."""
    predictions = array_ops.concat(
        [
            constant_op.constant(
                0, shape=[5]), constant_op.constant(
                    1, shape=[5])
        ],
        0)
    labels = array_ops.concat(
        [
            constant_op.constant(
                0, shape=[3]), constant_op.constant(
                    1, shape=[7])
        ],
        0)
    num_classes = 2
    weights = array_ops.concat(
        [
            constant_op.constant(
                0, shape=[1]), constant_op.constant(
                    1, shape=[8]), constant_op.constant(
                        0, shape=[1])
        ],
        0)
    with self.test_session() as sess:
      miou, update_op = metrics.mean_iou(
          labels, predictions, num_classes, weights=weights)
      sess.run(variables.local_variables_initializer())
      self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
      desired_miou = np.mean([2. / 4., 4. / 6.])
      self.assertAlmostEqual(desired_miou, miou.eval())
if __name__ == '__main__':
test.main()
|
{
"content_hash": "2583bb281fc2d49f8b315da7732a3376",
"timestamp": "",
"source": "github",
"line_count": 3395,
"max_line_length": 80,
"avg_line_length": 37.9920471281296,
"alnum_prop": 0.6144608204181946,
"repo_name": "eerwitt/tensorflow",
"id": "91b3a88feb9d17725494525872f1d407b5983fa6",
"size": "129672",
"binary": false,
"copies": "11",
"ref": "refs/heads/master",
"path": "tensorflow/python/kernel_tests/metrics_test.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "2967"
},
{
"name": "C",
"bytes": "94360"
},
{
"name": "C++",
"bytes": "13836767"
},
{
"name": "CMake",
"bytes": "93933"
},
{
"name": "CSS",
"bytes": "774"
},
{
"name": "Go",
"bytes": "85550"
},
{
"name": "HTML",
"bytes": "525038"
},
{
"name": "Java",
"bytes": "56007"
},
{
"name": "JavaScript",
"bytes": "12235"
},
{
"name": "Jupyter Notebook",
"bytes": "1833475"
},
{
"name": "Makefile",
"bytes": "23468"
},
{
"name": "Objective-C",
"bytes": "7056"
},
{
"name": "Objective-C++",
"bytes": "64592"
},
{
"name": "Protocol Buffer",
"bytes": "142519"
},
{
"name": "Python",
"bytes": "13166441"
},
{
"name": "Shell",
"bytes": "262797"
},
{
"name": "TypeScript",
"bytes": "726452"
}
],
"symlink_target": ""
}
|
import pyxb.binding.generate
import pyxb.utils.domutils
from xml.dom import Node
import os.path
# Build an absolute path to the purchase-order schema relative to this file.
schema_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                              '../schemas/po1.xsd'))
# Generate Python binding source for the schema and execute it in this
# module's namespace, defining names such as USAddress and purchaseOrder
# that the tests below reference.
code = pyxb.binding.generate.GeneratePython(schema_location=schema_path)
# Uncomment to dump the generated bindings for inspection:
#file('code.py', 'w').write(code)
rv = compile(code, 'test', 'exec')
eval(rv)
from pyxb.exceptions_ import *
from pyxb.utils import domutils
def ToDOM (instance, tag=None, dom_support=None):
    """Serialize *instance* to DOM and return the document element.

    NOTE(review): *tag* is accepted for call-site compatibility but is
    currently unused by this helper.
    """
    document = instance.toDOM(dom_support)
    return document.documentElement
import unittest
class TestPO1 (unittest.TestCase):
    """Tests for the bindings generated from po1.xsd (purchase orders).

    The binding classes (USAddress, purchaseOrder, CreateFromDocument,
    Namespace) are injected into this module by the eval of the generated
    code above.
    """

    # Simple-content fixture: a two-line street value and its XML/DOM forms.
    street_content = '''95 Main St.
Anytown, AS 12345-6789'''
    street_xml = '<street>%s</street>' % (street_content,)
    street_dom = pyxb.utils.domutils.StringToDOM(street_xml).documentElement

    # Complex-content fixtures for two addresses.
    address1_xml = '<name>Customer</name><street>95 Main St</street>'
    address2_xml = '<name>Sugar Mama</name><street>24 E. Dearling Ave.</street>'

    def tearDown (self):
        # Restore validation defaults in case a test toggled them.
        pyxb.RequireValidWhenGenerating(True)
        pyxb.RequireValidWhenParsing(True)

    def testPythonElementSimpleContent (self):
        """A simple-content element built from Python round-trips to XML."""
        elt = USAddress._ElementMap['street'].elementBinding()(self.street_content)
        self.assertEqual(self.street_content, elt)
        self.assertEqual(ToDOM(elt).toxml("utf-8"), self.street_xml)

    def testDOMElementSimpleContent (self):
        """A simple-content element built from DOM round-trips to XML."""
        elt = USAddress._ElementMap['street'].elementBinding().createFromDOM(self.street_dom)
        self.assertEqual(ToDOM(elt).toxml("utf-8"), self.street_xml)

    def testPythonElementComplexContent_Element (self):
        """Complex content supports keyword, positional and attribute styles."""
        addr = USAddress(name='Customer', street='95 Main St')
        self.assertEqual('95 Main St', addr.street)
        addr = USAddress('Customer', '95 Main St')
        self.assertEqual('95 Main St', addr.street)
        addr.street = '43 West Oak'
        self.assertEqual('43 West Oak', addr.street)
        #self.assertEqual('<s>%s</s>' % (self.address1_xml,), ToDOM(addr, tag='s').toxml("utf-8"))

    def testDOM_CTD_element (self):
        # NB: USAddress is a CTD, not an element.
        xml = '<shipTo>%s</shipTo>' % (self.address1_xml,)
        dom = pyxb.utils.domutils.StringToDOM(xml)
        addr2 = USAddress.Factory(_dom_node=dom.documentElement)
        #self.assertEqual(xml, ToDOM(addr2, tag='shipTo').toxml("utf-8"))

    def testPurchaseOrder (self):
        """Full round trip: build, serialize, reparse, and check locations."""
        po = purchaseOrder(shipTo=USAddress(name='Customer', street='95 Main St'),
                           billTo=USAddress(name='Sugar Mama', street='24 E. Dearling Ave'),
                           comment='Thanks!')
        xml = ToDOM(po).toxml("utf-8")
        xml1 = '<ns1:purchaseOrder xmlns:ns1="http://www.example.com/PO1"><shipTo><name>Customer</name><street>95 Main St</street></shipTo><billTo><name>Sugar Mama</name><street>24 E. Dearling Ave</street></billTo><ns1:comment>Thanks!</ns1:comment></ns1:purchaseOrder>'
        self.assertEqual(xml, xml1)
        dom = pyxb.utils.domutils.StringToDOM(xml)
        po2 = purchaseOrder.createFromDOM(dom.documentElement)
        self.assertEqual(xml1, ToDOM(po2).toxml("utf-8"))
        # Column checks only apply when the binding tracks source locations.
        loc = po2.shipTo._location()
        self.assertTrue((not isinstance(loc, pyxb.utils.utility.Locatable_mixin)) or (58 == loc.columnNumber))
        loc = po2.billTo.name._location()
        self.assertTrue((not isinstance(loc, pyxb.utils.utility.Locatable_mixin)) or (131 == loc.columnNumber))
        po2 = CreateFromDocument(xml)
        self.assertEqual(xml1, ToDOM(po2).toxml("utf-8"))
        loc = po2.shipTo._location()
        self.assertTrue((not isinstance(loc, pyxb.utils.utility.Locatable_mixin)) or (58 == loc.columnNumber))
        loc = po2.billTo.name._location()
        self.assertTrue((not isinstance(loc, pyxb.utils.utility.Locatable_mixin)) or (131 == loc.columnNumber))
        # Serialization with a default namespace avoids the ns1 prefix.
        xml2 = '<purchaseOrder xmlns="http://www.example.com/PO1"><shipTo><name>Customer</name><street>95 Main St</street></shipTo><billTo><name>Sugar Mama</name><street>24 E. Dearling Ave</street></billTo><comment>Thanks!</comment></purchaseOrder>'
        bds = pyxb.utils.domutils.BindingDOMSupport()
        bds.setDefaultNamespace(Namespace)
        self.assertEqual(xml2, ToDOM(po2, dom_support=bds).toxml("utf-8"))

    def testGenerationValidation (self):
        """Validation toggles control whether incomplete documents round-trip."""
        ship_to = USAddress('Robert Smith', 'General Delivery')
        po = purchaseOrder(ship_to)
        self.assertEqual('General Delivery', po.shipTo.street)
        self.assertTrue(po.billTo is None)
        # With validation on, serializing an incomplete order must fail.
        self.assertTrue(pyxb.RequireValidWhenGenerating())
        self.assertRaises(pyxb.DOMGenerationError, po.toxml)
        try:
            pyxb.RequireValidWhenGenerating(False)
            self.assertFalse(pyxb.RequireValidWhenGenerating())
            xmls = po.toxml("utf-8", root_only=True)
            self.assertEqual('<ns1:purchaseOrder xmlns:ns1="http://www.example.com/PO1"><shipTo><street>General Delivery</street><name>Robert Smith</name></shipTo></ns1:purchaseOrder>', xmls)
        finally:
            pyxb.RequireValidWhenGenerating(True)
        # With validation on, parsing the invalid document must fail too.
        self.assertRaises(pyxb.UnrecognizedContentError, CreateFromDocument, xmls)
        self.assertTrue(pyxb.RequireValidWhenParsing())
        try:
            pyxb.RequireValidWhenParsing(False)
            self.assertFalse(pyxb.RequireValidWhenParsing())
            po2 = CreateFromDocument(xmls)
        finally:
            pyxb.RequireValidWhenParsing(True)
        self.assertEqual('General Delivery', po2.shipTo.street)
        self.assertTrue(po2.billTo is None)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "919acf526bf392f33f5376056b08089f",
"timestamp": "",
"source": "github",
"line_count": 119,
"max_line_length": 269,
"avg_line_length": 46.739495798319325,
"alnum_prop": 0.6650485436893204,
"repo_name": "jonfoster/pyxb1",
"id": "bf3c9d2f0c5635eae42a36b08b6241bea6e6e94f",
"size": "5562",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/drivers/test-po1.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Python",
"bytes": "1564427"
},
{
"name": "Shell",
"bytes": "18946"
}
],
"symlink_target": ""
}
|
#
# @package pricecalculator
# @file pricecalculator.py
# @brief Python application "Price Calculator",
# an "Elephant Carpaccio Exercise",
# according to the "Story Slicing" approach
# @author Rolf Hemmerling <hemmerling@gmx.net>
# @version 1.00
# @date 2015-06-01
# @copyright Apache License, Version 2.0
#
#  PriceCalculator - Python application "Price Calculator",
# an "Elephant Carpaccio Exercise",
# according to the "Story Slicing" approach
#
# Copyright 2015 Rolf Hemmerling
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Main development date: 2013-11-23
#
class PriceCalculator:
    """Interactive price calculator (Elephant Carpaccio exercise).

    NOTE: This module is Python 2 code (print statements, raw_input).
    Reads quantity, unit price and country code from stdin, applies a
    volume discount and country-specific VAT, and prints the result.
    """

    # VAT percentage per ISO country code.
    umsatzsteuer = {'de':19, 'it':22, 'se':25, 'lu':15, 'mt':18}
    # Discount tiers as (exclusive upper price bound, discount percent).
    mengenrabatt = [ (1000, 0), (5000,3), (7000, 5), (10000, 7), (50000, 10)]

    def __init__(self):
        pass

    def data_input(self):
        """Prompt for quantity, unit price and country code; return as strings."""
        print "Anzahl ="
        anzahl = raw_input()
        print "Preis ="
        preis = raw_input()
        print "Laendercode ="
        land = raw_input()
        return [ anzahl, preis, land ]

    def data_calculation(self, data):
        """Append the final price (discounted gross plus VAT) to *data*."""
        preis = int(data[0])*float(data[1])
        # Discount applied when the price exceeds every tier bound.
        # NOTE(review): 15 is higher than any tier value (max 10); presumably
        # an intentional top tier for orders >= 50000 -- confirm with author.
        rabatt = 15
        for key in self.mengenrabatt:
            print "key", key
            if preis<key[0]:
                rabatt = key[1]
                break
        print "rabatt =", rabatt
        rabatt_preis = preis*( 1 - rabatt/100.0 )
        # VAT on the discounted price, looked up by country code.
        mwst = rabatt_preis*int(self.umsatzsteuer[data[2]])/100.0
        data.append(rabatt_preis+mwst)
        return data

    def data_output(self, data):
        """Print the accumulated [anzahl, preis, land, endpreis] list."""
        print data

    def main(self):
        """Run one input -> calculation -> output cycle."""
        data = self.data_input()
        data2 = self.data_calculation(data)
        self.data_output(data2)
        pass
# Run the calculator only when executed as a script.
# NOTE(review): the original instantiated and ran the calculator
# unconditionally at import time while the __main__ guard was an empty
# `pass`; execution is moved inside the guard so importing this module
# no longer blocks on stdin.
if __name__ == '__main__':
    my_preis_rechner = PriceCalculator()
    my_preis_rechner.main()
|
{
"content_hash": "2c84d407093b143c328c57d41e23e350",
"timestamp": "",
"source": "github",
"line_count": 78,
"max_line_length": 77,
"avg_line_length": 30.96153846153846,
"alnum_prop": 0.5925465838509317,
"repo_name": "hemmerling/codingdojo",
"id": "080ca6a8a90d6c3adb23a5234b3bc102067ffee9",
"size": "2438",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "src/price_calculator/python_socramob_openspace_2013-11-23/pricecalculator.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "ABAP",
"bytes": "8159"
},
{
"name": "ASP",
"bytes": "4306"
},
{
"name": "Batchfile",
"bytes": "180"
},
{
"name": "C#",
"bytes": "48393"
},
{
"name": "Gherkin",
"bytes": "197"
},
{
"name": "HTML",
"bytes": "122607"
},
{
"name": "Haskell",
"bytes": "597"
},
{
"name": "Java",
"bytes": "77787"
},
{
"name": "JavaScript",
"bytes": "114736"
},
{
"name": "PHP",
"bytes": "2347"
},
{
"name": "PowerShell",
"bytes": "50481"
},
{
"name": "Python",
"bytes": "109650"
},
{
"name": "Ruby",
"bytes": "5256"
},
{
"name": "Swift",
"bytes": "2587"
},
{
"name": "Tcl",
"bytes": "13019"
}
],
"symlink_target": ""
}
|
from utils import perms
from discord import Embed, Color
import STATICS
# Help text shown for this command.
# NOTE(review): the previous text said "Stream announcing.", which describes
# a different command; this module posts version changelogs (see ex below).
description = "Shows the changelogs of the current bot version. (Only for zekro)"
DEVMODE = False
async def ex(message, client):
    """Post the changelog for the current bot version (restricted to zekro).

    Non-zekro callers get a DM refusal and their invoking message deleted.
    The changelog file may be split into sections delimited by '#': the
    first line of each section becomes an embed field name, the remainder
    its value; otherwise the whole file is used as the embed description.
    """
    author = message.author
    if not perms.check_if_zekro(author):
        await client.send_message(author, embed=Embed(color=Color.red(), description="Sorry, this command is only available for zekro ;)"))
        await client.delete_message(message)
        return
    # f.read() is the idiomatic equivalent of "".join(f.readlines()).
    with open("changelog.txt") as f:
        content = [f.read()]
    em = Embed(color=Color.gold(), title="Update %s Changelogs" % STATICS.VERSION)
    if "#" in content[0]:
        # Drop everything before the first '#'; each section starts with a
        # heading line followed by its body.
        content = content[0].split("#")[1:]
        for c in content:
            em.add_field(name=c.split("\n")[0], value="\n".join(c.split("\n")[1:]), inline=False)
    else:
        em.description = content[0]
    await client.send_message(message.channel, embed=em)
|
{
"content_hash": "77d27395fc294af0e319b14c04629e73",
"timestamp": "",
"source": "github",
"line_count": 30,
"max_line_length": 139,
"avg_line_length": 30.3,
"alnum_prop": 0.6435643564356436,
"repo_name": "zekroTJA/regiusBot",
"id": "c8b8439abae14c9946409e2f499f85a79caea3e9",
"size": "909",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "commands/cmd_update.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "76868"
},
{
"name": "Shell",
"bytes": "12"
}
],
"symlink_target": ""
}
|
from abc import ABC
class Automation(ABC):
    """Abstract interface for an automation that can be run once or served."""

    @classmethod
    def load(cls, path_or_folder):
        """Instantiate the automation from a configuration path or folder.

        NOTE(review): *path_or_folder* is ignored by this base
        implementation; subclasses are expected to use it.
        """
        # PEP 8: the first argument of a classmethod is named cls
        # (the original used the non-standard name `Class`).
        return cls()

    def run(self):
        """Execute the automation once. Base implementation is a no-op."""

    def serve(self):
        """Serve the automation interactively. Base implementation is a no-op."""
class Batch(ABC):
    """Read-only access to a batch's variable data.

    Base implementations return empty/neutral values; subclasses override
    them with real lookups.
    """

    def get_data(self, variable_definition):
        """Return the variable's data in one of these dictionary shapes:

        {}
        {'value': 1}
        {'path': '/a/b/c.png'}
        {'uri': 'upload:xyz'}
        {'error': 'message'}
        """
        return {}

    def get_data_uri(self, variable_definition):
        """Return the resolved variable data uri (empty in the base class)."""
        return ''

    def get_data_configuration(self, variable_definition):
        """Return the resolved variable configuration (empty in the base class)."""
        return {}
class Server(ABC):
    """Abstract server that can serve requests or watch for changes.

    All methods are no-op stubs to be overridden by concrete servers.
    """

    def __init__(self, configuration, work=None, queue=None, settings=None):
        """Accept configuration and optional work/queue/settings; stores nothing."""

    def serve(self):
        """Serve requests. Base implementation is a no-op."""

    def watch(self):
        """Watch for changes. Base implementation is a no-op."""
|
{
"content_hash": "20c727c53ddfa6a30d38581a64b74868",
"timestamp": "",
"source": "github",
"line_count": 48,
"max_line_length": 76,
"avg_line_length": 19.041666666666668,
"alnum_prop": 0.5568927789934355,
"repo_name": "crosscompute/crosscompute",
"id": "d691fc7ed827924dd9c64ad4c2aa854ef183d514",
"size": "914",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "crosscompute/routines/interface.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "227"
},
{
"name": "HTML",
"bytes": "1017"
},
{
"name": "JavaScript",
"bytes": "5264"
},
{
"name": "Jinja",
"bytes": "4487"
},
{
"name": "Python",
"bytes": "270455"
},
{
"name": "Shell",
"bytes": "67"
}
],
"symlink_target": ""
}
|
from setuptools import setup, find_packages


def _read_long_description():
    """Return the README contents for the package's long description."""
    # Context manager closes the handle deterministically; the original
    # open('README.md').read() leaked the file object until GC.
    with open('README.md') as readme:
        return readme.read()


setup(
    name='idalink',
    description='An interface to the insides of IDA!',
    long_description=_read_long_description(),
    version='0.12',
    url='https://github.com/zardus/idalink',
    license='GNU General Public License v3',
    author='Zardus',
    author_email='zardus@gmail.com',
    maintainer='rhelmot',
    maintainer_email='audrey@rhelmot.io',
    packages=find_packages(),
    install_requires=[
        'rpyc',
    ],
)
|
{
"content_hash": "96daccdae60a9d22c2a6651cc3b5d8b6",
"timestamp": "",
"source": "github",
"line_count": 18,
"max_line_length": 54,
"avg_line_length": 27.11111111111111,
"alnum_prop": 0.6516393442622951,
"repo_name": "zardus/idalink",
"id": "b59af1e1d3de070dd2cf7e63b6a597a1563aa0c7",
"size": "488",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "setup.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Python",
"bytes": "22031"
}
],
"symlink_target": ""
}
|
"""
Our CPU based backend interface and tensor data structure. Our implementation
wraps :mod:`numpy` ndarray and related operations
"""
import logging
import numpy as np
from neon.backends.backend import Backend, Tensor
from neon.util.compat import range
logger = logging.getLogger(__name__)
class CPUTensor(Tensor):

    """
    Our basic n-dimensional array data structure that resides in host memory,
    and is meant to be manipulated on the CPU.  wrapped `numpy.ndarray` tensor.

    Arguments:
        obj (numpy.ndarray): the actual data values.  Python built-in
                             types like lists and tuples are also supported.
        dtype (numpy.ndtype, optional): underlying data type of the elements.
                                        If None will use float32.
        persist_values (bool, optional): If set to True (the default), the
                                         values assigned to this Tensor will
                                         persist across multiple begin and end
                                         calls.  Setting to False may provide a
                                         performance increase if values do
                                         not need to be maintained across such
                                         calls

    See also:
        CPU

    Notes:
        Unlike numpy, in this implementation we never collapse dimensions, and
        the minimal number of dimensions will be _min_dims (currently set to 2
        to match cudanet GPU implementation).  So a wrapped scalar will have
        dimension 1x1.
    """
    # Underlying numpy.ndarray holding the element values.
    _tensor = None
    # Minimum rank maintained; lower-rank inputs are padded with size-1 dims.
    _min_dims = 2

    def __init__(self, obj, dtype=None, persist_values=True):
        if dtype is None:
            dtype = 'float32'
        if type(obj) != np.ndarray:
            self._tensor = np.array(obj, dtype)
        elif obj.dtype != dtype:
            # Copy-convert so the stored array matches the requested dtype.
            self._tensor = obj.astype(dtype)
        else:
            self._tensor = obj
        # Pad with trailing singleton dimensions up to _min_dims (see Notes).
        while self._tensor.ndim < self._min_dims:
            self._tensor = self._tensor.reshape(self._tensor.shape + (1, ))
        self.shape = self._tensor.shape
        self.dtype = dtype
        self.persist_values = persist_values

    @property
    def raw(self):
        """Direct access to the wrapped numpy.ndarray (no copy)."""
        return self._tensor

    def __str__(self):
        """
        Display a suitable representation of this Tensor.

        Returns:
            str: the representation.
        """
        return str(self._tensor)

    def __repr__(self):
        """Unambiguous representation: class name wrapping the array contents."""
        return ("%s(%s)" % (self.__class__.__name__, str(self)))

    def _clean(self, val):
        """
        Replaces any CPUTensor indices with `numpy` arrays.

        Arguments:
            val (int, array_like, CPUTensor): the items to index by.

        Returns:
            int, array_like, CPUTensor: Transformed val
        """
        if isinstance(val, tuple):
            # Squeeze so padded singleton dims don't break numpy indexing.
            val = tuple(x._tensor.squeeze() if isinstance(x, self.__class__)
                        else x for x in val)
        if isinstance(val, self.__class__):
            val = val._tensor.squeeze()
        return val

    def asnumpyarray(self):
        """
        Convert the CPUTensor to an in host memory `numpy.ndarray`.  A copy of
        the data may be made depending on where the CPUTensor normally resides.

        Returns:
            numpy.ndarray view or copy of the CPUTensor data.
        """
        return self._tensor

    def asbuffer(self):
        """
        For the CPUTensor, the numpy ndarray itself exposes a buffer interface

        Returns:
            numpy.ndarray view or copy of the CPUTensor data.
        """
        return self._tensor

    def __getitem__(self, key):
        """
        Extract a subset view of the items via slice style indexing
        along each dimension. e.g. A[5:10, :].  Each slice consists of
        start_idx:stop_idx:step_size triplets.  If step_size isn't specified it
        defaults to 1.  If start_idx isn't specified it defaults to 0.  If
        stop_idx isn't specified it defaults to the total number of elements
        along that dimension.  As such a slice value of ':' allows one to
        select all elements along that dimension.

        Arguments:
            key (int, slice, tuple): indices of each dimension's slice.

        Returns:
            CPUTensor: view of self corresponding to the subset items.

        See Also:
            take
        """
        if isinstance(key, int) and len(self.shape) > 1:
            # 1D index, ensure we treat as a row vector
            key = slice(key, key + 1)
        return self.__class__(self._tensor[self._clean(key)],
                              dtype=self._tensor.dtype)

    def __setitem__(self, key, value):
        """
        Assign the specified value to a subset of elements found via slice
        style indexing along each dimension. e.g. A[5:10, :] = 4.5.
        Each slice consists of start_idx:stop_idx:step_size triplets.  If
        step_size isn't specified it defaults to 1.  If start_idx isn't
        specified it defaults to 0.  If stop_idx isn't specified it defaults
        to the total number of elements along that dimension.  As such a slice
        value of ':' allows one to select all elements along that dimension.

        Arguments:
            key (int, slice, tuple): indices of each dimension's slice.
            value (numeric array, CPUTensor): values to be assigned to the
                                              extracted element subset.  If an
                                              array it should be the same shape
                                              as what key indexes (or be
                                              broadcastable as such).
        """
        try:
            self._tensor[self._clean(key)] = self._clean(value)
        except ValueError:
            # can come about due to numpy's dimension collapsing.  ex. trying to
            # assign a 5x1 value to a vector of length 5.  Not sure there's a
            # way to avoid the expensive reshape op here?
            clean_key = self._clean(key)
            req_shape = self._tensor[clean_key].shape
            self._tensor[clean_key] = np.reshape(self._clean(value), req_shape)

    def __delitem__(self, key):
        # Element deletion is unsupported; the backing store is fixed-size.
        raise ValueError("cannot delete array elements")

    def copy_from(self, src):
        """Overwrite this tensor's values in place from array-like *src*."""
        self._tensor[:] = src

    def transpose(self):
        """Return a new CPUTensor wrapping the transposed (numpy view) data."""
        return self.__class__(self._tensor.transpose(),
                              dtype=self._tensor.dtype)

    def reshape(self, shape):
        """Return a new CPUTensor with the same data viewed as *shape*."""
        return self.__class__(self._tensor.reshape(shape),
                              dtype=self._tensor.dtype)

    def take(self, indices, axis=None):
        """Select elements at *indices* along *axis*, returning a new tensor."""
        if type(indices) == self.__class__:
            indices = indices._tensor
        # if indices are nx1 or 1xn, much of our code assumes these dims are
        # collapsed, hence the squeeze call.
        if type(indices) == np.ndarray:
            indices = indices.squeeze()
        return self.__class__(self._tensor.take(indices, axis),
                              self._tensor.dtype)

    def fill(self, value):
        """
        Assign specified value to each element of this CPUTensor.

        Arguments:
            value (numeric): The value to be assigned to each element.

        Return:
            CPUTensor: updated view of the data.
        """
        self._tensor.fill(value)
        return self

    def repeat(self, repeats, axis):
        """Repeat elements *repeats* times along *axis* (new CPUTensor)."""
        return self.__class__(self._tensor.repeat(repeats, axis))

    def log(self):
        """Return a new CPUTensor holding the elementwise natural log."""
        return self.__class__(np.log(self._tensor))

    def exp(self):
        """Return a new CPUTensor holding the elementwise exponential."""
        return self.__class__(np.exp(self._tensor))

    def sumsq(self, axis=None, dtype='float32', out=None):
        """Sum of squared elements, reduced along *axis*.

        Returns a scalar when axis is None, otherwise a CPUTensor of the
        per-axis sums.
        """
        res = np.sum(self._tensor * self._tensor, axis, dtype, out)
        if axis is None:
            return res
        else:
            return self.__class__(res)
class CPU(Backend):
"""
Sets up a :mod:`numpy` based backend for matrix ops. By default, we use
32-bit element data types for any arrays constructed.
Attributes:
default_dtype (dtype): default element data type. We assume 32-bit
float
See also:
CPUTensor
"""
default_dtype = 'float32'
tensor_cls = CPUTensor
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
self.err_init()
self.rng_init()
def default_dtype_if_missing(self, in_dtype):
if in_dtype is None:
in_dtype = self.default_dtype
return in_dtype
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary and populating each element with a value of 0.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
dtype = self.default_dtype_if_missing(dtype)
return self.tensor_cls(np.zeros(ary.shape, dtype),
dtype, persist_values)
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
"""
Instantiate a new instance of this backend's Tensor class, with the
shape taken from ary.
Arguments:
ary (tensor object): Tensor to inherit the dimensions of.
dtype (data-type, optional): If present, specifies the underlying
type to employ for each element.
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: array object
Raises:
NotImplementedError: Can't be instantiated directly.
See Also:
:py:func:`~neon.backends.backend.Backend.empty`,
:py:func:`~neon.backends.backend.Backend.ones`,
:py:func:`~neon.backends.backend.Backend.array`
"""
dtype = self.default_dtype_if_missing(dtype)
return self.tensor_cls(np.empty(ary.shape, dtype),
dtype, persist_values)
def empty(self, shape, dtype=None, persist_values=True):
"""
Instantiate a new instance of the CPUTensor class without initializing
individual element values.
Arguments:
shape (int, list): The size of each dimension of the Tensor.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype_if_missing(dtype)
return self.tensor_cls(np.empty(shape, dtype), dtype, persist_values)
def array(self, obj, dtype=None, persist_values=True):
"""
Instantiate a new instance of the CPUTensor class setting each element
value to what is specified in obj.
Arguments:
obj (numpy.ndarray): The data structure containing element values
spread across a number of dimensions. Python
built-in types like ints and lists are
supported.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype_if_missing(dtype)
return self.tensor_cls(np.array(obj, dtype), dtype, persist_values)
def zeros(self, shape, dtype=None, persist_values=True):
"""
Instantiate a new instance of the CPUTensor class setting each element
value to 0.
Arguments:
shape (list of ints): The size of each dimension of the Tensor.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype_if_missing(dtype)
return self.tensor_cls(np.zeros(shape, dtype), dtype, persist_values)
def ones(self, shape, dtype=None, persist_values=True):
"""
Instantiate a new instance of the CPUTensor class setting each element
value to 1.
Arguments:
shape (list of ints): The size of each dimension of the Tensor.
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
CPUTensor: newly created data structure reference
"""
dtype = self.default_dtype_if_missing(dtype)
return self.tensor_cls(np.ones(shape, dtype), dtype, persist_values)
def _unwrap(self, obj):
"""
Helper that extracts and returns the raw data underlying obj (if it is
a CPUTensor), otherwise returns the existing structure.
Arguments:
obj (numeric, CPUTensor): The object to extract raw data from
Returns:
numeric, numpy.ndarray: raw data from object.
"""
if isinstance(obj, self.tensor_cls):
return obj._tensor
else:
return obj
def copy(self, tsr):
"""
Construct and return a deep copy of the CPUTensor passed.
Arguments:
tsr (CPUTensor): the object to copy
Returns:
CPUTensor: new array object with the same values as tsr.
"""
return self.tensor_cls(np.copy(tsr._tensor))
def clip(self, a, a_min, a_max, out=None):
if out is None:
out = self.tensor_cls(np.empty_like(a._tensor))
np.clip(a._tensor, a_min, a_max, out._tensor)
return out
    def err_init(self):
        """
        Install the user-requested numpy floating point error behavior.

        If a ``seterr_handling`` dict has been set on this instance, it is
        forwarded verbatim to ``numpy.seterr``; otherwise nothing happens.
        """
        # support numpy.seterr settings:
        # http://docs.scipy.org/doc/numpy/reference/generated/numpy.seterr.html
        # Membership test on __dict__ avoids an AttributeError when the
        # config option was never set on this instance.
        if ('seterr_handling' in self.__dict__ and self.seterr_handling is not
                None):
            logger.info("Updating numpy.seterr settings: %s",
                        str(self.seterr_handling))
            np.seterr(**self.seterr_handling)
    def rng_init(self):
        """
        Seed numpy's global random number generator.

        Uses the instance's ``rng_seed`` attribute when present; otherwise
        seeds with None (non-deterministic, OS-entropy based).
        """
        seed = None
        if 'rng_seed' in self.__dict__:
            seed = self.rng_seed
        logger.info("Seeding random number generator with: %s", str(seed))
        np.random.seed(seed)
def uniform(self, low=0.0, high=1.0, size=1, dtype=None,
persist_values=True):
"""
Uniform random number sample generation.
Arguments:
low (numeric, optional): Minimal sample value that can be returned.
Defaults to 0.0
high (numeric, optional): Maximal sample value. Open ended range
so maximal value slightly less.
Defaults to 1.0
size (array_like or int, optional): Shape of generated samples
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: Of specified size filled with these random numbers.
"""
return self.tensor_cls(np.random.uniform(low, high, size), dtype,
persist_values)
def fill_uniform_thresh(self, a, keepthresh=0.5, dtype=None):
"""
Uniform random number sample generation.
Arguments:
a (dtype): CPUTensor to fill with zeros or ones based on whether
sample from uniform distribution is < keepthresh
keepthresh (numeric, optional): Minimal sample value that can be
returned. Defaults to 0.5
Returns:
Tensor: Of specified size filled with these random numbers.
"""
a._tensor[:] = np.array(
np.random.uniform(size=a._tensor.shape) < keepthresh,
dtype=a._tensor.dtype)
a._tensor[:] = a._tensor[:] / keepthresh
def make_binary_mask(self, tsr, keepthresh=0.5, dtype=None):
tsr._tensor[:] = np.array(
np.random.uniform(size=tsr._tensor.shape) < keepthresh,
dtype=tsr._tensor.dtype)
def normal(self, loc=0.0, scale=1.0, size=1, dtype=None,
persist_values=True):
"""
Gaussian/Normal random number sample generation
Arguments:
loc (numeric, optional): Where to center distribution. Defaults
to 0.0
scale (numeric, optional): Standard deviaion. Defaults to 1.0
size (array_like or int, optional): Shape of generated samples
dtype (dtype, optional): Element data type. If not specified we
use default_dtype value ('float32'
unless overridden).
persist_values (bool, optional): If set to True (the default), the
values assigned to this Tensor
will persist across multiple begin
and end calls. Setting to False
may provide a performance increase
if values do not need to be
maintained across such calls
Returns:
Tensor: Of specified size filled with these random numbers.
"""
return self.tensor_cls(np.random.normal(loc, scale, size), dtype,
persist_values)
def add(self, left, right, out):
"""
Perform element-wise addition on the operands left and right, storing
the result in the CPUTensor out. Each operand and out is assumed to
have identical shape, or be broadcastable as such.
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.add(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def subtract(self, left, right, out):
"""
Perform element-wise subtraction on the operands left and right,
storing the result in the CPUTensor out. Each operand and out is
assumed to have identical shape, or be broadcastable as such.
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.subtract(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def multiply(self, left, right, out):
"""
Perform element-wise multiplication on operands left and right,
storing the result in the CPUTensor out. Each operand and out is
assumed to have identical shape, or be broadcastable as such.
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.multiply(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def divide(self, left, right, out):
"""
Perform element-wise division on the operands left and right, storing
the resultant values in the CPUTensor out. Each operand and out is
assumed to have identical shape, or be broadcastable as such.
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.divide(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def power(self, tsr, power, out):
"""
Perform element-wise raise of tsr values to specified power,
storing the result in CPUTensor out. Both CPUTensor's should have
identical shape.
Arguments:
tsr (CPUTensor): input to be transformed.
power (CPUTensor, numeric): exponentiated value to be applied to
elements. Examples include 2 (square),
0.5 (sqaure root).
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.power(tsr._tensor, self._unwrap(power), out._tensor)
return out
def reciprocal(self, a, out):
np.divide(1.0, a._tensor, out._tensor)
return out
def dot(self, left, right, out, alpha=1, beta=0):
"""
Perform sum product between the last axis of left and the second last
axis of right, storing the result in out. Note that this dot product
is equivalent to the inner product if operands are vectors, and matrix
multiplication if both operands are matrices. We support BLAS Level 3
general matrix multiplication (GEMM) functionality by including
additional scalars alpha and beta. The general form of the multiply
is: out <- alpha * left * right + beta * out, but will be
short-circuited to: out <- alpha * left * right if beta has value 0
(the default). All CPUTensor's should have commensurate shape or be
broadcastable as such.
Arguments:
left (CPUTensor): left-hand side operand.
right (CPUTensor): right-hand side operand.
out (CPUTensor): where the result will be stored. Note that this
object should differ from left and right.
alpha (numeric, optional): scalar to multiply the resultant sum
product by. Defaults to 1.
beta (numeric, optional): scalar to pre-multiply out values by
prior to adding to sum product. Defaults
to 0, which implies no such addition of
prior out values.
Returns:
CPUTensor: reference to out
"""
if beta == 0:
np.dot(left._tensor, right._tensor, out._tensor)
else:
np.multiply(out._tensor, beta, out._tensor)
tmp = np.empty(out.shape)
np.dot(left._tensor, right._tensor, tmp)
np.multiply(tmp, alpha, tmp)
np.add(out._tensor, tmp, out._tensor)
return out
def equal(self, left, right, out):
"""
Performs element-wise equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.equal(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def not_equal(self, left, right, out):
"""
Performs element-wise non-equality testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.not_equal(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def greater(self, left, right, out):
"""
Performs element-wise greater than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.greater(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def greater_equal(self, left, right, out):
"""
Performs element-wise greater than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.greater_equal(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def less(self, left, right, out):
"""
Performs element-wise less than testing on each element of left and
right, storing the result in out. Each operand is assumed to be the
same shape (or broadcastable as such).
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.less(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def less_equal(self, left, right, out):
"""
Performs element-wise less than or equal testing on each element of
left and right, storing the result in out. Each operand is assumed to
be the same shape (or broadcastable as such).
Arguments:
left (CPUTensor, numeric): left-hand side operand.
right (CPUTensor, numeric): right-hand side operand.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.less_equal(self._unwrap(left), self._unwrap(right), out._tensor)
return out
def norm(self, tsr, order=None, axis=None, out=None):
"""
Calculates and returns the vector p-norms of the CPUTensor along the
specified axis. The p-norm is defined on vector A as
:math:`||A||_p = \sum_i(|A_i|^p)^{1/p}`.
Arguments:
tsr (CPUTensor): the CPUTensor on which to find the norms
order (int): The order or p upon which the norm is calculated.
Valid values include:
None, inf, -inf, 0, 1, -1, 2, -2, ...
axis (int): The axis along which to compute vector norms.
out (CPUTensor, optional): where to write the results to. Must be
of the expected result shape. If not
specified, a new buffer is created and
returned.
Returns:
CPUTensor: p-norm of tsr along the specified axis.
Raises:
IndexError if invalid axis specified
AttributeError if invalid order specified
See Also:
`numpy.linalg.norm`
"""
if not isinstance(axis, int) or axis < 0 or axis >= len(tsr.shape):
raise IndexError("invalid axis value: %s", axis)
if not isinstance(order, (int, float)):
raise AttributeError("invalid order value: %s", order)
if out is None:
out_shape = list(tsr.shape)
out_shape[axis] = 1
out = self.empty(out_shape)
if order == float('Inf'):
np.max(np.abs(tsr._tensor), axis, out=out._tensor, keepdims=True)
elif order == float('-Inf'):
np.min(np.abs(tsr._tensor), axis, out=out._tensor, keepdims=True)
elif order == 0:
np.sum(tsr._tensor != 0, axis, out=out._tensor, keepdims=True)
else:
np.sum(np.abs(tsr._tensor) ** order, axis, out=out._tensor,
keepdims=True)
np.power(out._tensor, (1.0 / order), out._tensor)
return out
def xcov(self, a, b, out):
a0 = a._tensor - a._tensor.mean(1, keepdims=True)
b0 = b._tensor - b._tensor.mean(1, keepdims=True)
np.dot(a0, b0.T, out._tensor)
return self.divide(out, a.shape[1], out=out)
    def mean_norm(self, a, axis, out):
        """
        Subtract the mean of `a` from `a`, binding the result to out._tensor.

        NOTE(review): out._tensor is rebound to a freshly allocated array
        rather than written in place, so prior aliases of out's buffer will
        not see the result -- confirm callers do not rely on in-place
        semantics.
        """
        # NOTE(review): `not axis` is also truthy for axis == 0, so passing
        # axis=0 subtracts the single global mean instead of the per-axis-0
        # means -- looks suspicious; confirm intended behavior with callers.
        if (axis == -1 or not axis):
            out._tensor = a._tensor - a._tensor.mean()
        else:
            out._tensor = a._tensor - a._tensor.mean(axis, keepdims=True)
def exp(self, x, out):
np.exp(x._tensor, out=out._tensor)
return out
def log(self, x, out):
np.log(x._tensor, out=out._tensor)
return out
    def logistic(self, x, out):
        """
        Logistic sigmoid, computed in stages entirely inside out's buffer:
        out = 1 / (1 + exp(-x)).

        Arguments:
            x (CPUTensor): input tensor.
            out (CPUTensor): destination tensor (also used as scratch).

        Returns:
            CPUTensor: reference to out
        """
        # out = -x
        self.multiply(x, -1.0, out=out)
        # out = exp(-x)
        self.exp(out, out=out)
        # out = 1 + exp(-x)
        self.add(out, 1.0, out=out)
        # out = 1 / (1 + exp(-x))
        self.reciprocal(out, out=out)
        return out
def tanh(self, x, out):
np.tanh(x._tensor, out=out._tensor)
return out
def rectlin(self, x, out):
# x and out are the same buffer
np.maximum(x._tensor, 0., out._tensor)
return out
    def rectlin_derivative(self, x, out):
        """
        Derivative of the rectified linear transform: out = 1 where x > 0,
        otherwise 0 (cast to out's dtype).

        Arguments:
            x (CPUTensor): input tensor.
            out (CPUTensor): destination tensor.

        Returns:
            CPUTensor: reference to out
        """
        self.greater(x, 0, out=out)
        return out
    def rectleaky(self, x, slope, out):
        """
        Leaky rectified linear transform: out = max(x, slope * x)
        (for 0 < slope < 1 this passes positives through and scales
        negatives by slope).

        Arguments:
            x (CPUTensor): input tensor.
            slope (numeric): multiplier applied on the negative side.
            out (CPUTensor): destination tensor.

        Returns:
            CPUTensor: reference to out
        """
        # out = slope * x, then keep the element-wise max of x and slope * x.
        self.multiply(x, slope, out=out)
        np.maximum(x._tensor, out._tensor, out._tensor)
        return out
    def rectleaky_derivative(self, x, slope, out):
        """
        Derivative of the leaky rectified linear transform:
        out = 1 where x > 0, otherwise slope.

        Arguments:
            x (CPUTensor): input tensor.
            slope (numeric): gradient on the negative side.
            out (CPUTensor): destination tensor.

        Returns:
            CPUTensor: reference to out
        """
        # indicator(x > 0) * (1 - slope) + slope == 1 if x > 0 else slope
        self.greater(x, 0, out=out)
        self.multiply(out, (1.0 - slope), out=out)
        self.add(out, slope, out=out)
        return out
def sum(self, tsr, axes, out):
"""
Calculates the summation of the elements along the specified axes.
Arguments:
tsr (CPUTensor): the Tensor on which to perform the sum
axes (int, list, optional): the dimension(s) along which to sum.
If set to None, we will sum over all
dimensions.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.sum(tsr._tensor, axis=axes, out=out._tensor, keepdims=True)
return out
def mean(self, tsr, axes, out):
"""
Calculates the arithmetic mean of the elements along the specified
axes.
Arguments:
tsr (CPUTensor): the Tensor on which to compute the average
axes (int, list, optional): the dimension(s) along which to
average. If set to None, we will
average over all dimensions.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.mean(tsr._tensor, axis=axes, out=out._tensor, keepdims=True)
return out
def variance(self, tsr, axes, out, mean=None):
"""
Calculates the sample variance of the elements along the specified
axes.
Arguments:
tsr (CPUTensor): the Tensor on which to compute the variance
axes (int, list, optional): the dimension(s) along which to
variance. If set to None, we will
variance over all dimensions.
out (CPUTensor): where the result will be stored.
mean (CPUTensor, optional): The Tensor containing mean of tsr.
Value currently ignored if specified.
Returns:
CPUTensor: reference to out
"""
np.var(tsr._tensor, axis=axes, out=out._tensor, keepdims=True)
return out
def min(self, tsr, axes, out):
"""
Calculates the minimal element value along the specified axes.
Arguments:
tsr (CPUTensor): the CPUTensor on which to compute the minimum
axes (int, list, optional): the dimension(s) along which to find
the minimum. If set to None, we will
compute the overall minimal value
across all dimensions.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.amin(tsr._tensor, axis=axes, out=out._tensor, keepdims=True)
return out
def max(self, tsr, axes, out):
"""
Calculates the maximal element value along the specified axes.
Arguments:
tsr (CPUTensor): the CPUTensor on which to compute the maximum
axes (int, list, optional): the dimension(s) along which to find
the maximum. If set to None, we will
compute the overall maximal value
across all dimensions.
out (CPUTensor): where the result will be stored.
Returns:
CPUTensor: reference to out
"""
np.amax(tsr._tensor, axis=axes, out=out._tensor, keepdims=True)
return out
    def argmin(self, tsr, axis, out):
        """
        Calculates the indices of the minimal element value along the specified
        axis. If multiple elements contain the minimum, only the elements of
        the first are returned.

        Arguments:
            tsr (CPUTensor): The CPUTensor on which to find the minimum indices
            axis (int): The dimension along which to find the minimum. If set
                        to None, find the overall minimum index of a flattened
                        representation of tsr.
            out (CPUTensor): Where to store the result. Should be of the
                             appropriate type and expected shape

        Returns:
            CPUTensor: reference to out
        """
        # Fast path: write directly into out's buffer. ndarray.argmin raises
        # when out's shape/dtype is incompatible with its (non-keepdims)
        # result, in which case we fall back below.
        try:
            tsr._tensor.argmin(axis, out._tensor)
        except (ValueError, TypeError):
            # numpy does not have the option to keepdims in the argmin result
            # so we may be dealing with mismatched shapes that we need to
            # restore in a costlier way.
            out._tensor[:] = np.reshape(tsr._tensor.argmin(axis), out.shape)
        return out
    def argmax(self, tsr, axis, out):
        """
        Calculates the indices of the maximal element value along the specified
        axis. If multiple elements contain the maximum, only the elements of
        the first are returned.

        Arguments:
            tsr (CPUTensor): The CPUTensor on which to find the maximum indices
            axis (int): The dimension along which to find the maximum. If set
                        to None, find the overall maximum index of a flattened
                        representation of tsr.
            out (CPUTensor): Where to store the result. Should be of the
                             appropriate type and expected shape

        Returns:
            CPUTensor: reference to out
        """
        # Fast path: write directly into out's buffer. ndarray.argmax raises
        # when out's shape/dtype is incompatible with its (non-keepdims)
        # result, in which case we fall back below.
        try:
            tsr._tensor.argmax(axis, out._tensor)
        except (ValueError, TypeError):
            # numpy does not have the option to keepdims in the argmax result
            # so we may be dealing with mismatched shapes that we need to
            # restore in a costlier way.
            out._tensor[:] = np.reshape(tsr._tensor.argmax(axis), out.shape)
        return out
def fabs(self, x, out=None):
if out is not None:
res = np.fabs(x._tensor, out._tensor)
else:
res = np.fabs(x._tensor)
return self.tensor_cls(res)
def sqrt(self, x, out):
res = np.sqrt(x._tensor, out._tensor)
return self.tensor_cls(res)
def square(self, x, out):
np.multiply(x._tensor, x._tensor, out._tensor)
return out
def cube(self, x, out):
np.multiply(x._tensor, x._tensor, out._tensor)
np.multiply(out._tensor, x._tensor, out._tensor)
return out
# Not part of the API - can be moved to a utility class.
def hstack_maps(self, obj, nfm):
"""
Stack the feature maps horizontally.
"""
assert obj.shape[0] % nfm == 0
return self.tensor_cls(np.hstack(np.vsplit(obj._tensor, nfm)))
# Not part of the API - can be moved to a utility class.
def vstack_maps(self, obj, nfm):
"""
Stack the feature maps vertically.
"""
assert obj.shape[1] % nfm == 0
return self.tensor_cls(np.vstack(np.hsplit(obj._tensor, nfm)))
def softmax(self, x, out):
np.subtract(x._tensor, x._tensor.max(axis=0, keepdims=True),
out._tensor)
np.exp(out._tensor, out._tensor)
# This uses some temporary storage, but might be ok?
np.divide(out._tensor, np.sum(out._tensor, axis=0, keepdims=True),
out._tensor)
return out
def softmax_gradient(self, y, err, out):
a = np.einsum('ij,ji->i', err._tensor.T, y._tensor)
np.subtract(err._tensor, a[np.newaxis], out._tensor)
np.multiply(out._tensor, y._tensor, out._tensor)
return out
    def fprop_fc(self, out, inputs, weights, layer=None):
        """
        Forward propagate the inputs of a fully connected network layer to
        produce output pre-activations (ready for transformation by an
        activation function).

        Arguments:
            out (CPUTensor): Where to store the forward propagated results.
            inputs (CPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            weights (CPUTensor): The weight coefficient values for this layer.
            layer (Layer): The layer object. Unused on this backend.
        """
        # Pre-activations are simply the matrix product weights . inputs.
        self.dot(weights, inputs, out)
    def bprop_fc(self, out, weights, deltas, layer=None):
        """
        Backward propagate the error through a fully connected network layer.

        Arguments:
            out (CPUTensor): Where to store the backward propagated errors.
            weights (CPUTensor): The weight coefficient values for this layer.
            deltas (CPUTensor): The error values for this layer
            layer (Layer): The layer object. Unused on this backend.
        """
        # Propagate deltas back through the transposed weight matrix.
        self.dot(weights.transpose(), deltas, out)
    def update_fc(self, out, inputs, deltas, layer=None):
        """
        Compute the updated gradient for a fully connected network layer.

        Arguments:
            out (CPUTensor): Where to store the updated gradient value.
            inputs (CPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            deltas (CPUTensor): The error values for this layer
            layer (Layer): The layer object. Unused on this backend.
        """
        # Weight gradient is the outer product of deltas and inputs.
        self.dot(deltas, inputs.transpose(), out)
    def update_fc_bias(self, err, out):
        """
        Compute the updated bias gradient for a fully connected network layer.

        Arguments:
            err (CPUTensor): backpropagated error
            out (CPUTensor): Where to store the updated gradient value.
        """
        # Bias gradient is the per-row sum of the error across the batch.
        self.sum(err, axes=1, out=out)
    def add_fc_bias(self, inputs, bias):
        """
        Add the bias for a fully connected network layer (in place on
        inputs, broadcast across the batch).

        Arguments:
            inputs (CPUTensor): the input to update.
            bias (CPUTensor): the amount to increment
        """
        self.add(inputs, bias, out=inputs)
    def fprop_conv(self, out, inputs, weights, ofmshape, ofmsize, ofmlocs,
                   ifmshape, links, nifm, padding, stride, ngroups, fpropbuf,
                   local=False):
        """
        Forward propagate the inputs of a convolutional network layer to
        produce output pre-activations (ready for transformation by an
        activation function).

        Arguments:
            out (CPUTensor): Where to store the forward propagated results.
            inputs (CPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            weights (CPUTensor): The weight coefficient values for this layer.
            ofmshape (tuple): Dimensions of each output feature map (typically
                              number of height and width neurons).
            ofmsize (int): Total size of each output feature map.
            ofmlocs (CPUTensor): Indices giving the location of each element in
                                 each output feature map stored in out.
            ifmshape (tuple): Dimensions of each input feature map (typically
                              number of height and width neurons).
            links (CPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           convolution operation.
            stride (int): Number of neurons to shift the filter at each step.
            ngroups (int): Number of groups.
            fpropbuf (CPUTensor): Temporary storage buffer used to hold the
                                  convolved outputs for a single receptive
                                  field.
            local (bool, optional): Whether to do local filtering (True) or
                                    convolution (False, the default)
        """
        # Number of input elements feeding one output unit (receptive field
        # size, across all contributing input maps).
        fsize = links.shape[1]
        for dst in range(ofmsize):
            # Compute the weighted average of the receptive field
            # and store the result within the destination feature map.
            # Do this for all filters in one shot.
            rflinks = links[dst]
            if local is False:
                # Convolution: the same filter bank is shared by every
                # output position.
                self.dot(weights.transpose(),
                         inputs.take(rflinks, axis=0), out=fpropbuf)
            else:
                # Locally-connected: each output position dst has its own
                # fsize-row slice of the weights.
                self.dot(weights[(fsize*dst):(fsize*(dst+1))].transpose(),
                         inputs.take(rflinks, axis=0), out=fpropbuf)
            out[ofmlocs[dst]] = fpropbuf
    def bprop_conv(self, out, weights, deltas, ofmshape, ofmsize, ofmlocs,
                   ifmshape, links, padding, stride, nifm, ngroups, bpropbuf,
                   local=False):
        """
        Backward propagate the error through a convolutional network layer.

        Arguments:
            out (CPUTensor): Where to store the backward propagated errors.
            weights (CPUTensor): The weight coefficient values for this layer.
            deltas (CPUTensor): The error values for this layer
            ofmshape (tuple): Dimensions of each output feature map (typically
                              height and width).
            ofmsize (int): Total size of each output feature map.
            ofmlocs (CPUTensor): Indices giving the location of each element in
                                 each output feature map stored in out.
            ifmshape (tuple): Dimensions of each input feature map (typically
                              height and width).
            links (CPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           convolution operation.
            stride (int): Number of neurons to shift the filter at each step.
            ngroups (int): Number of groups.
            bpropbuf (CPUTensor): Temporary storage buffer used to hold the
                                  backpropagated error for a single receptive
                                  field
            local (bool, optional): Whether to do local filtering (True) or
                                    convolution (False, the default)
        """
        # Receptive field size (rows of weights consumed per output unit).
        fsize = links.shape[1]
        # Errors accumulate over overlapping receptive fields, so start
        # from a clean slate.
        out.fill(0.0)
        for dst in range(ofmsize):
            rflinks = links[dst]
            if local is False:
                # Convolution: shared weights for every output position.
                self.dot(weights,
                         deltas.take(ofmlocs[dst], axis=0), bpropbuf)
            else:
                # Locally-connected: per-position slice of the weights.
                self.dot(weights[(fsize*dst):(fsize*(dst+1))],
                         deltas.take(ofmlocs[dst], axis=0), out=bpropbuf)
            # Accumulate into the (possibly already written) input positions
            # covered by this receptive field.
            self.add(bpropbuf, out.take(rflinks, axis=0), out=bpropbuf)
            out[rflinks] = bpropbuf
    def update_conv(self, out, inputs, weights, deltas, ofmshape, ofmsize,
                    ofmlocs, ifmshape, links, nifm, padding, stride, ngroups,
                    fwidth, updatebuf, local=False, layer=None):
        """
        Compute the updated gradient for a convolutional network layer.

        Arguments:
            out (CPUTensor): Where to store the updated gradient value.
            inputs (CPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            weights (CPUTensor): The weight coefficient values for this layer.
            deltas (CPUTensor): The error values for this layer
            ofmshape (tuple): Dimensions of each output feature map (typically
                              height and width).
            ofmsize (int): Total size of each output feature map.
            ofmlocs (CPUTensor): Indices giving the location of each element in
                                 each output feature map stored in out.
            ifmshape (tuple): Dimensions of each input feature map (typically
                              height and width).
            links (CPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           convolution operation.
            stride (int): Number of neurons to shift the filter at each step.
            ngroups (int): Number of groups.
            fwidth (int): Filter width.
            updatebuf (CPUTensor): Temporary storage buffer used to hold the
                                   updated gradient for a single receptive
                                   field
            local (bool, optional): Whether to do local filtering (True) or
                                    convolution (False, the default)
            layer (Layer): The layer object. Unused on this backend.
        """
        # Receptive field size (rows of out written per output unit in the
        # locally-connected case).
        fsize = links.shape[1]
        out.fill(0.0)
        for dst in range(ofmsize):
            # Accumulate the weight updates, going over all
            # corresponding cells in the output feature maps.
            rflinks = links[dst]
            eslice = deltas.take(ofmlocs[dst], axis=0).transpose()
            if local is False:
                # Shared filters: every output position contributes to the
                # same gradient buffer, so accumulate via updatebuf.
                self.dot(inputs.take(rflinks, axis=0), eslice, out=updatebuf)
                self.add(out, updatebuf.reshape(out.shape), out=out)
            else:
                # Locally-connected: each position owns its own slice of the
                # gradient, so it can be written directly without summing.
                self.dot(inputs.take(rflinks, axis=0), eslice,
                         out=out[(fsize*dst):(fsize*(dst+1))])
    def fprop_pool(self, out, inputs, op, ofmshape, ofmsize, ofmlocs, fshape,
                   ifmshape, links, nifm, padding, stride, fpropbuf):
        """
        Forward propagate the inputs of a Pooling network layer to
        produce output pre-activations (ready for transformation by an
        activation function).

        Arguments:
            out (CPUTensor): Where to store the forward propagated results.
            inputs (CPUTensor): Will be either the dataset input values (first
                                layer), or the outputs from the previous layer.
            op (string): The type of pooling operation to apply. We support
                         "max", "avg", "l2" currently.
            ofmshape (tuple): Dimensions of each output feature map (typically
                              number of height and width neurons).
            ofmsize (int): Total size of each output feature map.
            ofmlocs (CPUTensor): Indices giving the location of each element in
                                 each output feature map stored in out. For
                                 "max" pooling these are overwritten with the
                                 argmax indices consumed later by bprop_pool.
            fshape (tuple): Dimensions of each filter (typically height and
                            width).
            ifmshape (tuple): Dimensions of each input feature map (typically
                              number of height and width neurons).
            links (CPUTensor): Input receptive field indices.
            nifm (int): Total number of input feature maps.
            padding (int): Number of additional elements to include along each
                           dimension of each local receptive field during the
                           pooling operation.
            stride (int): Number of neurons to shift the filter at each step.
            fpropbuf (CPUTensor): Temporary storage buffer used to hold the
                                  pooled outputs for a single receptive field.
        """
        # Lay the feature maps side by side so one take() per output unit
        # grabs the receptive field from every map at once.
        rinputs = self.hstack_maps(inputs, nifm)
        for dst in range(ofmsize):
            # For this output unit, get the corresponding receptive fields
            # within all input feature maps.
            rf = rinputs.take(links[dst], axis=0)
            if op.lower() == "max":
                # Save the index of the maximum value within the receptive
                # fields.
                ofmlocs[dst] = rf._tensor.argmax(axis=0)
                # Set the pre-activations to the maximum value.
                maxvals = rf[ofmlocs[dst], range(rf.shape[1])]
                fpropbuf[dst] = maxvals
            elif op.lower() == "avg" or op.lower() == "mean":
                fpropbuf[dst] = rf._tensor.mean(axis=0)
            elif op.lower() == "l2":
                fpropbuf[dst] = self.norm(rf, 2, axis=0)
            else:
                # NOTE(review): the %s here is passed as an args tuple and is
                # never interpolated into the message -- consider %-formatting.
                raise AttributeError("unexpected pooling op type: %s", op)
        # Restore the per-feature-map vertical layout expected by callers.
        out[:] = self.vstack_maps(fpropbuf, nifm)
def bprop_pool(self, out, fouts, inputs, deltas, op, ofmshape, ofmsize,
               ofmlocs, fshape, fpsize, ifmshape, links, nifm, padding,
               stride, bpropbuf):
    """
    Backward propagate the error through a pooling network layer.

    Arguments:
        out (CPUTensor): Where to store the backward propagated errors.
        fouts (CPUTensor): Forward propagated outputs from the previous
                           layer.
        inputs (CPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        deltas (CPUTensor): The error values for this layer
        op (string): The type of pooling operation to apply. We support
                     "max", "avg", "l2" currently.
        ofmshape (tuple): Dimensions of each output feature map (typically
                          height and width).
        ofmsize (int): Total size of each output feature map.
        ofmlocs (CPUTensor): Indices giving the location of each element in
                             each output feature map stored in out.
        fshape (tuple): Dimensions of each filter (typically height and
                        width).
        fpsize (int): The size of each filter.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          height and width).
        links (CPUTensor): Input receptive field indices.
        nifm (int): Total number of input feature maps.
        padding (int): Number of additional elements to include along each
                       dimension of each local receptive field during the
                       pooling operation.
        stride (int): Number of neurons to shift the filter at each step.
        bpropbuf (CPUTensor): Temporary storage buffer used to hold the
                              backpropagated error for a single receptive
                              field
    """
    op = op.lower()
    bpropbuf.fill(0.0)
    # Pre-allocate the scratch slice used inside the loop; its shape
    # depends on which pooling variant is being backpropagated.
    if op == "avg" or op == "mean":
        # Average pooling spreads each delta uniformly over its
        # receptive field, hence the up-front division by the field size.
        self.divide(deltas, fpsize, deltas)
        bprop_slice = self.empty([links.shape[1], bpropbuf.shape[1]])
    elif op == "max":
        col_inds = list(range(bpropbuf.shape[1]))
        bprop_slice = self.empty(bpropbuf.shape[1])
    elif op == "l2":
        rinputs = self.hstack_maps(inputs, nifm)
        rfouts = self.hstack_maps(fouts, nifm)
        bprop_slice = self.empty([links.shape[1], bpropbuf.shape[1]])
    rdeltas = self.hstack_maps(deltas, nifm)
    for dst in range(ofmsize):
        if op == "max":
            # Route each delta back only to the input position that won
            # the max during fprop (its index was saved in ofmlocs).
            rflinks = links[dst]
            inds = rflinks.take(ofmlocs[dst], axis=0)
            # Because we are using advanced indexing into bpropbuf, a
            # copy is unavoidable, hence the additional temp buffer and
            # assignment back
            self.add(bpropbuf[inds, col_inds], rdeltas[dst].transpose(),
                     bprop_slice)
            bpropbuf[inds, col_inds] = bprop_slice[:]
        elif op == "avg" or op == "mean":
            # Accumulate the (already scaled) delta into every input
            # position of this receptive field.
            self.add(bpropbuf[links[dst]], rdeltas[dst].transpose(),
                     bprop_slice)
            bpropbuf[links[dst]] = bprop_slice[:]
        elif op == "l2":
            inds = links[dst]
            rf = rinputs.take(inds, axis=0)
            denom = self.copy(rfouts[dst].transpose())
            # If the L2 norm is zero, the entire receptive field must be
            # zeros. In that case, we set the L2 norm to 1 before using
            # it to normalize the receptive field.
            denom[denom._tensor == 0] = 1
            self.divide(rf, denom, out=rf)
            # NOTE(review): ofmlocs is reused here as a scratch buffer for
            # the normalized-gradient product — confirm callers do not
            # rely on its contents after bprop for the "l2" op.
            self.multiply(rdeltas[dst].transpose(), rf, out=ofmlocs)
            self.add(bpropbuf[inds], ofmlocs, bprop_slice)
            bpropbuf[inds] = bprop_slice[:]
        else:
            raise AttributeError("unexpected pooling op type: %s", op)
    out[:] = self.vstack_maps(bpropbuf, nifm)
def fprop_cmrnorm(self, out, inputs, ifmshape, nifm, ksize, alpha, beta):
    """
    Forward propagate the inputs of a CrossMap response normalization layer
    to produce output pre-activations (ready for transformation by an
    activation function). The normalization is computed across feature
    maps at each pixel point. The output will be same size as input.

    Computes ``out = inputs * (1 + alpha * sum_sq) ** -beta`` where
    ``sum_sq`` is, per channel, the sum of squares over a window of
    ``ksize`` neighboring feature maps (clipped at the channel range).

    Arguments:
        out (CPUTensor): Where to store the forward propagated results.
        inputs (CPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          number of height and width neurons).
        nifm (int): Total number of input feature maps.
        ksize (int): Kernel size. This defines the channel indices to sum
                     over.
        alpha (int): scalar multiplier to multiply the normalization
                     denominator by.
        beta (int): scalar power to raise the normalization denominator by
    """
    (H, W, N) = (ifmshape[-2], ifmshape[-1], inputs.shape[1])
    rinputs = inputs._tensor.reshape((nifm, H, W, N))
    rout = out._tensor.reshape((nifm, H, W, N))
    # Explicit floor division: the original `ksize / 2` relied on Python 2
    # integer-division semantics and would yield a float (breaking the
    # slice bounds) under Python 3.  `//` is identical on Python 2 ints.
    half = ksize // 2
    for i in range(nifm):
        # Window of ksize channels centered (asymmetrically for even
        # ksize) on channel i, clipped to [0, nifm).
        x = rinputs[max(i - half, 0):min(i - half + ksize, nifm)]
        np.square(x).sum(axis=0, out=rout[i])
    self.multiply(out, alpha, out=out)
    self.add(out, 1.0, out=out)
    self.power(out, -beta, out=out)
    self.multiply(inputs, out, out=out)
def bprop_cmrnorm(self, out, fouts, inputs, deltas, ifmshape, nifm, ksize,
                  alpha, beta, bpropbuf):
    """
    Backward propagate the error through a CrossMap response normalization
    layer.

    Arguments:
        out (CPUTensor): Where to store the backward propagated errors.
        fouts (CPUTensor): The forward propagated results.
        inputs (CPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        deltas (CPUTensor): The error values for this layer
        ifmshape (tuple): Dimensions of each input feature map (typically
                          number of height and width neurons).
        nifm (int): Total number of input feature maps.
        ksize (int): Kernel size. This defines the channel indices to sum
                     over.
        alpha (int): scalar multiplier to multiply the normalization
                     denominator by.
        beta (int): scalar power to raise the normalization denominator by
        bpropbuf (CPUTensor): Temporary storage buffer used to hold the
                              normalized outputs for a single receptive
                              field.
    """
    (H, W, N) = (ifmshape[-2], ifmshape[-1], inputs.shape[1])
    rinputs = inputs.reshape((nifm, H, W, N))
    rout = out.reshape((nifm, H, W, N))
    rfouts = fouts.reshape((nifm, H, W, N))
    # otemp := -2 * alpha * beta * fouts * (inputs / fouts) ** (1 / beta),
    # i.e. the derivative contribution of the normalization denominator.
    otemp = self.copy(rfouts)
    # We can do this because rinputs[rfouts == 0].sum() == 0
    otemp[otemp._tensor == 0] = 1.0
    self.divide(rinputs, otemp, out=otemp)
    # itemp := fouts / inputs, the direct (diagonal) term of the gradient.
    itemp = self.copy(rinputs)
    # We can do this because rfouts[rinputs == 0].sum() == 0
    itemp[itemp._tensor == 0] = 1.0
    self.divide(rfouts, itemp, out=itemp)
    self.power(otemp, 1.0 / beta, out=otemp)
    self.multiply(otemp, rfouts, out=otemp)
    self.multiply(otemp, -2 * alpha * beta, out=otemp)
    rout.fill(0.0)
    # Floor division keeps the window bounds integral under Python 3;
    # identical to the original Python 2 `ksize / 2`.
    half = ksize // 2
    for i in range(nifm):
        # Accumulate contributions from every channel j whose
        # normalization window includes channel i.
        for j in range(max(i - half, 0), min(i - half + ksize, nifm)):
            self.multiply(otemp[i], rinputs[j], out=bpropbuf)
            if i == j:
                self.add(bpropbuf, itemp[i], out=bpropbuf)
            self.add(rout[i], bpropbuf, out=rout[i])
    self.multiply(deltas, out, out=out)
def fprop_lcnnorm(self, out, inputs, meandiffs, denoms, ifmshape, nifm,
                  ksize, alpha, beta):
    """
    Forward propagate the inputs of a local contrast normalization layer
    to produce output pre-activations (ready for transformation by an
    activation function). The normalization is computed within feature
    maps at each pixel point. The output will be same size as input.

    Arguments:
        out (CPUTensor): Where to store the forward propagated results.
        inputs (CPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        meandiffs (CPUTensor): Storage buffer that keeps the difference
                               between the avg pools surrounding each
                               pixel and the pixel itself. Should not be
                               overwritten in between calls to fprop and
                               bprop.
        denoms (CPUTensor): Storage buffer that keeps the denominators of
                            the normalization calculated during fprop.
                            Should not be overwritten in between calls to
                            fprop and bprop.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          number of height and width neurons).
        nifm (int): Total number of input feature maps.
        ksize (int): Kernel size. This defines the channel indices to sum
                     over.
        alpha (int): scalar multiplier to multiply the normalization
                     denominator by.
        beta (int): scalar power to raise the normalization denominator by
    """
    (H, W, N) = (ifmshape[-2], ifmshape[-1], inputs.shape[1])
    rinputs = inputs._tensor.reshape((nifm, H, W, N))
    rmeandiff = meandiffs._tensor.reshape((nifm, H, W, N))
    routputs = out._tensor.reshape((nifm, H, W, N))
    # `//` replaces Python 2 integer `/`; `range` replaces `xrange`.
    # Both are behavior-identical on Python 2 and also work on Python 3.
    half = ksize // 2
    # NOTE(review): the code indexes axis 1 with `x` and axis 2 with `y`
    # throughout; the convention is internally consistent, so it is kept.
    # First pass: pixel minus the mean of its spatial ksize x ksize
    # neighborhood (clipped at the borders).
    for y in range(H):
        starty = y - half
        yidx = range(max(starty, 0), min(starty + ksize, H))
        hh = len(yidx)
        for x in range(W):
            startx = x - half
            xidx = range(max(startx, 0), min(startx + ksize, W))
            ww = len(xidx)
            patch = rinputs.take(xidx, axis=1).take(
                yidx, axis=2).reshape((nifm, hh, ww, N))
            rmeandiff[:, x, y, :] = rinputs[:, x, y, :] - patch.mean(
                axis=(1, 2))
    # Second pass: sum of squared mean-differences over the same
    # neighborhood forms the normalization denominator.
    for y in range(H):
        starty = y - half
        yidx = range(max(starty, 0), min(starty + ksize, H))
        hh = len(yidx)
        for x in range(W):
            startx = x - half
            xidx = range(max(startx, 0), min(startx + ksize, W))
            ww = len(xidx)
            patch = rmeandiff.take(xidx, axis=1).take(
                yidx, axis=2).reshape((nifm, hh, ww, N))
            np.square(patch).sum(axis=(1, 2), out=routputs[:, x, y, :])
    # out = inputs * (1 + alpha * sum_sq) ** -beta; keep the denominator
    # in `denoms` for reuse during bprop.
    self.multiply(out, alpha, out=denoms)
    self.add(denoms, 1, out=denoms)
    self.power(denoms, -beta, out=out)
    self.multiply(inputs, out, out=out)
def bprop_lcnnorm(self, out, fouts, deltas, meandiffs, denoms, ifmshape,
                  nifm, ksize, alpha, beta):
    """
    Backward propagate the error through a local contrast normalization
    layer.

    Notes:
        This will overwrite fouts

    Arguments:
        out (CPUTensor): Where to store the backward propagated errors.
        fouts (CPUTensor): The forward propagated results.
        deltas (CPUTensor): The error values for this layer
        meandiffs (CPUTensor): Storage buffer that keeps the difference
                               between the avg pools surrounding each
                               pixel and the pixel itself. Should not be
                               overwritten in between calls to fprop and
                               bprop.
        denoms (CPUTensor): Storage buffer that keeps the denominators of
                            the normalization calculated during fprop.
                            Should not be overwritten in between calls to
                            fprop and bprop.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          number of height and width neurons).
        nifm (int): Total number of input feature maps.
        ksize (int): Kernel size. This defines the channel indices to sum
                     over.
        alpha (int): scalar multiplier to multiply the normalization
                     denominator by.
        beta (int): scalar power to raise the normalization denominator by
    """
    (H, W, N) = (ifmshape[-2], ifmshape[-1], fouts.shape[1])
    # Scale the forward outputs into the chain-rule factor for the
    # denominator term (fouts is deliberately clobbered, see Notes).
    self.multiply(fouts, -2 * alpha * beta, out=fouts)
    self.multiply(fouts, deltas, out=fouts)
    self.divide(fouts, denoms, out=fouts)
    rfouts = fouts._tensor.reshape((nifm, H, W, N))
    rdeltas = out._tensor.reshape((nifm, H, W, N))
    # `//` replaces Python 2 integer `/` (behavior-identical on ints and
    # Python 3 safe); `range` replaces the Python 2-only `xrange`.
    # offset flips the fprop window so each pixel gathers from every
    # neighborhood that contained it during fprop.
    offset = ksize // 2 - ksize + 1
    for y in range(H):
        starty = y + offset
        yidx = range(max(starty, 0), min(starty + ksize, H))
        hh = len(yidx)
        for x in range(W):
            startx = x + offset
            xidx = range(max(startx, 0), min(startx + ksize, W))
            ww = len(xidx)
            patch = rfouts.take(xidx, axis=1).take(
                yidx, axis=2).reshape((nifm, hh, ww, N))
            np.sum(patch, axis=(1, 2), out=rdeltas[:, x, y, :])
    self.multiply(out, meandiffs, out=out)
    # Add the direct (diagonal) term: deltas * denoms ** -beta.
    self.power(denoms, -beta, out=fouts)
    self.multiply(deltas, fouts, out=fouts)
    self.add(out, fouts, out=out)
def fprop_cmpool(self, out, inputs, weights, ifmshape, ifmsize):
    """
    Forward propagate a CrossMap Pooling layer: each output feature map
    is a weighted sum of all input feature maps.

    Arguments:
        out (CPUTensor): Where to store the forward propagated results.
        inputs (CPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        weights (CPUTensor): The weight coefficient values for this layer.
        ifmshape (tuple): Dimensions of each input feature map (typically
                          number of height and width neurons).
        ifmsize (int): Total size of each input feature map.
    """
    scratch = self.empty([ifmsize, out.shape[1]])
    num_in_maps = weights.shape[0]
    num_out_maps = weights.shape[1]
    for dst_idx in range(num_out_maps):
        # Slice out the destination map and accumulate into it.
        dst_map = out[(dst_idx * ifmsize):((dst_idx + 1) * ifmsize)]
        dst_map.fill(0.0)
        for src_idx in range(num_in_maps):
            src_map = inputs[(src_idx * ifmsize):((src_idx + 1) * ifmsize)]
            self.multiply(src_map, weights[src_idx, dst_idx], scratch)
            self.add(dst_map, scratch, dst_map)
def bprop_cmpool(self, out, weights, deltas, ifmshape, ifmsize):
    """
    Backward propagate the error through a CrossMap pooling layer.

    The backward pass of a linear cross-map combination is the forward
    pass with the transposed weight matrix, so this simply delegates.

    Arguments:
        out (CPUTensor): Where to store the forward propagated results.
        weights (CPUTensor): The weight coefficient values for this layer.
        deltas (CPUTensor): The error values for this layer
        ifmshape (tuple): Dimensions of each input feature map (typically
                          number of height and width neurons).
        ifmsize (int): Total size of each input feature map.
    """
    flipped_weights = weights.transpose()
    self.fprop_cmpool(out, deltas, flipped_weights, ifmshape, ifmsize)
def update_cmpool(self, out, inputs, deltas, ifmshape, ifmsize, updatebuf):
    """
    Compute the updated gradient for a CrossMap pooling layer.

    Each weight gradient is the dot product of the corresponding input
    feature map with the matching output delta map.

    Arguments:
        out (CPUTensor): Where to store the updated gradient value.
        inputs (CPUTensor): Will be either the dataset input values (first
                            layer), or the outputs from the previous layer.
        deltas (CPUTensor): The error values for this layer
        ifmshape (tuple): Dimensions of each input feature map (typically
                          height and width).
        ifmsize (int): Total size of each input feature map.
        updatebuf (CPUTensor): Temporary storage buffer used to hold the
                               updated gradient for a single receptive
                               field
    """
    out.fill(0.0)
    num_in_maps = out.shape[0]
    num_out_maps = out.shape[1]
    for dst_idx in range(num_out_maps):
        dmap = deltas[(dst_idx * ifmsize):((dst_idx + 1) * ifmsize)]
        for src_idx in range(num_in_maps):
            imap = inputs[(src_idx * ifmsize):((src_idx + 1) * ifmsize)]
            # Flatten both maps so the gradient reduces to a 1x1 dot
            # product (reshape is a no-op after the first iteration).
            dmap = dmap.reshape((1, dmap.shape[0] * dmap.shape[1]))
            imap = imap.reshape((imap.shape[0] * imap.shape[1], 1))
            self.dot(dmap, imap, updatebuf)
            out[src_idx, dst_idx] = updatebuf
def exp_mavg(self, mavg, newval, rho):
    """
    Fold a new sample into an exponential moving average, in place.

    Arguments:
        mavg: The running value of the moving average; overwritten with
              the blended result.
        newval: New sample to be added to the moving average.
        rho: Interpolation value (weight given to the old average).
    """
    old_avg = mavg._tensor
    sample = newval._tensor
    mavg._tensor[:] = rho * old_avg + (1.0 - rho) * sample
def ada_update(self, ps_item, us_item, gs_item, ds_item, ls_item, ss_item,
               rho, epsilon):
    """
    Apply one Adadelta-style parameter update in place.

    Computes ls = -sqrt((ds + epsilon) / (gs + epsilon)) * us and adds it
    to the parameters, while maintaining the two running averages.

    Arguments:
        ps_item: parameter values; updated in place.
        us_item: current gradient for the parameters.
        gs_item: running average of squared gradients, E[Grad^2];
                 updated in place.
        ds_item: running average of squared deltas, E[Delt^2];
                 updated in place.
        ls_item: scratch buffer; holds the computed parameter delta.
        ss_item: scratch buffer for intermediate squared terms.
        rho: decay constant for both running averages.
        epsilon: small constant for numerical stability.
    """
    # Accumulate E[Grad^2]
    self.multiply(gs_item, rho, out=gs_item)
    self.multiply(us_item, us_item, out=ss_item)
    self.multiply(ss_item, 1.0 - rho, out=ss_item)
    self.add(gs_item, ss_item, out=gs_item)
    # Calculate Updates
    self.add(gs_item, epsilon, out=ss_item)
    self.add(ds_item, epsilon, out=ls_item)
    self.divide(ls_item, ss_item, out=ls_item)
    self.sqrt(ls_item, out=ls_item)
    # Negate so the step descends the gradient.
    self.multiply(ls_item, -1.0, out=ls_item)
    self.multiply(ls_item, us_item, out=ls_item)
    # Accumulate E[Delt^2]
    self.multiply(ds_item, rho, out=ds_item)
    self.multiply(ls_item, ls_item, out=ss_item)
    self.multiply(ss_item, 1.0 - rho, out=ss_item)
    self.add(ds_item, ss_item, out=ds_item)
    # Final update to the params
    self.add(ps_item, ls_item, out=ps_item)
def rms_update(self, params, updates, run_squares, velocity, scratch_space,
               gamma, epsilon, learning_rate, momentum_coef):
    """
    Apply one RMSProp parameter update (optionally with momentum) in place.

    Arguments:
        params: parameter values; updated in place.
        updates: current gradient for the parameters.
        run_squares: running average of squared gradients; updated in
                     place.
        velocity: momentum buffer; only used when momentum_coef != 0.
        scratch_space: temporary buffer for intermediate results.
        gamma: decay constant for the running squared-gradient average.
        epsilon: small constant for numerical stability.
        learning_rate: step size scalar.
        momentum_coef: momentum coefficient; 0 disables momentum.
    """
    # Update running squares
    self.multiply(run_squares, gamma, out=run_squares)
    self.multiply(updates, updates, out=scratch_space)
    self.multiply(scratch_space, 1.0 - gamma, out=scratch_space)
    self.add(run_squares, scratch_space, out=run_squares)
    # Now scale the gradient by lr / rms(grad) (with a epsilon term for
    # stability)
    self.sqrt(run_squares, out=scratch_space)
    self.add(scratch_space, epsilon, out=scratch_space)
    self.divide(learning_rate, scratch_space, out=scratch_space)
    self.multiply(scratch_space, updates, out=scratch_space)
    # Now update the params
    if momentum_coef == 0:
        # Plain RMSProp step.
        self.subtract(params, scratch_space, out=params)
    else:
        # Momentum variant: blend the scaled step into the velocity
        # buffer, then apply the velocity.
        self.multiply(velocity, momentum_coef, out=velocity)
        self.subtract(velocity, scratch_space, out=velocity)
        self.add(params, velocity, out=params)
def set_weights(self, dev_weights, host_weights):
    """
    Copy the host_weights values into dev_weights, in place.

    Arguments:
        dev_weights: destination tensor; its contents are overwritten.
        host_weights: source of the new weight values (any object
                      assignable via slice assignment).
    """
    dev_weights[:] = host_weights
|
{
"content_hash": "299f0483863eb65284464f0b14c31653",
"timestamp": "",
"source": "github",
"line_count": 1717,
"max_line_length": 79,
"avg_line_length": 44.27897495631916,
"alnum_prop": 0.5516066660528497,
"repo_name": "ruguevara/neon",
"id": "0639055f90ae0d7714b47201727d1dc9bff2bf3f",
"size": "76768",
"binary": false,
"copies": "7",
"ref": "refs/heads/master",
"path": "neon/backends/cpu.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Makefile",
"bytes": "6451"
},
{
"name": "Python",
"bytes": "909126"
},
{
"name": "Shell",
"bytes": "4809"
}
],
"symlink_target": ""
}
|
from oslo_config import cfg
from oslo_log import log
from neutron.common import constants
from neutron.extensions import portbindings
from neutron.i18n import _LE, _LW
from neutron.plugins.common import constants as p_const
from neutron.plugins.ml2 import driver_api as api
from neutron.plugins.ml2.drivers.mech_sriov import exceptions as exc
LOG = log.getLogger(__name__)

# VLAN id reported in binding:vif_details for flat (untagged) networks.
FLAT_VLAN = 0

# Configuration options read from the [ml2_sriov] section.
sriov_opts = [
    cfg.ListOpt('supported_pci_vendor_devs',
                default=['15b3:1004', '8086:10ca'],
                help=_("Supported PCI vendor devices, defined by "
                       "vendor_id:product_id according to the PCI ID "
                       "Repository. Default enables support for Intel "
                       "and Mellanox SR-IOV capable NICs")),
    cfg.BoolOpt('agent_required',
                default=False,
                help=_("SRIOV neutron agent is required for port binding")),
]
cfg.CONF.register_opts(sriov_opts, "ml2_sriov")
class SriovNicSwitchMechanismDriver(api.MechanismDriver):
    """Mechanism Driver for SR-IOV capable NIC based switching.

    The SriovNicSwitchMechanismDriver integrates the ml2 plugin with the
    sriovNicSwitch L2 agent depending on configuration option.
    Port binding with this driver may require the sriovNicSwitch agent
    to be running on the port's host, and that agent to have connectivity
    to at least one segment of the port's network.

    L2 agent is not essential for port binding; port binding is handled by
    VIF Driver via libvirt domain XML.
    L2 Agent presents in order to manage port update events.
    If vendor NIC does not support updates, setting agent_required = False
    will allow to use Mechanism Driver without L2 agent.
    """

    def __init__(self,
                 agent_type=constants.AGENT_TYPE_NIC_SWITCH,
                 vif_type=portbindings.VIF_TYPE_HW_VEB,
                 vif_details={portbindings.CAP_PORT_FILTER: False},
                 supported_vnic_types=[portbindings.VNIC_DIRECT,
                                       portbindings.VNIC_MACVTAP],
                 supported_vnic_types_default=None,
                 supported_pci_vendor_info=None):
        """Initialize base class for SriovNicSwitch L2 agent type.

        :param agent_type: Constant identifying agent type in agents_db
        :param vif_type: Value for binding:vif_type when bound
        :param vif_details: Dictionary with details for VIF driver when bound
        :param supported_vnic_types: The binding:vnic_type values we can bind
        :param supported_pci_vendor_info: The pci_vendor_info values to bind
        """
        self.agent_type = agent_type
        # Copy the (module-level, shared) mutable default arguments so a
        # driver instance can never mutate the defaults seen by another.
        self.supported_vnic_types = list(supported_vnic_types)
        self.vif_type = vif_type
        self.vif_details = dict(vif_details)
        # Fix: this argument was previously accepted but silently ignored.
        # Store it as the pre-initialize() value; initialize() overwrites
        # it from the [ml2_sriov] configuration section.
        self.pci_vendor_info = supported_pci_vendor_info
        self.supported_network_types = (p_const.TYPE_VLAN, p_const.TYPE_FLAT)

    def initialize(self):
        """Load and validate [ml2_sriov] configuration options."""
        try:
            self.pci_vendor_info = self._parse_pci_vendor_config(
                cfg.CONF.ml2_sriov.supported_pci_vendor_devs)
            self.agent_required = cfg.CONF.ml2_sriov.agent_required
        except ValueError:
            LOG.exception(_LE("Failed to parse supported PCI vendor devices"))
            raise cfg.Error(_("Parsing supported pci_vendor_devs failed"))

    def bind_port(self, context):
        """Attempt to bind the port, honoring vnic type, PCI vendor and
        (optionally) agent liveness checks."""
        LOG.debug("Attempting to bind port %(port)s on "
                  "network %(network)s",
                  {'port': context.current['id'],
                   'network': context.network.current['id']})
        vnic_type = context.current.get(portbindings.VNIC_TYPE,
                                        portbindings.VNIC_NORMAL)
        if vnic_type not in self.supported_vnic_types:
            LOG.debug("Refusing to bind due to unsupported vnic_type: %s",
                      vnic_type)
            return
        if not self._check_supported_pci_vendor_device(context):
            LOG.debug("Refusing to bind due to unsupported pci_vendor device")
            return
        if self.agent_required:
            # Only bind through a live agent on the port's host.
            for agent in context.host_agents(self.agent_type):
                LOG.debug("Checking agent: %s", agent)
                if agent['alive']:
                    if self.try_to_bind(context, agent):
                        return
                else:
                    LOG.warning(_LW("Attempting to bind with dead agent: %s"),
                                agent)
        else:
            self.try_to_bind(context)

    def try_to_bind(self, context, agent=None):
        """Bind on the first acceptable segment; return True on success."""
        for segment in context.segments_to_bind:
            if self.check_segment(segment, agent):
                context.set_binding(segment[api.ID],
                                    self.vif_type,
                                    self._get_vif_details(segment),
                                    constants.PORT_STATUS_ACTIVE)
                LOG.debug("Bound using segment: %s", segment)
                return True
        return False

    def check_segment(self, segment, agent=None):
        """Check if segment can be bound.

        :param segment: segment dictionary describing segment to bind
        :param agent: agents_db entry describing agent to bind or None
        :returns: True if segment can be bound for agent
        """
        network_type = segment[api.NETWORK_TYPE]
        if network_type in self.supported_network_types:
            if agent:
                mappings = agent['configurations'].get('device_mappings', {})
                LOG.debug("Checking segment: %(segment)s "
                          "for mappings: %(mappings)s ",
                          {'segment': segment, 'mappings': mappings})
                return segment[api.PHYSICAL_NETWORK] in mappings
            return True
        return False

    def _check_supported_pci_vendor_device(self, context):
        """Return True when the port's pci_vendor_info is on the allow
        list (or when no allow list is configured at all)."""
        if self.pci_vendor_info:
            profile = context.current.get(portbindings.PROFILE, {})
            if not profile:
                LOG.debug("Missing profile in port binding")
                return False
            pci_vendor_info = profile.get('pci_vendor_info')
            if not pci_vendor_info:
                LOG.debug("Missing pci vendor info in profile")
                return False
            if pci_vendor_info not in self.pci_vendor_info:
                LOG.debug("Unsupported pci_vendor %s", pci_vendor_info)
                return False
            return True
        return False

    def _get_vif_details(self, segment):
        """Build the vif_details dict for the segment, including the VLAN
        id (FLAT_VLAN for flat networks)."""
        network_type = segment[api.NETWORK_TYPE]
        if network_type == p_const.TYPE_FLAT:
            vlan_id = FLAT_VLAN
        elif network_type == p_const.TYPE_VLAN:
            vlan_id = segment[api.SEGMENTATION_ID]
        else:
            raise exc.SriovUnsupportedNetworkType(net_type=network_type)
        vif_details = self.vif_details.copy()
        vif_details[portbindings.VIF_DETAILS_VLAN] = str(vlan_id)
        return vif_details

    def _parse_pci_vendor_config(self, pci_vendor_list):
        """Validate 'vendor_id:product_id' entries; raise ValueError on
        malformed input, skip blank entries."""
        parsed_list = []
        for elem in pci_vendor_list:
            elem = elem.strip()
            if not elem:
                continue
            split_result = elem.split(':')
            if len(split_result) != 2:
                raise ValueError(_("Invalid pci_vendor_info: '%s'") % elem)
            vendor_id = split_result[0].strip()
            if not vendor_id:
                raise ValueError(_("Missing vendor_id in: '%s'") % elem)
            product_id = split_result[1].strip()
            if not product_id:
                raise ValueError(_("Missing product_id in: '%s'") % elem)
            parsed_list.append(elem)
        return parsed_list
|
{
"content_hash": "aca839531b37b6e58cd0abd69051946c",
"timestamp": "",
"source": "github",
"line_count": 178,
"max_line_length": 78,
"avg_line_length": 42.8876404494382,
"alnum_prop": 0.590123133350799,
"repo_name": "kongseokhwan/kulcloud-iitp-neutron",
"id": "d70edf22fd1cb7d09c99a3bdf0a12deffaf7b7b5",
"size": "8227",
"binary": false,
"copies": "13",
"ref": "refs/heads/master",
"path": "neutron/plugins/ml2/drivers/mech_sriov/mech_driver.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Mako",
"bytes": "980"
},
{
"name": "Python",
"bytes": "7271508"
},
{
"name": "Shell",
"bytes": "12819"
}
],
"symlink_target": ""
}
|
from ....tests.helper import catch_warnings
from .. import converters
from .. import exceptions
from .. import tree
def test_reraise():
    """vo_reraise must re-raise the original error with the extra
    context text appended to its message."""
    def trigger():
        raise RuntimeError("This failed")

    reraised = False
    try:
        try:
            trigger()
        except RuntimeError as err:
            exceptions.vo_reraise(err, additional="From here")
    except RuntimeError as err:
        reraised = True
        assert "From here" in str(err)
    assert reraised
def test_parse_vowarning():
    """parse_vowarning must decompose a formatted W47 warning message
    back into its constituent fields."""
    config = {'pedantic': True,
              'filename': 'foo.xml'}
    pos = (42, 64)
    # Trigger a W47 warning by building a char field with no arraysize.
    with catch_warnings(exceptions.W47) as w:
        field = tree.Field(
            None, name='c', datatype='char',
            config=config, pos=pos)
        converters.get_converter(field, config=config, pos=pos)

    parts = exceptions.parse_vowarning(str(w[0].message))

    expected = {
        'warning': 'W47',
        'number': 47,
        'nline': 42,
        'nchar': 64,
        'message': 'Missing arraysize indicates length 1',
        'doc_url': 'io/votable/api_exceptions.html#w47',
        'is_warning': True,
        'is_exception': False,
        'is_something': True,
    }
    assert parts == expected
|
{
"content_hash": "841e617c411b304388030454a21bc57b",
"timestamp": "",
"source": "github",
"line_count": 46,
"max_line_length": 67,
"avg_line_length": 26.58695652173913,
"alnum_prop": 0.5437448896156991,
"repo_name": "piotroxp/scibibscan",
"id": "93f44495be0516475c7bbc506910cdb41c9204ef",
"size": "1296",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "scib/lib/python3.5/site-packages/astropy/io/votable/tests/exception_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "C",
"bytes": "568253"
},
{
"name": "C++",
"bytes": "8204"
},
{
"name": "CSS",
"bytes": "10578"
},
{
"name": "Fortran",
"bytes": "3707"
},
{
"name": "HTML",
"bytes": "1172"
},
{
"name": "Python",
"bytes": "13727486"
},
{
"name": "Shell",
"bytes": "4887"
},
{
"name": "TeX",
"bytes": "678"
}
],
"symlink_target": ""
}
|
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Initial South migration for the ``july`` app: creates the custom
    ``july.User`` model (an auth-style user extended with location, team
    and profile fields) plus the three many-to-many join tables."""

    def forwards(self, orm):
        # Adding model 'User'
        db.create_table(u'july_user', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('password', self.gf('django.db.models.fields.CharField')(max_length=128)),
            ('last_login', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('username', self.gf('django.db.models.fields.CharField')(unique=True, max_length=30)),
            ('first_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('last_name', self.gf('django.db.models.fields.CharField')(max_length=30, blank=True)),
            ('email', self.gf('django.db.models.fields.EmailField')(max_length=75, blank=True)),
            ('is_staff', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('is_active', self.gf('django.db.models.fields.BooleanField')(default=True)),
            ('is_superuser', self.gf('django.db.models.fields.BooleanField')(default=False)),
            ('date_joined', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
            ('location', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='location_members', null=True, to=orm['people.Location'])),
            ('team', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='team_members', null=True, to=orm['people.Team'])),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('picture_url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
        ))
        db.send_create_signal(u'july', ['User'])

        # Adding M2M table for field groups on 'User'
        db.create_table(u'july_user_groups', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'july.user'], null=False)),
            ('group', models.ForeignKey(orm[u'auth.group'], null=False))
        ))
        db.create_unique(u'july_user_groups', ['user_id', 'group_id'])

        # Adding M2M table for field user_permissions on 'User'
        db.create_table(u'july_user_user_permissions', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'july.user'], null=False)),
            ('permission', models.ForeignKey(orm[u'auth.permission'], null=False))
        ))
        db.create_unique(u'july_user_user_permissions', ['user_id', 'permission_id'])

        # Adding M2M table for field projects on 'User'
        db.create_table(u'july_user_projects', (
            ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
            ('user', models.ForeignKey(orm[u'july.user'], null=False)),
            ('project', models.ForeignKey(orm[u'people.project'], null=False))
        ))
        db.create_unique(u'july_user_projects', ['user_id', 'project_id'])

    def backwards(self, orm):
        # Deleting model 'User' (M2M join tables are dropped separately
        # below; South does not cascade them automatically).
        db.delete_table(u'july_user')

        # Removing M2M table for field groups on 'User'
        db.delete_table('july_user_groups')

        # Removing M2M table for field user_permissions on 'User'
        db.delete_table('july_user_user_permissions')

        # Removing M2M table for field projects on 'User'
        db.delete_table('july_user_projects')

    # Frozen ORM snapshot used by South at migration time; auto-generated,
    # do not edit by hand.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'july.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'location': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'location_members'", 'null': 'True', 'to': u"orm['people.Location']"}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'picture_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['people.Project']", 'null': 'True', 'blank': 'True'}),
            'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'team_members'", 'null': 'True', 'to': u"orm['people.Team']"}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'people.location': {
            'Meta': {'object_name': 'Location'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
            'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'people.project': {
            'Meta': {'object_name': 'Project'},
            'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'forked': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'forks': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'parent_url': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50'}),
            'url': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'watchers': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        },
        u'people.team': {
            'Meta': {'object_name': 'Team'},
            'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'primary_key': 'True'}),
            'total': ('django.db.models.fields.IntegerField', [], {'default': '0'})
        }
    }

    complete_apps = ['july']
|
{
"content_hash": "9df41d366f4ccf2d44a9cba8a3eb5abb",
"timestamp": "",
"source": "github",
"line_count": 140,
"max_line_length": 187,
"avg_line_length": 67.90714285714286,
"alnum_prop": 0.5746292205743136,
"repo_name": "ChimeraCoder/GOctober",
"id": "32aa5878d51ef265c6c4b09da8da9647be0d398c",
"size": "9531",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "july/migrations/0001_initial.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "198259"
},
{
"name": "JavaScript",
"bytes": "1155570"
},
{
"name": "Python",
"bytes": "238468"
},
{
"name": "Shell",
"bytes": "507"
}
],
"symlink_target": ""
}
|
"""Command line for Django."""
import os
import sys
from django.core.management import execute_from_command_line
def main():
    """Point DJANGO_SETTINGS_MODULE at this package (unless already set) and
    hand control to Django's command-line dispatcher."""
    settings_module = "{package}.settings".format(package=__package__)
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)
    execute_from_command_line(sys.argv)


if __name__ == "__main__":
    main()
|
{
"content_hash": "c10c68587e0fcc88658275bdb93b466c",
"timestamp": "",
"source": "github",
"line_count": 15,
"max_line_length": 75,
"avg_line_length": 22.4,
"alnum_prop": 0.6339285714285714,
"repo_name": "novapost/django-ticketoffice",
"id": "3da68c86e577be2ed30ed0d72026bb8011be73aa",
"size": "382",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "demo/demoproject/manage.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "67301"
},
{
"name": "Shell",
"bytes": "5101"
}
],
"symlink_target": ""
}
|
# Python 2 behaviour checks for the unichr() builtin (pyjaco compares the
# printed output of this script against its JavaScript translation).
print unichr(42)
print unichr(43)
# unichr() only accepts an integer; a string argument must raise TypeError.
try:
    print unichr("foo")
except TypeError, E:
    print "Could not convert string:", E
# The result must be a unicode instance (Python 2 distinguishes str/unicode).
print unichr(42).__class__ == unicode
print type(unichr(42)), unicode
|
{
"content_hash": "98baa2666ef42711b924b1b83b9a92d4",
"timestamp": "",
"source": "github",
"line_count": 8,
"max_line_length": 40,
"avg_line_length": 24.375,
"alnum_prop": 0.6923076923076923,
"repo_name": "buchuki/pyjaco",
"id": "4dce2f7b85f5231171c4139966884e349f73bd13",
"size": "195",
"binary": false,
"copies": "1",
"ref": "refs/heads/devel",
"path": "tests/builtin/unichr.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "136685"
},
{
"name": "Python",
"bytes": "187115"
},
{
"name": "Shell",
"bytes": "879"
}
],
"symlink_target": ""
}
|
# Identity used for the merge commits created by the automerger.
AUTOMERGER_NAME = 'Chromium+Blink automerger'
AUTOMERGER_EMAIL = 'chrome-blink-automerger@chromium.org'
# Source repositories being merged.
BLINK_REPO_URL = 'https://chromium.googlesource.com/chromium/blink.git'
CHROMIUM_REPO_URL = 'https://chromium.googlesource.com/chromium/src.git'
# Each tuple pairs a Chromium ref with the Blink ref it should be merged
# with; the third field controls whether a Cr-Commit-Position footer is
# appended (pending refs get none).
BRANCHES_TO_MERGE = [
    # Chromium ref, Blink ref, append_commit_position
    ('refs/heads/master', 'refs/heads/master', True),
    ('refs/pending/heads/master', 'refs/heads/master', False),
    ('refs/branch-heads/2454', 'refs/branch-heads/chromium/2454', True),
    ('refs/pending/branch-heads/2454', 'refs/branch-heads/chromium/2454', False),
    ('refs/branch-heads/2490', 'refs/branch-heads/chromium/2490', True),
    ('refs/pending/branch-heads/2490', 'refs/branch-heads/chromium/2490', False),
]
# %-template for the generated merge commit message; filled with the merged
# Blink SHA1, branch and revision number.
MERGE_MSG = """Merge Chromium + Blink git repositories
Blink SHA1: %(blink_sha)s
Blink revision: %(blink_branch)s@%(blink_rev)s
BUG=431458
"""
|
{
"content_hash": "458b8d5a1c8d1050dc7cd161fbe43f07",
"timestamp": "",
"source": "github",
"line_count": 25,
"max_line_length": 96,
"avg_line_length": 40.36,
"alnum_prop": 0.6392467789890981,
"repo_name": "primiano/chrome-blink-automerger",
"id": "574ffd719758da3c32902f81c45fd36f0fd319b1",
"size": "1173",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "history_rewrite_scripts/config.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "30073"
}
],
"symlink_target": ""
}
|
"""Helper functions for running commands as subprocesses."""
import logging
import pathlib
import subprocess
from typing import Optional, Sequence, Union
def run_command(
    command: Sequence[str],
    *,  # Ensures that the rest of the args are passed explicitly.
    cwd: Optional[Union[str, pathlib.Path]] = None,
    cmd_input: Optional[str] = None,
    exitcode_only: bool = False) -> Union[str, int]:
  """Runs a command and returns the output as a string.

  For example, run_command(['echo', ' Hello, world!\n']) returns the string
  'Hello, world!' and run_command(['pwd'], cwd='/usr/local/bin') returns the
  string '/usr/local/bin'.

  Args:
    command:
      A sequence of strings representing the command to run, starting with the
      executable name followed by the arguments.
    cwd:
      The working directory in which to run the command; if not specified,
      defaults to the current working directory.
    cmd_input:
      Input text that should be automatically passed to the process's stdin.
    exitcode_only:
      Avoid re-raising any errors or logging any output for errors. Useful for
      commands that are expected to return a non-zero exit status.

  Returns:
    If |exitcode_only| is True, then only the exit code (an int) is returned.
    Otherwise, output (stdout) of the command as a string. Leading and trailing
    whitespace are stripped from the output.

  Raises:
    CalledProcessError:
      The command returned a non-zero exit code indicating failure; the error
      code is printed along with the error message (stderr) and output (stdout),
      if any. Only raised when |exitcode_only| is False.
    FileNotFoundError: The executable specified in the command was not found.
  """
  try:
    # check is disabled in exitcode_only mode so that a non-zero exit status
    # is reported via the return value instead of an exception.
    run_result: subprocess.CompletedProcess = subprocess.run(
        command,
        capture_output=True,
        text=True,
        check=not exitcode_only,
        cwd=cwd,
        input=cmd_input)
  except subprocess.CalledProcessError as e:
    # Log the full context (exit code, stderr, stdout) before re-raising so
    # failures are diagnosable from the log alone.
    command_str = ' '.join(command)
    error_msg = f'Command "{command_str}" failed with code {e.returncode}.'
    if e.stderr:
      error_msg += f'\nSTDERR: {e.stderr}'
    if e.stdout:
      error_msg += f'\nSTDOUT: {e.stdout}'
    logging.error(error_msg)
    raise
  if exitcode_only:
    return run_result.returncode
  return str(run_result.stdout).strip()
|
{
"content_hash": "87b7fe7b09015cff554d9be7147994de",
"timestamp": "",
"source": "github",
"line_count": 68,
"max_line_length": 80,
"avg_line_length": 35.161764705882355,
"alnum_prop": 0.6587202007528231,
"repo_name": "chromium/chromium",
"id": "8efc59f978347aa32fd9f375ae281f7a86a75ec8",
"size": "2550",
"binary": false,
"copies": "8",
"ref": "refs/heads/main",
"path": "tools/android/python_utils/subprocess_utils.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
import re
from django.utils.encoding import force_unicode
from django.utils.functional import allow_lazy
from django.utils.translation import ugettext_lazy, ugettext as _
from htmlentitydefs import name2codepoint
# Capitalizes the first letter of a string.
def capfirst(x):
    # Falsy input (empty string, None) is returned unchanged, matching the
    # short-circuit `x and ...` form this replaces.
    if not x:
        return x
    text = force_unicode(x)
    return text[0].upper() + text[1:]
capfirst = allow_lazy(capfirst, unicode)
def wrap(text, width):
    """
    A word-wrap function that preserves existing line breaks and most spaces in
    the text. Expects that existing line breaks are posix newlines.
    """
    text = force_unicode(text)
    def _generator():
        # Split only on single spaces so runs of spaces stay attached to words.
        it = iter(text.split(' '))
        word = it.next()  # Python 2 iterator protocol
        yield word
        # pos tracks the length of the current output line; a word containing
        # '\n' resets the count to the text after its last newline.
        pos = len(word) - word.rfind('\n') - 1
        for word in it:
            if "\n" in word:
                lines = word.split('\n')
            else:
                lines = (word,)
            # +1 accounts for the separating space before this word.
            pos += len(lines[0]) + 1
            if pos > width:
                # Break the line before this word instead of emitting a space.
                yield '\n'
                pos = len(lines[-1])
            else:
                yield ' '
                if len(lines) > 1:
                    # The word itself contained a newline; restart the count.
                    pos = len(lines[-1])
            yield word
    return u''.join(_generator())
wrap = allow_lazy(wrap, unicode)
def truncate_words(s, num, end_text='...'):
    """Truncates a string after a certain number of words. Takes an optional
    argument of what should be used to notify that the string has been
    truncated, defaulting to ellipsis (...)
    Newlines in the string will be stripped.
    """
    limit = int(num)
    words = force_unicode(s).split()
    if len(words) <= limit:
        # Short enough already; re-join to normalize whitespace/newlines.
        return u' '.join(words)
    words = words[:limit]
    if not words[-1].endswith(end_text):
        words.append(end_text)
    return u' '.join(words)
truncate_words = allow_lazy(truncate_words, unicode)
def truncate_html_words(s, num, end_text='...'):
    """Truncates HTML to a certain number of words (not counting tags and
    comments). Closes opened tags if they were correctly closed in the given
    html. Takes an optional argument of what should be used to notify that the
    string has been truncated, defaulting to ellipsis (...).
    Newlines in the HTML are preserved.
    """
    s = force_unicode(s)
    length = int(num)
    if length <= 0:
        return u''
    # HTML 4 elements that never take a closing tag, so they never count as
    # "open".
    html4_singlets = ('br', 'col', 'link', 'base', 'img', 'param', 'area', 'hr', 'input')
    # Set up regular expressions
    re_words = re.compile(r'&.*?;|<.*?>|(\w[\w-]*)', re.U)  # entity | tag | word
    re_tag = re.compile(r'<(/)?([^ ]+?)(?: (/)| .*?)?>')    # (closing?, name, self-closing?)
    # Count non-HTML words and keep note of open tags
    pos = 0           # current scan position in s
    end_text_pos = 0  # position just past the length-th word (truncate point)
    words = 0
    open_tags = []    # most recently opened tag first
    while words <= length:
        m = re_words.search(s, pos)
        if not m:
            # Checked through whole string
            break
        pos = m.end(0)
        if m.group(1):
            # It's an actual non-HTML word
            words += 1
            if words == length:
                end_text_pos = pos
            continue
        # Check for tag
        tag = re_tag.match(m.group(0))
        if not tag or end_text_pos:
            # Don't worry about non tags or tags after our truncate point
            continue
        closing_tag, tagname, self_closing = tag.groups()
        tagname = tagname.lower()  # Element names are always case-insensitive
        if self_closing or tagname in html4_singlets:
            pass
        elif closing_tag:
            # Check for match in open tags list
            try:
                i = open_tags.index(tagname)
            except ValueError:
                pass
            else:
                # SGML: An end tag closes, back to the matching start tag, all unclosed intervening start tags with omitted end tags
                open_tags = open_tags[i+1:]
        else:
            # Add it to the start of the open tags list
            open_tags.insert(0, tagname)
    if words <= length:
        # Don't try to close tags if we don't need to truncate
        return s
    out = s[:end_text_pos]
    if end_text:
        out += ' ' + end_text
    # Close any tags still open
    for tag in open_tags:
        out += '</%s>' % tag
    # Return string
    return out
truncate_html_words = allow_lazy(truncate_html_words, unicode)
def get_valid_filename(s):
    """
    Returns the given string converted to a string that can be used for a clean
    filename. Specifically, leading and trailing spaces are removed; other
    spaces are converted to underscores; and anything that is not a unicode
    alphanumeric, dash, underscore, or dot, is removed.
    >>> get_valid_filename("john's portrait in 2004.jpg")
    u'johns_portrait_in_2004.jpg'
    """
    cleaned = force_unicode(s).strip().replace(' ', '_')
    # (?u) makes \w match unicode alphanumerics.
    return re.sub(r'(?u)[^-\w.]', '', cleaned)
get_valid_filename = allow_lazy(get_valid_filename, unicode)
def get_text_list(list_, last_word=ugettext_lazy(u'or')):
    """
    >>> get_text_list(['a', 'b', 'c', 'd'])
    u'a, b, c or d'
    >>> get_text_list(['a', 'b', 'c'], 'and')
    u'a, b and c'
    >>> get_text_list(['a', 'b'], 'and')
    u'a and b'
    >>> get_text_list(['a'])
    u'a'
    >>> get_text_list([])
    u''
    """
    if not list_:
        return u''
    if len(list_) == 1:
        return force_unicode(list_[0])
    head = [force_unicode(item) for item in list_[:-1]]
    # Translators: This string is used as a separator between list elements
    separator = _(', ')
    return u'%s %s %s' % (separator.join(head),
                          force_unicode(last_word),
                          force_unicode(list_[-1]))
get_text_list = allow_lazy(get_text_list, unicode)
def normalize_newlines(text):
    # Collapse Windows (\r\n) and old-Mac (\r) line endings to posix \n.
    collapsed = re.sub(r'\r\n|\r|\n', '\n', text)
    return force_unicode(collapsed)
normalize_newlines = allow_lazy(normalize_newlines, unicode)
def recapitalize(text):
    "Recapitalizes text, placing caps after end-of-sentence punctuation."
    lowered = force_unicode(text).lower()
    # A letter at the start of the string, or following '. ', '? ' or '! '.
    sentence_start = re.compile(r'(?:^|(?<=[\.\?\!] ))([a-z])')
    return sentence_start.sub(lambda m: m.group(1).upper(), lowered)
recapitalize = allow_lazy(recapitalize)
def phone2numeric(phone):
    "Converts a phone number with letters into its numeric equivalent."
    # Standard telephone keypad mapping.
    digit_for_letter = {
        'a': '2', 'b': '2', 'c': '2', 'd': '3', 'e': '3',
        'f': '3', 'g': '4', 'h': '4', 'i': '4', 'j': '5', 'k': '5', 'l': '5',
        'm': '6', 'n': '6', 'o': '6', 'p': '7', 'q': '7', 'r': '7', 's': '7',
        't': '8', 'u': '8', 'v': '8', 'w': '9', 'x': '9', 'y': '9', 'z': '9',
    }
    letters = re.compile(r'[A-Z]', re.I)
    return letters.sub(lambda m: digit_for_letter.get(m.group(0).lower()),
                       phone)
phone2numeric = allow_lazy(phone2numeric)
# From http://www.xhaus.com/alan/python/httpcomp.html#gzip
# Used with permission.
def compress_string(s):
    # gzip-compress the byte string `s` entirely in memory and return the
    # compressed bytes (Python 2: cStringIO buffers byte strings).
    import cStringIO, gzip
    zbuf = cStringIO.StringIO()
    # compresslevel=6 is the usual speed/ratio trade-off for HTTP responses.
    zfile = gzip.GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)
    zfile.write(s)
    zfile.close()
    return zbuf.getvalue()
# Matches any non-ASCII character so it can be emitted as a \uXXXX escape.
ustring_re = re.compile(u"([\u0080-\uffff])")
def javascript_quote(s, quote_double_quotes=False):
    """Escapes `s` for safe embedding in a JavaScript string literal.

    Backslashes, CR/LF/TAB and single quotes are backslash-escaped; every
    non-ASCII character becomes a \\uXXXX escape. When quote_double_quotes
    is True, double quotes are replaced by the HTML entity &quot; (for
    JS embedded in HTML attributes). Accepts str (decoded as UTF-8) or
    unicode; anything else raises TypeError.
    """
    def fix(match):
        return r"\u%04x" % ord(match.group(1))
    if type(s) == str:
        s = s.decode('utf-8')
    elif type(s) != unicode:
        raise TypeError(s)
    s = s.replace('\\', '\\\\')
    s = s.replace('\r', '\\r')
    s = s.replace('\n', '\\n')
    s = s.replace('\t', '\\t')
    s = s.replace("'", "\\'")
    if quote_double_quotes:
        # Bug fix: the previous code replaced '"' with itself (a no-op,
        # the '&quot;' entity was lost); upstream Django escapes double
        # quotes as the HTML entity.
        s = s.replace('"', '&quot;')
    return str(ustring_re.sub(fix, s))
javascript_quote = allow_lazy(javascript_quote, unicode)
# Expression to match some_token and some_token="with spaces" (and similarly
# for single-quoted strings).
# Note: whitespace inside the pattern is ignored because of re.VERBOSE.
smart_split_re = re.compile(r"""
    ((?:
        [^\s'"]*
        (?:
            (?:"(?:[^"\\]|\\.)*" | '(?:[^'\\]|\\.)*')
            [^\s'"]*
        )+
    ) | \S+)
""", re.VERBOSE)
def smart_split(text):
    r"""
    Generator that splits a string by spaces, leaving quoted phrases together.
    Supports both single and double quotes, and supports escaping quotes with
    backslashes. In the output, strings will keep their initial and trailing
    quote marks and escaped quotes will remain escaped (the results can then
    be further processed with unescape_string_literal()).
    >>> list(smart_split(r'This is "a person\'s" test.'))
    [u'This', u'is', u'"a person\\\'s"', u'test.']
    >>> list(smart_split(r"Another 'person\'s' test."))
    [u'Another', u"'person\\'s'", u'test.']
    >>> list(smart_split(r'A "\"funky\" style" test.'))
    [u'A', u'"\\"funky\\" style"', u'test.']
    """
    text = force_unicode(text)
    # Yield each token lazily; the regex does all the quote handling.
    for bit in smart_split_re.finditer(text):
        yield bit.group(0)
smart_split = allow_lazy(smart_split, unicode)
def _replace_entity(match):
    # Decode one HTML entity match (from _entity_re) to its unicode character;
    # malformed or unknown entities are returned unchanged.
    text = match.group(1)
    if text[0] == u'#':
        # Numeric entity: decimal (&#65;) or hexadecimal (&#x41;).
        text = text[1:]
        try:
            if text[0] in u'xX':
                c = int(text[1:], 16)
            else:
                c = int(text)
            return unichr(c)  # Python 2 builtin
        except ValueError:
            # Not a valid number; keep the original entity text.
            return match.group(0)
    else:
        # Named entity such as &amp;, resolved via htmlentitydefs.
        try:
            return unichr(name2codepoint[text])
        except (ValueError, KeyError):
            return match.group(0)
# Matches numeric (&#65; / &#x41;) and named (&amp;) HTML entities.
_entity_re = re.compile(r"&(#?[xX]?(?:[0-9a-fA-F]+|\w{1,8}));")
def unescape_entities(text):
    # Replace every HTML entity in `text` with the character it denotes.
    return _entity_re.sub(_replace_entity, text)
unescape_entities = allow_lazy(unescape_entities, unicode)
def unescape_string_literal(s):
    r"""
    Convert quoted string literals to unquoted strings with escaped quotes and
    backslashes unquoted::
    >>> unescape_string_literal('"abc"')
    'abc'
    >>> unescape_string_literal("'abc'")
    'abc'
    >>> unescape_string_literal('"a \"bc\""')
    'a "bc"'
    >>> unescape_string_literal("'\'ab\' c'")
    "'ab' c"
    """
    # Must start with a quote character and end with the same one.
    if s[0] not in "\"'" or s[-1] != s[0]:
        raise ValueError("Not a string literal: %r" % s)
    quote = s[0]
    body = s[1:-1]
    return body.replace('\\' + quote, quote).replace(r'\\', '\\')
unescape_string_literal = allow_lazy(unescape_string_literal)
|
{
"content_hash": "a0a8a2bd507698aa98a095b40a05c003",
"timestamp": "",
"source": "github",
"line_count": 285,
"max_line_length": 132,
"avg_line_length": 35.89824561403509,
"alnum_prop": 0.5488222070178869,
"repo_name": "rimbalinux/LMD3",
"id": "5315653bc3c690e5e099b6cf413c8a8cd98e8025",
"size": "10231",
"binary": false,
"copies": "3",
"ref": "refs/heads/master",
"path": "django/utils/text.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "187820"
},
{
"name": "Python",
"bytes": "7470548"
}
],
"symlink_target": ""
}
|
from django.conf.urls.defaults import *
from diogenis.students.views import *
# URL routes for the students app. Usernames are constrained by the regex to
# at most 50 word characters; named routes allow reverse() lookups.
urlpatterns = patterns('',
    url(r'^(?P<username>\w{0,50})/$', display_labs, name='students.index'),
    url(r'^(?P<username>\w{0,50})/settings/$', settings, name='students.settings'),
    url(r'^(?P<username>\w{0,50})/has_laptop/$', has_laptop, name='students.has_laptop'),
    # No name: this endpoint is only referenced by its path.
    url(r'^add-new-lab/$', add_new_lab),
)
|
{
"content_hash": "cce6bc6e1dea390220f60223a3f6c085",
"timestamp": "",
"source": "github",
"line_count": 11,
"max_line_length": 89,
"avg_line_length": 36.45454545454545,
"alnum_prop": 0.6433915211970075,
"repo_name": "gtsiokos/diogenis",
"id": "a1fff81d160fa5d0f93c59fc7d8e7fc0df1ff072",
"size": "401",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "students/urls.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "JavaScript",
"bytes": "445116"
},
{
"name": "Python",
"bytes": "119388"
}
],
"symlink_target": ""
}
|
import pytest
from common import SCHEMAS_PATH, list_schema_paths, list_latest_schema_paths
# One test case is generated per schema file found under schemas/unit.
@pytest.mark.parametrize("path", list_schema_paths(SCHEMAS_PATH / "unit"))
def test_unit(path, assert_schema_correct):
    """Every unit schema must satisfy the shared correctness fixture."""
    assert_schema_correct(path)
@pytest.mark.parametrize("path", list_latest_schema_paths(SCHEMAS_PATH / "unit"))
def test_unit_latest(path, assert_latest_schema_correct):
    """The latest version of each unit schema gets additional checks."""
    assert_latest_schema_correct(path)
|
{
"content_hash": "8d339b3c8429d62bfa864238466c6562",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 81,
"avg_line_length": 32.76923076923077,
"alnum_prop": 0.7582159624413145,
"repo_name": "spacetelescope/asdf-standard",
"id": "a0be15fe5d533137da0a1e90276665d533418288",
"size": "426",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tests/test_unit.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "24751"
}
],
"symlink_target": ""
}
|
"""Inplace operations.
"""
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.util import deprecation
def _inplace_helper(x, i, v, op):
  """Applies an inplace op on (x, i, v).
  op is one of gen_array_ops.alias_inplace_update,
  gen_array_ops.alias_inplace_add, or gen_array_ops.alias_inplace_sub.
  If i is None, x and v must be the same shape. Computes
    x op v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    x[i, :] op v;
  Otherwise, x and v must have the same rank. Computes
    x[i, :] op v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
    op: alias_inplace_update, alias_inplace_add, or alias_inplace_sub.
  Returns:
    Returns x.
  """
  x = ops.convert_to_tensor(x)
  v = ops.convert_to_tensor(v, x.dtype)
  if i is None:
    # Full tensor.
    # Flatten both operands to rank-2 [1, -1] so the whole tensor can be
    # addressed as "row 0", apply the op there, then restore x's shape.
    return array_ops.reshape(
        op(array_ops.reshape(x, [1, -1]), [0], array_ops.reshape(v, [1, -1])),
        array_ops.shape(x))
  # The kernels expect int32 row indices.
  i = math_ops.cast(i, dtypes.int32)
  if i.get_shape().ndims == 0:
    # Single 0-dim update.
    # Promote the scalar index to a length-1 vector and v to a matching rank.
    return op(x, array_ops.reshape(i, [1]), array_ops.expand_dims(v, 0))
  return op(x, i, v)
@deprecation.deprecated(
    None,
    ('Prefer tf.tensor_scatter_nd_update, which offers the same functionality '
     'with well-defined read-write semantics.'))
def alias_inplace_update(x, i, v):
  """Applies an inplace update on input x at index i with value v. Aliases x.
  If i is None, x and v must be the same shape. Computes
    x = v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    x[i, :] = v;
  Otherwise, x and v must have the same rank. Computes
    x[i, :] = v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
  Returns:
    Returns x.
  """
  # Thin wrapper: all index handling lives in _inplace_helper.
  return _inplace_helper(x, i, v, gen_array_ops.inplace_update)
@deprecation.deprecated(
    None,
    ('Prefer tf.tensor_scatter_nd_add, which offers the same functionality '
     'with well-defined read-write semantics.'))
def alias_inplace_add(x, i, v):
  """Applies an inplace add on input x at index i with value v. Aliases x.
  If i is None, x and v must be the same shape. Computes
    x += v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    x[i, :] += v;
  Otherwise, x and v must have the same rank. Computes
    x[i, :] += v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
  Returns:
    Returns x.
  """
  # Thin wrapper: all index handling lives in _inplace_helper.
  return _inplace_helper(x, i, v, gen_array_ops.inplace_add)
@deprecation.deprecated(
    None,
    ('Prefer tf.tensor_scatter_nd_sub, which offers the same functionality '
     'with well-defined read-write semantics.'))
def alias_inplace_sub(x, i, v):
  """Applies an inplace sub on input x at index i with value v. Aliases x.
  If i is None, x and v must be the same shape. Computes
    x -= v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    x[i, :] -= v;
  Otherwise, x and v must have the same rank. Computes
    x[i, :] -= v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
  Returns:
    Returns x.
  """
  # Thin wrapper: all index handling lives in _inplace_helper.
  return _inplace_helper(x, i, v, gen_array_ops.inplace_sub)
def empty_like(x, init=None):
  """Returns a non-initialized tensor with the same shape and dtype as x.
  Args:
    x: A Tensor.
    init: Initialize the returned tensor with the default value of
      x.dtype(), if True. Otherwise, do not initialize. Defaults to
      None.
  Returns:
    A tensor y, whose dtype and shape are the same as those of x.
    y is guaranteed not to be an alias of x. Upon return, y may contain
    arbitrary data.
  """
  source = ops.convert_to_tensor(x)
  dynamic_shape = array_ops.shape(source)
  return gen_array_ops.empty(dynamic_shape, source.dtype, init=init)
@deprecation.deprecated(
    None,
    ('Prefer tf.tensor_scatter_nd_update, which offers the same functionality '
     'with well-defined read-write semantics.'))
def inplace_update(x, i, v):
  """Applies an inplace update on input x at index i with value v.
  Note that this function is not actually inplace - it allocates
  a copy of x. The utility is not avoiding memory copies but rather
  specifying a sparse update.
  If i is None, x and v must be the same shape. Computes
    y = x; y = v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    y = x; y[i, :] = v;
  Otherwise, x and v must have the same rank. Computes
    y = x; y[i, :] = v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
  Returns:
    Returns y, which is guaranteed not to be an alias of x.
  """
  # deep_copy decouples y from x; the aliasing op then mutates the copy only.
  return alias_inplace_update(gen_array_ops.deep_copy(x), i, v)
@deprecation.deprecated(
    None,
    ('Prefer tf.tensor_scatter_nd_add, which offers the same functionality '
     'with well-defined read-write semantics.'))
def inplace_add(x, i, v):
  """Applies an inplace add on input x at index i with value v.
  Note that this function is not actually inplace - it allocates
  a copy of x. The utility is not avoiding memory copies but rather
  specifying a sparse update.
  If i is None, x and v must be the same shape. Computes
    y = x; y += v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    y = x; y[i, :] += v;
  Otherwise, x and v must have the same rank. Computes
    y = x; y[i, :] += v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
  Returns:
    Returns y, which is guaranteed not to be an alias of x.
  """
  # deep_copy decouples y from x; the aliasing op then mutates the copy only.
  return alias_inplace_add(gen_array_ops.deep_copy(x), i, v)
@deprecation.deprecated(
    None,
    ('Prefer tf.tensor_scatter_nd_sub, which offers the same functionality '
     'with well-defined read-write semantics.'))
def inplace_sub(x, i, v):
  """Applies an inplace sub on input x at index i with value v.
  Note that this function is not actually inplace - it allocates
  a copy of x. The utility is not avoiding memory copies but rather
  specifying a sparse update.
  If i is None, x and v must be the same shape. Computes
    y = x; y -= v;
  If i is a scalar, x has a rank 1 higher than v's. Computes
    y = x; y[i, :] -= v;
  Otherwise, x and v must have the same rank. Computes
    y = x; y[i, :] -= v;
  Args:
    x: A Tensor.
    i: None, a scalar or a vector.
    v: A Tensor.
  Returns:
    Returns y, which is guaranteed not to be an alias of x.
  """
  # deep_copy decouples y from x; the aliasing op then mutates the copy only.
  return alias_inplace_sub(gen_array_ops.deep_copy(x), i, v)
# Re-export of the raw kernel: allocates an uninitialized tensor of the
# given shape/dtype.
empty = gen_array_ops.empty
|
{
"content_hash": "d543a2976a2afb6013ed28fb36f95a19",
"timestamp": "",
"source": "github",
"line_count": 234,
"max_line_length": 79,
"avg_line_length": 28.085470085470085,
"alnum_prop": 0.651095556908095,
"repo_name": "Intel-tensorflow/tensorflow",
"id": "aeaeae2ca9033458dba90bfc764311b49d4ca04c",
"size": "7262",
"binary": false,
"copies": "12",
"ref": "refs/heads/master",
"path": "tensorflow/python/ops/inplace_ops.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Batchfile",
"bytes": "36962"
},
{
"name": "C",
"bytes": "1400913"
},
{
"name": "C#",
"bytes": "13584"
},
{
"name": "C++",
"bytes": "126099634"
},
{
"name": "CMake",
"bytes": "182430"
},
{
"name": "Cython",
"bytes": "5003"
},
{
"name": "Dockerfile",
"bytes": "416133"
},
{
"name": "Go",
"bytes": "2129888"
},
{
"name": "HTML",
"bytes": "4686483"
},
{
"name": "Java",
"bytes": "1074438"
},
{
"name": "Jupyter Notebook",
"bytes": "792906"
},
{
"name": "LLVM",
"bytes": "6536"
},
{
"name": "MLIR",
"bytes": "11447433"
},
{
"name": "Makefile",
"bytes": "2760"
},
{
"name": "Objective-C",
"bytes": "172666"
},
{
"name": "Objective-C++",
"bytes": "300213"
},
{
"name": "Pawn",
"bytes": "5552"
},
{
"name": "Perl",
"bytes": "7536"
},
{
"name": "Python",
"bytes": "42782002"
},
{
"name": "Roff",
"bytes": "5034"
},
{
"name": "Ruby",
"bytes": "9199"
},
{
"name": "Shell",
"bytes": "621917"
},
{
"name": "Smarty",
"bytes": "89538"
},
{
"name": "SourcePawn",
"bytes": "14625"
},
{
"name": "Starlark",
"bytes": "7738020"
},
{
"name": "Swift",
"bytes": "78435"
},
{
"name": "Vim Snippet",
"bytes": "58"
}
],
"symlink_target": ""
}
|
from __future__ import print_function
from unittest import TestCase
import numpy as np
from nose import SkipTest
from nose.tools import assert_raises
from numpy.testing import assert_array_equal, assert_array_almost_equal
from sklearn.datasets.samples_generator import make_spd_matrix
from sklearn import mixture
from sklearn.utils import check_random_state
from hmmlearn import hmm
from hmmlearn.utils import normalize
np.seterr(all='warn')
def fit_hmm_and_monitor_log_likelihood(h, X, lengths=None, n_iter=1):
    """Run EM one iteration at a time, recording the score after each step."""
    h.n_iter = 1         # make sure we do a single iteration at a time
    h.init_params = ''   # and don't re-init params
    scores = []
    for _ in range(n_iter):
        h.fit(X, lengths=lengths)
        scores.append(h.score(X, lengths=lengths))
    return np.array(scores, dtype=float)
class GaussianHMMTestMixin(object):
    """Shared GaussianHMM tests; concrete subclasses pick a covariance type."""
    covariance_type = None  # set by subclasses
    def setUp(self):
        # Fixed seed so generated parameters are reproducible across runs.
        self.prng = prng = np.random.RandomState(10)
        self.n_components = n_components = 3
        self.n_features = n_features = 3
        self.startprob = prng.rand(n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = prng.rand(n_components, n_components)
        # Normalize each row so transmat is a valid stochastic matrix.
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, n_components))
        self.means = prng.randint(-20, 20, (n_components, n_features))
        # One covariance parameterization per supported covariance_type.
        self.covars = {
            'spherical': (1.0 + 2 * np.dot(prng.rand(n_components, 1),
                                           np.ones((1, n_features)))) ** 2,
            'tied': (make_spd_matrix(n_features, random_state=0)
                     + np.eye(n_features)),
            'diag': (1.0 + 2 * prng.rand(n_components, n_features)) ** 2,
            'full': np.array([make_spd_matrix(n_features, random_state=0)
                              + np.eye(n_features)
                              for x in range(n_components)]),
        }
        # The same covariances expanded to full per-component matrices.
        self.expanded_covars = {
            'spherical': [np.eye(n_features) * cov
                          for cov in self.covars['spherical']],
            'diag': [np.diag(cov) for cov in self.covars['diag']],
            'tied': [self.covars['tied']] * n_components,
            'full': self.covars['full'],
        }
    def test_bad_covariance_type(self):
        """An unknown covariance_type must be rejected by _check()."""
        with assert_raises(ValueError):
            h = hmm.GaussianHMM(20, covariance_type='badcovariance_type')
            h.means_ = self.means
            h.covars_ = []
            h.startprob_ = self.startprob
            h.transmat_ = self.transmat
            h._check()
    def test_score_samples_and_decode(self):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.means_ = self.means
        h.covars_ = self.covars[self.covariance_type]
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * h.means_
        gaussidx = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(gaussidx)
        X = self.prng.randn(n_samples, self.n_features) + h.means_[gaussidx]
        h._init(X, params="st")
        ll, posteriors = h.score_samples(X)
        self.assertEqual(posteriors.shape, (n_samples, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
        viterbi_ll, stateseq = h.decode(X)
        assert_array_equal(stateseq, gaussidx)
    def test_sample(self, n=1000):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = self.transmat
        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        h.means_ = 20 * self.means
        h.covars_ = np.maximum(self.covars[self.covariance_type], 0.1)
        X, state_sequence = h.sample(n, random_state=self.prng)
        self.assertEqual(X.shape, (n, self.n_features))
        self.assertEqual(len(state_sequence), n)
    def test_fit(self, params='stmc', n_iter=5, **kwargs):
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.means_ = 20 * self.means
        h.covars_ = self.covars[self.covariance_type]
        lengths = [10] * 10
        X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(X, lengths=lengths)
        trainll = fit_hmm_and_monitor_log_likelihood(
            h, X, lengths=lengths, n_iter=n_iter)
        # Check that the log-likelihood is always increasing during training.
        diff = np.diff(trainll)
        message = ("Decreasing log-likelihood for {0} covariance: {1}"
                   .format(self.covariance_type, diff))
        # Small negative tolerance absorbs floating-point noise in EM.
        self.assertTrue(np.all(diff >= -1e-6), message)
    def test_fit_works_on_sequences_of_different_length(self):
        lengths = [3, 4, 5]
        X = self.prng.rand(sum(lengths), self.n_features)
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(X, lengths=lengths)
    def test_fit_with_length_one_signal(self):
        lengths = [10, 8, 1]
        X = self.prng.rand(sum(lengths), self.n_features)
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        # This shouldn't raise
        # ValueError: zero-size array to reduction operation maximum which
        # has no identity
        h.fit(X, lengths=lengths)
    def test_fit_with_priors(self, params='stmc', n_iter=5):
        startprob_prior = 10 * self.startprob + 2.0
        transmat_prior = 10 * self.transmat + 2.0
        means_prior = self.means
        means_weight = 2.0
        covars_weight = 2.0
        if self.covariance_type in ('full', 'tied'):
            covars_weight += self.n_features
        covars_prior = self.covars[self.covariance_type]
        h = hmm.GaussianHMM(self.n_components, self.covariance_type)
        h.startprob_ = self.startprob
        h.startprob_prior = startprob_prior
        h.transmat_ = normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.transmat_prior = transmat_prior
        h.means_ = 20 * self.means
        h.means_prior = means_prior
        h.means_weight = means_weight
        h.covars_ = self.covars[self.covariance_type]
        h.covars_prior = covars_prior
        h.covars_weight = covars_weight
        lengths = [100] * 10
        X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
        # Re-initialize the parameters and check that we can converge to the
        # original parameter values.
        h_learn = hmm.GaussianHMM(self.n_components, self.covariance_type,
                                  params=params)
        h_learn.n_iter = 0
        h_learn.fit(X, lengths=lengths)
        fit_hmm_and_monitor_log_likelihood(
            h_learn, X, lengths=lengths, n_iter=n_iter)
        # Make sure we've converged to the right parameters.
        # a) means
        self.assertTrue(np.allclose(sorted(h.means_.tolist()),
                                    sorted(h_learn.means_.tolist()),
                                    0.01))
        # b) covars are hard to estimate precisely from a relatively small
        #    sample, thus the large threshold
        self.assertTrue(np.allclose(sorted(h._covars_.tolist()),
                                    sorted(h_learn._covars_.tolist()),
                                    10))
    def test_fit_non_ergodic_transmat(self):
        # Left-to-right chain: each state only reaches itself or the next one.
        h = hmm.GaussianHMM(n_components=5, covariance_type='full',
                            n_iter=100, init_params='st')
        h.startprob_ = np.array([1, 0, 0, 0, 0])
        h.transmat_ = np.array([[0.9, 0.1, 0, 0, 0],
                                [0, 0.9, 0.1, 0, 0],
                                [0, 0, 0.9, 0.1, 0],
                                [0, 0, 0, 0.9, 0.1],
                                [0, 0, 0, 0, 1.0]])
        h.means_ = np.zeros((5, 10))
        h.covars_ = np.tile(np.identity(10), (5, 1, 1))
        lengths = [10] * 10
        X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
        h.fit(X, lengths=lengths)
        # TODO: write the actual test
# Concrete test classes: each runs the full mixin suite for one covariance
# parameterization.
class TestGaussianHMMWithSphericalCovars(GaussianHMMTestMixin, TestCase):
    covariance_type = 'spherical'
    def test_fit_startprob_and_transmat(self):
        # Fit only the start probabilities ('s') and transition matrix ('t').
        self.test_fit('st')
class TestGaussianHMMWithDiagonalCovars(GaussianHMMTestMixin, TestCase):
    covariance_type = 'diag'
class TestGaussianHMMWithTiedCovars(GaussianHMMTestMixin, TestCase):
    covariance_type = 'tied'
class TestGaussianHMMWithFullCovars(GaussianHMMTestMixin, TestCase):
    covariance_type = 'full'
class MultinomialHMMTestCase(TestCase):
"""Using examples from Wikipedia
- http://en.wikipedia.org/wiki/Hidden_Markov_model
- http://en.wikipedia.org/wiki/Viterbi_algorithm
"""
    def setUp(self):
        # Model from the Wikipedia HMM/Viterbi articles: two hidden weather
        # states emitting one of three observed activities.
        self.prng = np.random.RandomState(9)
        self.n_components = 2   # ('Rainy', 'Sunny')
        self.n_features = 3   # ('walk', 'shop', 'clean')
        self.emissionprob = np.array([[0.1, 0.4, 0.5], [0.6, 0.3, 0.1]])
        self.startprob = np.array([0.6, 0.4])
        self.transmat = np.array([[0.7, 0.3], [0.4, 0.6]])
        self.h = hmm.MultinomialHMM(self.n_components)
        self.h.startprob_ = self.startprob
        self.h.transmat_ = self.transmat
        self.h.emissionprob_ = self.emissionprob
    def test_set_emissionprob(self):
        h = hmm.MultinomialHMM(self.n_components)
        emissionprob = np.array([[0.8, 0.2, 0.0], [0.7, 0.2, 1.0]])
        # NOTE(review): this sets `emissionprob` (no trailing underscore), a
        # plain attribute rather than the fitted `emissionprob_` parameter, so
        # no validation/normalization is exercised — confirm this is intended.
        h.emissionprob = emissionprob
        assert np.allclose(emissionprob, h.emissionprob)
def test_wikipedia_viterbi_example(self):
# From http://en.wikipedia.org/wiki/Viterbi_algorithm:
# "This reveals that the observations ['walk', 'shop', 'clean']
# were most likely generated by states ['Sunny', 'Rainy',
# 'Rainy'], with probability 0.01344."
X = [[0], [1], [2]]
logprob, state_sequence = self.h.decode(X)
self.assertAlmostEqual(np.exp(logprob), 0.01344)
assert_array_equal(state_sequence, [1, 0, 0])
def test_decode_map_algorithm(self):
X = [[0], [1], [2]]
h = hmm.MultinomialHMM(self.n_components, algorithm="map")
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.emissionprob_ = self.emissionprob
_logprob, state_sequence = h.decode(X)
assert_array_equal(state_sequence, [1, 0, 0])
def test_predict(self):
X = [[0], [1], [2]]
state_sequence = self.h.predict(X)
posteriors = self.h.predict_proba(X)
assert_array_equal(state_sequence, [1, 0, 0])
assert_array_almost_equal(posteriors, [
[0.23170303, 0.76829697],
[0.62406281, 0.37593719],
[0.86397706, 0.13602294],
])
def test_attributes(self):
h = hmm.MultinomialHMM(self.n_components)
self.assertEqual(h.n_components, self.n_components)
h.startprob_ = self.startprob
h.transmat_ = self.transmat
h.emissionprob_ = self.emissionprob
assert_array_almost_equal(h.emissionprob_, self.emissionprob)
with assert_raises(ValueError):
h.emissionprob_ = []
h._check()
with assert_raises(ValueError):
h.emissionprob_ = np.zeros((self.n_components - 2,
self.n_features))
h._check()
def test_score_samples(self):
idx = np.repeat(np.arange(self.n_components), 10)
n_samples = len(idx)
X = np.atleast_2d(
(self.prng.rand(n_samples) * self.n_features).astype(int)).T
ll, posteriors = self.h.score_samples(X)
self.assertEqual(posteriors.shape, (n_samples, self.n_components))
assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))
def test_sample(self, n=1000):
X, state_sequence = self.h.sample(n, random_state=self.prng)
self.assertEqual(X.ndim, 2)
self.assertEqual(len(X), n)
self.assertEqual(len(state_sequence), n)
self.assertEqual(len(np.unique(X)), self.n_features)
def test_fit(self, params='ste', n_iter=5, **kwargs):
h = self.h
h.params = params
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# Mess up the parameters and see if we can re-learn them.
h.startprob_ = normalize(self.prng.rand(self.n_components))
h.transmat_ = normalize(self.prng.rand(self.n_components,
self.n_components), axis=1)
h.emissionprob_ = normalize(
self.prng.rand(self.n_components, self.n_features), axis=1)
trainll = fit_hmm_and_monitor_log_likelihood(
h, X, lengths=lengths, n_iter=n_iter)
# Check that the log-likelihood is always increasing during training.
diff = np.diff(trainll)
self.assertTrue(np.all(diff >= -1e-6),
"Decreasing log-likelihood: {0}" .format(diff))
def test_fit_emissionprob(self):
self.test_fit('e')
def test_fit_with_init(self, params='ste', n_iter=5, verbose=False,
**kwargs):
h = self.h
learner = hmm.MultinomialHMM(self.n_components)
lengths = [10] * 10
X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)
# use init_function to initialize paramerters
learner._init(X, lengths=lengths, params=params)
trainll = fit_hmm_and_monitor_log_likelihood(learner, X, n_iter=n_iter)
# Check that the loglik is always increasing during training
if not np.all(np.diff(trainll) > 0) and verbose:
print()
print('Test train: (%s)\n %s\n %s' % (params, trainll,
np.diff(trainll)))
self.assertTrue(np.all(np.diff(trainll) > -1.e-3))
def test__check_input_symbols(self):
self.assertTrue(self.h._check_input_symbols([[0, 0, 2, 1, 3, 1, 1]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, 3, 5, 10]]))
self.assertFalse(self.h._check_input_symbols([[0]]))
self.assertFalse(self.h._check_input_symbols([[0., 2., 1., 3.]]))
self.assertFalse(self.h._check_input_symbols([[0, 0, -2, 1, 3, 1, 1]]))
def create_random_gmm(n_mix, n_features, covariance_type, prng=0):
    """Build a GMM with random parameters.

    Parameters
    ----------
    n_mix : int
        Number of mixture components.
    n_features : int
        Dimensionality of each Gaussian.
    covariance_type : str
        One of 'spherical', 'tied', 'diag' or 'full'.
    prng : int or RandomState, optional
        Seed or random state used to draw the parameters.

    Returns
    -------
    g : mixture.GMM
        A mixture with random means, random covariances of the requested
        type, and normalized weights.
    """
    prng = check_random_state(prng)
    g = mixture.GMM(n_mix, covariance_type=covariance_type)
    g.means_ = prng.randint(-20, 20, (n_mix, n_features))
    # mincv keeps every covariance safely away from singularity.
    mincv = 0.1
    g.covars_ = {
        'spherical': (mincv + mincv * np.dot(prng.rand(n_mix, 1),
                                             np.ones((1, n_features)))) ** 2,
        'tied': (make_spd_matrix(n_features, random_state=prng)
                 + mincv * np.eye(n_features)),
        'diag': (mincv + mincv * prng.rand(n_mix, n_features)) ** 2,
        'full': np.array(
            [make_spd_matrix(n_features, random_state=prng)
             + mincv * np.eye(n_features) for _ in range(n_mix)])
    }[covariance_type]
    g.weights_ = normalize(prng.rand(n_mix))
    return g
class GMMHMMTestMixin(object):
    # Shared tests for hmm.GMMHMM; concrete subclasses declare
    # `covariance_type`.

    def setUp(self):
        self.prng = np.random.RandomState(9)
        self.n_components = 3
        self.n_mix = 2
        self.n_features = 2
        # NOTE(review): this instance attribute shadows the
        # `covariance_type` class attribute declared by the subclasses
        # (diag/tied/full), so the GMMs built below always use 'diag' --
        # confirm whether that is intended.
        self.covariance_type = 'diag'
        self.startprob = self.prng.rand(self.n_components)
        self.startprob = self.startprob / self.startprob.sum()
        self.transmat = self.prng.rand(self.n_components, self.n_components)
        # Row-normalize so every row is a valid transition distribution.
        self.transmat /= np.tile(self.transmat.sum(axis=1)[:, np.newaxis],
                                 (1, self.n_components))

        # One random GMM per hidden state.
        self.gmms = []
        for state in range(self.n_components):
            self.gmms.append(create_random_gmm(
                self.n_mix, self.n_features, self.covariance_type,
                prng=self.prng))

    def test_score_samples_and_decode(self):
        h = hmm.GMMHMM(self.n_components)
        h.startprob_ = self.startprob
        h.transmat_ = self.transmat
        h.gmms_ = self.gmms

        # Make sure the means are far apart so posteriors.argmax()
        # picks the actual component used to generate the observations.
        for g in h.gmms_:
            g.means_ *= 20

        # Five observations drawn from each state, in state order.
        refstateseq = np.repeat(np.arange(self.n_components), 5)
        n_samples = len(refstateseq)
        X = [h.gmms_[x].sample(1, random_state=self.prng).flatten()
             for x in refstateseq]

        _ll, posteriors = h.score_samples(X)

        self.assertEqual(posteriors.shape, (n_samples, self.n_components))
        assert_array_almost_equal(posteriors.sum(axis=1), np.ones(n_samples))

        _logprob, stateseq = h.decode(X)
        assert_array_equal(stateseq, refstateseq)

    def test_sample(self, n=1000):
        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        h.startprob_ = self.startprob
        h.transmat_ = self.transmat
        h.gmms_ = self.gmms
        X, state_sequence = h.sample(n, random_state=self.prng)
        self.assertEqual(X.shape, (n, self.n_features))
        self.assertEqual(len(state_sequence), n)

    def test_fit(self, params='stmwc', n_iter=5, verbose=False, **kwargs):
        h = hmm.GMMHMM(self.n_components, covars_prior=1.0)
        h.startprob_ = self.startprob
        h.transmat_ = normalize(
            self.transmat + np.diag(self.prng.rand(self.n_components)), 1)
        h.gmms_ = self.gmms

        lengths = [10] * 10
        X, _state_sequence = h.sample(sum(lengths), random_state=self.prng)

        # Mess up the parameters and see if we can re-learn them.
        h.n_iter = 0
        h.fit(X, lengths=lengths)
        h.transmat_ = normalize(self.prng.rand(self.n_components,
                                               self.n_components), axis=1)
        h.startprob_ = normalize(self.prng.rand(self.n_components))

        trainll = fit_hmm_and_monitor_log_likelihood(
            h, X, lengths=lengths, n_iter=n_iter)
        if not np.all(np.diff(trainll) > 0) and verbose:
            print('Test train: (%s)\n %s\n %s' % (params, trainll,
                                                  np.diff(trainll)))
        # XXX: this test appears to check that training log likelihood should
        # never be decreasing (up to a tolerance of 0.5, why?) but this is not
        # the case when the seed changes.
        raise SkipTest("Unstable test: trainll is not always increasing "
                       "depending on seed")

        # NOTE(review): unreachable due to the SkipTest above; kept for when
        # the skip is lifted.
        self.assertTrue(np.all(np.diff(trainll) > -0.5))

    def test_fit_works_on_sequences_of_different_length(self):
        lengths = [3, 4, 5]
        X = self.prng.rand(sum(lengths), self.n_features)

        h = hmm.GMMHMM(self.n_components, covariance_type=self.covariance_type)
        # This shouldn't raise
        # ValueError: setting an array element with a sequence.
        h.fit(X, lengths=lengths)
class TestGMMHMMWithDiagCovars(GMMHMMTestMixin, TestCase):
    # Run the shared GMMHMM tests with diagonal covariances.
    covariance_type = 'diag'

    def test_fit_startprob_and_transmat(self):
        # Fit only start probabilities ('s') and the transition matrix ('t').
        self.test_fit('st')

    def test_fit_means(self):
        # Fit only the component means ('m').
        self.test_fit('m')
class TestGMMHMMWithTiedCovars(GMMHMMTestMixin, TestCase):
    # Run the shared GMMHMM tests with tied covariances.
    covariance_type = 'tied'
class TestGMMHMMWithFullCovars(GMMHMMTestMixin, TestCase):
    # Run the shared GMMHMM tests with full covariances.
    covariance_type = 'full'
|
{
"content_hash": "911d0f7ade4dc47caa560f684f93a1b6",
"timestamp": "",
"source": "github",
"line_count": 501,
"max_line_length": 79,
"avg_line_length": 39.72055888223553,
"alnum_prop": 0.5890954773869347,
"repo_name": "dhuppenkothen/hmmlearn",
"id": "6170f8a9670647a568a341dad64cb6e986916dff",
"size": "19900",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "hmmlearn/tests/test_hmm.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Batchfile",
"bytes": "1786"
},
{
"name": "Makefile",
"bytes": "1056"
},
{
"name": "PowerShell",
"bytes": "3310"
},
{
"name": "Python",
"bytes": "85912"
},
{
"name": "Shell",
"bytes": "1700"
}
],
"symlink_target": ""
}
|
# Recipe modules this recipe depends on: sibling Skia modules ('env',
# 'flavor', 'run', 'vars') plus recipe_engine built-ins.
DEPS = [
  'env',
  'flavor',
  'recipe_engine/context',
  'recipe_engine/file',
  'recipe_engine/path',
  'recipe_engine/platform',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/raw_io',
  'recipe_engine/step',
  'run',
  'vars',
]
def upload_dm_results(buildername):
  """Return True iff DM results for this builder should be uploaded.

  Sanitizer, coverage and Valgrind bots do not produce results worth
  uploading, so any builder whose name contains one of those markers is
  skipped.

  Args:
    buildername: the builder name string.

  Returns:
    bool: False when any skip marker appears in the name, else True.
  """
  skip_upload_bots = [
    'ASAN',
    'Coverage',
    'MSAN',
    'TSAN',
    'Valgrind',
  ]
  # Markers are matched as substrings anywhere in the builder name.
  return not any(s in buildername for s in skip_upload_bots)
def dm_flags(api, bot):
  """Compute the command line for DM (Skia's test harness) for a given bot.

  The builder name is treated as a bag of feature keywords (matched as
  substrings, e.g. 'Vulkan', 'ASAN', 'Pixel3'); each keyword adjusts the
  set of configs to run, the blacklist of tests, and assorted flags.

  Args:
    api: recipe API object; api.vars supplies builder configuration.
    bot: builder name string.

  Returns:
    A list of command-line arguments for DM.
  """
  args = []
  configs = []
  blacklisted = []

  def blacklist(quad):
    # Accepts either a space-separated string or a 4-element sequence of
    # (config, src, options, name).  The entry is recorded only when its
    # config is active: '_' matches any config, '~x' matches when config
    # 'x' is active.
    config, src, options, name = (
        quad.split(' ') if isinstance(quad, str) else quad)
    if (config == '_' or
        config in configs or
        (config[0] == '~' and config[1:] in configs)):
      blacklisted.extend([config, src, options, name])

  # We've been spending lots of time writing out and especially uploading
  # .pdfs, but not doing anything further with them. skia:6821
  args.extend(['--dont_write', 'pdf'])

  # This enables non-deterministic random seeding of the GPU FP optimization
  # test.
  # Not Android due to:
  #  - https://skia.googlesource.com/skia/+/
  #    5910ed347a638ded8cd4c06dbfda086695df1112/BUILD.gn#160
  #  - https://skia.googlesource.com/skia/+/
  #    ce06e261e68848ae21cac1052abc16bc07b961bf/tests/ProcessorTest.cpp#307
  # Not MSAN due to:
  #  - https://skia.googlesource.com/skia/+/
  #    0ac06e47269a40c177747310a613d213c95d1d6d/infra/bots/recipe_modules/
  #    flavor/gn_flavor.py#80
  if 'Android' not in bot and 'MSAN' not in bot:
    args.append('--randomProcessorTest')

  if 'Pixel3' in bot and 'Vulkan' in bot:
    args.extend(['--dontReduceOpsTaskSplitting'])

  thread_limit = None
  MAIN_THREAD_ONLY = 0

  # 32-bit desktop bots tend to run out of memory, because they have relatively
  # far more cores than RAM (e.g. 32 cores, 3G RAM). Hold them back a bit.
  if '-x86-' in bot:
    thread_limit = 4

  # These bots run out of memory easily.
  if 'Chromecast' in bot or 'MotoG4' in bot or 'Nexus7' in bot:
    thread_limit = MAIN_THREAD_ONLY

  # Avoid issues with dynamically exceeding resource cache limits.
  if 'Test' in bot and 'DISCARDABLE' in bot:
    thread_limit = MAIN_THREAD_ONLY

  if thread_limit is not None:
    args.extend(['--threads', str(thread_limit)])

  # Android's kernel will occasionally attempt to kill our process, using
  # SIGINT, in an effort to free up resources. If requested, that signal
  # is ignored and dm will keep attempting to proceed until we actually
  # exhaust the available resources.
  if 'Chromecast' in bot:
    args.append('--ignoreSigInt')

  if 'SwiftShader' in api.vars.extra_tokens:
    configs.extend(['gles', 'glesdft'])
    args.append('--disableDriverCorrectnessWorkarounds')
  elif api.vars.builder_cfg.get('cpu_or_gpu') == 'CPU':
    args.append('--nogpu')

    configs.append('8888')

    if 'BonusConfigs' in bot:
      configs = [
        'pdf',
        'g8', '565',
        'pic-8888', 'serialize-8888',
        'f16', 'srgb', 'esrgb', 'narrow', 'enarrow',
        'p3', 'ep3', 'rec2020', 'erec2020']

  elif api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
    args.append('--nocpu')

    # Add in either gles or gl configs to the canonical set based on OS
    sample_count = '8'
    gl_prefix = 'gl'
    if 'Android' in bot or 'iOS' in bot:
      sample_count = '4'
      # We want to test the OpenGL config not the GLES config on the Shield
      if 'NVIDIA_Shield' not in bot:
        gl_prefix = 'gles'
      # MSAA is disabled on Pixel3a (https://b.corp.google.com/issues/143074513).
      if ('Pixel3a' in bot):
        sample_count = ''
    elif 'Intel' in bot:
      # MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
      sample_count = ''
    elif 'ChromeOS' in bot:
      gl_prefix = 'gles'

    if 'NativeFonts' in bot:
      configs.append(gl_prefix)
    else:
      configs.extend([gl_prefix,
                      gl_prefix + 'dft',
                      gl_prefix + 'srgb'])
      if sample_count:
        configs.append(gl_prefix + 'msaa' + sample_count)

    # The Tegra3 doesn't support MSAA
    if ('Tegra3' in bot or
        # We aren't interested in fixing msaa bugs on current iOS devices.
        'iPad4' in bot or
        'iPadPro' in bot or
        'iPhone6' in bot or
        'iPhone7' in bot or
        # skia:5792
        'IntelHD530' in bot or
        'IntelIris540' in bot):
      configs = [x for x in configs if 'msaa' not in x]

    # We want to test both the OpenGL config and the GLES config on Linux Intel:
    # GL is used by Chrome, GLES is used by ChromeOS.
    # Also do the Ganesh threading verification test (render with and without
    # worker threads, using only the SW path renderer, and compare the results).
    if 'Intel' in bot and api.vars.is_linux:
      configs.extend(['gles',
                      'glesdft',
                      'glessrgb',
                      'gltestthreading'])
      # skbug.com/6333, skbug.com/6419, skbug.com/6702
      blacklist('gltestthreading gm _ lcdblendmodes')
      blacklist('gltestthreading gm _ lcdoverlap')
      blacklist('gltestthreading gm _ textbloblooper')
      # All of these GMs are flaky, too:
      blacklist('gltestthreading gm _ bleed_alpha_bmp')
      blacklist('gltestthreading gm _ bleed_alpha_bmp_shader')
      blacklist('gltestthreading gm _ bleed_alpha_image')
      blacklist('gltestthreading gm _ bleed_alpha_image_shader')
      blacklist('gltestthreading gm _ savelayer_with_backdrop')
      blacklist('gltestthreading gm _ persp_shaders_bw')
      blacklist('gltestthreading gm _ dftext_blob_persp')
      blacklist('gltestthreading gm _ dftext')
      # skbug.com/7523 - Flaky on various GPUs
      blacklist('gltestthreading gm _ orientation')
      # These GMs only differ in the low bits
      blacklist('gltestthreading gm _ stroketext')
      blacklist('gltestthreading gm _ draw_image_set')

    # CommandBuffer bot *only* runs the command_buffer config.
    if 'CommandBuffer' in bot:
      configs = ['commandbuffer']

    # ANGLE bot *only* runs the angle configs
    if 'ANGLE' in bot:
      configs = ['angle_d3d11_es2',
                 'angle_d3d9_es2',
                 'angle_gl_es2',
                 'angle_d3d11_es3']
      if sample_count:
        configs.append('angle_d3d11_es2_msaa' + sample_count)
        configs.append('angle_d3d11_es3_msaa' + sample_count)
      if 'LenovoYogaC630' in bot:
        # LenovoYogaC630 only supports D3D11, and to save time, we only test ES3
        configs = ['angle_d3d11_es3',
                   'angle_d3d11_es3_msaa' + sample_count]
      if 'GTX' in bot or 'Quadro' in bot:
        # See skia:7823 and chromium:693090.
        configs.append('angle_gl_es3')
        if sample_count:
          configs.append('angle_gl_es2_msaa' + sample_count)
          configs.append('angle_gl_es3_msaa' + sample_count)

    if 'NUC5i7RYH' in bot:
      # skbug.com/7376
      blacklist('_ test _ ProcessorCloneTest')

    if 'AndroidOne' in bot or ('Nexus' in bot and 'Nexus5x' not in bot) or 'GalaxyS6' in bot:
      # skbug.com/9019
      blacklist('_ test _ ProcessorCloneTest')
      blacklist('_ test _ Programs')
      blacklist('_ test _ ProcessorOptimizationValidationTest')

    if 'CommandBuffer' in bot and 'MacBook10.1-' in bot:
      # skbug.com/9235
      blacklist('_ test _ Programs')

    # skbug.com/9033 - these devices run out of memory on this test
    # when opList splitting reduction is enabled
    if 'GPU' in bot and ('Nexus7' in bot or
                         'NVIDIA_Shield' in bot or
                         'Nexus5x' in bot or
                         ('Win10' in bot and 'GTX660' in bot and 'Vulkan' in bot) or
                         'Chorizo' in bot):
      blacklist(['_', 'gm', '_', 'savelayer_clipmask'])

    # skbug.com/9123
    if 'CommandBuffer' in bot and 'IntelIris5100' in bot:
      blacklist(['_', 'test', '_', 'AsyncReadPixels'])

    # skbug.com/9043 - these devices render this test incorrectly
    # when opList splitting reduction is enabled
    if 'GPU' in bot and 'Vulkan' in bot and ('RadeonR9M470X' in bot or
                                             'RadeonHD7770' in bot):
      blacklist(['_', 'tests', '_', 'VkDrawableImportTest'])

    if 'Vulkan' in bot:
      configs = ['vk']
      if 'Android' in bot:
        configs.append('vkmsaa4')
      else:
        # MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926, skia:9023
        if 'Intel' not in bot:
          configs.append('vkmsaa8')

    if 'Metal' in bot:
      configs = ['mtl']
      if 'iOS' in bot:
        configs.append('mtlmsaa4')
      else:
        # MSAA doesn't work well on Intel GPUs chromium:527565, chromium:983926
        if 'Intel' not in bot:
          configs.append('mtlmsaa8')

    # Test 1010102 on our Linux/NVIDIA bots and the persistent cache config
    # on the GL bots.
    if ('QuadroP400' in bot and 'PreAbandonGpuContext' not in bot and
        'TSAN' not in bot and api.vars.is_linux):
      if 'Vulkan' in bot:
        configs.append('vk1010102')
        # Decoding transparent images to 1010102 just looks bad
        blacklist('vk1010102 image _ _')
      else:
        configs.extend(['gl1010102',
                        'gltestpersistentcache',
                        'gltestglslcache',
                        'gltestprecompile'])
        # Decoding transparent images to 1010102 just looks bad
        blacklist('gl1010102 image _ _')
        # These tests produce slightly different pixels run to run on NV.
        blacklist('gltestpersistentcache gm _ atlastext')
        blacklist('gltestpersistentcache gm _ dftext')
        blacklist('gltestpersistentcache gm _ glyph_pos_h_b')
        blacklist('gltestglslcache gm _ atlastext')
        blacklist('gltestglslcache gm _ dftext')
        blacklist('gltestglslcache gm _ glyph_pos_h_b')
        blacklist('gltestprecompile gm _ atlastext')
        blacklist('gltestprecompile gm _ dftext')
        blacklist('gltestprecompile gm _ glyph_pos_h_b')
        # Tessellation shaders do not yet participate in the persistent cache.
        blacklist('gltestpersistentcache gm _ tessellation')
        blacklist('gltestglslcache gm _ tessellation')
        blacklist('gltestprecompile gm _ tessellation')

    # We also test the SkSL precompile config on Pixel2XL as a representative
    # Android device - this feature is primarily used by Flutter.
    if 'Pixel2XL' in bot and 'Vulkan' not in bot:
      configs.append('glestestprecompile')

    # Test rendering to wrapped dsts on a few bots
    # Also test 'glenarrow', which hits F16 surfaces and F16 vertex colors.
    if 'BonusConfigs' in api.vars.extra_tokens:
      configs = ['glbetex', 'glbert', 'glenarrow']

    if 'ChromeOS' in bot:
      # Just run GLES for now - maybe add gles_msaa4 in the future
      configs = ['gles']

    if 'Chromecast' in bot:
      configs = ['gles']

    # Test coverage counting path renderer.
    if 'CCPR' in bot:
      configs = [c for c in configs if c == 'gl' or c == 'gles']
      args.extend(['--pr', 'ccpr', '--cc', 'true', '--cachePathMasks', 'false'])

    # Test GPU tessellation path renderer.
    if 'GpuTess' in bot:
      configs = [gl_prefix + 'msaa4']
      args.extend(['--pr', 'gtess'])

    # Test non-nvpr on NVIDIA.
    if 'NonNVPR' in bot:
      configs = ['gl', 'glmsaa4']
      args.extend(['--pr', '~nvpr'])

    # DDL is a GPU-only feature
    if 'DDL1' in bot:
      # This bot generates gl and vk comparison images for the large skps
      configs = [c for c in configs if c == 'gl' or c == 'vk' or c == 'mtl']
      args.extend(['--skpViewportSize', "2048"])
      args.extend(['--pr', '~small'])
    if 'DDL3' in bot:
      # This bot generates the ddl-gl and ddl-vk images for the
      # large skps and the gms
      ddl_configs = ['ddl-' + c for c in configs if c == 'gl' or c == 'vk' or c == 'mtl']
      ddl2_configs = ['ddl2-' + c for c in configs if c == 'gl' or c == 'vk' or c == 'mtl']
      configs = ddl_configs + ddl2_configs
      args.extend(['--skpViewportSize', "2048"])
      args.extend(['--gpuThreads', "0"])

  tf = api.vars.builder_cfg.get('test_filter')
  if 'All' != tf:
    # Expected format: shard_XX_YY
    parts = tf.split('_')
    if len(parts) == 3:
      args.extend(['--shard', parts[1]])
      args.extend(['--shards', parts[2]])
    else:  # pragma: nocover
      raise Exception('Invalid task name - bad shards: %s' % tf)

  args.append('--config')
  args.extend(configs)

  # Run tests, gms, and image decoding tests everywhere.
  args.extend('--src tests gm image lottie colorImage svg skp'.split(' '))
  if api.vars.builder_cfg.get('cpu_or_gpu') == 'GPU':
    # Don't run the 'svgparse_*' svgs on GPU.
    blacklist('_ svg _ svgparse_')
  elif bot == 'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN':
    # Only run the CPU SVGs on 8888.
    blacklist('~8888 svg _ _')
  else:
    # On CPU SVGs we only care about parsing. Only run them on the above bot.
    args.remove('svg')

  # Eventually I'd like these to pass, but for now just skip 'em.
  if 'SK_FORCE_RASTER_PIPELINE_BLITTER' in bot:
    args.remove('tests')

  if 'NativeFonts' in bot:  # images won't exercise native font integration :)
    args.remove('image')
    args.remove('colorImage')

  def remove_from_args(arg):
    # Unlike list.remove, tolerates the arg already being absent.
    if arg in args:
      args.remove(arg)

  if 'DDL' in bot:
    # The DDL bots just render the large skps and the gms
    remove_from_args('tests')
    remove_from_args('image')
    remove_from_args('colorImage')
    remove_from_args('svg')
  else:
    # Currently, only the DDL bots render skps
    remove_from_args('skp')

  if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
    # Only run the lotties on Lottie bots.
    remove_from_args('tests')
    remove_from_args('gm')
    remove_from_args('image')
    remove_from_args('colorImage')
    remove_from_args('svg')
    remove_from_args('skp')
  else:
    remove_from_args('lottie')

  # TODO: ???
  blacklist('f16 _ _ dstreadshuffle')
  blacklist('glsrgb image _ _')
  blacklist('glessrgb image _ _')

  # --src image --config g8 means "decode into Gray8", which isn't supported.
  blacklist('g8 image _ _')
  blacklist('g8 colorImage _ _')

  if 'Valgrind' in bot:
    # These take 18+ hours to run.
    blacklist('pdf gm _ fontmgr_iter')
    blacklist('pdf _ _ PANO_20121023_214540.jpg')
    blacklist('pdf skp _ worldjournal')
    blacklist('pdf skp _ desk_baidu.skp')
    blacklist('pdf skp _ desk_wikipedia.skp')
    blacklist('_ svg _ _')
    # skbug.com/9171 and 8847
    blacklist('_ test _ InitialTextureClear')

  if 'TecnoSpark3Pro' in bot:
    # skbug.com/9421
    blacklist('_ test _ InitialTextureClear')

  if 'iOS' in bot:
    blacklist(gl_prefix + ' skp _ _')

  if 'Mac' in bot or 'iOS' in bot:
    # CG fails on questionable bmps
    blacklist('_ image gen_platf rgba32abf.bmp')
    blacklist('_ image gen_platf rgb24prof.bmp')
    blacklist('_ image gen_platf rgb24lprof.bmp')
    blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
    blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
    blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
    blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')

    # CG has unpredictable behavior on this questionable gif
    # It's probably using uninitialized memory
    blacklist('_ image gen_platf frame_larger_than_image.gif')

    # CG has unpredictable behavior on incomplete pngs
    # skbug.com/5774
    blacklist('_ image gen_platf inc0.png')
    blacklist('_ image gen_platf inc1.png')
    blacklist('_ image gen_platf inc2.png')
    blacklist('_ image gen_platf inc3.png')
    blacklist('_ image gen_platf inc4.png')
    blacklist('_ image gen_platf inc5.png')
    blacklist('_ image gen_platf inc6.png')
    blacklist('_ image gen_platf inc7.png')
    blacklist('_ image gen_platf inc8.png')
    blacklist('_ image gen_platf inc9.png')
    blacklist('_ image gen_platf inc10.png')
    blacklist('_ image gen_platf inc11.png')
    blacklist('_ image gen_platf inc12.png')
    blacklist('_ image gen_platf inc13.png')
    blacklist('_ image gen_platf inc14.png')
    blacklist('_ image gen_platf incInterlaced.png')

    # These images fail after Mac 10.13.1 upgrade.
    blacklist('_ image gen_platf incInterlaced.gif')
    blacklist('_ image gen_platf inc1.gif')
    blacklist('_ image gen_platf inc0.gif')
    blacklist('_ image gen_platf butterfly.gif')

  # WIC fails on questionable bmps
  if 'Win' in bot:
    blacklist('_ image gen_platf pal8os2v2.bmp')
    blacklist('_ image gen_platf pal8os2v2-16.bmp')
    blacklist('_ image gen_platf rgba32abf.bmp')
    blacklist('_ image gen_platf rgb24prof.bmp')
    blacklist('_ image gen_platf rgb24lprof.bmp')
    blacklist('_ image gen_platf 8bpp-pixeldata-cropped.bmp')
    blacklist('_ image gen_platf 4bpp-pixeldata-cropped.bmp')
    blacklist('_ image gen_platf 32bpp-pixeldata-cropped.bmp')
    blacklist('_ image gen_platf 24bpp-pixeldata-cropped.bmp')
    if 'x86_64' in bot and 'CPU' in bot:
      # This GM triggers a SkSmallAllocator assert.
      blacklist('_ gm _ composeshader_bitmap')

  if 'Win' in bot or 'Mac' in bot:
    # WIC and CG fail on arithmetic jpegs
    blacklist('_ image gen_platf testimgari.jpg')
    # More questionable bmps that fail on Mac, too. skbug.com/6984
    blacklist('_ image gen_platf rle8-height-negative.bmp')
    blacklist('_ image gen_platf rle4-height-negative.bmp')

  # These PNGs have CRC errors. The platform generators seem to draw
  # uninitialized memory without reporting an error, so skip them to
  # avoid lots of images on Gold.
  blacklist('_ image gen_platf error')

  if 'Android' in bot or 'iOS' in bot or 'Chromecast' in bot:
    # This test crashes the N9 (perhaps because of large malloc/frees). It also
    # is fairly slow and not platform-specific. So we just disable it on all of
    # Android and iOS. skia:5438
    blacklist('_ test _ GrShape')

  if api.vars.internal_hardware_label == '5':
    # http://b/118312149#comment9
    blacklist('_ test _ SRGBReadWritePixels')

  # skia:4095
  bad_serialize_gms = ['bleed_image',
                       'c_gms',
                       'colortype',
                       'colortype_xfermodes',
                       'drawfilter',
                       'fontmgr_bounds_0.75_0',
                       'fontmgr_bounds_1_-0.25',
                       'fontmgr_bounds',
                       'fontmgr_match',
                       'fontmgr_iter',
                       'imagemasksubset',
                       'wacky_yuv_formats_domain',
                       'imagemakewithfilter',
                       'imagemakewithfilter_crop',
                       'imagemakewithfilter_crop_ref',
                       'imagemakewithfilter_ref']

  # skia:5589
  bad_serialize_gms.extend(['bitmapfilters',
                            'bitmapshaders',
                            'bleed',
                            'bleed_alpha_bmp',
                            'bleed_alpha_bmp_shader',
                            'convex_poly_clip',
                            'extractalpha',
                            'filterbitmap_checkerboard_32_32_g8',
                            'filterbitmap_image_mandrill_64',
                            'shadows',
                            'simpleaaclip_aaclip'])

  # skia:5595
  bad_serialize_gms.extend(['composeshader_bitmap',
                            'scaled_tilemodes_npot',
                            'scaled_tilemodes'])

  # skia:5778
  bad_serialize_gms.append('typefacerendering_pfaMac')
  # skia:5942
  bad_serialize_gms.append('parsedpaths')

  # these use a custom image generator which doesn't serialize
  bad_serialize_gms.append('ImageGeneratorExternal_rect')
  bad_serialize_gms.append('ImageGeneratorExternal_shader')

  # skia:6189
  bad_serialize_gms.append('shadow_utils')

  # skia:7938
  bad_serialize_gms.append('persp_images')

  # Not expected to round trip encoding/decoding.
  bad_serialize_gms.append('all_bitmap_configs')
  bad_serialize_gms.append('makecolorspace')
  bad_serialize_gms.append('readpixels')
  bad_serialize_gms.append('draw_image_set_rect_to_rect')
  bad_serialize_gms.append('compositor_quads_shader')

  # This GM forces a path to be convex. That property doesn't survive
  # serialization.
  bad_serialize_gms.append('analytic_antialias_convex')

  for test in bad_serialize_gms:
    blacklist(['serialize-8888', 'gm', '_', test])

  if 'Mac' not in bot:
    for test in ['bleed_alpha_image', 'bleed_alpha_image_shader']:
      blacklist(['serialize-8888', 'gm', '_', test])
  # It looks like we skip these only for out-of-memory concerns.
  if 'Win' in bot or 'Android' in bot:
    for test in ['verylargebitmap', 'verylarge_picture_image']:
      blacklist(['serialize-8888', 'gm', '_', test])
  if 'Mac' in bot and 'CPU' in bot:
    # skia:6992
    blacklist(['pic-8888', 'gm', '_', 'encode-platform'])
    blacklist(['serialize-8888', 'gm', '_', 'encode-platform'])

  # skia:4769
  for test in ['drawfilter']:
    blacklist([ 'pic-8888', 'gm', '_', test])
  # skia:4703
  for test in ['image-cacherator-from-picture',
               'image-cacherator-from-raster',
               'image-cacherator-from-ctable']:
    blacklist([ 'pic-8888', 'gm', '_', test])
    blacklist(['serialize-8888', 'gm', '_', test])

  # GM that requires raster-backed canvas
  for test in ['complexclip4_bw', 'complexclip4_aa', 'p3',
               'async_rescale_and_read_text_up_large',
               'async_rescale_and_read_text_up',
               'async_rescale_and_read_text_down',
               'async_rescale_and_read_dog_up',
               'async_rescale_and_read_dog_down',
               'async_rescale_and_read_rose',
               'async_rescale_and_read_no_bleed']:
    blacklist([ 'pic-8888', 'gm', '_', test])
    blacklist(['serialize-8888', 'gm', '_', test])

  # GM requries canvas->makeSurface() to return a valid surface.
  blacklist([ 'pic-8888', 'gm', '_', "blurrect_compare"])
  blacklist(['serialize-8888', 'gm', '_', "blurrect_compare"])

  # Extensions for RAW images
  r = ['arw', 'cr2', 'dng', 'nef', 'nrw', 'orf', 'raf', 'rw2', 'pef', 'srw',
       'ARW', 'CR2', 'DNG', 'NEF', 'NRW', 'ORF', 'RAF', 'RW2', 'PEF', 'SRW']

  # skbug.com/4888
  # Blacklist RAW images (and a few large PNGs) on GPU bots
  # until we can resolve failures.
  if 'GPU' in bot:
    blacklist('_ image _ interlaced1.png')
    blacklist('_ image _ interlaced2.png')
    blacklist('_ image _ interlaced3.png')
    for raw_ext in r:
      blacklist('_ image _ .%s' % raw_ext)

  # Blacklist memory intensive tests on 32-bit bots.
  if 'Win8' in bot and 'x86-' in bot:
    blacklist('_ image f16 _')
    blacklist('_ image _ abnormal.wbmp')
    blacklist('_ image _ interlaced1.png')
    blacklist('_ image _ interlaced2.png')
    blacklist('_ image _ interlaced3.png')
    for raw_ext in r:
      blacklist('_ image _ .%s' % raw_ext)

  if 'Nexus5' in bot and 'GPU' in bot:
    # skia:5876
    blacklist(['_', 'gm', '_', 'encode-platform'])

  if 'AndroidOne-GPU' in bot:  # skia:4697, skia:4704, skia:4694, skia:4705
    blacklist(['_', 'gm', '_', 'bigblurs'])
    blacklist(['_', 'gm', '_', 'bleed'])
    blacklist(['_', 'gm', '_', 'bleed_alpha_bmp'])
    blacklist(['_', 'gm', '_', 'bleed_alpha_bmp_shader'])
    blacklist(['_', 'gm', '_', 'bleed_alpha_image'])
    blacklist(['_', 'gm', '_', 'bleed_alpha_image_shader'])
    blacklist(['_', 'gm', '_', 'bleed_image'])
    blacklist(['_', 'gm', '_', 'dropshadowimagefilter'])
    blacklist(['_', 'gm', '_', 'filterfastbounds'])
    blacklist([gl_prefix, 'gm', '_', 'imageblurtiled'])
    blacklist(['_', 'gm', '_', 'imagefiltersclipped'])
    blacklist(['_', 'gm', '_', 'imagefiltersscaled'])
    blacklist(['_', 'gm', '_', 'imageresizetiled'])
    blacklist(['_', 'gm', '_', 'matrixconvolution'])
    blacklist(['_', 'gm', '_', 'strokedlines'])
    if sample_count:
      gl_msaa_config = gl_prefix + 'msaa' + sample_count
      blacklist([gl_msaa_config, 'gm', '_', 'imageblurtiled'])
      blacklist([gl_msaa_config, 'gm', '_', 'imagefiltersbase'])

  match = []
  if 'Valgrind' in bot:  # skia:3021
    match.append('~Threaded')

  if 'Valgrind' in bot and 'PreAbandonGpuContext' in bot:
    # skia:6575
    match.append('~multipicturedraw_')

  if 'AndroidOne' in bot:
    match.append('~WritePixels')  # skia:4711
    match.append('~PremulAlphaRoundTrip_Gpu')  # skia:7501
    match.append('~ReimportImageTextureWithMipLevels')  # skia:8090

  if 'Chromecast' in bot:
    if 'GPU' in bot:
      # skia:6687
      match.append('~animated-image-blurs')
      match.append('~blur_0.01')
      match.append('~blur_image_filter')
      match.append('~check_small_sigma_offset')
      match.append('~imageblur2')
      match.append('~lighting')
      match.append('~longpathdash')
      match.append('~matrixconvolution')
      match.append('~textblobmixedsizes_df')
      match.append('~textblobrandomfont')
    # Blacklisted to avoid OOM (we see DM just end with "broken pipe")
    match.append('~bigbitmaprect_')
    match.append('~DrawBitmapRect')
    match.append('~drawbitmaprect')
    match.append('~GM_animated-image-blurs')
    match.append('~ImageFilterBlurLargeImage')
    match.append('~savelayer_clipmask')
    match.append('~TextBlobCache')
    match.append('~verylarge')

  if 'GalaxyS6' in bot:
    match.append('~SpecialImage')  # skia:6338
    match.append('~skbug6653')  # skia:6653

  if 'MSAN' in bot:
    match.extend(['~Once', '~Shared'])  # Not sure what's up with these tests.

  if 'TSAN' in bot:
    match.extend(['~ReadWriteAlpha'])  # Flaky on TSAN-covered on nvidia bots.
    match.extend(['~RGBA4444TextureTest',  # Flakier than they are important.
                  '~RGB565TextureTest'])

  # By default, we test with GPU threading enabled, unless specifically
  # disabled.
  if 'NoGPUThreads' in bot:
    args.extend(['--gpuThreads', '0'])

  if 'Vulkan' in bot and 'Adreno530' in bot:
    # skia:5777
    match.extend(['~CopySurface'])

  if 'Vulkan' in bot and 'Adreno' in bot:
    # skia:7663
    match.extend(['~WritePixelsNonTextureMSAA_Gpu'])
    match.extend(['~WritePixelsMSAA_Gpu'])

  if 'Vulkan' in bot and api.vars.is_linux and 'IntelIris640' in bot:
    match.extend(['~VkHeapTests'])  # skia:6245

  if api.vars.is_linux and 'IntelIris640' in bot:
    match.extend(['~Programs'])  # skia:7849

  if 'IntelIris640' in bot or 'IntelHD615' in bot or 'IntelHDGraphics615' in bot:
    match.append('~^SRGBReadWritePixels$')  # skia:9225

  if 'Vulkan' in bot and api.vars.is_linux and 'IntelHD405' in bot:
    # skia:7322
    blacklist(['vk', 'gm', '_', 'skbug_257'])
    blacklist(['vk', 'gm', '_', 'filltypespersp'])
    match.append('~^ClearOp$')
    match.append('~^CopySurface$')
    match.append('~^ImageNewShader_GPU$')
    match.append('~^InitialTextureClear$')
    match.append('~^PinnedImageTest$')
    match.append('~^ReadPixels_Gpu$')
    match.append('~^ReadPixels_Texture$')
    match.append('~^SRGBReadWritePixels$')
    match.append('~^VkUploadPixelsTests$')
    match.append('~^WritePixelsNonTexture_Gpu$')
    match.append('~^WritePixelsNonTextureMSAA_Gpu$')
    match.append('~^WritePixels_Gpu$')
    match.append('~^WritePixelsMSAA_Gpu$')

  if 'Vulkan' in bot and 'GTX660' in bot and 'Win' in bot:
    # skbug.com/8047
    match.append('~FloatingPointTextureTest$')

  if 'Metal' in bot and 'HD8870M' in bot and 'Mac' in bot:
    # skia:9255
    match.append('~WritePixelsNonTextureMSAA_Gpu')

  if 'ANGLE' in bot:
    # skia:7835
    match.append('~BlurMaskBiggerThanDest')

  if 'IntelIris6100' in bot and 'ANGLE' in bot and 'Release' in bot:
    # skia:7376
    match.append('~^ProcessorOptimizationValidationTest$')

  if ('IntelIris6100' in bot or 'IntelHD4400' in bot) and 'ANGLE' in bot:
    # skia:6857
    blacklist(['angle_d3d9_es2', 'gm', '_', 'lighting'])

  if 'Chorizo' in bot:
    # skia:8869
    blacklist(['_', 'gm', '_', 'compositor_quads_filter'])

  if 'PowerVRGX6250' in bot:
    match.append('~gradients_view_perspective_nodither') #skia:6972

  if '-arm-' in bot and 'ASAN' in bot:
    # TODO: can we run with env allocator_may_return_null=1 instead?
    match.append('~BadImage')

  if 'Mac' in bot and 'IntelHD6000' in bot:
    # skia:7574
    match.append('~^ProcessorCloneTest$')
    match.append('~^GrMeshTest$')

  if 'Mac' in bot and 'IntelHD615' in bot:
    # skia:7603
    match.append('~^GrMeshTest$')

  if 'LenovoYogaC630' in bot and 'ANGLE' in api.vars.extra_tokens:
    # skia:9275
    blacklist(['_', 'tests', '_', 'Programs'])
    # skia:8976
    blacklist(['_', 'tests', '_', 'GrDefaultPathRendererTest'])
    # https://bugs.chromium.org/p/angleproject/issues/detail?id=3414
    blacklist(['_', 'tests', '_', 'PinnedImageTest'])

  if blacklisted:
    args.append('--blacklist')
    args.extend(blacklisted)

  if match:
    args.append('--match')
    args.extend(match)

  # These bots run out of memory running RAW codec tests. Do not run them in
  # parallel
  if 'Nexus5' in bot or 'Nexus9' in bot:
    args.append('--noRAW_threading')

  if 'FSAA' in bot:
    args.extend(['--analyticAA', 'false'])
  if 'FAAA' in bot:
    args.extend(['--forceAnalyticAA'])

  if 'NativeFonts' not in bot:
    args.append('--nonativeFonts')

  if 'GDI' in bot:
    args.append('--gdi')

  # Let's make all bots produce verbose output by default.
  args.append('--verbose')

  return args
def key_params(api):
  """Build a unique key from the builder name (as a list).

  E.g. arch x86 gpu GeForce320M mode MacMini4.1 os Mac10.6
  """
  # Role is always Test and test_filter carries no signal, so neither helps
  # distinguish results; leave both out of the key.
  excluded = ('role', 'test_filter')
  pairs = []
  for key in sorted(api.vars.builder_cfg):
    if key in excluded:
      continue
    pairs.extend([key, api.vars.builder_cfg[key]])
  return pairs
def test_steps(api):
  """Run the DM test.

  Builds the full `dm` command line (resource/skp/image paths, Gold key
  pairs, per-builder flags from dm_flags()) and runs it via the flavor
  module, optionally seeding an uninteresting-hashes file and copying
  results back to the host for upload.
  """
  b = api.properties['buildername']
  use_hash_file = False
  if upload_dm_results(b):
    host_dm_dir = str(api.flavor.host_dirs.dm_dir)
    api.flavor.create_clean_host_dir(api.path['start_dir'].join('test'))
    device_dm_dir = str(api.flavor.device_dirs.dm_dir)
    # Host and device dirs are the same object for non-device flavors;
    # only wipe the device dir when it is actually distinct.
    if host_dm_dir != device_dm_dir:
      api.flavor.create_clean_device_dir(device_dm_dir)

    # Obtain the list of already-generated hashes.
    hash_filename = 'uninteresting_hashes.txt'
    host_hashes_file = api.vars.tmp_dir.join(hash_filename)
    hashes_file = api.flavor.device_path_join(
        api.flavor.device_dirs.tmp_dir, hash_filename)
    # Best-effort download with exponential backoff; failures do not fail
    # the build (abort_on_failure/fail_build_on_failure both False below).
    api.run(
        api.python.inline,
        'get uninteresting hashes',
        program="""
        import contextlib
        import math
        import socket
        import sys
        import time
        import urllib2

        HASHES_URL = sys.argv[1]
        RETRIES = 5
        TIMEOUT = 60
        WAIT_BASE = 15

        socket.setdefaulttimeout(TIMEOUT)
        for retry in range(RETRIES):
          try:
            with contextlib.closing(
                urllib2.urlopen(HASHES_URL, timeout=TIMEOUT)) as w:
              hashes = w.read()
              with open(sys.argv[2], 'w') as f:
                f.write(hashes)
                break
          except Exception as e:
            print 'Failed to get uninteresting hashes from %s:' % HASHES_URL
            print e
            if retry == RETRIES:
              raise
            waittime = WAIT_BASE * math.pow(2, retry)
            print 'Retry in %d seconds.' % waittime
            time.sleep(waittime)
        """,
        args=[api.properties['gold_hashes_url'], host_hashes_file],
        abort_on_failure=False,
        fail_build_on_failure=False,
        infra_step=True)

    if api.path.exists(host_hashes_file):
      api.flavor.copy_file_to_device(host_hashes_file, hashes_file)
      use_hash_file = True

  # Run DM.
  # Key/value pairs passed to dm via --properties (identify this run).
  properties = [
    'gitHash', api.properties['revision'],
    'builder', api.vars.builder_name,
    'buildbucket_build_id', api.properties.get('buildbucket_build_id', ''),
    'task_id', api.properties['task_id'],
  ]
  if api.vars.is_trybot:
    properties.extend([
      'issue', api.vars.issue,
      'patchset', api.vars.patchset,
      'patch_storage', api.vars.patch_storage,
    ])
  properties.extend(['swarming_bot_id', api.vars.swarming_bot_id])
  properties.extend(['swarming_task_id', api.vars.swarming_task_id])

  if 'Chromecast' in api.vars.builder_cfg.get('os', ''):
    # Due to limited disk space, we only deal with skps and one image.
    args = [
      'dm',
      '--resourcePath', api.flavor.device_dirs.resource_dir,
      '--skps', api.flavor.device_dirs.skp_dir,
      '--images', api.flavor.device_path_join(
          api.flavor.device_dirs.resource_dir, 'images', 'color_wheel.jpg'),
      '--nameByHash',
      '--dontReduceOpsTaskSplitting',
      '--properties'
    ] + properties
  else:
    args = [
      'dm',
      '--resourcePath', api.flavor.device_dirs.resource_dir,
      '--skps', api.flavor.device_dirs.skp_dir,
      '--images', api.flavor.device_path_join(
          api.flavor.device_dirs.images_dir, 'dm'),
      '--colorImages', api.flavor.device_path_join(
          api.flavor.device_dirs.images_dir, 'colorspace'),
      '--nameByHash',
      '--properties'
    ] + properties
  args.extend(['--svgs', api.flavor.device_dirs.svg_dir])
  if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
    args.extend([
      '--lotties',
      api.flavor.device_path_join(
          api.flavor.device_dirs.resource_dir, 'skottie'),
      api.flavor.device_dirs.lotties_dir])

  # Gold key pairs identifying the configuration of this run.
  args.append('--key')
  keys = key_params(api)

  if 'Lottie' in api.vars.builder_cfg.get('extra_config', ''):
    keys.extend(['renderer', 'skottie'])
  if 'DDL' in api.vars.builder_cfg.get('extra_config', ''):
    # 'DDL' style means "--skpViewportSize 2048 --pr ~small"
    keys.extend(['style', 'DDL'])
  else:
    keys.extend(['style', 'default'])
  args.extend(keys)

  if use_hash_file:
    args.extend(['--uninterestingHashesFile', hashes_file])
  if upload_dm_results(b):
    args.extend(['--writePath', api.flavor.device_dirs.dm_dir])

  # Per-builder flags/blacklists computed by dm_flags() earlier in the file.
  args.extend(dm_flags(api, api.vars.builder_name))

  # See skia:2789.
  if 'AbandonGpuContext' in api.vars.extra_tokens:
    args.append('--abandonGpuContext')
  if 'PreAbandonGpuContext' in api.vars.extra_tokens:
    args.append('--preAbandonGpuContext')
  if 'ReleaseAndAbandonGpuContext' in api.vars.extra_tokens:
    args.append('--releaseAndAbandonGpuContext')

  api.run(api.flavor.step, 'dm', cmd=args, abort_on_failure=False)

  if upload_dm_results(b):
    # Copy images and JSON to host machine if needed.
    api.flavor.copy_directory_contents_to_host(
        api.flavor.device_dirs.dm_dir, api.flavor.host_dirs.dm_dir)
def RunSteps(api):
  """Recipe entry point: set up the bot, install assets, and run dm."""
  api.vars.setup()
  api.file.ensure_directory('makedirs tmp_dir', api.vars.tmp_dir)
  api.flavor.setup()

  builder = api.vars.builder_name
  env = {}
  if 'iOS' in builder:
    env = {
        'IOS_BUNDLE_ID': 'com.google.dm',
        'IOS_MOUNT_POINT': api.vars.slave_dir.join('mnt_iosdevice'),
    }
  with api.context(env=env):
    try:
      # Pick which asset groups get installed for this builder.
      if 'Chromecast' in builder:
        install_kwargs = {'resources': True, 'skps': True}
      elif 'Lottie' in builder:
        install_kwargs = {'resources': True, 'lotties': True}
      else:
        install_kwargs = {
            'skps': True, 'images': True, 'svgs': True, 'resources': True}
      api.flavor.install(**install_kwargs)
      test_steps(api)
    finally:
      # Always clean the device, even when dm or install failed.
      api.flavor.cleanup_steps()
  api.run.check_failure()
# Builder names for which GenTests (below) generates a default simulation
# test.  Parenthesized entries are single long names split via implicit
# string concatenation.
TEST_BUILDERS = [
  'Test-Android-Clang-AndroidOne-GPU-Mali400MP2-arm-Release-All-Android',
  'Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-All-Android',
  ('Test-Android-Clang-GalaxyS6-GPU-MaliT760-arm64-Debug-All'
   '-Android_NoGPUThreads'),
  ('Test-Android-Clang-GalaxyS7_G930FD-GPU-MaliT880-arm64-Release-All'
   '-Android_Vulkan'),
  'Test-Android-Clang-MotoG4-CPU-Snapdragon617-arm-Release-All-Android',
  'Test-Android-Clang-NVIDIA_Shield-GPU-TegraX1-arm64-Debug-All-Android_CCPR',
  'Test-Android-Clang-Nexus5-GPU-Adreno330-arm-Release-All-Android',
  'Test-Android-Clang-Nexus7-CPU-Tegra3-arm-Release-All-Android',
  'Test-Android-Clang-Pixel-GPU-Adreno530-arm64-Debug-All-Android_Vulkan',
  'Test-Android-Clang-Pixel-GPU-Adreno530-arm-Debug-All-Android_ASAN',
  'Test-Android-Clang-Pixel2XL-GPU-Adreno540-arm64-Debug-All-Android',
  'Test-Android-Clang-Pixel3-GPU-Adreno630-arm64-Debug-All-Android_Vulkan',
  'Test-Android-Clang-Pixel3a-GPU-Adreno615-arm64-Debug-All-Android',
  ('Test-ChromeOS-Clang-AcerChromebookR13Convertible-GPU-PowerVRGX6250-'
   'arm-Debug-All'),
  'Test-Chromecast-Clang-Chorizo-CPU-Cortex_A7-arm-Release-All',
  'Test-Chromecast-Clang-Chorizo-GPU-Cortex_A7-arm-Release-All',
  'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-ASAN',
  'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-BonusConfigs',
  'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-shard_00_10-Coverage',
  'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All-MSAN',
  ('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All'
   '-SK_USE_DISCARDABLE_SCALEDIMAGECACHE'),
  'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-Lottie',
  ('Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All'
   '-SK_FORCE_RASTER_PIPELINE_BLITTER'),
  'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Release-All-TSAN',
  'Test-Debian9-Clang-GCE-GPU-SwiftShader-x86_64-Release-All-SwiftShader',
  'Test-Debian9-Clang-NUC5PPYH-GPU-IntelHD405-x86_64-Release-All-Vulkan',
  'Test-Debian9-Clang-NUC7i5BNK-GPU-IntelIris640-x86_64-Debug-All-Vulkan',
  'Test-Debian10-GCC-GCE-CPU-AVX2-x86_64-Debug-All-Docker',
  'Test-iOS-Clang-iPhone6-GPU-PowerVRGX6450-arm64-Release-All-Metal',
  ('Test-Mac10.13-Clang-MacBook10.1-GPU-IntelHD615-x86_64-Release-All'
   '-NativeFonts'),
  'Test-Mac10.13-Clang-MacBookPro11.5-CPU-AVX2-x86_64-Release-All',
  'Test-Mac10.13-Clang-MacBookPro11.5-GPU-RadeonHD8870M-x86_64-Debug-All-Metal',
  ('Test-Mac10.13-Clang-MacMini7.1-GPU-IntelIris5100-x86_64-Debug-All'
   '-CommandBuffer'),
  'Test-Mac10.14-Clang-MacBookAir7.2-GPU-IntelHD6000-x86_64-Debug-All',
  'Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-Vulkan',
  ('Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
   '-Valgrind_AbandonGpuContext_SK_CPU_LIMIT_SSE41'),
  ('Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
   '-Valgrind_PreAbandonGpuContext_SK_CPU_LIMIT_SSE41'),
  'Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL1',
  'Test-Ubuntu18-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-DDL3',
  'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All-BonusConfigs',
  'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-GpuTess',
  'Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Debug-All-NonNVPR',
  ('Test-Win10-Clang-Golo-GPU-QuadroP400-x86_64-Release-All'
   '-ReleaseAndAbandonGpuContext'),
  'Test-Win10-Clang-NUC5i7RYH-CPU-AVX2-x86_64-Debug-All-NativeFonts_GDI',
  'Test-Win10-Clang-NUC5i7RYH-GPU-IntelIris6100-x86_64-Release-All-ANGLE',
  'Test-Win10-Clang-NUCD34010WYKH-GPU-IntelHD4400-x86_64-Release-All-ANGLE',
  'Test-Win10-Clang-ShuttleA-GPU-GTX660-x86_64-Release-All-Vulkan',
  'Test-Win10-Clang-ShuttleA-GPU-RadeonHD7770-x86_64-Release-All-Vulkan',
  'Test-Win10-Clang-ShuttleC-GPU-GTX960-x86_64-Debug-All-ANGLE',
  'Test-Win10-MSVC-LenovoYogaC630-GPU-Adreno630-arm64-Debug-All-ANGLE',
  'Test-Win2019-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FAAA',
  'Test-Win2019-Clang-GCE-CPU-AVX2-x86_64-Debug-All-FSAA',
  'Test-iOS-Clang-iPadPro-GPU-PowerVRGT7800-arm64-Release-All',
  'Test-Mac10.13-Clang-MacBook10.1-GPU-IntelHD615-x86_64-Debug-All-CommandBuffer',
  'Test-Android-Clang-TecnoSpark3Pro-GPU-PowerVRGE8320-arm-Debug-All-Android',
]
def GenTests(api):
  """Generate recipe simulation tests.

  Yields one default test per entry in TEST_BUILDERS, then a handful of
  special-case tests (trybot, failed dm, failed hash fetch, failed
  push/pull, internal hardware label).
  """
  for builder in TEST_BUILDERS:
    test = (
      api.test(builder) +
      api.properties(buildername=builder,
                     buildbucket_build_id='123454321',
                     revision='abc123',
                     path_config='kitchen',
                     gold_hashes_url='https://example.com/hashes.txt',
                     swarm_out_dir='[SWARM_OUT_DIR]',
                     task_id='task_12345') +
      api.path.exists(
          api.path['start_dir'].join('skia'),
          api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                     'skimage', 'VERSION'),
          api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                     'skp', 'VERSION'),
          api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                     'svg', 'VERSION'),
          api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
      ) +
      api.step_data('get swarming bot id',
                    stdout=api.raw_io.output('skia-bot-123')) +
      api.step_data('get swarming task id',
                    stdout=api.raw_io.output('123456'))
    )
    # Windows builders simulate a win platform, except the ARM Yoga.
    if 'Win' in builder and not 'LenovoYogaC630' in builder:
      test += api.platform('win', 64)
    if 'Chromecast' in builder:
      test += api.step_data(
          'read chromecast ip',
          stdout=api.raw_io.output('192.168.1.2:5555'))
    yield test

  builder = 'Test-Win8-Clang-Golo-CPU-AVX-x86-Debug-All'
  yield (
    api.test('trybot') +
    api.properties(buildername=builder,
                   buildbucket_build_id='123454321',
                   revision='abc123',
                   path_config='kitchen',
                   gold_hashes_url='https://example.com/hashes.txt',
                   swarm_out_dir='[SWARM_OUT_DIR]',
                   task_id='task_12345') +
    api.platform('win', 64) +
    api.properties(patch_storage='gerrit') +
    api.properties.tryserver(
        buildername=builder,
        gerrit_project='skia',
        gerrit_url='https://skia-review.googlesource.com/',
    )+
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    )
  )

  builder = 'Test-Debian9-Clang-GCE-CPU-AVX2-x86_64-Debug-All'
  yield (
    api.test('failed_dm') +
    api.properties(buildername=builder,
                   buildbucket_build_id='123454321',
                   revision='abc123',
                   path_config='kitchen',
                   gold_hashes_url='https://example.com/hashes.txt',
                   swarm_out_dir='[SWARM_OUT_DIR]',
                   task_id='task_12345') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('symbolized dm', retcode=1)
  )

  builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Release-All-Android'
  yield (
    api.test('failed_get_hashes') +
    api.properties(buildername=builder,
                   buildbucket_build_id='123454321',
                   revision='abc123',
                   path_config='kitchen',
                   gold_hashes_url='https://example.com/hashes.txt',
                   swarm_out_dir='[SWARM_OUT_DIR]',
                   task_id='task_12345') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('get uninteresting hashes', retcode=1)
  )

  builder = 'Test-Android-Clang-Nexus7-GPU-Tegra3-arm-Debug-All-Android'
  # Simulate three consecutive failures of the resource-push step.
  retry_step_name = ('push [START_DIR]/skia/resources/* '
                     '/sdcard/revenge_of_the_skiabot/resources.push '
                     '[START_DIR]/skia/resources/file1')
  yield (
    api.test('failed_push') +
    api.properties(buildername=builder,
                   buildbucket_build_id='123454321',
                   revision='abc123',
                   path_config='kitchen',
                   gold_hashes_url='https://example.com/hashes.txt',
                   swarm_out_dir='[SWARM_OUT_DIR]',
                   task_id='task_12345') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('get swarming bot id',
                  stdout=api.raw_io.output('build123-m2--device5')) +
    api.step_data(retry_step_name, retcode=1) +
    api.step_data(retry_step_name + ' (attempt 2)', retcode=1) +
    api.step_data(retry_step_name + ' (attempt 3)', retcode=1)
  )

  retry_step_name = 'adb pull.pull /sdcard/revenge_of_the_skiabot/dm_out'
  yield (
    api.test('failed_pull') +
    api.properties(buildername=builder,
                   buildbucket_build_id='123454321',
                   revision='abc123',
                   path_config='kitchen',
                   gold_hashes_url='https://example.com/hashes.txt',
                   swarm_out_dir='[SWARM_OUT_DIR]',
                   task_id='task_12345') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    ) +
    api.step_data('dm', retcode=1) +
    api.step_data(retry_step_name, retcode=1) +
    api.step_data(retry_step_name + ' (attempt 2)', retcode=1) +
    api.step_data(retry_step_name + ' (attempt 3)', retcode=1)
  )

  yield (
    api.test('internal_bot_5') +
    api.properties(buildername=builder,
                   buildbucket_build_id='123454321',
                   revision='abc123',
                   path_config='kitchen',
                   swarm_out_dir='[SWARM_OUT_DIR]',
                   gold_hashes_url='https://example.com/hashes.txt',
                   internal_hardware_label='5',
                   task_id='task_12345') +
    api.path.exists(
        api.path['start_dir'].join('skia'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skimage', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'skp', 'VERSION'),
        api.path['start_dir'].join('skia', 'infra', 'bots', 'assets',
                                   'svg', 'VERSION'),
        api.path['start_dir'].join('tmp', 'uninteresting_hashes.txt')
    )
  )
|
{
"content_hash": "355604ca7d8599c6a23b5dd3206e7da3",
"timestamp": "",
"source": "github",
"line_count": 1249,
"max_line_length": 93,
"avg_line_length": 38.638911128903125,
"alnum_prop": 0.6076253626191462,
"repo_name": "HalCanary/skia-hc",
"id": "7ec0ddbb601931d2c1eb2ef59a4fa6ba52e421f9",
"size": "48466",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "infra/bots/recipes/test.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Assembly",
"bytes": "1277297"
},
{
"name": "Batchfile",
"bytes": "865"
},
{
"name": "C",
"bytes": "505166"
},
{
"name": "C#",
"bytes": "4683"
},
{
"name": "C++",
"bytes": "32234337"
},
{
"name": "CMake",
"bytes": "2850"
},
{
"name": "CSS",
"bytes": "3078"
},
{
"name": "Dockerfile",
"bytes": "14764"
},
{
"name": "GLSL",
"bytes": "109164"
},
{
"name": "Go",
"bytes": "135327"
},
{
"name": "HTML",
"bytes": "1321397"
},
{
"name": "Java",
"bytes": "167849"
},
{
"name": "JavaScript",
"bytes": "463920"
},
{
"name": "Lex",
"bytes": "2521"
},
{
"name": "Lua",
"bytes": "70982"
},
{
"name": "Makefile",
"bytes": "13502"
},
{
"name": "Objective-C",
"bytes": "83351"
},
{
"name": "Objective-C++",
"bytes": "366996"
},
{
"name": "PHP",
"bytes": "139510"
},
{
"name": "PowerShell",
"bytes": "1432"
},
{
"name": "Python",
"bytes": "1055437"
},
{
"name": "Shell",
"bytes": "95010"
}
],
"symlink_target": ""
}
|
from nltk.corpus import brown, stopwords
from nltk import ConditionalFreqDist
# cfd[noun][nearby_noun] counts how often nearby_noun appears shortly after
# noun in the Brown corpus (filled in by the loop below).
cfd = ConditionalFreqDist()
# English stopwords, filtered out of both the target and the window words.
stopwords_list = stopwords.words('english')
# Brown-corpus tags (lowercased) that mark nouns: common nouns (nn*), proper
# nouns (np*) and adverbial nouns (nr*), including possessive ($), title
# (-tl), plural (s) and verb-fused (+bez/+hvz) variants.  Hoisted to a
# frozenset so membership is O(1) and the container is built only once.
_NOUN_TAGS = frozenset([
    'nn', 'nns', 'nn$', 'nn-tl', 'nn+bez',
    'nn+hvz', 'nns$', 'np', 'np$', 'np+bez',
    'nps', 'nps$', 'nr', 'np-tl', 'nrs', 'nr$',
])


def is_noun(tag):
    """Return True if *tag* (any case) is a Brown-corpus noun tag."""
    return tag.lower() in _NOUN_TAGS
# Walk every tagged sentence; for each content-word noun, count the nouns
# occurring within the next four tokens.
for sentence in brown.tagged_sents():
    for position, (word, word_tag) in enumerate(sentence):
        word = word.lower()
        if word in stopwords_list or not is_noun(word_tag):
            continue
        for neighbor, neighbor_tag in sentence[position + 1:position + 5]:
            neighbor = neighbor.lower()
            if neighbor not in stopwords_list and is_noun(neighbor_tag):
                cfd[word][neighbor] += 1

# OK. We are done ! Let's start associating !
print('left ->', cfd['left'].max())
print('life ->', cfd['life'].max())
print('man ->', cfd['man'].max())
print('woman ->', cfd['woman'].max())
print('boy ->', cfd['boy'].max())
print('girl ->', cfd['girl'].max())
print('male ->', cfd['male'].max())
print('ball ->', cfd['ball'].max())
print('doctor ->', cfd['doctor'].max())
print('road ->', cfd['road'].max())
|
{
"content_hash": "bcc2a368ee87ec41e40d5de9000d9ee2",
"timestamp": "",
"source": "github",
"line_count": 36,
"max_line_length": 78,
"avg_line_length": 35.166666666666664,
"alnum_prop": 0.5600315955766193,
"repo_name": "desilinguist/acm-crossroads-nltk",
"id": "2ec2026f418cb5265c1abae4f5c0d26698d3c8e7",
"size": "1290",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "task4.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "52432"
},
{
"name": "Python",
"bytes": "4561"
},
{
"name": "TeX",
"bytes": "36253"
}
],
"symlink_target": ""
}
|
from zang.exceptions.zang_exception import ZangException
from zang.configuration.configuration import Configuration
from zang.connectors.connector_factory import ConnectorFactory
from zang.domain.enums.product import Product
from docs.examples.credetnials import sid, authToken
# Build a connector for the usages endpoint of the Zang REST API.
url = 'http://api.zang.io/v2'
configuration = Configuration(sid, authToken, url=url)
usagesConnector = ConnectorFactory(configuration).usagesConnector

# view usage
try:
    usage = usagesConnector.viewUsage('{UsageSid}')
    print(usage.totalCost)
except ZangException as ze:
    print(ze)

# list usages
try:
    product = Product.ordinal(Product.OUTBOUND_CALL)
    usages = usagesConnector.listUsages(
        product=product,
        year=2017,
        month=2,
        pageSize=100)
    if usages and usages.elements:
        # Start from 0.0 so the printed total is always a float, matching
        # the original accumulator's behavior.
        print(sum((item.totalCost for item in usages.elements), 0.0))
except ZangException as ze:
    print(ze)
|
{
"content_hash": "d270b499755a25a6ec1c6285d4a8cdcb",
"timestamp": "",
"source": "github",
"line_count": 35,
"max_line_length": 65,
"avg_line_length": 27.514285714285716,
"alnum_prop": 0.726895119418484,
"repo_name": "jaymin-panchal/zang-python",
"id": "5222cd861c272b682035c88a6bd8c3434d72cc9a",
"size": "963",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "docs/examples/usages_example.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "344686"
}
],
"symlink_target": ""
}
|
"""
This script helps to set up a simple, local deployment of QFS.
You can write your own config file (start from ./sample_setup.cfg)
and run this script to install, uninstall or upgrade.
./sample_setup.py -c sample_setup.cfg -a install
-c: config file
-a: action (one of install, start, stop, uninstall)
-r: release directory (QFS build output)
-s: source directory
The script sets up the servers' config files as follows.
(By default it installs all this in ~/qfsbase, set in the config file.)
meta-run-dir/checkpoints/
/logs/
/conf/MetaServer.prp
/metaserver.log
/metaserver.out
chunk-run-dir/chunkserver1/chunks/
/conf/ChunkServer.prp
/chunkserver.log
/chunkserver.out
/chunkserver2/chunks/
/conf/ChunkServer.prp
/chunkserver.log
/chunkserver.out
webui-run-dir/docroot/
/conf/WebUI.cfg
/webui.log
"""
import sys, os, os.path, shutil, errno, signal, posix, re, socket
import ConfigParser
import subprocess
import getpass
from optparse import OptionParser, OptionGroup, IndentedHelpFormatter
class Globals():
    """Module-wide registry of server binary/script locations.

    The values start as bare defaults and are overwritten with absolute
    paths by check_binaries().
    """
    METASERVER = 'metaserver'    # path to the metaserver binary
    CHUNKSERVER = 'chunkserver'  # path to the chunkserver binary
    WEBSERVER = 'qfsstatus.py'   # path to the web UI server script
    QFSTOOL = None               # path to the qfs CLI tool, if present
    MKCERTS = None               # path to qfsmkcerts.sh (auth mode only)
def get_size_in_bytes(str):
    """Convert a size string such as '64M' or '2g' to an integer.

    An empty/None value yields 0; a bare number is returned unchanged; an
    unrecognized unit letter leaves the numeric prefix unscaled.
    """
    # NOTE: the parameter shadows the builtin 'str'; the name is kept so
    # existing keyword callers keep working.
    if not str:
        return 0
    idx = 0
    # Find the first alphabetic character, i.e. the unit suffix.
    while idx < len(str) and not str[idx].isalpha():
        idx += 1
    if idx >= len(str):
        return int(str)
    multipliers = {'k': 1000, 'K': 1000,
                   'm': 1000000, 'M': 1000000,
                   'g': 1000000000, 'G': 1000000000}
    return int(str[0:idx]) * multipliers.get(str[idx], 1)
def shell_quote(s):
    """Wrap *s* in single quotes for safe interpolation into a POSIX shell
    command, escaping any embedded single quotes."""
    escaped = s.replace("'", "'\\''")
    return "'%s'" % escaped
def check_binaries(releaseDir, sourceDir, authFlag):
    """Verify the required server binaries/scripts exist, recording their
    absolute paths in Globals; exits the process when a mandatory one is
    missing."""
    if not os.path.isfile(releaseDir + '/bin/metaserver'):
        sys.exit('Metaserver missing in build directory')
    Globals.METASERVER = releaseDir + '/bin/metaserver'

    if not os.path.isfile(releaseDir + '/bin/chunkserver'):
        sys.exit('Chunkserver missing in build directory')
    Globals.CHUNKSERVER = releaseDir + '/bin/chunkserver'

    # The qfs tool is optional; remember it only if present.
    if os.path.isfile(releaseDir + '/bin/tools/qfs'):
        Globals.QFSTOOL = releaseDir + '/bin/tools/qfs'

    # The web UI script may live in the release or the source tree.
    if os.path.isfile(releaseDir + '/webui/qfsstatus.py'):
        Globals.WEBSERVER = releaseDir + '/webui/qfsstatus.py'
    elif os.path.isfile(sourceDir + '/webui/qfsstatus.py'):
        Globals.WEBSERVER = sourceDir + '/webui/qfsstatus.py'
    else:
        sys.exit('Webserver missing in build and source directories')

    # The cert-generation script is only required in authentication mode.
    if authFlag:
        mkcerts = sourceDir + '/src/test-scripts/qfsmkcerts.sh'
        if os.path.isfile(mkcerts):
            Globals.MKCERTS = mkcerts
        else:
            sys.exit('qfsmkcerts.sh missing in source directories')
    print 'Binaries presence checking - OK.'
def check_port(port):
    """Exit the program if TCP *port* on localhost cannot be bound.

    SO_REUSEADDR is set so a socket lingering in TIME_WAIT does not
    produce a false "in use" report.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        # 'except ... as' works on Python 2.6+ and 3.x; the original
        # 'except socket.error, err' form is Python-2-only syntax.
        sys.exit('aborting, port %d already in use (%s)' % (port, str(err)))
    finally:
        # Close explicitly instead of relying on 'del' + garbage collection.
        s.close()
def check_ports(config):
    """Verify every TCP port named in *config* is currently bindable.

    Covers the metaserver client/chunk ports, the web UI port, and the
    chunk port of each [chunkserverN] section; check_port() exits the
    program on the first busy port.
    """
    ports = [
        config.getint('metaserver', 'clientport'),
        config.getint('metaserver', 'chunkport'),
        config.getint('webui', 'webport'),
    ]
    ports.extend(
        config.getint(section, 'chunkport')
        for section in config.sections()
        if section.startswith('chunkserver'))
    for port in ports:
        check_port(port)
def kill_running_program_pid(binaryPath, runDir):
    """Stop the server identified by *binaryPath* using the pid file in
    *runDir*; falls back to process-table matching when either argument
    cannot be resolved."""
    # Map the binary back to the pid-file base name used at startup.
    if binaryPath == Globals.METASERVER:
        name = 'metaserver'
    elif binaryPath == Globals.CHUNKSERVER:
        name = 'chunkserver'
    elif binaryPath == Globals.WEBSERVER:
        name = 'webui'
    else:
        name = ''
    if 0 < len(name) and 0 < len(runDir):
        # Best-effort: a missing/stale pid file or an already-dead process
        # is not an error, hence the deliberately broad except.
        try:
            pidf = '%s/%s.pid' % (runDir, name)
            f = open(pidf, 'r')
            line = f.readline()
            f.close()
            os.unlink(pidf)
            pid = int(line.strip())
            os.kill(pid, signal.SIGTERM)
        except:
            pass
    else:
        # No pid file available; hunt the process down by name instead.
        kill_running_program(binaryPath)
def kill_running_program(binaryPath):
    """SIGTERM every process of the current user matching *binaryPath*.

    On macOS (no reliable pidof) the process table is grepped; elsewhere
    pidof is used, except for the web UI script which is also matched via
    ps because it runs under a python interpreter.
    """
    if sys.platform in ('darwin', 'Darwin'):
        checkPath = os.path.split(binaryPath)[1]
        if not checkPath:
            return
        # Match by basename and owning user; exclude the grep itself.
        cmd = ('ps -ef | grep %s | grep %s | grep -v grep | awk \'{print $2}\''
               % (os.getlogin(), checkPath))
        res = subprocess.Popen(cmd, shell=True,
                               stdout=subprocess.PIPE).communicate()
        pids = res[0].split('\n')
        for pid in pids:
            if pid.strip() != '':
                os.kill(int(pid.strip()), signal.SIGTERM)
    else:
        if binaryPath.find('qfsstatus') >= 0:
            # The web UI is a python script, so pidof on the script path
            # would not find it; match the command line via ps instead,
            # restricted to qfsbase installs.
            cmd = ('ps -ef | grep %s | grep /qfsbase/ | grep %s | grep -v grep | awk \'{print $2}\''
                   % (os.getlogin(), binaryPath))
            res = subprocess.Popen(cmd, shell=True,
                                   stdout=subprocess.PIPE).communicate()
            pids = res[0].split('\n')
            for pid in pids:
                if pid.strip() != '':
                    os.kill(int(pid.strip()), signal.SIGTERM)
            return
        pids = subprocess.Popen(['pidof', binaryPath],
                                stdout=subprocess.PIPE).communicate()
        for pid in pids[0].strip().split():
            os.kill(int(pid), signal.SIGTERM)
def run_command(cmd):
    """Run *cmd* through the shell and return its exit status.

    subprocess.check_call raises CalledProcessError on a non-zero exit,
    so a value is only returned (always 0) on success; callers comparing
    the result against 0 therefore never see a failure that way.
    """
    return subprocess.check_call(cmd, shell=True)
def rm_tree(path):
    """Recursively delete *path*, but only when it lies inside a qfsbase
    tree -- a safety net against removing arbitrary directories."""
    if '/qfsbase/' in path:
        shutil.rmtree(path)
    else:
        # Emit the refusal via sys.stderr.write, which works on both
        # Python 2 and 3 (the original used Python-2-only 'print >>'
        # statements); the message text is unchanged.
        sys.stderr.write(
            'refusing to remove path %r because it does not contain '
            '/qfsbase/\n' % path)
def duplicate_tree(src, dst):
    """Copy files & directories from SRC directory to DST directory.

    If DST does not exist, create it. If DST's children with same SRC
    children names exist then overwrite them.
    """
    if os.path.exists(dst) and not os.path.isdir(dst):
        sys.exit('Cannot duplicate directory to a non-directory')
    if not os.path.exists(dst):
        os.makedirs(dst)

    for entry in os.listdir(src):
        source = os.path.join(src, entry)
        target = os.path.join(dst, entry)
        # Clear whatever currently occupies the target slot.
        if os.path.isdir(target):
            rm_tree(target)
        elif os.path.exists(target):
            os.unlink(target)
        # Copy directories recursively, plain files byte-for-byte.
        if os.path.isdir(source):
            shutil.copytree(source, target)
        else:
            shutil.copyfile(source, target)
def mkdir_p(dirname):
    """Create *dirname* (and parents) if needed, like `mkdir -p`.

    Exits the program if creation fails, or if something that is not a
    directory already occupies the path.
    """
    try:
        os.makedirs(dirname)
    except OSError as err:
        if err.errno != errno.EEXIST:
            sys.exit('Failed to create directory')
        # EEXIST: fine when a directory is already there, fatal otherwise.
        # (The original placed this check in the try's 'else' branch, where
        # makedirs had just created a directory, so a plain file at the path
        # was silently accepted; its message also used the broken format
        # string '% exists...'.)
        if not os.path.isdir(dirname):
            sys.exit('%s exists, but is not a directory!' % dirname)
def parse_command_line():
    """Parse and validate command-line options; return the options object.

    Prints help text (with an actions summary and a sample session) and
    exits the process on -h or on any validation error.
    """
    # The set of values accepted by --action.
    action_keys = { 'install'   : True,
                    'start'     : True,
                    'stop'      : True,
                    'uninstall' : True }

    argv0Dir = os.path.dirname(sys.argv[0])

    # Defaults are resolved relative to this script's location.
    defaultConfig = os.path.join(argv0Dir, 'sample_setup.cfg')
    defaultConfig = os.path.abspath(defaultConfig)
    defaultSrcDir = os.path.join(argv0Dir, '../..')
    defaultSrcDir = os.path.abspath(defaultSrcDir)
    defaultRelDir = os.path.join(argv0Dir, '../../build/release')
    defaultRelDir = os.path.abspath(defaultRelDir)
    # Fall back to the source tree when no release build directory exists.
    if not os.path.exists(defaultRelDir):
        defaultRelDir = os.path.join(argv0Dir, '../..')
        defaultRelDir = os.path.abspath(defaultRelDir)

    formatter = IndentedHelpFormatter(max_help_position=50, width=120)
    usage = "usage: ./%prog [options] -a <ACTION>"
    # add_help_option=False because -h is registered manually below so the
    # extra actions/sample-session text can be appended to the help output.
    parser = OptionParser(usage, formatter=formatter, add_help_option=False)
    parser.add_option('-c', '--config-file', action='store',
        default=defaultConfig, metavar='FILE', help='Setup config file.')
    parser.add_option('-a', '--action', action='store', default=None,
        metavar='ACTION', help='One of install, uninstall, or stop.')
    parser.add_option('-r', '--release-dir', action='store',
        default=defaultRelDir, metavar='DIR', help='QFS release directory.')
    parser.add_option('-s', '--source-dir', action='store',
        default=defaultSrcDir, metavar='DIR', help='QFS source directory.')
    parser.add_option('-u', '--auth', action='store_true',
        help="Configure QFS authentication.")
    parser.add_option('-o', '--object-store', action='store_true',
        help="Enable object store (S3) mode.")
    parser.add_option('-h', '--help', action='store_true',
        help="Print this help message and exit.")

    actions = """
Actions:
  install   = setup meta and chunk server directories, restarting/starting them
  start     = start meta and chunk servers
  stop      = stop meta and chunk servers
  uninstall = remove meta and chunk server directories after stopping them"""

    sampleSession = """
Hello World example of a client session:
  # Install sample server setup, only needed once.
  %s/examples/sampleservers/sample_setup.py -a install

  PATH="%s:${PATH}"

  # Make temp directory.
  qfsshell -s localhost -p 20000 -q -- mkdir /qfs/tmp

  # Create file containing Hello World, Reed-Solomon encoded, replication 1.
  echo 'Hello World' \
    | cptoqfs -s localhost -p 20000 -S -r 1 -k /qfs/tmp/helloworld -d -

  # Cat file content.
  qfscat -s localhost -p 20000 /qfs/tmp/helloworld

  # Stat file to see encoding (RS or not), replication level, mtime.
  qfsshell -s localhost -p 20000 -q -- stat /qfs/tmp/helloworld

  # Copy file locally to current directory.
  cpfromqfs -s localhost -p 20000 -k /qfs/tmp/helloworld -d ./helloworld

  # Remove file from QFS.
  qfsshell -s localhost -p 20000 -q -- rm /qfs/tmp/helloworld

  # Stop the server and remove the custom install.
  %s/examples/sampleservers/sample_setup.py -a stop
  %s/examples/sampleservers/sample_setup.py -a uninstall

Use qfs to manipulate files the same way you would use 'hadoop fs':
  # Set qfs command alias.
  alias qfs='%s/bin/tools/qfs -cfg ~/qfsbase/client/clidefault.prp'
  qfs -h
  qfs -stat /
  qfs -mkdir /some-dir
  qfs -ls /

  Did you notice how fast it is? :)

Run the following to test with hadoop:
  %s/src/test-scripts/qfshadoop.sh
"""

    # An install sets up all config files and (re)starts the servers.
    # An uninstall stops the servers and removes the config files.
    # A stop stops the servers.
    opts, args = parser.parse_args()

    # Substitute the resolved source/release dirs into the sample text.
    sampleSession = sampleSession % (
        opts.source_dir,
        opts.release_dir,
        opts.source_dir,
        opts.source_dir,
        opts.release_dir,
        opts.source_dir
    )

    if opts.help:
        parser.print_help()
        print actions
        print sampleSession
        print
        posix._exit(0)

    # Collect all validation errors so they can be reported together.
    e = []
    if not os.path.isfile(opts.config_file):
        e.append("specified 'config-file' does not exist: %s"
                 % opts.config_file)

    if not opts.action:
        e.append("'action' must be specified")
    elif not action_keys.has_key(opts.action):
        e.append("invalid 'action' specified: %s" % opts.action)

    if not os.path.isdir(opts.release_dir):
        e.append("specified 'release-dir' does not exist: %s"
                 % opts.release_dir)

    if not os.path.isdir(opts.source_dir):
        e.append("specified 'source-dir' does not exist: %s" % opts.source_dir)

    if len(e) > 0:
        parser.print_help()
        print actions
        print sampleSession
        print
        for error in e:
            print "*** %s" % error
        print
        posix._exit(1)

    return opts
def do_cleanup(config, doUninstall):
    """Stop all servers named in *config*; when *doUninstall* is true also
    remove their run directories (and the ~/qfsbase root if empty)."""
    if config.has_section('metaserver'):
        metaRunDir = config.get('metaserver', 'rundir')
        if metaRunDir:
            kill_running_program_pid(Globals.METASERVER, metaRunDir)
            if doUninstall and os.path.isdir(metaRunDir):
                rm_tree(metaRunDir)

    # One [chunkserverN] section per chunk server instance.
    for section in config.sections():
        if section.startswith('chunkserver'):
            chunkRunDir = config.get(section, 'rundir')
            if chunkRunDir:
                kill_running_program_pid(Globals.CHUNKSERVER, chunkRunDir)
                if doUninstall and os.path.isdir(chunkRunDir):
                    rm_tree(chunkRunDir)

    if config.has_section('webui'):
        webDir = config.get('webui', 'rundir')
        if webDir:
            kill_running_program_pid(Globals.WEBSERVER, webDir)
            if doUninstall and os.path.isdir(webDir):
                rm_tree(webDir)

    # Certs and client dirs have no server process; they are only removed.
    if config.has_section('certs'):
        certsDir = config.get('certs', 'rundir')
        if doUninstall and os.path.isdir(certsDir):
            rm_tree(certsDir)

    if config.has_section('client'):
        clientDir = config.get('client', 'rundir')
        if doUninstall and os.path.isdir(clientDir):
            rm_tree(clientDir)

    if doUninstall:
        # rmdir only succeeds when qfsbase is empty; a symlinked qfsbase is
        # deliberately left alone.
        qfsbase = os.path.expanduser('~/qfsbase')
        if os.path.isdir(qfsbase) and not os.path.islink(qfsbase):
            os.rmdir(qfsbase)
        print 'Uninstall - OK.'
    else:
        print 'Stop servers - OK.'
def setup_directories(config, authFlag, objectStoreOnlyModeFlag):
    """Create the run/conf/data directory layout for every server named in
    *config* (see the module docstring for the resulting tree)."""
    # authFlag is currently unused here; cert dirs are handled elsewhere.
    if config.has_section('metaserver'):
        metaRunDir = config.get('metaserver', 'rundir')
        if metaRunDir:
            mkdir_p(metaRunDir);
            mkdir_p(metaRunDir + '/conf')
            mkdir_p(metaRunDir + '/checkpoints')
            mkdir_p(metaRunDir + '/logs')

    for section in config.sections():
        if section.startswith('chunkserver'):
            chunkRunDir = config.get(section, 'rundir')
            # look for chunk directory fields, only if
            # object store mode is off.
            if not objectStoreOnlyModeFlag:
                chunkDirs = config.get(section, 'chunkdirs')
                chunkDirsList = chunkDirs.split(' ')
            if chunkRunDir:
                mkdir_p(chunkRunDir);
                mkdir_p(chunkRunDir + '/conf')
                if not objectStoreOnlyModeFlag:
                    # Use the configured chunk dirs, falling back to a
                    # default 'chunkdir' under the run dir.
                    if len(chunkDirsList) > 0:
                        for cd in chunkDirsList:
                            mkdir_p(cd)
                    else:
                        mkdir_p(chunkRunDir + '/chunkdir')

    if config.has_section('client'):
        clientDir = config.get('client', 'rundir')
        if clientDir:
            mkdir_p(clientDir)

    if config.has_section('webui'):
        webDir = config.get('webui', 'rundir')
        if webDir:
            mkdir_p(webDir);
            mkdir_p(webDir + '/conf')
            mkdir_p(webDir + '/docroot')
    print 'Setup directories - OK.'
def check_directories(config):
    """Verify the install step already ran: the metaserver and web UI run
    directories from *config* must exist; exits the process otherwise."""
    metaRunDir = None
    webDir = None
    if config.has_section('metaserver'):
        metaRunDir = config.get('metaserver', 'rundir')
    if config.has_section('webui'):
        webDir = config.get('webui', 'rundir')
    if not metaRunDir or not webDir:
        sys.exit('Malformed config file.')
    if not os.path.exists(metaRunDir) or not os.path.exists(webDir):
        sys.exit('Cannot start without install. Please run with "-a install" first.')
    print 'Check directories - OK.'
def setup_config_files(config, authFlag, objectStoreOnlyModeFlag):
    """Write the .prp / WebUI.cfg configuration files for all servers.

    Emits, in order: optional X509 certs plus client auth config (when
    authFlag), a default client config, MetaServer.prp, one
    ChunkServer.prp per chunkserver section, and WebUI.cfg.
    Exits the process on any missing required config entry.
    """
    # Client run dir is optional; remember it for the auth/client files below.
    if config.has_section('client'):
        clientDir = config.get('client', 'rundir')
    else:
        clientDir = None
    if authFlag:
        # Authentication requires a certs dir; generate meta/root certs via
        # the external MKCERTS script.
        if 'certs' not in config.sections():
            sys.exit('Required metaserver certs not found in config')
        certsDir = config.get('certs', 'rundir')
        if not certsDir:
            sys.exit('Required certs certsdir not found in config')
        defaultUser = getpass.getuser()
        if run_command('%s %s meta root %s' % (
                shell_quote(Globals.MKCERTS),
                shell_quote(certsDir),
                shell_quote(defaultUser))) != 0:
            sys.exit('Create X509 certs failure')
        if clientDir:
            # Client-side auth properties pointing at the freshly made certs.
            clientFile = open(clientDir + '/client.prp', 'w')
            print >> clientFile, 'client.auth.X509.X509PemFile = %s/%s.crt' % (certsDir, defaultUser)
            print >> clientFile, 'client.auth.X509.PKeyPemFile = %s/%s.key' % (certsDir, defaultUser)
            print >> clientFile, 'client.auth.X509.CAFile = %s/qfs_ca/cacert.pem' % certsDir
            clientFile.close()
    if clientDir:
        # Default client config; duplicates the auth entries when enabled.
        defaultConfig = clientDir + '/clidefault.prp'
        clientFile = open(defaultConfig, 'w')
        print >> clientFile, 'fs.default = qfs://localhost:20000'
        if authFlag:
            print >> clientFile, 'client.auth.X509.X509PemFile = %s/%s.crt' % (certsDir, defaultUser)
            print >> clientFile, 'client.auth.X509.PKeyPemFile = %s/%s.key' % (certsDir, defaultUser)
            print >> clientFile, 'client.auth.X509.CAFile = %s/qfs_ca/cacert.pem' % certsDir
        clientFile.close()
    if 'metaserver' not in config.sections():
        sys.exit('Required metaserver section not found in config')
    metaRunDir = config.get('metaserver', 'rundir')
    if not metaRunDir:
        sys.exit('Required metaserver rundir not found in config')
    metaserverHostname = config.get('metaserver', 'hostname')
    metaserverClientPort = config.getint('metaserver', 'clientport')
    metaserverChunkPort = config.getint('metaserver', 'chunkport')
    clusterKey = config.get('metaserver', 'clusterkey')
    if objectStoreOnlyModeFlag:
        # S3 credentials are mandatory in object-store-only mode.
        bucketName = config.get('metaserver', 'bucketName')
        accessKeyId = config.get('metaserver', 'accessKeyId')
        secretAccessKey = config.get('metaserver', 'secretAccessKey')
        if not bucketName or not accessKeyId or not secretAccessKey:
            sys.exit('Configuration file must set bucket name,'
                     'access key id, and secret access key.')
    # Metaserver.
    metaFile = open(metaRunDir + '/conf/MetaServer.prp', 'w')
    print >> metaFile, 'metaServer.clientPort = %d' % metaserverClientPort
    print >> metaFile, 'metaServer.chunkServerPort = %d' % metaserverChunkPort
    print >> metaFile, 'metaServer.clusterKey = %s' % clusterKey
    print >> metaFile, 'metaServer.cpDir = %s/checkpoints' % metaRunDir
    print >> metaFile, 'metaServer.logDir = %s/logs' % metaRunDir
    print >> metaFile, 'metaServer.recoveryInterval = 1'
    print >> metaFile, 'metaServer.msgLogWriter.logLevel = DEBUG'
    print >> metaFile, 'metaServer.msgLogWriter.maxLogFileSize = 1e6'
    print >> metaFile, 'metaServer.msgLogWriter.maxLogFiles = 10'
    print >> metaFile, 'metaServer.minChunkservers = 1'
    print >> metaFile, 'metaServer.clientThreadCount = 4'
    # Root dir is owned by the invoking user so the sample runs unprivileged.
    print >> metaFile, 'metaServer.rootDirUser = %d' % os.getuid()
    print >> metaFile, 'metaServer.rootDirGroup = %d' % os.getgid()
    print >> metaFile, 'metaServer.rootDirMode = 0777'
    print >> metaFile, 'metaServer.pidFile = %s/metaserver.pid' % metaRunDir
    if authFlag:
        # Meta server authenticates both clients and chunk servers with the
        # same meta cert; the invoking user and root are whitelisted.
        print >> metaFile, 'metaServer.clientAuthentication.X509.X509PemFile = %s/meta.crt' % certsDir
        print >> metaFile, 'metaServer.clientAuthentication.X509.PKeyPemFile = %s/meta.key' % certsDir
        print >> metaFile, 'metaServer.clientAuthentication.X509.CAFile = %s/qfs_ca/cacert.pem' % certsDir
        print >> metaFile, 'metaServer.clientAuthentication.whiteList = %s root' % defaultUser
        print >> metaFile, 'metaServer.CSAuthentication.X509.X509PemFile = %s/meta.crt' % certsDir
        print >> metaFile, 'metaServer.CSAuthentication.X509.PKeyPemFile = %s/meta.key' % certsDir
        print >> metaFile, 'metaServer.CSAuthentication.X509.CAFile = %s/qfs_ca/cacert.pem' % certsDir
        print >> metaFile, 'metaServer.CSAuthentication.blackList = none'
    if objectStoreOnlyModeFlag:
        print >> metaFile, '# S3 parameters'
        print >> metaFile, 'metaServer.objectStoreEnabled = 1'
        # Zero replicas: all data lives in the object store, not on chunks.
        print >> metaFile, 'metaServer.maxReplicasPerFile = 0'
        print >> metaFile, 'metaServer.maxReplicasPerRSFile = 0'
        print >> metaFile, 'chunkServer.diskQueue.aws.bucketName = %s' % bucketName
        print >> metaFile, 'chunkServer.diskQueue.aws.accessKeyId = %s' % accessKeyId
        print >> metaFile, 'chunkServer.diskQueue.aws.secretAccessKey = %s' % secretAccessKey
    metaFile.close()
    # Chunkservers.
    for section in config.sections():
        if section.startswith('chunkserver'):
            chunkClientPort = config.getint(section, 'chunkport')
            if not objectStoreOnlyModeFlag:
                chunkDirs = config.get(section, 'chunkdirs')
            chunkRunDir = config.get(section, 'rundir')
            if chunkRunDir:
                if authFlag:
                    # One cert per chunk server, keyed by its client port.
                    if run_command('%s %s chunk%d' % (
                            shell_quote(Globals.MKCERTS),
                            shell_quote(certsDir),
                            chunkClientPort)) != 0:
                        sys.exit('Create X509 failure')
                chunkFile = open(chunkRunDir + '/conf/ChunkServer.prp', 'w')
                print >> chunkFile, 'chunkServer.metaServer.hostname = %s' % metaserverHostname
                print >> chunkFile, 'chunkServer.metaServer.port = %d' % metaserverChunkPort
                print >> chunkFile, 'chunkServer.clientPort = %d' % chunkClientPort
                print >> chunkFile, 'chunkServer.clusterKey = %s' % clusterKey
                print >> chunkFile, 'chunkServer.rackId = 0'
                if not objectStoreOnlyModeFlag:
                    print >> chunkFile, 'chunkServer.chunkDir = %s' % chunkDirs
                print >> chunkFile, 'chunkServer.msgLogWriter.logLevel = DEBUG'
                print >> chunkFile, 'chunkServer.msgLogWriter.maxLogFileSize = 1e6'
                print >> chunkFile, 'chunkServer.msgLogWriter.maxLogFiles = 2'
                print >> chunkFile, 'chunkServer.pidFile = %s/chunkserver.pid' % chunkRunDir
                # Object-store mode runs single-threaded client handling.
                clientThreadCount = 0 if objectStoreOnlyModeFlag else 3
                print >> chunkFile, 'chunkServer.clientThreadCount = %d' % clientThreadCount
                if authFlag:
                    print >> chunkFile, 'chunkserver.meta.auth.X509.X509PemFile = %s/chunk%d.crt' % (certsDir, chunkClientPort)
                    print >> chunkFile, 'chunkserver.meta.auth.X509.PKeyPemFile = %s/chunk%d.key' % (certsDir, chunkClientPort)
                    print >> chunkFile, 'chunkserver.meta.auth.X509.CAFile = %s/qfs_ca/cacert.pem' % certsDir
                if objectStoreOnlyModeFlag:
                    print >> chunkFile, '# S3 parameters'
                    print >> chunkFile, 'chunkServer.objectDir = s3://aws.'
                chunkFile.close()
    # Webserver.
    if 'webui' not in config.sections():
        return
    webDir = config.get('webui', 'rundir')
    if not webDir:
        return
    webFile = open(webDir + '/conf/WebUI.cfg', 'w')
    print >> webFile, '[webserver]'
    print >> webFile, 'webServer.metaserverHost = %s' % metaserverHostname
    print >> webFile, 'webServer.metaserverPort = %d' % metaserverClientPort
    print >> webFile, 'webServer.host = 0.0.0.0'
    print >> webFile, 'webServer.port = %d' % config.getint('webui', 'webport')
    print >> webFile, 'webServer.docRoot = %s/docroot' % webDir
    print >> webFile, 'webServer.allmachinesfn = /dev/null'
    print >> webFile, 'webServer.displayPorts = True'
    print >> webFile, 'webServer.pidFile = %s/webui.pid' % webDir
    print >> webFile, '[chunk]'
    print >> webFile, 'refreshInterval = 5'
    print >> webFile, 'currentSize = 30'
    print >> webFile, 'currentSpan = 10'
    print >> webFile, 'hourlySize = 30'
    # NOTE(review): 'hourlySpan =120' and the 'dayly*' spellings look odd but
    # presumably match what the web UI parser expects — do not "fix" blindly.
    print >> webFile, 'hourlySpan =120'
    print >> webFile, 'daylySize = 24'
    print >> webFile, 'daylySpan = 3600'
    print >> webFile, 'monthlySize = 30'
    print >> webFile, 'monthlySpan = 86400'
    print >> webFile, 'displayPorts = True'
    print >> webFile, 'predefinedHeaders = Buffer-usec-wait-avg&D-Timer-overrun-count&D-Timer-overrun-sec&XMeta-server-location&Client-active&D-Buffer-req-denied-bytes&D-CPU-sys&D-CPU-user&D-Disk-read-bytes&D-Disk-read-count&D-Disk-write-bytes&D-Disk-write-count&Write-appenders&D-Disk-read-errors&D-Disk-write-errors&XMeta-location'
    print >> webFile, 'predefinedChunkDirHeaders = Chunks&Dev-id&Read-bytes&D-Read-bytes&Read-err&D-Read-err&Read-io&D-Read-io&D-Read-time-microsec&Read-timeout&Space-avail&Space-util-pct&Started-ago&Stopped-ago&Write-bytes&D-Write-bytes&Write-err&D-Write-err&Write-io&D-Write-io&D-Write-time-microsec&Write-timeout&Chunk-server&Chunk-dir'
    webFile.close()
    print 'Setup config files - OK.'
def copy_files(config, sourceDir):
    """Copy static web UI assets into the web server docroot."""
    # Currently, only the web CSS stuff need be copied.
    if 'webui' in config.sections():
        rundir = config.get('webui', 'rundir')
        if rundir:
            duplicate_tree(sourceDir + '/webui/files', rundir + '/docroot')
def start_servers(config, whichServers, createNewFsFlag, authFlag):
startMeta = whichServers in ('meta', 'all')
startChunk = whichServers in ('chunk', 'all')
startWeb = whichServers in ('web', 'all')
errors = 0
if startMeta:
startWeb = True
metaRunDir = config.get('metaserver', 'rundir')
kill_running_program_pid(Globals.METASERVER, metaRunDir)
if metaRunDir:
metaConf = metaRunDir + '/conf/MetaServer.prp'
metaLog = metaRunDir + '/MetaServer.log'
metaOut = metaRunDir + '/MetaServer.out'
if createNewFsFlag and \
not os.listdir(metaRunDir + '/checkpoints') and \
not os.listdir(metaRunDir + '/logs'):
command = '%s -c %s > %s 2>&1' % (
shell_quote(Globals.METASERVER),
shell_quote(metaConf),
shell_quote(metaOut))
if run_command(command) > 0:
print '*** metaserver failed create empty file system'
errors = errors + 1
if errors == 0:
command = '%s %s %s > %s 2>&1 &' % (
shell_quote(Globals.METASERVER),
shell_quote(metaConf),
shell_quote(metaLog),
shell_quote(metaOut))
if run_command(command) > 0:
print '*** metaserver failed to start'
errors = errors + 1
else:
print 'Meta server started, listening on %s:%d' %(
config.get('metaserver', 'hostname'),
config.getint('metaserver', 'clientport'))
if startChunk:
for section in config.sections():
if section.startswith('chunkserver'):
chunkRunDir = config.get(section, 'rundir')
kill_running_program_pid(Globals.CHUNKSERVER, chunkRunDir)
if chunkRunDir:
chunkConf = chunkRunDir + '/conf/ChunkServer.prp'
chunkLog = chunkRunDir + '/ChunkServer.log'
chunkOut = chunkRunDir + '/ChunkServer.out'
command = '%s %s %s > %s 2>&1 &' % (
shell_quote(Globals.CHUNKSERVER),
shell_quote(chunkConf),
shell_quote(chunkLog),
shell_quote(chunkOut))
if run_command(command) > 0:
print '*** chunkserver failed to start'
errors = errors + 1
if startWeb:
webDir = config.get('webui', 'rundir')
kill_running_program_pid(Globals.WEBSERVER, webDir)
if webDir:
webConf = webDir + '/conf/WebUI.cfg'
webLog = webDir + '/webui.log'
command = '%s %s > %s 2>&1 &' % (
shell_quote(Globals.WEBSERVER),
shell_quote(webConf),
shell_quote(webLog))
if run_command(command) > 0:
print '*** web ui failed to start'
errors = errors + 1
else:
print 'Web ui started: http://localhost:%d' % (
config.getint('webui', 'webport'))
if errors > 0:
print 'Started servers - FAILED.'
else:
print 'Started servers - OK.'
defaultConfig=None
if config.has_section('client'):
clientDir = config.get('client', 'rundir')
if authFlag and os.path.isfile(clientDir + '/client.prp'):
print 'QFS authentication required.'
defaultConfig = clientDir + '/clidefault.prp'
if os.path.isfile(defaultConfig):
print 'Default QFS client configuration file: %s' % defaultConfig
if createNewFsFlag and Globals.QFSTOOL:
if defaultConfig:
cfgOpt = " -cfg %s" % shell_quote(defaultConfig)
command = '%s%s -mkdir %s' % (
shell_quote(Globals.QFSTOOL),
cfgOpt,
shell_quote('/user/' + getpass.getuser()),
)
print 'Creating default user directory by executing:\n%s' % command
if run_command(command) != 0:
print '*** failed to created user directory'
else:
print '- OK.'
# Need to massage the ~ in the config file paths. Otherwise a directory
# with name "~" would get created at $CWD.
def parse_config(configFile, objectStoreOnlyModeFlag):
    """Read configFile and expand '~' in all path-valued options.

    Without the expansion a literal directory named '~' would be created
    under $CWD.  Returns the populated ConfigParser instance.
    """
    config = ConfigParser.ConfigParser()
    config.read(configFile)
    for section in config.sections():
        # Every section carries a 'rundir'; expand a leading '~'.
        # (renamed from 'dir', which shadowed the builtin)
        runDir = config.get(section, 'rundir')
        config.set(section, 'rundir', os.path.expanduser(runDir))
        # attempt to edit chunkserver directory names
        # only if object store mode is off, otherwise ignore.
        if not objectStoreOnlyModeFlag and section.startswith('chunkserver'):
            chunkDirs = config.get(section, 'chunkdirs').split(' ')
            expanded = [os.path.expanduser(d) for d in chunkDirs]
            config.set(section, 'chunkdirs', ' '.join(expanded))
    return config
if __name__ == '__main__':
    # Parse CLI options, then the config file (expanding '~' in paths).
    opts = parse_command_line()
    config = parse_config(opts.config_file, opts.object_store)
    # 'uninstall' and 'stop' both stop servers; uninstall also removes dirs.
    if opts.action in ('uninstall', 'stop'):
        do_cleanup(config, opts.action == 'uninstall')
        posix._exit(0)
    check_binaries(opts.release_dir, opts.source_dir, opts.auth)
    check_ports(config)
    if opts.action == 'install':
        setup_directories(config, opts.auth, opts.object_store)
        setup_config_files(config, opts.auth, opts.object_store)
        copy_files(config, opts.source_dir)
    elif opts.action == 'start':
        check_directories(config)
        # NOTE(review): inside the 'start' branch this flag is always False,
        # so a new fs is never created here — presumably intentional since
        # 'install' does not start servers; confirm intended flow.
        start_servers(config, 'all', opts.action == 'install', opts.auth)
|
{
"content_hash": "8df15f48cf4718e489c0923aea9b7595",
"timestamp": "",
"source": "github",
"line_count": 748,
"max_line_length": 339,
"avg_line_length": 42.018716577540104,
"alnum_prop": 0.5933821189945911,
"repo_name": "quantcast/qfs",
"id": "00b3e438502878e985b045ba46cdde3a7d6d9b1a",
"size": "32156",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "examples/sampleservers/sample_setup.py",
"mode": "33261",
"license": "apache-2.0",
"language": [
{
"name": "C",
"bytes": "6079819"
},
{
"name": "C++",
"bytes": "7368240"
},
{
"name": "CMake",
"bytes": "71423"
},
{
"name": "CSS",
"bytes": "10622"
},
{
"name": "GDB",
"bytes": "14312"
},
{
"name": "HTML",
"bytes": "166157"
},
{
"name": "Java",
"bytes": "166127"
},
{
"name": "JavaScript",
"bytes": "13774"
},
{
"name": "M4",
"bytes": "37197"
},
{
"name": "Makefile",
"bytes": "246479"
},
{
"name": "Perl",
"bytes": "3346"
},
{
"name": "Python",
"bytes": "174869"
},
{
"name": "Ragel",
"bytes": "124808"
},
{
"name": "Raku",
"bytes": "1758"
},
{
"name": "Shell",
"bytes": "930787"
},
{
"name": "TeX",
"bytes": "19488"
}
],
"symlink_target": ""
}
|
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class AuthCallsCredentialListMappingList(ListResource):

    def __init__(self, version, account_sid, domain_sid):
        """
        Initialize the AuthCallsCredentialListMappingList

        :param Version version: Version that contains the resource
        :param account_sid: The SID of the Account that created the resource
        :param domain_sid: The unique string that identifies the resource

        :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingList
        :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingList
        """
        super(AuthCallsCredentialListMappingList, self).__init__(version)

        # Path Solution
        self._solution = {'account_sid': account_sid, 'domain_sid': domain_sid, }
        self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/Auth/Calls/CredentialListMappings.json'.format(**self._solution)

    def create(self, credential_list_sid):
        """
        Create the AuthCallsCredentialListMappingInstance

        :param unicode credential_list_sid: The SID of the CredentialList resource to map to the SIP domain

        :returns: The created AuthCallsCredentialListMappingInstance
        :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingInstance
        """
        data = values.of({'CredentialListSid': credential_list_sid, })

        payload = self._version.create(method='POST', uri=self._uri, data=data, )

        return AuthCallsCredentialListMappingInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            domain_sid=self._solution['domain_sid'],
        )

    def stream(self, limit=None, page_size=None):
        """
        Streams AuthCallsCredentialListMappingInstance records from the API as a generator stream.
        This operation lazily loads records as efficiently as possible until the limit
        is reached.
        The results are returned as a generator, so this operation is memory efficient.

        :param int limit: Upper limit for the number of records to return. stream()
                          guarantees to never return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, stream() will attempt to read the
                              limit with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingInstance]
        """
        limits = self._version.read_limits(limit, page_size)

        page = self.page(page_size=limits['page_size'], )

        return self._version.stream(page, limits['limit'])

    def list(self, limit=None, page_size=None):
        """
        Lists AuthCallsCredentialListMappingInstance records from the API as a list.
        Unlike stream(), this operation is eager and will load `limit` records into
        memory before returning.

        :param int limit: Upper limit for the number of records to return. list() guarantees
                          never to return more than limit.  Default is no limit
        :param int page_size: Number of records to fetch per request, when not set will use
                              the default value of 50 records.  If no page_size is defined
                              but a limit is defined, list() will attempt to read the limit
                              with the most efficient page size, i.e. min(limit, 1000)

        :returns: Generator that will yield up to limit results
        :rtype: list[twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingInstance]
        """
        return list(self.stream(limit=limit, page_size=page_size, ))

    def page(self, page_token=values.unset, page_number=values.unset,
             page_size=values.unset):
        """
        Retrieve a single page of AuthCallsCredentialListMappingInstance records from the API.
        Request is executed immediately

        :param str page_token: PageToken provided by the API
        :param int page_number: Page Number, this value is simply for client state
        :param int page_size: Number of records to return, defaults to 50

        :returns: Page of AuthCallsCredentialListMappingInstance
        :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingPage
        """
        data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })

        response = self._version.page(method='GET', uri=self._uri, params=data, )

        return AuthCallsCredentialListMappingPage(self._version, response, self._solution)

    def get_page(self, target_url):
        """
        Retrieve a specific page of AuthCallsCredentialListMappingInstance records from the API.
        Request is executed immediately

        :param str target_url: API-generated URL for the requested results page

        :returns: Page of AuthCallsCredentialListMappingInstance
        :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingPage
        """
        response = self._version.domain.twilio.request(
            'GET',
            target_url,
        )

        return AuthCallsCredentialListMappingPage(self._version, response, self._solution)

    def get(self, sid):
        """
        Constructs a AuthCallsCredentialListMappingContext

        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingContext
        :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingContext
        """
        return AuthCallsCredentialListMappingContext(
            self._version,
            account_sid=self._solution['account_sid'],
            domain_sid=self._solution['domain_sid'],
            sid=sid,
        )

    def __call__(self, sid):
        """
        Constructs a AuthCallsCredentialListMappingContext

        :param sid: The unique string that identifies the resource

        :returns: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingContext
        :rtype: twilio.rest.api.v2010.account.sip.domain.auth_types.auth_calls_mapping.auth_calls_credential_list_mapping.AuthCallsCredentialListMappingContext
        """
        # DRY: __call__ previously duplicated get() verbatim; delegate instead.
        return self.get(sid)

    def __repr__(self):
        """
        Provide a friendly representation

        :returns: Machine friendly representation
        :rtype: str
        """
        return '<Twilio.Api.V2010.AuthCallsCredentialListMappingList>'
class AuthCallsCredentialListMappingPage(Page):

    def __init__(self, version, response, solution):
        """
        Set up one page of credential-list-mapping results.

        :param Version version: Version that contains the resource
        :param Response response: Response from the API
        :param solution: Path solution dict with account_sid and domain_sid
        """
        super(AuthCallsCredentialListMappingPage, self).__init__(version, response)
        self._solution = solution

    def get_instance(self, payload):
        """
        Wrap one raw API record in an AuthCallsCredentialListMappingInstance.

        :param dict payload: Payload response from the API
        :rtype: AuthCallsCredentialListMappingInstance
        """
        path_kwargs = {
            'account_sid': self._solution['account_sid'],
            'domain_sid': self._solution['domain_sid'],
        }
        return AuthCallsCredentialListMappingInstance(self._version, payload, **path_kwargs)

    def __repr__(self):
        """
        Provide a machine friendly representation.

        :rtype: str
        """
        return '<Twilio.Api.V2010.AuthCallsCredentialListMappingPage>'
class AuthCallsCredentialListMappingContext(InstanceContext):

    def __init__(self, version, account_sid, domain_sid, sid):
        """
        Bind the context to a single credential-list mapping resource.

        :param Version version: Version that contains the resource
        :param account_sid: The SID of the Account that created the resource to fetch
        :param domain_sid: The SID of the SIP domain that contains the resource to fetch
        :param sid: The unique string that identifies the resource
        """
        super(AuthCallsCredentialListMappingContext, self).__init__(version)

        # Path solution drives both the URI template and child constructors.
        self._solution = {'account_sid': account_sid, 'domain_sid': domain_sid, 'sid': sid, }
        self._uri = '/Accounts/{account_sid}/SIP/Domains/{domain_sid}/Auth/Calls/CredentialListMappings/{sid}.json'.format(**self._solution)

    def fetch(self):
        """
        Fetch the resource and return it wrapped as an instance object.

        :rtype: AuthCallsCredentialListMappingInstance
        """
        payload = self._version.fetch(method='GET', uri=self._uri)
        return AuthCallsCredentialListMappingInstance(
            self._version,
            payload,
            account_sid=self._solution['account_sid'],
            domain_sid=self._solution['domain_sid'],
            sid=self._solution['sid'],
        )

    def delete(self):
        """
        Delete the mapped resource.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._version.delete(method='DELETE', uri=self._uri)

    def __repr__(self):
        """
        Provide a machine friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Api.V2010.AuthCallsCredentialListMappingContext {}>'.format(details)
class AuthCallsCredentialListMappingInstance(InstanceResource):

    def __init__(self, version, payload, account_sid, domain_sid, sid=None):
        """
        Build an instance from an API payload plus its path parameters.
        """
        super(AuthCallsCredentialListMappingInstance, self).__init__(version)

        # Marshal the payload fields we expose as properties below.
        self._properties = {
            'account_sid': payload.get('account_sid'),
            'date_created': deserialize.rfc2822_datetime(payload.get('date_created')),
            'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')),
            'friendly_name': payload.get('friendly_name'),
            'sid': payload.get('sid'),
        }

        # Context is created lazily on first proxy access.
        self._context = None
        self._solution = {
            'account_sid': account_sid,
            'domain_sid': domain_sid,
            'sid': sid or self._properties['sid'],
        }

    @property
    def _proxy(self):
        """
        Lazily construct (and cache) the context that performs the actual
        API operations for this instance.

        :rtype: AuthCallsCredentialListMappingContext
        """
        if self._context is None:
            self._context = AuthCallsCredentialListMappingContext(
                self._version,
                account_sid=self._solution['account_sid'],
                domain_sid=self._solution['domain_sid'],
                sid=self._solution['sid'],
            )
        return self._context

    @property
    def account_sid(self):
        """
        :returns: SID of the owning Account
        :rtype: unicode
        """
        return self._properties['account_sid']

    @property
    def date_created(self):
        """
        :returns: Creation timestamp (RFC 2822, GMT)
        :rtype: datetime
        """
        return self._properties['date_created']

    @property
    def date_updated(self):
        """
        :returns: Last-update timestamp (RFC 2822, GMT)
        :rtype: datetime
        """
        return self._properties['date_updated']

    @property
    def friendly_name(self):
        """
        :returns: Human-readable description of the resource
        :rtype: unicode
        """
        return self._properties['friendly_name']

    @property
    def sid(self):
        """
        :returns: Unique identifier of the resource
        :rtype: unicode
        """
        return self._properties['sid']

    def fetch(self):
        """
        Fetch a fresh copy of this instance via its context.

        :rtype: AuthCallsCredentialListMappingInstance
        """
        return self._proxy.fetch()

    def delete(self):
        """
        Delete this instance via its context.

        :returns: True if delete succeeds, False otherwise
        :rtype: bool
        """
        return self._proxy.delete()

    def __repr__(self):
        """
        Provide a machine friendly representation.

        :rtype: str
        """
        details = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
        return '<Twilio.Api.V2010.AuthCallsCredentialListMappingInstance {}>'.format(details)
|
{
"content_hash": "25d6f548e062a180f0f924738528e523",
"timestamp": "",
"source": "github",
"line_count": 388,
"max_line_length": 166,
"avg_line_length": 42.396907216494846,
"alnum_prop": 0.6674164133738602,
"repo_name": "twilio/twilio-python",
"id": "60c3c292cec77a277d8e5ce27db9dc70b3c8287e",
"size": "16465",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "twilio/rest/api/v2010/account/sip/domain/auth_types/auth_calls_mapping/auth_calls_credential_list_mapping.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Dockerfile",
"bytes": "234"
},
{
"name": "Makefile",
"bytes": "2157"
},
{
"name": "Python",
"bytes": "11241545"
}
],
"symlink_target": ""
}
|
from pytest import fixture, mark, raises
from tornado import web, gen
from unittest.mock import Mock
from globus_sdk import ConfidentialAppAuthClient
from ..globus import GlobusOAuthenticator, GlobusLogoutHandler
from .mocks import setup_oauth_mock, no_code_test, mock_handler
def user_model(username):
    """Build the minimal user payload the mocked OAuth endpoint returns."""
    return {'login': username}
@fixture
def mock_globus_sdk(monkeypatch):
    """Patch globus_sdk so 'oauth2_exchange_code_for_tokens' returns a canned
    Tokens object instead of contacting the network."""
    class Tokens:
        # Per-resource-server access tokens the authenticator unpacks.
        by_resource_server = {
            'transfer.api.globus.org': {'access_token': 'TRANSFER_TOKEN'},
            'auth.globus.org': {'access_token': 'AUTH_TOKEN'}
        }
        # Decoded OIDC id_token payload; tests override preferred_username.
        id_token = {'preferred_username': 'wash@globusid.org'}

        def decode_id_token(self, client):
            return self.id_token

    fake_tokens = Tokens()

    def fake_exchange(self, code):
        return fake_tokens

    monkeypatch.setattr(
        ConfidentialAppAuthClient,
        'oauth2_exchange_code_for_tokens',
        fake_exchange
    )
    return fake_tokens
@fixture
def globus_client(client):
    """Configure the shared OAuth mock client with the Globus endpoints."""
    endpoints = dict(
        host=['auth.globus.org'],
        access_token_path='/v2/oauth2/token',
        user_path='/userinfo',
        token_type='bearer',
    )
    setup_oauth_mock(client, **endpoints)
    return client
async def test_globus(globus_client, mock_globus_sdk):
    """Happy path: authenticate() returns the username and stored tokens."""
    authenticator = GlobusOAuthenticator()
    handler = globus_client.handler_for_user(user_model('wash'))
    data = await authenticator.authenticate(handler)
    assert data['name'] == 'wash'
    tokens = list(data['auth_state']['tokens'].keys())
    # The fixture supplied both transfer and auth tokens; only the transfer
    # token survives — presumably auth.globus.org is excluded by default.
    assert tokens == ['transfer.api.globus.org']
async def test_allow_refresh_tokens(globus_client, mock_globus_sdk, monkeypatch):
    """Disabling allow_refresh_tokens must reach globus_sdk's
    oauth2_start_flow as refresh_tokens=False."""
    authenticator = GlobusOAuthenticator()
    # Sanity check, this field should be set to True
    assert authenticator.allow_refresh_tokens is True
    authenticator.allow_refresh_tokens = False
    # Replace the real flow starter so the call arguments can be inspected.
    monkeypatch.setattr(
        ConfidentialAppAuthClient,
        'oauth2_start_flow',
        Mock()
    )
    handler = globus_client.handler_for_user(user_model('wash'))
    await authenticator.authenticate(handler)
    ConfidentialAppAuthClient.oauth2_start_flow.assert_called_with(
        authenticator.get_callback_url(None),
        requested_scopes=' '.join(authenticator.scope),
        refresh_tokens=False
    )
async def test_restricted_domain(globus_client, mock_globus_sdk):
    """A username whose idp domain differs from identity_provider is
    rejected with HTTP 403."""
    mock_globus_sdk.id_token = {'preferred_username': 'wash@serenity.com'}
    authenticator = GlobusOAuthenticator()
    authenticator.identity_provider = 'alliance.gov'
    handler = globus_client.handler_for_user(user_model('wash'))
    with raises(web.HTTPError) as exc:
        await authenticator.authenticate(handler)
    assert exc.value.status_code == 403
async def test_namespaced_domain(globus_client, mock_globus_sdk):
    """A doubly-namespaced username (user@domainA@domainB) still yields the
    bare short name when no identity provider restriction is set."""
    mock_globus_sdk.id_token = {'preferred_username':
                                'wash@legitshipping.com@serenity.com'}
    authenticator = GlobusOAuthenticator()
    # Allow any idp
    authenticator.identity_provider = ''
    handler = globus_client.handler_for_user(user_model('wash'))
    data = await authenticator.authenticate(handler)
    assert data['name'] == 'wash'
async def test_token_exclusion(globus_client, mock_globus_sdk):
    """Every resource server listed in exclude_tokens is stripped from the
    auth_state token dict."""
    authenticator = GlobusOAuthenticator()
    # Exclude both tokens supplied by the mock fixture.
    authenticator.exclude_tokens = [
        'transfer.api.globus.org',
        'auth.globus.org'
    ]
    handler = globus_client.handler_for_user(user_model('wash'))
    data = await authenticator.authenticate(handler)
    assert data['name'] == 'wash'
    assert list(data['auth_state']['tokens'].keys()) == []
def test_revoke_tokens(monkeypatch):
    """revoke_service_tokens() should call globus_sdk's oauth2_revoke_token
    for the stored access/refresh tokens."""
    monkeypatch.setattr(
        ConfidentialAppAuthClient,
        'oauth2_revoke_token',
        Mock()
    )
    authenticator = GlobusOAuthenticator()
    service = {'transfer.api.globus.org': {'access_token': 'foo',
                                           'refresh_token': 'bar'}}
    authenticator.revoke_service_tokens(service)
    assert ConfidentialAppAuthClient.oauth2_revoke_token.called
async def test_custom_logout(monkeypatch):
    """A configured logout_redirect_url is used by the logout handler."""
    custom_logout_url = 'https://universityofindependence.edu/logout'
    authenticator = GlobusOAuthenticator()
    logout_handler = mock_handler(GlobusLogoutHandler,
                                  authenticator=authenticator)
    # Stub tornado's redirect so the call can be asserted on below.
    monkeypatch.setattr(
        web.RequestHandler,
        'redirect',
        Mock()
    )
    logout_handler.clear_login_cookie = Mock()
    logout_handler.get_current_user = Mock()
    authenticator.logout_redirect_url = custom_logout_url
    await logout_handler.get()
    # The handler must clear the login cookie and send the user to the
    # custom logout URL.
    logout_handler.redirect.assert_called_once_with(custom_logout_url)
    assert logout_handler.clear_login_cookie.called
async def test_logout_revokes_tokens(monkeypatch):
    """clear_tokens revokes service tokens and persists the cleared state."""
    class User:
        # Minimal stand-in for a JupyterHub user with stored auth state.
        @gen.coroutine
        def get_auth_state(self):
            return {'tokens': {}}
        save_auth_state = Mock()
        name = 'Wash'
    user = User()
    authenticator = GlobusOAuthenticator()
    logout_handler = mock_handler(GlobusLogoutHandler,
                                  authenticator=authenticator)
    monkeypatch.setattr(
        web.RequestHandler,
        'redirect',
        Mock()
    )
    logout_handler.clear_login_cookie = Mock()
    authenticator.revoke_service_tokens = Mock()
    # Token revocation on logout is opt-in.
    authenticator.revoke_tokens_on_logout = True
    await logout_handler.clear_tokens(user)
    # Tokens must be revoked and the emptied auth state saved back.
    assert authenticator.revoke_service_tokens.called
    assert user.save_auth_state.called
|
{
"content_hash": "fec6cff58ffc5cf920b4bfec2f3f28b9",
"timestamp": "",
"source": "github",
"line_count": 175,
"max_line_length": 81,
"avg_line_length": 31.97714285714286,
"alnum_prop": 0.6663688348820586,
"repo_name": "maltevogl/oauthenticator",
"id": "21e42b787ef3a4abc225fd542186d8d5d90260e4",
"size": "5596",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "oauthenticator/tests/test_globus.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "140321"
}
],
"symlink_target": ""
}
|
"""
Based on Premailer.
This is a hack of Premailer that uses BeautifulSoup and SoupSelect instead of lxml.
"""
# http://www.peterbe.com/plog/premailer.py
import re, os
import codecs
import urlparse, urllib
from BeautifulSoup import BeautifulSoup, Comment
import soupselect; soupselect.monkeypatch()
__version__ = '1.9'
__all__ = ['PremailerError', 'Premailer', 'transform']
class PremailerError(Exception):
    # Raised when the input HTML cannot be parsed at all.
    pass
def _merge_styles(old, new, class_=''):
"""
if ::
old = 'font-size:1px; color: red'
and ::
new = 'font-size:2px; font-weight: bold'
then ::
return 'color: red; font-size:2px; font-weight: bold'
In other words, the new style bits replace the old ones.
The @class_ parameter can be something like ':hover' and if that
is there, you split up the style with '{...} :hover{...}'
Note: old could be something like '{...} ::first-letter{...}'
"""
news = {}
for k, v in [x.strip().split(':', 1) for x in new.split(';') if x.strip()]:
news[k.strip()] = v.strip()
groups = {}
grouping_regex = re.compile('([:\-\w]*){([^}]+)}')
grouped_split = grouping_regex.findall(old)
if grouped_split:
for old_class, old_content in grouped_split:
olds = {}
for k, v in [x.strip().split(':', 1) for x in old_content.split(';') if x.strip()]:
olds[k.strip()] = v.strip()
groups[old_class] = olds
else:
olds = {}
for k, v in [x.strip().split(':', 1) for x in old.split(';') if x.strip()]:
olds[k.strip()] = v.strip()
groups[''] = olds
# Perform the merge
merged = news
for k, v in groups.get(class_, {}).items():
if k not in merged:
merged[k] = v
groups[class_] = merged
if len(groups) == 1:
return '; '.join(['%s:%s' % (k, v) for (k, v) in groups.values()[0].items()])
else:
all = []
for class_, mergeable in sorted(groups.items(),
lambda x, y: cmp(x[0].count(':'), y[0].count(':'))):
all.append('%s{%s}' % (class_,
'; '.join(['%s:%s' % (k, v)
for (k, v)
in mergeable.items()])))
return ' '.join([x for x in all if x != '{}'])
# Strip /* ... */ comments, which may span multiple lines.
_css_comments = re.compile(r'/\*.*?\*/', re.MULTILINE|re.DOTALL)
# Split a stylesheet into (whole rule, selectors, declarations) triples.
_regex = re.compile('((.*?){(.*?)})', re.DOTALL|re.M)
# Collapse whitespace after ';' and ':' inside declaration blocks.
_semicolon_regex = re.compile(';(\s+)')
_colon_regex = re.compile(':(\s+)')
class Premailer(object):
    """Inline a document's CSS into per-element style="..." attributes.

    BeautifulSoup/soupselect port of Premailer: collects rules from the
    document's <style> tag (and optional external stylesheets), matches
    their selectors against the page and merges the declarations into
    each matched element's style attribute.
    """

    def __init__(self, html, base_url=None,
                 exclude_pseudoclasses=False,
                 keep_style_tags=False,
                 include_star_selectors=False,
                 external_styles=None):
        """
        :param html: the HTML document to transform (a string).
        :param base_url: if given, relative href/src URLs are made
            absolute against it.
        :param exclude_pseudoclasses: keep pseudoclass rules (':hover'
            etc.) in a <style> tag instead of trying to inline them.
        :param keep_style_tags: keep the <style> tag even after its
            rules have been processed.
        :param include_star_selectors: process '* { ... }' rules
            instead of ignoring them.
        :param external_styles: extra stylesheet URL(s) or file
            path(s); a single string or a list of strings.
        """
        self.html = html
        self.base_url = base_url
        self.exclude_pseudoclasses = exclude_pseudoclasses
        # whether to delete the <style> tag once it's been processed
        self.keep_style_tags = keep_style_tags
        # whether to process or ignore selectors like '* { foo:bar; }'
        self.include_star_selectors = include_star_selectors
        if isinstance(external_styles, basestring):
            external_styles = [external_styles]
        self.external_styles = external_styles

    def _parse_style_rules(self, css_body):
        """Split *css_body* into ``(rules, leftover)``.

        ``rules`` is a list of ``(selector, declarations)`` pairs to be
        inlined.  ``leftover`` collects pseudoclass rules when
        ``exclude_pseudoclasses`` is set, so they can be written back
        into a <style> tag instead.
        """
        leftover = []
        rules = []
        css_body = _css_comments.sub('', css_body)
        for each in _regex.findall(css_body.strip()):
            __, selectors, bulk = each
            # Normalize whitespace inside the declarations and drop a
            # trailing semicolon.
            bulk = _semicolon_regex.sub(';', bulk.strip())
            bulk = _colon_regex.sub(':', bulk.strip())
            if bulk.endswith(';'):
                bulk = bulk[:-1]
            for selector in [x.strip() for x in selectors.split(',') if x.strip()]:
                if ':' in selector and self.exclude_pseudoclasses:
                    # a pseudoclass
                    leftover.append((selector, bulk))
                    continue
                elif selector == '*' and not self.include_star_selectors:
                    continue
                rules.append((selector, bulk))
        return rules, leftover

    def transform(self):
        """change the self.html and return it with CSS turned into style
        attributes.
        """
        page = BeautifulSoup(self.html)
        if page is None:
            # (Debug print of the whole document removed; the exception
            # is signal enough.)
            raise PremailerError("Could not parse the html")
        # Strip comments.
        comments = page.findAll(text=lambda text: isinstance(text, Comment))
        map(lambda c: c.extract(), comments)
        ##
        ## style selectors
        ##
        rules = []
        # NOTE(review): page.find("style") returns only the *first*
        # <style> tag and iterating it walks its child text nodes;
        # further <style> tags are ignored -- confirm this is intended.
        for style in page.find("style") or []:
            css_body = str(style)
            these_rules, these_leftover = self._parse_style_rules(css_body)
            rules.extend(these_rules)
            if these_leftover:
                style.text = '\n'.join(['%s {%s}' % (k, v) for (k, v) in these_leftover])
            elif not self.keep_style_tags:
                style.extract()
        if self.external_styles:
            for stylefile in self.external_styles:
                # (Stray debug print of the filename removed.)
                if stylefile.startswith('http://'):
                    css_body = urllib.urlopen(stylefile).read()
                elif os.path.exists(stylefile):
                    try:
                        f = codecs.open(stylefile)
                        css_body = f.read()
                    finally:
                        f.close()
                else:
                    raise ValueError(u"Could not find external style: %s" % stylefile)
                these_rules, these_leftover = self._parse_style_rules(css_body)
                rules.extend(these_rules)
        for selector, style in rules:
            class_ = ''
            if ':' in selector:
                # Split off the pseudoclass ('a:hover' -> 'a', ':hover').
                selector, class_ = re.split(':', selector, 1)
                class_ = ':%s' % class_
            #sel = CSSSelector(selector)
            items = page.findSelect(selector)
            for item in items:
                old_style = item.get('style', '')
                new_style = _merge_styles(old_style, style, class_)
                item['style'] = new_style
                self._style_to_basic_html_attributes(item, new_style)
        # Classes are useless once the styles are inlined.
        for item in page.findAll(lambda tag: tag.get('class', None) != None):
            # delete the 'class' attribute
            del item['class']
        ##
        ## URLs
        ##
        if self.base_url:
            for attr in ('href', 'src'):
                for item in page.findAll(lambda tag: tag.get(attr, None) != None):
                    item[attr] = urlparse.urljoin(self.base_url, item[attr])
        # The default __repr__ encoding for the used version of BeautifulSoup is utf-8
        return str(page).replace('<head/>', '<head></head>')

    def _style_to_basic_html_attributes(self, element, style_content):
        """given an element and styles like
        'background-color:red; font-family:Arial' turn some of that into HTML
        attributes. like 'bgcolor', etc.

        Note, the style_content can contain pseudoclasses like:
        '{color:red; border:1px solid green} :visited{border:1px solid green}'
        """
        # If the declarations are wrapped in pseudoclass groups, only the
        # first (plain) group can be mapped to HTML attributes.
        # Bug fix: the right-hand operand used to be '{' as well, which
        # made this comparison always true.
        if style_content.count('}') and \
           style_content.count('{') == style_content.count('}'):
            style_content = style_content.split('}')[0][1:]
        attributes = {}
        for key, value in [x.split(':') for x in style_content.split(';')
                           if len(x.split(':')) == 2]:
            key = key.strip()
            if key == 'text-align':
                attributes['align'] = value.strip()
            elif key == 'background-color':
                attributes['bgcolor'] = value.strip()
            elif key == 'width':
                value = value.strip()
                if value.endswith('px'):
                    value = value[:-2]
                attributes['width'] = value
        for key, value in attributes.items():
            # already set, don't dare to overwrite
            # Bug fix: `key in element` tests Tag *children*, not
            # attributes, so the guard never fired; use .get() instead.
            if element.get(key) is not None:
                continue
            element[key] = value
def transform(html, base_url=None):
    """Convenience wrapper: run *html* through Premailer and return the result."""
    premailer = Premailer(html, base_url=base_url)
    return premailer.transform()
if __name__=='__main__':
    # Ad-hoc smoke test: inline the embedded stylesheet and print the result.
    html = """<html>
        <head>
        <title>Test</title>
        <style>
        h1, h2 { color:red; }
        strong {
            text-decoration:none
            }
        p { font-size:2px }
        p.footer { font-size: 1px}
        </style>
        </head>
        <body>
        <h1>Hi!</h1>
        <p><strong>Yes!</strong></p>
        <p class="footer" style="color:red">Feetnuts</p>
        <img href="/images/logo.png"/>
        <a href="">28 Wins</a>
        </body>
        </html>"""
    print transform(html, base_url="http://www.28wins.com")
|
{
"content_hash": "7dc8db549996fe1ac01b68308099ac5d",
"timestamp": "",
"source": "github",
"line_count": 261,
"max_line_length": 95,
"avg_line_length": 35.593869731800766,
"alnum_prop": 0.4963401506996771,
"repo_name": "drawquest/drawquest-web",
"id": "e939081d5b9493e30f0434da6e5788b5fc341a6e",
"size": "9290",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "website/canvas/notifications/beautiful_premailer/beautiful_premailer.py",
"mode": "33188",
"license": "bsd-3-clause",
"language": [
{
"name": "AppleScript",
"bytes": "57"
},
{
"name": "C",
"bytes": "547"
},
{
"name": "CSS",
"bytes": "634659"
},
{
"name": "CoffeeScript",
"bytes": "8968"
},
{
"name": "HTML",
"bytes": "898627"
},
{
"name": "JavaScript",
"bytes": "1507053"
},
{
"name": "Makefile",
"bytes": "258"
},
{
"name": "PHP",
"bytes": "1983"
},
{
"name": "Python",
"bytes": "7220727"
},
{
"name": "Ruby",
"bytes": "876"
},
{
"name": "Shell",
"bytes": "3700"
}
],
"symlink_target": ""
}
|
import copy
from django.conf import settings
from django.contrib.admin import ModelAdmin
from django.contrib.admin.views.main import ChangeList
from django.contrib.contenttypes import generic
from django.forms import ModelForm
from django.contrib import admin
from django.db import models
from suit.widgets import NumberInput, SuitSplitDateTimeWidget
class SortableModelAdminBase(object):
    """
    Base class for SortableTabularInline and SortableModelAdmin
    """
    # Name of the model field that stores the sort position.
    sortable = 'order'
    class Media:
        # Client-side drag & drop support for the sortable widgets.
        js = ('suit/js/sortables.js',)
class SortableListForm(ModelForm):
    """
    Just Meta holder class
    """
    class Meta:
        # Hidden number input picked up by suit's sortable JS.
        widgets = {
            'order': NumberInput(
                attrs={'class': 'hide input-mini suit-sortable'})
        }
class SortableChangeList(ChangeList):
    """
    Class that forces ordering by sortable param only
    """
    def get_ordering(self, request, queryset):
        # Always order by the sortable field, with pk descending as a
        # deterministic tie-breaker; any user-requested ordering is ignored.
        return [self.model_admin.sortable, '-' + self.model._meta.pk.name]
class SortableTabularInlineBase(SortableModelAdminBase):
    """
    Sortable tabular inline
    """
    def __init__(self, *args, **kwargs):
        super(SortableTabularInlineBase, self).__init__(*args, **kwargs)
        self.ordering = (self.sortable,)
        self.fields = self.fields or []
        # Append the sortable field only when an explicit field list is
        # declared; with an empty list Django builds the fields itself.
        if self.fields and self.sortable not in self.fields:
            self.fields = list(self.fields) + [self.sortable]
    def formfield_for_dbfield(self, db_field, **kwargs):
        # Render the order field with the hidden sortable widget.
        if db_field.name == self.sortable:
            kwargs['widget'] = SortableListForm.Meta.widgets['order']
        return super(SortableTabularInlineBase, self).formfield_for_dbfield(
            db_field, **kwargs)
class SortableTabularInline(SortableTabularInlineBase, admin.TabularInline):
    # Concrete sortable tabular inline.
    pass
class SortableGenericTabularInline(SortableTabularInlineBase,
                                   generic.GenericTabularInline):
    # Sortable tabular inline for generic (contenttypes) relations.
    pass
class SortableStackedInlineBase(SortableModelAdminBase):
    """
    Sortable stacked inline
    """
    def __init__(self, *args, **kwargs):
        super(SortableStackedInlineBase, self).__init__(*args, **kwargs)
        # Always order inline instances by the sortable field.
        self.ordering = (self.sortable,)
    def get_fieldsets(self, *args, **kwargs):
        """
        Iterate all fieldsets and make sure sortable is in the first fieldset
        Remove sortable from every other fieldset, if by some reason someone
        has added it
        """
        fieldsets = super(SortableStackedInlineBase, self).get_fieldsets(
            *args, **kwargs)
        sortable_added = False
        for fieldset in fieldsets:
            for line in fieldset:
                # Fieldset entries are (name, options_dict) pairs; only
                # the options dict carries a 'fields' list.
                if not line or not isinstance(line, dict):
                    continue
                fields = line.get('fields')
                if self.sortable in fields:
                    fields.remove(self.sortable)
                # Add sortable field always as first
                if not sortable_added:
                    fields.insert(0, self.sortable)
                    sortable_added = True
                    break
        return fieldsets
    def formfield_for_dbfield(self, db_field, **kwargs):
        if db_field.name == self.sortable:
            # Deep-copy the shared widget so the stacked-specific CSS
            # classes below don't leak into the tabular variant.
            kwargs['widget'] = copy.deepcopy(
                SortableListForm.Meta.widgets['order'])
            kwargs['widget'].attrs['class'] += ' suit-sortable-stacked'
            kwargs['widget'].attrs['rowclass'] = ' suit-sortable-stacked-row'
        return super(SortableStackedInlineBase, self).formfield_for_dbfield(
            db_field, **kwargs)
class SortableStackedInline(SortableStackedInlineBase, admin.StackedInline):
    # Concrete sortable stacked inline.
    pass
class SortableGenericStackedInline(SortableStackedInlineBase,
                                   generic.GenericStackedInline):
    # Sortable stacked inline for generic (contenttypes) relations.
    pass
class SortableModelAdmin(SortableModelAdminBase, ModelAdmin):
    """
    Sortable model admin: drag & drop ordering on the changelist.
    """
    # Large page size so reordering works across most rows on one page.
    list_per_page = 500
    def __init__(self, *args, **kwargs):
        super(SortableModelAdmin, self).__init__(*args, **kwargs)
        self.ordering = (self.sortable,)
        # Show the sortable field in the changelist and make it editable
        # there, but exclude it from the regular edit form.
        if self.list_display and self.sortable not in self.list_display:
            self.list_display = list(self.list_display) + [self.sortable]
        self.list_editable = self.list_editable or []
        if self.sortable not in self.list_editable:
            self.list_editable = list(self.list_editable) + [self.sortable]
        self.exclude = self.exclude or []
        if self.sortable not in self.exclude:
            self.exclude = list(self.exclude) + [self.sortable]
    def merge_form_meta(self, form):
        """
        Prepare Meta class with order field widget
        """
        if not getattr(form, 'Meta', None):
            form.Meta = SortableListForm.Meta
        if not getattr(form.Meta, 'widgets', None):
            form.Meta.widgets = {}
        form.Meta.widgets[self.sortable] = SortableListForm.Meta.widgets[
            'order']
    def get_changelist_form(self, request, **kwargs):
        # Ensure the changelist form renders the hidden sortable widget.
        form = super(SortableModelAdmin, self).get_changelist_form(request,
                                                                   **kwargs)
        self.merge_form_meta(form)
        return form
    def get_changelist(self, request, **kwargs):
        # Force ordering by the sortable field only.
        return SortableChangeList
    def save_model(self, request, obj, form, change):
        # New objects go to the end of the list: next order is
        # max(existing) + 1, or 1 when the table is empty (the aggregate
        # returns None, which raises TypeError on + 1).
        if not obj.pk:
            max_order = obj.__class__.objects.aggregate(
                models.Max(self.sortable))
            try:
                next_order = max_order['%s__max' % self.sortable] + 1
            except TypeError:
                next_order = 1
            setattr(obj, self.sortable, next_order)
        super(SortableModelAdmin, self).save_model(request, obj, form, change)
# Quite aggressive detection of and intrusion into Django CMS;
# no less invasive solution was found.
if 'cms' in settings.INSTALLED_APPS:
    try:
        from cms.admin.forms import PageForm
        # Swap CMS's publication date widgets for suit's split
        # date/time widget so the admin styling stays consistent.
        PageForm.Meta.widgets = {
            'publication_date': SuitSplitDateTimeWidget,
            'publication_end_date': SuitSplitDateTimeWidget,
        }
    except ImportError:
        pass
|
{
"content_hash": "6455b0d78e8c42421aa168a490e835dd",
"timestamp": "",
"source": "github",
"line_count": 191,
"max_line_length": 78,
"avg_line_length": 32.21465968586387,
"alnum_prop": 0.6183975296603283,
"repo_name": "cubledesarrollo/cubledotes",
"id": "b34f82d399d58185a0f40e20c26a12b7b9b81ea9",
"size": "6153",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "cuble/suit/admin.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "83773"
},
{
"name": "CoffeeScript",
"bytes": "1175"
},
{
"name": "JavaScript",
"bytes": "15618"
},
{
"name": "Python",
"bytes": "242692"
}
],
"symlink_target": ""
}
|
import sys
from typing import Any, AsyncIterable, Callable, Dict, Optional, TypeVar
import urllib.parse
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import (
ClientAuthenticationError,
HttpResponseError,
ResourceExistsError,
ResourceNotFoundError,
ResourceNotModifiedError,
map_error,
)
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse
from azure.core.rest import HttpRequest
from azure.core.tracing.decorator import distributed_trace
from azure.core.utils import case_insensitive_dict
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
from ..._vendor import _convert_request
from ...operations._target_compute_sizes_operations import build_list_by_replication_protected_items_request
# typing.Literal exists only from Python 3.8; fall back to the
# typing_extensions backport on older interpreters.
if sys.version_info >= (3, 8):
    from typing import Literal  # pylint: disable=no-name-in-module, ungrouped-imports
else:
    from typing_extensions import Literal  # type: ignore  # pylint: disable=ungrouped-imports
T = TypeVar("T")
# Signature of the optional ``cls`` response-transformer callback.
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class TargetComputeSizesOperations:
    """
    .. warning::
        **DO NOT** instantiate this class directly.
        Instead, you should access the following operations through
        :class:`~azure.mgmt.recoveryservicessiterecovery.aio.SiteRecoveryManagementClient`'s
        :attr:`target_compute_sizes` attribute.
    """
    models = _models
    def __init__(self, *args, **kwargs) -> None:
        # The generated client passes (client, config, serializer,
        # deserializer) positionally; fall back to keyword arguments.
        input_args = list(args)
        self._client = input_args.pop(0) if input_args else kwargs.pop("client")
        self._config = input_args.pop(0) if input_args else kwargs.pop("config")
        self._serialize = input_args.pop(0) if input_args else kwargs.pop("serializer")
        self._deserialize = input_args.pop(0) if input_args else kwargs.pop("deserializer")
    @distributed_trace
    def list_by_replication_protected_items(
        self, fabric_name: str, protection_container_name: str, replicated_protected_item_name: str, **kwargs: Any
    ) -> AsyncIterable["_models.TargetComputeSize"]:
        """Gets the list of target compute sizes for the replication protected item.
        Lists the available target compute sizes for a replication protected item.
        :param fabric_name: Fabric name. Required.
        :type fabric_name: str
        :param protection_container_name: protection container name. Required.
        :type protection_container_name: str
        :param replicated_protected_item_name: Replication protected item name. Required.
        :type replicated_protected_item_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either TargetComputeSize or the result of cls(response)
        :rtype:
         ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.recoveryservicessiterecovery.models.TargetComputeSize]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        _headers = kwargs.pop("headers", {}) or {}
        _params = case_insensitive_dict(kwargs.pop("params", {}) or {})
        api_version: Literal["2022-09-10"] = kwargs.pop(
            "api_version", _params.pop("api-version", self._config.api_version)
        )
        cls: ClsType[_models.TargetComputeSizeCollection] = kwargs.pop("cls", None)
        # Map HTTP statuses to the azure-core exception types raised for them.
        error_map = {
            401: ClientAuthenticationError,
            404: ResourceNotFoundError,
            409: ResourceExistsError,
            304: ResourceNotModifiedError,
        }
        error_map.update(kwargs.pop("error_map", {}) or {})
        def prepare_request(next_link=None):
            # Build the initial request from the operation parameters, or a
            # follow-up GET when the service returned a continuation link.
            if not next_link:
                request = build_list_by_replication_protected_items_request(
                    fabric_name=fabric_name,
                    protection_container_name=protection_container_name,
                    replicated_protected_item_name=replicated_protected_item_name,
                    resource_name=self._config.resource_name,
                    resource_group_name=self._config.resource_group_name,
                    subscription_id=self._config.subscription_id,
                    api_version=api_version,
                    template_url=self.list_by_replication_protected_items.metadata["url"],
                    headers=_headers,
                    params=_params,
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
            else:
                # make call to next link with the client's api-version
                _parsed_next_link = urllib.parse.urlparse(next_link)
                _next_request_params = case_insensitive_dict(
                    {
                        key: [urllib.parse.quote(v) for v in value]
                        for key, value in urllib.parse.parse_qs(_parsed_next_link.query).items()
                    }
                )
                _next_request_params["api-version"] = self._config.api_version
                request = HttpRequest(
                    "GET", urllib.parse.urljoin(next_link, _parsed_next_link.path), params=_next_request_params
                )
                request = _convert_request(request)
                request.url = self._client.format_url(request.url)
                request.method = "GET"
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and return (next_link, items) for the pager.
            deserialized = self._deserialize("TargetComputeSizeCollection", pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)  # type: ignore
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            # Fetch one page, raising for any status other than 200.
            request = prepare_request(next_link)
            pipeline_response: PipelineResponse = await self._client._pipeline.run(  # pylint: disable=protected-access
                request, stream=False, **kwargs
            )
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(get_next, extract_data)
    # URL template used to build the initial page request.
    list_by_replication_protected_items.metadata = {
        "url": "/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.RecoveryServices/vaults/{resourceName}/replicationFabrics/{fabricName}/replicationProtectionContainers/{protectionContainerName}/replicationProtectedItems/{replicatedProtectedItemName}/targetComputeSizes"
    }
|
{
"content_hash": "2b97147bed9e0bac92cd183783511a24",
"timestamp": "",
"source": "github",
"line_count": 149,
"max_line_length": 307,
"avg_line_length": 46.38255033557047,
"alnum_prop": 0.6537404138330198,
"repo_name": "Azure/azure-sdk-for-python",
"id": "e89eb243c02f6a8a5751aed490c141d0cce59b69",
"size": "7411",
"binary": false,
"copies": "1",
"ref": "refs/heads/main",
"path": "sdk/recoveryservices/azure-mgmt-recoveryservicessiterecovery/azure/mgmt/recoveryservicessiterecovery/aio/operations/_target_compute_sizes_operations.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "1224"
},
{
"name": "Bicep",
"bytes": "24196"
},
{
"name": "CSS",
"bytes": "6089"
},
{
"name": "Dockerfile",
"bytes": "4892"
},
{
"name": "HTML",
"bytes": "12058"
},
{
"name": "JavaScript",
"bytes": "8137"
},
{
"name": "Jinja",
"bytes": "10377"
},
{
"name": "Jupyter Notebook",
"bytes": "272022"
},
{
"name": "PowerShell",
"bytes": "518535"
},
{
"name": "Python",
"bytes": "715484989"
},
{
"name": "Shell",
"bytes": "3631"
}
],
"symlink_target": ""
}
|
import argparse
import os.path as osp
from multiprocessing import Pool
import mmcv
import numpy as np
from mmcv import Config, DictAction
from mmdet.core.evaluation import eval_map
from mmdet.core.visualization import imshow_gt_det_bboxes
from mmdet.datasets import build_dataset, get_loading_pipeline
from mmdet.datasets.api_wrappers import pq_compute_single_core
from mmdet.utils import replace_cfg_vals, update_data_root
def bbox_map_eval(det_result, annotation, nproc=4):
    """Evaluate mAP of single image det result.
    Args:
        det_result (list[list]): [[cls1_det, cls2_det, ...], ...].
            The outer list indicates images, and the inner list indicates
            per-class detected bboxes.
        annotation (dict): Ground truth annotations where keys of
             annotations are:
            - bboxes: numpy array of shape (n, 4)
            - labels: numpy array of shape (n, )
            - bboxes_ignore (optional): numpy array of shape (k, 4)
            - labels_ignore (optional): numpy array of shape (k, )
        nproc (int): Processes used for computing mAP.
            Default: 4.
    Returns:
        float: mAP
    """
    # use only bbox det result
    if isinstance(det_result, tuple):
        bbox_det_result = [det_result[0]]
    else:
        bbox_det_result = [det_result]
    # mAP
    # COCO-style IoU thresholds: 0.50, 0.55, ..., 0.95.
    iou_thrs = np.linspace(
        .5, 0.95, int(np.round((0.95 - .5) / .05)) + 1, endpoint=True)
    # Evaluate each IoU threshold in a worker process, then average.
    processes = []
    workers = Pool(processes=nproc)
    for thr in iou_thrs:
        p = workers.apply_async(eval_map, (bbox_det_result, [annotation]), {
            'iou_thr': thr,
            'logger': 'silent',
            'nproc': 1
        })
        processes.append(p)
    workers.close()
    workers.join()
    mean_aps = []
    for p in processes:
        mean_aps.append(p.get()[0])
    return sum(mean_aps) / len(mean_aps)
class ResultVisualizer:
    """Display and save evaluation results.

    Args:
        show (bool): Whether to show the image. Default: True.
        wait_time (float): Value of waitKey param. Default: 0.
        score_thr (float): Minimum score of bboxes to be shown.
            Default: 0.
        overlay_gt_pred (bool): Whether to plot gts and predictions on the
            same image. If False, predictions and gts will be plotted on two
            same image which will be concatenated in vertical direction.
            The image above is drawn with gt, and the image below is drawn
            with the prediction result. Default: False.
    """

    def __init__(self,
                 show=False,
                 wait_time=0,
                 score_thr=0,
                 overlay_gt_pred=False):
        self.show = show
        self.wait_time = wait_time
        self.score_thr = score_thr
        self.overlay_gt_pred = overlay_gt_pred

    def _save_image_gts_results(self,
                                dataset,
                                results,
                                performances,
                                out_dir=None):
        """Display or save image with ground truths and predictions from a
        model.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Object detection or panoptic segmentation
                results from test results pkl file.
            performances (dict): A dict contains samples's indices
                in dataset and model's performance on them.
            out_dir (str, optional): The filename to write the image.
                Defaults: None.
        """
        mmcv.mkdir_or_exist(out_dir)
        for performance_info in performances:
            index, performance = performance_info
            data_info = dataset.prepare_train_img(index)
            # Compute the save path: the performance value is embedded in
            # the output file name, e.g. 'img_0.874.jpg'.
            filename = data_info['filename']
            if data_info['img_prefix'] is not None:
                filename = osp.join(data_info['img_prefix'], filename)
            fname, name = osp.splitext(osp.basename(filename))
            save_filename = fname + '_' + str(round(performance, 3)) + name
            out_file = osp.join(out_dir, save_filename)
            imshow_gt_det_bboxes(
                data_info['img'],
                data_info,
                results[index],
                dataset.CLASSES,
                gt_bbox_color=dataset.PALETTE,
                gt_text_color=(200, 200, 200),
                gt_mask_color=dataset.PALETTE,
                det_bbox_color=dataset.PALETTE,
                det_text_color=(200, 200, 200),
                det_mask_color=dataset.PALETTE,
                show=self.show,
                score_thr=self.score_thr,
                wait_time=self.wait_time,
                out_file=out_file,
                overlay_gt_pred=self.overlay_gt_pred)

    def evaluate_and_show(self,
                          dataset,
                          results,
                          topk=20,
                          show_dir='work_dir'):
        """Evaluate and show results.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Object detection or panoptic segmentation
                results from test results pkl file.
            topk (int): Number of the highest topk and
                lowest topk after evaluation index sorting. Default: 20.
            show_dir (str, optional): The filename to write the image.
                Default: 'work_dir'
        """
        assert topk > 0
        if (topk * 2) > len(dataset):
            topk = len(dataset) // 2
        # Dispatch on the per-image result type: dict -> panoptic,
        # list -> detection, tuple -> detection with extra outputs.
        if isinstance(results[0], dict):
            good_samples, bad_samples = self.panoptic_evaluate(
                dataset, results, topk=topk)
        elif isinstance(results[0], list):
            good_samples, bad_samples = self.detection_evaluate(
                dataset, results, topk=topk)
        elif isinstance(results[0], tuple):
            results_ = [result[0] for result in results]
            good_samples, bad_samples = self.detection_evaluate(
                dataset, results_, topk=topk)
        else:
            # Bug fix: raising a plain string is a TypeError on Python 3
            # ("exceptions must derive from BaseException"), which hid the
            # intended message; raise a proper exception instead.
            raise TypeError(
                'The format of result is not supported yet. '
                'Current dict for panoptic segmentation and list '
                'or tuple for object detection are supported.')
        good_dir = osp.abspath(osp.join(show_dir, 'good'))
        bad_dir = osp.abspath(osp.join(show_dir, 'bad'))
        self._save_image_gts_results(dataset, results, good_samples, good_dir)
        self._save_image_gts_results(dataset, results, bad_samples, bad_dir)

    def detection_evaluate(self, dataset, results, topk=20, eval_fn=None):
        """Evaluation for object detection.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Object detection results from test
                results pkl file.
            topk (int): Number of the highest topk and
                lowest topk after evaluation index sorting. Default: 20.
            eval_fn (callable, optional): Eval function, Default: None.

        Returns:
            tuple: A tuple contains good samples and bad samples.
                good_mAPs (dict[int, float]): A dict contains good
                    samples's indices in dataset and model's
                    performance on them.
                bad_mAPs (dict[int, float]): A dict contains bad
                    samples's indices in dataset and model's
                    performance on them.
        """
        if eval_fn is None:
            eval_fn = bbox_map_eval
        else:
            assert callable(eval_fn)
        prog_bar = mmcv.ProgressBar(len(results))
        _mAPs = {}
        for i, result in enumerate(results):
            # self.dataset[i] should not call directly
            # because there is a risk of mismatch
            data_info = dataset.prepare_train_img(i)
            mAP = eval_fn(result, data_info['ann_info'])
            _mAPs[i] = mAP
            prog_bar.update()
        # Sort ascending by mAP, then take the two extremes.
        _mAPs = list(sorted(_mAPs.items(), key=lambda kv: kv[1]))
        good_mAPs = _mAPs[-topk:]
        bad_mAPs = _mAPs[:topk]
        return good_mAPs, bad_mAPs

    def panoptic_evaluate(self, dataset, results, topk=20):
        """Evaluation for panoptic segmentation.

        Args:
            dataset (Dataset): A PyTorch dataset.
            results (list): Panoptic segmentation results from test
                results pkl file.
            topk (int): Number of the highest topk and
                lowest topk after evaluation index sorting. Default: 20.

        Returns:
            tuple: A tuple contains good samples and bad samples.
                good_pqs (dict[int, float]): A dict contains good
                    samples's indices in dataset and model's
                    performance on them.
                bad_pqs (dict[int, float]): A dict contains bad
                    samples's indices in dataset and model's
                    performance on them.
        """
        # image to annotations
        gt_json = dataset.coco.img_ann_map
        result_files, tmp_dir = dataset.format_results(results)
        pred_json = mmcv.load(result_files['panoptic'])['annotations']
        pred_folder = osp.join(tmp_dir.name, 'panoptic')
        gt_folder = dataset.seg_prefix
        pqs = {}
        prog_bar = mmcv.ProgressBar(len(results))
        for i in range(len(results)):
            data_info = dataset.prepare_train_img(i)
            image_id = data_info['img_info']['id']
            gt_ann = {
                'image_id': image_id,
                'segments_info': gt_json[image_id],
                'file_name': data_info['img_info']['segm_file']
            }
            pred_ann = pred_json[i]
            # Per-image PQ so images can be ranked individually.
            pq_stat = pq_compute_single_core(
                i, [(gt_ann, pred_ann)],
                gt_folder,
                pred_folder,
                dataset.categories,
                dataset.file_client,
                print_log=False)
            pq_results, classwise_results = pq_stat.pq_average(
                dataset.categories, isthing=None)
            pqs[i] = pq_results['pq']
            prog_bar.update()
        if tmp_dir is not None:
            tmp_dir.cleanup()
        # Sort ascending by PQ, then take the two extremes.
        pqs = list(sorted(pqs.items(), key=lambda kv: kv[1]))
        good_pqs = pqs[-topk:]
        bad_pqs = pqs[:topk]
        return good_pqs, bad_pqs
def parse_args():
    """Parse command-line arguments for the result-analysis tool."""
    parser = argparse.ArgumentParser(
        description='MMDet eval image prediction result for each')
    parser.add_argument('config', help='test config file path')
    parser.add_argument(
        'prediction_path', help='prediction path where test pkl result')
    parser.add_argument(
        'show_dir', help='directory where painted images will be saved')
    parser.add_argument('--show', action='store_true', help='show results')
    parser.add_argument(
        '--wait-time',
        type=float,
        default=0,
        help='the interval of show (s), 0 is block')
    parser.add_argument(
        '--topk',
        default=20,
        type=int,
        help='saved Number of the highest topk '
        'and lowest topk after index sorting')
    parser.add_argument(
        '--show-score-thr',
        type=float,
        default=0,
        help='score threshold (default: 0.)')
    parser.add_argument(
        '--overlay-gt-pred',
        action='store_true',
        help='whether to plot gts and predictions on the same image.'
        'If False, predictions and gts will be plotted on two same'
        'image which will be concatenated in vertical direction.'
        'The image above is drawn with gt, and the image below is'
        'drawn with the prediction result.')
    parser.add_argument(
        '--cfg-options',
        nargs='+',
        action=DictAction,
        help='override some settings in the used config, the key-value pair '
        'in xxx=yyy format will be merged into config file. If the value to '
        'be overwritten is a list, it should be like key="[a,b]" or key=a,b '
        'It also allows nested list/tuple values, e.g. key="[(a,b),(c,d)]" '
        'Note that the quotation marks are necessary and that no white space '
        'is allowed.')
    args = parser.parse_args()
    return args
def main():
    """Load config + predictions, then visualize per-image eval results.

    Reads the test pickle produced by tools/test.py, rebuilds the test
    dataset with a loading-only pipeline (so raw images are available for
    drawing), and delegates to ResultVisualizer.
    """
    args = parse_args()
    mmcv.check_file_exist(args.prediction_path)
    cfg = Config.fromfile(args.config)
    # replace the ${key} with the value of cfg.key
    cfg = replace_cfg_vals(cfg)
    # update data root according to MMDET_DATASETS
    update_data_root(cfg)
    if args.cfg_options is not None:
        cfg.merge_from_dict(args.cfg_options)
    cfg.data.test.test_mode = True
    # samples_per_gpu is irrelevant for single-image visualization.
    cfg.data.test.pop('samples_per_gpu', 0)
    # Wrapper datasets keep the real pipeline on their inner `dataset`.
    if cfg.data.train.type in ('MultiImageMixDataset', 'ClassBalancedDataset',
                               'RepeatDataset', 'ConcatDataset'):
        cfg.data.test.pipeline = get_loading_pipeline(
            cfg.data.train.dataset.pipeline)
    else:
        cfg.data.test.pipeline = get_loading_pipeline(cfg.data.train.pipeline)
    dataset = build_dataset(cfg.data.test)
    outputs = mmcv.load(args.prediction_path)
    result_visualizer = ResultVisualizer(args.show, args.wait_time,
                                         args.show_score_thr,
                                         args.overlay_gt_pred)
    result_visualizer.evaluate_and_show(
        dataset, outputs, topk=args.topk, show_dir=args.show_dir)
# Script entry point.
if __name__ == '__main__':
    main()
|
{
"content_hash": "124ec272457fe4df66a586f1f73e1588",
"timestamp": "",
"source": "github",
"line_count": 368,
"max_line_length": 78,
"avg_line_length": 37.21195652173913,
"alnum_prop": 0.5660143128377392,
"repo_name": "open-mmlab/mmdetection",
"id": "4d8b60c96da3fee1d62db0617b1baab686fa5313",
"size": "13742",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "tools/analysis_tools/analyze_results.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "Dockerfile",
"bytes": "2540"
},
{
"name": "Python",
"bytes": "4811377"
},
{
"name": "Shell",
"bytes": "47911"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Redefine ``attachment.attachment_type`` as a CharField with the
    passport/cheque/other choice set (max_length 50).

    Declarative Django migration — field definition must stay in sync with
    the model's current state; do not edit the operation by hand unless the
    model changed accordingly.
    """

    dependencies = [
        ('attachments', '0002_auto_20150916_0438'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attachment',
            name='attachment_type',
            field=models.CharField(verbose_name='Attachment Type', choices=[('passport', 'Passport'), ('cheque', 'Cheque'), ('other', 'Other Document')], max_length=50),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "1484ab46a1dab0f76bd7f628deb6cb23",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 169,
"avg_line_length": 28.894736842105264,
"alnum_prop": 0.6120218579234973,
"repo_name": "phani00/tovp",
"id": "3802054b59121945241db770f9d4703e584362d0",
"size": "573",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "tovp/attachments/migrations/0003_auto_20151017_1058.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "190528"
},
{
"name": "HTML",
"bytes": "288449"
},
{
"name": "JavaScript",
"bytes": "2887"
},
{
"name": "Python",
"bytes": "514452"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import migrations, models
import registers.models
class Migration(migrations.Migration):
    """Extend the ``itsystem`` model with continuity/recovery metadata.

    Adds capability/function/use/risks ChoiceArrayFields plus free-text
    recovery-procedure fields, and relabels several existing fields.
    NOTE(review): ``NullBooleanField`` is deprecated in later Django
    versions — fine for the Django version this migration targets.
    """

    dependencies = [
        ('registers', '0002_auto_20160919_1303'),
    ]
    operations = [
        migrations.AlterModelOptions(
            name='processitsystemrelationship',
            options={'verbose_name_plural': 'Process/IT System relationships'},
        ),
        migrations.AddField(
            model_name='itsystem',
            name='alt_processing',
            field=models.TextField(blank=True, null=True, verbose_name='alternate processing procedure'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='capability',
            field=registers.models.ChoiceArrayField(base_field=models.CharField(choices=[('0', 'Information lifecycle'), ('1', 'Communication and collaboration'), ('2', 'Automation and integration'), ('3', 'Security and risk management'), ('4', 'Intelligence and analytics')], max_length=256), blank=True, default=list, null=True, size=None, verbose_name='IT System capabilities'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='critical_period',
            field=models.CharField(blank=True, help_text='Is there a period/season when this system is most important?', max_length=255, null=True),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='function',
            field=registers.models.ChoiceArrayField(base_field=models.CharField(choices=[('0', 'Planning'), ('1', 'Operation'), ('2', 'Reporting')], max_length=256), blank=True, default=list, null=True, size=None, verbose_name='IT System function(s)'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='legal_need_to_retain',
            field=models.NullBooleanField(default=None, help_text='Is there a legal or compliance need to keep the digital content in this system?'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='notes',
            field=models.TextField(blank=True, null=True),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='point_of_truth',
            field=models.NullBooleanField(default=None, help_text='Is the digital content kept in this business system a single point of truth?'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='post_recovery',
            field=models.TextField(blank=True, help_text='Functional testing and post recovery procedure.', null=True, verbose_name='post recovery procedure'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='risks',
            field=registers.models.ChoiceArrayField(base_field=models.CharField(choices=[('0', 'IT System features not aligned to business processes'), ('1', 'IT System technology refresh lifecycle not safeguarded or future-proofed'), ('2', 'IT System data/information integrity and availability not aligned to business processes'), ('3', 'IT System emergency contingency and disaster recovery approach not well established'), ('4', 'IT System support arrangements not well established, value for money and/or safeguarded'), ('5', 'IT System roles and responsibilities not well established'), ('6', 'IT System solution design not aligned to department IT standards'), ('7', 'IT System change management does not consistently consider risk and security'), ('8', 'IT System incident and security management not triaged on business criticality'), ('9', 'IT System user training not well established')], max_length=256), blank=True, default=list, null=True, size=None, verbose_name='IT System risks'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='system_creation_date',
            field=models.DateField(blank=True, help_text='Date that this system went into production.', null=True),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='system_health',
            field=models.PositiveIntegerField(blank=True, choices=[(0, 'Healthy'), (1, 'Issues noted'), (2, 'At risk')], null=True),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='technical_recov',
            field=models.TextField(blank=True, null=True, verbose_name='technical recovery procedure'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='unique_evidence',
            field=models.NullBooleanField(default=None, help_text='Is the digital content kept in this business system unique evidence of the official business of the Department?'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='use',
            field=registers.models.ChoiceArrayField(base_field=models.CharField(choices=[('0', 'Measurement'), ('1', 'Information'), ('2', 'Wisdom'), ('3', 'Data'), ('4', 'Knowledge'), ('5', 'Intelligence')], max_length=256), blank=True, default=list, null=True, size=None, verbose_name='IT System use(s)'),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='user_notification',
            field=models.TextField(blank=True, help_text='List of users/stakeholders to contact regarding incidents', null=True),
        ),
        migrations.AddField(
            model_name='itsystem',
            name='variation_iscp',
            field=models.TextField(blank=True, null=True, verbose_name='Variation to the ISCP'),
        ),
        migrations.AlterField(
            model_name='itsystem',
            name='access',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Public Internet'), (2, 'Authenticated Extranet'), (3, 'Corporate Network'), (4, 'Local System (Networked)'), (5, 'Local System (Standalone)')], default=3, help_text='The network upon which this system is accessible.'),
        ),
        migrations.AlterField(
            model_name='itsystem',
            name='authentication',
            field=models.PositiveSmallIntegerField(choices=[(1, 'Domain/application Credentials'), (2, 'Single Sign On'), (3, 'Externally Managed')], default=1, help_text='The method by which users authenticate themselve to the system.'),
        ),
        migrations.AlterField(
            model_name='itsystem',
            name='criticality',
            field=models.PositiveIntegerField(blank=True, choices=[(1, 'Critical'), (2, 'Moderate'), (3, 'Low')], help_text='How critical is this system to P&W core functions?', null=True),
        ),
        migrations.AlterField(
            model_name='itsystem',
            name='hardwares',
            field=models.ManyToManyField(blank=True, help_text='Hardware that is used to provide this IT System', to='registers.ITSystemHardware', verbose_name='hardware'),
        ),
        migrations.AlterField(
            model_name='itsystem',
            name='softwares',
            field=models.ManyToManyField(blank=True, help_text='Software that is used to provide this IT System', to='registers.Software', verbose_name='software'),
        ),
    ]
|
{
"content_hash": "e415b95a90ebfdb83515fbbfab09059a",
"timestamp": "",
"source": "github",
"line_count": 123,
"max_line_length": 997,
"avg_line_length": 58.77235772357724,
"alnum_prop": 0.6331442799834002,
"repo_name": "rockychen-dpaw/oim-cms",
"id": "076cbe6e30ef3cd3117ac379df474c42b0e12f34",
"size": "7301",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "registers/migrations/0003_auto_20161003_2217.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "33080"
},
{
"name": "HTML",
"bytes": "68403"
},
{
"name": "JavaScript",
"bytes": "141247"
},
{
"name": "Python",
"bytes": "649405"
}
],
"symlink_target": ""
}
|
import os
from os import system as _sh
import os.path as osp
from os.path import join, basename
import sys
from time import sleep
import _mysql_exceptions
from glob import glob
from functools import partial
sys.path.append('/root/boinc/py')
import boinc_path_config
from Boinc import database, configxml
# Shell helper: formats the command with the module namespace. locals() at
# module scope returns the live module dict, so names assigned later
# (PROJHOME, f, ...) are visible when sh() is called. (Python 2 script.)
sh = partial(lambda s,l: _sh(s.format(**l)),l=locals())
PROJHOME=os.environ['PROJHOME']
PROJHOME_DST=PROJHOME+'.dst'
print "Copying project files to data volume..."
# Both globs are needed: '*' skips dotfiles, '.*' picks them up.
for f in glob(join(PROJHOME,'*'))+glob(join(PROJHOME,'.*')):
    sh('cp -rp "{f}" {PROJHOME_DST}')
sh('rm -rf {PROJHOME}; ln -s {PROJHOME_DST} {PROJHOME}')
if not '--copy-only' in sys.argv:
    print "Creating database..."
    waited=False
    # Retry loop: polls until the mysql server accepts connections.
    while True:
        try:
            database.create_database(srcdir='/root/boinc',
                config=configxml.ConfigFile(filename=join(PROJHOME,'config.xml')).read().config,
                drop_first=False)
        except _mysql_exceptions.ProgrammingError as e:
            # Error 1007: database exists already; treat as success.
            if e[0]==1007:
                print "Database exists, not overwriting."
                break
            else:
                raise
        except _mysql_exceptions.OperationalError as e:
            # Error 2003: can't connect — server still starting; wait and retry.
            if e[0]==2003:
                if waited: sys.stdout.write('.'); sys.stdout.flush()
                else:
                    sys.stdout.write("Waiting for mysql server to start..."); sys.stdout.flush()
                    waited=True
                sleep(1)
            else:
                raise
        else:
            # Fresh database created: record the schema version.
            sh('cd {PROJHOME}/html/ops; ./db_schemaversion.php > {PROJHOME}/db_revision')
            break
    if waited: sys.stdout.write('\n')
    print "Running BOINC update scripts..."
    os.chdir(PROJHOME)
    sh('bin/xadd')
    # update_versions prompts repeatedly; pipe in ten 'y' answers.
    sh('(%s) | bin/update_versions'%('; '.join(['echo y']*10)))
# Marker file signalling the project is fully initialized.
sh('touch {PROJHOME}/.ready')
|
{
"content_hash": "bf17b2a671ff7f1c6d221ab85a19a8e9",
"timestamp": "",
"source": "github",
"line_count": 62,
"max_line_length": 118,
"avg_line_length": 30.774193548387096,
"alnum_prop": 0.5718029350104822,
"repo_name": "grctest/project-rain-site",
"id": "5cdcbe1a35443820f7abbb77950f89318ab0394f",
"size": "1931",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "ProjectRain_Docker/images/makeproject/postbuild.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "ApacheConf",
"bytes": "103"
},
{
"name": "C++",
"bytes": "134254"
},
{
"name": "CSS",
"bytes": "83476"
},
{
"name": "Go",
"bytes": "146"
},
{
"name": "HTML",
"bytes": "111578"
},
{
"name": "JavaScript",
"bytes": "177392"
},
{
"name": "PHP",
"bytes": "757095"
},
{
"name": "Python",
"bytes": "24653"
},
{
"name": "Ruby",
"bytes": "4719"
},
{
"name": "Shell",
"bytes": "33478"
}
],
"symlink_target": ""
}
|
"""Reports binary size metrics for LaCrOS build artifacts.
More information at //docs/speed/binary_size/metrics.md.
"""
import argparse
import collections
import contextlib
import json
import logging
import os
import subprocess
import sys
import tempfile
# Make //build/util importable so the result_sink/result_types helpers resolve.
SRC_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, os.path.join(SRC_DIR, 'build', 'util'))
from lib.results import result_sink
from lib.results import result_types
@contextlib.contextmanager
def _SysPath(path):
"""Library import context that temporarily appends |path| to |sys.path|."""
if path and path not in sys.path:
sys.path.insert(0, path)
else:
path = None # Indicates that |sys.path| is not modified.
try:
yield
finally:
if path:
sys.path.pop(0)
# Checkout root: CHECKOUT_SOURCE_ROOT env var wins, else derive from this
# file's location (two levels up).
DIR_SOURCE_ROOT = os.environ.get(
    'CHECKOUT_SOURCE_ROOT',
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)))
BUILD_COMMON_PATH = os.path.join(DIR_SOURCE_ROOT, 'build', 'util', 'lib',
                                 'common')
TRACING_PATH = os.path.join(DIR_SOURCE_ROOT, 'third_party', 'catapult',
                            'tracing')
# eu-strip binary used by _get_catagorized_filesizes to measure stripped sizes.
EU_STRIP_PATH = os.path.join(DIR_SOURCE_ROOT, 'buildtools', 'third_party',
                             'eu-strip', 'bin', 'eu-strip')
with _SysPath(BUILD_COMMON_PATH):
  import perf_tests_results_helper  # pylint: disable=import-error
with _SysPath(TRACING_PATH):
  from tracing.value import convert_chart_json  # pylint: disable=import-error
# Skeleton chartjson document; copied and populated in _run_resource_sizes.
_BASE_CHART = {
    'format_version': '0.1',
    'benchmark_name': 'resource_sizes',
    'trace_rerun_options': [],
    'charts': {}
}
# Counter keys for the size variants measured per file.
_KEY_RAW = 'raw'
_KEY_GZIPPED = 'gzipped'
_KEY_STRIPPED = 'stripped'
_KEY_STRIPPED_GZIPPED = 'stripped_then_gzipped'
class _Group:
"""A group of build artifacts whose file sizes are summed and tracked.
Build artifacts for size tracking fall under these categories:
* File: A single file.
* Group: A collection of files.
* Dir: All files under a directory.
Attributes:
paths: A list of files or directories to be tracked together.
title: The display name of the group.
track_stripped: Whether to also track summed stripped ELF sizes.
track_compressed: Whether to also track summed compressed sizes.
"""
def __init__(self, paths, title, track_stripped=False,
track_compressed=False):
self.paths = paths
self.title = title
self.track_stripped = track_stripped
self.track_compressed = track_compressed
def __eq__(self, other):
"""Overrides the default implementation"""
if isinstance(other, _Group):
return (self.paths == other.paths) & (self.title == other.title) & (
self.track_stripped == other.track_stripped) & (
self.track_compressed == other.track_compressed)
return False
# Common artifacts in official builder lacros-arm32 and lacros64 in
# src-internal. The artifcts can be found in
# chromium/src-internal/testing/buildbot/archive/lacros64.json and
# chromium/src-internal/testing/buildbot/archive/lacros-arm32.json
# chromium/src-internal/testing/buildbot/archive/lacros-arm64.json
# List order determines report order; _run_resource_sizes copies this list
# and appends/removes per-architecture entries before iterating.
_TRACKED_GROUPS = [
    _Group(paths=['chrome'],
           title='File: chrome',
           track_stripped=True,
           track_compressed=True),
    _Group(paths=['chrome_crashpad_handler'],
           title='File: chrome_crashpad_handler'),
    _Group(paths=['icudtl.dat'], title='File: icudtl.dat'),
    _Group(paths=['icudtl.dat.hash'], title='File: icudtl.dat.hash'),
    _Group(paths=['libEGL.so'], title='File: libEGL.so'),
    _Group(paths=['libGLESv2.so'], title='File: libGLESv2.so'),
    _Group(paths=['nacl_helper'], title='File: nacl_helper'),
    _Group(paths=['resources.pak'], title='File: resources.pak'),
    _Group(paths=[
        'chrome_100_percent.pak', 'chrome_200_percent.pak',
        'headless_lib_data.pak', 'headless_lib_strings.pak'
    ],
           title='Group: Other PAKs'),
    _Group(paths=['snapshot_blob.bin'], title='Group: Misc'),
    _Group(paths=['locales/'], title='Dir: locales'),
    _Group(paths=['WidevineCdm/'], title='Dir: WidevineCdm'),
]
def _visit_paths(base_dir, paths):
"""Itemizes files specified by a list of paths.
Args:
base_dir: Base directory for all elements in |paths|.
paths: A list of filenames or directory names to specify files whose sizes
to be counted. Directories are recursed. There's no de-duping effort.
Non-existing files or directories are ignored (with warning message).
"""
for path in paths:
full_path = os.path.join(base_dir, path)
if os.path.exists(full_path):
if os.path.isdir(full_path):
for dirpath, _, filenames in os.walk(full_path):
for filename in filenames:
yield os.path.join(dirpath, filename)
else: # Assume is file.
yield full_path
else:
logging.critical('Not found: %s', path)
def _is_probably_elf(filename):
"""Heuristically decides whether |filename| is ELF via magic signature."""
with open(filename, 'rb') as fh:
return fh.read(4) == '\x7FELF'
def _is_unstrippable_elf(filename):
"""Identifies known-unstrippable ELF files to denoise the system."""
return filename.endswith('.nexe') or filename.endswith('libwidevinecdm.so')
def _get_filesize(filename):
"""Returns the size of a file, or 0 if file is not found."""
try:
return os.path.getsize(filename)
except OSError:
logging.critical('Failed to get size: %s', filename)
return 0
def _get_gzipped_filesize(filename):
"""Returns the gzipped size of a file, or 0 if file is not found."""
BUFFER_SIZE = 65536
if not os.path.isfile(filename):
return 0
try:
# Call gzip externally instead of using gzip package since it's > 2x faster.
cmd = ['gzip', '-c', filename]
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
# Manually counting bytes instead of using len(p.communicate()[0]) to avoid
# buffering the entire compressed data (can be ~100 MB).
ret = 0
while True:
chunk = len(p.stdout.read(BUFFER_SIZE))
if chunk == 0:
break
ret += chunk
return ret
except OSError:
logging.critical('Failed to get gzipped size: %s', filename)
return 0
def _get_catagorized_filesizes(filename):
  """Measures |filename| sizes under various transforms.

  Returns: A Counter (keyed by _KEY_* constants) that stores measured sizes.
  """
  sizes = collections.Counter()
  raw_size = _get_filesize(filename)
  gzipped_size = _get_gzipped_filesize(filename)
  sizes[_KEY_RAW] = raw_size
  sizes[_KEY_GZIPPED] = gzipped_size
  # Default the stripped variants to the unstripped numbers; they stay that
  # way for non-ELF files and when stripping fails.
  sizes[_KEY_STRIPPED] = raw_size
  sizes[_KEY_STRIPPED_GZIPPED] = gzipped_size
  if _is_probably_elf(filename) and not _is_unstrippable_elf(filename):
    try:
      fd, stripped_path = tempfile.mkstemp()
      os.close(fd)
      subprocess.check_output([EU_STRIP_PATH, filename, '-o', stripped_path])
      sizes[_KEY_STRIPPED] = _get_filesize(stripped_path)
      sizes[_KEY_STRIPPED_GZIPPED] = _get_gzipped_filesize(stripped_path)
      if sizes[_KEY_STRIPPED] > sizes[_KEY_RAW]:
        # This weird case has been observed for libwidevinecdm.so.
        logging.critical('Stripping made things worse for %s' % filename)
    except subprocess.CalledProcessError:
      logging.critical('Failed to strip file: %s' % filename)
    finally:
      os.unlink(stripped_path)
  return sizes
def _dump_chart_json(output_dir, chartjson):
  """Writes chart histogram to JSON files.

  Output files:
    results-chart.json contains the chart JSON.
    perf_results.json contains histogram JSON for Catapult.

  Args:
    output_dir: Directory to place the JSON files.
    chartjson: Source JSON data for output files.
  """
  chart_path = os.path.join(output_dir, 'results-chart.json')
  logging.critical('Dumping chartjson to %s', chart_path)
  with open(chart_path, 'w') as fh:
    json.dump(chartjson, fh, indent=2)
  # We would ideally generate a histogram set directly instead of generating
  # chartjson then converting. However, perf_tests_results_helper is in
  # //build, which doesn't seem to have any precedent for depending on
  # anything in Catapult. This can probably be fixed, but since this doesn't
  # need to be super fast or anything, converting is a good enough solution
  # for the time being.
  conversion = convert_chart_json.ConvertChartJson(chart_path)
  if conversion.returncode != 0:
    raise Exception('chartjson conversion failed with error: ' +
                    conversion.stdout)
  histogram_path = os.path.join(output_dir, 'perf_results.json')
  logging.critical('Dumping histograms to %s', histogram_path)
  # Histogram output is written as bytes ('wb').
  with open(histogram_path, 'wb') as fh:
    fh.write(conversion.stdout)
def _run_resource_sizes(args):
  """Main flow to extract and output size data.

  Measures each tracked artifact group under args.out_dir, reports the
  per-group and total sizes into a chartjson dict, then dumps it to
  args.output_dir.
  """
  # NOTE(review): .copy() is shallow, so the nested 'charts' dict is shared
  # with _BASE_CHART — fine for a single run of this script.
  chartjson = _BASE_CHART.copy()
  chartjson.update({
      'benchmark_description':
      ('LaCrOS %s resource size information.' % args.arch)
  })
  report_func = perf_tests_results_helper.ReportPerfResult
  total_sizes = collections.Counter()

  def report_sizes(sizes, title, track_stripped, track_compressed):
    # Emits the raw size plus the optional stripped/gzipped variants for one
    # group into chartjson.
    report_func(chart_data=chartjson,
                graph_title=title,
                trace_title='size',
                value=sizes[_KEY_RAW],
                units='bytes')
    if track_stripped:
      report_func(chart_data=chartjson,
                  graph_title=title + ' (Stripped)',
                  trace_title='size',
                  value=sizes[_KEY_STRIPPED],
                  units='bytes')
    if track_compressed:
      report_func(chart_data=chartjson,
                  graph_title=title + ' (Gzipped)',
                  trace_title='size',
                  value=sizes[_KEY_GZIPPED],
                  units='bytes')
    if track_stripped and track_compressed:
      report_func(chart_data=chartjson,
                  graph_title=title + ' (Stripped, Gzipped)',
                  trace_title='size',
                  value=sizes[_KEY_STRIPPED_GZIPPED],
                  units='bytes')

  # Per-architecture adjustments operate on a copy so the module-level list
  # stays pristine.
  tracked_groups = _TRACKED_GROUPS.copy()
  # Architecture amd64 requires artifact nacl_irt_x86_64.nexe.
  if args.arch == 'amd64':
    tracked_groups.append(
        _Group(paths=['nacl_irt_x86_64.nexe'],
               title='File: nacl_irt_x86_64.nexe'))
  # Architecture arm32 requires artifact nacl_irt_arm.nexe.
  elif args.arch == 'arm32':
    tracked_groups.append(
        _Group(paths=['nacl_irt_arm.nexe'], title='File: nacl_irt_arm.nexe'))
    tracked_groups.append(
        _Group(paths=['nacl_helper_bootstrap'],
               title='File: nacl_helper_bootstrap'))
  # TODO(https://crbug.com/1356761): remove the following part once nacl files
  # are available.
  elif args.arch == 'arm64':
    # Relies on _Group.__eq__ for list removal.
    tracked_groups.remove(
        _Group(paths=['nacl_helper'], title='File: nacl_helper'))
  for g in tracked_groups:
    sizes = sum(
        map(_get_catagorized_filesizes, _visit_paths(args.out_dir, g.paths)),
        collections.Counter())
    report_sizes(sizes, g.title, g.track_stripped, g.track_compressed)
    # Total compressed size is summed over individual compressed sizes, instead
    # of concatanating first, then compress everything. This is done for
    # simplicity. It also gives a conservative size estimate (assuming file
    # metadata and overheads are negligible).
    total_sizes += sizes
  report_sizes(total_sizes, 'Total', True, True)
  _dump_chart_json(args.output_dir, chartjson)
def main():
  """Parses arguments and runs high level flows.

  Supports both direct invocation (--output-dir) and the Chromium isolated
  script interface (--isolated-script-test-output), which additionally
  writes test_results.json and reports to ResultSink when available.
  """
  argparser = argparse.ArgumentParser(description='Writes LaCrOS size metrics.')
  argparser.add_argument('--chromium-output-directory',
                         dest='out_dir',
                         required=True,
                         type=os.path.realpath,
                         help='Location of the build artifacts.')
  argparser.add_argument('--arch',
                         required=True,
                         type=str,
                         help='The architecture of lacros, valid values: amd64,'
                         ' arm32, arm64')
  output_group = argparser.add_mutually_exclusive_group()
  output_group.add_argument('--output-dir',
                            default='.',
                            help='Directory to save chartjson to.')
  # Accepted to conform to the isolated script interface, but ignored.
  argparser.add_argument('--isolated-script-test-filter',
                         help=argparse.SUPPRESS)
  argparser.add_argument('--isolated-script-test-perf-output',
                         type=os.path.realpath,
                         help=argparse.SUPPRESS)
  output_group.add_argument(
      '--isolated-script-test-output',
      type=os.path.realpath,
      help='File to which results will be written in the simplified JSON '
      'output format.')
  args = argparser.parse_args()
  # Pessimistic default; flipped to valid only if _run_resource_sizes returns.
  isolated_script_output = {'valid': False, 'failures': []}
  if args.isolated_script_test_output:
    test_name = 'lacros_resource_sizes'
    # Redirect chartjson output next to the isolated-script results file.
    args.output_dir = os.path.join(
        os.path.dirname(args.isolated_script_test_output), test_name)
    if not os.path.exists(args.output_dir):
      os.makedirs(args.output_dir)
  try:
    _run_resource_sizes(args)
    isolated_script_output = {'valid': True, 'failures': []}
  finally:
    # Results are written even when _run_resource_sizes raises, so the
    # harness always sees a (possibly invalid) result document.
    if args.isolated_script_test_output:
      results_path = os.path.join(args.output_dir, 'test_results.json')
      with open(results_path, 'w') as output_file:
        json.dump(isolated_script_output, output_file)
      with open(args.isolated_script_test_output, 'w') as output_file:
        json.dump(isolated_script_output, output_file)
      result_sink_client = result_sink.TryInitClient()
      if result_sink_client:
        status = result_types.PASS
        if not isolated_script_output['valid']:
          status = result_types.UNKNOWN
        elif isolated_script_output['failures']:
          status = result_types.FAIL
        result_sink_client.Post(test_name, status, None, None, None)
# Script entry point.
if __name__ == '__main__':
  main()
|
{
"content_hash": "cd6bbc4afe713bb29f695e1c16808399",
"timestamp": "",
"source": "github",
"line_count": 394,
"max_line_length": 80,
"avg_line_length": 35.888324873096444,
"alnum_prop": 0.6515558698727015,
"repo_name": "chromium/chromium",
"id": "6004ae7f8832680dfbefd471579b2b07ccc08c3a",
"size": "14303",
"binary": false,
"copies": "7",
"ref": "refs/heads/main",
"path": "build/lacros/lacros_resource_sizes.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [],
"symlink_target": ""
}
|
"""
Talon.One API
Use the Talon.One API to integrate with your application and to manage applications and campaigns: - Use the operations in the [Integration API section](#integration-api) are used to integrate with our platform - Use the operation in the [Management API section](#management-api) to manage applications and campaigns. ## Determining the base URL of the endpoints The API is available at the same hostname as your Campaign Manager deployment. For example, if you are reading this page at `https://mycompany.talon.one/docs/api/`, the URL for the [updateCustomerSession](https://docs.talon.one/integration-api/#operation/updateCustomerSessionV2) endpoint is `https://mycompany.talon.one/v2/customer_sessions/{Id}` # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from talon_one.configuration import Configuration
class CouponRejectionReason(object):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """
    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    openapi_types = {
        'campaign_id': 'int',
        'coupon_id': 'int',
        'reason': 'str'
    }
    attribute_map = {
        'campaign_id': 'campaignId',
        'coupon_id': 'couponId',
        'reason': 'reason'
    }
    def __init__(self, campaign_id=None, coupon_id=None, reason=None, local_vars_configuration=None):  # noqa: E501
        """CouponRejectionReason - a model defined in OpenAPI"""  # noqa: E501
        if local_vars_configuration is None:
            local_vars_configuration = Configuration()
        self.local_vars_configuration = local_vars_configuration
        self._campaign_id = None
        self._coupon_id = None
        self._reason = None
        self.discriminator = None
        # Assigning via the properties triggers the setter-side validation.
        self.campaign_id = campaign_id
        self.coupon_id = coupon_id
        self.reason = reason
    @property
    def campaign_id(self):
        """Gets the campaign_id of this CouponRejectionReason.  # noqa: E501

        :return: The campaign_id of this CouponRejectionReason.  # noqa: E501
        :rtype: int
        """
        return self._campaign_id
    @campaign_id.setter
    def campaign_id(self, campaign_id):
        """Sets the campaign_id of this CouponRejectionReason.

        :param campaign_id: The campaign_id of this CouponRejectionReason.  # noqa: E501
        :type: int
        """
        # Required field: rejected when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and campaign_id is None:  # noqa: E501
            raise ValueError("Invalid value for `campaign_id`, must not be `None`")  # noqa: E501
        self._campaign_id = campaign_id
    @property
    def coupon_id(self):
        """Gets the coupon_id of this CouponRejectionReason.  # noqa: E501

        :return: The coupon_id of this CouponRejectionReason.  # noqa: E501
        :rtype: int
        """
        return self._coupon_id
    @coupon_id.setter
    def coupon_id(self, coupon_id):
        """Sets the coupon_id of this CouponRejectionReason.

        :param coupon_id: The coupon_id of this CouponRejectionReason.  # noqa: E501
        :type: int
        """
        # Required field: rejected when client-side validation is enabled.
        if self.local_vars_configuration.client_side_validation and coupon_id is None:  # noqa: E501
            raise ValueError("Invalid value for `coupon_id`, must not be `None`")  # noqa: E501
        self._coupon_id = coupon_id
    @property
    def reason(self):
        """Gets the reason of this CouponRejectionReason.  # noqa: E501

        :return: The reason of this CouponRejectionReason.  # noqa: E501
        :rtype: str
        """
        return self._reason
    @reason.setter
    def reason(self, reason):
        """Sets the reason of this CouponRejectionReason.

        :param reason: The reason of this CouponRejectionReason.  # noqa: E501
        :type: str
        """
        if self.local_vars_configuration.client_side_validation and reason is None:  # noqa: E501
            raise ValueError("Invalid value for `reason`, must not be `None`")  # noqa: E501
        # Closed enum defined by the OpenAPI spec; validated when enabled.
        allowed_values = ["CouponNotFound", "CouponPartOfNotRunningCampaign", "CouponLimitReached", "CampaignLimitReached", "ProfileLimitReached", "CouponRecipientDoesNotMatch", "CouponExpired", "CouponStartDateInFuture", "CouponRejectedByCondition", "EffectCouldNotBeApplied"]  # noqa: E501
        if self.local_vars_configuration.client_side_validation and reason not in allowed_values:  # noqa: E501
            raise ValueError(
                "Invalid value for `reason` ({0}), must be one of {1}"  # noqa: E501
                .format(reason, allowed_values)
            )
        self._reason = reason
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        for attr, _ in six.iteritems(self.openapi_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, CouponRejectionReason):
            return False
        return self.to_dict() == other.to_dict()
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        if not isinstance(other, CouponRejectionReason):
            return True
        return self.to_dict() != other.to_dict()
|
{
"content_hash": "b98d6979f8993e6df917f450d173a956",
"timestamp": "",
"source": "github",
"line_count": 179,
"max_line_length": 732,
"avg_line_length": 36.201117318435756,
"alnum_prop": 0.6128086419753086,
"repo_name": "talon-one/talon_one.py",
"id": "4fa25bb17b548ae3ddce0552b1c7b84cc809300c",
"size": "6497",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "talon_one/models/coupon_rejection_reason.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Makefile",
"bytes": "238"
},
{
"name": "Python",
"bytes": "5139586"
},
{
"name": "Shell",
"bytes": "1826"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add ``state.is_processed`` boolean flag (default False).

    Declarative Django migration; operations must not be edited by hand
    unless the model changed accordingly.
    """

    dependencies = [
        ('ivrs', '0011_auto_20150716_2255'),
    ]
    operations = [
        migrations.AddField(
            model_name='state',
            name='is_processed',
            field=models.BooleanField(default=False),
            preserve_default=True,
        ),
    ]
|
{
"content_hash": "e80178860530f80e648c286c2dbb6626",
"timestamp": "",
"source": "github",
"line_count": 19,
"max_line_length": 53,
"avg_line_length": 21.894736842105264,
"alnum_prop": 0.5889423076923077,
"repo_name": "klpdotorg/dubdubdub",
"id": "45f4f63a99fbc47978f363014c2296e3b22e4901",
"size": "440",
"binary": false,
"copies": "1",
"ref": "refs/heads/develop",
"path": "apps/ivrs/migrations/0012_state_is_processed.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Batchfile",
"bytes": "478"
},
{
"name": "CSS",
"bytes": "335110"
},
{
"name": "HTML",
"bytes": "655218"
},
{
"name": "JavaScript",
"bytes": "1941014"
},
{
"name": "PLpgSQL",
"bytes": "156345"
},
{
"name": "Python",
"bytes": "920256"
},
{
"name": "Shell",
"bytes": "10544"
}
],
"symlink_target": ""
}
|
""" colr_run.py
Run a subprocess, but don't let it output to stdout. Instead, capture it
and show an animation. Stdout output from the process goes straight to
/dev/null. Stderr too, if the -e option is given on the command line.
-Christopher Welborn 07-08-2019
"""
import os
import subprocess
import sys
from .base import __version__ as colr_version
from .colr import (
Colr as C,
auto_disable as colr_auto_disable,
)
from .progress import AnimatedProgress
from .progress_frames import (
_build_rainbow_variants,
Frames,
)
try:
    from .colr_docopt import docopt
except ImportError as ex:
    # BUGFIX: ImportError.name may be None (e.g. for hand-raised ImportErrors
    # propagated out of colr_docopt), which previously crashed this handler
    # with AttributeError on .lower(). Guard with `or ''`.
    if 'docopt' in (ex.name or '').lower():
        print(
            'Import error while import colr_docopt: {}'.format(ex),
            file=sys.stderr,
        )
        print(
            '\nYou must have Docopt installed to use this tool.',
            file=sys.stderr,
        )
        print('Install it with: `pip install docopt`', file=sys.stderr)
    else:
        print('Cannot import docopt!: {}'.format(ex), file=sys.stderr)
    sys.exit(1)
# Register the <name>_rainbow frame variants on the Frames class.
_build_rainbow_variants(Frames)
# Disable colors automatically when stdout is not a tty.
colr_auto_disable()
NAME = 'Colr Run'
VERSION = '0.0.1'
VERSIONSTR = '{} v. {}'.format(NAME, VERSION)
SCRIPT = os.path.split(os.path.abspath(sys.argv[0]))[1]
SCRIPTDIR = os.path.abspath(sys.path[0])
# Default delay, in seconds, between animation frames.
DEFAULT_DELAY = 0.3
USAGESTR = """{versionstr} (Colr v. {colr_version})
    Runs a program, captures/silences stdout, and prints an animation instead.
    Usage:
        {script} -h | -v
        {script} -l
        {script} [-a] [-e] [-d secs] [-f name] [-m msg] -- ARGS...
    Options:
        ARGS                   : Command and arguments to run.
        -a,--append            : Append command to message.
        -d secs,--delay secs   : Delay, in seconds, between animation frames.
                                 Default: {delay}s
        -e,--stderr            : Capture stderr also.
        -f name,--frames name  : Name of a frame set to use.
                                 Use -l to list known names.
        -h,--help              : Show this help message.
        -l,--listframes        : List available animated frames names.
        -m msg,--msg msg       : Message to display.
        -v,--version           : Show version.
    Basic Example:
        To run a program with the default settings, -- is still required:
            {script} -- bash -c 'x=0; while ((x<1000000)); do let x+=1; done'
        Any output from the program will ruin the animation. You can silence
        stderr output with -e if you don't need it:
            {script} -e -- some-long-running-command
    Exit Status:
        The exit status of {script} is the exit status of the command being
        executed. For {script} errors, the exit status is 1 for basic errors,
        and 2 for cancelled commands.
""".format(
    script=SCRIPT,
    versionstr=VERSIONSTR,
    colr_version=colr_version,
    delay=DEFAULT_DELAY,
)
def main(argd):
    """ Main entry point, expects docopt arg dict as argd.
        Returns an exit-status integer (0 on success).
    """
    if argd['--listframes']:
        return list_frames()
    frame_name = argd['--frames'] or 'dots_rainbow'
    try:
        frameset = Frames.get_by_name(frame_name)
    except ValueError:
        raise InvalidArg('not a known frame set: {}'.format(argd['--frames']))
    delay_secs = parse_float(argd['--delay'], default=DEFAULT_DELAY, minimum=0.1)
    return run_cmd(
        argd['ARGS'],
        delay=delay_secs,
        msg=argd['--msg'],
        frameset=frameset,
        append=argd['--append'],
        stderr=argd['--stderr'],
    )
def list_frames():
    """ Print all available frame-set names (basic sets first, then the
        color-variant suffixes), and return 0.
    """
    all_names = Frames.names()
    # Basic sets are the ones without embedded escape codes.
    basicnames = [
        name for name in all_names
        if not getattr(Frames, name).has_codes()
    ]
    print('\nAvailable Frames ({}):\n    '.format(len(basicnames)), end='')
    print('\n    '.join(basicnames))
    # Collect color-variant suffixes (skip _rgb variants) from the rest.
    suffixes = set()
    for name in all_names:
        if (name not in basicnames) and (not name.endswith('_rgb')):
            suffixes.add(name.rpartition('_')[-1])
    colored = sorted(suffixes)
    print(
        '\nColor variants available ({}):\n    '.format(len(colored)),
        end='',
    )
    print('\n    '.join(
        '<name>_{}'.format(suffix)
        for suffix in colored
    ))
    return 0
def parse_float(s, default=None, minimum=None):
""" Parse a string as a float. If the argument is "falsey", `default` is
returned. On error, an InvalidArg is raised.
Arguments:
s : String to parse.
default : Default value, when a "falsey" argument is given.
minimum : Minimum accepted value. InvalidArg is raised if this is
set and the value is less than `minimum`.
"""
if not s:
return default
try:
f = float(s)
except ValueError:
raise InvalidArg('not a float/number: {}'.format(s))
if (minimum is not None) and (f < minimum):
raise InvalidArg(
'below minimum value, {:.2f}: {:.2f}'.format(minimum, f)
)
return f
def print_err(*args, **kwargs):
    """ A wrapper for print() that uses stderr by default.
        Colorizes messages, unless a Colr itself is passed in.
    """
    if kwargs.get('file', None) is None:
        kwargs['file'] = sys.stderr
    sep = kwargs.get('sep', ' ')
    if kwargs['file'].isatty():
        # Destination is a tty: wrap plain strings in red, pass Colrs through.
        pieces = [
            str(arg) if isinstance(arg, C) else str(C(arg, 'red'))
            for arg in args
        ]
    else:
        # Not a tty: emit no escape codes at all.
        pieces = [
            str(arg.stripped() if isinstance(arg, C) else arg)
            for arg in args
        ]
    print(sep.join(pieces), **kwargs)
def run_cmd(
        args, msg=None, delay=None,
        frameset=None, append=False, stderr=False):
    """ Run a command, but capture its stdout. Show an animation instead.

        Arguments:
            args      : Command and arguments, as a list (passed to subprocess).
            msg       : Optional message to display beside the animation.
                        Falls back to the command string when not given.
            delay     : Seconds between animation frames (DEFAULT_DELAY if None).
            frameset  : Frame set for the animation (Frames.dots_rainbow if None).
            append    : When truthy, append the command string to `msg`.
            stderr    : When truthy, silence the child's stderr as well.

        Returns the child process's exit status, as documented in the module
        USAGESTR "Exit Status" section.
    """
    cmdstr = ' '.join(args)
    if msg:
        if append:
            msg = f'{msg}: {cmdstr}'
        # BUGFIX: a user-supplied msg without --append is now kept as-is
        # (previously it was overwritten by the command string).
    else:
        msg = cmdstr
    p = AnimatedProgress(
        msg,
        delay=DEFAULT_DELAY if delay is None else delay,
        frames=(frameset or Frames.dots_rainbow).prepend(' '),
        show_time=True,
        auto_disable=True,
    )
    p.start()
    try:
        ret = subprocess.check_call(
            args,
            stdout=subprocess.DEVNULL,
            stderr=subprocess.DEVNULL if stderr else None,
        )
    except subprocess.CalledProcessError as ex:
        # BUGFIX: propagate the child's real exit status instead of a flat 1,
        # matching USAGESTR's promise to return the command's exit status.
        ret = ex.returncode
    finally:
        # Always stop the animation, even on KeyboardInterrupt.
        p.stop()
    return ret
class InvalidArg(ValueError):
    """ Raised when the user has used an invalid argument. """
    def __init__(self, msg=None):
        self.msg = msg or ''

    def __str__(self):
        # An empty message gets the generic form.
        if not self.msg:
            return 'Invalid argument!'
        return f'Invalid argument, {self.msg}'
def entry_point():
    """ Wrapper for if __name__ == '__main__', for setuptools console scripts.
        Maps known failures to distinct exit codes: 1 bad args, 2 cancelled,
        3 broken pipe.
    """
    try:
        exit_code = main(docopt(USAGESTR, version=VERSIONSTR, script=SCRIPT))
    except InvalidArg as ex:
        print_err(ex)
        exit_code = 1
    except (EOFError, KeyboardInterrupt):
        print_err('\nUser cancelled.\n')
        exit_code = 2
    except BrokenPipeError:
        print_err('\nBroken pipe, input/output was interrupted.\n')
        exit_code = 3
    sys.exit(exit_code)
if __name__ == '__main__':
entry_point()
|
{
"content_hash": "78f366ef5c28a766218c4bfbb426f565",
"timestamp": "",
"source": "github",
"line_count": 247,
"max_line_length": 79,
"avg_line_length": 30.06477732793522,
"alnum_prop": 0.5741987611096149,
"repo_name": "welbornprod/colr",
"id": "17826c30705cd81fecf0d058bb9571f7eff2af99",
"size": "7474",
"binary": false,
"copies": "1",
"ref": "refs/heads/dev",
"path": "colr/colr_run.py",
"mode": "33261",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "550066"
},
{
"name": "Shell",
"bytes": "5797"
}
],
"symlink_target": ""
}
|
from django.contrib.auth.models import AnonymousUser
from .models import *
from django.test import TestCase
from django.db import transaction
import reversion
# Well-known fixture names shared by the helper functions and test cases below.
TEST_USER_NAME_CREATOR = 'test project creator'
TEST_USER_NAME_NOT_MEMBER = 'user is not a member'
TEST_PROJECT_PUBLIC_NAME = 'test project name public'
TEST_PROJECT_PRIVATE_NAME = 'test project name private'
TEST_PROJECT_TASK_FULLNAME = 'Test project task'
TEST_PROJECT_TASK_DESCRIPTION = '<strong>A</strong>'  # raw HTML; checked via description_html()
def get_public_project():
    """Fetch the public fixture project created in setUp."""
    lookup = {'fullname': TEST_PROJECT_PUBLIC_NAME}
    return Project.objects.get(**lookup)
def get_private_project():
    """Fetch the private fixture project created in setUp."""
    lookup = {'fullname': TEST_PROJECT_PRIVATE_NAME}
    return Project.objects.get(**lookup)
def get_creator_user():
    """Fetch the fixture user who created the test projects."""
    return User.objects.get(username=TEST_USER_NAME_CREATOR)
def get_user_not_member():
    """Fetch the fixture user who is not a member of any project."""
    return User.objects.get(username=TEST_USER_NAME_NOT_MEMBER)
def create_task():
    """Create, save, and return a Task on the public project,
    authored by the creator user.
    """
    project = get_public_project()
    author = get_creator_user()
    task = Task(
        project=project,
        fullname=TEST_PROJECT_TASK_FULLNAME,
        description=TEST_PROJECT_TASK_DESCRIPTION,
    )
    task.set_change_user(author)
    task.save()
    return task
class Project_Test(TestCase):
    """Exercises Project/Task access control, membership, task state
    transitions, comments and profile assignment.

    setUp creates two users (a creator and a non-member) plus one public and
    one private project, both owned by the creator.
    """
    def setUp(self):
        user_creator = User.objects.create_user( username = TEST_USER_NAME_CREATOR, password = '-' )
        user_creator.save()
        user_not_member = User.objects.create_user( username = TEST_USER_NAME_NOT_MEMBER, password = '-' )
        user_not_member.save()
        test_project_public = Project(fullname=TEST_PROJECT_PUBLIC_NAME)
        test_project_public.set_change_user(user_creator)
        test_project_public.private_type = PROJECT_VISIBLE_VISIBLE
        test_project_public.save()
        test_project_private = Project(fullname=TEST_PROJECT_PRIVATE_NAME)
        test_project_private.set_change_user(user_creator)
        test_project_private.private_type = PROJECT_VISIBLE_PRIVATE
        test_project_private.save()
    # --- repository / membership / admin checks ---
    def test_have_repo_false(self):
        test_project = get_public_project()
        self.assertEqual( test_project.have_repo(), False )
    def test_creator_is_member(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.is_member(user_creator), True )
    def test_not_member_is_member_False(self):
        test_project = get_public_project()
        user_not_member = get_user_not_member()
        self.assertEqual( test_project.is_member(user_not_member), False )
    def test_creator_is_member_None(self):
        test_project = get_public_project()
        self.assertEqual( test_project.is_member(None), False )
    def test_creator_is_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.is_admin(user_creator), True )
    def test_anon_is_NOT_admin(self):
        test_project = get_public_project()
        user_anon = AnonymousUser()
        self.assertEqual( test_project.is_admin(user_anon), False )
    def test_creator_can_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.can_admin(user_creator), True )
    # --- access-level resolution ---
    def test_creator_acl_admin(self):
        test_project = get_public_project()
        user_creator = get_creator_user()
        self.assertEqual( test_project.user_access_level(user_creator), PROJECT_ACCESS_ADMIN )
    def test_none_user_acl_admin_public(self):
        # Anonymous (None) users can view public projects...
        test_project = get_public_project()
        self.assertEqual( test_project.user_access_level( None ), PROJECT_ACCESS_VIEW )
    def test_none_user_acl_admin_private(self):
        # ...but get no access to private ones.
        test_project = get_private_project()
        self.assertEqual( test_project.user_access_level( None ), PROJECT_ACCESS_NONE )
    # --- project listing helpers ---
    def test_public_project_list(self):
        pl = GetAllPublicProjectList()
        self.assertEqual( get_public_project() in pl, True )
        self.assertEqual( pl.count(), 1 )
    def test_GetMemberedProjectList_None(self):
        self.assertEqual( GetMemberedProjectList( None ), {} )
    def test_GetMemberedProjectList_NotMember(self):
        self.assertEqual( GetMemberedProjectList( get_user_not_member() ).count(), 0 )
    def test_GetMemberedProjectList_Creator(self):
        self.assertEqual( GetMemberedProjectList( get_creator_user() ).count(), 2 )
    def test_GetFullMemberAdminList_public(self):
        self.assertEqual( get_public_project().GetFullMemberAdminList().count(), 1 )
    # --- join rules: members can't re-join; non-members may join public only ---
    def test_member_User_can_join_public( self ):
        self.assertEqual( get_public_project().can_join( get_creator_user() ), False )
    def test_member_User_can_join_private( self ):
        self.assertEqual( get_private_project().can_join( get_creator_user() ), False )
    def test_not_member_User_can_join_public( self ):
        self.assertEqual( get_public_project().can_join( get_user_not_member() ), True )
    def test_not_member_User_can_join_private( self ):
        self.assertEqual( get_private_project().can_join( get_user_not_member() ), False )
    # --- task creation ---
    def test_Create_Task_check_fullname( self ):
        test_task = create_task()
        self.assertEqual( test_task.fullname, TEST_PROJECT_TASK_FULLNAME )
    def test_Create_Task_check_state( self ):
        test_task = create_task()
        self.assertEqual( test_task.state, TASK_STATE_NEW )
    def test_Create_Task_check_Description( self ):
        # Description HTML is rendered verbatim for this input.
        test_task = create_task()
        self.assertEqual( test_task.description_html(), TEST_PROJECT_TASK_DESCRIPTION )
    def test_task_Add_Comment( self ):
        test_task = create_task()
        user_creator = get_creator_user()
        c = TaskComment( parenttask = test_task, comment='123' )
        c.set_change_user(user_creator)
        c.save()
        self.assertEqual( c.comment, '123' )
        # Commentators list excludes the user passed as the second argument.
        self.assertEqual( GetTaskCommentators( test_task ).count(), 1 )
        self.assertEqual( GetTaskCommentators( test_task, user_creator ).count(), 0 )
    # --- task state transitions ---
    def test_Task_Set_Wrong_State( self ):
        test_task = create_task()
        with self.assertRaises(Exception):
            test_task.set_task_state( get_creator_user(), -10202 ) # some imposible state
    def test_Task_Set_Closed_State( self ):
        # Closing sets finished_fact_at; re-opening clears it again.
        test_task = create_task()
        test_task.set_task_state( get_creator_user(), TASK_STATE_CLOSED )
        self.assertEqual( test_task.state, TASK_STATE_CLOSED )
        self.assertFalse( test_task.get_opened() )
        self.assertEqual( test_task.get_state_name(), 'Closed' )
        self.assertIsNotNone( test_task.finished_fact_at )
        test_task.set_task_state( get_creator_user(), TASK_STATE_NEW )
        self.assertEqual( test_task.state, TASK_STATE_NEW )
        self.assertIsNone( test_task.finished_fact_at )
        self.assertTrue( test_task.get_opened() )
        self.assertEqual( test_task.get_state_name(), 'New' )
    # --- profile assignment ---
    def test_assignee( self ):
        t = Get_User_Tasks( None )
        self.assertEqual( t.count(), 0 )
        test_task = create_task()
        self.assertEqual( test_task.get_profiles().count(), 0 )
        tp = TaskProfile( parenttask = test_task, profile = get_creator_user().profile )
        tp.set_change_user( get_creator_user() )
        # Every priority in the allowed list saves; one past the maximum raises.
        for p in TASK_PROFILE_PRIORITY_LIST:
            tp.priority = p
            tp.save()
            self.assertEqual( tp.priority, p )
        with self.assertRaises(Exception):
            tp.priority = max( TASK_PROFILE_PRIORITY_LIST ) + 1
            tp.save()
    def test_linked_resource(self):
        test_task = create_task()
        self.assertEqual( test_task.get_profiles().count(), 0 ) # new task has no profiles linked
        p = Get_Profiles_Available2Task( test_task.id )
        self.assertEqual( p.count(), 1 ) # 1 profile is available
        self.assertEqual( p.filter( user = get_creator_user() ).count(), 1 ) # creator_user profile is available
    def test_sub_project_default_false(self):
        test_project = get_public_project()
        self.assertEqual( test_project.use_sub_projects, False )
class Project_Set_Wrong_Private_Type_Test(TestCase):
    """Saving a Project with an out-of-range private_type value must raise."""
    def test_Project_Set_Wrong_Private_Type( self ):
        user_creator = User.objects.create_user( username = TEST_USER_NAME_CREATOR, password = '-' )
        user_creator.save()
        user_not_member = User.objects.create_user( username = TEST_USER_NAME_NOT_MEMBER, password = '-' )
        user_not_member.save()
        # -1111 is not a valid visibility constant; save() is expected to raise.
        with self.assertRaises(Exception):
            test_project_public = Project(fullname=TEST_PROJECT_PUBLIC_NAME)
            test_project_public.set_change_user(user_creator)
            test_project_public.private_type = -1111
            test_project_public.save()
|
{
"content_hash": "1168a3c039a178fe6298be9e1a0aa796",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 121,
"avg_line_length": 40.23041474654378,
"alnum_prop": 0.6624284077892325,
"repo_name": "postpdm/ich_bau",
"id": "6cb5a7244cfc5d99ee28d9cc3baddfe64de668d5",
"size": "8730",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "project/test_project.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "HTML",
"bytes": "59357"
},
{
"name": "Python",
"bytes": "195507"
}
],
"symlink_target": ""
}
|
"""Tests for imfusion.expression.test module."""
# pylint: disable=wildcard-import,redefined-builtin,unused-wildcard-import
from __future__ import absolute_import, division, print_function
from builtins import *
# pylint: enable=wildcard-import,redefined-builtin,unused-wildcard-import
import pytest
from imfusion.expression.counts import read_exon_counts
from imfusion.expression import test
from imfusion.model import Insertion
from imfusion.util.frozendict import frozendict
# pylint: disable=no-self-use,redefined-outer-name
@pytest.fixture
def insertions():
    """Example insertions in ENSMUSG00000051951 (5 exons).

    Two samples (S1, S2) with insertions at fixed genomic positions; the
    positions are chosen relative to the gene's exon boundaries (see the
    per-entry comments).
    """
    return [
        # After third exon.
        Insertion(
            id='1',
            seqname='1',
            position=3207327,
            strand=1,
            support_junction=1,
            support_spanning=1,
            support=2,
            metadata=frozendict({
                'gene_id': 'ENSMUSG00000051951',
                'sample': 'S1'})),
        # After first exon.
        Insertion(
            id='2',
            seqname='1',
            position=3214491,
            strand=-1,
            support_junction=1,
            support_spanning=1,
            support=2,
            metadata=frozendict({
                'gene_id': 'ENSMUSG00000051951',
                'sample': 'S2'}))
    ] # yapf: disable
@pytest.fixture
def exon_counts():
    """Example exon counts, loaded from the bundled data file."""
    counts_path = pytest.helpers.data_path(
        'exon_counts.txt', relative_to=__file__)
    return read_exon_counts(counts_path)
class TestSplitCounts(object):
    """Tests for split_counts function.

    split_counts partitions a gene's exon counts into exons before/after each
    sample's insertion site, and reports samples that had to be dropped.
    """
    def test_proper_example(self, insertions, exon_counts):
        """Tests example with two insertions, after exons 2 and 4."""
        before, after, dropped = test.split_counts(
            exon_counts, insertions, gene_id='ENSMUSG00000051951')
        assert len(before) == 1
        assert len(after) == 2
        assert dropped == set()
    def test_proper_example_df(self, insertions, exon_counts):
        """Tests same example, using dataframe input for insertions."""
        insertion_df = Insertion.to_frame(insertions)
        before, after, dropped = test.split_counts(
            exon_counts, insertion_df, gene_id='ENSMUSG00000051951')
        assert len(before) == 1
        assert len(after) == 2
        assert dropped == set()
    def test_before_gene(self, insertions, exon_counts):
        """Tests dropping sample with an insertion before the gene"""
        # Move S2's insertion upstream of the gene; S2 should be dropped.
        insertions[1] = insertions[1]._replace(position=3215652)
        before, after, dropped = test.split_counts(
            exon_counts, insertions, gene_id='ENSMUSG00000051951')
        assert len(before) == 3
        assert len(after) == 2
        assert dropped == {'S2'}
    def test_after_gene(self, insertions, exon_counts):
        """Tests dropping sample with an insertion after the gene"""
        # Move S1's insertion downstream of the gene; S1 should be dropped.
        insertions[0] = insertions[0]._replace(position=3205801)
        before, after, dropped = test.split_counts(
            exon_counts, insertions, gene_id='ENSMUSG00000051951')
        assert len(before) == 1
        assert len(after) == 4
        assert dropped == {'S1'}
    def test_in_exon(self, insertions, exon_counts):
        """Tests insertion in fourth exon."""
        # An insertion inside an exon excludes that exon from both sides.
        insertions[0] = insertions[0]._replace(position=3207217)
        before, after, dropped = test.split_counts(
            exon_counts, insertions, gene_id='ENSMUSG00000051951')
        assert len(before) == 1
        assert len(after) == 1
        assert dropped == set()
@pytest.fixture
def test_insertions():
    """Example insertions for Trp53bp2 and Nf1."""
    insertions_path = pytest.helpers.data_path(
        'insertions.txt', relative_to=__file__)
    records = Insertion.from_csv(insertions_path, sep='\t')
    return list(records)
@pytest.fixture
def test_exon_counts():
    """Example exon counts for Trp53bp2 and Nf1."""
    counts_path = pytest.helpers.data_path(
        'exon_counts_test.txt', relative_to=__file__)
    return read_exon_counts(counts_path)
class TestTestDeExon(object):
    """Tests for test_de function.

    Uses the SB dataset fixtures: Trp53bp2 (ENSMUSG00000026510) is a known
    positive differential-expression example, Nf1 (ENSMUSG00000020716) a
    negative one.
    """
    def test_pos_example_trp53bp2(self, test_insertions, test_exon_counts):
        """Tests positive example of DE in Trp53bp2 in the SB dataset."""
        result = test.test_de_exon(
            test_insertions, test_exon_counts, gene_id='ENSMUSG00000026510')
        assert result.p_value < 0.01
        assert result.direction == 1
    def test_pos_example_trp53bp2_df(self, test_insertions, test_exon_counts):
        """Tests positive example of DE in Trp53bp2 with dataframe input."""
        test_insertion_df = Insertion.to_frame(test_insertions)
        result = test.test_de_exon(
            test_insertion_df, test_exon_counts, gene_id='ENSMUSG00000026510')
        assert result.p_value < 0.01
        assert result.direction == 1
    def test_neg_example_nf1(self, test_insertions, test_exon_counts):
        """Tests negative example of DE in Nf1 in the SB dataset."""
        result = test.test_de_exon(
            test_insertions, test_exon_counts, gene_id='ENSMUSG00000020716')
        assert result.p_value > 0.05
        assert result.direction == 1
    # The plotting tests only exercise the code paths (no assertions).
    def test_plot_boxplot(self, test_insertions, test_exon_counts):
        """Exercises plotting of boxplot."""
        result = test.test_de_exon(
            test_insertions, test_exon_counts, gene_id='ENSMUSG00000026510')
        result.plot_boxplot()
    def test_plot_sums(self, test_insertions, test_exon_counts):
        """Exercises plotting of sums."""
        result = test.test_de_exon(
            test_insertions, test_exon_counts, gene_id='ENSMUSG00000026510')
        result.plot_sums()
|
{
"content_hash": "3ac655d7734e45e06dcd11a620c595eb",
"timestamp": "",
"source": "github",
"line_count": 184,
"max_line_length": 78,
"avg_line_length": 31.13586956521739,
"alnum_prop": 0.6250654564496422,
"repo_name": "jrderuiter/im-fusion",
"id": "dc53f8cf492256197a4fca3b32284c11a690eead",
"size": "5753",
"binary": false,
"copies": "2",
"ref": "refs/heads/develop",
"path": "tests/imfusion/expression/test_test.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Python",
"bytes": "295577"
},
{
"name": "Shell",
"bytes": "69"
}
],
"symlink_target": ""
}
|
if __name__ == '__main__':
    import coverage

    coverage_tracker = coverage.Coverage()
    coverage_tracker.start()
    # Import after coverage starts so module-level lines get measured too.
    import sys
    import pytest

    # Run the suite; pytest's return code tells CI whether tests failed.
    exit_code = pytest.main(sys.argv[1:])
    coverage_tracker.stop()
    coverage_tracker.save()
    coverage_tracker.html_report()  # HTML coverage report on disk
    coverage_tracker.report()       # text coverage report to stdout
    sys.exit(exit_code)
|
{
"content_hash": "5cf1babf1deee06096a656fb62877fc6",
"timestamp": "",
"source": "github",
"line_count": 21,
"max_line_length": 67,
"avg_line_length": 23.428571428571427,
"alnum_prop": 0.6321138211382114,
"repo_name": "MaxPoint/spylon",
"id": "db8f9aef05cfd2351710d51c390b1b4c7b07aa1d",
"size": "514",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "run_tests.py",
"mode": "33261",
"license": "bsd-3-clause",
"language": [
{
"name": "Python",
"bytes": "152813"
}
],
"symlink_target": ""
}
|
import _plotly_utils.basevalidators
class TicksValidator(_plotly_utils.basevalidators.EnumeratedValidator):
    """Enumerated validator for ``layout.scene.yaxis.ticks``.

    Accepts "outside", "inside", or "" (ticks drawn outside/inside the
    axis, or not at all).
    """

    def __init__(self, plotly_name="ticks", parent_name="layout.scene.yaxis", **kwargs):
        # Pull overridable defaults out of kwargs, forwarding the remainder.
        edit_type = kwargs.pop("edit_type", "plot")
        role = kwargs.pop("role", "style")
        values = kwargs.pop("values", ["outside", "inside", ""])
        super(TicksValidator, self).__init__(
            plotly_name=plotly_name,
            parent_name=parent_name,
            edit_type=edit_type,
            role=role,
            values=values,
            **kwargs
        )
|
{
"content_hash": "50c9105987d94018946d193aead9ad55",
"timestamp": "",
"source": "github",
"line_count": 13,
"max_line_length": 88,
"avg_line_length": 39.92307692307692,
"alnum_prop": 0.5973025048169557,
"repo_name": "plotly/python-api",
"id": "ab4cafbea5905bd2c5ea9d502555daa9fcb020c5",
"size": "519",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "packages/python/plotly/plotly/validators/layout/scene/yaxis/_ticks.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "JavaScript",
"bytes": "6870"
},
{
"name": "Makefile",
"bytes": "1708"
},
{
"name": "Python",
"bytes": "823245"
},
{
"name": "Shell",
"bytes": "3238"
}
],
"symlink_target": ""
}
|
from genshi.core import START, END, XML_NAMESPACE, DOCTYPE, TEXT, \
START_NS, END_NS, START_CDATA, END_CDATA, PI, COMMENT
from genshi.output import NamespaceFlattener
import _base
from html5lib.constants import voidElements
class TreeWalker(_base.TreeWalker):
    """html5lib tree walker over a Genshi markup stream.

    Translates Genshi stream events into html5lib walker tokens, collapsing
    the START (and any matching END) of a void element into one EmptyTag.
    NOTE(review): this module looks like Python 2 code (bare `import _base`,
    and `None <= int` comparison below).
    """
    def __iter__(self):
        # Current element nesting depth in the flattened stream.
        depth = 0
        # While set, suppress token emission until depth returns to this
        # value -- used to skip descendants of an already-emitted EmptyTag.
        ignore_until = None
        # Events are handled one step behind, so tokens() can peek at the
        # event that follows (`next`) when deciding how to emit void tags.
        previous = None
        for event in NamespaceFlattener(prefixes={
          'http://www.w3.org/1999/xhtml': ''
        })(self.tree):
            if previous is not None:
                if previous[0] == START:
                    depth += 1
                # On Python 2, None <= int is always True, so this resets a
                # cleared ignore_until harmlessly.
                if ignore_until <= depth:
                    ignore_until = None
                if ignore_until is None:
                    for token in self.tokens(previous, event):
                        yield token
                        if token["type"] == "EmptyTag":
                            ignore_until = depth
                if previous[0] == END:
                    depth -= 1
            previous = event
        # Flush the final buffered event (it has no following event).
        if previous is not None:
            if ignore_until is None or ignore_until <= depth:
                for token in self.tokens(previous, None):
                    yield token
            elif ignore_until is not None:
                raise ValueError("Illformed DOM event stream: void element without END_ELEMENT")
    def tokens(self, event, next):
        """Yield html5lib token(s) for one Genshi event.

        `next` is the following stream event (None at end of stream); a void
        element passes a flag when its matching END does not come next.
        """
        kind, data, pos = event
        if kind == START:
            tag, attrib = data
            if tag in voidElements:
                # Third argument is truthy when the next event is NOT this
                # tag's END, i.e. the void element appears to have children.
                for token in self.emptyTag(tag, list(attrib), \
                        not next or next[0] != END or next[1] != tag):
                    yield token
            else:
                yield self.startTag(tag, list(attrib))
        elif kind == END:
            # END for void elements was already folded into the EmptyTag.
            if data not in voidElements:
                yield self.endTag(data)
        elif kind == COMMENT:
            yield self.comment(data)
        elif kind == TEXT:
            for token in self.text(data):
                yield token
        elif kind == DOCTYPE:
            yield self.doctype(*data)
        elif kind in (XML_NAMESPACE, DOCTYPE, START_NS, END_NS, \
                START_CDATA, END_CDATA, PI):
            # Namespace/CDATA/PI events have no html5lib equivalent.
            pass
        else:
            yield self.unknown(kind)
|
{
"content_hash": "55006566b06732102e6acbc65e32efb2",
"timestamp": "",
"source": "github",
"line_count": 67,
"max_line_length": 96,
"avg_line_length": 33.6865671641791,
"alnum_prop": 0.5104120513956579,
"repo_name": "dewitt/webfingerclient-dclinton",
"id": "fd02a6345489fe9ef4c9e3d395041ec2ef90457b",
"size": "2257",
"binary": false,
"copies": "4",
"ref": "refs/heads/master",
"path": "third_party/html5lib/treewalkers/genshistream.py",
"mode": "33188",
"license": "apache-2.0",
"language": [
{
"name": "CSS",
"bytes": "3310"
},
{
"name": "Protocol Buffer",
"bytes": "3820"
},
{
"name": "Python",
"bytes": "35832"
}
],
"symlink_target": ""
}
|
from __future__ import print_function, absolute_import
import warnings
from contextlib import contextmanager
from numba.tests.support import override_config, TestCase
from numba.cuda.testing import skip_on_cudasim
from numba import unittest_support as unittest
from numba import cuda, types
from numba.cuda.testing import SerialMixin
@skip_on_cudasim("Skipped on simulator")
class TestCudaDebugInfo(SerialMixin, TestCase):
    """Tests that the deprecated kernel attributes `.autotune` and
    `.occupancy` emit DeprecationWarning.

    NOTE(review): the class name says "DebugInfo" but the file is
    test_deprecation.py and the tests cover deprecation warnings -- the
    name looks like a copy-paste leftover. Renaming would change test
    discovery/reporting, so it is left unchanged here.
    """
    @contextmanager
    def assert_deprecation_warning(self):
        # Record every warning raised inside the block for inspection.
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter("always")
            yield w
    def test_autotune(self):
        @cuda.jit("(int32[:],)")
        def foo(xs):
            xs[0] = 1
        # Accessing .autotune warns exactly once.
        with self.assert_deprecation_warning() as w:
            foo.autotune
        assert len(w) == 1
        assert issubclass(w[-1].category, DeprecationWarning)
        assert ".autotune" in str(w[-1].message)
        # .occupancy warns itself and also triggers the .autotune warning.
        with self.assert_deprecation_warning() as w:
            foo.occupancy
        assert len(w) == 2
        assert issubclass(w[0].category, DeprecationWarning)
        assert ".occupancy" in str(w[0].message)
        assert issubclass(w[1].category, DeprecationWarning)
        assert ".autotune" in str(w[1].message)
if __name__ == '__main__':
unittest.main()
|
{
"content_hash": "c6758d9aaadfb7c9b5c296deafeb8208",
"timestamp": "",
"source": "github",
"line_count": 44,
"max_line_length": 65,
"avg_line_length": 31.65909090909091,
"alnum_prop": 0.6439339554917445,
"repo_name": "jriehl/numba",
"id": "f3888425020a6019dbc4523bc9c2ecb42292c4eb",
"size": "1393",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "numba/cuda/tests/cudapy/test_deprecation.py",
"mode": "33188",
"license": "bsd-2-clause",
"language": [
{
"name": "Batchfile",
"bytes": "7023"
},
{
"name": "C",
"bytes": "657637"
},
{
"name": "C++",
"bytes": "49158"
},
{
"name": "Cuda",
"bytes": "214"
},
{
"name": "GDB",
"bytes": "101"
},
{
"name": "HTML",
"bytes": "3464"
},
{
"name": "Jupyter Notebook",
"bytes": "110326"
},
{
"name": "Python",
"bytes": "6611899"
},
{
"name": "Shell",
"bytes": "7290"
}
],
"symlink_target": ""
}
|
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
# pandas display options
pd.options.display.max_rows = 100
pd.options.display.max_columns = 25
pd.options.display.width = 1000
# Prediction horizons, in trading days.
one_day = 1
one_week = 5
one_month = 21
one_quarter = 63
# Gradient-descent hyperparameters.
learning_rate = 0.0001
training_epochs = 1
# data to use in training and predictions
#features = ['Close', 'Volume', 'dx', 'd2x', 'momentum'] # today's close
# read in file
def read_data(file_name, week=5, month=21, quarter=63):
    """Load one index's daily prices and derive features plus future-price targets.

    Arguments:
        file_name : CSV path with a date index and at least Open/Close/Volume
                    columns, ordered newest-to-oldest (as downloaded).
        week, month, quarter : Horizons in trading days for the next_week /
                    next_month / next_quarter target columns. Defaults mirror
                    the module-level one_week / one_month / one_quarter.

    Returns:
        (stock, train_stock, hold_out_stock) -- the full feature frame
        (flipped to oldest-to-newest), the 1985-2015 training slice, and
        the post-2016 hold-out slice.
    """
    stock = pd.read_csv(file_name, parse_dates=True, index_col=0)
    # Future prices used as regression targets. The raw file is ordered
    # newest-to-oldest, so shift() pulls values from *more recent* rows.
    stock['next_week'] = stock['Close'].shift(week)
    stock['next_month'] = stock['Close'].shift(month)
    stock['next_quarter'] = stock['Close'].shift(quarter)
    # Scale the open price.
    # NOTE(review): (x - min) / max is not standard min-max scaling;
    # (x - min) / (max - min) was probably intended. Left as-is to preserve
    # the existing model's behavior.
    stock['Open'] = (stock['Open'] - stock['Open'].min()) / stock['Open'].max()
    # Physics-style derived features of the scaled open price.
    stock['Velocity'] = stock['Open'] - stock['Open'].shift(1)
    stock['Acceleration'] = stock['Velocity'] - stock['Velocity'].shift(1)
    stock['Momentum'] = stock['Open'] * stock['Velocity']
    stock['Energy'] = stock['Open'] * stock['Velocity'] * stock['Velocity']
    stock['Force'] = stock['Open'] * stock['Acceleration']
    stock['Elastic'] = stock['Open'] * stock['Open']
    stock['VelocityAbs'] = stock['Velocity'].abs()
    stock['AccelerationAbs'] = stock['Acceleration'].abs()
    stock['MomentumAbs'] = stock['Momentum'].abs()
    stock['EnergyAbs'] = stock['Energy'].abs()
    stock['ForceAbs'] = stock['Force'].abs()
    stock['ElasticAbs'] = stock['Elastic'].abs()
    # Drop the rows made incomplete by the shifts above.
    stock = stock.dropna(axis=0)
    # Flip from newest-to-oldest to oldest-to-newest.
    stock = stock.iloc[::-1]
    # Hold out everything after 1/1/2016 for final evaluation.
    hold_out_stock = stock.loc[stock.index > '01-01-2016']
    # Train/test on 1/1/1985 - 12/31/2015.
    # BUGFIX: the second filter previously re-sliced `stock` instead of
    # `train_stock`, which silently discarded the 1985 lower bound.
    train_stock = stock.loc[stock.index > '01-01-1985']
    train_stock = train_stock.loc[train_stock.index < '12-31-2015']
    # The full frame is needed to walk back dates when testing hold-out data.
    return stock, train_stock, hold_out_stock
#############################################################################################
#############################################################################################
# split into train, test, predict
# Load each index, splitting into full / training / hold-out frames.
# NOTE(review): the "Training scores:" heading is a leftover -- read_data
# only loads and splits, it prints no scores.
print("Training scores:")
print("DJIA")
dj_stock, dj_train, dj_hold_out = read_data('data/djia.csv')
print("S&P")
sp_stock, sp_train, sp_hold_out = read_data('data/S&P.csv')
print("Russell")
r_stock, r_train, r_hold_out = read_data('data/Russell2000.csv')
print("NASDAQ")
n_stock, n_train, n_hold_out = read_data('data/nasdaq.csv')
#############################################################################################
# check correlations
def check_features():
    """ Print feature/target correlations for the DJIA frame (dj_stock). """
    print("**************** Correlations ********************************")
    candidate_cols = [
        'Open', 'Volume', 'next_week', 'next_month', 'next_quarter',
        'Velocity', 'Momentum', 'Energy', 'Elastic', 'VelocityAbs',
        'AccelerationAbs', 'MomentumAbs', 'EnergyAbs', 'ForceAbs',
        'ElasticAbs',
    ]
    corr_matrix = dj_stock[candidate_cols].corr()
    print(corr_matrix[['Open', 'next_week', 'next_month', 'next_quarter']])
# ditch features that don't affect the future prices
check_features()
# use correlations function below to find good features
features = ['Open', 'Volume', 'Velocity', 'Acceleration', 'Momentum', 'Energy', 'Force', 'Elastic', 'VelocityAbs', 'AccelerationAbs', 'MomentumAbs', 'EnergyAbs', 'ForceAbs', 'ElasticAbs']
# Deliberately overwritten: only these two features are actually used below.
features = ['Open', 'Elastic']
target = ['next_quarter'] # next_week, next_month, next_quarter
# convert current training set to numpy array
# NOTE(review): DataFrame.as_matrix() was removed in pandas 1.0, and
# `columns=[features]` wraps an already-list value in another list --
# confirm against the pandas version this was written for.
x_train = dj_train.as_matrix(columns=[features])
y_train = dj_train.as_matrix(columns=[target])
n_features = len(features)
n_out = 1
n_samples = len(y_train)
# Untyped TF1 placeholders for one sample's features (X) and target (Y).
X = tf.placeholder('float')
Y = tf.placeholder('float')
def model(X, w):
    # Linear model without a bias term: element-wise product of the feature
    # vector X and the weights w.
    return tf.multiply(X, w)
# Weight matrix (n_features x 1), randomly initialized.
w = tf.Variable(tf.random_normal([n_features, n_out]), name='weights')
y_model = model(X, w)
# Squared-error cost for a single sample.
cost = tf.square(Y - y_model)
# NOTE(review): `predict` is built but never run below -- confirm it is needed.
predict = tf.nn.relu(tf.multiply(w, X))
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
sess = tf.Session()
init = tf.global_variables_initializer()
sess.run(init)
# Plain per-sample SGD: one step per sample, `training_epochs` passes.
for epoch in range(training_epochs):
    for (x, y) in zip(x_train, y_train):
        sess.run(train_op, feed_dict={X: x, Y: y})
# Pull the learned weights out of the session for the plotting code below.
w_value = sess.run(w)
sess.close()
################################################
# plot training data
################################################
plt.figure(figsize=(15,15))
plt.suptitle("Test Stock Predictions")
ax1 = plt.subplot(211)
ax1.plot(y_train, 'b')
# Manual prediction: dot each sample's features with the learned weights.
y_learned = np.empty([n_samples])
i = 0
for x in x_train:
    y_learned[i] = (x * w_value).sum()
    i += 1
ax1.plot(y_learned, 'r')
ax1.set_title("Training data predictions")
#############################################
# plot hold out data
##############################################
x_test = dj_hold_out.as_matrix(columns=[features])
y_test = dj_hold_out.as_matrix(columns=[target])
# Shift the x-range so hold-out points plot after the training series.
z = range(len(y_learned), len(y_learned)+len(y_test))
ax2 = plt.subplot(212, sharex=ax1, sharey=ax1)
ax2.plot(z, y_test, 'b')
y_predicted = np.empty([len(y_test)])
i = 0
for x in x_test:
    y_predicted[i] = (x * w_value).sum()
    i += 1
ax2.plot(z, y_predicted, 'r')
ax2.set_title("Hold out data predictions")
plt.savefig('LinearRegression.png')
plt.show()
print(len(dj_hold_out))
{
"content_hash": "632dbb1d2da5bdab10f0383b63a88f48",
"timestamp": "",
"source": "github",
"line_count": 217,
"max_line_length": 209,
"avg_line_length": 26.976958525345623,
"alnum_prop": 0.5978817902289033,
"repo_name": "timestocome/Test-stock-prediction-algorithms",
"id": "fdbf8f634f7233fbac8fa34c6f866b32ea5e2160",
"size": "6073",
"binary": false,
"copies": "1",
"ref": "refs/heads/master",
"path": "StockMarketLinearRegression/LinearRegression.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Jupyter Notebook",
"bytes": "455373"
},
{
"name": "Python",
"bytes": "306406"
}
],
"symlink_target": ""
}
|
from sqlalchemy import bindparam
from sqlalchemy import Column
from sqlalchemy import delete
from sqlalchemy import exc
from sqlalchemy import ForeignKey
from sqlalchemy import func
from sqlalchemy import insert
from sqlalchemy import inspect
from sqlalchemy import Integer
from sqlalchemy import literal_column
from sqlalchemy import null
from sqlalchemy import or_
from sqlalchemy import select
from sqlalchemy import String
from sqlalchemy import testing
from sqlalchemy import text
from sqlalchemy import union
from sqlalchemy import update
from sqlalchemy import util
from sqlalchemy.orm import aliased
from sqlalchemy.orm import column_property
from sqlalchemy.orm import contains_eager
from sqlalchemy.orm import deferred
from sqlalchemy.orm import join as orm_join
from sqlalchemy.orm import joinedload
from sqlalchemy.orm import query_expression
from sqlalchemy.orm import relationship
from sqlalchemy.orm import undefer
from sqlalchemy.orm import with_expression
from sqlalchemy.orm import with_loader_criteria
from sqlalchemy.orm import with_polymorphic
from sqlalchemy.sql import and_
from sqlalchemy.sql import sqltypes
from sqlalchemy.sql.selectable import Join as core_join
from sqlalchemy.sql.selectable import LABEL_STYLE_DISAMBIGUATE_ONLY
from sqlalchemy.sql.selectable import LABEL_STYLE_TABLENAME_PLUS_COL
from sqlalchemy.testing import assert_raises_message
from sqlalchemy.testing import AssertsCompiledSQL
from sqlalchemy.testing import eq_
from sqlalchemy.testing import fixtures
from sqlalchemy.testing import is_
from sqlalchemy.testing.fixtures import fixture_session
from sqlalchemy.testing.util import resolve_lambda
from sqlalchemy.util.langhelpers import hybridproperty
from .inheritance import _poly_fixtures
from .test_query import QueryTest
from ..sql.test_compiler import CorrelateTest as _CoreCorrelateTest
# TODO:
# composites / unions, etc.
class SelectableTest(QueryTest, AssertsCompiledSQL):
    """Tests for ORM-enabled ``select()`` constructs: ``filter_by()``,
    subquery column export, ``column_descriptions`` / DML descriptions,
    FROM-list inspection and ordering, and LIMIT/OFFSET/FETCH rendering.
    """

    __dialect__ = "default"

    def test_filter_by(self):
        # filter_by() against a mapped entity renders WHERE criteria
        # using the entity's attributes.
        User, Address = self.classes("User", "Address")
        stmt = select(User).filter_by(name="ed")
        self.assert_compile(
            stmt,
            "SELECT users.id, users.name FROM users "
            "WHERE users.name = :name_1",
        )

    def test_c_accessor_not_mutated_subq(self):
        """test #6394, ensure all_selected_columns is generated each time"""
        User = self.classes.User
        s1 = select(User.id)
        # accessing .subquery().c twice must yield the same keys both times
        eq_(s1.subquery().c.keys(), ["id"])
        eq_(s1.subquery().c.keys(), ["id"])

    def test_integration_w_8285_subc(self):
        # select() accepts the tuple-of-names accessor on Table.c (#8285);
        # the resulting columns must also export through a subquery.
        Address = self.classes.Address
        s1 = select(
            Address.id, Address.__table__.c["user_id", "email_address"]
        )
        self.assert_compile(
            s1,
            "SELECT addresses.id, addresses.user_id, "
            "addresses.email_address FROM addresses",
        )
        subq = s1.subquery()
        self.assert_compile(
            select(subq.c.user_id, subq.c.id),
            "SELECT anon_1.user_id, anon_1.id FROM (SELECT addresses.id AS "
            "id, addresses.user_id AS user_id, addresses.email_address "
            "AS email_address FROM addresses) AS anon_1",
        )

    def test_scalar_subquery_from_subq_same_source(self):
        """test #6394, ensure all_selected_columns is generated each time"""
        User = self.classes.User
        s1 = select(User.id)
        # compiling the same construct twice must produce identical SQL
        # (no hidden state mutation across compilations)
        for i in range(2):
            stmt = s1.subquery().select().scalar_subquery()
            self.assert_compile(
                stmt,
                "(SELECT anon_1.id FROM "
                "(SELECT users.id AS id FROM users) AS anon_1)",
            )

    def test_froms_single_table(self):
        # a single-entity select reports the mapped table as its FROM
        User, Address = self.classes("User", "Address")
        stmt = select(User).filter_by(name="ed")
        eq_(stmt.get_final_froms(), [self.tables.users])

    def test_froms_join(self):
        # joining along a relationship produces a FROM equivalent to the
        # Core users JOIN addresses construct
        User, Address = self.classes("User", "Address")
        users, addresses = self.tables("users", "addresses")
        stmt = select(User).join(User.addresses)
        assert stmt.get_final_froms()[0].compare(users.join(addresses))

    @testing.combinations(
        (
            lambda User: (User,),
            lambda User: [
                {
                    "name": "User",
                    "type": User,
                    "aliased": False,
                    "expr": User,
                    "entity": User,
                }
            ],
        ),
        (
            lambda user_alias: (user_alias,),
            lambda User, user_alias: [
                {
                    "name": None,
                    "type": User,
                    "aliased": True,
                    "expr": user_alias,
                    "entity": user_alias,
                }
            ],
        ),
        (
            lambda User: (User.id,),
            lambda User: [
                {
                    "name": "id",
                    "type": testing.eq_type_affinity(sqltypes.Integer),
                    "aliased": False,
                    "expr": User.id,
                    "entity": User,
                }
            ],
        ),
        (
            lambda User, Address: (User.id, Address),
            lambda User, Address: [
                {
                    "name": "id",
                    "type": testing.eq_type_affinity(sqltypes.Integer),
                    "aliased": False,
                    "expr": User.id,
                    "entity": User,
                },
                {
                    "name": "Address",
                    "type": Address,
                    "aliased": False,
                    "expr": Address,
                    "entity": Address,
                },
            ],
        ),
        (
            lambda User, Address: (User.id, text("whatever")),
            lambda User, Address: [
                {
                    "name": "id",
                    "type": testing.eq_type_affinity(sqltypes.Integer),
                    "aliased": False,
                    "expr": User.id,
                    "entity": User,
                },
                {
                    "name": None,
                    "type": testing.eq_type_affinity(sqltypes.NullType),
                    "aliased": False,
                    "expr": testing.eq_clause_element(text("whatever")),
                    "entity": None,
                },
            ],
        ),
        (
            lambda user_table: (user_table,),
            lambda user_table: [
                {
                    "name": "id",
                    "type": testing.eq_type_affinity(sqltypes.Integer),
                    "expr": user_table.c.id,
                },
                {
                    "name": "name",
                    "type": testing.eq_type_affinity(sqltypes.String),
                    "expr": user_table.c.name,
                },
            ],
        ),
        argnames="cols, expected",
    )
    def test_column_descriptions(self, cols, expected):
        # .column_descriptions reports name/type/aliased/expr/entity for
        # each element of the columns clause, for entities, attributes,
        # aliased entities, text() elements and plain Table columns.
        User, Address = self.classes("User", "Address")
        ua = aliased(User)
        cols = testing.resolve_lambda(
            cols,
            User=User,
            Address=Address,
            user_alias=ua,
            user_table=inspect(User).local_table,
        )
        expected = testing.resolve_lambda(
            expected,
            User=User,
            Address=Address,
            user_alias=ua,
            user_table=inspect(User).local_table,
        )
        stmt = select(*cols)
        eq_(stmt.column_descriptions, expected)
        # when ORM entities are present, from_statement() should report
        # the same descriptions
        if stmt._propagate_attrs:
            stmt = select(*cols).from_statement(stmt)
            eq_(stmt.column_descriptions, expected)

    @testing.combinations(insert, update, delete, argnames="dml_construct")
    @testing.combinations(
        (
            lambda User: User,
            lambda User: (User.id, User.name),
            lambda User, user_table: {
                "name": "User",
                "type": User,
                "expr": User,
                "entity": User,
                "table": user_table,
            },
            lambda User: [
                {
                    "name": "id",
                    "type": testing.eq_type_affinity(sqltypes.Integer),
                    "aliased": False,
                    "expr": User.id,
                    "entity": User,
                },
                {
                    "name": "name",
                    "type": testing.eq_type_affinity(sqltypes.String),
                    "aliased": False,
                    "expr": User.name,
                    "entity": User,
                },
            ],
        ),
        argnames="entity, cols, expected_entity, expected_returning",
    )
    def test_dml_descriptions(
        self, dml_construct, entity, cols, expected_entity, expected_returning
    ):
        # insert/update/delete expose .entity_description and
        # .returning_column_descriptions analogous to column_descriptions
        User, Address = self.classes("User", "Address")
        lambda_args = dict(
            User=User,
            Address=Address,
            user_table=inspect(User).local_table,
        )
        entity = testing.resolve_lambda(entity, **lambda_args)
        cols = testing.resolve_lambda(cols, **lambda_args)
        expected_entity = testing.resolve_lambda(
            expected_entity, **lambda_args
        )
        expected_returning = testing.resolve_lambda(
            expected_returning, **lambda_args
        )
        stmt = dml_construct(entity)
        if cols:
            stmt = stmt.returning(*cols)
        eq_(stmt.entity_description, expected_entity)
        eq_(stmt.returning_column_descriptions, expected_returning)

    @testing.combinations(
        (
            lambda User, Address: select(User.name)
            .select_from(User, Address)
            .where(User.id == Address.user_id),
            "SELECT users.name FROM users, addresses "
            "WHERE users.id = addresses.user_id",
        ),
        (
            lambda User, Address: select(User.name)
            .select_from(Address, User)
            .where(User.id == Address.user_id),
            "SELECT users.name FROM addresses, users "
            "WHERE users.id = addresses.user_id",
        ),
    )
    def test_select_from_ordering(self, stmt, expected):
        # select_from() preserves the order in which entities are given
        User, Address = self.classes("User", "Address")
        lambda_args = dict(
            User=User,
            Address=Address,
            user_table=inspect(User).local_table,
        )
        stmt = testing.resolve_lambda(stmt, **lambda_args)
        self.assert_compile(stmt, expected)

    def test_limit_offset_select(self):
        # limit()/offset() render as bound parameters on the default dialect
        User = self.classes.User
        stmt = select(User.id).limit(5).offset(6)
        self.assert_compile(
            stmt,
            "SELECT users.id FROM users LIMIT :param_1 OFFSET :param_2",
            checkparams={"param_1": 5, "param_2": 6},
        )

    @testing.combinations(
        (None, "ROWS ONLY"),
        ({"percent": True}, "PERCENT ROWS ONLY"),
        ({"percent": True, "with_ties": True}, "PERCENT ROWS WITH TIES"),
    )
    def test_fetch_offset_select(self, options, fetch_clause):
        # fetch() renders ANSI FETCH FIRST, including the percent /
        # with_ties modifiers
        User = self.classes.User
        if options is None:
            stmt = select(User.id).fetch(5).offset(6)
        else:
            stmt = select(User.id).fetch(5, **options).offset(6)
        self.assert_compile(
            stmt,
            "SELECT users.id FROM users OFFSET :param_1 "
            "ROWS FETCH FIRST :param_2 %s" % (fetch_clause,),
            checkparams={"param_1": 6, "param_2": 5},
        )
class ColumnsClauseFromsTest(QueryTest, AssertsCompiledSQL):
    """Tests for ``Select.columns_clause_froms``: the FROM elements implied
    by the columns clause alone, with ORM annotations preserved and
    eager-load options excluded.
    """

    __dialect__ = "default"

    def test_exclude_eagerloads(self):
        # joinedload() options do not contribute to columns_clause_froms;
        # only the entity's own FROM element appears
        User, Address = self.classes("User", "Address")
        stmt = select(User).options(joinedload(User.addresses))
        froms = stmt.columns_clause_froms
        mapper = inspect(User)
        is_(froms[0], inspect(User).__clause_element__())
        eq_(
            froms[0]._annotations,
            {
                "entity_namespace": mapper,
                "parententity": mapper,
                "parentmapper": mapper,
            },
        )
        eq_(len(froms), 1)

    def test_maintain_annotations_from_table(self):
        # selecting the entity itself: the FROM is the annotated mapped table
        User, Address = self.classes("User", "Address")
        stmt = select(User)
        mapper = inspect(User)
        froms = stmt.columns_clause_froms
        is_(froms[0], inspect(User).__clause_element__())
        eq_(
            froms[0]._annotations,
            {
                "entity_namespace": mapper,
                "parententity": mapper,
                "parentmapper": mapper,
            },
        )
        eq_(len(froms), 1)

    def test_maintain_annotations_from_annoated_cols(self):
        # selecting a mapped column also yields the annotated table FROM
        User, Address = self.classes("User", "Address")
        stmt = select(User.id)
        mapper = inspect(User)
        froms = stmt.columns_clause_froms
        is_(froms[0], inspect(User).__clause_element__())
        eq_(
            froms[0]._annotations,
            {
                "entity_namespace": mapper,
                "parententity": mapper,
                "parentmapper": mapper,
            },
        )
        eq_(len(froms), 1)

    @testing.combinations((True,), (False,))
    def test_replace_into_select_from_maintains_existing(self, use_flag):
        # with_only_columns(maintain_column_froms=True) is equivalent to
        # first re-stating select_from(*columns_clause_froms)
        User, Address = self.classes("User", "Address")
        stmt = select(User.id).select_from(Address)
        if use_flag:
            stmt = stmt.with_only_columns(
                func.count(), maintain_column_froms=True
            )
        else:
            stmt = stmt.select_from(
                *stmt.columns_clause_froms
            ).with_only_columns(func.count())
        # Address is maintained in the FROM list
        self.assert_compile(
            stmt, "SELECT count(*) AS count_1 FROM addresses, users"
        )

    @testing.combinations((True,), (False,))
    def test_replace_into_select_from_with_loader_criteria(self, use_flag):
        # with_loader_criteria() criteria survive the with_only_columns()
        # replacement in both spellings
        User, Address = self.classes("User", "Address")
        stmt = select(User.id).options(
            with_loader_criteria(User, User.name == "ed")
        )
        if use_flag:
            stmt = stmt.with_only_columns(
                func.count(), maintain_column_froms=True
            )
        else:
            stmt = stmt.select_from(
                *stmt.columns_clause_froms
            ).with_only_columns(func.count())
        self.assert_compile(
            stmt,
            "SELECT count(*) AS count_1 FROM users WHERE users.name = :name_1",
        )
class JoinTest(QueryTest, AssertsCompiledSQL):
    """Tests for ``Select.join()`` / ``Select.join_from()`` with ORM
    entities: ON-clause inference, relationship targets, aliased entities,
    conflict detection, and interaction with ``filter_by()`` / ``params()``.
    """

    __dialect__ = "default"

    def test_join_from_no_onclause(self):
        # join_from() with no ON clause infers it from the foreign key
        User, Address = self.classes("User", "Address")
        stmt = select(literal_column("1")).join_from(User, Address)
        self.assert_compile(
            stmt,
            "SELECT 1 FROM users JOIN addresses "
            "ON users.id = addresses.user_id",
        )

    def test_join_from_w_relationship(self):
        # an explicit relationship attribute supplies the ON clause
        User, Address = self.classes("User", "Address")
        stmt = select(literal_column("1")).join_from(
            User, Address, User.addresses
        )
        self.assert_compile(
            stmt,
            "SELECT 1 FROM users JOIN addresses "
            "ON users.id = addresses.user_id",
        )

    def test_join_from_alised_w_relationship(self):
        # joining from an aliased entity adapts the relationship ON clause
        User, Address = self.classes("User", "Address")
        u1 = aliased(User)
        stmt = select(literal_column("1")).join_from(u1, Address, u1.addresses)
        self.assert_compile(
            stmt,
            "SELECT 1 FROM users AS users_1 JOIN addresses "
            "ON users_1.id = addresses.user_id",
        )

    def test_join_conflicting_right_side(self):
        # join target that does not match the relationship's right side
        # raises at compile time
        User, Address = self.classes("User", "Address")
        stmt = select(User).join(Address, User.orders)
        assert_raises_message(
            exc.InvalidRequestError,
            "Join target .*Address.* does not correspond to the right side "
            "of join condition User.orders",
            stmt.compile,
        )

    def test_join_from_conflicting_left_side_plain(self):
        # explicit left side that conflicts with the relationship's owner
        # raises at compile time
        User, Address, Order = self.classes("User", "Address", "Order")
        stmt = select(User).join_from(User, Address, Order.address)
        assert_raises_message(
            exc.InvalidRequestError,
            r"explicit from clause .*User.* does not match .* Order.address",
            stmt.compile,
        )

    def test_join_from_conflicting_left_side_mapper_vs_aliased(self):
        # plain mapper left side vs. aliased relationship attribute conflicts
        User, Address = self.classes("User", "Address")
        u1 = aliased(User)
        stmt = select(User).join_from(User, Address, u1.addresses)
        assert_raises_message(
            exc.InvalidRequestError,
            # the display of the attribute here is not consistent vs.
            # the straight aliased class, should improve this.
            r"explicit from clause .*User.* does not match left side .*"
            r"of relationship attribute aliased\(User\).addresses",
            stmt.compile,
        )

    def test_join_from_conflicting_left_side_aliased_vs_mapper(self):
        # aliased left side vs. plain mapper relationship attribute conflicts
        User, Address = self.classes("User", "Address")
        u1 = aliased(User)
        stmt = select(u1).join_from(u1, Address, User.addresses)
        assert_raises_message(
            exc.InvalidRequestError,
            r"explicit from clause aliased\(User\) does not match left "
            "side of relationship attribute User.addresses",
            stmt.compile,
        )

    def test_join_from_we_can_explicitly_tree_joins(self):
        # join_from() lets the join tree branch back to a prior entity
        # rather than chaining off the most recent join
        User, Address, Order, Item, Keyword = self.classes(
            "User", "Address", "Order", "Item", "Keyword"
        )
        stmt = (
            select(User)
            .join(User.addresses)
            .join_from(User, Order, User.orders)
            .join(Order.items)
        )
        self.assert_compile(
            stmt,
            "SELECT users.id, users.name FROM users JOIN addresses "
            "ON users.id = addresses.user_id JOIN orders "
            "ON users.id = orders.user_id JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id JOIN items "
            "ON items.id = order_items_1.item_id",
        )

    def test_join_from_w_filter_by(self):
        # filter_by() always refers to the most recently joined entity
        User, Address, Order, Item, Keyword = self.classes(
            "User", "Address", "Order", "Item", "Keyword"
        )
        stmt = (
            select(User)
            .filter_by(name="n1")
            .join(User.addresses)
            .filter_by(email_address="a1")
            .join_from(User, Order, User.orders)
            .filter_by(description="d1")
            .join(Order.items)
            .filter_by(description="d2")
        )
        self.assert_compile(
            stmt,
            "SELECT users.id, users.name FROM users "
            "JOIN addresses ON users.id = addresses.user_id "
            "JOIN orders ON users.id = orders.user_id "
            "JOIN order_items AS order_items_1 "
            "ON orders.id = order_items_1.order_id "
            "JOIN items ON items.id = order_items_1.item_id "
            "WHERE users.name = :name_1 "
            "AND addresses.email_address = :email_address_1 "
            "AND orders.description = :description_1 "
            "AND items.description = :description_2",
            checkparams={
                "name_1": "n1",
                "email_address_1": "a1",
                "description_1": "d1",
                "description_2": "d2",
            },
        )

    @testing.combinations(
        (
            lambda User: select(User).where(User.id == bindparam("foo")),
            "SELECT users.id, users.name FROM users WHERE users.id = :foo",
            {"foo": "bar"},
            {"foo": "bar"},
        ),
        (
            lambda User, Address: select(User)
            .join_from(User, Address)
            .where(User.id == bindparam("foo")),
            "SELECT users.id, users.name FROM users JOIN addresses "
            "ON users.id = addresses.user_id WHERE users.id = :foo",
            {"foo": "bar"},
            {"foo": "bar"},
        ),
        (
            lambda User, Address: select(User)
            .join_from(User, Address, User.addresses)
            .where(User.id == bindparam("foo")),
            "SELECT users.id, users.name FROM users JOIN addresses "
            "ON users.id = addresses.user_id WHERE users.id = :foo",
            {"foo": "bar"},
            {"foo": "bar"},
        ),
        (
            lambda User, Address: select(User)
            .join(User.addresses)
            .where(User.id == bindparam("foo")),
            "SELECT users.id, users.name FROM users JOIN addresses "
            "ON users.id = addresses.user_id WHERE users.id = :foo",
            {"foo": "bar"},
            {"foo": "bar"},
        ),
    )
    def test_params_with_join(
        self, test_case, expected, bindparams, expected_params
    ):
        # .params() applies bound-parameter values across all join forms
        User, Address = self.classes("User", "Address")
        stmt = resolve_lambda(test_case, **locals())
        stmt = stmt.params(**bindparams)
        self.assert_compile(stmt, expected, checkparams=expected_params)
class LoadersInSubqueriesTest(QueryTest, AssertsCompiledSQL):
    """The Query object calls enable_eagerloads(False) when you call
    .subquery().  With Core select, we don't have that information; we
    instead have to look at the "toplevel" flag to know where we are.
    Make sure the many different combinations of these two objects (and
    the still-too-many flags at the moment) work as expected on the
    outside.
    """

    __dialect__ = "default"

    # mappers are set up per-test by the fixtures below
    run_setup_mappers = None

    @testing.fixture
    def joinedload_fixture(self):
        # User.addresses is an eager (lazy="joined") relationship
        users, Address, addresses, User = (
            self.tables.users,
            self.classes.Address,
            self.tables.addresses,
            self.classes.User,
        )
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={"addresses": relationship(Address, lazy="joined")},
        )
        self.mapper_registry.map_imperatively(Address, addresses)
        return User, Address

    @testing.fixture
    def deferred_fixture(self):
        # User.name is deferred; User.name_upper is a deferred
        # column_property over upper(users.name)
        User = self.classes.User
        users = self.tables.users
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "name": deferred(users.c.name),
                "name_upper": column_property(
                    func.upper(users.c.name), deferred=True
                ),
            },
        )
        return User

    @testing.fixture
    def non_deferred_fixture(self):
        # same column_property as deferred_fixture but loaded by default
        User = self.classes.User
        users = self.tables.users
        self.mapper_registry.map_imperatively(
            User,
            users,
            properties={
                "name_upper": column_property(func.upper(users.c.name))
            },
        )
        return User

    def test_no_joinedload_in_subquery_select_rows(self, joinedload_fixture):
        # when selecting plain rows from a subquery, the joined eager load
        # inside the subquery is not rendered; legacy Query and Core
        # select() must agree
        User, Address = joinedload_fixture
        sess = fixture_session()
        stmt1 = sess.query(User).subquery()
        stmt1 = sess.query(stmt1)
        stmt2 = select(User).subquery()
        stmt2 = select(stmt2)
        expected = (
            "SELECT anon_1.id, anon_1.name FROM "
            "(SELECT users.id AS id, users.name AS name "
            "FROM users) AS anon_1"
        )
        self.assert_compile(
            stmt1._final_statement(legacy_query_style=False),
            expected,
        )
        self.assert_compile(stmt2, expected)

    def test_no_joinedload_in_subquery_select_entity(self, joinedload_fixture):
        # when the subquery is re-mapped to the entity via aliased(), the
        # joined eager load is applied on the OUTSIDE of the subquery
        User, Address = joinedload_fixture
        sess = fixture_session()
        stmt1 = sess.query(User).subquery()
        ua = aliased(User, stmt1)
        stmt1 = sess.query(ua)
        stmt2 = select(User).subquery()
        ua = aliased(User, stmt2)
        stmt2 = select(ua)
        expected = (
            "SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, "
            "addresses_1.user_id, addresses_1.email_address FROM "
            "(SELECT users.id AS id, users.name AS name FROM users) AS anon_1 "
            "LEFT OUTER JOIN addresses AS addresses_1 "
            "ON anon_1.id = addresses_1.user_id"
        )
        self.assert_compile(
            stmt1._final_statement(legacy_query_style=False),
            expected,
        )
        self.assert_compile(stmt2, expected)

    def test_deferred_subq_one(self, deferred_fixture):
        """test for #6661"""
        User = deferred_fixture
        subq = select(User).subquery()
        u1 = aliased(User, subq)
        # deferred columns are omitted from the outer SELECT by default
        q = select(u1)
        self.assert_compile(
            q,
            "SELECT anon_1.id "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users) AS anon_1",
        )
        # testing deferred opts separately for deterministic SQL generation
        q = select(u1).options(undefer(u1.name))
        self.assert_compile(
            q,
            "SELECT anon_1.id, anon_1.name "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users) AS anon_1",
        )
        # undeferring the derived column_property re-renders the expression
        # against the subquery's column
        q = select(u1).options(undefer(u1.name_upper))
        self.assert_compile(
            q,
            "SELECT upper(anon_1.name) AS upper_1, anon_1.id "
            "FROM (SELECT users.id AS id, users.name AS name "
            "FROM users) AS anon_1",
        )

    def test_non_deferred_subq_one(self, non_deferred_fixture):
        """test for #6661

        cols that aren't deferred go into subqueries. 1.3 did this also.
        """
        User = non_deferred_fixture
        subq = select(User).subquery()
        u1 = aliased(User, subq)
        q = select(u1)
        self.assert_compile(
            q,
            "SELECT upper(anon_1.name) AS upper_1, anon_1.id, anon_1.name "
            "FROM (SELECT upper(users.name) AS upper_2, users.id AS id, "
            "users.name AS name FROM users) AS anon_1",
        )

    def test_deferred_subq_two(self, deferred_fixture):
        """test for #6661

        in this test, we are only confirming the current contract of ORM
        subqueries which is that deferred + derived column_property's don't
        export themselves into the .c. collection of a subquery.

        We might want to revisit this in some way.
        """
        User = deferred_fixture
        subq = select(User).subquery()
        assert not hasattr(subq.c, "name_upper")
        # "undefer" it by including it
        subq = select(User, User.name_upper).subquery()
        assert hasattr(subq.c, "name_upper")

    def test_non_deferred_col_prop_targetable_in_subq(
        self, non_deferred_fixture
    ):
        """test for #6661"""
        # non-deferred column_property columns DO export into subquery .c
        User = non_deferred_fixture
        subq = select(User).subquery()
        assert hasattr(subq.c, "name_upper")

    def test_recursive_cte_render_on_deferred(self, deferred_fixture):
        """test for #6661.

        this test is most directly the bug reported in #6661,
        as the CTE uses stmt._exported_columns_iterator() ahead of compiling
        the SELECT in order to get the list of columns that will be selected,
        this has to match what the subquery is going to render.

        This is also pretty fundamental to why deferred() as an option
        can't be honored in a subquery; the subquery needs to export the
        correct columns and it needs to do so without having to process
        all the loader options. 1.3 OTOH when you got a subquery from
        Query, it did a full compile_context. 1.4/2.0 we don't do that
        anymore.
        """
        User = deferred_fixture
        cte = select(User).cte(recursive=True)
        # nonsensical, but we are just testing form
        cte = cte.union_all(select(User).join(cte, cte.c.id == User.id))
        stmt = select(User).join(cte, User.id == cte.c.id)
        self.assert_compile(
            stmt,
            "WITH RECURSIVE anon_1(id, name) AS "
            "(SELECT users.id AS id, users.name AS name FROM users "
            "UNION ALL SELECT users.id AS id, users.name AS name "
            "FROM users JOIN anon_1 ON anon_1.id = users.id) "
            "SELECT users.id FROM users JOIN anon_1 ON users.id = anon_1.id",
        )
        # testing deferred opts separately for deterministic SQL generation
        self.assert_compile(
            stmt.options(undefer(User.name_upper)),
            "WITH RECURSIVE anon_1(id, name) AS "
            "(SELECT users.id AS id, users.name AS name FROM users "
            "UNION ALL SELECT users.id AS id, users.name AS name "
            "FROM users JOIN anon_1 ON anon_1.id = users.id) "
            "SELECT upper(users.name) AS upper_1, users.id "
            "FROM users JOIN anon_1 ON users.id = anon_1.id",
        )
        self.assert_compile(
            stmt.options(undefer(User.name)),
            "WITH RECURSIVE anon_1(id, name) AS "
            "(SELECT users.id AS id, users.name AS name FROM users "
            "UNION ALL SELECT users.id AS id, users.name AS name "
            "FROM users JOIN anon_1 ON anon_1.id = users.id) "
            "SELECT users.id, users.name "
            "FROM users JOIN anon_1 ON users.id = anon_1.id",
        )

    def test_nested_union_deferred(self, deferred_fixture):
        """test #6678"""
        # the inner SELECTs of a nested union include deferred columns,
        # while the outermost SELECT against the alias omits them
        User = deferred_fixture
        s1 = select(User).where(User.id == 5)
        s2 = select(User).where(User.id == 6)
        s3 = select(User).where(User.id == 7)
        stmt = union(s1.union(s2), s3)
        u_alias = aliased(User, stmt.subquery())
        self.assert_compile(
            select(u_alias),
            "SELECT anon_1.id FROM ((SELECT users.id AS id, "
            "users.name AS name "
            "FROM users "
            "WHERE users.id = :id_1 UNION SELECT users.id AS id, "
            "users.name AS name "
            "FROM users WHERE users.id = :id_2) "
            "UNION SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id = :id_3) AS anon_1",
        )

    def test_nested_union_undefer_option(self, deferred_fixture):
        """test #6678

        in this case we want to see that the unions include the deferred
        columns so that if we undefer on the outside we can get the
        column.
        """
        User = deferred_fixture
        s1 = select(User).where(User.id == 5)
        s2 = select(User).where(User.id == 6)
        s3 = select(User).where(User.id == 7)
        stmt = union(s1.union(s2), s3)
        u_alias = aliased(User, stmt.subquery())
        self.assert_compile(
            select(u_alias).options(undefer(u_alias.name)),
            "SELECT anon_1.id, anon_1.name FROM "
            "((SELECT users.id AS id, users.name AS name FROM users "
            "WHERE users.id = :id_1 UNION SELECT users.id AS id, "
            "users.name AS name "
            "FROM users WHERE users.id = :id_2) "
            "UNION SELECT users.id AS id, users.name AS name "
            "FROM users WHERE users.id = :id_3) AS anon_1",
        )
class ExtraColsTest(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
run_setup_mappers = None
@testing.fixture
def query_expression_fixture(self):
users, User = (
self.tables.users,
self.classes.User,
)
addresses, Address = (self.tables.addresses, self.classes.Address)
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("value", query_expression()),
]
),
)
self.mapper_registry.map_imperatively(Address, addresses)
return User
@testing.fixture
def query_expression_w_joinedload_fixture(self):
users, User = (
self.tables.users,
self.classes.User,
)
addresses, Address = (self.tables.addresses, self.classes.Address)
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("value", query_expression()),
(
"addresses",
relationship(
Address,
primaryjoin=and_(
addresses.c.user_id == users.c.id,
addresses.c.email_address != None,
),
),
),
]
),
)
self.mapper_registry.map_imperatively(Address, addresses)
return User
@testing.fixture
def column_property_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties=util.OrderedDict(
[
("concat", column_property(users.c.id * 2)),
(
"count",
column_property(
select(func.count(addresses.c.id))
.where(
users.c.id == addresses.c.user_id,
)
.correlate(users)
.scalar_subquery()
),
),
]
),
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(
User,
)
},
)
return User, Address
@testing.fixture
def plain_fixture(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, back_populates="user")
},
)
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User, back_populates="addresses")
},
)
return User, Address
@testing.fixture
def hard_labeled_self_ref_fixture(self, decl_base):
class A(decl_base):
__tablename__ = "a"
id = Column(Integer, primary_key=True)
a_id = Column(ForeignKey("a.id"))
data = Column(String)
data_lower = column_property(func.lower(data).label("hardcoded"))
as_ = relationship("A")
return A
def test_no_joinedload_embedded(self, plain_fixture):
User, Address = plain_fixture
stmt = select(Address).options(joinedload(Address.user))
subq = stmt.subquery()
s2 = select(subq)
self.assert_compile(
s2,
"SELECT anon_1.id, anon_1.user_id, anon_1.email_address "
"FROM (SELECT addresses.id AS id, addresses.user_id AS "
"user_id, addresses.email_address AS email_address "
"FROM addresses) AS anon_1",
)
def test_with_expr_one(self, query_expression_fixture):
User = query_expression_fixture
stmt = select(User).options(
with_expression(User.value, User.name + "foo")
)
self.assert_compile(
stmt,
"SELECT users.name || :name_1 AS anon_1, users.id, "
"users.name FROM users",
)
def test_with_expr_two(self, query_expression_fixture):
User = query_expression_fixture
stmt = select(User.id, User.name, (User.name + "foo").label("foo"))
subq = stmt.subquery()
u1 = aliased(User, subq)
stmt = select(u1).options(with_expression(u1.value, subq.c.foo))
self.assert_compile(
stmt,
"SELECT anon_1.foo, anon_1.id, anon_1.name FROM "
"(SELECT users.id AS id, users.name AS name, "
"users.name || :name_1 AS foo FROM users) AS anon_1",
)
def test_with_expr_three(self, query_expression_w_joinedload_fixture):
"""test :ticket:`6259`"""
User = query_expression_w_joinedload_fixture
stmt = select(User).options(joinedload(User.addresses)).limit(1)
# test that the outer IS NULL is rendered
# test that the inner query does not include a NULL default
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT users.id AS id, users.name AS name FROM users "
"LIMIT :param_1) AS anon_1 LEFT OUTER "
"JOIN addresses AS addresses_1 ON addresses_1.user_id = anon_1.id "
"AND addresses_1.email_address IS NOT NULL",
)
def test_with_expr_four(self, query_expression_w_joinedload_fixture):
"""test :ticket:`6259`"""
User = query_expression_w_joinedload_fixture
stmt = (
select(User)
.options(
with_expression(User.value, null()), joinedload(User.addresses)
)
.limit(1)
)
# test that the outer IS NULL is rendered, not adapted
# test that the inner query includes the NULL we asked for
# ironically, this statement would not actually fetch due to the NULL
# not allowing adaption and therefore failing on the result set
# matching, this was addressed in #7154.
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.id, anon_2.name, "
"addresses_1.id AS id_1, addresses_1.user_id, "
"addresses_1.email_address FROM (SELECT NULL AS anon_1, "
"users.id AS id, users.name AS name FROM users LIMIT :param_1) "
"AS anon_2 LEFT OUTER JOIN addresses AS addresses_1 "
"ON addresses_1.user_id = anon_2.id "
"AND addresses_1.email_address IS NOT NULL",
)
def test_joinedload_outermost(self, plain_fixture):
User, Address = plain_fixture
stmt = select(Address).options(joinedload(Address.user))
# render joined eager loads with stringify
self.assert_compile(
stmt,
"SELECT addresses.id, addresses.user_id, addresses.email_address, "
"users_1.id AS id_1, users_1.name FROM addresses "
"LEFT OUTER JOIN users AS users_1 "
"ON users_1.id = addresses.user_id",
)
def test_joinedload_outermost_w_wrapping_elements(self, plain_fixture):
User, Address = plain_fixture
stmt = (
select(User)
.options(joinedload(User.addresses))
.limit(10)
.distinct()
)
self.assert_compile(
stmt,
"SELECT anon_1.id, anon_1.name, addresses_1.id AS id_1, "
"addresses_1.user_id, addresses_1.email_address FROM "
"(SELECT DISTINCT users.id AS id, users.name AS name FROM users "
"LIMIT :param_1) "
"AS anon_1 LEFT OUTER JOIN addresses AS addresses_1 "
"ON anon_1.id = addresses_1.user_id",
)
def test_contains_eager_outermost_w_wrapping_elements(self, plain_fixture):
"""test #8569"""
User, Address = plain_fixture
stmt = (
select(User)
.join(User.addresses)
.options(contains_eager(User.addresses))
.limit(10)
.distinct()
)
self.assert_compile(
stmt,
"SELECT DISTINCT addresses.id, addresses.user_id, "
"addresses.email_address, users.id AS id_1, users.name "
"FROM users JOIN addresses ON users.id = addresses.user_id "
"LIMIT :param_1",
)
def test_joinedload_hard_labeled_selfref(
self, hard_labeled_self_ref_fixture
):
"""test #8569"""
A = hard_labeled_self_ref_fixture
stmt = select(A).options(joinedload(A.as_)).distinct()
self.assert_compile(
stmt,
"SELECT anon_1.hardcoded, anon_1.id, anon_1.a_id, anon_1.data, "
"lower(a_1.data) AS lower_1, a_1.id AS id_1, a_1.a_id AS a_id_1, "
"a_1.data AS data_1 FROM (SELECT DISTINCT lower(a.data) AS "
"hardcoded, a.id AS id, a.a_id AS a_id, a.data AS data FROM a) "
"AS anon_1 LEFT OUTER JOIN a AS a_1 ON anon_1.id = a_1.a_id",
)
def test_contains_eager_hard_labeled_selfref(
self, hard_labeled_self_ref_fixture
):
"""test #8569"""
A = hard_labeled_self_ref_fixture
a1 = aliased(A)
stmt = (
select(A)
.join(A.as_.of_type(a1))
.options(contains_eager(A.as_.of_type(a1)))
.distinct()
)
self.assert_compile(
stmt,
"SELECT DISTINCT lower(a.data) AS hardcoded, "
"lower(a_1.data) AS hardcoded, a_1.id, a_1.a_id, a_1.data, "
"a.id AS id_1, a.a_id AS a_id_1, a.data AS data_1 "
"FROM a JOIN a AS a_1 ON a.id = a_1.a_id",
)
def test_column_properties(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
stmt = select(User)
self.assert_compile(
stmt,
"SELECT users.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users.id = addresses.user_id) AS anon_2, users.id, "
"users.name FROM users",
checkparams={"id_1": 2},
)
def test_column_properties_can_we_use(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
# User, Address = column_property_fixture
# stmt = select(User)
# TODO: shouldn't we be able to get at count ?
# stmt = stmt.where(stmt.selected_columns.count > 5)
# self.assert_compile(stmt, "")
def test_column_properties_subquery(self, column_property_fixture):
"""test querying mappings that reference external columns or
selectables."""
User, Address = column_property_fixture
stmt = select(User)
# here, the subquery needs to export the columns that include
# the column properties
stmt = select(stmt.subquery())
# TODO: shouldn't we be able to get to stmt.subquery().c.count ?
self.assert_compile(
stmt,
"SELECT anon_2.anon_1, anon_2.anon_3, anon_2.id, anon_2.name "
"FROM (SELECT users.id * :id_1 AS anon_1, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE users.id = addresses.user_id) AS anon_3, users.id AS id, "
"users.name AS name FROM users) AS anon_2",
checkparams={"id_1": 2},
)
def test_column_properties_subquery_two(self, column_property_fixture):
    """test querying mappings that reference external columns or
    selectables."""

    User, _ = column_property_fixture

    # col properties will retain anonymous labels, however will
    # adopt the .key within the subquery collection so they can
    # be addressed.
    subq = select(
        User.id,
        User.name,
        User.concat,
        User.count,
    ).subquery()

    outer = select(subq).where(subq.c.concat == "foo")

    self.assert_compile(
        outer,
        "SELECT anon_1.id, anon_1.name, anon_1.anon_2, anon_1.anon_3 "
        "FROM (SELECT users.id AS id, users.name AS name, "
        "users.id * :id_1 AS anon_2, "
        "(SELECT count(addresses.id) AS count_1 "
        "FROM addresses WHERE users.id = addresses.user_id) AS anon_3 "
        "FROM users) AS anon_1 WHERE anon_1.anon_2 = :param_1",
        checkparams={"id_1": 2, "param_1": "foo"},
    )
def test_column_properties_aliased_subquery(self, column_property_fixture):
    """test querying mappings that reference external columns or
    selectables."""

    User, _ = column_property_fixture
    user_alias = aliased(User)

    # the subquery must export the columns that include the column
    # properties, with the alias applied throughout
    stmt = select(select(user_alias).subquery())

    self.assert_compile(
        stmt,
        "SELECT anon_2.anon_1, anon_2.anon_3, anon_2.id, anon_2.name "
        "FROM (SELECT users_1.id * :id_1 AS anon_1, "
        "(SELECT count(addresses.id) AS count_1 FROM addresses "
        "WHERE users_1.id = addresses.user_id) AS anon_3, "
        "users_1.id AS id, users_1.name AS name "
        "FROM users AS users_1) AS anon_2",
        checkparams={"id_1": 2},
    )
class RelationshipNaturalCompileTest(QueryTest, AssertsCompiledSQL):
    """test using core join() with relationship attributes.

    as __clause_element__() produces a workable SQL expression, this should
    be generally possible.

    However, it can't work for many-to-many relationships, as these
    require two joins. Only the ORM can look at the entities and decide
    that there's a separate "secondary" table to be rendered as a separate
    join.

    """

    __dialect__ = "default"

    def test_of_type_implicit_join(self):
        User, Address = self.classes("User", "Address")
        user_alias = aliased(User)
        addr_alias = aliased(Address)

        expected = (
            "SELECT users_1.id, users_1.name FROM users AS users_1, "
            "addresses AS addresses_1 WHERE users_1.id = addresses_1.user_id"
        )

        # future-style select() and legacy Query should render identically
        core_stmt = select(user_alias).where(
            user_alias.addresses.of_type(addr_alias)
        )
        legacy_stmt = (
            fixture_session()
            .query(user_alias)
            .filter(user_alias.addresses.of_type(addr_alias))
            ._final_statement(legacy_query_style=False)
        )

        self.assert_compile(core_stmt, expected)
        self.assert_compile(legacy_stmt, expected)

    def test_of_type_explicit_join(self):
        User, Address = self.classes("User", "Address")
        user_alias = aliased(User)
        addr_alias = aliased(Address)

        self.assert_compile(
            select(user_alias).join(
                user_alias.addresses.of_type(addr_alias)
            ),
            "SELECT users_1.id, users_1.name FROM users AS users_1 "
            "JOIN addresses AS addresses_1 "
            "ON users_1.id = addresses_1.user_id",
        )

    def test_many_to_many_explicit_join(self):
        Item, Keyword = self.classes("Item", "Keyword")

        self.assert_compile(
            select(Item).join(Keyword, Item.keywords),
            "SELECT items.id, items.description FROM items "
            "JOIN item_keywords AS item_keywords_1 "
            "ON items.id = item_keywords_1.item_id "
            "JOIN keywords ON keywords.id = item_keywords_1.keyword_id",
        )

    def test_many_to_many_implicit_join(self):
        Item, Keyword = self.classes("Item", "Keyword")

        # this was the intent of the primary + secondary clauseelement.
        # it can do enough of the right thing in an implicit join
        # context.
        self.assert_compile(
            select(Item).where(Item.keywords),
            "SELECT items.id, items.description FROM items, "
            "item_keywords AS item_keywords_1, keywords "
            "WHERE items.id = item_keywords_1.item_id "
            "AND keywords.id = item_keywords_1.keyword_id",
        )
class InheritedTest(_poly_fixtures._Polymorphic):
    """Shared base for inheritance-mapping compilation tests below."""

    # mappers for the polymorphic fixture are set up once per class
    run_setup_mappers = "once"
class ExplicitWithPolymorhpicTest(
    _poly_fixtures._PolymorphicUnions, AssertsCompiledSQL
):
    """Tests for explicit with_polymorphic() against the polymorphic-union
    fixture.

    NOTE(review): the class name misspells "Polymorphic" as "Polymorhpic";
    left unchanged since renaming a test class could break external
    references (e.g. test selection by name).
    """

    __dialect__ = "default"

    # the full polymorphic union (engineers + managers UNION ALL'ed over
    # people) as it renders when wrapped in a subquery labeled anon_1
    default_punion = (
        "(SELECT pjoin.person_id AS person_id, "
        "pjoin.company_id AS company_id, "
        "pjoin.name AS name, pjoin.type AS type, "
        "pjoin.status AS status, pjoin.engineer_name AS engineer_name, "
        "pjoin.primary_language AS primary_language, "
        "pjoin.manager_name AS manager_name "
        "FROM (SELECT engineers.person_id AS person_id, "
        "people.company_id AS company_id, people.name AS name, "
        "people.type AS type, engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, "
        "engineers.primary_language AS primary_language, "
        "CAST(NULL AS VARCHAR(50)) AS manager_name "
        "FROM people JOIN engineers ON people.person_id = engineers.person_id "
        "UNION ALL SELECT managers.person_id AS person_id, "
        "people.company_id AS company_id, people.name AS name, "
        "people.type AS type, managers.status AS status, "
        "CAST(NULL AS VARCHAR(50)) AS engineer_name, "
        "CAST(NULL AS VARCHAR(50)) AS primary_language, "
        "managers.manager_name AS manager_name FROM people "
        "JOIN managers ON people.person_id = managers.person_id) AS pjoin) "
        "AS anon_1"
    )

    def test_subquery_col_expressions_wpoly_one(self):
        """Subquery .c keys are ordered the same regardless of the order
        entities are passed to with_polymorphic()."""
        Person, Manager, Engineer = self.classes(
            "Person", "Manager", "Engineer"
        )

        wp1 = with_polymorphic(Person, [Manager, Engineer])
        subq1 = select(wp1).subquery()

        wp2 = with_polymorphic(Person, [Engineer, Manager])
        subq2 = select(wp2).subquery()

        # first thing we see, is that when we go through with_polymorphic,
        # the entities that get placed into the aliased class go through
        # Mapper._mappers_from_spec(), which matches them up to the
        # existing Mapper.self_and_descendants collection, meaning,
        # the order is the same every time.  Assert here that's still
        # happening.  If a future internal change modifies this assumption,
        # that's not necessarily bad, but it would change things.

        # NOTE(review): "person_id_1" appears twice in these expected key
        # lists — presumably the deduplicated key assigned to both the
        # engineer and manager person_id columns; confirm against the
        # .keys() contract if this ever changes.
        eq_(
            subq1.c.keys(),
            [
                "person_id",
                "company_id",
                "name",
                "type",
                "person_id_1",
                "status",
                "engineer_name",
                "primary_language",
                "person_id_1",
                "status_1",
                "manager_name",
            ],
        )
        eq_(
            subq2.c.keys(),
            [
                "person_id",
                "company_id",
                "name",
                "type",
                "person_id_1",
                "status",
                "engineer_name",
                "primary_language",
                "person_id_1",
                "status_1",
                "manager_name",
            ],
        )

    def test_subquery_col_expressions_wpoly_two(self):
        """A select() of the with_polymorphic subquery disambiguates the
        duplicated person_id / status labels in the outer columns clause."""
        Person, Manager, Engineer = self.classes(
            "Person", "Manager", "Engineer"
        )

        wp1 = with_polymorphic(Person, [Manager, Engineer])
        subq1 = select(wp1).subquery()

        stmt = select(subq1).where(
            or_(
                subq1.c.engineer_name == "dilbert",
                subq1.c.manager_name == "dogbert",
            )
        )
        self.assert_compile(
            stmt,
            "SELECT anon_1.person_id, anon_1.company_id, anon_1.name, "
            "anon_1.type, anon_1.person_id AS person_id_1, anon_1.status, "
            "anon_1.engineer_name, anon_1.primary_language, "
            "anon_1.person_id AS person_id_2, anon_1.status AS status_1, "
            "anon_1.manager_name FROM "
            "%s WHERE "
            "anon_1.engineer_name = :engineer_name_1 "
            "OR anon_1.manager_name = :manager_name_1" % (self.default_punion),
        )
class ImplicitWithPolymorphicTest(
    _poly_fixtures._PolymorphicUnions, AssertsCompiledSQL
):
    """Test a series of mappers with a very awkward with_polymorphic setting,
    that tables and columns are rendered using the selectable in the correct
    contexts.  PolymorphicUnions represent the most awkward and verbose
    polymorphic  fixtures you can have.   expressions need to be maximally
    accurate in terms of the mapped selectable in order to produce correct
    queries, which also will be really wrong if that mapped selectable is
    not in use.

    """

    __dialect__ = "default"

    def test_select_columns_where_baseclass(self):
        # selecting individual columns of the base class renders against
        # the "pjoin" polymorphic union; select() and Query must agree
        Person = self.classes.Person

        stmt = (
            select(Person.person_id, Person.name)
            .where(Person.name == "some name")
            .order_by(Person.person_id)
        )

        sess = fixture_session()
        q = (
            sess.query(Person.person_id, Person.name)
            .filter(Person.name == "some name")
            .order_by(Person.person_id)
        )

        expected = (
            "SELECT pjoin.person_id, pjoin.name FROM "
            "(SELECT engineers.person_id AS person_id, people.company_id AS "
            "company_id, people.name AS name, people.type AS type, "
            "engineers.status AS status, engineers.engineer_name AS "
            "engineer_name, engineers.primary_language AS primary_language, "
            "CAST(NULL AS VARCHAR(50)) AS manager_name FROM people "
            "JOIN engineers ON people.person_id = engineers.person_id "
            "UNION ALL SELECT managers.person_id AS person_id, "
            "people.company_id AS company_id, people.name AS name, "
            "people.type AS type, managers.status AS status, "
            "CAST(NULL AS VARCHAR(50)) AS engineer_name, "
            "CAST(NULL AS VARCHAR(50)) AS primary_language, "
            "managers.manager_name AS manager_name FROM people "
            "JOIN managers ON people.person_id = managers.person_id) AS "
            "pjoin WHERE pjoin.name = :name_1 ORDER BY pjoin.person_id"
        )

        self.assert_compile(stmt, expected)
        self.assert_compile(
            q._final_statement(legacy_query_style=False),
            expected,
        )

    def test_select_where_baseclass(self):
        # selecting the full base entity also renders against "pjoin"
        Person = self.classes.Person

        stmt = (
            select(Person)
            .where(Person.name == "some name")
            .order_by(Person.person_id)
        )

        sess = fixture_session()
        q = (
            sess.query(Person)
            .filter(Person.name == "some name")
            .order_by(Person.person_id)
        )

        expected = (
            "SELECT pjoin.person_id, pjoin.company_id, pjoin.name, "
            "pjoin.type, pjoin.status, pjoin.engineer_name, "
            "pjoin.primary_language, pjoin.manager_name FROM "
            "(SELECT engineers.person_id AS person_id, people.company_id "
            "AS company_id, people.name AS name, people.type AS type, "
            "engineers.status AS status, engineers.engineer_name AS "
            "engineer_name, engineers.primary_language AS primary_language, "
            "CAST(NULL AS VARCHAR(50)) AS manager_name FROM people "
            "JOIN engineers ON people.person_id = engineers.person_id "
            "UNION ALL SELECT managers.person_id AS person_id, "
            "people.company_id AS company_id, people.name AS name, "
            "people.type AS type, managers.status AS status, "
            "CAST(NULL AS VARCHAR(50)) AS engineer_name, "
            "CAST(NULL AS VARCHAR(50)) AS primary_language, "
            "managers.manager_name AS manager_name FROM people "
            "JOIN managers ON people.person_id = managers.person_id) AS "
            "pjoin WHERE pjoin.name = :name_1 ORDER BY pjoin.person_id"
        )

        self.assert_compile(stmt, expected)
        self.assert_compile(
            q._final_statement(legacy_query_style=False),
            expected,
        )

    def test_select_where_subclass(self):
        Engineer = self.classes.Engineer

        # what will *not* work with Core, that the ORM does for now,
        # is that if you do where/orderby Person.column, it will de-adapt
        # the Person columns from the polymorphic union

        stmt = (
            select(Engineer)
            .where(Engineer.name == "some name")
            .order_by(Engineer.person_id)
        )

        sess = fixture_session()
        q = (
            sess.query(Engineer)
            .filter(Engineer.name == "some name")
            .order_by(Engineer.person_id)
        )

        # expected SQL without disambiguating labels (kept for reference)
        plain_expected = (  # noqa
            "SELECT engineers.person_id, people.person_id, people.company_id, "
            "people.name, "
            "people.type, engineers.status, "
            "engineers.engineer_name, engineers.primary_language "
            "FROM people JOIN engineers "
            "ON people.person_id = engineers.person_id "
            "WHERE people.name = :name_1 ORDER BY engineers.person_id"
        )

        # when we have disambiguating labels turned on
        disambiguate_expected = (  # noqa
            "SELECT engineers.person_id, people.person_id AS person_id_1, "
            "people.company_id, "
            "people.name, "
            "people.type, engineers.status, "
            "engineers.engineer_name, engineers.primary_language "
            "FROM people JOIN engineers "
            "ON people.person_id = engineers.person_id "
            "WHERE people.name = :name_1 ORDER BY engineers.person_id"
        )

        # these change based on how we decide to apply labels
        # in context.py
        self.assert_compile(stmt, disambiguate_expected)
        self.assert_compile(
            q._final_statement(legacy_query_style=False),
            disambiguate_expected,
        )

    def test_select_where_columns_subclass(self):
        Engineer = self.classes.Engineer

        # what will *not* work with Core, that the ORM does for now,
        # is that if you do where/orderby Person.column, it will de-adapt
        # the Person columns from the polymorphic union

        # After many attempts to get the JOIN to render, by annotating
        # the columns with the "join" that they come from and trying to
        # get Select() to render out that join, there's no approach
        # that really works without stepping on other assumptions, so
        # add select_from(Engineer) explicitly.   It's still puzzling why the
        # ORM seems to know how to make this decision more effectively
        # when the select() has the same amount of information.
        stmt = (
            select(Engineer.person_id, Engineer.name)
            .where(Engineer.name == "some name")
            .select_from(Engineer)
            .order_by(Engineer.person_id)
        )

        sess = fixture_session()
        q = (
            sess.query(Engineer.person_id, Engineer.name)
            .filter(Engineer.name == "some name")
            .order_by(Engineer.person_id)
        )

        expected = (
            "SELECT engineers.person_id, people.name "
            "FROM people JOIN engineers "
            "ON people.person_id = engineers.person_id "
            "WHERE people.name = :name_1 ORDER BY engineers.person_id"
        )

        self.assert_compile(stmt, expected)
        self.assert_compile(
            q._final_statement(legacy_query_style=False),
            expected,
        )
class RelationshipNaturalInheritedTest(InheritedTest, AssertsCompiledSQL):
    """Joins along relationships into an inheritance hierarchy.

    The expected-SQL fragments below are class attributes so that the
    aliased-join subclasses (RelNaturalAliasedJoins*) can override them
    while reusing the same test methods.
    """

    __dialect__ = "default"

    # Company JOIN people directly (no polymorphic subquery)
    straight_company_to_person_expected = (
        "SELECT companies.company_id, companies.name FROM companies "
        "JOIN people ON companies.company_id = people.company_id"
    )

    # with_polymorphic(Person, "*") rendered as a nested LEFT OUTER JOIN
    default_pjoin = (
        "(people LEFT OUTER "
        "JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers "
        "ON people.person_id = managers.person_id "
        "LEFT OUTER JOIN boss ON managers.person_id = boss.boss_id) "
        "ON companies.company_id = people.company_id"
    )

    # with_polymorphic(..., aliased=True, flat=True): tables aliased in place
    flat_aliased_pjoin = (
        "(people AS people_1 LEFT OUTER JOIN engineers AS "
        "engineers_1 ON people_1.person_id = engineers_1.person_id "
        "LEFT OUTER JOIN managers AS managers_1 "
        "ON people_1.person_id = managers_1.person_id "
        "LEFT OUTER JOIN boss AS boss_1 ON "
        "managers_1.person_id = boss_1.boss_id) "
        "ON companies.company_id = people_1.company_id"
    )

    # with_polymorphic(..., aliased=True): full subquery wrapped as anon_1
    aliased_pjoin = (
        "(SELECT people.person_id AS people_person_id, people.company_id "
        "AS people_company_id, people.name AS people_name, people.type "
        "AS people_type, engineers.person_id AS engineers_person_id, "
        "engineers.status AS engineers_status, engineers.engineer_name "
        "AS engineers_engineer_name, engineers.primary_language "
        "AS engineers_primary_language, managers.person_id "
        "AS managers_person_id, managers.status AS managers_status, "
        "managers.manager_name AS managers_manager_name, "
        "boss.boss_id AS boss_boss_id, boss.golf_swing AS boss_golf_swing "
        "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
        "engineers.person_id LEFT OUTER JOIN managers ON "
        "people.person_id = managers.person_id LEFT OUTER JOIN boss "
        "ON managers.person_id = boss.boss_id) AS anon_1 "
        "ON companies.company_id = anon_1.people_company_id"
    )

    person_paperwork_expected = (
        "SELECT companies.company_id, companies.name FROM companies "
        "JOIN people ON companies.company_id = people.company_id "
        "JOIN paperwork ON people.person_id = paperwork.person_id"
    )

    c_to_p_whereclause = (
        "SELECT companies.company_id, companies.name FROM companies "
        "JOIN people ON companies.company_id = people.company_id "
        "WHERE people.name = :name_1"
    )

    poly_columns = "SELECT people.person_id FROM people"

    def test_straight(self):
        # core orm_join(), select().join() and legacy Query.join()
        # all produce the same SQL
        Company, Person, Manager, Engineer = self.classes(
            "Company", "Person", "Manager", "Engineer"
        )

        stmt1 = select(Company).select_from(
            orm_join(Company, Person, Company.employees)
        )
        stmt2 = select(Company).join(Company.employees)
        stmt3 = (
            fixture_session()
            .query(Company)
            .join(Company.employees)
            ._final_statement(legacy_query_style=False)
        )

        self.assert_compile(stmt1, self.straight_company_to_person_expected)
        self.assert_compile(stmt2, self.straight_company_to_person_expected)
        self.assert_compile(stmt3, self.straight_company_to_person_expected)

    def test_columns(self):
        Company, Person, Manager, Engineer = self.classes(
            "Company", "Person", "Manager", "Engineer"
        )

        stmt = select(Person.person_id)

        self.assert_compile(stmt, self.poly_columns)

    def test_straight_whereclause(self):
        Company, Person, Manager, Engineer = self.classes(
            "Company", "Person", "Manager", "Engineer"
        )

        stmt1 = (
            select(Company)
            .select_from(orm_join(Company, Person, Company.employees))
            .where(Person.name == "ed")
        )

        stmt2 = (
            select(Company).join(Company.employees).where(Person.name == "ed")
        )
        stmt3 = (
            fixture_session()
            .query(Company)
            .join(Company.employees)
            .filter(Person.name == "ed")
            ._final_statement(legacy_query_style=False)
        )

        self.assert_compile(stmt1, self.c_to_p_whereclause)
        self.assert_compile(stmt2, self.c_to_p_whereclause)
        self.assert_compile(stmt3, self.c_to_p_whereclause)

    def test_two_level(self):
        # a two-hop join: Company -> Person -> Paperwork
        Company, Person, Paperwork = self.classes(
            "Company", "Person", "Paperwork"
        )

        stmt1 = select(Company).select_from(
            orm_join(Company, Person, Company.employees).join(
                Paperwork, Person.paperwork
            )
        )

        stmt2 = select(Company).join(Company.employees).join(Person.paperwork)
        stmt3 = (
            fixture_session()
            .query(Company)
            .join(Company.employees)
            .join(Person.paperwork)
            ._final_statement(legacy_query_style=False)
        )

        self.assert_compile(stmt1, self.person_paperwork_expected)
        self.assert_compile(stmt2, self.person_paperwork_expected)
        self.assert_compile(stmt3, self.person_paperwork_expected)

    def test_wpoly_of_type(self):
        # join targeting a non-aliased with_polymorphic via of_type()
        Company, Person, Manager, Engineer = self.classes(
            "Company", "Person", "Manager", "Engineer"
        )

        p1 = with_polymorphic(Person, "*")

        stmt1 = select(Company).select_from(
            orm_join(Company, p1, Company.employees.of_type(p1))
        )

        stmt2 = select(Company).join(Company.employees.of_type(p1))
        stmt3 = (
            fixture_session()
            .query(Company)
            .join(Company.employees.of_type(p1))
            ._final_statement(legacy_query_style=False)
        )
        expected = (
            "SELECT companies.company_id, companies.name "
            "FROM companies JOIN %s" % self.default_pjoin
        )

        self.assert_compile(stmt1, expected)
        self.assert_compile(stmt2, expected)
        self.assert_compile(stmt3, expected)

    def test_wpoly_aliased_of_type(self):
        # aliased=True wraps the polymorphic join in a subquery
        Company, Person, Manager, Engineer = self.classes(
            "Company", "Person", "Manager", "Engineer"
        )
        s = fixture_session()

        p1 = with_polymorphic(Person, "*", aliased=True)

        stmt1 = select(Company).select_from(
            orm_join(Company, p1, Company.employees.of_type(p1))
        )

        stmt2 = select(Company).join(p1, Company.employees.of_type(p1))

        stmt3 = (
            s.query(Company)
            .join(Company.employees.of_type(p1))
            ._final_statement(legacy_query_style=False)
        )

        expected = (
            "SELECT companies.company_id, companies.name FROM companies "
            "JOIN %s" % self.aliased_pjoin
        )

        self.assert_compile(stmt1, expected)
        self.assert_compile(stmt2, expected)
        self.assert_compile(stmt3, expected)

    def test_wpoly_aliased_flat_of_type(self):
        # flat=True aliases the tables directly instead of a subquery
        Company, Person, Manager, Engineer = self.classes(
            "Company", "Person", "Manager", "Engineer"
        )
        p1 = with_polymorphic(Person, "*", aliased=True, flat=True)

        stmt1 = select(Company).select_from(
            orm_join(Company, p1, Company.employees.of_type(p1))
        )

        stmt2 = select(Company).join(p1, Company.employees.of_type(p1))

        stmt3 = (
            fixture_session()
            .query(Company)
            .join(Company.employees.of_type(p1))
            ._final_statement(legacy_query_style=False)
        )

        expected = (
            "SELECT companies.company_id, companies.name FROM companies "
            "JOIN %s" % self.flat_aliased_pjoin
        )

        self.assert_compile(stmt1, expected)
        self.assert_compile(stmt2, expected)
        self.assert_compile(stmt3, expected)
class RelNaturalAliasedJoinsTest(
    _poly_fixtures._PolymorphicAliasedJoins, RelationshipNaturalInheritedTest
):
    """Re-runs RelationshipNaturalInheritedTest against the aliased-joins
    polymorphic fixture, with TABLENAME_PLUS_COL labeling inside the
    polymorphic subquery ("pjoin"); only the expected-SQL constants are
    overridden here.
    """

    # this is the label style for the polymorphic selectable, not the
    # outside query
    label_style = LABEL_STYLE_TABLENAME_PLUS_COL

    straight_company_to_person_expected = (
        "SELECT companies.company_id, companies.name FROM companies "
        "JOIN (SELECT people.person_id AS people_person_id, people.company_id "
        "AS people_company_id, people.name AS people_name, people.type "
        "AS people_type, engineers.person_id AS engineers_person_id, "
        "engineers.status AS engineers_status, engineers.engineer_name "
        "AS engineers_engineer_name, engineers.primary_language AS "
        "engineers_primary_language, managers.person_id AS "
        "managers_person_id, managers.status AS managers_status, "
        "managers.manager_name AS managers_manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = "
        "engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
        "managers.person_id) AS pjoin ON companies.company_id = "
        "pjoin.people_company_id"
    )

    person_paperwork_expected = (
        "SELECT companies.company_id, companies.name FROM companies JOIN "
        "(SELECT people.person_id AS people_person_id, people.company_id "
        "AS people_company_id, people.name AS people_name, people.type "
        "AS people_type, engineers.person_id AS engineers_person_id, "
        "engineers.status AS engineers_status, engineers.engineer_name "
        "AS engineers_engineer_name, engineers.primary_language AS "
        "engineers_primary_language, managers.person_id AS "
        "managers_person_id, managers.status AS managers_status, "
        "managers.manager_name AS managers_manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin ON companies.company_id = pjoin.people_company_id "
        "JOIN paperwork ON pjoin.people_person_id = paperwork.person_id"
    )

    default_pjoin = (
        "(SELECT people.person_id AS people_person_id, "
        "people.company_id AS people_company_id, people.name AS people_name, "
        "people.type AS people_type, engineers.person_id AS "
        "engineers_person_id, engineers.status AS engineers_status, "
        "engineers.engineer_name AS engineers_engineer_name, "
        "engineers.primary_language AS engineers_primary_language, "
        "managers.person_id AS managers_person_id, managers.status "
        "AS managers_status, managers.manager_name AS managers_manager_name "
        "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
        "engineers.person_id LEFT OUTER JOIN managers "
        "ON people.person_id = managers.person_id) AS pjoin "
        "ON companies.company_id = pjoin.people_company_id"
    )

    # aliased / flat variants render the same subquery, labeled pjoin_1
    flat_aliased_pjoin = (
        "(SELECT people.person_id AS people_person_id, "
        "people.company_id AS people_company_id, people.name AS people_name, "
        "people.type AS people_type, engineers.person_id "
        "AS engineers_person_id, engineers.status AS engineers_status, "
        "engineers.engineer_name AS engineers_engineer_name, "
        "engineers.primary_language AS engineers_primary_language, "
        "managers.person_id AS managers_person_id, "
        "managers.status AS managers_status, managers.manager_name "
        "AS managers_manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin_1 ON companies.company_id = pjoin_1.people_company_id"
    )

    aliased_pjoin = (
        "(SELECT people.person_id AS people_person_id, people.company_id "
        "AS people_company_id, people.name AS people_name, "
        "people.type AS people_type, engineers.person_id AS "
        "engineers_person_id, engineers.status AS engineers_status, "
        "engineers.engineer_name AS engineers_engineer_name, "
        "engineers.primary_language AS engineers_primary_language, "
        "managers.person_id AS managers_person_id, managers.status "
        "AS managers_status, managers.manager_name AS managers_manager_name "
        "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
        "engineers.person_id LEFT OUTER JOIN managers "
        "ON people.person_id = managers.person_id) AS pjoin_1 "
        "ON companies.company_id = pjoin_1.people_company_id"
    )

    c_to_p_whereclause = (
        "SELECT companies.company_id, companies.name FROM companies "
        "JOIN (SELECT people.person_id AS people_person_id, "
        "people.company_id AS people_company_id, people.name AS people_name, "
        "people.type AS people_type, engineers.person_id AS "
        "engineers_person_id, engineers.status AS engineers_status, "
        "engineers.engineer_name AS engineers_engineer_name, "
        "engineers.primary_language AS engineers_primary_language, "
        "managers.person_id AS managers_person_id, managers.status "
        "AS managers_status, managers.manager_name AS managers_manager_name "
        "FROM people LEFT OUTER JOIN engineers "
        "ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin ON companies.company_id = pjoin.people_company_id "
        "WHERE pjoin.people_name = :people_name_1"
    )

    poly_columns = (
        "SELECT pjoin.people_person_id FROM (SELECT people.person_id AS "
        "people_person_id, people.company_id AS people_company_id, "
        "people.name AS people_name, people.type AS people_type, "
        "engineers.person_id AS engineers_person_id, engineers.status "
        "AS engineers_status, engineers.engineer_name AS "
        "engineers_engineer_name, engineers.primary_language AS "
        "engineers_primary_language, managers.person_id AS "
        "managers_person_id, managers.status AS managers_status, "
        "managers.manager_name AS managers_manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin"
    )
class RelNaturalAliasedJoinsDisamTest(
    _poly_fixtures._PolymorphicAliasedJoins, RelationshipNaturalInheritedTest
):
    """Same as RelNaturalAliasedJoinsTest but with DISAMBIGUATE_ONLY
    labeling inside the polymorphic subquery: duplicate column names get
    numeric suffixes (person_id_1, status_1, ...) instead of
    tablename-prefixed labels.
    """

    # this is the label style for the polymorphic selectable, not the
    # outside query
    label_style = LABEL_STYLE_DISAMBIGUATE_ONLY

    straight_company_to_person_expected = (
        "SELECT companies.company_id, companies.name FROM companies JOIN "
        "(SELECT people.person_id AS person_id, "
        "people.company_id AS company_id, people.name AS name, "
        "people.type AS type, engineers.person_id AS person_id_1, "
        "engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, "
        "engineers.primary_language AS primary_language, "
        "managers.person_id AS person_id_2, managers.status AS status_1, "
        "managers.manager_name AS manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin ON companies.company_id = pjoin.company_id"
    )

    person_paperwork_expected = (
        "SELECT companies.company_id, companies.name FROM companies "
        "JOIN (SELECT people.person_id AS person_id, people.company_id "
        "AS company_id, people.name AS name, people.type AS type, "
        "engineers.person_id AS person_id_1, engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, "
        "engineers.primary_language AS primary_language, managers.person_id "
        "AS person_id_2, managers.status AS status_1, managers.manager_name "
        "AS manager_name FROM people LEFT OUTER JOIN engineers "
        "ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin ON companies.company_id = pjoin.company_id "
        "JOIN paperwork ON pjoin.person_id = paperwork.person_id"
    )

    default_pjoin = (
        "(SELECT people.person_id AS person_id, people.company_id AS "
        "company_id, people.name AS name, people.type AS type, "
        "engineers.person_id AS person_id_1, engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, engineers.primary_language "
        "AS primary_language, managers.person_id AS person_id_2, "
        "managers.status AS status_1, managers.manager_name AS manager_name "
        "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
        "engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
        "managers.person_id) AS pjoin "
        "ON companies.company_id = pjoin.company_id"
    )

    # aliased / flat variants render the same subquery, labeled pjoin_1
    flat_aliased_pjoin = (
        "(SELECT people.person_id AS person_id, people.company_id AS "
        "company_id, people.name AS name, people.type AS type, "
        "engineers.person_id AS person_id_1, engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, "
        "engineers.primary_language AS primary_language, "
        "managers.person_id AS person_id_2, managers.status AS status_1, "
        "managers.manager_name AS manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin_1 ON companies.company_id = pjoin_1.company_id"
    )

    aliased_pjoin = (
        "(SELECT people.person_id AS person_id, people.company_id AS "
        "company_id, people.name AS name, people.type AS type, "
        "engineers.person_id AS person_id_1, engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, engineers.primary_language "
        "AS primary_language, managers.person_id AS person_id_2, "
        "managers.status AS status_1, managers.manager_name AS manager_name "
        "FROM people LEFT OUTER JOIN engineers ON people.person_id = "
        "engineers.person_id LEFT OUTER JOIN managers ON people.person_id = "
        "managers.person_id) AS pjoin_1 "
        "ON companies.company_id = pjoin_1.company_id"
    )

    c_to_p_whereclause = (
        "SELECT companies.company_id, companies.name FROM companies JOIN "
        "(SELECT people.person_id AS person_id, "
        "people.company_id AS company_id, people.name AS name, "
        "people.type AS type, engineers.person_id AS person_id_1, "
        "engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, "
        "engineers.primary_language AS primary_language, "
        "managers.person_id AS person_id_2, managers.status AS status_1, "
        "managers.manager_name AS manager_name FROM people "
        "LEFT OUTER JOIN engineers ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers ON people.person_id = managers.person_id) "
        "AS pjoin ON companies.company_id = pjoin.company_id "
        "WHERE pjoin.name = :name_1"
    )

    poly_columns = (
        "SELECT pjoin.person_id FROM (SELECT people.person_id AS "
        "person_id, people.company_id AS company_id, people.name AS name, "
        "people.type AS type, engineers.person_id AS person_id_1, "
        "engineers.status AS status, "
        "engineers.engineer_name AS engineer_name, "
        "engineers.primary_language AS primary_language, "
        "managers.person_id AS person_id_2, "
        "managers.status AS status_1, managers.manager_name AS manager_name "
        "FROM people LEFT OUTER JOIN engineers "
        "ON people.person_id = engineers.person_id "
        "LEFT OUTER JOIN managers "
        "ON people.person_id = managers.person_id) AS pjoin"
    )
class RawSelectTest(QueryTest, AssertsCompiledSQL):
"""older tests from test_query. Here, they are converted to use
future selects with ORM compilation.
"""
__dialect__ = "default"
def test_select_from_entity(self):
    User = self.classes.User

    # an entity passed to select_from() renders its mapped table
    stmt = select(literal_column("*")).select_from(User)
    self.assert_compile(stmt, "SELECT * FROM users")
def test_where_relationship(self):
    User = self.classes.User

    expected = (
        "SELECT users.id, users.name FROM users, addresses "
        "WHERE users.id = addresses.user_id"
    )

    # a bare relationship in WHERE renders the join criteria implicitly
    core_stmt = select(User).where(User.addresses)
    legacy_stmt = (
        fixture_session()
        .query(User)
        .filter(User.addresses)
        ._final_statement(legacy_query_style=False)
    )

    self.assert_compile(core_stmt, expected)
    self.assert_compile(legacy_stmt, expected)
def test_where_m2m_relationship(self):
    Item = self.classes.Item

    expected = (
        "SELECT items.id, items.description FROM items, "
        "item_keywords AS item_keywords_1, keywords "
        "WHERE items.id = item_keywords_1.item_id "
        "AND keywords.id = item_keywords_1.keyword_id"
    )

    # many-to-many: both the secondary and target table join criteria
    # are rendered implicitly
    core_stmt = select(Item).where(Item.keywords)
    legacy_stmt = (
        fixture_session()
        .query(Item)
        .filter(Item.keywords)
        ._final_statement(legacy_query_style=False)
    )

    self.assert_compile(core_stmt, expected)
    self.assert_compile(legacy_stmt, expected)
def test_inline_select_from_entity(self):
    User = self.classes.User

    expected = "SELECT * FROM users"

    core_stmt = select(literal_column("*")).select_from(User)
    legacy_stmt = (
        fixture_session()
        .query(literal_column("*"))
        .select_from(User)
        ._final_statement(legacy_query_style=False)
    )

    self.assert_compile(core_stmt, expected)
    self.assert_compile(legacy_stmt, expected)
def test_select_from_aliased_entity(self):
    User = self.classes.User
    user_alias = aliased(User, name="ua")

    expected = "SELECT * FROM users AS ua"

    core_stmt = select(literal_column("*")).select_from(user_alias)
    legacy_stmt = (
        fixture_session()
        .query(literal_column("*"))
        .select_from(user_alias)
        ._final_statement(legacy_query_style=False)
    )

    self.assert_compile(core_stmt, expected)
    self.assert_compile(legacy_stmt, expected)
def test_correlate_entity(self):
    User = self.classes.User
    Address = self.classes.Address

    expected = (
        "SELECT users.name, addresses.id, "
        "(SELECT count(addresses.id) AS count_1 "
        "FROM addresses WHERE users.id = addresses.user_id) AS anon_1 "
        "FROM users, addresses"
    )

    # scalar subquery correlated against the User entity
    core_stmt = select(
        User.name,
        Address.id,
        select(func.count(Address.id))
        .where(User.id == Address.user_id)
        .correlate(User)
        .scalar_subquery(),
    )
    legacy_stmt = (
        fixture_session()
        .query(
            User.name,
            Address.id,
            select(func.count(Address.id))
            .where(User.id == Address.user_id)
            .correlate(User)
            .scalar_subquery(),
        )
        ._final_statement(legacy_query_style=False)
    )

    self.assert_compile(core_stmt, expected)
    self.assert_compile(legacy_stmt, expected)
def test_correlate_aliased_entity(self):
User = self.classes.User
Address = self.classes.Address
uu = aliased(User, name="uu")
stmt1 = select(
uu.name,
Address.id,
select(func.count(Address.id))
.where(uu.id == Address.user_id)
.correlate(uu)
.scalar_subquery(),
)
stmt2 = (
fixture_session()
.query(
uu.name,
Address.id,
select(func.count(Address.id))
.where(uu.id == Address.user_id)
.correlate(uu)
.scalar_subquery(),
)
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT uu.name, addresses.id, "
"(SELECT count(addresses.id) AS count_1 "
"FROM addresses WHERE uu.id = addresses.user_id) AS anon_1 "
"FROM users AS uu, addresses"
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_entity(self):
User = self.classes.User
expected = "SELECT users.id, users.name FROM users"
stmt1 = select(User)
stmt2 = (
fixture_session()
.query(User)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_columns(self):
User = self.classes.User
expected = "SELECT users.id, users.name FROM users"
stmt1 = select(User.id, User.name)
stmt2 = (
fixture_session()
.query(User.id, User.name)
._final_statement(legacy_query_style=False)
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_aliased_columns(self):
User = self.classes.User
ua = aliased(User, name="ua")
stmt1 = select(ua.id, ua.name)
stmt2 = (
fixture_session()
.query(ua.id, ua.name)
._final_statement(legacy_query_style=False)
)
expected = "SELECT ua.id, ua.name FROM users AS ua"
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_columns_clause_aliased_entity(self):
User = self.classes.User
ua = aliased(User, name="ua")
stmt1 = select(ua)
stmt2 = (
fixture_session()
.query(ua)
._final_statement(legacy_query_style=False)
)
expected = "SELECT ua.id, ua.name FROM users AS ua"
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
def test_core_join_in_select_from_no_onclause(self):
User = self.classes.User
Address = self.classes.Address
self.assert_compile(
select(User).select_from(core_join(User, Address)),
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
def test_join_to_entity_no_onclause(self):
User = self.classes.User
Address = self.classes.Address
self.assert_compile(
select(User).join(Address),
"SELECT users.id, users.name FROM users "
"JOIN addresses ON users.id = addresses.user_id",
)
def test_insert_from_query(self):
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = s.query(User.id, User.name).filter_by(name="ed")
self.assert_compile(
insert(Address).from_select(("id", "email_address"), q),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
def test_insert_from_query_col_attr(self):
User = self.classes.User
Address = self.classes.Address
s = fixture_session()
q = s.query(User.id, User.name).filter_by(name="ed")
self.assert_compile(
insert(Address).from_select(
(Address.id, Address.email_address), q
),
"INSERT INTO addresses (id, email_address) "
"SELECT users.id AS users_id, users.name AS users_name "
"FROM users WHERE users.name = :name_1",
)
def test_update_from_entity(self):
from sqlalchemy.sql import update
User = self.classes.User
self.assert_compile(
update(User), "UPDATE users SET id=:id, name=:name"
)
self.assert_compile(
update(User).values(name="ed").where(User.id == 5),
"UPDATE users SET name=:name WHERE users.id = :id_1",
checkparams={"id_1": 5, "name": "ed"},
)
self.assert_compile(
update(User).values({User.name: "ed"}).where(User.id == 5),
"UPDATE users SET name=:name WHERE users.id = :id_1",
checkparams={"id_1": 5, "name": "ed"},
)
def test_delete_from_entity(self):
from sqlalchemy.sql import delete
User = self.classes.User
self.assert_compile(delete(User), "DELETE FROM users")
self.assert_compile(
delete(User).where(User.id == 5),
"DELETE FROM users WHERE users.id = :id_1",
checkparams={"id_1": 5},
)
def test_insert_from_entity(self):
from sqlalchemy.sql import insert
User = self.classes.User
self.assert_compile(
insert(User), "INSERT INTO users (id, name) VALUES (:id, :name)"
)
self.assert_compile(
insert(User).values(name="ed"),
"INSERT INTO users (name) VALUES (:name)",
checkparams={"name": "ed"},
)
def test_col_prop_builtin_function(self):
class Foo:
pass
self.mapper_registry.map_imperatively(
Foo,
self.tables.users,
properties={
"foob": column_property(
func.coalesce(self.tables.users.c.name)
)
},
)
stmt1 = select(Foo).where(Foo.foob == "somename").order_by(Foo.foob)
stmt2 = (
fixture_session()
.query(Foo)
.filter(Foo.foob == "somename")
.order_by(Foo.foob)
._final_statement(legacy_query_style=False)
)
expected = (
"SELECT coalesce(users.name) AS coalesce_1, "
"users.id, users.name FROM users "
"WHERE coalesce(users.name) = :param_1 "
"ORDER BY coalesce_1"
)
self.assert_compile(stmt1, expected)
self.assert_compile(stmt2, expected)
class CorrelateTest(fixtures.DeclarativeMappedTest, _CoreCorrelateTest):
    """Run the Core correlation test suite against declaratively mapped
    entities instead of plain Core tables."""

    @classmethod
    def setup_classes(cls):
        # Two single-column mapped classes.  The hybrid ``c`` property
        # returns the entity itself so that ``t1.c.a`` (the Core-style
        # column-collection access used by _CoreCorrelateTest) resolves
        # to the mapped ``a`` attribute.
        Base = cls.DeclarativeBasic
        class T1(Base):
            __tablename__ = "t1"
            a = Column(Integer, primary_key=True)
            @hybridproperty
            def c(self):
                # stand in for a Table's ``.c`` collection
                return self
        class T2(Base):
            __tablename__ = "t2"
            a = Column(Integer, primary_key=True)
            @hybridproperty
            def c(self):
                # stand in for a Table's ``.c`` collection
                return self

    def _fixture(self):
        # Provide (entity, entity, statement) correlating T1 to T2 via
        # the shared ``a`` column, as expected by _CoreCorrelateTest.
        t1, t2 = self.classes("T1", "T2")
        return t1, t2, select(t1).where(t1.c.a == t2.c.a)
|
{
"content_hash": "10e459775011c3c98fd8c496d85d052c",
"timestamp": "",
"source": "github",
"line_count": 2606,
"max_line_length": 79,
"avg_line_length": 34.93860322333077,
"alnum_prop": 0.5690719384953322,
"repo_name": "sqlalchemy/sqlalchemy",
"id": "5c2f107f45ce8410023e744459796e97c1bd12fc",
"size": "91050",
"binary": false,
"copies": "2",
"ref": "refs/heads/main",
"path": "test/orm/test_core_compilation.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "Cython",
"bytes": "21698"
},
{
"name": "Python",
"bytes": "16838583"
}
],
"symlink_target": ""
}
|
from __future__ import unicode_literals
import frappe
from frappe import _
from frappe.utils import flt, fmt_money
#from shopping_cart.templates.utils import get_transaction_context

# Website page directives: render fresh on every request and exclude
# this page from the generated sitemap.
no_cache = 1
no_sitemap = 1
# allow_guest must be a boolean; the string 'True' only worked because any
# truthy value is accepted by the whitelist check.
@frappe.whitelist(allow_guest=True)
def get_products():
    """Return website-visible products with their Standard Selling prices.

    Returns:
        Row tuples of (item_code, price_list_rate, validity, no_of_users)
        for every Item with show_on_website=1 that has a price in the
        "Standard Selling" price list.
    """
    res = frappe.db.sql("select i.item_code,ip.price_list_rate,i.validity,i.no_of_users from `tabItem Price` ip join `tabItem` i on ip.item_code=i.item_code where price_list='Standard Selling' and show_on_website=1")
    # NOTE(review): debug output left in by the original author; consider
    # removing once the endpoint is verified in production.
    frappe.errprint(res)
    return res
|
{
"content_hash": "72ac2703ac5130776abdde1c2a222556",
"timestamp": "",
"source": "github",
"line_count": 16,
"max_line_length": 211,
"avg_line_length": 43.6875,
"alnum_prop": 0.7510729613733905,
"repo_name": "gangadhar-kadam/lgnlvefrape",
"id": "65e34493279b8385af8169335152c0b9ae357f15",
"size": "830",
"binary": false,
"copies": "2",
"ref": "refs/heads/master",
"path": "frappe/templates/pages/pricing.py",
"mode": "33188",
"license": "mit",
"language": [
{
"name": "CSS",
"bytes": "87202"
},
{
"name": "HTML",
"bytes": "77840"
},
{
"name": "JavaScript",
"bytes": "1555841"
},
{
"name": "Python",
"bytes": "972928"
}
],
"symlink_target": ""
}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.