| code (string, lengths 22-1.05M) | apis (list, lengths 1-3.31k) | extract_api (string, lengths 75-3.25M) |
|---|---|---|
import pytest
from dbt.tests.adapter.basic.test_adapter_methods import BaseAdapterMethod
from dbt.tests.adapter.basic.test_base import BaseSimpleMaterializations
from dbt.tests.adapter.basic.test_singular_tests import BaseSingularTests
from dbt.tests.adapter.basic.test_singular_tests_ephemeral import BaseSingularTestsEphemeral
from dbt.tests.adapter.basic.test_empty import BaseEmpty
from dbt.tests.adapter.basic.test_ephemeral import BaseEphemeral
from dbt.tests.adapter.basic.test_incremental import BaseIncremental
from dbt.tests.adapter.basic.test_generic_tests import BaseGenericTests
from dbt.tests.adapter.basic.test_snapshot_check_cols import BaseSnapshotCheckCols
from dbt.tests.adapter.basic.test_snapshot_timestamp import BaseSnapshotTimestamp
class TestAdapterMethods(BaseAdapterMethod):
pass
class TestSimpleMaterializationsTrino(BaseSimpleMaterializations):
pass
class TestSingularTestsTrino(BaseSingularTests):
pass
class TestSingularTestsEphemeralTrino(BaseSingularTestsEphemeral):
pass
class TestEmptyTrino(BaseEmpty):
pass
class TestEphemeralTrino(BaseEphemeral):
pass
class TestIncrementalTrino(BaseIncremental):
pass
class TestGenericTestsTrino(BaseGenericTests):
pass
@pytest.mark.xfail(reason="Snapshot not supported in dbt-trino")
class TestSnapshotCheckColsTrino(BaseSnapshotCheckCols):
pass
@pytest.mark.xfail(reason="Snapshot not supported in dbt-trino")
class TestSnapshotTimestampTrino(BaseSnapshotTimestamp):
pass
|
[
"pytest.mark.xfail"
] |
[((1243, 1306), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Snapshot not supported in dbt-trino"""'}), "(reason='Snapshot not supported in dbt-trino')\n", (1260, 1306), False, 'import pytest\n'), ((1376, 1439), 'pytest.mark.xfail', 'pytest.mark.xfail', ([], {'reason': '"""Snapshot not supported in dbt-trino"""'}), "(reason='Snapshot not supported in dbt-trino')\n", (1393, 1439), False, 'import pytest\n')]
|
"""
author: deadc0de6 (https://github.com/deadc0de6)
Copyright (c) 2017, deadc0de6
basic unittest for the import function
"""
import unittest
import os
import yaml
from dotdrop.dotdrop import importer
from tests.helpers import *
class TestImport(unittest.TestCase):
CONFIG_BACKUP = False
CONFIG_CREATE = True
CONFIG_DOTPATH = 'dotfiles'
CONFIG_NAME = 'config.yaml'
def load_yaml(self, path):
'''Load yaml to dict'''
self.assertTrue(os.path.exists(path))
content = ''
with open(path, 'r') as f:
content = yaml.load(f, Loader=yaml.SafeLoader)
return content
def get_path_strip_version(self, path):
'''Strip a file path for conf tests'''
self.assertTrue(os.path.exists(path))
strip = path
home = os.path.expanduser('~')
if strip.startswith(home):
strip = strip[len(home):]
strip = strip.lstrip('.' + os.sep)
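# e.g. '/home/user/.vimrc' -> 'vimrc' (illustrative path, not from the original)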
return strip
def assert_file(self, path, conf, profile):
'''Make sure "path" has been inserted in "conf" for "profile"'''
strip = self.get_path_strip_version(path)
self.assertTrue(strip in [x.src for x in conf.get_dotfiles(profile)])
dsts = [os.path.expanduser(x.dst) for x in conf.get_dotfiles(profile)]
self.assertTrue(path in dsts)
def assert_in_yaml(self, path, dic):
'''Make sure "path" is in the "dic" representing the yaml file'''
strip = self.get_path_strip_version(path)
self.assertTrue(strip in [x['src'] for x in dic['dotfiles'].values()])
dsts = [os.path.expanduser(x['dst']) for x in dic['dotfiles'].values()]
self.assertTrue(path in dsts)
def test_import(self):
'''Test the import function'''
src = get_tempfolder()
self.assertTrue(os.path.exists(src))
self.addCleanup(clean, src)
dotfilespath = get_tempfolder()
self.assertTrue(os.path.exists(dotfilespath))
self.addCleanup(clean, dotfilespath)
profile = get_string(10)
confpath = create_fake_config(dotfilespath,
configname=self.CONFIG_NAME,
dotpath=self.CONFIG_DOTPATH,
backup=self.CONFIG_BACKUP,
create=self.CONFIG_CREATE)
self.assertTrue(os.path.exists(confpath))
conf, opts = load_config(confpath, self.CONFIG_DOTPATH, profile)
# create some random dotfiles
dotfile1, content1 = create_random_file(src)
self.addCleanup(clean, dotfile1)
dotfile2, content2 = create_random_file(os.path.expanduser('~'))
self.addCleanup(clean, dotfile2)
homeconf = os.path.join(os.path.expanduser('~'), '.config')
if not os.path.exists(homeconf):
os.mkdir(homeconf)
self.addCleanup(clean, homeconf)
dotconfig = os.path.join(homeconf, get_string(5))
create_dir(dotconfig)
self.addCleanup(clean, dotconfig)
dotfile3, content3 = create_random_file(dotconfig)
dotfile4, content4 = create_random_file(homeconf)
self.addCleanup(clean, dotfile4)
# fake a folder containing dotfiles
dotfile5 = get_tempfolder()
self.assertTrue(os.path.exists(dotfile5))
self.addCleanup(clean, dotfile5)
sub1, _ = create_random_file(dotfile5)
sub2, _ = create_random_file(dotfile5)
# import the dotfiles
dfiles = [dotfile1, dotfile2, dotfile3, dotfile4, dotfile5]
importer(opts, conf, dfiles)
# reload the config
conf, opts = load_config(confpath, self.CONFIG_DOTPATH, profile)
# test dotfiles in config class
self.assertTrue(profile in conf.get_profiles())
self.assert_file(dotfile1, conf, profile)
self.assert_file(dotfile2, conf, profile)
self.assert_file(dotfile3, conf, profile)
self.assert_file(dotfile4, conf, profile)
self.assert_file(dotfile5, conf, profile)
# test dotfiles in yaml file
y = self.load_yaml(confpath)
self.assert_in_yaml(dotfile1, y)
self.assert_in_yaml(dotfile2, y)
self.assert_in_yaml(dotfile3, y)
self.assert_in_yaml(dotfile4, y)
self.assert_in_yaml(dotfile5, y)
# test dotfiles on filesystem
self.assertTrue(os.path.exists(os.path.join(dotfilespath, dotfile1)))
self.assertTrue(os.path.exists(os.path.join(dotfilespath, dotfile2)))
self.assertTrue(os.path.exists(os.path.join(dotfilespath, dotfile3)))
self.assertTrue(os.path.exists(os.path.join(dotfilespath, dotfile4)))
self.assertTrue(os.path.exists(os.path.join(dotfilespath, dotfile5)))
self.assertTrue(os.path.exists(os.path.join(dotfilespath,
dotfile5, sub1)))
self.assertTrue(os.path.exists(os.path.join(dotfilespath,
dotfile5, sub2)))
def main():
unittest.main()
if __name__ == '__main__':
main()
|
[
"unittest.main",
"os.mkdir",
"yaml.load",
"os.path.join",
"os.path.exists",
"dotdrop.dotdrop.importer",
"os.path.expanduser"
] |
[((5039, 5054), 'unittest.main', 'unittest.main', ([], {}), '()\n', (5052, 5054), False, 'import unittest\n'), ((786, 809), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (804, 809), False, 'import os\n'), ((3562, 3590), 'dotdrop.dotdrop.importer', 'importer', (['opts', 'conf', 'dfiles'], {}), '(opts, conf, dfiles)\n', (3570, 3590), False, 'from dotdrop.dotdrop import importer\n'), ((476, 496), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (490, 496), False, 'import os\n'), ((576, 588), 'yaml.load', 'yaml.load', (['f'], {}), '(f)\n', (585, 588), False, 'import yaml\n'), ((728, 748), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (742, 748), False, 'import os\n'), ((1213, 1238), 'os.path.expanduser', 'os.path.expanduser', (['x.dst'], {}), '(x.dst)\n', (1231, 1238), False, 'import os\n'), ((1575, 1603), 'os.path.expanduser', 'os.path.expanduser', (["x['dst']"], {}), "(x['dst'])\n", (1593, 1603), False, 'import os\n'), ((1799, 1818), 'os.path.exists', 'os.path.exists', (['src'], {}), '(src)\n', (1813, 1818), False, 'import os\n'), ((1921, 1949), 'os.path.exists', 'os.path.exists', (['dotfilespath'], {}), '(dotfilespath)\n', (1935, 1949), False, 'import os\n'), ((2370, 2394), 'os.path.exists', 'os.path.exists', (['confpath'], {}), '(confpath)\n', (2384, 2394), False, 'import os\n'), ((2650, 2673), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2668, 2673), False, 'import os\n'), ((2748, 2771), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (2766, 2771), False, 'import os\n'), ((2799, 2823), 'os.path.exists', 'os.path.exists', (['homeconf'], {}), '(homeconf)\n', (2813, 2823), False, 'import os\n'), ((2837, 2855), 'os.mkdir', 'os.mkdir', (['homeconf'], {}), '(homeconf)\n', (2845, 2855), False, 'import os\n'), ((3294, 3318), 'os.path.exists', 'os.path.exists', (['dotfile5'], {}), '(dotfile5)\n', (3308, 3318), False, 'import os\n'), ((4398, 4434), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile1'], {}), '(dotfilespath, dotfile1)\n', (4410, 4434), False, 'import os\n'), ((4476, 4512), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile2'], {}), '(dotfilespath, dotfile2)\n', (4488, 4512), False, 'import os\n'), ((4554, 4590), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile3'], {}), '(dotfilespath, dotfile3)\n', (4566, 4590), False, 'import os\n'), ((4632, 4668), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile4'], {}), '(dotfilespath, dotfile4)\n', (4644, 4668), False, 'import os\n'), ((4710, 4746), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile5'], {}), '(dotfilespath, dotfile5)\n', (4722, 4746), False, 'import os\n'), ((4788, 4830), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile5', 'sub1'], {}), '(dotfilespath, dotfile5, sub1)\n', (4800, 4830), False, 'import os\n'), ((4924, 4966), 'os.path.join', 'os.path.join', (['dotfilespath', 'dotfile5', 'sub2'], {}), '(dotfilespath, dotfile5, sub2)\n', (4936, 4966), False, 'import os\n')]
|
import os
import sys
os.environ["OMP_NUM_THREADS"] = "1"
import tensorflow as tf
import numpy as np
import time
n = 8192
dtype = tf.float32
with tf.device("/gpu:0"):
matrix1 = tf.Variable(tf.ones((n, n), dtype=dtype))
matrix2 = tf.Variable(tf.ones((n, n), dtype=dtype))
product = tf.matmul(matrix1, matrix2)
# avoid optimizing away redundant nodes
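# (opt_level L0 disables the graph optimizer's constant folding and
# common-subexpression elimination, so the matmul is re-executed on each
# sess.run rather than being folded away)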
config = tf.ConfigProto(graph_options=tf.GraphOptions(optimizer_options=tf.OptimizerOptions(opt_level=tf.OptimizerOptions.L0)))
sess = tf.Session(config=config)
sess.run(tf.global_variables_initializer())
iters = 10
# pre-warming
sess.run(product.op)
start = time.time()
for i in range(iters):
sess.run(product.op)
end = time.time()
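# FLOP count for an n x n matmul: each of the n**2 output entries takes
# n multiplications and n-1 additions, so ops = n**3 + n**2*(n-1) ~ 2*n**3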
ops = n**3 + (n-1)*n**2 # n^2*(n-1) additions, n^3 multiplications
elapsed = (end - start)
rate = iters*ops/elapsed/10**9
print('\nGPU: %d x %d matmul took: %.2f sec, %.2f G ops/sec' % (n, n, elapsed/iters,rate,))
matrix1 = np.ones((n,n))
matrix2 = np.ones((n,n))
start = time.time()
for i in range(iters):
np.matmul(matrix1,matrix2)
end = time.time()
ops = n**3 + (n-1)*n**2 # n^2*(n-1) additions, n^3 multiplications
elapsed = (end - start)
rate = iters*ops/elapsed/10**9
print('\nCPU: %d x %d matmul took: %.2f sec, %.2f G ops/sec' % (n, n, elapsed/iters,rate,))
|
[
"tensorflow.ones",
"tensorflow.global_variables_initializer",
"tensorflow.device",
"tensorflow.Session",
"numpy.ones",
"tensorflow.OptimizerOptions",
"time.time",
"tensorflow.matmul",
"numpy.matmul"
] |
[((500, 525), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (510, 525), True, 'import tensorflow as tf\n'), ((626, 637), 'time.time', 'time.time', ([], {}), '()\n', (635, 637), False, 'import time\n'), ((690, 701), 'time.time', 'time.time', ([], {}), '()\n', (699, 701), False, 'import time\n'), ((928, 943), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (935, 943), True, 'import numpy as np\n'), ((953, 968), 'numpy.ones', 'np.ones', (['(n, n)'], {}), '((n, n))\n', (960, 968), True, 'import numpy as np\n'), ((977, 988), 'time.time', 'time.time', ([], {}), '()\n', (986, 988), False, 'import time\n'), ((1047, 1058), 'time.time', 'time.time', ([], {}), '()\n', (1056, 1058), False, 'import time\n'), ((148, 167), 'tensorflow.device', 'tf.device', (['"""/gpu:0"""'], {}), "('/gpu:0')\n", (157, 167), True, 'import tensorflow as tf\n'), ((295, 322), 'tensorflow.matmul', 'tf.matmul', (['matrix1', 'matrix2'], {}), '(matrix1, matrix2)\n', (304, 322), True, 'import tensorflow as tf\n'), ((536, 569), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (567, 569), True, 'import tensorflow as tf\n'), ((1014, 1041), 'numpy.matmul', 'np.matmul', (['matrix1', 'matrix2'], {}), '(matrix1, matrix2)\n', (1023, 1041), True, 'import numpy as np\n'), ((195, 223), 'tensorflow.ones', 'tf.ones', (['(n, n)'], {'dtype': 'dtype'}), '((n, n), dtype=dtype)\n', (202, 223), True, 'import tensorflow as tf\n'), ((251, 279), 'tensorflow.ones', 'tf.ones', (['(n, n)'], {'dtype': 'dtype'}), '((n, n), dtype=dtype)\n', (258, 279), True, 'import tensorflow as tf\n'), ((437, 490), 'tensorflow.OptimizerOptions', 'tf.OptimizerOptions', ([], {'opt_level': 'tf.OptimizerOptions.L0'}), '(opt_level=tf.OptimizerOptions.L0)\n', (456, 490), True, 'import tensorflow as tf\n')]
|
from functools import singledispatch
from typing import Collection, Hashable
import numpy
import pandas
import xarray
from .proper_unstack import proper_unstack
@singledispatch
def cast(obj, brief_dims: Collection[Hashable]):
"""Helper function of :func:`recursive_diff`.
Cast objects into simpler object types:
- Cast tuple to list
- Cast frozenset to set
- Cast all numpy-based objects to :class:`xarray.DataArray`, as it is the
most generic format that can describe all use cases:
- :class:`numpy.ndarray`
- :class:`pandas.Series`
- :class:`pandas.DataFrame`
- :class:`pandas.Index`, except :class:`pandas.RangeIndex`, which is
instead returned unaltered
- :class:`xarray.Dataset`
The data will be potentially wrapped by a dict to hold the various
attributes and marked so that it doesn't trigger an infinite recursion.
- Do nothing for any other object types.
:param obj:
complex object that must be simplified
:param brief_dims:
xarray dimensions that must be compacted.
See documentation on :func:`recursive_diff`.
:returns:
simpler object to compare
"""
# This is a single dispatch function, defining the default for any
# classes not explicitly registered below.
return obj
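# e.g. cast((1, 2), brief_dims=()) dispatches to the tuple variant registered
# below and returns [1, 2] (illustrative call, not part of the original module)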
@cast.register(numpy.integer)
def cast_npint(obj: numpy.integer, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for all numpy scalar
integers (not to be confused with numpy arrays of integers)
"""
return int(obj)
@cast.register(numpy.floating)
def cast_npfloat(obj: numpy.floating, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for all numpy scalar
floats (not to be confused with numpy arrays of floats)
"""
return float(obj)
@cast.register(numpy.ndarray)
def cast_nparray(obj: numpy.ndarray, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`numpy.ndarray`.
Map to a DataArray with dimensions dim_0, dim_1, ... and
RangeIndex() as the coords.
"""
data = _strip_dataarray(xarray.DataArray(obj), brief_dims)
obj = {f"dim_{i}": pandas.RangeIndex(size) for i, size in enumerate(obj.shape)}
obj["data"] = data
return obj
@cast.register(pandas.Series)
def cast_series(obj: pandas.Series, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`pandas.Series`.
Map to a DataArray.
"""
return {
"name": obj.name,
"data": _strip_dataarray(xarray.DataArray(obj, dims=["index"]), brief_dims),
"index": obj.index,
}
@cast.register(pandas.DataFrame)
def cast_dataframe(obj: pandas.DataFrame, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`pandas.DataFrame`.
Map to a DataArray.
TODO: proper support for columns with different dtypes. Right now
they are cast to the closest common type by DataFrame.values.
"""
return {
"data": _strip_dataarray(
xarray.DataArray(obj, dims=["index", "column"]), brief_dims
),
"index": obj.index,
"columns": obj.columns,
}
@cast.register(xarray.DataArray)
def cast_dataarray(obj: xarray.DataArray, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`xarray.DataArray`.
Map to a simpler DataArray, with separate indices, non-index coords,
name, and attributes.
"""
# Prevent infinite recursion - see _strip_dataarray()
if "__strip_dataarray__" in obj.attrs:
return obj
# Strip out the non-index coordinates and attributes
return {
"name": obj.name,
"attrs": obj.attrs,
# Index is handled separately, and created as a default
# RangeIndex(shape[i]) if it doesn't exist, as it is compared
# with outer join, whereas non-index coords and data are
# compared with inner join
"index": {k: obj.coords[k].to_index() for k in obj.dims},
"coords": {
k: _strip_dataarray(v, brief_dims)
for k, v in obj.coords.items()
if not isinstance(v.variable, xarray.IndexVariable)
},
"data": _strip_dataarray(obj, brief_dims),
}
@cast.register(xarray.Dataset)
def cast_dataset(obj: xarray.Dataset, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`xarray.Dataset`.
Map to a dict of DataArrays.
"""
return {
"attrs": obj.attrs,
# There may be coords, index or not, that are not
# used in any data variable.
# See above on why indices are handled separately
"index": {k: obj.coords[k].to_index() for k in obj.dims},
"coords": {
k: _strip_dataarray(v, brief_dims)
for k, v in obj.coords.items()
if not isinstance(v.variable, xarray.IndexVariable)
},
"data_vars": {
k: _strip_dataarray(v, brief_dims) for k, v in obj.data_vars.items()
},
}
@cast.register(pandas.MultiIndex)
def cast_multiindex(obj: pandas.MultiIndex, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`pandas.MultiIndex`.
Map to a set of tuples. Note that this means that levels are
positional. Using a set allows comparing the indices non-positionally.
"""
return {"names": obj.names, "data": set(obj.tolist())}
@cast.register(pandas.RangeIndex)
def cast_rangeindex(obj: pandas.RangeIndex, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`pandas.RangeIndex`.
This function does nothing - RangeIndex objects are dealt with
directly by :func:`_recursive_diff`. This function is defined
to prevent RangeIndex objects to be processed by the more generic
``cast(obj: pandas.Index)`` below.
"""
return obj
@cast.register(pandas.Index)
def cast_index(obj: pandas.Index, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`pandas.Index`.
Cast to a DataArray.
.. note::
:func:`~functools.singledispatch` always prefers a more specialised
variant if available, so this function will not be called for
:class:`pandas.MultiIndex` or :class:`pandas.RangeIndex`, as they have
their own single dispatch variants.
"""
return _strip_dataarray(xarray.DataArray(obj), brief_dims)
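# Illustrative dispatch (hypothetical values): pandas.Index([1, 2, 3]) lands
# here and is cast to a stripped DataArray, while pandas.RangeIndex(3) is
# caught by cast_rangeindex above and returned unaltered.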
@cast.register(frozenset)
def cast_frozenset(obj: frozenset, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`frozenset`.
Cast to a set.
"""
return set(obj)
@cast.register(tuple)
def cast_tuple(obj: tuple, brief_dims: Collection[Hashable]):
"""Single dispatch specialised variant of :func:`cast` for
:class:`tuple`.
Cast to a list.
"""
return list(obj)
def _strip_dataarray(
obj: xarray.DataArray, brief_dims: Collection[Hashable]
) -> xarray.DataArray:
"""Helper function of :func:`recursive_diff`.
Analyse a :class:`xarray.DataArray` and:
- strip away any non-index coordinates (including scalar coords)
- create stub coords for dimensions without coords
- sort dimensions alphabetically
- ravel the array to a 1D array with (potentially) a MultiIndex.
brief_dims, if any, are excluded.
:param obj:
any xarray.DataArray
:param brief_dims:
collection of dims, or "all"
:returns:
a stripped-down shallow copy of obj
"""
res = obj.copy()
# Remove non-index coordinates
for k, v in obj.coords.items():
if not isinstance(v.variable, xarray.IndexVariable):
del res[k]
# Ravel the array to make it become 1-dimensional.
# To do this, we must first unstack any already stacked dimension.
for dim in obj.dims:
if isinstance(obj.get_index(dim), pandas.MultiIndex):
res = proper_unstack(res, dim)
# Transpose to ignore dimensions order
res = res.transpose(*sorted(res.dims))
# Finally stack everything back together
if brief_dims != "all":
stack_dims = sorted(set(res.dims) - set(brief_dims))
if stack_dims:
res = res.stack(__stacked__=stack_dims)
# Prevent infinite recursion - see cast(obj: xarray.DataArray)
res.attrs["__strip_dataarray__"] = True
return res
|
[
"pandas.RangeIndex",
"xarray.DataArray"
] |
[((2203, 2224), 'xarray.DataArray', 'xarray.DataArray', (['obj'], {}), '(obj)\n', (2219, 2224), False, 'import xarray\n'), ((2261, 2284), 'pandas.RangeIndex', 'pandas.RangeIndex', (['size'], {}), '(size)\n', (2278, 2284), False, 'import pandas\n'), ((6651, 6672), 'xarray.DataArray', 'xarray.DataArray', (['obj'], {}), '(obj)\n', (6667, 6672), False, 'import xarray\n'), ((2659, 2696), 'xarray.DataArray', 'xarray.DataArray', (['obj'], {'dims': "['index']"}), "(obj, dims=['index'])\n", (2675, 2696), False, 'import xarray\n'), ((3180, 3227), 'xarray.DataArray', 'xarray.DataArray', (['obj'], {'dims': "['index', 'column']"}), "(obj, dims=['index', 'column'])\n", (3196, 3227), False, 'import xarray\n')]
|
# Modules
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
np.set_printoptions(suppress=True)
# Class declarations
class Adeline:
def __init__(self, r, landa, training_type, iter):
self.iter = iter
self.training_type = training_type
self.landa = landa
self.r = r
def function_transfer(self,x):
return 1/ (1 + np.exp(-x))
def show(self, iter, x, y, up):
print('CURRENT ITERATION: {}\n'.format(iter))
print('Pattern | __________ Output __________\n')
print('No | Desired Computed Abs Error\n')
for i in range(0,self.p):
if i < 12 or i >= self.p - 8:
print('{} | {:.2f} {:.2f} {:.2f}'.format(i, y[i],up[i],abs(y[i]-up[i])))
elif i == 12:
print('...')
print('\n')
def solve(self, x, y):
self.p = x.shape[0]
self.inputs = x.shape[1]
self.W = np.random.uniform(low=-0.3, high=0.3,size=(1,self.inputs))
vectorjfw = []
jfwprev = 0
# self.W = np.zeros((1,self.inputs))
it = 0
while(it < self.iter):
# for i in range(0,self.r):
if self.training_type == "Lotes":
deltaw = np.zeros((self.inputs, 1))
up = self.evaluation(x)
vectorjfw.append(mean_squared_error(y,up))
print('Average error: {:.6f}'.format(sum(vectorjfw) / len(vectorjfw)))
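# delta rule: error times the sigmoid derivative,
# using sigma'(z) = sigma(z) * (1 - sigma(z))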
jw = (y - up) * (up) * (1 - up)
self.show(it, x, y, up)
for j in range(0,self.p):
wri = self.landa*(jw[j])*np.transpose(x[j])
self.W = self.W + wri
it = it + 1
def evaluation(self,x):
up = sum(np.transpose(self.W*x) - 0.5)
up = self.function_transfer(up)
return up
# Operation definitions
def main():
''' Main body '''
x0 = np.random.uniform(0, 1)
xn = []
xn.append(x0)
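# generate a chaotic series with the logistic map: x_{n+1} = 4 * x_n * (1 - x_n)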
for i in range(1,600):
xn.append(4*xn[i-1]*(1-xn[i-1]))
vcr = []
vcry = []
xn = xn[-100:]
for i in range(0,len(xn[-100:])-2):
vcr.append([xn[i],xn[i+1],xn[i]*xn[i],xn[i]*xn[i+1],xn[i+1]*xn[i+1]])
vcry.append(xn[i+2])
x = np.array(vcr)
y = np.array(vcry)
X_train, X_test, y_train, y_test = train_test_split( x, y, test_size=0.33, random_state=42)
p = Adeline(1, 0.8,"PP",1000)
print('Learning type: pattern-by-pattern\nLearning rate: {}\nNumber of iterations used: {}'.format(p.landa, p.iter))
p.solve(X_train,y_train)
if __name__ == '__main__':
main()
|
[
"numpy.random.uniform",
"numpy.set_printoptions",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"numpy.transpose",
"numpy.array",
"numpy.exp",
"sklearn.metrics.mean_squared_error"
] |
[((129, 163), 'numpy.set_printoptions', 'np.set_printoptions', ([], {'suppress': '(True)'}), '(suppress=True)\n', (148, 163), True, 'import numpy as np\n'), ((1979, 2002), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1996, 2002), True, 'import numpy as np\n'), ((2304, 2317), 'numpy.array', 'np.array', (['vcr'], {}), '(vcr)\n', (2312, 2317), True, 'import numpy as np\n'), ((2326, 2340), 'numpy.array', 'np.array', (['vcry'], {}), '(vcry)\n', (2334, 2340), True, 'import numpy as np\n'), ((2380, 2435), 'sklearn.model_selection.train_test_split', 'train_test_split', (['x', 'y'], {'test_size': '(0.33)', 'random_state': '(42)'}), '(x, y, test_size=0.33, random_state=42)\n', (2396, 2435), False, 'from sklearn.model_selection import train_test_split\n'), ((1014, 1074), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': '(-0.3)', 'high': '(0.3)', 'size': '(1, self.inputs)'}), '(low=-0.3, high=0.3, size=(1, self.inputs))\n', (1031, 1074), True, 'import numpy as np\n'), ((434, 444), 'numpy.exp', 'np.exp', (['(-x)'], {}), '(-x)\n', (440, 444), True, 'import numpy as np\n'), ((1319, 1345), 'numpy.zeros', 'np.zeros', (['(self.inputs, 1)'], {}), '((self.inputs, 1))\n', (1327, 1345), True, 'import numpy as np\n'), ((1411, 1436), 'sklearn.metrics.mean_squared_error', 'mean_squared_error', (['y', 'up'], {}), '(y, up)\n', (1429, 1436), False, 'from sklearn.metrics import mean_squared_error\n'), ((1810, 1834), 'numpy.transpose', 'np.transpose', (['(self.W * x)'], {}), '(self.W * x)\n', (1822, 1834), True, 'import numpy as np\n'), ((1682, 1700), 'numpy.transpose', 'np.transpose', (['x[j]'], {}), '(x[j])\n', (1694, 1700), True, 'import numpy as np\n')]
|
import serial.tools.list_ports
import serial
import sys
import glob
class SerialPorts():
def __init__(self, include_links = True):
# Items are returned in no particular order. It may make sense to sort the items.
# Also note that the reported strings are different across platforms and operating systems,
# even for the same device.
self.ports = serial.tools.list_ports.comports(include_links=include_links)  # list of ListPortInfo objects
self.__set_port_list()
def __set_port_list(self):
""" Get a list of all available serial ports"""
self._coms = [str(i.device) for i in sorted(self.ports)]
def get_com_list(self):
try:
if len(self._coms) == 0:
    print("No available com ports found")
return self._coms
except NameError:
pass
def get_com_list_TEST(self):
""" Lists serial port names
:raises EnvironmentError:
On unsupported or unknown platforms
:returns:
A list of the serial ports available on the system
"""
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
self._coms = []
for port in ports:
try:
s = serial.Serial(port)
s.dtr = False
s.close()
self._coms.append(port)
except (OSError, serial.SerialException):
pass
return self._coms
def __chk_com(self, port_name, baudrate=9200, parity=serial.PARITY_EVEN, stopbits=serial.STOPBITS_ONE, timeout=0.5, **kwargs):
with serial.Serial(port=port_name,
baudrate =baudrate,
parity = parity,
stopbits = stopbits,
bytesize = serial.EIGHTBITS,
timeout =timeout,
**kwargs) as s:
print(f"connection succesful: {port_name} with param:\n"
f"baudrate: {baudrate} Bd\n"
f"parity: {parity}\n"
f"stopbits: {stopbits}\n"
f"timeout: {timeout} sec")
def get_com_status(self, **kwargs):
""" loop throught a list of available com ports and check if connection is still good"""
for com in self._coms:
self.__chk_com(port_name=com)
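# Hypothetical usage sketch (names from this module; not part of the original):
#   sp = SerialPorts()
#   print(sp.get_com_list())
#   sp.get_com_status()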
|
[
"serial.Serial",
"sys.platform.startswith",
"serial.tools.list_ports.comports",
"glob.glob"
] |
[((384, 445), 'serial.tools.list_ports.comports', 'serial.tools.list_ports.comports', ([], {'include_links': 'include_links'}), '(include_links=include_links)\n', (416, 445), False, 'import serial\n'), ((1182, 1212), 'sys.platform.startswith', 'sys.platform.startswith', (['"""win"""'], {}), "('win')\n", (1205, 1212), False, 'import sys\n'), ((2093, 2234), 'serial.Serial', 'serial.Serial', ([], {'port': 'port_name', 'baudrate': 'baudrate', 'parity': 'parity', 'stopbits': 'stopbits', 'bytesize': 'serial.EIGHTBITS', 'timeout': 'timeout'}), '(port=port_name, baudrate=baudrate, parity=parity, stopbits=\n stopbits, bytesize=serial.EIGHTBITS, timeout=timeout, **kwargs)\n', (2106, 2234), False, 'import serial\n'), ((1287, 1319), 'sys.platform.startswith', 'sys.platform.startswith', (['"""linux"""'], {}), "('linux')\n", (1310, 1319), False, 'import sys\n'), ((1323, 1356), 'sys.platform.startswith', 'sys.platform.startswith', (['"""cygwin"""'], {}), "('cygwin')\n", (1346, 1356), False, 'import sys\n'), ((1439, 1469), 'glob.glob', 'glob.glob', (['"""/dev/tty[A-Za-z]*"""'], {}), "('/dev/tty[A-Za-z]*')\n", (1448, 1469), False, 'import glob\n'), ((1483, 1516), 'sys.platform.startswith', 'sys.platform.startswith', (['"""darwin"""'], {}), "('darwin')\n", (1506, 1516), False, 'import sys\n'), ((1724, 1743), 'serial.Serial', 'serial.Serial', (['port'], {}), '(port)\n', (1737, 1743), False, 'import serial\n'), ((1538, 1561), 'glob.glob', 'glob.glob', (['"""/dev/tty.*"""'], {}), "('/dev/tty.*')\n", (1547, 1561), False, 'import glob\n')]
|
# -*- coding: utf-8 -*-
import pandas as pd
# Read in track metadata with genre labels
tracks = pd.read_csv('datasets/fma-rock-vs-hiphop.csv')
# Read in track metrics with the features
echonest_metrics = pd.read_json('datasets/echonest-metrics.json', precise_float=True)
# Merge the relevant columns of tracks and echonest_metrics
echo_tracks = echonest_metrics.merge(tracks[['genre_top', 'track_id']], on='track_id')
# Inspect the resultant dataframe
echo_tracks.info()
# Create a correlation matrix
corr_metrics = echo_tracks.corr()
corr_metrics.style.background_gradient()
# Define features
features = echo_tracks.drop(['genre_top', 'track_id'], axis=1)
# Define labels
labels = echo_tracks['genre_top']
# Import the StandardScaler
from sklearn.preprocessing import StandardScaler
# Scale the features and set the values to a new variable
scaler = StandardScaler()
scaled_train_features = scaler.fit_transform(features)
# Import plotting module, and PCA class
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
# Get explained variance ratios from PCA using all features
pca = PCA()
pca.fit(scaled_train_features)
exp_variance = pca.explained_variance_ratio_
# plot the explained variance using a barplot
fig, ax = plt.subplots()
print(pca.explained_variance_ratio_)
print(pca.n_components_)
ax.bar(range(pca.n_components_), exp_variance)
ax.set_xlabel('Principal Component #')
# Import numpy
import numpy as np
# Calculate the cumulative explained variance
cum_exp_variance = np.cumsum(exp_variance)
# Plot the cumulative explained variance and draw a dashed line at 0.85.
fig, ax = plt.subplots()
ax.plot(cum_exp_variance)
ax.axhline(y=0.85, linestyle='--')
# choose the n_components where about 85% of our variance can be explained
n_components = 6
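# (equivalent programmatic pick, assuming the arrays above: the first index
# where the cumulative variance crosses 0.85, i.e.
# int(np.argmax(cum_exp_variance >= 0.85)) + 1)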
# Perform PCA with the chosen number of components and project data onto components
pca = PCA(n_components, random_state=10)
pca.fit(scaled_train_features)
pca_projection = pca.transform(scaled_train_features)
# Import train_test_split function and Decision tree classifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
# Split data
train_features, test_features, train_labels, test_labels = train_test_split(pca_projection, labels, random_state=10)
# Train decision tree
tree = DecisionTreeClassifier(random_state=10)
tree.fit(train_features, train_labels)
# Predict the labels for the test data
pred_labels_tree = tree.predict(test_features)
# Import LogisticRegression
from sklearn.linear_model import LogisticRegression
# Train logistic regression and predict labels for the test set
logreg = LogisticRegression(random_state=10)
logreg.fit(train_features, train_labels)
pred_labels_logit = logreg.predict(test_features)
# Create the classification report for both models
from sklearn.metrics import classification_report
class_rep_tree = classification_report(test_labels, pred_labels_tree)
class_rep_log = classification_report(test_labels, pred_labels_logit)
print("Decision Tree: \n", class_rep_tree)
print("Logistic Regression: \n", class_rep_log)
# Balance data for greater performance
# Subset only the hip-hop tracks, and then only the rock tracks
hop_only = echo_tracks.loc[echo_tracks['genre_top'] == 'Hip-Hop']
rock_only = echo_tracks.loc[echo_tracks['genre_top'] == 'Rock']
# sample the rocks songs to be the same number as there are hip-hop songs
rock_only = rock_only.sample(hop_only.shape[0], random_state=10)
# concatenate the dataframes rock_only and hop_only
rock_hop_bal = pd.concat([rock_only, hop_only])
# The features, labels, and pca projection are created for the balanced dataframe
features = rock_hop_bal.drop(['genre_top', 'track_id'], axis=1)
labels = rock_hop_bal['genre_top']
pca_projection = pca.fit_transform(scaler.fit_transform(features))
# Redefine the train and test set with the pca_projection from the balanced data
train_features, test_features, train_labels, test_labels = train_test_split(pca_projection, labels, random_state=10)
# Train decision tree on the balanced data
tree = DecisionTreeClassifier(random_state=10)
tree.fit(train_features, train_labels)
pred_labels_tree = tree.predict(test_features)
# Train logistic regression on the balanced data
logreg = LogisticRegression(random_state=10)
logreg.fit(train_features, train_labels)
pred_labels_logit = logreg.predict(test_features)
# Compare the models
print("Decision Tree: \n", classification_report(test_labels, pred_labels_tree))
print("Logistic Regression: \n", classification_report(test_labels, pred_labels_logit))
# Using cross-validation to evaluate our models
from sklearn.model_selection import KFold, cross_val_score
# Set up K-fold cross-validation
kf = KFold(n_splits=10)
tree = DecisionTreeClassifier(random_state=10)
logreg = LogisticRegression(random_state=10)
# Train models using KFold cv
tree_score = cross_val_score(tree, pca_projection, labels, cv = kf )
logit_score = cross_val_score(logreg, pca_projection, labels, cv = kf)
# Print the mean of each array of scores
print("Decision Tree:", np.mean(tree_score), "Logistic Regression:", np.mean(logit_score))
|
[
"sklearn.preprocessing.StandardScaler",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"sklearn.model_selection.cross_val_score",
"pandas.read_json",
"sklearn.tree.DecisionTreeClassifier",
"sklearn.metrics.classification_report",
"numpy.cumsum",
"sklearn.linear_model.LogisticRegression",
"sklearn.model_selection.KFold",
"sklearn.decomposition.PCA",
"numpy.mean",
"matplotlib.pyplot.subplots",
"pandas.concat"
] |
[((98, 144), 'pandas.read_csv', 'pd.read_csv', (['"""datasets/fma-rock-vs-hiphop.csv"""'], {}), "('datasets/fma-rock-vs-hiphop.csv')\n", (109, 144), True, 'import pandas as pd\n'), ((207, 273), 'pandas.read_json', 'pd.read_json', (['"""datasets/echonest-metrics.json"""'], {'precise_float': '(True)'}), "('datasets/echonest-metrics.json', precise_float=True)\n", (219, 273), True, 'import pandas as pd\n'), ((864, 880), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (878, 880), False, 'from sklearn.preprocessing import StandardScaler\n'), ((1116, 1121), 'sklearn.decomposition.PCA', 'PCA', ([], {}), '()\n', (1119, 1121), False, 'from sklearn.decomposition import PCA\n'), ((1255, 1269), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1267, 1269), True, 'import matplotlib.pyplot as plt\n'), ((1519, 1542), 'numpy.cumsum', 'np.cumsum', (['exp_variance'], {}), '(exp_variance)\n', (1528, 1542), True, 'import numpy as np\n'), ((1627, 1641), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (1639, 1641), True, 'import matplotlib.pyplot as plt\n'), ((1887, 1921), 'sklearn.decomposition.PCA', 'PCA', (['n_components'], {'random_state': '(10)'}), '(n_components, random_state=10)\n', (1890, 1921), False, 'from sklearn.decomposition import PCA\n'), ((2248, 2305), 'sklearn.model_selection.train_test_split', 'train_test_split', (['pca_projection', 'labels'], {'random_state': '(10)'}), '(pca_projection, labels, random_state=10)\n', (2264, 2305), False, 'from sklearn.model_selection import train_test_split\n'), ((2337, 2376), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(10)'}), '(random_state=10)\n', (2359, 2376), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((2660, 2695), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(10)'}), '(random_state=10)\n', (2678, 2695), False, 'from sklearn.linear_model import LogisticRegression\n'), ((2906, 2958), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'pred_labels_tree'], {}), '(test_labels, pred_labels_tree)\n', (2927, 2958), False, 'from sklearn.metrics import classification_report\n'), ((2975, 3028), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'pred_labels_logit'], {}), '(test_labels, pred_labels_logit)\n', (2996, 3028), False, 'from sklearn.metrics import classification_report\n'), ((3565, 3597), 'pandas.concat', 'pd.concat', (['[rock_only, hop_only]'], {}), '([rock_only, hop_only])\n', (3574, 3597), True, 'import pandas as pd\n'), ((3988, 4045), 'sklearn.model_selection.train_test_split', 'train_test_split', (['pca_projection', 'labels'], {'random_state': '(10)'}), '(pca_projection, labels, random_state=10)\n', (4004, 4045), False, 'from sklearn.model_selection import train_test_split\n'), ((4098, 4137), 'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(10)'}), '(random_state=10)\n', (4120, 4137), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((4283, 4318), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(10)'}), '(random_state=10)\n', (4301, 4318), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4751, 4769), 'sklearn.model_selection.KFold', 'KFold', ([], {'n_splits': '(10)'}), '(n_splits=10)\n', (4756, 4769), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((4778, 4817), 
'sklearn.tree.DecisionTreeClassifier', 'DecisionTreeClassifier', ([], {'random_state': '(10)'}), '(random_state=10)\n', (4800, 4817), False, 'from sklearn.tree import DecisionTreeClassifier\n'), ((4827, 4862), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'random_state': '(10)'}), '(random_state=10)\n', (4845, 4862), False, 'from sklearn.linear_model import LogisticRegression\n'), ((4908, 4960), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['tree', 'pca_projection', 'labels'], {'cv': 'kf'}), '(tree, pca_projection, labels, cv=kf)\n', (4923, 4960), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((4978, 5032), 'sklearn.model_selection.cross_val_score', 'cross_val_score', (['logreg', 'pca_projection', 'labels'], {'cv': 'kf'}), '(logreg, pca_projection, labels, cv=kf)\n', (4993, 5032), False, 'from sklearn.model_selection import KFold, cross_val_score\n'), ((4459, 4511), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'pred_labels_tree'], {}), '(test_labels, pred_labels_tree)\n', (4480, 4511), False, 'from sklearn.metrics import classification_report\n'), ((4546, 4599), 'sklearn.metrics.classification_report', 'classification_report', (['test_labels', 'pred_labels_logit'], {}), '(test_labels, pred_labels_logit)\n', (4567, 4599), False, 'from sklearn.metrics import classification_report\n'), ((5101, 5120), 'numpy.mean', 'np.mean', (['tree_score'], {}), '(tree_score)\n', (5108, 5120), True, 'import numpy as np\n'), ((5147, 5167), 'numpy.mean', 'np.mean', (['logit_score'], {}), '(logit_score)\n', (5154, 5167), True, 'import numpy as np\n')]
|
import KratosMultiphysics as KM
import KratosMultiphysics.KratosUnittest as KratosUnittest
from KratosMultiphysics.CoSimulationApplication.coupling_interface_data import CouplingInterfaceData
from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory
from testing_utilities import DummySolverWrapper
from math import sqrt, pi
class TestScalingOperation(KratosUnittest.TestCase):
def setUp(self):
self.model = KM.Model()
self.model_part = self.model.CreateModelPart("default")
self.model_part.AddNodalSolutionStepVariable(KM.PRESSURE)
self.model_part.ProcessInfo[KM.TIME] = 0.0
self.model_part.ProcessInfo[KM.STEP] = 0
for i in range(5):
new_node = self.model_part.CreateNewNode(i+1, i*0.1, 0.0, 0.0)
new_node.SetSolutionStepValue(KM.PRESSURE, 0, i+1.3)
data_settings = KM.Parameters("""{
"model_part_name" : "default",
"variable_name" : "PRESSURE"
}""")
self.interface_data = CouplingInterfaceData(data_settings, self.model)
self.interface_data.Initialize()
self.solver_wrappers = {"dummy_solver" : DummySolverWrapper({"data_4_testing" : self.interface_data})}
self.solver_process_info = KM.ProcessInfo()
def test_constant_scaling(self):
scaling_op_settings = KM.Parameters("""{
"type" : "scaling",
"solver" : "dummy_solver",
"data_name" : "data_4_testing",
"scaling_factor" : 1.5,
"echo_level" : 0
}""")
scaling_op = coupling_operation_factory.CreateCouplingOperation(scaling_op_settings, self.solver_wrappers, self.solver_process_info)
factors = [1.5] * 3
self.__ExecuteTest(scaling_op, factors)
def test_constant_scaling_from_string(self):
scaling_op_settings = KM.Parameters("""{
"type" : "scaling",
"solver" : "dummy_solver",
"data_name" : "data_4_testing",
"scaling_factor" : "1.5",
"echo_level" : 0
}""")
scaling_op = coupling_operation_factory.CreateCouplingOperation(scaling_op_settings, self.solver_wrappers, self.solver_process_info)
factors = [1.5] * 3
self.__ExecuteTest(scaling_op, factors)
def test_variable_scaling_time(self):
scaling_op_settings = KM.Parameters("""{
"type" : "scaling",
"solver" : "dummy_solver",
"data_name" : "data_4_testing",
"scaling_factor" : "1.5*t",
"echo_level" : 0
}""")
scaling_op = coupling_operation_factory.CreateCouplingOperation(scaling_op_settings, self.solver_wrappers, self.solver_process_info)
factors = [1.5*0.25, 1.5*0.5, 1.5*0.75]
self.__ExecuteTest(scaling_op, factors)
def test_variable_scaling_step(self):
scaling_op_settings = KM.Parameters("""{
"type" : "scaling",
"solver" : "dummy_solver",
"data_name" : "data_4_testing",
"scaling_factor" : "1.5*sqrt(step)*pi",
"echo_level" : 0
}""")
scaling_op = coupling_operation_factory.CreateCouplingOperation(scaling_op_settings, self.solver_wrappers, self.solver_process_info)
factors = [1.5*pi*sqrt(1), 1.5*pi*sqrt(2), 1.5*pi*sqrt(3), 1.5*pi*sqrt(4), 1.5*pi*sqrt(5)]
self.__ExecuteTest(scaling_op, factors)
def test_scaling_in_interval(self):
scaling_op_settings = KM.Parameters("""{
"type" : "scaling",
"solver" : "dummy_solver",
"data_name" : "data_4_testing",
"scaling_factor" : 1.22,
"interval" : [0.0, 0.3]
}""")
scaling_op = coupling_operation_factory.CreateCouplingOperation(scaling_op_settings, self.solver_wrappers, self.solver_process_info)
factors = [1.0] * 5
factors[0] = 1.22
self.__ExecuteTest(scaling_op, factors)
def test_scaling_in_interval_2(self):
scaling_op_settings = KM.Parameters("""{
"type" : "scaling",
"solver" : "dummy_solver",
"data_name" : "data_4_testing",
"scaling_factor" : 1.22,
"interval" : [0.8, "End"]
}""")
scaling_op = coupling_operation_factory.CreateCouplingOperation(scaling_op_settings, self.solver_wrappers, self.solver_process_info)
factors = [1.0] * 3
factors.extend([1.22] * 3)
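# with dt = 0.25 the test times run 0.25 ... 1.5, so the interval
# [0.8, "End"] covers only the last three steps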
self.__ExecuteTest(scaling_op, factors)
def __ExecuteTest(self, scaling_operation, factors):
scaling_operation.Check()
for fac in factors:
self.model_part.ProcessInfo[KM.TIME] += 0.25
self.model_part.ProcessInfo[KM.STEP] += 1
old_data = self.interface_data.GetData()
scaling_operation.Execute()
new_data = self.interface_data.GetData()
self.__CompareValues(old_data, new_data, fac)
def __CompareValues(self, old_data, new_data, factor):
for old_val, new_val in zip(old_data, new_data):
self.assertAlmostEqual(old_val*factor, new_val)
if __name__ == '__main__':
KratosUnittest.main()
|
[
"KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation",
"math.sqrt",
"KratosMultiphysics.KratosUnittest.main",
"KratosMultiphysics.Model",
"KratosMultiphysics.ProcessInfo",
"testing_utilities.DummySolverWrapper",
"KratosMultiphysics.Parameters",
"KratosMultiphysics.CoSimulationApplication.coupling_interface_data.CouplingInterfaceData"
] |
[((5340, 5361), 'KratosMultiphysics.KratosUnittest.main', 'KratosUnittest.main', ([], {}), '()\n', (5359, 5361), True, 'import KratosMultiphysics.KratosUnittest as KratosUnittest\n'), ((458, 468), 'KratosMultiphysics.Model', 'KM.Model', ([], {}), '()\n', (466, 468), True, 'import KratosMultiphysics as KM\n'), ((892, 1020), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "model_part_name" : "default",\n "variable_name" : "PRESSURE"\n }"""'], {}), '(\n """{\n "model_part_name" : "default",\n "variable_name" : "PRESSURE"\n }"""\n )\n', (905, 1020), True, 'import KratosMultiphysics as KM\n'), ((1041, 1089), 'KratosMultiphysics.CoSimulationApplication.coupling_interface_data.CouplingInterfaceData', 'CouplingInterfaceData', (['data_settings', 'self.model'], {}), '(data_settings, self.model)\n', (1062, 1089), False, 'from KratosMultiphysics.CoSimulationApplication.coupling_interface_data import CouplingInterfaceData\n'), ((1279, 1295), 'KratosMultiphysics.ProcessInfo', 'KM.ProcessInfo', ([], {}), '()\n', (1293, 1295), True, 'import KratosMultiphysics as KM\n'), ((1364, 1613), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : 1.5,\n "echo_level" : 0\n }"""'], {}), '(\n """{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : 1.5,\n "echo_level" : 0\n }"""\n )\n', (1377, 1613), True, 'import KratosMultiphysics as KM\n'), ((1626, 1749), 'KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation', 'coupling_operation_factory.CreateCouplingOperation', (['scaling_op_settings', 'self.solver_wrappers', 'self.solver_process_info'], {}), '(scaling_op_settings,\n self.solver_wrappers, self.solver_process_info)\n', (1676, 1749), False, 'from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory\n'), ((1904, 2155), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : "1.5",\n "echo_level" : 0\n }"""'], {}), '(\n """{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : "1.5",\n "echo_level" : 0\n }"""\n )\n', (1917, 2155), True, 'import KratosMultiphysics as KM\n'), ((2168, 2291), 'KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation', 'coupling_operation_factory.CreateCouplingOperation', (['scaling_op_settings', 'self.solver_wrappers', 'self.solver_process_info'], {}), '(scaling_op_settings,\n self.solver_wrappers, self.solver_process_info)\n', (2218, 2291), False, 'from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory\n'), ((2439, 2692), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : "1.5*t",\n "echo_level" : 0\n }"""'], {}), '(\n """{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : "1.5*t",\n "echo_level" : 0\n }"""\n )\n', (2452, 2692), True, 'import KratosMultiphysics as KM\n'), ((2705, 2828), 'KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation', 'coupling_operation_factory.CreateCouplingOperation', (['scaling_op_settings', 'self.solver_wrappers', 'self.solver_process_info'], 
{}), '(scaling_op_settings,\n self.solver_wrappers, self.solver_process_info)\n', (2755, 2828), False, 'from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory\n'), ((2996, 3261), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : "1.5*sqrt(step)*pi",\n "echo_level" : 0\n }"""'], {}), '(\n """{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : "1.5*sqrt(step)*pi",\n "echo_level" : 0\n }"""\n )\n', (3009, 3261), True, 'import KratosMultiphysics as KM\n'), ((3274, 3397), 'KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation', 'coupling_operation_factory.CreateCouplingOperation', (['scaling_op_settings', 'self.solver_wrappers', 'self.solver_process_info'], {}), '(scaling_op_settings,\n self.solver_wrappers, self.solver_process_info)\n', (3324, 3397), False, 'from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory\n'), ((3614, 3873), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : 1.22,\n "interval" : [0.0, 0.3]\n }"""'], {}), '(\n """{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : 1.22,\n "interval" : [0.0, 0.3]\n }"""\n )\n', (3627, 3873), True, 'import KratosMultiphysics as KM\n'), ((3886, 4009), 'KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation', 'coupling_operation_factory.CreateCouplingOperation', (['scaling_op_settings', 'self.solver_wrappers', 'self.solver_process_info'], {}), '(scaling_op_settings,\n self.solver_wrappers, self.solver_process_info)\n', (3936, 4009), False, 'from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory\n'), ((4183, 4444), 'KratosMultiphysics.Parameters', 'KM.Parameters', (['"""{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : 1.22,\n "interval" : [0.8, "End"]\n }"""'], {}), '(\n """{\n "type" : "scaling",\n "solver" : "dummy_solver",\n "data_name" : "data_4_testing",\n "scaling_factor" : 1.22,\n "interval" : [0.8, "End"]\n }"""\n )\n', (4196, 4444), True, 'import KratosMultiphysics as KM\n'), ((4457, 4580), 'KratosMultiphysics.CoSimulationApplication.factories.coupling_operation_factory.CreateCouplingOperation', 'coupling_operation_factory.CreateCouplingOperation', (['scaling_op_settings', 'self.solver_wrappers', 'self.solver_process_info'], {}), '(scaling_op_settings,\n self.solver_wrappers, self.solver_process_info)\n', (4507, 4580), False, 'from KratosMultiphysics.CoSimulationApplication.factories import coupling_operation_factory\n'), ((1181, 1240), 'testing_utilities.DummySolverWrapper', 'DummySolverWrapper', (["{'data_4_testing': self.interface_data}"], {}), "({'data_4_testing': self.interface_data})\n", (1199, 1240), False, 'from testing_utilities import DummySolverWrapper\n'), ((3421, 3428), 'math.sqrt', 'sqrt', (['(1)'], {}), '(1)\n', (3425, 3428), False, 'from math import sqrt, pi\n'), ((3437, 3444), 'math.sqrt', 'sqrt', (['(2)'], {}), '(2)\n', (3441, 3444), False, 'from math import sqrt, pi\n'), ((3453, 3460), 'math.sqrt', 'sqrt', (['(3)'], {}), '(3)\n', (3457, 3460), False, 'from math import sqrt, pi\n'), ((3469, 3476), 'math.sqrt', 'sqrt', 
(['(4)'], {}), '(4)\n', (3473, 3476), False, 'from math import sqrt, pi\n'), ((3485, 3492), 'math.sqrt', 'sqrt', (['(5)'], {}), '(5)\n', (3489, 3492), False, 'from math import sqrt, pi\n')]
|
"""This is to be executed after there are some data within ./data/memory/."""
import os
import nn.CattleV2 as Cattle
import nnutils
import matplotlib.pyplot as plt
import pylab
import pickle
import sys
# os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
name = 'G2'
CATTLE = Cattle.Cattle((nnutils.input_size,), nnutils.output_size, name)
# guylaine.load()
history_losses = []
train_with_epsilon = len(sys.argv) != 1
print("training with epsilon decay : %s", train_with_epsilon)
dir = "./{}/".format(name)
if os.path.isfile(dir + 'loss_historyv2'):
history_losses = pickle.load(open(dir + 'loss_historyv2', 'rb'))
for file in os.listdir(dir + 'memory/'):
fullFile = os.path.join(dir + "memory/", file)
print("Opening file %s", file)
CATTLE.load(False)
CATTLE.loadMemory(file)
losses = CATTLE.replay(32, 200, file, train_with_epsilon)
for loss in losses:
history_losses.append(loss)
CATTLE.save()
pickle.dump(history_losses, open(dir + 'loss_historyv2', 'wb'))
os.remove(fullFile)
# plt.plot(history_losses)
# plt.title('model loss')
# plt.ylabel('loss')
# plt.xlabel('epoch')
# plt.show()
|
[
"os.remove",
"nn.CattleV2.Cattle",
"os.path.isfile",
"os.path.join",
"os.listdir"
] |
[((279, 342), 'nn.CattleV2.Cattle', 'Cattle.Cattle', (['(nnutils.input_size,)', 'nnutils.output_size', 'name'], {}), '((nnutils.input_size,), nnutils.output_size, name)\n', (292, 342), True, 'import nn.CattleV2 as Cattle\n'), ((516, 554), 'os.path.isfile', 'os.path.isfile', (["(dir + 'loss_historyv2')"], {}), "(dir + 'loss_historyv2')\n", (530, 554), False, 'import os\n'), ((638, 665), 'os.listdir', 'os.listdir', (["(dir + 'memory/')"], {}), "(dir + 'memory/')\n", (648, 665), False, 'import os\n'), ((682, 717), 'os.path.join', 'os.path.join', (["(dir + 'memory/')", 'file'], {}), "(dir + 'memory/', file)\n", (694, 717), False, 'import os\n'), ((1019, 1038), 'os.remove', 'os.remove', (['fullFile'], {}), '(fullFile)\n', (1028, 1038), False, 'import os\n')]
|
import json
import re
import subprocess as s
import os
from pathlib import Path
from time import sleep as zzz
from colorama import Fore, Style
# print ascii art logo
def welcome():
print(f"{Fore.MAGENTA} ")
print(r"""
_____ _____ _____ _ _
| __ \_ _/ ____| | | | |
| |__) || || | __ _| | ___ _ __ __| | __ _ _ __
| ___/ | || | / _` | |/ _ \ '_ \ / _` |/ _` | '__|
| | _| || |___| (_| | | __/ | | | (_| | (_| | |
|_| |_____\_____\__,_|_|\___|_| |_|\__,_|\__,_|_| """)
print(f" {Style.RESET_ALL}")
print(f"{Fore.BLUE}Welcome to the setup script for the OutLook Knight Project{Style.RESET_ALL}")
print(f"{Fore.GREEN}This script will install all the necessary packages and setup the project to run{Style.RESET_ALL}")
zzz(5)
def scandir(path):  # path: str; returns the immediate subfolders of path
subfolders = []
for f in os.scandir(path):
if f.is_dir():
subfolders.append(f.path)
""" for path in list(subfolders):
subfol = scandir(path)
subfolders.extend(subfol) """
return subfolders
def createConfigJSONFile():
# use user input to dynamically create config.json file
# with the following settings:
# - description
# - scopes {Database, DLNA, MSCAL, GOOGLECAL}
# - eLog {level, cLogEnabled, dLogEnabled, eLogEnabled, fLogEnabled, utilPath, filePath}
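# An illustrative (hypothetical) resulting config.json:
#   {"scopes": {"Database": true, "DLNA": false},
#    "eLog": {"level": 3, "cLogEnabled": true, ...}}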
json_file = {
"scopes": {
},
"eLog": {
"level": 0,
"cLogEnabled": False,
"dLogEnabled": False,
"eLogEnabled": False,
"fLogEnabled": False,
"utilPath": "",
"filePath": ""
}
}
configPath = Path(__file__).parent
relative_path = '../app/server/scopes/utils'
relative_path_2 = '../app'
relative_path_3 = '../app/server/scopes/'
src_path = (configPath / relative_path).resolve()
src_path_2 = (configPath / relative_path_2).resolve()
src_path_3 = (configPath / relative_path_3).resolve()
json_file["eLog"]["utilPath"] = str(src_path)
json_file["eLog"]["filePath"] = str(src_path_2 / "Logs/")
completeName = str(src_path / "config.json")
subfolders = scandir(src_path_3)
# print(f"{Fore.GREEN}The following subfolders were found in the scopes folder{Style.RESET_ALL}")
print(
f"{Fore.BLUE}Please enter the following information to create the config.json file{Style.RESET_ALL}")
print(
f"{Fore.BLUE}Please choose the modules to enable (0 or 1 only).{Style.RESET_ALL}")
for folder in subfolders:
temp = []
temp.append(folder.split("scopes\\")[-1])
# print(f"{Fore.GREEN}{temp}{Style.RESET_ALL}")
for i in list(temp):
temp = i.split("\\")[-1]
# keep only folder names that contain uppercase letters (scope modules)
if any(u.isupper() for u in list(temp)):
print(f"{Fore.GREEN}Found the scope: {Style.RESET_ALL}")
print(f"{Fore.GREEN}{temp}{Style.RESET_ALL}")
print(f"{Fore.GREEN}Enable {temp} Module?{Style.RESET_ALL}")
json_file["scopes"][temp] = bool(int(input()))
print(f"{Fore.BLUE}Please enter the eLog level (a number between 0 and 10){Style.RESET_ALL}")
json_file["eLog"]["level"] = int(input())
print(f"{Fore.BLUE}Please choose the logging modules to enable (0 or 1 only).{Style.RESET_ALL}")
print(f"{Fore.GREEN}Enable the eLog cLogEnabled module?{Style.RESET_ALL}")
json_file["eLog"]["cLogEnabled"] = bool(int(input()))
print(f"{Fore.GREEN}Enable the eLog dLogEnabled module?{Style.RESET_ALL}")
json_file["eLog"]["dLogEnabled"] = bool(int(input()))
print(f"{Fore.GREEN}Enable the eLog eLogEnabled module?{Style.RESET_ALL}")
json_file["eLog"]["eLogEnabled"] = bool(int(input()))
print(f"{Fore.GREEN}Enable the eLog fLogEnabled module?{Style.RESET_ALL}")
json_file["eLog"]["fLogEnabled"] = bool(int(input()))
with open(completeName, "w") as outfile:
json.dump(json_file, outfile)
if __name__ == '__main__':
welcome()
createConfigJSONFile()
quit()
|
[
"json.dump",
"pathlib.Path",
"os.scandir",
"time.sleep"
] |
[((798, 804), 'time.sleep', 'zzz', (['(5)'], {}), '(5)\n', (801, 804), True, 'from time import sleep as zzz\n'), ((885, 901), 'os.scandir', 'os.scandir', (['path'], {}), '(path)\n', (895, 901), False, 'import os\n'), ((1704, 1718), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (1708, 1718), False, 'from pathlib import Path\n'), ((4028, 4057), 'json.dump', 'json.dump', (['json_file', 'outfile'], {}), '(json_file, outfile)\n', (4037, 4057), False, 'import json\n')]
|
# The following source code was originally obtained from:
# https://github.com/keras-team/keras/blob/r2.6/keras/layers/preprocessing/normalization.py#L27-L282
# https://github.com/keras-team/keras/blob/r2.6/keras/layers/core.py#L55-L119
# ==============================================================================
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization preprocessing layer."""
import tensorflow.compat.v2 as tf
from keras.engine.base_layer import Layer
class FeatureNormalization(Layer):
"""Feature-wise normalization of the data."""
def __init__(self,
axis=-1,
**kwargs):
super().__init__(**kwargs)
self.supports_masking = True
self._compute_output_and_mask_jointly = True
# Standardize `axis` to a tuple.
if axis is None:
axis = ()
elif isinstance(axis, int):
axis = (axis,)
else:
axis = tuple(axis)
self.axis = axis
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
weight_shape = tuple(input_shape[d] for d in self.axis)
self.scale = self.add_weight(
name='scale',
shape=weight_shape,
dtype=self.dtype,
initializer='ones',
trainable=False)
self.offset = self.add_weight(
name='offset',
shape=weight_shape,
dtype=self.dtype,
initializer='zeros',
trainable=False)
self.built = True
def compute_mask(self, inputs, mask=None):
return tf.math.is_finite(inputs)
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs)
if inputs.dtype != self.dtype:
inputs = tf.cast(inputs, self.dtype)
mask = tf.math.is_finite(inputs)
outputs = tf.math.multiply_no_nan(inputs, tf.cast(mask, inputs.dtype))
outputs = (outputs * tf.cast(self.scale, inputs.dtype) +
tf.cast(self.offset, inputs.dtype))
# Compute the mask and outputs simultaneously.
outputs._keras_mask = mask
return outputs
def compute_output_shape(self, input_shape):
return input_shape
def compute_output_signature(self, input_spec):
return input_spec
def get_config(self):
config = super().get_config()
config.update({
'axis': self.axis,
})
return config
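if __name__ == '__main__':
    # Minimal usage sketch (added for illustration; not part of the original
    # layer). The scale/offset weights are non-trainable, so a caller assigns
    # them, e.g. from dataset statistics; the values below are arbitrary.
    layer = FeatureNormalization(axis=-1)
    data = tf.constant([[1.0, float('nan')], [3.0, 4.0]])
    _ = layer(data)  # first call builds the (2,)-shaped scale/offset weights
    layer.scale.assign(tf.constant([0.5, 0.5]))
    layer.offset.assign(tf.constant([0.0, 0.0]))
    print(layer(data))  # non-finite inputs are masked to zero in the output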
|
[
"tensorflow.compat.v2.convert_to_tensor",
"tensorflow.compat.v2.math.is_finite",
"tensorflow.compat.v2.cast",
"tensorflow.compat.v2.TensorShape"
] |
[((1632, 1659), 'tensorflow.compat.v2.TensorShape', 'tf.TensorShape', (['input_shape'], {}), '(input_shape)\n', (1646, 1659), True, 'import tensorflow.compat.v2 as tf\n'), ((2128, 2153), 'tensorflow.compat.v2.math.is_finite', 'tf.math.is_finite', (['inputs'], {}), '(inputs)\n', (2145, 2153), True, 'import tensorflow.compat.v2 as tf\n'), ((2194, 2222), 'tensorflow.compat.v2.convert_to_tensor', 'tf.convert_to_tensor', (['inputs'], {}), '(inputs)\n', (2214, 2222), True, 'import tensorflow.compat.v2 as tf\n'), ((2313, 2338), 'tensorflow.compat.v2.math.is_finite', 'tf.math.is_finite', (['inputs'], {}), '(inputs)\n', (2330, 2338), True, 'import tensorflow.compat.v2 as tf\n'), ((2273, 2300), 'tensorflow.compat.v2.cast', 'tf.cast', (['inputs', 'self.dtype'], {}), '(inputs, self.dtype)\n', (2280, 2300), True, 'import tensorflow.compat.v2 as tf\n'), ((2385, 2412), 'tensorflow.compat.v2.cast', 'tf.cast', (['mask', 'inputs.dtype'], {}), '(mask, inputs.dtype)\n', (2392, 2412), True, 'import tensorflow.compat.v2 as tf\n'), ((2490, 2524), 'tensorflow.compat.v2.cast', 'tf.cast', (['self.offset', 'inputs.dtype'], {}), '(self.offset, inputs.dtype)\n', (2497, 2524), True, 'import tensorflow.compat.v2 as tf\n'), ((2439, 2472), 'tensorflow.compat.v2.cast', 'tf.cast', (['self.scale', 'inputs.dtype'], {}), '(self.scale, inputs.dtype)\n', (2446, 2472), True, 'import tensorflow.compat.v2 as tf\n')]
|
import os.path
from os import listdir
from typing import List
class FileInfo:
def __init__(
self,
filename: str,
schema: str,
dialect: str,
version: int,
api_level: int,
) -> None:
self.filename = filename
self.schema = schema
self.dialect = dialect
self.version = version
self.api_level = api_level
self.transaction = True
def __lt__(self, other: "FileInfo") -> bool:
if self.schema != other.schema or self.dialect != other.dialect:
raise TypeError("FileInfos must have the same schema and dialect")
return self.version < other.version
def __repr__(self) -> str:
return "FileInfo({}, {}, {}, {}, {})".format(
repr(self.filename),
repr(self.schema),
repr(self.dialect),
self.version,
self.api_level,
)
def collect_sql_files(directory: str) -> List[str]:
return [
os.path.join(directory, fn)
for fn in listdir(directory)
if fn.endswith(".sql")
]
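if __name__ == "__main__":
    # Illustration only (not in the original source): FileInfos that share a
    # schema and dialect sort by version, which yields migration order.
    first = FileInfo("0001_init.sql", "main", "sqlite", 1, 1)
    second = FileInfo("0002_add_index.sql", "main", "sqlite", 2, 1)
    print(sorted([second, first]))  # first sorts ahead of second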
|
[
"os.listdir"
] |
[((1043, 1061), 'os.listdir', 'listdir', (['directory'], {}), '(directory)\n', (1050, 1061), False, 'from os import listdir\n')]
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from data_worker.data_worker import combine_batches, split_into_batches, \
unpickle, unpack_data, display_img
from torch_lib.Interface import Interface
from torch_lib.Nets import MediumNet
from torch_lib.data_worker import suit4pytorch
batches_names = [
'data_batch_1', 'data_batch_2', 'data_batch_3', 'data_batch_4',
'data_batch_5']
if __name__ == '__main__':
print('running main')
saved_weights_file = 'saved_nets/saved_torch/version1.pth'
data_batches = [
unpickle(f'datasets/cifar-10-batches-py/{batch_name}') for batch_name
in batches_names]
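    # the five pickled CIFAR-10 training batches are unpacked and recombined below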
unpacked_batches = [
(unpack_data(data_batch)) for data_batch
in data_batches]
print(unpacked_batches[0][0].shape)
X, Y = combine_batches(unpacked_batches)
print(X.shape, Y.shape)
batches = split_into_batches(X, Y, 3)
torch_batches = [(suit4pytorch(X, Y)) for X, Y in batches]
X_torch = torch_batches[0][0]
Y_torch = torch_batches[0][1]
net = MediumNet()
net_interface = Interface(net)
net_interface.train_net(torch_batches, 1, verbose=False, batch_size=None)
net_interface.save_weights(saved_weights_file)
preds = net_interface.predict_net(X_torch)
preds = preds.detach().numpy()
print(preds)
print(np.argmax(preds, axis=1), Y_torch)
|
[
"data_worker.data_worker.unpack_data",
"numpy.argmax",
"data_worker.data_worker.unpickle",
"data_worker.data_worker.combine_batches",
"torch_lib.Interface.Interface",
"torch_lib.data_worker.suit4pytorch",
"torch_lib.Nets.MediumNet",
"data_worker.data_worker.split_into_batches"
] |
[((833, 866), 'data_worker.data_worker.combine_batches', 'combine_batches', (['unpacked_batches'], {}), '(unpacked_batches)\n', (848, 866), False, 'from data_worker.data_worker import combine_batches, split_into_batches, unpickle, unpack_data, display_img\n'), ((911, 938), 'data_worker.data_worker.split_into_batches', 'split_into_batches', (['X', 'Y', '(3)'], {}), '(X, Y, 3)\n', (929, 938), False, 'from data_worker.data_worker import combine_batches, split_into_batches, unpickle, unpack_data, display_img\n'), ((1083, 1094), 'torch_lib.Nets.MediumNet', 'MediumNet', ([], {}), '()\n', (1092, 1094), False, 'from torch_lib.Nets import MediumNet\n'), ((1115, 1129), 'torch_lib.Interface.Interface', 'Interface', (['net'], {}), '(net)\n', (1124, 1129), False, 'from torch_lib.Interface import Interface\n'), ((584, 638), 'data_worker.data_worker.unpickle', 'unpickle', (['f"""datasets/cifar-10-batches-py/{batch_name}"""'], {}), "(f'datasets/cifar-10-batches-py/{batch_name}')\n", (592, 638), False, 'from data_worker.data_worker import combine_batches, split_into_batches, unpickle, unpack_data, display_img\n'), ((715, 738), 'data_worker.data_worker.unpack_data', 'unpack_data', (['data_batch'], {}), '(data_batch)\n', (726, 738), False, 'from data_worker.data_worker import combine_batches, split_into_batches, unpickle, unpack_data, display_img\n'), ((962, 980), 'torch_lib.data_worker.suit4pytorch', 'suit4pytorch', (['X', 'Y'], {}), '(X, Y)\n', (974, 980), False, 'from torch_lib.data_worker import suit4pytorch\n'), ((1369, 1393), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (1378, 1393), True, 'import numpy as np\n')]
|
from inspect import currentframe, getframeinfo
print(getframeinfo(currentframe()).lineno)
import pycuda.driver as drv
print(getframeinfo(currentframe()).lineno)
import pycuda.tools
print(getframeinfo(currentframe()).lineno)
#import pycuda.autoinit
print(getframeinfo(currentframe()).lineno)
from pycuda.compiler import SourceModule
print(getframeinfo(currentframe()).lineno)
import numpy as np
print(getframeinfo(currentframe()).lineno)
drv.init()
print(getframeinfo(currentframe()).lineno)
ctx = drv.Device(0).make_context()  # pycuda.autoinit is commented out above, so create a context explicitly
a = np.random.randn(400).astype(np.float32)
b = np.random.randn(400).astype(np.float32)
mod = SourceModule("""
__global__ void multiply_them(float *dest, float *a, float *b)
{
const int i = threadIdx.x;
dest[i] = a[i] * b[i];
}
""")
multiply_them = mod.get_function("multiply_them")  # fetch the compiled kernel
dest = np.zeros_like(a)
multiply_them(
    drv.Out(dest), drv.In(a), drv.In(b),
    block=(400,1,1))
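# Added for illustration: the kernel computes an elementwise product, and the
# context created above must be released explicitly.
print(np.allclose(dest, a * b))
ctx.pop()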
|
[
"pycuda.compiler.SourceModule",
"pycuda.driver.In",
"inspect.currentframe",
"pycuda.driver.Out",
"pycuda.driver.init"
] |
[((437, 447), 'pycuda.driver.init', 'drv.init', ([], {}), '()\n', (445, 447), True, 'import pycuda.driver as drv\n'), ((597, 749), 'pycuda.compiler.SourceModule', 'SourceModule', (['"""\n__global__ void multiply_them(float *dest, float *a, float *b)\n{\n const int i = threadIdx.x;\n dest[i] = a[i] * b[i];\n}\n"""'], {}), '(\n """\n__global__ void multiply_them(float *dest, float *a, float *b)\n{\n const int i = threadIdx.x;\n dest[i] = a[i] * b[i];\n}\n"""\n )\n', (609, 749), False, 'from pycuda.compiler import SourceModule\n'), ((791, 804), 'pycuda.driver.Out', 'drv.Out', (['dest'], {}), '(dest)\n', (798, 804), True, 'import pycuda.driver as drv\n'), ((806, 815), 'pycuda.driver.In', 'drv.In', (['a'], {}), '(a)\n', (812, 815), True, 'import pycuda.driver as drv\n'), ((817, 826), 'pycuda.driver.In', 'drv.In', (['b'], {}), '(b)\n', (823, 826), True, 'import pycuda.driver as drv\n'), ((66, 80), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (78, 80), False, 'from inspect import currentframe, getframeinfo\n'), ((137, 151), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (149, 151), False, 'from inspect import currentframe, getframeinfo\n'), ((200, 214), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (212, 214), False, 'from inspect import currentframe, getframeinfo\n'), ((267, 281), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (279, 281), False, 'from inspect import currentframe, getframeinfo\n'), ((351, 365), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (363, 365), False, 'from inspect import currentframe, getframeinfo\n'), ((413, 427), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (425, 427), False, 'from inspect import currentframe, getframeinfo\n'), ((467, 481), 'inspect.currentframe', 'currentframe', ([], {}), '()\n', (479, 481), False, 'from inspect import currentframe, getframeinfo\n')]
|
#!/usr/bin/env python
#
# test_inject_config.py - test cases for the COTInjectConfig class
#
# December 2014, <NAME>
# Copyright (c) 2013-2017 the COT project developers.
# See the COPYRIGHT.txt file at the top-level directory of this distribution
# and at https://github.com/glennmatthews/cot/blob/master/COPYRIGHT.txt.
#
# This file is part of the Common OVF Tool (COT) project.
# It is subject to the license terms in the LICENSE.txt file found in the
# top-level directory of this distribution and at
# https://github.com/glennmatthews/cot/blob/master/LICENSE.txt. No part
# of COT, including this file, may be copied, modified, propagated, or
# distributed except according to the terms contained in the LICENSE.txt file.
"""Unit test cases for the COT.inject_config.COTInjectConfig class."""
import logging
import os.path
import re
import shutil
import mock
from COT.commands.tests.command_testcase import CommandTestCase
from COT.ui import UI
from COT.commands.inject_config import COTInjectConfig
from COT.data_validation import InvalidInputError, ValueUnsupportedError
from COT.platforms import CSR1000V, IOSv, IOSXRv, IOSXRvLC
from COT.helpers import helpers
from COT.disks import DiskRepresentation
from COT.commands.remove_file import COTRemoveFile
logger = logging.getLogger(__name__)
class TestCOTInjectConfig(CommandTestCase):
"""Test cases for COTInjectConfig class."""
# Expected message
OVERWRITE_CONFIG_DISK = {
'levelname': 'NOTICE',
'msg': "Overwriting existing config disk",
}
command_class = COTInjectConfig
def setUp(self):
"""Test case setup function called automatically prior to each test."""
super(TestCOTInjectConfig, self).setUp()
self.config_file = self.sample_cfg
def test_readiness(self):
"""Test ready_to_run() under various combinations of parameters."""
self.command.package = self.input_ovf
# IOSXRv is the only platform that supports both primary and secondary
# config, so fake out our platform type appropriately.
self.set_vm_platform(IOSXRv)
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.assertTrue(re.search("No files specified", reason))
self.assertRaises(InvalidInputError, self.command.run)
self.command.config_file = self.config_file
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
self.command.config_file = None
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.command.secondary_config_file = self.config_file
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
self.command.secondary_config_file = None
ready, reason = self.command.ready_to_run()
self.assertFalse(ready)
self.command.extra_files = [self.config_file]
ready, reason = self.command.ready_to_run()
self.assertTrue(ready)
def test_invalid_always_args(self):
"""Test input values that are always invalid."""
self.command.package = self.input_ovf
with self.assertRaises(InvalidInputError):
self.command.config_file = 0
with self.assertRaises(InvalidInputError):
self.command.secondary_config_file = 0
with self.assertRaises(InvalidInputError):
self.command.extra_files = [self.input_ovf, '/foo/bar']
def test_valid_by_platform(self):
"""Test input values whose validity depends on the platform."""
self.command.package = self.input_ovf
# IOSXRvLC supports neither primary nor secondary config files
self.set_vm_platform(IOSXRvLC)
with self.assertRaises(InvalidInputError):
self.command.config_file = self.config_file
with self.assertRaises(InvalidInputError):
self.command.secondary_config_file = self.config_file
# IOSv supports primary but not secondary
self.set_vm_platform(IOSv)
self.command.config_file = self.config_file
with self.assertRaises(InvalidInputError):
self.command.secondary_config_file = self.config_file
# IOSXRv supports both
self.set_vm_platform(IOSXRv)
self.command.config_file = self.config_file
self.command.secondary_config_file = self.config_file
def test_inject_config_iso(self):
"""Inject config file on an ISO."""
self.command.package = self.input_ovf
self.command.config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
config_iso = os.path.join(self.temp_dir, 'config.iso')
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="config.iso" ovf:id="config.iso" \
ovf:size="{config_size}" />
</ovf:References>
...
<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>CD-ROM 2</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/config.iso</rasd:HostResource>
<rasd:InstanceID>8</rasd:InstanceID>"""
.format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
config_size=os.path.getsize(config_iso)))
if helpers['isoinfo']:
# The sample_cfg.text should be renamed to the platform-specific
# file name for bootstrap config - in this case, config.txt
self.assertEqual(DiskRepresentation.from_file(config_iso).files,
["config.txt"])
else:
logger.info("isoinfo not available, not checking disk contents")
def test_inject_config_iso_relative_path(self):
"""Inject config file specified by relative path, on an ISO."""
os.chdir(os.path.dirname(self.config_file))
self.command.package = os.path.relpath(self.input_ovf)
self.command.config_file = os.path.basename(self.config_file)
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
config_iso = os.path.join(self.temp_dir, 'config.iso')
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="config.iso" ovf:id="config.iso" \
ovf:size="{config_size}" />
</ovf:References>
...
<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>CD-ROM 2</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/config.iso</rasd:HostResource>
<rasd:InstanceID>8</rasd:InstanceID>"""
.format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
config_size=os.path.getsize(config_iso)))
def test_inject_config_iso_secondary(self):
"""Inject secondary config file on an ISO."""
self.command.package = self.input_ovf
self.set_vm_platform(IOSXRv)
self.command.secondary_config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
self.assertLogged(**self.invalid_hardware_warning(
'4CPU-4GB-3NIC', 'VMXNET3', 'NIC type'))
self.assertLogged(**self.invalid_hardware_warning(
'1CPU-1GB-1NIC', 'VMXNET3', 'NIC type'))
self.assertLogged(**self.invalid_hardware_warning(
'1CPU-1GB-1NIC', '1024', 'MiB of RAM'))
self.assertLogged(**self.invalid_hardware_warning(
'2CPU-2GB-1NIC', 'VMXNET3', 'NIC type'))
self.assertLogged(**self.invalid_hardware_warning(
'2CPU-2GB-1NIC', '2048', 'MiB of RAM'))
config_iso = os.path.join(self.temp_dir, 'config.iso')
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="config.iso" ovf:id="config.iso" \
ovf:size="{config_size}" />
</ovf:References>
...
<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>CD-ROM 2</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/config.iso</rasd:HostResource>
<rasd:InstanceID>8</rasd:InstanceID>"""
.format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
config_size=os.path.getsize(config_iso)))
if helpers['isoinfo']:
# The sample_cfg.text should be renamed to the platform-specific
# file name for secondary bootstrap config
self.assertEqual(DiskRepresentation.from_file(config_iso).files,
["iosxr_config_admin.txt"])
else:
logger.info("isoinfo not available, not checking disk contents")
def test_inject_config_iso_multiple_drives(self):
"""Inject config file on an ISO when multiple empty drives exist."""
temp_ovf = os.path.join(self.temp_dir, "intermediate.ovf")
# Remove the existing ISO from our input_ovf:
remover = COTRemoveFile(UI())
remover.package = self.input_ovf
remover.output = temp_ovf
remover.file_path = "input.iso"
remover.run()
remover.finished()
remover.destroy()
# Now we have two empty drives.
self.command.package = temp_ovf
self.command.config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
config_iso = os.path.join(self.temp_dir, 'config.iso')
self.check_diff("""
<ovf:File ovf:href="input.vmdk" ovf:id="file1" ovf:size="{vmdk_size}" />
- <ovf:File ovf:href="input.iso" ovf:id="file2" ovf:size="{iso_size}" />
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="config.iso" ovf:id="config.iso" \
ovf:size="{config_size}" />
</ovf:References>
...
<rasd:AutomaticAllocation>true</rasd:AutomaticAllocation>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>CD-ROM 1</rasd:ElementName>
- <rasd:HostResource>ovf:/file/file2</rasd:HostResource>
+ <rasd:HostResource>ovf:/file/config.iso</rasd:HostResource>
<rasd:InstanceID>7</rasd:InstanceID>"""
.format(vmdk_size=self.FILE_SIZE['input.vmdk'],
iso_size=self.FILE_SIZE['input.iso'],
cfg_size=self.FILE_SIZE['sample_cfg.txt'],
config_size=os.path.getsize(config_iso)))
if helpers['isoinfo']:
# The sample_cfg.text should be renamed to the platform-specific
# file name for bootstrap config - in this case, config.txt
self.assertEqual(DiskRepresentation.from_file(config_iso).files,
["config.txt"])
else:
logger.info("isoinfo not available, not checking disk contents")
def test_inject_config_vmdk(self):
"""Inject config file on a VMDK."""
self.command.package = self.iosv_ovf
self.command.config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
# Note that in this case there is an existing placeholder Disk;
# to be OVF standard compliant, the new File must be created in the
# same order relative to the other Files as the existing Disk is
# to the other Disks.
config_vmdk = os.path.join(self.temp_dir, 'config.vmdk')
self.check_diff(file1=self.iosv_ovf,
expected="""
<ovf:References>
+ <ovf:File ovf:href="config.vmdk" ovf:id="config.vmdk" \
ovf:size="{config_size}" />
<ovf:File ovf:href="input.vmdk" ovf:id="vios-adventerprisek9-m.vmdk" \
ovf:size="{input_size}" />
...
<ovf:Info>Virtual disk information</ovf:Info>
- <ovf:Disk ovf:capacity="128" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="flash2" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
+ <ovf:Disk ovf:capacity="8" ovf:capacityAllocationUnits="byte * 2^20" \
ovf:diskId="flash2" ovf:fileRef="config.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
<ovf:Disk ovf:capacity="1073741824" ovf:capacityAllocationUnits="byte" \
ovf:diskId="vios-adventerprisek9-m.vmdk" \
ovf:fileRef="vios-adventerprisek9-m.vmdk" ovf:format=\
"http://www.vmware.com/interfaces/specifications/vmdk.html#streamOptimized" />
...
<rasd:AddressOnParent>1</rasd:AddressOnParent>
- <rasd:Description>Disk device corresponding to flash2:; may be used \
for bootstrap configuration.</rasd:Description>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>flash2</rasd:ElementName>"""
.format(input_size=self.FILE_SIZE['input.vmdk'],
config_size=os.path.getsize(config_vmdk)))
# TODO - we don't currently have a way to check VMDK file listing
# self.assertEqual(DiskRepresentation.from_file(config_vmdk).files,
# ["ios_config.txt"])
def test_inject_config_unsupported_format_existing(self):
"""Only 'harddisk' and 'cdrom' config drives are supported."""
self.command.package = self.input_ovf
self.command.config_file = self.config_file
# Failure during initial lookup of existing drive
# pylint: disable=protected-access
with mock.patch.object(self.command.vm._platform,
'BOOTSTRAP_DISK_TYPE',
new_callable=mock.PropertyMock,
return_value='floppy'):
self.assertRaises(ValueUnsupportedError, self.command.run)
def test_inject_config_unsupported_format_new_disk(self):
"""Only 'harddisk' and 'cdrom' config drives are supported."""
self.command.package = self.input_ovf
self.command.config_file = self.config_file
# Drive lookup passes, but failure to create new disk
# pylint: disable=protected-access
with mock.patch.object(self.command.vm._platform,
'BOOTSTRAP_DISK_TYPE',
new_callable=mock.PropertyMock,
side_effect=('cdrom', 'cdrom',
'floppy', 'floppy', 'floppy')):
self.assertRaises(ValueUnsupportedError, self.command.run)
def test_inject_config_repeatedly(self):
"""inject-config repeatedly."""
# Add initial config file
self.command.package = self.input_ovf
self.command.config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
# Overwrite it with a new one
self.command.package = self.temp_file
self.command.config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITE_CONFIG_DISK)
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
# And again.
self.command.package = self.temp_file
self.command.config_file = self.config_file
self.command.run()
self.assertLogged(**self.OVERWRITE_CONFIG_DISK)
self.assertLogged(**self.OVERWRITING_FILE)
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="config.iso" ovf:id="config.iso" \
ovf:size="{config_size}" />
</ovf:References>
...
<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>CD-ROM 2</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/config.iso</rasd:HostResource>
<rasd:InstanceID>8</rasd:InstanceID>"""
.format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
config_size=os.path.getsize(os.path.join(
self.temp_dir, 'config.iso'))))
def test_inject_config_fail_no_disk_available(self):
"""Error handling if the OVF doesn't have an appropriate drive."""
self.command.package = self.minimal_ovf
self.command.config_file = self.config_file
# CSR1000V wants a CD-ROM drive
self.set_vm_platform(CSR1000V)
self.assertRaises(LookupError, self.command.run)
# IOSv wants a hard disk - will fail due to no DiskSection
self.set_vm_platform(IOSv)
self.assertRaises(LookupError, self.command.run)
# Also fail due to DiskSection but no placeholder:
self.command.package = self.input_ovf
self.set_vm_platform(IOSv)
self.assertRaises(LookupError, self.command.run)
def test_find_parent_fail_no_parent(self):
"""Negative testing of some inject-config related APIs."""
self.command.package = self.input_ovf
cpu_item = self.command.vm.hardware.find_item(
resource_type='cpu')
self.assertRaises(LookupError,
self.command.vm.find_device_location, cpu_item)
self.assertLogged(levelname="WARNING",
msg="Item.*has no 'Parent' subelement")
def test_inject_extra_directory(self):
"""Test injection of extras from an entire directory."""
self.command.package = self.input_ovf
extra_dir = os.path.join(self.temp_dir, "configs")
os.makedirs(extra_dir)
shutil.copy(self.input_ovf, extra_dir)
shutil.copy(self.minimal_ovf, extra_dir)
subdir = os.path.join(extra_dir, "subdirectory")
os.makedirs(subdir)
shutil.copy(self.invalid_ovf, subdir)
self.command.extra_files = [extra_dir]
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
config_iso = os.path.join(self.temp_dir, 'config.iso')
if helpers['isoinfo']:
self.assertEqual(
DiskRepresentation.from_file(config_iso).files,
[
'input.ovf',
'minimal.ovf',
'subdirectory',
'subdirectory/invalid.ovf',
]
)
else:
logger.info("isoinfo not present, not checking disk contents")
def test_inject_config_primary_secondary_extra(self):
"""Test injection of primary and secondary files and extras."""
self.command.package = self.input_ovf
# IOSXRv supports secondary config
self.set_vm_platform(IOSXRv)
self.command.config_file = self.config_file
self.command.secondary_config_file = self.config_file
self.command.extra_files = [self.minimal_ovf, self.vmware_ovf]
self.command.run()
self.assertLogged(**self.OVERWRITING_DISK_ITEM)
self.command.finished()
self.assertLogged(**self.invalid_hardware_warning(
'4CPU-4GB-3NIC', 'VMXNET3', 'NIC type'))
self.assertLogged(**self.invalid_hardware_warning(
'1CPU-1GB-1NIC', 'VMXNET3', 'NIC type'))
self.assertLogged(**self.invalid_hardware_warning(
'1CPU-1GB-1NIC', '1024', 'MiB of RAM'))
self.assertLogged(**self.invalid_hardware_warning(
'2CPU-2GB-1NIC', 'VMXNET3', 'NIC type'))
self.assertLogged(**self.invalid_hardware_warning(
'2CPU-2GB-1NIC', '2048', 'MiB of RAM'))
config_iso = os.path.join(self.temp_dir, 'config.iso')
self.check_diff("""
<ovf:File ovf:href="sample_cfg.txt" ovf:id="textfile" \
ovf:size="{cfg_size}" />
+ <ovf:File ovf:href="config.iso" ovf:id="config.iso" \
ovf:size="{config_size}" />
</ovf:References>
...
<rasd:AutomaticAllocation>false</rasd:AutomaticAllocation>
+ <rasd:Description>Configuration disk</rasd:Description>
<rasd:ElementName>CD-ROM 2</rasd:ElementName>
+ <rasd:HostResource>ovf:/file/config.iso</rasd:HostResource>
<rasd:InstanceID>8</rasd:InstanceID>"""
.format(cfg_size=self.FILE_SIZE['sample_cfg.txt'],
config_size=os.path.getsize(config_iso)))
if helpers['isoinfo']:
self.assertEqual(
DiskRepresentation.from_file(config_iso).files,
[
"iosxr_config.txt",
"iosxr_config_admin.txt",
"minimal.ovf",
"vmware.ovf",
]
)
else:
logger.info("isoinfo not available, not checking disk contents")
|
[
"mock.patch.object",
"COT.disks.DiskRepresentation.from_file",
"COT.ui.UI",
"shutil.copy",
"re.search",
"logging.getLogger"
] |
[((1275, 1302), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1292, 1302), False, 'import logging\n'), ((18174, 18212), 'shutil.copy', 'shutil.copy', (['self.input_ovf', 'extra_dir'], {}), '(self.input_ovf, extra_dir)\n', (18185, 18212), False, 'import shutil\n'), ((18221, 18261), 'shutil.copy', 'shutil.copy', (['self.minimal_ovf', 'extra_dir'], {}), '(self.minimal_ovf, extra_dir)\n', (18232, 18261), False, 'import shutil\n'), ((18355, 18392), 'shutil.copy', 'shutil.copy', (['self.invalid_ovf', 'subdir'], {}), '(self.invalid_ovf, subdir)\n', (18366, 18392), False, 'import shutil\n'), ((2211, 2250), 're.search', 're.search', (['"""No files specified"""', 'reason'], {}), "('No files specified', reason)\n", (2220, 2250), False, 'import re\n'), ((9320, 9324), 'COT.ui.UI', 'UI', ([], {}), '()\n', (9322, 9324), False, 'from COT.ui import UI\n'), ((13929, 14055), 'mock.patch.object', 'mock.patch.object', (['self.command.vm._platform', '"""BOOTSTRAP_DISK_TYPE"""'], {'new_callable': 'mock.PropertyMock', 'return_value': '"""floppy"""'}), "(self.command.vm._platform, 'BOOTSTRAP_DISK_TYPE',\n new_callable=mock.PropertyMock, return_value='floppy')\n", (13946, 14055), False, 'import mock\n'), ((14567, 14736), 'mock.patch.object', 'mock.patch.object', (['self.command.vm._platform', '"""BOOTSTRAP_DISK_TYPE"""'], {'new_callable': 'mock.PropertyMock', 'side_effect': "('cdrom', 'cdrom', 'floppy', 'floppy', 'floppy')"}), "(self.command.vm._platform, 'BOOTSTRAP_DISK_TYPE',\n new_callable=mock.PropertyMock, side_effect=('cdrom', 'cdrom', 'floppy',\n 'floppy', 'floppy'))\n", (14584, 14736), False, 'import mock\n'), ((5628, 5668), 'COT.disks.DiskRepresentation.from_file', 'DiskRepresentation.from_file', (['config_iso'], {}), '(config_iso)\n', (5656, 5668), False, 'from COT.disks import DiskRepresentation\n'), ((8838, 8878), 'COT.disks.DiskRepresentation.from_file', 'DiskRepresentation.from_file', (['config_iso'], {}), '(config_iso)\n', (8866, 8878), False, 'from COT.disks import DiskRepresentation\n'), ((11076, 11116), 'COT.disks.DiskRepresentation.from_file', 'DiskRepresentation.from_file', (['config_iso'], {}), '(config_iso)\n', (11104, 11116), False, 'from COT.disks import DiskRepresentation\n'), ((18697, 18737), 'COT.disks.DiskRepresentation.from_file', 'DiskRepresentation.from_file', (['config_iso'], {}), '(config_iso)\n', (18725, 18737), False, 'from COT.disks import DiskRepresentation\n'), ((20972, 21012), 'COT.disks.DiskRepresentation.from_file', 'DiskRepresentation.from_file', (['config_iso'], {}), '(config_iso)\n', (21000, 21012), False, 'from COT.disks import DiskRepresentation\n')]
|
'''
<NAME> 2012-2013
<<EMAIL>>
'''
import numpy as np
import scipy.io as sio
def create_icosahedron(height=1.,payloadR=0.3):
'''
Creates a tensegrity icosahedron
'''
#create points
points = np.zeros((3,2*6+2))
phi = (1+np.sqrt(5))*0.5
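    # the 12 vertices lie on three mutually orthogonal golden rectangles; a
    # regular icosahedron would use 1.0 where `offset` (0.792) appears below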
    offset = 0.792
    points[:,0] = (-phi,0,1*offset)
    points[:,1] = (phi,0,1*offset)
    points[:,2] = (-phi,0,-1*offset)
    points[:,3] = (phi,0,-1*offset)
    points[:,4] = (1*offset,-phi,0)
    points[:,5] = (1*offset,phi,0)
    points[:,6] = (-1*offset,-phi,0)
    points[:,7] = (-1*offset,phi,0)
    points[:,8] = (0,1*offset,-phi)
    points[:,9] = (0,1*offset,phi)
    points[:,10] = (0,-1*offset,-phi)
    points[:,11] = (0,-1*offset,phi)
points[:,12] = (0,0,(payloadR/(height*0.62*0.5)))
points[:,13] = (0,0,(-payloadR/(height*0.62*0.5)))
#theta = -0.36486 #theta=-20.905 degrees
#yRot = np.array([[np.cos(theta),0.,np.sin(theta)],[0.,1.,0.],[-np.sin(theta),0.,np.cos(theta)]])
#testPoints = np.dot(yRot,points)
#print points
#print testPoints
#points = testPoints
points *= (height*0.62)*0.5
#scale the tensegrity and center it
#create bars
bars = np.zeros((6+1,6*2+2))
bars[:,::2] = np.eye(6+1)
bars[:,1::2] = -np.eye(6+1)
#create springs
springs = np.zeros((6*6,2*6+2))
springs[0,(0,6)] = (1,-1)
springs[1,(0,7)] = (1,-1)
springs[2,(0,9)] = (1,-1)
springs[3,(0,11)] = (1,-1)
springs[4,(1,4)] = (1,-1)
springs[5,(1,5)] = (1,-1)
springs[6,(1,9)] = (1,-1)
springs[7,(1,11)] = (1,-1)
springs[8,(2,6)] = (1,-1)
springs[9,(2,7)] = (1,-1)
springs[10,(2,8)] = (1,-1)
springs[11,(2,10)] = (1,-1)
springs[12,(3,4)] = (1,-1)
springs[13,(3,5)] = (1,-1)
springs[14,(3,8)] = (1,-1)
springs[15,(3,10)] = (1,-1)
springs[16,(4,10)] = (1,-1)
springs[17,(4,11)] = (1,-1)
springs[18,(5,8)] = (1,-1)
springs[19,(5,9)] = (1,-1)
springs[20,(6,10)] = (1,-1)
springs[21,(6,11)] = (1,-1)
springs[22,(7,8)] = (1,-1)
springs[23,(7,9)] = (1,-1)
springs[24,(0,12)] = (1,-1)
springs[25,(1,12)] = (1,-1)
springs[26,(2,13)] = (1,-1)
springs[27,(3,13)] = (1,-1)
springs[28,(6,13)] = (1,-1)
springs[29,(5,13)] = (1,-1)
springs[30,(4,12)] = (1,-1)
springs[31,(7,12)] = (1,-1)
springs[32,(8,13)] = (1,-1)
springs[33,(9,12)] = (1,-1)
springs[34,(10,13)] = (1,-1)
springs[35,(11,12)] = (1,-1)
# springs[:num_struts,:num_struts] = -np.eye(num_struts)+np.roll(np.eye(num_struts),1,1) #ground layer
# springs[num_struts:2*num_struts,num_struts:] = -np.eye(num_struts)+np.roll(np.eye(num_struts),1,1) #top layer
# springs[2*num_struts:,:num_struts] = -np.eye(num_struts) #connections between layers
# springs[2*num_struts:,num_struts:] = np.roll(np.eye(num_struts),1,1)
return points,bars,springs
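if __name__ == '__main__':
    # Illustration only: 12 icosahedron vertices plus 2 payload points give 14
    # nodes; bars and springs are signed incidence matrices over those nodes.
    pts, bars, springs = create_icosahedron(height=1., payloadR=0.3)
    print(pts.shape, bars.shape, springs.shape)  # (3, 14) (7, 14) (36, 14)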
|
[
"numpy.eye",
"numpy.zeros",
"numpy.sqrt"
] |
[((214, 238), 'numpy.zeros', 'np.zeros', (['(3, 2 * 6 + 2)'], {}), '((3, 2 * 6 + 2))\n', (222, 238), True, 'import numpy as np\n'), ((1188, 1216), 'numpy.zeros', 'np.zeros', (['(6 + 1, 6 * 2 + 2)'], {}), '((6 + 1, 6 * 2 + 2))\n', (1196, 1216), True, 'import numpy as np\n'), ((1228, 1241), 'numpy.eye', 'np.eye', (['(6 + 1)'], {}), '(6 + 1)\n', (1234, 1241), True, 'import numpy as np\n'), ((1307, 1335), 'numpy.zeros', 'np.zeros', (['(6 * 6, 2 * 6 + 2)'], {}), '((6 * 6, 2 * 6 + 2))\n', (1315, 1335), True, 'import numpy as np\n'), ((1260, 1273), 'numpy.eye', 'np.eye', (['(6 + 1)'], {}), '(6 + 1)\n', (1266, 1273), True, 'import numpy as np\n'), ((247, 257), 'numpy.sqrt', 'np.sqrt', (['(5)'], {}), '(5)\n', (254, 257), True, 'import numpy as np\n')]
|
import numpy as np
import pandas as pd
def Preprc(raw_data: object, flag: object = 0) -> object:
"""
    Function to compute the decoded values in motionsense HRV sensors and
    interpolate the timestamps given the decoded sequence numbers
:param raw_data:
:param flag:
:return:
"""
    # process received arrays (data_arr1=data, data_arr2=time,seq)
if not list(raw_data):
return []
data_arr1, data_arr2, err_pkts = process_raw_PPG(raw_data)
seq = np.copy(data_arr2[:, 1])
# make Sq no. ordered
d = np.diff(seq)
idx1 = np.where(d < -(1023 - 50))[0]
idx1 = np.append(idx1, len(seq) - 1)
for i in range(len(idx1) - 1):
seq[idx1[i] + 1:idx1[i + 1] + 1] = seq[idx1[i] + 1:idx1[i + 1] + 1] - (i + 1) * d[idx1[i]]
seq = (seq - seq[0]).astype(int).reshape((len(seq)))
# print(seq)
seq_max = max(seq) # just some heuristic to make ECG seq value 4 times
arr1 = np.concatenate([seq.reshape((len(seq), 1)), data_arr1], axis=1)
    if raw_data is not None:  # was `raw_data.all != None`, which compared a bound method to None and was always true
df1 = pd.DataFrame(arr1, columns=['Seq', 'AccX', 'AccY', 'AccZ', 'GyroX',
'GyroY', 'GyroZ', 'LED1', 'LED2', 'LED3'])
else:
return []
df1.drop_duplicates(subset=['Seq'], inplace=True)
df2 = pd.DataFrame(np.array(range(seq_max + 1)), columns=['Seq'])
itime = data_arr2[0, 0];
ftime = data_arr2[-1, 0]
df3 = df2.merge(df1, how='left', on=['Seq'])
df3['time'] = pd.to_datetime(np.linspace(itime, ftime, len(df2)), unit='ms')
df3.set_index('time', inplace=True)
df3.interpolate(method='time', axis=0, inplace=True) # filling missing data
df3.dropna(inplace=True)
df3['time_stamps'] = np.linspace(itime, ftime, len(df2))
return df3
def process_raw_PPG(raw_data: object) -> object:
"""
function to decode the values from raw byte arrays
:rtype: object
:param raw_data:
:return:
"""
data = raw_data
Vals = data[:, 2:]
num_samples = Vals.shape[0]
ts = data[:, 0]
Accx = np.zeros((num_samples));
Accy = np.zeros((num_samples))
Accz = np.zeros((num_samples));
Gyrox = np.zeros((num_samples))
Gyroy = np.zeros((num_samples));
Gyroz = np.zeros((num_samples))
led1 = np.zeros((num_samples));
led2 = np.zeros((num_samples))
led3 = np.zeros((num_samples));
seq = np.zeros((num_samples))
time_stamps = np.zeros((num_samples))
n = 0;
i = 0;
s = 0;
mis_pkts = 0
while (n) < (num_samples):
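        # each 20-byte packet: six big-endian int16 IMU samples (bytes 0-11),
        # three 18-bit LED values packed into bytes 12-18, and a 10-bit
        # sequence number in the low bits of bytes 18-19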
time_stamps[i] = ts[n]
Accx[i] = np.int16((np.uint8(Vals[n, 0]) << 8) | (np.uint8(Vals[n, 1])))
Accy[i] = np.int16((np.uint8(Vals[n, 2]) << 8) | (np.uint8(Vals[n, 3])))
Accz[i] = np.int16((np.uint8(Vals[n, 4]) << 8) | (np.uint8(Vals[n, 5])))
Gyrox[i] = np.int16((np.uint8(Vals[n, 6]) << 8) | (np.uint8(Vals[n, 7])))
Gyroy[i] = np.int16((np.uint8(Vals[n, 8]) << 8) | (np.uint8(Vals[n, 9])))
Gyroz[i] = np.int16((np.uint8(Vals[n, 10]) << 8) | (np.uint8(Vals[n, 11])))
led1[i] = (np.uint8(Vals[n, 12]) << 10) | (np.uint8(Vals[n, 13]) << 2) | \
((np.uint8(Vals[n, 14]) & int('11000000', 2)) >> 6)
led2[i] = ((np.uint8(Vals[n, 14]) & int('00111111', 2)) << 12) | \
(np.uint8(Vals[n, 15]) << 4) | \
((np.uint8(Vals[n, 16]) & int('11110000', 2)) >> 4)
led3[i] = ((np.uint8(Vals[n, 16]) & int('00001111', 2)) << 14) | \
(np.uint8(Vals[n, 17]) << 6) | \
((np.uint8(Vals[n, 18]) & int('11111100', 2)) >> 2)
seq[i] = ((np.uint8(Vals[n, 18]) & int('00000011', 2)) << 8) | \
(np.uint8(Vals[n, 19]))
if i > 0:
difer = int((seq[i] - seq[i - 1]) % 1024)
if difer > 50:
s = s + 1 # keep a record of how many such errors occured
n = n + 1
continue
mis_pkts = mis_pkts + (difer - 1)
n = n + 1;
i = i + 1
# removing any trailing zeros
seq = seq[:i];
time_stamps = time_stamps[:i]
Accx = Accx[:i];
Accy = Accy[:i];
Accz = Accz[:i]
Gyrox = Gyrox[:i];
Gyroy = Gyroy[:i];
Gyroz = Gyroz[:i]
led1 = led1[:i];
led2 = led2[:i];
led3 = led3[:i]
# print('no. of unknown seq errors in PPG= ',s)
# print('no. of missed packets= {}'.format(mis_pkts))
data_arr1 = np.stack((Accx, Accy, Accz, Gyrox, Gyroy, Gyroz, led1, led2, led3), axis=1)
# print(np.shape(data_arr1))
data_arr2 = np.concatenate((time_stamps.reshape(1, -1), seq.reshape(1, -1))).T
return data_arr1, data_arr2, (mis_pkts + s)
|
[
"numpy.stack",
"pandas.DataFrame",
"numpy.uint8",
"numpy.copy",
"numpy.zeros",
"numpy.diff",
"numpy.where"
] |
[((493, 517), 'numpy.copy', 'np.copy', (['data_arr2[:, 1]'], {}), '(data_arr2[:, 1])\n', (500, 517), True, 'import numpy as np\n'), ((552, 564), 'numpy.diff', 'np.diff', (['seq'], {}), '(seq)\n', (559, 564), True, 'import numpy as np\n'), ((2056, 2077), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2064, 2077), True, 'import numpy as np\n'), ((2092, 2113), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2100, 2113), True, 'import numpy as np\n'), ((2127, 2148), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2135, 2148), True, 'import numpy as np\n'), ((2164, 2185), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2172, 2185), True, 'import numpy as np\n'), ((2200, 2221), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2208, 2221), True, 'import numpy as np\n'), ((2237, 2258), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2245, 2258), True, 'import numpy as np\n'), ((2272, 2293), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2280, 2293), True, 'import numpy as np\n'), ((2308, 2329), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2316, 2329), True, 'import numpy as np\n'), ((2343, 2364), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2351, 2364), True, 'import numpy as np\n'), ((2378, 2399), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2386, 2399), True, 'import numpy as np\n'), ((2420, 2441), 'numpy.zeros', 'np.zeros', (['num_samples'], {}), '(num_samples)\n', (2428, 2441), True, 'import numpy as np\n'), ((4419, 4494), 'numpy.stack', 'np.stack', (['(Accx, Accy, Accz, Gyrox, Gyroy, Gyroz, led1, led2, led3)'], {'axis': '(1)'}), '((Accx, Accy, Accz, Gyrox, Gyroy, Gyroz, led1, led2, led3), axis=1)\n', (4427, 4494), True, 'import numpy as np\n'), ((576, 602), 'numpy.where', 'np.where', (['(d < -(1023 - 50))'], {}), '(d < -(1023 - 50))\n', (584, 602), True, 'import numpy as np\n'), ((1052, 1166), 'pandas.DataFrame', 'pd.DataFrame', (['arr1'], {'columns': "['Seq', 'AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY', 'GyroZ', 'LED1', 'LED2',\n 'LED3']"}), "(arr1, columns=['Seq', 'AccX', 'AccY', 'AccZ', 'GyroX', 'GyroY',\n 'GyroZ', 'LED1', 'LED2', 'LED3'])\n", (1064, 1166), True, 'import pandas as pd\n'), ((3683, 3704), 'numpy.uint8', 'np.uint8', (['Vals[n, 19]'], {}), '(Vals[n, 19])\n', (3691, 3704), True, 'import numpy as np\n'), ((2614, 2634), 'numpy.uint8', 'np.uint8', (['Vals[n, 1]'], {}), '(Vals[n, 1])\n', (2622, 2634), True, 'import numpy as np\n'), ((2695, 2715), 'numpy.uint8', 'np.uint8', (['Vals[n, 3]'], {}), '(Vals[n, 3])\n', (2703, 2715), True, 'import numpy as np\n'), ((2776, 2796), 'numpy.uint8', 'np.uint8', (['Vals[n, 5]'], {}), '(Vals[n, 5])\n', (2784, 2796), True, 'import numpy as np\n'), ((2858, 2878), 'numpy.uint8', 'np.uint8', (['Vals[n, 7]'], {}), '(Vals[n, 7])\n', (2866, 2878), True, 'import numpy as np\n'), ((2940, 2960), 'numpy.uint8', 'np.uint8', (['Vals[n, 9]'], {}), '(Vals[n, 9])\n', (2948, 2960), True, 'import numpy as np\n'), ((3023, 3044), 'numpy.uint8', 'np.uint8', (['Vals[n, 11]'], {}), '(Vals[n, 11])\n', (3031, 3044), True, 'import numpy as np\n'), ((2584, 2604), 'numpy.uint8', 'np.uint8', (['Vals[n, 0]'], {}), '(Vals[n, 0])\n', (2592, 2604), True, 'import numpy as np\n'), ((2665, 2685), 'numpy.uint8', 'np.uint8', (['Vals[n, 2]'], {}), '(Vals[n, 2])\n', (2673, 2685), True, 'import numpy as np\n'), ((2746, 2766), 'numpy.uint8', 'np.uint8', (['Vals[n, 4]'], {}), '(Vals[n, 4])\n', (2754, 2766), True, 'import numpy as np\n'), ((2828, 2848), 'numpy.uint8', 'np.uint8', (['Vals[n, 6]'], {}), '(Vals[n, 6])\n', (2836, 2848), True, 'import numpy as np\n'), ((2910, 2930), 'numpy.uint8', 'np.uint8', (['Vals[n, 8]'], {}), '(Vals[n, 8])\n', (2918, 2930), True, 'import numpy as np\n'), ((2992, 3013), 'numpy.uint8', 'np.uint8', (['Vals[n, 10]'], {}), '(Vals[n, 10])\n', (3000, 3013), True, 'import numpy as np\n'), ((3066, 3087), 'numpy.uint8', 'np.uint8', (['Vals[n, 12]'], {}), '(Vals[n, 12])\n', (3074, 3087), True, 'import numpy as np\n'), ((3098, 3119), 'numpy.uint8', 'np.uint8', (['Vals[n, 13]'], {}), '(Vals[n, 13])\n', (3106, 3119), True, 'import numpy as np\n'), ((3150, 3171), 'numpy.uint8', 'np.uint8', (['Vals[n, 14]'], {}), '(Vals[n, 14])\n', (3158, 3171), True, 'import numpy as np\n'), ((3294, 3315), 'numpy.uint8', 'np.uint8', (['Vals[n, 15]'], {}), '(Vals[n, 15])\n', (3302, 3315), True, 'import numpy as np\n'), ((3346, 3367), 'numpy.uint8', 'np.uint8', (['Vals[n, 16]'], {}), '(Vals[n, 16])\n', (3354, 3367), True, 'import numpy as np\n'), ((3490, 3511), 'numpy.uint8', 'np.uint8', (['Vals[n, 17]'], {}), '(Vals[n, 17])\n', (3498, 3511), True, 'import numpy as np\n'), ((3542, 3563), 'numpy.uint8', 'np.uint8', (['Vals[n, 18]'], {}), '(Vals[n, 18])\n', (3550, 3563), True, 'import numpy as np\n'), ((3611, 3632), 'numpy.uint8', 'np.uint8', (['Vals[n, 18]'], {}), '(Vals[n, 18])\n', (3619, 3632), True, 'import numpy as np\n'), ((3220, 3241), 'numpy.uint8', 'np.uint8', (['Vals[n, 14]'], {}), '(Vals[n, 14])\n', (3228, 3241), True, 'import numpy as np\n'), ((3416, 3437), 'numpy.uint8', 'np.uint8', (['Vals[n, 16]'], {}), '(Vals[n, 16])\n', (3424, 3437), True, 'import numpy as np\n')]
|
import discord, os, time, random, datetime, validators, asyncio, logging
from asyncio import sleep
from discord.ext import commands
from dateutil.parser import parse
PATH = "/home/nice/necibot"
logging.basicConfig(filename='necibot.log', level=logging.INFO)
logging.basicConfig(format='%(asctime)s %(message)s')
intents = discord.Intents().all()
bot = commands.Bot(command_prefix = '!', intents = intents)
guild_vc = {}
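# one active voice client per guild, keyed by guild id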
def log(msg):
time = datetime.datetime.now()
datef = time.strftime("%d/%m/%Y %H:%M:%S")
msg = '{0} - {1}'.format(datef, msg)
print(msg)
logging.warning(msg)
@bot.event
async def on_ready():
await bot.change_presence(activity = discord.Game(name = "!help"))
while True:
bot_names = open('{}/usernames.txt'.format(PATH), 'r').readlines()
name = random.choice(bot_names).strip()
avatars = os.listdir('{}/avatar/'.format(PATH))
avatar = random.choice(avatars)
fp = open('{}/avatar/{}'.format(PATH, avatar), 'rb')
pfp = fp.read()
await bot.user.edit(username=name, avatar=pfp)
await asyncio.sleep(60*60*24)
@bot.command(help = 'Plays a sound in the voice channel the user is currently in')
async def play(ctx, sound):
log('Called play')
try:
await ctx.message.delete()
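        # stop and release any voice client this guild already holds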
if ctx.guild.id in guild_vc:
vc = guild_vc[ctx.guild.id]
vc.stop()
await vc.disconnect()
log('Called play with {0}'.format(sound))
channel = ctx.author.voice.channel
        log('trying to connect to {0}'.format(channel))
vc = await channel.connect()
player = vc.play(discord.FFmpegOpusAudio('{0}/sounds/{1}.mp3'.format(PATH, sound)))
guild_vc[ctx.guild.id] = vc
while vc.is_playing():
await sleep(1)
vc.stop()
await vc.disconnect()
except Exception as e:
        log('An error occurred: {0}'.format(e))
@bot.command()
@commands.has_role('Condestable')
async def punish(ctx, user):
    log('Called punish')
sound = "punish"
try:
await ctx.message.delete()
if ctx.guild.id in guild_vc:
vc = guild_vc[ctx.guild.id]
vc.stop()
await vc.disconnect()
log('Called play with {0}'.format(sound))
original_channel = ctx.author.voice.channel
member = ctx.guild.get_member_named(user)
channel = ctx.guild.get_channel(794355113334013952)
await member.move_to(channel)
        log('trying to connect to {0}'.format(channel))
vc = await channel.connect()
player = vc.play(discord.FFmpegOpusAudio('{0}/sounds/{1}.mp3'.format(PATH, sound)))
guild_vc[ctx.guild.id] = vc
while vc.is_playing():
await sleep(1)
vc.stop()
await vc.disconnect()
await member.move_to(original_channel)
except Exception as e:
        log('An error occurred: {0}'.format(e))
@bot.command()
async def stop(ctx):
log('Called stop')
await ctx.message.delete()
if ctx.guild.id in guild_vc:
vc = guild_vc[ctx.guild.id]
vc.stop()
await vc.disconnect()
@bot.command(help = 'Returns the list of sounds available to play')
async def sounds(ctx):
log('Called sounds')
try:
files = [f.replace('.mp3', '') for f in os.listdir('{}/sounds/'.format(PATH)) if 'mp3' in f]
        await ctx.send('Available sounds:\n {0}'.format(files))
except Exception as e:
        log('An error occurred: {0}'.format(e))
@bot.command(help = 'Adds a new mp3 sound')
async def add(ctx):
log('Called add')
try:
for att in ctx.message.attachments:
file_url = att.proxy_url
if '.mp3' in file_url:
print('File: {0}'.format(att.proxy_url))
os.system('wget -P . {}'.format(att.proxy_url))
else:
                await ctx.send('It must be a valid .mp3 file')
except Exception as e:
dm_channel = await ctx.message.author.create_dm()
        await dm_channel.send('An error occurred: {0}'.format(e))
quotes = []
@bot.command()
async def addquote(ctx, *, quote):
log('Called addquote')
quotes.append(quote)
    await ctx.send('Quote : \"{}\" added successfully'.format(quote))
@bot.command()
async def quote(ctx):
log('Called quote')
await ctx.send(random.choice(quotes))
@bot.command()
async def playyt(ctx, link):
log('Called playYt')
valid_url = validators.url(link)
if valid_url:
guild_id = ctx.guild.id
yt_file_name = "yt-{0}".format(guild_id)
os.system('rm {0}/sounds/{1}.mp3'.format(PATH,yt_file_name))
os.system('youtube-dl --extract-audio --audio-format mp3 -o "{0}/sounds/{1}.mp3" {2}'.format(PATH, yt_file_name, link))
print('Downloaded {}'.format(yt_file_name))
await play(ctx, yt_file_name)
else:
dm_channel = await ctx.message.author.create_dm()
await dm_channel.send('Malformed youtube url {0}'.format(link))
@bot.command()
async def remember(ctx, msg, time):
await ctx.message.delete()
dm_channel = await ctx.message.author.create_dm()
try:
scheduled_time = parse(time)
td = scheduled_time - datetime.datetime.now()
seconds = td.seconds
log("Scheduled {0} at {1}".format(msg, scheduled_time))
await asyncio.sleep(seconds)
await dm_channel.send("{0} you told me to remember this:\n> {1}".format(ctx.message.author.mention, msg), tts=True)
except Exception as e:
log(e)
await dm_channel.send('Malformed date {0}'.format(time))
log('Init')
token = os.getenv('DISCORD_TOKEN')
bot.run(token)
|
[
"dateutil.parser.parse",
"logging.basicConfig",
"asyncio.sleep",
"logging.warning",
"time.strftime",
"validators.url",
"random.choice",
"discord.Game",
"discord.Intents",
"discord.ext.commands.Bot",
"discord.ext.commands.has_role",
"datetime.datetime.now",
"os.getenv"
] |
[((195, 258), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': '"""necibot.log"""', 'level': 'logging.INFO'}), "(filename='necibot.log', level=logging.INFO)\n", (214, 258), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((259, 312), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(asctime)s %(message)s"""'}), "(format='%(asctime)s %(message)s')\n", (278, 312), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((354, 403), 'discord.ext.commands.Bot', 'commands.Bot', ([], {'command_prefix': '"""!"""', 'intents': 'intents'}), "(command_prefix='!', intents=intents)\n", (366, 403), False, 'from discord.ext import commands\n'), ((1954, 1986), 'discord.ext.commands.has_role', 'commands.has_role', (['"""Condestable"""'], {}), "('Condestable')\n", (1971, 1986), False, 'from discord.ext import commands\n'), ((5666, 5692), 'os.getenv', 'os.getenv', (['"""DISCORD_TOKEN"""'], {}), "('DISCORD_TOKEN')\n", (5675, 5692), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((448, 471), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (469, 471), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((484, 518), 'time.strftime', 'time.strftime', (['"""%d/%m/%Y %H:%M:%S"""'], {}), "('%d/%m/%Y %H:%M:%S')\n", (497, 518), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((579, 599), 'logging.warning', 'logging.warning', (['msg'], {}), '(msg)\n', (594, 599), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((4500, 4520), 'validators.url', 'validators.url', (['link'], {}), '(link)\n', (4514, 4520), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((324, 341), 'discord.Intents', 'discord.Intents', ([], {}), '()\n', (339, 341), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((917, 939), 'random.choice', 'random.choice', (['avatars'], {}), '(avatars)\n', (930, 939), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((5218, 5229), 'dateutil.parser.parse', 'parse', (['time'], {}), '(time)\n', (5223, 5229), False, 'from dateutil.parser import parse\n'), ((1094, 1121), 'asyncio.sleep', 'asyncio.sleep', (['(60 * 60 * 24)'], {}), '(60 * 60 * 24)\n', (1107, 1121), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((4391, 4412), 'random.choice', 'random.choice', (['quotes'], {}), '(quotes)\n', (4404, 4412), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((5260, 5283), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (5281, 5283), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((5391, 5413), 'asyncio.sleep', 'asyncio.sleep', (['seconds'], {}), '(seconds)\n', (5404, 5413), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((675, 701), 'discord.Game', 'discord.Game', ([], {'name': '"""!help"""'}), "(name='!help')\n", (687, 701), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((811, 835), 'random.choice', 'random.choice', (['bot_names'], {}), '(bot_names)\n', (824, 835), False, 'import discord, os, time, random, datetime, validators, asyncio, logging\n'), ((1806, 1814), 'asyncio.sleep', 'sleep', (['(1)'], {}), '(1)\n', (1811, 1814), False, 'from asyncio import sleep\n'), ((2784, 2792), 'asyncio.sleep', 'sleep', (['(1)'], {}), '(1)\n', (2789, 2792), False, 'from asyncio import sleep\n')]
|
from os import remove
from pyrogram import filters
from YorForger import DEV_USERS, SUPPORT_USERS, WHITELIST_USERS, pbot as app
from YorForger.modules.wall import arq
from YorForger.utlis.error import capture_err
from YorForger.modules.helper_funcs.chun import adminsOnly
from YorForger.modules.sql.nsfw_sql import is_nsfw, rem_nsfw, set_nsfw
SUDOS = [*DEV_USERS, *SUPPORT_USERS, *WHITELIST_USERS]  # flattened so membership checks test user ids, not the list objects
async def get_file_id_from_message(message):
file_id = None
if message.document:
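        # only scan png/jpeg documents up to 3145728 bytes (3 MiB)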
if int(message.document.file_size) > 3145728:
return
mime_type = message.document.mime_type
if mime_type != "image/png" and mime_type != "image/jpeg":
return
file_id = message.document.file_id
if message.sticker:
if message.sticker.is_animated:
if not message.sticker.thumbs:
return
file_id = message.sticker.thumbs[0].file_id
else:
file_id = message.sticker.file_id
if message.photo:
file_id = message.photo.file_id
if message.animation:
if not message.animation.thumbs:
return
file_id = message.animation.thumbs[0].file_id
if message.video:
if not message.video.thumbs:
return
file_id = message.video.thumbs[0].file_id
return file_id
@app.on_message(
(
filters.document
| filters.photo
| filters.sticker
| filters.animation
| filters.video
)
& ~filters.private,
group=8,
)
@capture_err
async def detect_nsfw(_, message):
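    # only scan chats whose AntiNSFW flag is set in the database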
if not is_nsfw(message.chat.id):
return
if not message.from_user:
return
file_id = await get_file_id_from_message(message)
if not file_id:
return
file = await app.download_media(file_id)
try:
results = await arq.nsfw_scan(file=file)
except Exception:
return
if not results.ok:
return
results = results.result
remove(file)
nsfw = results.is_nsfw
if message.from_user.id in SUDOS:
return
if not nsfw:
return
try:
await message.delete()
except Exception:
return
await message.reply_text(
f"""
**NSFW Image Detected & Deleted Successfully!
————————————————————**
**User:** {message.from_user.mention} [`{message.from_user.id}`]
**Safe:** `{results.neutral} %`
**Porn:** `{results.porn} %`
**Adult:** `{results.sexy} %`
**Hentai:** `{results.hentai} %`
**Drawings:** `{results.drawings} %`
**————————————————————**.
"""
)
@app.on_message(filters.command(["nsfwscan", f"nsfwscan@voilet_probot"]))
@capture_err
async def nsfw_scan_command(_, message):
if not message.reply_to_message:
await message.reply_text(
"Reply to an image/document/sticker/animation to scan it."
)
return
reply = message.reply_to_message
if (
not reply.document
and not reply.photo
and not reply.sticker
and not reply.animation
and not reply.video
):
await message.reply_text(
"Reply to an image/document/sticker/animation to scan it."
)
return
m = await message.reply_text("Scanning")
file_id = await get_file_id_from_message(reply)
if not file_id:
return await m.edit("Something wrong happened.")
file = await app.download_media(file_id)
try:
results = await arq.nsfw_scan(file=file)
except Exception:
return
remove(file)
if not results.ok:
return await m.edit(results.result)
results = results.result
await m.edit(
f"""
**Neutral:** `{results.neutral} %`
**Porn:** `{results.porn} %`
**Hentai:** `{results.hentai} %`
**Sexy:** `{results.sexy} %`
**Drawings:** `{results.drawings} %`
**NSFW:** `{results.is_nsfw}`
"""
)
@app.on_message(filters.command(["antinsfw", f"antinsfw@voilet_probot"]) & ~filters.private)
@adminsOnly("can_change_info")
async def nsfw_enable_disable(_, message):
if len(message.command) != 2:
await message.reply_text(
"Usage: /antinsfw [on/off]"
)
return
status = message.text.split(None, 1)[1].strip()
status = status.lower()
chat_id = message.chat.id
if status == "on" or status == "yes":
await rem_nsfw(chat_id)
await message.reply_text(
"Enabled AntiNSFW System. I will Delete Messages Containing Inappropriate Content."
)
elif status == "off" or status == "no":
await set_nsfw(chat_id)
await message.reply_text("Disabled AntiNSFW System.")
else:
await message.reply_text(
"Unknown Suffix, Use /antinsfw [on/off]"
)
__mod_name__ = "Anti-NSFW"
__help__ = """
/nsfwscan - Manually Scan An Image/Sticker/Document.
/antinsfw <on/off> - Turn This Module On/Off
"""
|
[
"os.remove",
"YorForger.modules.sql.nsfw_sql.rem_nsfw",
"pyrogram.filters.command",
"YorForger.modules.sql.nsfw_sql.is_nsfw",
"YorForger.pbot.on_message",
"YorForger.modules.sql.nsfw_sql.set_nsfw",
"YorForger.modules.wall.arq.nsfw_scan",
"YorForger.modules.helper_funcs.chun.adminsOnly",
"YorForger.pbot.download_media"
] |
[((1335, 1471), 'YorForger.pbot.on_message', 'app.on_message', (['((filters.document | filters.photo | filters.sticker | filters.animation |\n filters.video) & ~filters.private)'], {'group': '(8)'}), '((filters.document | filters.photo | filters.sticker |\n filters.animation | filters.video) & ~filters.private, group=8)\n', (1349, 1471), True, 'from YorForger import DEV_USERS, SUPPORT_USERS, WHITELIST_USERS, pbot as app\n'), ((3933, 3962), 'YorForger.modules.helper_funcs.chun.adminsOnly', 'adminsOnly', (['"""can_change_info"""'], {}), "('can_change_info')\n", (3943, 3962), False, 'from YorForger.modules.helper_funcs.chun import adminsOnly\n'), ((1975, 1987), 'os.remove', 'remove', (['file'], {}), '(file)\n', (1981, 1987), False, 'from os import remove\n'), ((3494, 3506), 'os.remove', 'remove', (['file'], {}), '(file)\n', (3500, 3506), False, 'from os import remove\n'), ((2569, 2625), 'pyrogram.filters.command', 'filters.command', (["['nsfwscan', f'nsfwscan@voilet_probot']"], {}), "(['nsfwscan', f'nsfwscan@voilet_probot'])\n", (2584, 2625), False, 'from pyrogram import filters\n'), ((1589, 1613), 'YorForger.modules.sql.nsfw_sql.is_nsfw', 'is_nsfw', (['message.chat.id'], {}), '(message.chat.id)\n', (1596, 1613), False, 'from YorForger.modules.sql.nsfw_sql import is_nsfw, rem_nsfw, set_nsfw\n'), ((1781, 1808), 'YorForger.pbot.download_media', 'app.download_media', (['file_id'], {}), '(file_id)\n', (1799, 1808), True, 'from YorForger import DEV_USERS, SUPPORT_USERS, WHITELIST_USERS, pbot as app\n'), ((3367, 3394), 'YorForger.pbot.download_media', 'app.download_media', (['file_id'], {}), '(file_id)\n', (3385, 3394), True, 'from YorForger import DEV_USERS, SUPPORT_USERS, WHITELIST_USERS, pbot as app\n'), ((3855, 3911), 'pyrogram.filters.command', 'filters.command', (["['antinsfw', f'antinsfw@voilet_probot']"], {}), "(['antinsfw', f'antinsfw@voilet_probot'])\n", (3870, 3911), False, 'from pyrogram import filters\n'), ((1842, 1866), 'YorForger.modules.wall.arq.nsfw_scan', 'arq.nsfw_scan', ([], {'file': 'file'}), '(file=file)\n', (1855, 1866), False, 'from YorForger.modules.wall import arq\n'), ((3428, 3452), 'YorForger.modules.wall.arq.nsfw_scan', 'arq.nsfw_scan', ([], {'file': 'file'}), '(file=file)\n', (3441, 3452), False, 'from YorForger.modules.wall import arq\n'), ((4305, 4322), 'YorForger.modules.sql.nsfw_sql.rem_nsfw', 'rem_nsfw', (['chat_id'], {}), '(chat_id)\n', (4313, 4322), False, 'from YorForger.modules.sql.nsfw_sql import is_nsfw, rem_nsfw, set_nsfw\n'), ((4521, 4538), 'YorForger.modules.sql.nsfw_sql.set_nsfw', 'set_nsfw', (['chat_id'], {}), '(chat_id)\n', (4529, 4538), False, 'from YorForger.modules.sql.nsfw_sql import is_nsfw, rem_nsfw, set_nsfw\n')]
|
#!/usr/bin/env python3
import numpy as np
# Try importing matplotlib; if it works show a plot of generated data
try:
import matplotlib.pyplot as plt
except ImportError:
MAKE_PLOT = False
else:
MAKE_PLOT = True
FILTERSIZE = 50
def smooth(x, window_len=11, window='hanning'):
"""smooth the data using a window with requested size.
This method is based on the convolution of a scaled window with the signal.
The signal is prepared by introducing reflected copies of the signal
(with the window size) in both ends so that transient parts are minimized
    in the beginning and end parts of the output signal.
input:
x: the input signal
window_len: the dimension of the smoothing window; should be an odd integer
window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'
flat window will produce a moving average smoothing.
output:
the smoothed signal
example:
    t=linspace(-2,2,41)
x=sin(t)+randn(len(t))*0.1
y=smooth(x)
see also:
np.hanning, np.hamming, np.bartlett, np.blackman, np.convolve
scipy.signal.lfilter
TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) != length(input); to correct this, return y[(window_len//2-1):-(window_len//2)] instead of just y.
"""
if x.ndim != 1:
raise ValueError("smooth only accepts 1 dimension arrays.")
if x.size < window_len:
raise ValueError("Input vector needs to be bigger than window size.")
if window_len<3:
return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError("Window must be one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
s=np.r_[x[window_len-1:0:-1],x,x[-2:-window_len-1:-1]]
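    # s extends the signal with mirrored copies of its ends so that the
    # convolution below has full support at the boundaries.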
if window == 'flat': #moving average
w=np.ones(window_len,'d')
    else:
        # Look up the requested window function by name instead of using eval().
        w = getattr(np, window)(window_len)
y=np.convolve(w/w.sum(),s,mode='valid')
return y
def main():
samples = np.random.rand(360000)
samples_smooth = smooth(samples, FILTERSIZE)
if MAKE_PLOT:
plt.plot(np.arange(len(samples)), samples)
plt.plot(np.arange(len(samples_smooth)), samples_smooth)
plt.xlim(0, len(samples))
plt.show()
for s in samples:
print(s, end=" ")
if __name__ == "__main__":
main()
|
[
"numpy.random.rand",
"matplotlib.pyplot.show",
"numpy.ones"
] |
[((2053, 2075), 'numpy.random.rand', 'np.random.rand', (['(360000)'], {}), '(360000)\n', (2067, 2075), True, 'import numpy as np\n'), ((1890, 1914), 'numpy.ones', 'np.ones', (['window_len', '"""d"""'], {}), "(window_len, 'd')\n", (1897, 1914), True, 'import numpy as np\n'), ((2301, 2311), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2309, 2311), True, 'import matplotlib.pyplot as plt\n')]
|
from pylsa.rbc1d import solve_rbc1d, solve_rbc1d_neutral
# Parameters
Ny = 51
alpha = 3.1
Ra = 1708
Pr = 1.0
# Find the growth rates for given Ra
evals,evecs = solve_rbc1d(Ny=Ny,Ra=Ra,Pr=Pr,
alpha=alpha,plot=True)
# Find Rac where the growth rate is zero
evals,evecs = solve_rbc1d_neutral(Ny=Ny,Pr=Pr,
alpha=alpha,plot=True)
|
[
"pylsa.rbc1d.solve_rbc1d_neutral",
"pylsa.rbc1d.solve_rbc1d"
] |
[((170, 226), 'pylsa.rbc1d.solve_rbc1d', 'solve_rbc1d', ([], {'Ny': 'Ny', 'Ra': 'Ra', 'Pr': 'Pr', 'alpha': 'alpha', 'plot': '(True)'}), '(Ny=Ny, Ra=Ra, Pr=Pr, alpha=alpha, plot=True)\n', (181, 226), False, 'from pylsa.rbc1d import solve_rbc1d, solve_rbc1d_neutral\n'), ((281, 338), 'pylsa.rbc1d.solve_rbc1d_neutral', 'solve_rbc1d_neutral', ([], {'Ny': 'Ny', 'Pr': 'Pr', 'alpha': 'alpha', 'plot': '(True)'}), '(Ny=Ny, Pr=Pr, alpha=alpha, plot=True)\n', (300, 338), False, 'from pylsa.rbc1d import solve_rbc1d, solve_rbc1d_neutral\n')]
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('dnsalloc', '0005_auto_20161228_1014'),
]
operations = [
migrations.RenameField(
model_name='service',
old_name='plain_password',
new_name='password',
),
migrations.RenameField(
model_name='service',
old_name='plain_username',
new_name='username',
),
]
|
[
"django.db.migrations.RenameField"
] |
[((252, 348), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""service"""', 'old_name': '"""plain_password"""', 'new_name': '"""password"""'}), "(model_name='service', old_name='plain_password',\n new_name='password')\n", (274, 348), False, 'from django.db import migrations, models\n'), ((401, 497), 'django.db.migrations.RenameField', 'migrations.RenameField', ([], {'model_name': '"""service"""', 'old_name': '"""plain_username"""', 'new_name': '"""username"""'}), "(model_name='service', old_name='plain_username',\n new_name='username')\n", (423, 497), False, 'from django.db import migrations, models\n')]
|
from django.db import models
from django.contrib.auth.models import User
import re
from django.core.validators import RegexValidator
from TWT.apps.timathon.models import Team
from TWT.apps.challenges.models import Challenge
class Submission(models.Model):
id = models.AutoField(
primary_key=True,
help_text="A Submission ID, automatically generated by Postgres.",
)
github_link = models.CharField(
max_length=200,
help_text="Link to github repo",
validators=[
RegexValidator(
regex=re.compile(
r"https://github.com/[A-Za-z0-9-+_.]*/[A-Za-z0-9.+_-]*"
)
)
],
)
repl_link = models.CharField(
help_text="Link to repl",
max_length=200,
validators=[
RegexValidator(
regex=re.compile(
r"https://(replit.com|repl.it)/@[A-Za-z0-9-+_.]+/[A-Za-z0-9.+_-]+"
)
)
],
blank=True,
)
description = models.TextField(max_length=500, help_text="Project Description")
challenge = models.ForeignKey(Challenge, on_delete=models.CASCADE)
team = models.ForeignKey(Team, on_delete=models.CASCADE)
created_at = models.DateTimeField(auto_now=True)
def __str__(self):
return f"Submission: {self.team}"
|
[
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"re.compile"
] |
[((267, 373), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'help_text': '"""A Submission ID, automatically generated by Postgres."""'}), "(primary_key=True, help_text=\n 'A Submission ID, automatically generated by Postgres.')\n", (283, 373), False, 'from django.db import models\n'), ((1051, 1116), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(500)', 'help_text': '"""Project Description"""'}), "(max_length=500, help_text='Project Description')\n", (1067, 1116), False, 'from django.db import models\n'), ((1133, 1187), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Challenge'], {'on_delete': 'models.CASCADE'}), '(Challenge, on_delete=models.CASCADE)\n', (1150, 1187), False, 'from django.db import models\n'), ((1199, 1248), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Team'], {'on_delete': 'models.CASCADE'}), '(Team, on_delete=models.CASCADE)\n', (1216, 1248), False, 'from django.db import models\n'), ((1266, 1301), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1286, 1301), False, 'from django.db import models\n'), ((565, 631), 're.compile', 're.compile', (['"""https://github.com/[A-Za-z0-9-+_.]*/[A-Za-z0-9.+_-]*"""'], {}), "('https://github.com/[A-Za-z0-9-+_.]*/[A-Za-z0-9.+_-]*')\n", (575, 631), False, 'import re\n'), ((865, 942), 're.compile', 're.compile', (['"""https://(replit.com|repl.it)/@[A-Za-z0-9-+_.]+/[A-Za-z0-9.+_-]+"""'], {}), "('https://(replit.com|repl.it)/@[A-Za-z0-9-+_.]+/[A-Za-z0-9.+_-]+')\n", (875, 942), False, 'import re\n')]
|
from setuptools import setup
setup(name='eralchemy-magic',
packages=['eralchemy_magic'],
install_requires=['ipython-sql'],
dependency_links=['git+https://github.com/psychemedia/eralchemy.git']
)
|
[
"setuptools.setup"
] |
[((30, 203), 'setuptools.setup', 'setup', ([], {'name': '"""eralchemy-magic"""', 'packages': "['eralchemy_magic']", 'install_requires': "['ipython-sql']", 'dependency_links': "['git+https://github.com/psychemedia/eralchemy.git']"}), "(name='eralchemy-magic', packages=['eralchemy_magic'],\n install_requires=['ipython-sql'], dependency_links=[\n 'git+https://github.com/psychemedia/eralchemy.git'])\n", (35, 203), False, 'from setuptools import setup\n')]
|
# Generated by Django 3.0.8 on 2020-08-20 1:21
from django.conf import settings
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('auctions', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='TimeStamp',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('listing_date', models.DateTimeField(auto_now=True)),
],
),
migrations.CreateModel(
name='Listing',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=64)),
('category', models.CharField(choices=[('V', 'vehicles'), ('P', 'pets'), ('A', 'antiques'), ('M', 'media'), ('S', 'sports'), ('H', 'household'), ('C', 'clothing')], default='M', max_length=1)),
('start_bid', models.DecimalField(decimal_places=0, default=0, max_digits=7, validators=[django.core.validators.MinValueValidator(1, message='Starting Bid should be greater than $0')])),
('description', models.CharField(max_length=900)),
('image_url', models.URLField(blank=True, max_length=300)),
('status', models.CharField(choices=[('A', 'active'), ('C', 'closed')], default='A', max_length=1)),
('listing_date', models.DateTimeField(auto_now=True)),
('owner', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='listing_owner', to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='user',
name='watchlist',
field=models.ManyToManyField(blank=True, related_name='user_watch', to='auctions.Listing'),
),
]
|
[
"django.db.models.URLField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] |
[((1891, 1980), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""user_watch"""', 'to': '"""auctions.Listing"""'}), "(blank=True, related_name='user_watch', to=\n 'auctions.Listing')\n", (1913, 1980), False, 'from django.db import migrations, models\n'), ((418, 511), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (434, 511), False, 'from django.db import migrations, models\n'), ((543, 578), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (563, 578), False, 'from django.db import migrations, models\n'), ((711, 804), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (727, 804), False, 'from django.db import migrations, models\n'), ((829, 860), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (845, 860), False, 'from django.db import migrations, models\n'), ((892, 1078), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('V', 'vehicles'), ('P', 'pets'), ('A', 'antiques'), ('M', 'media'), ('S',\n 'sports'), ('H', 'household'), ('C', 'clothing')]", 'default': '"""M"""', 'max_length': '(1)'}), "(choices=[('V', 'vehicles'), ('P', 'pets'), ('A',\n 'antiques'), ('M', 'media'), ('S', 'sports'), ('H', 'household'), ('C',\n 'clothing')], default='M', max_length=1)\n", (908, 1078), False, 'from django.db import migrations, models\n'), ((1308, 1340), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(900)'}), '(max_length=900)\n', (1324, 1340), False, 'from django.db import migrations, models\n'), ((1373, 1416), 'django.db.models.URLField', 'models.URLField', ([], {'blank': '(True)', 'max_length': '(300)'}), '(blank=True, max_length=300)\n', (1388, 1416), False, 'from django.db import migrations, models\n'), ((1446, 1537), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('A', 'active'), ('C', 'closed')]", 'default': '"""A"""', 'max_length': '(1)'}), "(choices=[('A', 'active'), ('C', 'closed')], default='A',\n max_length=1)\n", (1462, 1537), False, 'from django.db import migrations, models\n'), ((1569, 1604), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1589, 1604), False, 'from django.db import migrations, models\n'), ((1633, 1759), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""listing_owner"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='listing_owner', to=settings.AUTH_USER_MODEL)\n", (1650, 1759), False, 'from django.db import migrations, models\n')]
|
from __future__ import annotations
from typing import Tuple, NoReturn
from ...base import BaseEstimator
import numpy as np
from itertools import product
class DecisionStump(BaseEstimator):
"""
A decision stump classifier for {-1,1} labels according to the CART algorithm
Attributes
----------
self.threshold_ : float
The threshold by which the data is split
self.j_ : int
The index of the feature by which to split the data
self.sign_: int
The label to predict for samples where the value of the j'th feature is above the threshold
"""
def __init__(self) -> DecisionStump:
"""
Instantiate a Decision stump classifier
"""
super().__init__()
self.threshold_, self.j_, self.sign_ = None, None, None
def _fit(self, X: np.ndarray, y: np.ndarray) -> NoReturn:
"""
fits a decision stump to the given data
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to fit an estimator for
y : ndarray of shape (n_samples, )
Responses of input data to fit to
"""
min_err = np.inf
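        # Exhaustive search: try every feature with both orientations
        # (sign = +1 / -1) and keep the split with the lowest weighted error.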
for i in range(X.shape[1]):
thr_pos, thr_err_pos = self._find_threshold(X[:, i], y, 1)
thr_neg, thr_err_neg = self._find_threshold(X[:, i], y, -1)
if thr_err_pos < min_err:
min_err = thr_err_pos
self.threshold_ = thr_pos
self.sign_ = 1
self.j_ = i
if thr_err_neg < min_err:
min_err = thr_err_neg
self.threshold_ = thr_neg
self.sign_ = -1
self.j_ = i
def _predict(self, X: np.ndarray) -> np.ndarray:
"""
Predict responses for given samples using fitted estimator
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Input data to predict responses for
Returns
-------
responses : ndarray of shape (n_samples, )
Predicted responses of given samples
Notes
-----
Feature values strictly below threshold are predicted as `-sign` whereas values which equal
to or above the threshold are predicted as `sign`
"""
true_vec = X[:, self.j_] >= self.threshold_
res = np.full(X.shape[0], -1)
res[true_vec] = 1
return res * self.sign_
def _find_threshold(self, values: np.ndarray, labels: np.ndarray, sign: int) -> Tuple[float, float]:
"""
Given a feature vector and labels, find a threshold by which to perform a split
The threshold is found according to the value minimizing the misclassification
error along this feature
Parameters
----------
values: ndarray of shape (n_samples,)
A feature vector to find a splitting threshold for
labels: ndarray of shape (n_samples,)
The labels to compare against
sign: int
Predicted label assigned to values equal to or above threshold
Returns
-------
thr: float
Threshold by which to perform split
thr_err: float between 0 and 1
            Misclassification error of returned threshold
Notes
-----
For every tested threshold, values strictly below threshold are predicted as `-sign` whereas values
which equal to or above the threshold are predicted as `sign`
"""
size = labels.size
new_idx = np.argsort(values)
sorted_values = values[new_idx]
sorted_labels = labels[new_idx]
        # create a matrix of all possible threshold predictions
mat = np.full((size, size + 1), -sign)
iu = np.tril_indices(size)
mat[iu] = sign
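        # Column j of `mat` now labels samples with sorted index >= j as `sign`
        # and the rest as `-sign`; the single product below therefore scores
        # every candidate threshold at once.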
res = sorted_labels @ mat
max_idx = np.argmax(res)
mis = float(np.sum((np.sign(sorted_labels) != np.sign(mat[:, max_idx])) * np.abs(sorted_labels)))
if max_idx == 0:
return -np.inf, mis
if max_idx == size:
return np.inf, mis
return sorted_values[max_idx], mis
def _loss(self, X: np.ndarray, y: np.ndarray) -> float:
"""
Evaluate performance under misclassification loss function
Parameters
----------
X : ndarray of shape (n_samples, n_features)
Test samples
y : ndarray of shape (n_samples, )
True labels of test samples
Returns
-------
loss : float
Performance under missclassification loss function
"""
return float(np.sum(self.predict(X) != y)) / y.size
|
[
"numpy.full",
"numpy.tril_indices",
"numpy.abs",
"numpy.argmax",
"numpy.argsort",
"numpy.sign"
] |
[((2399, 2422), 'numpy.full', 'np.full', (['X.shape[0]', '(-1)'], {}), '(X.shape[0], -1)\n', (2406, 2422), True, 'import numpy as np\n'), ((3600, 3618), 'numpy.argsort', 'np.argsort', (['values'], {}), '(values)\n', (3610, 3618), True, 'import numpy as np\n'), ((3758, 3790), 'numpy.full', 'np.full', (['(size, size + 1)', '(-sign)'], {}), '((size, size + 1), -sign)\n', (3765, 3790), True, 'import numpy as np\n'), ((3804, 3825), 'numpy.tril_indices', 'np.tril_indices', (['size'], {}), '(size)\n', (3819, 3825), True, 'import numpy as np\n'), ((3902, 3916), 'numpy.argmax', 'np.argmax', (['res'], {}), '(res)\n', (3911, 3916), True, 'import numpy as np\n'), ((4000, 4021), 'numpy.abs', 'np.abs', (['sorted_labels'], {}), '(sorted_labels)\n', (4006, 4021), True, 'import numpy as np\n'), ((3946, 3968), 'numpy.sign', 'np.sign', (['sorted_labels'], {}), '(sorted_labels)\n', (3953, 3968), True, 'import numpy as np\n'), ((3972, 3996), 'numpy.sign', 'np.sign', (['mat[:, max_idx]'], {}), '(mat[:, max_idx])\n', (3979, 3996), True, 'import numpy as np\n')]
|
from django.contrib.auth.models import Group
from social_core.pipeline.partial import partial
@partial
def verify_user(strategy, details, user=None, is_new=False, *args, **kwargs):
user.groups.add(Group.objects.get(name='Verified Users'))
user.save()
|
[
"django.contrib.auth.models.Group.objects.get"
] |
[((204, 244), 'django.contrib.auth.models.Group.objects.get', 'Group.objects.get', ([], {'name': '"""Verified Users"""'}), "(name='Verified Users')\n", (221, 244), False, 'from django.contrib.auth.models import Group\n')]
|
import torch
import torch.nn as nn
from numpy.random import random_sample
def make_rand_coords(input_size=(256,256,256), patch_size=(64,64,64)):
return [get_dims(input_size[0] - patch_size[0]), \
get_dims(input_size[1] - patch_size[1]), \
get_dims(input_size[2] - patch_size[2])]
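# The returned offsets form the corner of a patch_size crop that always lies
# fully inside a volume of input_size.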
def get_dims(upper):
# Random value in the range [0, upper)
return int(upper * random_sample())
def multi_gpu_check(gpu_map, l_name, *args):
"""
Can move computations to other GPUs if specified. The names of the layers and corresponding GPUs can be specified
in GPU map.
:param gpu_map:
:param l_name:
:param args:
:return:
"""
args = list(args)
if l_name in gpu_map.keys():
# print(l_name)
for idx, l in enumerate(args):
args[idx] = l.to(torch.device(gpu_map[l_name]))
# print(args[idx].device, gpu_map[l_name])
if len(args)==1:
return args[0]
return args
class RCVNet(nn.Module):
"""
Random Cropping VNet. Model is designed to extract patches randomly during feedforward pass unless specifically
prevented by setting a random patch coordinate manually. Can also move operations for individual layers to different
GPUs if specified in params
Standard VNet Architecture
"""
def __init__(self, params):
"""
Standard VNet Architecture
"""
super(RCVNet, self).__init__()
self.coords = None
self.input_shape = params['input_shape']
self.patch_size = params['patch_size']
self.gen_random = params['gen_random']
# Choose sub model
if params['sub_model_name'] == 'vnet':
from model.VNet import EncoderBlock, BottleNeck, DecoderBlock
elif params['sub_model_name'] == 'vnet_2d_3d':
from model.VNet_2D_3D import EncoderBlock, BottleNeck, DecoderBlock
elif params['sub_model_name'] == 'vnet_asym':
from model.VNetAsym import EncoderBlock, BottleNeck, DecoderBlock
elif params['sub_model_name'] == 'vnet_sym':
from model.VNetSym import EncoderBlock, BottleNeck, DecoderBlock
elif params['sub_model_name'] == 'vnet_denseadd':
from model.VNetDenseAdd import EncoderBlock, BottleNeck, DecoderBlock
elif params['sub_model_name'] == 'vnet_exclusion':
from model.VNetExclusion import EncoderBlock, BottleNeck, DecoderBlock
elif params['sub_model_name'] == 'vnet_se':
from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock
else:
raise ValueError(f"{params['sub_model_name']} does not exist.")
        # Artefact from another file. Unfortunately it was not removed and is a dangling layer in many of the saved models
# i.e. registered as a parameter, but never used
self.down_input_lower = nn.Sequential(
nn.Conv3d(in_channels=params['in_channels'], out_channels=4*params['out_channels'],
kernel_size=(4,4,4), padding=0, stride=4),
nn.GroupNorm(num_groups=4, num_channels=4*params['out_channels']),
nn.PReLU()
)
# Start model creation
# in_channels: 16, out_channels: 16
self.encoder_block_1 = EncoderBlock(params)
params['input'] = False
params['create_layer_1'] = True
params['in_channels'] = params['out_channels'] * 2 # 32
params['out_channels'] = params['out_channels'] * 2 # 32
self.encoder_block_2 = EncoderBlock(params)
params['create_layer_2'] = True
params['in_channels'] = params['out_channels'] * 2 # 64
params['out_channels'] = params['out_channels'] * 2 # 64
self.encoder_block_3 = EncoderBlock(params)
params['in_channels'] = params['out_channels'] * 2 # 128
params['out_channels'] = params['out_channels'] * 2 # 128
self.encoder_block_4 = EncoderBlock(params)
params['in_channels'] = params['out_channels'] * 2 # 256
params['out_channels'] = int(params['out_channels'] * 2) # 256
self.bottleneck_block = BottleNeck(params)
enc_channels = 128
params['in_channels'] = params['out_channels'] + enc_channels # 256 + 128
params['out_channels'] = params['out_channels'] # 256
self.decoder_block_4 = DecoderBlock(params)
enc_channels = int(enc_channels/2)
params['in_channels'] = int(params['out_channels']/2) + enc_channels # 128 + 64
params['out_channels'] = int(params['out_channels'] / 2) # 128
self.decoder_block_3 = DecoderBlock(params)
enc_channels = int(enc_channels/2)
params['in_channels'] = int(params['out_channels'] / 2) + enc_channels # 64 + 32
params['out_channels'] = int(params['out_channels'] / 2) # 64
params['create_layer_2'] = False
self.decoder_block_2 = DecoderBlock(params)
enc_channels = int(enc_channels/2)
params['in_channels'] = int(params['out_channels'] / 2) + enc_channels # 32 + 16
params['out_channels'] = int(params['out_channels'] / 2) # 32
params['create_layer_1'] = False
params['out'] = True
self.decoder_block_1 = DecoderBlock(params)
params['out'] = False
self.output_block = nn.Conv3d(in_channels=params['out_channels'], out_channels=params['num_classes'],
kernel_size = (1, 1, 1), stride = 1, padding = 0)
self.gpu_map = params['gpu_map']
def forward(self, x):
if self.training:
return self.train_forward(x)
else:
return self.eval_forward(x)
def train_forward(self, x):
"""
Standard VNet Architecture
"""
# Generate Random Coordinates if needed
if self.gen_random: # For usage by QuadNet
self.coords = make_rand_coords(self.input_shape, self.patch_size)
assert self.coords is not None
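        # When gen_random is False, the caller is expected to have set
        # self.coords beforehand, so several networks can crop the same patch.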
# Cropped Patch for the part of encoder
x_upper = x[..., self.coords[0]:self.coords[0] + self.patch_size[0],
self.coords[1]:self.coords[1] + self.patch_size[1],
self.coords[2]:self.coords[2] + self.patch_size[2]]
# Running Encoder side of network
x_upper = multi_gpu_check(self.gpu_map, 'encoder_block_1', x_upper)
x_upper_res_1, x_down_1 = self.encoder_block_1(x_upper)
x_down_1 = multi_gpu_check(self.gpu_map, 'encoder_block_2', x_down_1)
x_upper_res_2, x_down_2 = self.encoder_block_2(x_down_1)
x_down_2 = multi_gpu_check(self.gpu_map, 'encoder_block_3', x_down_2)
x_lower_res_3, x_down_3 = self.encoder_block_3(x_down_2)
x_down_3 = multi_gpu_check(self.gpu_map, 'encoder_block_4', x_down_3)
x_lower_res_4, x_down_4 = self.encoder_block_4(x_down_3)
# Running bottleneck
x_down_4 = multi_gpu_check(self.gpu_map, 'bottleneck_block', x_down_4)
x_bottleneck = self.bottleneck_block(x_down_4)
# Run decoder
x_lower_res_4, x_bottleneck = multi_gpu_check(self.gpu_map, 'decoder_block_4', x_lower_res_4, x_bottleneck)
x_up_4 = self.decoder_block_4(x_lower_res_4, x_bottleneck)
x_lower_res_3, x_up_4 = multi_gpu_check(self.gpu_map, 'decoder_block_3', x_lower_res_3, x_up_4)
x_up_3 = self.decoder_block_3(x_lower_res_3, x_up_4)
x_upper_res_2, x_up_3 = multi_gpu_check(self.gpu_map, 'decoder_block_2', x_upper_res_2, x_up_3)
x_up_2 = self.decoder_block_2(x_upper_res_2, x_up_3)
x_upper_res_1, x_up_2 = multi_gpu_check(self.gpu_map, 'decoder_block_1', x_upper_res_1, x_up_2)
x_last = self.decoder_block_1(x_upper_res_1, x_up_2)
x_last = multi_gpu_check(self.gpu_map, 'output_block', x_last)
out = self.output_block(x_last)
return out
def eval_forward(self, x):
"""
Standard VNet Architecture
"""
# Standard evaluation. No patch extraction
x_upper = multi_gpu_check(self.gpu_map, 'encoder_block_1', x)
x_upper_res_1, x_down_1 = self.encoder_block_1(x_upper)
x_down_1 = multi_gpu_check(self.gpu_map, 'encoder_block_2', x_down_1)
x_upper_res_2, x_down_2 = self.encoder_block_2(x_down_1)
x_down_2 = multi_gpu_check(self.gpu_map, 'encoder_block_3', x_down_2)
x_lower_res_3, x_down_3 = self.encoder_block_3(x_down_2)
x_down_3 = multi_gpu_check(self.gpu_map, 'encoder_block_4', x_down_3)
x_lower_res_4, x_down_4 = self.encoder_block_4(x_down_3)
        # Running bottleneck and decoder
x_down_4 = multi_gpu_check(self.gpu_map, 'bottleneck_block', x_down_4)
x_bottleneck = self.bottleneck_block(x_down_4)
x_lower_res_4, x_bottleneck = multi_gpu_check(self.gpu_map, 'decoder_block_4', x_lower_res_4, x_bottleneck)
x_up_4 = self.decoder_block_4(x_lower_res_4, x_bottleneck)
x_lower_res_3, x_up_4 = multi_gpu_check(self.gpu_map, 'decoder_block_3', x_lower_res_3, x_up_4)
x_up_3 = self.decoder_block_3(x_lower_res_3, x_up_4)
x_upper_res_2, x_up_3 = multi_gpu_check(self.gpu_map, 'decoder_block_2', x_upper_res_2, x_up_3)
x_up_2 = self.decoder_block_2(x_upper_res_2, x_up_3)
x_upper_res_1, x_up_2 = multi_gpu_check(self.gpu_map, 'decoder_block_1', x_upper_res_1, x_up_2)
x_last = self.decoder_block_1(x_upper_res_1, x_up_2)
x_last = multi_gpu_check(self.gpu_map, 'output_block', x_last)
out = self.output_block(x_last)
return out
class RCVNetAttention(RCVNet):
def __init__(self, params):
super(RCVNet, self).__init__()
from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, \
DecoderBlock as AttDecoderBlock
self.coords = None
self.input_shape = params['input_shape']
self.patch_size = params['patch_size']
self.gen_random = params['gen_random']
self.down_input_lower = nn.Sequential(
nn.Conv3d(in_channels=params['in_channels'], out_channels=4*params['out_channels'],
kernel_size=(4,4,4), padding=0, stride=4),
nn.GroupNorm(num_groups=4, num_channels=4*params['out_channels']),
nn.PReLU()
)
# in_channels: 16, out_channels: 16
self.encoder_block_1 = AttEncoderBlock(params)
params['input'] = False
params['create_layer_1'] = True
params['in_channels'] = params['out_channels'] * 2 # 32
params['out_channels'] = params['out_channels'] * 2 # 32
self.encoder_block_2 = AttEncoderBlock(params)
params['create_layer_2'] = True
params['in_channels'] = params['out_channels'] * 2 # 64
params['out_channels'] = params['out_channels'] * 2 # 64
self.encoder_block_3 = AttEncoderBlock(params)
params['in_channels'] = params['out_channels'] * 2 # 128
params['out_channels'] = params['out_channels'] * 2 # 128
self.encoder_block_4 = AttEncoderBlock(params)
params['in_channels'] = params['out_channels'] * 2 # 256
params['out_channels'] = int(params['out_channels'] * 2) # 256
self.bottleneck_block = AttBottleNeck(params)
enc_channels = 128
params['in_channels'] = params['out_channels'] + enc_channels # 256 + 128
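        # Attention-gate channel sizes (standard Attention U-Net notation):
        # F_g = gating signal (decoder) channels, F_l = skip connection
        # (encoder) channels, F_int = intermediate projection channels.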
params['F_g'], params['F_l'], params['F_int'] = (256, 128, 128)
params['out_channels'] = params['out_channels'] # 256
self.decoder_block_4 = AttDecoderBlock(params)
enc_channels = int(enc_channels/2)
params['in_channels'] = int(params['out_channels']/2) + enc_channels # 128 + 64
params['out_channels'] = int(params['out_channels'] / 2) # 128
params['F_g'], params['F_l'], params['F_int'] = (128, 64, 64)
self.decoder_block_3 = AttDecoderBlock(params)
enc_channels = int(enc_channels/2)
params['in_channels'] = int(params['out_channels'] / 2) + enc_channels # 64 + 32
params['out_channels'] = int(params['out_channels'] / 2) # 64
params['F_g'], params['F_l'], params['F_int'] = (64, 32, 32)
params['create_layer_2'] = False
self.decoder_block_2 = AttDecoderBlock(params)
enc_channels = int(enc_channels/2)
params['in_channels'] = int(params['out_channels'] / 2) + enc_channels # 32 + 16
params['out_channels'] = int(params['out_channels'] / 2) # 32
params['F_g'], params['F_l'], params['F_int'] = (32, 16, 16)
params['create_layer_1'] = False
params['out'] = True
self.decoder_block_1 = AttDecoderBlock(params)
params['out'] = False
self.output_block = nn.Conv3d(in_channels=params['out_channels'], out_channels=params['num_classes'],
kernel_size = (1, 1, 1), stride = 1, padding = 0)
if __name__ == "__main__":
# TEST CODE [RUN THIS TO VERIFY MODELS]
params = {'in_channels': 1,
'out_channels': 16,
'create_layer_1': False,
'create_layer_2': False,
'kernel_size': (5, 5, 5),
'input_shape': (64,64,64),
'patch_size': (64,64,64),
'num_classes': 40,
'out': False,
'input': True,
# 'F_g': None,
# 'F_l': None,
# 'F_int': None
'gen_random' : True,
'gpu_map':{}
}
m = RCVNet(params=params).cuda()
# m.eval()
# m = CompetitiveEncoderBlockInput(params=params).cuda()
try:
from torchsummary import summary
# print([l for l in m.named_children()])
summary(m, input_size=(1,64,64,64))
except ImportError:
pass
#
# print([l for l in m.decoder_block_1.parameters()])
# print([l.device() for _, l in m.named_children()])
|
[
"torch.nn.PReLU",
"numpy.random.random_sample",
"torch.nn.Conv3d",
"model.VNetAttention.EncoderBlock",
"model.VNetAttention.BottleNeck",
"torch.nn.GroupNorm",
"model.VNetSE.DecoderBlock",
"torchsummary.summary",
"model.VNetSE.BottleNeck",
"model.VNetSE.EncoderBlock",
"torch.device",
"model.VNetAttention.DecoderBlock"
] |
[((3296, 3316), 'model.VNetSE.EncoderBlock', 'EncoderBlock', (['params'], {}), '(params)\n', (3308, 3316), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((3550, 3570), 'model.VNetSE.EncoderBlock', 'EncoderBlock', (['params'], {}), '(params)\n', (3562, 3570), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((3772, 3792), 'model.VNetSE.EncoderBlock', 'EncoderBlock', (['params'], {}), '(params)\n', (3784, 3792), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((3956, 3976), 'model.VNetSE.EncoderBlock', 'EncoderBlock', (['params'], {}), '(params)\n', (3968, 3976), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((4146, 4164), 'model.VNetSE.BottleNeck', 'BottleNeck', (['params'], {}), '(params)\n', (4156, 4164), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((4368, 4388), 'model.VNetSE.DecoderBlock', 'DecoderBlock', (['params'], {}), '(params)\n', (4380, 4388), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((4624, 4644), 'model.VNetSE.DecoderBlock', 'DecoderBlock', (['params'], {}), '(params)\n', (4636, 4644), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((4921, 4941), 'model.VNetSE.DecoderBlock', 'DecoderBlock', (['params'], {}), '(params)\n', (4933, 4941), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((5246, 5266), 'model.VNetSE.DecoderBlock', 'DecoderBlock', (['params'], {}), '(params)\n', (5258, 5266), False, 'from model.VNetSE import EncoderBlock, BottleNeck, DecoderBlock\n'), ((5326, 5456), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['out_channels']", 'out_channels': "params['num_classes']", 'kernel_size': '(1, 1, 1)', 'stride': '(1)', 'padding': '(0)'}), "(in_channels=params['out_channels'], out_channels=params[\n 'num_classes'], kernel_size=(1, 1, 1), stride=1, padding=0)\n", (5335, 5456), True, 'import torch.nn as nn\n'), ((10418, 10441), 'model.VNetAttention.EncoderBlock', 'AttEncoderBlock', (['params'], {}), '(params)\n', (10433, 10441), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((10675, 10698), 'model.VNetAttention.EncoderBlock', 'AttEncoderBlock', (['params'], {}), '(params)\n', (10690, 10698), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((10900, 10923), 'model.VNetAttention.EncoderBlock', 'AttEncoderBlock', (['params'], {}), '(params)\n', (10915, 10923), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((11087, 11110), 'model.VNetAttention.EncoderBlock', 'AttEncoderBlock', (['params'], {}), '(params)\n', (11102, 11110), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((11280, 11301), 'model.VNetAttention.BottleNeck', 'AttBottleNeck', (['params'], {}), '(params)\n', (11293, 11301), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((11577, 11600), 'model.VNetAttention.DecoderBlock', 'AttDecoderBlock', (['params'], {}), '(params)\n', (11592, 11600), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((11906, 11929), 'model.VNetAttention.DecoderBlock', 'AttDecoderBlock', (['params'], {}), '(params)\n', (11921, 11929), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((12275, 12298), 'model.VNetAttention.DecoderBlock', 'AttDecoderBlock', (['params'], {}), '(params)\n', (12290, 12298), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((12672, 12695), 'model.VNetAttention.DecoderBlock', 'AttDecoderBlock', (['params'], {}), '(params)\n', (12687, 12695), True, 'from model.VNetAttention import EncoderBlock as AttEncoderBlock, BottleNeck as AttBottleNeck, DecoderBlock as AttDecoderBlock\n'), ((12755, 12885), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['out_channels']", 'out_channels': "params['num_classes']", 'kernel_size': '(1, 1, 1)', 'stride': '(1)', 'padding': '(0)'}), "(in_channels=params['out_channels'], out_channels=params[\n 'num_classes'], kernel_size=(1, 1, 1), stride=1, padding=0)\n", (12764, 12885), True, 'import torch.nn as nn\n'), ((13741, 13779), 'torchsummary.summary', 'summary', (['m'], {'input_size': '(1, 64, 64, 64)'}), '(m, input_size=(1, 64, 64, 64))\n', (13748, 13779), False, 'from torchsummary import summary\n'), ((397, 412), 'numpy.random.random_sample', 'random_sample', ([], {}), '()\n', (410, 412), False, 'from numpy.random import random_sample\n'), ((2912, 3046), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['in_channels']", 'out_channels': "(4 * params['out_channels'])", 'kernel_size': '(4, 4, 4)', 'padding': '(0)', 'stride': '(4)'}), "(in_channels=params['in_channels'], out_channels=4 * params[\n 'out_channels'], kernel_size=(4, 4, 4), padding=0, stride=4)\n", (2921, 3046), True, 'import torch.nn as nn\n'), ((3081, 3148), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': "(4 * params['out_channels'])"}), "(num_groups=4, num_channels=4 * params['out_channels'])\n", (3093, 3148), True, 'import torch.nn as nn\n'), ((3164, 3174), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (3172, 3174), True, 'import torch.nn as nn\n'), ((10066, 10200), 'torch.nn.Conv3d', 'nn.Conv3d', ([], {'in_channels': "params['in_channels']", 'out_channels': "(4 * params['out_channels'])", 'kernel_size': '(4, 4, 4)', 'padding': '(0)', 'stride': '(4)'}), "(in_channels=params['in_channels'], out_channels=4 * params[\n 'out_channels'], kernel_size=(4, 4, 4), padding=0, stride=4)\n", (10075, 10200), True, 'import torch.nn as nn\n'), ((10235, 10302), 'torch.nn.GroupNorm', 'nn.GroupNorm', ([], {'num_groups': '(4)', 'num_channels': "(4 * params['out_channels'])"}), "(num_groups=4, num_channels=4 * params['out_channels'])\n", (10247, 10302), True, 'import torch.nn as nn\n'), ((10318, 10328), 'torch.nn.PReLU', 'nn.PReLU', ([], {}), '()\n', (10326, 10328), True, 'import torch.nn as nn\n'), ((828, 857), 'torch.device', 'torch.device', (['gpu_map[l_name]'], {}), '(gpu_map[l_name])\n', (840, 857), False, 'import torch\n')]
|
# Copyright 2015 Intel Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from neutron.common import utils
from neutron_lib.plugins.ml2 import api
from nuage_neutron.plugins.common import base_plugin
from nuage_neutron.plugins.common import nuagedb
LOG = logging.getLogger(__name__)
class NuageNetworkExtensionDriver(api.ExtensionDriver,
base_plugin.RootNuagePlugin):
_supported_extension_alias = 'nuage-network'
def initialize(self):
super(NuageNetworkExtensionDriver, self).__init__()
self.init_vsd_client()
@property
def extension_alias(self):
return self._supported_extension_alias
@utils.exception_logger()
def extend_network_dict(self, session, db_data, result):
result['nuage_l2bridge'] = nuagedb.get_nuage_l2bridge_id_for_network(
session, result['id'])
return result
|
[
"oslo_log.log.getLogger",
"neutron.common.utils.exception_logger",
"nuage_neutron.plugins.common.nuagedb.get_nuage_l2bridge_id_for_network"
] |
[((855, 882), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (872, 882), True, 'from oslo_log import log as logging\n'), ((1270, 1294), 'neutron.common.utils.exception_logger', 'utils.exception_logger', ([], {}), '()\n', (1292, 1294), False, 'from neutron.common import utils\n'), ((1391, 1455), 'nuage_neutron.plugins.common.nuagedb.get_nuage_l2bridge_id_for_network', 'nuagedb.get_nuage_l2bridge_id_for_network', (['session', "result['id']"], {}), "(session, result['id'])\n", (1432, 1455), False, 'from nuage_neutron.plugins.common import nuagedb\n')]
|
# -*- coding: utf-8 -*-
import stripe
from django.core.management.base import BaseCommand
from aa_stripe.models import StripeCoupon
from aa_stripe.settings import stripe_settings
from aa_stripe.utils import timestamp_to_timezone_aware_date
class Command(BaseCommand):
help = "Update the coupon list from Stripe API"
def handle(self, *args, **options):
stripe.api_key = stripe_settings.API_KEY
counts = {
"created": 0,
"updated": 0,
"deleted": 0
}
active_coupons_ids = []
last_stripe_coupon = None
while True:
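            # Stripe paginates list endpoints; `starting_after` resumes the
            # listing from the last coupon of the previous page.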
stripe_coupon_list = stripe.Coupon.list(starting_after=last_stripe_coupon)
for stripe_coupon in stripe_coupon_list["data"]:
try:
coupon = StripeCoupon.objects.get(
coupon_id=stripe_coupon.id, created=timestamp_to_timezone_aware_date(stripe_coupon["created"]),
is_deleted=False)
counts["updated"] += coupon.update_from_stripe_data(stripe_coupon)
except StripeCoupon.DoesNotExist:
# already have the data - we do not need to call Stripe API again
coupon = StripeCoupon(coupon_id=stripe_coupon.id)
coupon.update_from_stripe_data(stripe_coupon, commit=False)
super(StripeCoupon, coupon).save()
counts["created"] += 1
# indicate which coupons should have is_deleted=False
active_coupons_ids.append(coupon.pk)
if not stripe_coupon_list["has_more"]:
break
else:
last_stripe_coupon = stripe_coupon_list["data"][-1]
        # update() can be used here because these coupons no longer exist in the Stripe API
coupons_to_delete = StripeCoupon.objects.exclude(pk__in=active_coupons_ids)
for coupon in coupons_to_delete:
coupon.is_deleted = True
super(StripeCoupon, coupon).save() # make sure pre/post save signals are triggered without calling API
counts["deleted"] += coupons_to_delete.count()
if options.get("verbosity") > 1:
print("Coupons created: {created}, updated: {updated}, deleted: {deleted}".format(**counts))
|
[
"aa_stripe.models.StripeCoupon",
"aa_stripe.utils.timestamp_to_timezone_aware_date",
"stripe.Coupon.list",
"aa_stripe.models.StripeCoupon.objects.exclude"
] |
[((1874, 1929), 'aa_stripe.models.StripeCoupon.objects.exclude', 'StripeCoupon.objects.exclude', ([], {'pk__in': 'active_coupons_ids'}), '(pk__in=active_coupons_ids)\n', (1902, 1929), False, 'from aa_stripe.models import StripeCoupon\n'), ((639, 692), 'stripe.Coupon.list', 'stripe.Coupon.list', ([], {'starting_after': 'last_stripe_coupon'}), '(starting_after=last_stripe_coupon)\n', (657, 692), False, 'import stripe\n'), ((1244, 1284), 'aa_stripe.models.StripeCoupon', 'StripeCoupon', ([], {'coupon_id': 'stripe_coupon.id'}), '(coupon_id=stripe_coupon.id)\n', (1256, 1284), False, 'from aa_stripe.models import StripeCoupon\n'), ((890, 948), 'aa_stripe.utils.timestamp_to_timezone_aware_date', 'timestamp_to_timezone_aware_date', (["stripe_coupon['created']"], {}), "(stripe_coupon['created'])\n", (922, 948), False, 'from aa_stripe.utils import timestamp_to_timezone_aware_date\n')]
|
from copy import copy
from django.forms import formsets
from django.contrib import messages
from django.db.models import Q
from django.http import HttpResponse
from django.forms.formsets import formset_factory, BaseFormSet, all_valid
from detail import *
from edit import *
class SearchFormViewMixin(BaseFormView):
ignore_get_keys = ("page", ) # TODO this should be ignored in search form?
def get_form_kwargs(self):
"""Returns the keyword arguments for instantiating the form."""
req = self.request
kwargs = dict(initial=self.get_initial())
if req.method in ("POST", "PUT"):
kwargs.update(dict(data=req.POST, files=req.FILES))
elif req.GET:
# do get form processing if there's get data that's not in ignore list
get = dict((k,v) for k,v in req.GET.items() if k not in self.ignore_get_keys)
if get:
kwargs = dict(kwargs, initial=get, data=get)
return kwargs
def form_get(self, request):
form = self.get_form()
context = self.get_context_data(form=form)
if self.request.GET:
if form.is_valid() : context.update(self.form_valid(form))
else : context.update(self.form_invalid(form))
return context
class SearchFormView(FormView, SearchFormViewMixin):
"""FormView for search pages."""
class OwnObjMixin(SingleObjectMixin):
"""Access object, checking that it belongs to current user."""
item_name = None # used in permissions error message
owner_field = "creator" # object's field to compare to current user to check permission
def permission_error(self):
name = self.item_name or self.object.__class__.__name__
return HttpResponse("You don't have permissions to access this %s." % name)
def validate(self, obj):
if getattr(obj, self.owner_field) == self.request.user:
return True
def get_object(self, queryset=None):
obj = super(OwnObjMixin, self).get_object(queryset)
return obj if self.validate(obj) else None
class DeleteOwnObjView(OwnObjMixin, DeleteView):
"""Delete object, checking that it belongs to current user."""
class UpdateOwnObjView(OwnObjMixin, UpdateView):
"""Update object, checking that it belongs to current user."""
class UpdateRelatedView(DetailView, UpdateView):
"""Update object related to detail object; create if does not exist."""
detail_model = None
form_model = None
fk_attr = None
related_name = None
def get_modelform_object(self, queryset=None):
""" Get related object: detail_model.<related_name>
If does not exist, create: form_model.<fk_attr>
"""
obj = self.get_detail_object()
kwargs = {self.fk_attr: obj}
try:
related_obj = getattr(obj, self.related_name)
except self.form_model.DoesNotExist:
            related_obj = self.form_model.objects.create(**kwargs)
setattr(obj, self.related_name, related_obj)
return related_obj
class SearchEditFormset(SearchFormView):
"""Search form filtering a formset of items to be updated."""
model = None
formset_class = None
form_class = None
def get_form_class(self):
if self.request.method == "GET": return self.form_class
else: return self.formset_class
def get_queryset(self, form=None):
return self.model.objects.filter(self.get_query(form))
def get_query(self, form):
"""This method should always be overridden, applying search from the `form`."""
return Q()
def form_valid(self, form):
formset = None
if self.request.method == "GET":
formset = self.formset_class(queryset=self.get_queryset(form))
else:
form.save()
messages.success(self.request, "%s(s) were updated successfully" % self.model.__name__.capitalize())
formset = form
form = self.form_class(self.request.GET)
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def form_invalid(self, form):
formset = form
form = self.form_class(self.request.GET)
return self.render_to_response(self.get_context_data(form=form, formset=formset))
def get(self, request, *args, **kwargs):
form = self.get_form()
if form.is_bound:
if form.is_valid(): return self.form_valid(form)
else: return self.form_invalid(form)
return self.render_to_response(self.get_context_data(form=form))
|
[
"django.db.models.Q"
] |
[((3625, 3628), 'django.db.models.Q', 'Q', ([], {}), '()\n', (3626, 3628), False, 'from django.db.models import Q\n')]
|
# Copyright (c) Microsoft. All rights reserved.
# Licensed under the MIT license. See LICENSE.md file in the project root
# for full license information.
# ==============================================================================
import pytest
import numpy as np
import scipy.sparse as sparse
import cntk as C
csr = sparse.csr_matrix
from ..core import *
from cntk.tests.test_utils import *
from cntk.ops.tests.ops_test_utils import compare_lists_of_np_arrays, cntk_device
from cntk import *
from cntk.internal import _value_as_sequence_or_array
from cntk import asarray, asvalue
from cntk.tests.test_utils import _to_dense, _to_csr
test_numbers = [4., 5, 6., 7., 8.]
test_array = AA(test_numbers, dtype=np.float32)
def _dense_value_to_ndarray_test(data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes):
shape = (5,)
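    # num_of_dynamic_axes selects the variable type: 2 -> sequence input
    # (batch + sequence axes), 1 -> plain input (batch axis only),
    # 0 -> input with no dynamic axes.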
if num_of_dynamic_axes == 2:
var = sequence.input(shape)
elif num_of_dynamic_axes == 1:
var = input(shape)
else:
var = input(shape, dynamic_axes=[])
# conversion array -> value
val = asvalue(var, data)
assert val.shape == expected_value_shape
# conversion value -> array
dense_result = _value_as_sequence_or_array(val, var)
if isinstance(dense_result, list):
result_shapes = [AA(v).shape for v in dense_result]
else:
result_shapes = dense_result.shape
assert result_shapes == expected_array_shapes
def _sparse_value_to_csr_test(data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes):
shape = (3,)
if num_of_dynamic_axes == 2:
var = sequence.input(shape, is_sparse=True)
elif num_of_dynamic_axes == 1:
var = input(shape, is_sparse=True)
else:
var = input(shape, is_sparse=True, dynamic_axes=[])
# conversion csr array -> value
val = asvalue(var, data)
assert val.shape == expected_value_shape
# conversion value -> csr array
csr_result = val.as_sequences(var)
csr_result_shapes = [v.shape for v in csr_result]
assert csr_result_shapes == expected_csr_shapes
DENSE_CONFIGURATIONS = [
# (dense data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes)
([[test_array],
[test_array, test_array]], 2, (2,2,5), [(1,5),(2,5)]),
([test_array,
test_array], 2, (2, 1, 5), [(1,5), (1,5)]),
([[test_array],
[test_array]], 2, (2, 1, 5), [(1,5), (1,5)]),
(test_array, 2, (5,), [(), (), (), (), ()]),
(AA([test_numbers], dtype=np.float32), 2, (1,5), [(5,)]),
(AA([test_numbers, test_numbers], dtype=np.float32),
2, (2, 5), [(5,), (5,)]),
([test_array,
test_array], 1, (2,1,5), (2,1,5)),
([[test_array],
[test_array]], 1, (2,1,5), (2,1,5)),
(AA([test_numbers, test_numbers], dtype=np.float32), 1, (2,5), (2,5)),
(AA([test_numbers], dtype=np.float32), 1, (1,5), (1,5)),
([test_array,
test_array], 0, (2,5), (2,5)),
(AA([test_numbers, test_numbers], dtype=np.float32), 0, (2,5), (2,5)),
(test_array, 0, (5,), (5,)),
]
@pytest.mark.parametrize("data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes", DENSE_CONFIGURATIONS)
def test_dense_value_to_ndarray(data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes):
_dense_value_to_ndarray_test(
data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes)
SPARSE_ARRAYS = [
# (sparse data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes)
([csr([[1.,0.,2.], [2.,3.,0.]]),
csr([5.,0.,1.])], 2, (2, 2, 3), [(2,3),(1,3)]),
([csr([1,0,2]),
csr([5,0,1])], 2, (2, 1, 3),[(1,3),(1,3)]),
([csr([[1,0,2],[2,3,4]])], 2, (1, 2, 3), [(2,3)]),
([csr([1,0,2]),
csr([5,0,1])], 1, (2, 1, 3), [(1,3),(1,3)]),
([csr([[1,0,2], [2,3,0]]),
csr([[5,0,1], [2,3,0]])], 1, (2, 2, 3), [(2,3),(2,3)]),
([csr([[1,0,2],[2,3,4]])], 1, (1, 2, 3), [(2,3)]),
(csr([1,0,2]), 0, (1, 3), [(1,3)]),
]
@pytest.mark.parametrize("data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes", SPARSE_ARRAYS)
def test_sparse_value_to_csr(data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes):
_sparse_value_to_csr_test(
data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes)
DENSE_FAILING_CONFIGURATIONS = [
# (dense data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes)
([[test_array],
[test_array, test_array]], 0, (2,2,5), [(1,5),(2,5)]),
# TODO: enable once check is implemented
#([[test_array],
# [test_array]], 0, (2, 1, 5), [(1, 5),(1, 5)]),
#([[test_array],
# [test_array, test_array]], 1, (2,2,5), [(1,5),(2,5)]),
]
SPARSE_FAILING_CONFIGURATIONS = [
# (sparse data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes)
# TODO: Following configurations are not meant to fail as expected
#(csr([[1,0,2], [2,3,0]]), 2, (1, 3), [(1,3)]),
#(csr([[1,0,2],[2,3,4]]), 2, (2, 1, 3), [(1,3),(1,3)]),
#(csr([[1,0,2], [2,3,0]]), 1, (1, 3), [(1,3)]),
([csr([[1,0,2],[2,3,4]])], 0, (1, 2, 3), [(2,3)]),
([csr([[1,0,2], [2,3,0]]),
csr([5,0,1])], 0, (2, 2, 3), [(2,3),(1,3)]),
([csr([1,0,2])], 0, (1, 3), [(1,3)]),
# TODO: enable once check is implemented
#([csr([[1,0,2], [2,3,0]]),
# csr([5,0,1])], 1, (2, 2, 3), [(2,3),(1,3)]),
]
@pytest.mark.parametrize("data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes", DENSE_FAILING_CONFIGURATIONS)
def test_dense_failing_value_to_ndarray(data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes):
with pytest.raises(ValueError):
_dense_value_to_ndarray_test(
data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes)
@pytest.mark.parametrize("data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes", SPARSE_FAILING_CONFIGURATIONS)
def test_sparse_failing_value_to_csr(data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes):
with pytest.raises(ValueError):
_sparse_value_to_csr_test(
data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes)
def test_asarray_method():
shape = (3,)
var = sequence.input(shape, is_sparse=True)
data = [csr([[1,0,2], [5,0,1]])]
# conversion array -> value
val = asvalue(var, data)
as_csr = val.as_sequences(var)
for a, d in zip(as_csr, data):
assert (a==d).toarray().all()
var = C.input(shape, is_sparse=True)
data = csr([[1,0,2], [5,0,1]])
# conversion array -> value
val = asvalue(var, data)
for v in [
val, # Value
super(Value, val), # cntk_py.Value
val.data, # NDArrayView
super(NDArrayView, val.data), # cntk_py.NDArrayView
]:
as_csr = v.asarray()
for a, d in zip(as_csr, data):
assert (a==d).toarray().all()
def test_value_properties():
ndav = NDArrayView((1, 2, 3), np.float32, device=C.cpu())
val = Value(batch=ndav)
dev = val.device
assert isinstance(dev, DeviceDescriptor)
assert str(dev) == 'CPU'
assert val.is_read_only == False
assert val.is_sparse == False
assert val.dtype == np.float32
def test_ndarray_properties():
ndav = NDArrayView((2, 3), np.float32, device=C.cpu())
dev = ndav.device
assert isinstance(dev, DeviceDescriptor)
assert str(dev) == 'CPU'
assert ndav.is_read_only == False
assert ndav.is_sparse == False
assert ndav.dtype == np.float32
def test_ndarrayview_from_csr(device_id):
dev = cntk_device(device_id)
data = [[[0, 1, 1], [0, 1, 0]], [[1, 0, 0], [1, 0, 1]]]
csr_data = _to_csr(data)
ndarrayview = NDArrayView.from_csr(csr_data, shape=(2, 2, 3))
assert np.array_equal(_to_dense(ndarrayview), data)
with pytest.raises(ValueError):
ndarrayview = NDArrayView.from_csr(csr_data, shape=(3, 2, 3))
with pytest.raises(ValueError):
ndarrayview = NDArrayView.from_csr(csr_data, shape=(2, 2, 4))
def test_2d_sparse_sequences_value(device_id):
dev = cntk_device(device_id)
seq1_data = [[[0, 1, 1], [0, 1, 0]], [[1, 0, 0], [1, 0, 1]]]
csr_seq1 = _to_csr(seq1_data)
ndarrayview1 = NDArrayView.from_csr(csr_seq1, shape=(2, 2, 3), device=cpu())
seq2_data = [[0, 1, 1], [1, 1, 0]]
csr_seq2 = _to_csr(seq2_data)
ndarrayview2 = NDArrayView.from_csr(csr_seq2, shape=(1, 2, 3), device=cpu())
x = sequence.input((2, 3))
sequence_value = Value.create(x, [ndarrayview1, ndarrayview2], device=dev)
assert np.array_equal(_to_dense(sequence_value.data), [seq1_data, [seq2_data, [[0, 0, 0], [0, 0, 0]]]])
def test_as_shape_to_1d(device_id):
dev = cntk_device(device_id)
x = C.input(1)
w_1d = C.parameter((1), device=dev)
assert np.array_equal(w_1d.value, [0])
op = x * 0.1
value = op.eval({x:np.asarray([[1]], dtype=np.float32)}, as_numpy=False, device=dev)
value = value.data.as_shape(value.data.shape[1:])
w_1d.value = value
assert np.array_equal(w_1d.value, np.asarray([0.1], dtype=np.float32))
def test_is_valid(device_id):
a = C.input((2,), needs_gradient=True)
b = a*a
a0 = np.array([1,2],dtype=np.float32)
g = b.grad({a:a0}, as_numpy=False)
g2 = b.grad({a:a0}, as_numpy=False)
assert (g.is_valid == False)
assert (g2.is_valid == True)
|
[
"cntk.ops.tests.ops_test_utils.cntk_device",
"cntk.input",
"cntk.asvalue",
"cntk.tests.test_utils._to_csr",
"numpy.asarray",
"cntk.cpu",
"cntk.parameter",
"pytest.raises",
"numpy.array",
"cntk.tests.test_utils._to_dense",
"numpy.array_equal",
"pytest.mark.parametrize",
"cntk.internal._value_as_sequence_or_array"
] |
[((3041, 3169), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes"""', 'DENSE_CONFIGURATIONS'], {}), "(\n 'data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes',\n DENSE_CONFIGURATIONS)\n", (3064, 3169), False, 'import pytest\n'), ((3964, 4083), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes"""', 'SPARSE_ARRAYS'], {}), "(\n 'data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes',\n SPARSE_ARRAYS)\n", (3987, 4083), False, 'import pytest\n'), ((5356, 5492), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes"""', 'DENSE_FAILING_CONFIGURATIONS'], {}), "(\n 'data, num_of_dynamic_axes, expected_value_shape, expected_array_shapes',\n DENSE_FAILING_CONFIGURATIONS)\n", (5379, 5492), False, 'import pytest\n'), ((5757, 5892), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes"""', 'SPARSE_FAILING_CONFIGURATIONS'], {}), "(\n 'data, num_of_dynamic_axes, expected_value_shape, expected_csr_shapes',\n SPARSE_FAILING_CONFIGURATIONS)\n", (5780, 5892), False, 'import pytest\n'), ((1078, 1096), 'cntk.asvalue', 'asvalue', (['var', 'data'], {}), '(var, data)\n', (1085, 1096), False, 'from cntk import asarray, asvalue\n'), ((1194, 1231), 'cntk.internal._value_as_sequence_or_array', '_value_as_sequence_or_array', (['val', 'var'], {}), '(val, var)\n', (1221, 1231), False, 'from cntk.internal import _value_as_sequence_or_array\n'), ((1836, 1854), 'cntk.asvalue', 'asvalue', (['var', 'data'], {}), '(var, data)\n', (1843, 1854), False, 'from cntk import asarray, asvalue\n'), ((6319, 6337), 'cntk.asvalue', 'asvalue', (['var', 'data'], {}), '(var, data)\n', (6326, 6337), False, 'from cntk import asarray, asvalue\n'), ((6457, 6487), 'cntk.input', 'C.input', (['shape'], {'is_sparse': '(True)'}), '(shape, is_sparse=True)\n', (6464, 6487), True, 'import cntk as C\n'), ((6566, 6584), 'cntk.asvalue', 'asvalue', (['var', 'data'], {}), '(var, data)\n', (6573, 6584), False, 'from cntk import asarray, asvalue\n'), ((7578, 7600), 'cntk.ops.tests.ops_test_utils.cntk_device', 'cntk_device', (['device_id'], {}), '(device_id)\n', (7589, 7600), False, 'from cntk.ops.tests.ops_test_utils import compare_lists_of_np_arrays, cntk_device\n'), ((7676, 7689), 'cntk.tests.test_utils._to_csr', '_to_csr', (['data'], {}), '(data)\n', (7683, 7689), False, 'from cntk.tests.test_utils import _to_dense, _to_csr\n'), ((8085, 8107), 'cntk.ops.tests.ops_test_utils.cntk_device', 'cntk_device', (['device_id'], {}), '(device_id)\n', (8096, 8107), False, 'from cntk.ops.tests.ops_test_utils import compare_lists_of_np_arrays, cntk_device\n'), ((8188, 8206), 'cntk.tests.test_utils._to_csr', '_to_csr', (['seq1_data'], {}), '(seq1_data)\n', (8195, 8206), False, 'from cntk.tests.test_utils import _to_dense, _to_csr\n'), ((8342, 8360), 'cntk.tests.test_utils._to_csr', '_to_csr', (['seq2_data'], {}), '(seq2_data)\n', (8349, 8360), False, 'from cntk.tests.test_utils import _to_dense, _to_csr\n'), ((8708, 8730), 'cntk.ops.tests.ops_test_utils.cntk_device', 'cntk_device', (['device_id'], {}), '(device_id)\n', (8719, 8730), False, 'from cntk.ops.tests.ops_test_utils import compare_lists_of_np_arrays, cntk_device\n'), ((8739, 8749), 'cntk.input', 'C.input', (['(1)'], {}), '(1)\n', (8746, 8749), True, 'import cntk as C\n'), 
((8761, 8787), 'cntk.parameter', 'C.parameter', (['(1)'], {'device': 'dev'}), '(1, device=dev)\n', (8772, 8787), True, 'import cntk as C\n'), ((8801, 8832), 'numpy.array_equal', 'np.array_equal', (['w_1d.value', '[0]'], {}), '(w_1d.value, [0])\n', (8815, 8832), True, 'import numpy as np\n'), ((9132, 9166), 'cntk.input', 'C.input', (['(2,)'], {'needs_gradient': '(True)'}), '((2,), needs_gradient=True)\n', (9139, 9166), True, 'import cntk as C\n'), ((9188, 9222), 'numpy.array', 'np.array', (['[1, 2]'], {'dtype': 'np.float32'}), '([1, 2], dtype=np.float32)\n', (9196, 9222), True, 'import numpy as np\n'), ((5606, 5631), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (5619, 5631), False, 'import pytest\n'), ((6001, 6026), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (6014, 6026), False, 'import pytest\n'), ((7782, 7804), 'cntk.tests.test_utils._to_dense', '_to_dense', (['ndarrayview'], {}), '(ndarrayview)\n', (7791, 7804), False, 'from cntk.tests.test_utils import _to_dense, _to_csr\n'), ((7822, 7847), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7835, 7847), False, 'import pytest\n'), ((7929, 7954), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7942, 7954), False, 'import pytest\n'), ((8579, 8609), 'cntk.tests.test_utils._to_dense', '_to_dense', (['sequence_value.data'], {}), '(sequence_value.data)\n', (8588, 8609), False, 'from cntk.tests.test_utils import _to_dense, _to_csr\n'), ((9055, 9090), 'numpy.asarray', 'np.asarray', (['[0.1]'], {'dtype': 'np.float32'}), '([0.1], dtype=np.float32)\n', (9065, 9090), True, 'import numpy as np\n'), ((6980, 6987), 'cntk.cpu', 'C.cpu', ([], {}), '()\n', (6985, 6987), True, 'import cntk as C\n'), ((7305, 7312), 'cntk.cpu', 'C.cpu', ([], {}), '()\n', (7310, 7312), True, 'import cntk as C\n'), ((8874, 8909), 'numpy.asarray', 'np.asarray', (['[[1]]'], {'dtype': 'np.float32'}), '([[1]], dtype=np.float32)\n', (8884, 8909), True, 'import numpy as np\n')]
|
# MIT License
#
# Copyright (c) 2019-2021 Tskit Developers
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""
Test cases for generating genotypes/haplotypes.
"""
import itertools
import random
import textwrap
import msprime
import numpy as np
import pytest
import tests
import tests.test_wright_fisher as wf
import tests.tsutil as tsutil
import tskit
from tests.test_highlevel import get_example_tree_sequences
from tskit import exceptions
# ↑ See https://github.com/tskit-dev/tskit/issues/1804 for when
# we can remove this.
# TODO replace this with a call to
# example_tree_sequences(discrete_genome=True, snps_only=True)
@tests.cached_example
def get_example_discrete_genome_tree_sequences():
ret = []
for ts in get_example_tree_sequences():
if ts.discrete_genome:
snps = all(len(site.ancestral_state) == 1 for site in ts.sites()) and all(
len(mut.derived_state) == 1 for mut in ts.mutations()
)
if snps:
ret.append(ts)
return ret
def naive_get_ancestral_haplotypes(ts):
"""
Simple implementation using tree traversals. Note that this definition
won't work when we have topology that's not reachable from a root,
    but handling that seems more trouble than it's worth.
"""
A = np.zeros((ts.num_nodes, ts.num_sites), dtype=np.int8)
A[:] = tskit.MISSING_DATA
for t in ts.trees():
for site in t.sites():
alleles = {site.ancestral_state: 0}
for u in t.nodes():
A[u, site.id] = 0
j = 1
for mutation in site.mutations:
if mutation.derived_state not in alleles:
alleles[mutation.derived_state] = j
j += 1
for u in t.nodes(mutation.node):
A[u, site.id] = alleles[mutation.derived_state]
return A
class TestGetAncestralHaplotypes:
"""
    Tests for the engine that recovers the actual ancestral haplotypes
    from a simulation.
"""
def verify(self, ts):
A = naive_get_ancestral_haplotypes(ts)
# To detect missing data in ancestors we must set all nodes
# to be samples
tables = ts.dump_tables()
nodes = tables.nodes
flags = nodes.flags[:]
flags[:] = 1
nodes.set_columns(time=nodes.time, flags=flags)
ts = tables.tree_sequence()
B = ts.genotype_matrix().T
assert np.array_equal(A, B)
def test_single_tree(self):
ts = msprime.simulate(5, mutation_rate=1, random_seed=234)
self.verify(ts)
def test_many_trees(self):
ts = msprime.simulate(
8, recombination_rate=10, mutation_rate=10, random_seed=234
)
assert ts.num_trees > 1
assert ts.num_sites > 1
self.verify(ts)
def test_single_tree_jukes_cantor(self):
ts = msprime.simulate(6, random_seed=1, mutation_rate=1)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
self.verify(ts)
def test_single_tree_multichar_mutations(self):
ts = msprime.simulate(6, random_seed=1, mutation_rate=1)
ts = tsutil.insert_multichar_mutations(ts)
self.verify(ts)
def test_many_trees_infinite_sites(self):
ts = msprime.simulate(6, recombination_rate=2, mutation_rate=2, random_seed=1)
assert ts.num_sites > 0
assert ts.num_trees > 2
self.verify(ts)
def test_wright_fisher_initial_generation(self):
tables = wf.wf_sim(
6, 5, seed=3, deep_history=True, initial_generation_samples=True, num_loci=2
)
tables.sort()
tables.simplify()
ts = msprime.mutate(tables.tree_sequence(), rate=0.08, random_seed=2)
assert ts.num_sites > 0
self.verify(ts)
def test_wright_fisher_simplified(self):
tables = wf.wf_sim(
9,
10,
seed=1,
deep_history=True,
initial_generation_samples=False,
num_loci=5,
)
tables.sort()
ts = tables.tree_sequence().simplify()
ts = msprime.mutate(ts, rate=0.2, random_seed=1234)
assert ts.num_sites > 0
self.verify(ts)
def test_empty_ts(self):
tables = tskit.TableCollection(1.0)
for _ in range(10):
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
ts = tables.tree_sequence()
self.verify(ts)
def isolated_samples_genotype_matrix(ts):
"""
Returns the genotype matrix for the specified tree sequence
where isolated samples are marked with MISSING_DATA.
"""
G = ts.genotype_matrix()
samples = ts.samples()
sample_index_map = np.zeros(ts.num_nodes, dtype=int) - 1
for index, sample in enumerate(samples):
sample_index_map[sample] = index
for tree in ts.trees():
for site in tree.sites():
for root in tree.roots:
# An isolated sample is any root that has no children.
if tree.left_child(root) == -1:
assert sample_index_map[root] != -1
G[site.id, sample_index_map[root]] = -1
return G
class TestVariantGenerator:
"""
Tests the variants() method to ensure the output is consistent.
"""
def get_tree_sequence(self):
ts = msprime.simulate(
10, length=10, recombination_rate=1, mutation_rate=10, random_seed=3
)
assert ts.get_num_mutations() > 10
return ts
def test_as_bytes(self):
ts = self.get_tree_sequence()
n = ts.get_sample_size()
m = ts.get_num_mutations()
A = np.zeros((m, n), dtype="u1")
B = np.zeros((m, n), dtype="u1")
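        # With as_bytes=True the genotypes arrive as ASCII '0'/'1' bytes;
        # subtracting ord("0") recovers the integer encoding for comparison.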
for variant in ts.variants():
A[variant.index] = variant.genotypes
for variant in ts.variants(as_bytes=True):
assert isinstance(variant.genotypes, bytes)
B[variant.index] = np.frombuffer(variant.genotypes, np.uint8) - ord("0")
assert np.all(A == B)
bytes_variants = list(ts.variants(as_bytes=True))
for j, variant in enumerate(bytes_variants):
assert j == variant.index
row = np.frombuffer(variant.genotypes, np.uint8) - ord("0")
assert np.all(A[j] == row)
def test_as_bytes_fails(self):
ts = tsutil.insert_multichar_mutations(self.get_tree_sequence())
with pytest.raises(ValueError):
list(ts.variants(as_bytes=True))
def test_dtype(self):
ts = self.get_tree_sequence()
for var in ts.variants():
assert var.genotypes.dtype == np.int8
def test_dtype_conversion(self):
# Check if we hit any issues if we assume the variants are uint8
# as they were prior to version 0.2.0
ts = self.get_tree_sequence()
G = ts.genotype_matrix().astype(np.uint8)
assert G.dtype == np.uint8
for var in ts.variants():
assert np.array_equal(G[var.index], var.genotypes)
assert np.all(G[var.index] == var.genotypes)
assert [var.alleles[g] for g in var.genotypes] == [
var.alleles[g] for g in G[var.index]
]
G[var.index, :] = var.genotypes
assert np.array_equal(G[var.index], var.genotypes)
def test_multichar_alleles(self):
ts = tsutil.insert_multichar_mutations(self.get_tree_sequence())
for var in ts.variants():
assert len(var.alleles) == 2
assert var.site.ancestral_state == var.alleles[0]
assert var.site.mutations[0].derived_state == var.alleles[1]
assert all(0 <= var.genotypes)
assert all(var.genotypes <= 1)
def test_many_alleles(self):
ts = self.get_tree_sequence()
tables = ts.dump_tables()
tables.sites.clear()
tables.mutations.clear()
# This gives us a total of 360 permutations.
alleles = list(map("".join, itertools.permutations("ABCDEF", 4)))
assert len(alleles) > 127
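        # The int8 genotype encoding caps the number of alleles at 127;
        # adding any more must raise a LibraryError, as checked below.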
tables.sites.add_row(0, alleles[0])
parent = -1
num_alleles = 1
for allele in alleles[1:]:
ts = tables.tree_sequence()
if num_alleles > 127:
with pytest.raises(exceptions.LibraryError):
next(ts.variants())
else:
var = next(ts.variants())
assert not var.has_missing_data
assert var.num_alleles == num_alleles
assert len(var.alleles) == num_alleles
assert list(var.alleles) == alleles[:num_alleles]
assert var.alleles[var.genotypes[0]] == alleles[num_alleles - 1]
for u in ts.samples():
if u != 0:
assert var.alleles[var.genotypes[u]] == alleles[0]
tables.mutations.add_row(0, 0, allele, parent=parent)
parent += 1
num_alleles += 1
def test_many_alleles_missing_data(self):
ts = self.get_tree_sequence()
tables = ts.dump_tables()
tables.sites.clear()
tables.mutations.clear()
# Add an isolated sample
tables.nodes.add_row(flags=1, time=0)
# This gives us a total of 360 permutations.
alleles = list(map("".join, itertools.permutations("ABCDEF", 4)))
assert len(alleles) > 127
tables.sites.add_row(0, alleles[0])
parent = -1
num_alleles = 1
for allele in alleles[1:]:
ts = tables.tree_sequence()
if num_alleles > 127:
with pytest.raises(exceptions.LibraryError):
next(ts.variants())
else:
var = next(ts.variants())
assert var.has_missing_data
assert var.num_alleles == num_alleles
assert len(var.alleles) == num_alleles + 1
assert list(var.alleles)[:-1] == alleles[:num_alleles]
assert var.alleles[-1] is None
assert var.alleles[var.genotypes[0]] == alleles[num_alleles - 1]
assert var.genotypes[-1] == -1
samples = ts.samples()
for u in samples[:-1]:
if u != 0:
assert var.alleles[var.genotypes[u]] == alleles[0]
tables.mutations.add_row(0, 0, allele, parent=parent)
parent += 1
num_alleles += 1
def test_site_information(self):
ts = self.get_tree_sequence()
for site, variant in zip(ts.sites(), ts.variants()):
assert site.position == variant.position
assert site == variant.site
def test_no_mutations(self):
ts = msprime.simulate(10)
assert ts.get_num_mutations() == 0
variants = list(ts.variants())
assert len(variants) == 0
def test_genotype_matrix(self):
ts = self.get_tree_sequence()
G = np.empty((ts.num_sites, ts.num_samples), dtype=np.int8)
for v in ts.variants():
G[v.index, :] = v.genotypes
G2 = ts.genotype_matrix()
assert np.array_equal(G, G2)
assert G2.dtype == np.int8
def test_recurrent_mutations_over_samples(self):
ts = self.get_tree_sequence()
tables = ts.dump_tables()
tables.sites.clear()
tables.mutations.clear()
num_sites = 5
for j in range(num_sites):
tables.sites.add_row(
position=j * ts.sequence_length / num_sites, ancestral_state="0"
)
for u in range(ts.sample_size):
tables.mutations.add_row(site=j, node=u, derived_state="1")
ts = tables.tree_sequence()
variants = list(ts.variants())
assert len(variants) == num_sites
for site, variant in zip(ts.sites(), variants):
assert site.position == variant.position
assert site == variant.site
assert site.id == variant.index
assert variant.alleles == ("0", "1")
assert np.all(variant.genotypes == np.ones(ts.sample_size))
def test_silent_mutations(self):
ts = self.get_tree_sequence()
tree = next(ts.trees())
tables = ts.dump_tables()
for u in tree.nodes():
for sample in tree.samples(u):
if sample != u:
tables.sites.clear()
tables.mutations.clear()
site = tables.sites.add_row(position=0, ancestral_state="0")
tables.mutations.add_row(site=site, node=u, derived_state="1")
tables.mutations.add_row(site=site, node=sample, derived_state="1")
ts_new = tables.tree_sequence()
assert all([v.genotypes[sample] == 1 for v in ts_new.variants()])
def test_zero_samples(self):
ts = self.get_tree_sequence()
for var1, var2 in zip(ts.variants(), ts.variants(samples=[])):
assert var1.site == var2.site
assert var1.alleles == var2.alleles
assert var2.genotypes.shape[0] == 0
def test_samples(self):
n = 4
ts = msprime.simulate(
n, length=5, recombination_rate=1, mutation_rate=5, random_seed=2
)
assert ts.num_sites > 1
samples = list(range(n))
# Generate all possible sample lists.
for j in range(n + 1):
for s in itertools.permutations(samples, j):
s = np.array(s, dtype=np.int32)
count = 0
for var1, var2 in zip(ts.variants(), ts.variants(samples=s)):
assert var1.site == var2.site
assert var1.alleles == var2.alleles
assert var2.genotypes.shape == (len(s),)
assert np.array_equal(var1.genotypes[s], var2.genotypes)
count += 1
assert count == ts.num_sites
def test_samples_missing_data(self):
n = 4
ts = msprime.simulate(
n, length=5, recombination_rate=1, mutation_rate=5, random_seed=2
)
assert ts.num_sites > 1
tables = ts.dump_tables()
tables.delete_intervals([[0.5, 0.6]])
tables.sites.add_row(0.5, ancestral_state="0")
tables.sort()
ts = tables.tree_sequence()
samples = list(range(n))
# Generate all possible sample lists.
for j in range(1, n + 1):
for s in itertools.permutations(samples, j):
s = np.array(s, dtype=np.int32)
count = 0
for var1, var2 in zip(ts.variants(), ts.variants(samples=s)):
assert var1.site == var2.site
assert var1.alleles == var2.alleles
assert var2.genotypes.shape == (len(s),)
assert np.array_equal(var1.genotypes[s], var2.genotypes)
count += 1
assert count == ts.num_sites
def test_non_sample_samples(self):
# We don't have to use sample nodes. This does make the terminology confusing
# but it's probably still the best option.
ts = msprime.simulate(
10, length=5, recombination_rate=1, mutation_rate=5, random_seed=2
)
tables = ts.dump_tables()
tables.nodes.set_columns(
flags=np.zeros_like(tables.nodes.flags) + tskit.NODE_IS_SAMPLE,
time=tables.nodes.time,
)
all_samples_ts = tables.tree_sequence()
assert all_samples_ts.num_samples == ts.num_nodes
count = 0
samples = range(ts.num_nodes)
for var1, var2 in zip(
all_samples_ts.variants(isolated_as_missing=False),
ts.variants(samples=samples, isolated_as_missing=False),
):
assert var1.site == var2.site
assert var1.alleles == var2.alleles
assert var2.genotypes.shape == (len(samples),)
assert np.array_equal(var1.genotypes, var2.genotypes)
count += 1
assert count == ts.num_sites
def verify_jukes_cantor(self, ts):
assert np.array_equal(ts.genotype_matrix(), ts.genotype_matrix())
tree = ts.first()
for variant in ts.variants():
assert not variant.has_missing_data
mutations = {
mutation.node: mutation.derived_state
for mutation in variant.site.mutations
}
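            # For each sample, climb towards the root; the first mutated node
            # on the path determines the expected state at this site.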
for sample_index, u in enumerate(ts.samples()):
while u not in mutations and u != tskit.NULL:
u = tree.parent(u)
state1 = mutations.get(u, variant.site.ancestral_state)
state2 = variant.alleles[variant.genotypes[sample_index]]
assert state1 == state2
def test_jukes_cantor_n5(self):
ts = msprime.simulate(5, random_seed=2)
ts = tsutil.jukes_cantor(ts, 5, 1, seed=2)
self.verify_jukes_cantor(ts)
def test_jukes_cantor_n20(self):
ts = msprime.simulate(20, random_seed=2)
ts = tsutil.jukes_cantor(ts, 5, 1, seed=2)
self.verify_jukes_cantor(ts)
def test_zero_edge_missing_data(self):
ts = msprime.simulate(10, random_seed=2, mutation_rate=2)
tables = ts.dump_tables()
tables.keep_intervals([[0.25, 0.75]])
# add some sites in the deleted regions
tables.sites.add_row(0.1, "A")
tables.sites.add_row(0.2, "A")
tables.sites.add_row(0.8, "A")
tables.sites.add_row(0.9, "A")
tables.sort()
ts = tables.tree_sequence()
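        # isolated_as_missing=False imputes isolated samples to the ancestral
        # state (0); the missing-aware matrix reports them as -1 instead.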
Gnm = ts.genotype_matrix(isolated_as_missing=False)
assert np.all(Gnm[0] == 0)
assert np.all(Gnm[1] == 0)
assert np.all(Gnm[-1] == 0)
assert np.all(Gnm[-2] == 0)
Gm = isolated_samples_genotype_matrix(ts)
assert np.all(Gm[0] == -1)
assert np.all(Gm[1] == -1)
assert np.all(Gm[-1] == -1)
assert np.all(Gm[-2] == -1)
Gm2 = ts.genotype_matrix(isolated_as_missing=True)
assert np.array_equal(Gm, Gm2)
# Test deprecated param
with pytest.warns(FutureWarning):
Gi = ts.genotype_matrix(impute_missing_data=True)
assert np.array_equal(Gnm, Gi)
with pytest.warns(FutureWarning):
Gni = ts.genotype_matrix(impute_missing_data=False)
assert np.array_equal(Gm, Gni)
with pytest.warns(FutureWarning):
G = ts.genotype_matrix(isolated_as_missing=False, impute_missing_data=True)
assert np.array_equal(Gnm, G)
with pytest.warns(FutureWarning):
G = ts.genotype_matrix(isolated_as_missing=True, impute_missing_data=False)
assert np.array_equal(Gm, G)
def test_empty_ts_missing_data(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.sites.add_row(0.5, "A")
ts = tables.tree_sequence()
variants = list(ts.variants())
assert len(variants) == 1
var = variants[0]
assert var.alleles == ("A", None)
assert var.num_alleles == 1
assert np.all(var.genotypes == -1)
def test_empty_ts_incomplete_samples(self):
# https://github.com/tskit-dev/tskit/issues/776
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.sites.add_row(0.5, "A")
ts = tables.tree_sequence()
variants = list(ts.variants(samples=[0]))
assert list(variants[0].genotypes) == [-1]
variants = list(ts.variants(samples=[1]))
assert list(variants[0].genotypes) == [-1]
def test_missing_data_samples(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.sites.add_row(0.5, "A")
tables.mutations.add_row(0, 0, "T")
ts = tables.tree_sequence()
# If we have no samples we still get a list of variants.
variants = list(ts.variants(samples=[]))
assert len(variants[0].genotypes) == 0
assert not variants[0].has_missing_data
assert variants[0].alleles == ("A", "T")
# If we have a single sample that's not missing, there's no
# missing data.
variants = list(ts.variants(samples=[0]))
assert len(variants[0].genotypes) == 1
assert variants[0].genotypes[0] == 1
assert not variants[0].has_missing_data
assert variants[0].alleles == ("A", "T")
# If we have a single sample that is missing, there is
# missing data.
variants = list(ts.variants(samples=[1]))
assert len(variants[0].genotypes) == 1
assert variants[0].genotypes[0] == -1
assert variants[0].has_missing_data
assert variants[0].alleles == ("A", "T", None)
def test_mutation_over_isolated_sample_not_missing(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.sites.add_row(0.5, "A")
tables.mutations.add_row(0, 0, "T")
ts = tables.tree_sequence()
variants = list(ts.variants())
assert len(variants) == 1
var = variants[0]
assert var.alleles == ("A", "T", None)
assert var.num_alleles == 2
assert list(var.genotypes) == [1, -1]
def test_multiple_mutations_over_isolated_sample(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.sites.add_row(0.5, "A")
tables.mutations.add_row(0, 0, "T")
tables.mutations.add_row(0, 0, "G", parent=0)
ts = tables.tree_sequence()
variants = list(ts.variants())
assert len(variants) == 1
var = variants[0]
assert var.alleles == ("A", "T", "G", None)
assert var.num_alleles == 3
assert len(var.site.mutations) == 2
assert list(var.genotypes) == [2, -1]
def test_snipped_tree_sequence_missing_data(self):
ts = msprime.simulate(
10, length=10, recombination_rate=0.1, mutation_rate=10, random_seed=3
)
tables = ts.dump_tables()
tables.delete_intervals([[4, 6]], simplify=False)
tables.sites.add_row(4, ancestral_state="0")
tables.sites.add_row(5, ancestral_state="0")
tables.sites.add_row(5.999999, ancestral_state="0")
tables.sort()
ts = tables.tree_sequence()
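        # The three sites added above fall inside the deleted interval [4, 6),
        # so every sample is isolated there and must be reported as missing.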
G = ts.genotype_matrix()
num_missing = 0
for var in ts.variants():
if 4 <= var.site.position < 6:
assert var.has_missing_data
assert np.all(var.genotypes == tskit.MISSING_DATA)
num_missing += 1
else:
assert not var.has_missing_data
assert np.all(var.genotypes != tskit.MISSING_DATA)
assert np.array_equal(var.genotypes, G[var.site.id])
assert num_missing == 3
G = ts.genotype_matrix(isolated_as_missing=False)
for var in ts.variants(isolated_as_missing=False):
if 4 <= var.site.position < 6:
assert not var.has_missing_data
assert np.all(var.genotypes == 0)
else:
assert not var.has_missing_data
assert np.all(var.genotypes != tskit.MISSING_DATA)
assert np.array_equal(var.genotypes, G[var.site.id])
def test_snipped_tree_sequence_mutations_over_isolated(self):
ts = msprime.simulate(
10, length=10, recombination_rate=0.1, mutation_rate=10, random_seed=3
)
tables = ts.dump_tables()
tables.delete_intervals([[4, 6]], simplify=False)
missing_site = tables.sites.add_row(4, ancestral_state="0")
tables.mutations.add_row(missing_site, node=0, derived_state="1")
# Add another site in which all the samples are marked with a mutation
# to the ancestral state. Note: this would normally not be allowed because
        # there's no state change. However, this allows us to mark a sample
# as not-missing, so it's an important feature.
missing_site = tables.sites.add_row(5, ancestral_state="0")
for u in range(10):
tables.mutations.add_row(missing_site, node=u, derived_state="0")
tables.sort()
ts = tables.tree_sequence()
G = ts.genotype_matrix()
missing_found = False
non_missing_found = False
for var in ts.variants():
if var.site.position == 4:
assert var.has_missing_data
assert var.genotypes[0] == 1
assert np.all(var.genotypes[1:] == tskit.MISSING_DATA)
                missing_found = True
elif var.site.position == 5:
assert not var.has_missing_data
assert np.all(var.genotypes == 0)
                non_missing_found = True
else:
assert not var.has_missing_data
assert np.all(var.genotypes != tskit.MISSING_DATA)
assert np.array_equal(var.genotypes, G[var.site.id])
assert non_missing_found
assert missing_found
class TestHaplotypeGenerator:
"""
Tests the haplotype generation code.
"""
def verify_haplotypes(self, n, haplotypes):
"""
Verify that the specified set of haplotypes is consistent.
"""
assert len(haplotypes) == n
m = len(haplotypes[0])
for h in haplotypes:
assert len(h) == m
        # Examine each column in H; each character must be '0' or '1'
        for k in range(m):
            zeros = 0
            ones = 0
            for j in range(n):
                b = haplotypes[j][k]
                zeros += b == "0"
                ones += b == "1"
assert zeros + ones == n
def verify_tree_sequence(self, tree_sequence):
n = tree_sequence.sample_size
m = tree_sequence.num_sites
haplotypes = list(tree_sequence.haplotypes())
A = np.zeros((n, m), dtype="u1")
B = np.zeros((n, m), dtype="u1")
for j, h in enumerate(haplotypes):
assert len(h) == m
A[j] = np.frombuffer(h.encode("ascii"), np.uint8) - ord("0")
for variant in tree_sequence.variants():
B[:, variant.index] = variant.genotypes
assert np.all(A == B)
self.verify_haplotypes(n, haplotypes)
def verify_simulation(self, n, m, r, theta):
"""
Verifies a simulation for the specified parameters.
"""
recomb_map = msprime.RecombinationMap.uniform_map(m, r, m)
tree_sequence = msprime.simulate(
n, recombination_map=recomb_map, mutation_rate=theta
)
self.verify_tree_sequence(tree_sequence)
def test_random_parameters(self):
num_random_sims = 10
for _ in range(num_random_sims):
n = random.randint(2, 50)
m = random.randint(10, 200)
r = random.random()
theta = random.uniform(0, 2)
self.verify_simulation(n, m, r, theta)
def test_nonbinary_trees(self):
bottlenecks = [
msprime.SimpleBottleneck(0.01, 0, proportion=0.05),
msprime.SimpleBottleneck(0.02, 0, proportion=0.25),
msprime.SimpleBottleneck(0.03, 0, proportion=1),
]
ts = msprime.simulate(
10,
length=100,
recombination_rate=1,
demographic_events=bottlenecks,
random_seed=1,
)
self.verify_tree_sequence(ts)
def test_acgt_mutations(self):
ts = msprime.simulate(10, mutation_rate=10)
assert ts.num_sites > 0
tables = ts.tables
sites = tables.sites
mutations = tables.mutations
sites.set_columns(
position=sites.position,
ancestral_state=np.zeros(ts.num_sites, dtype=np.int8) + ord("A"),
ancestral_state_offset=np.arange(ts.num_sites + 1, dtype=np.uint32),
)
mutations.set_columns(
site=mutations.site,
node=mutations.node,
derived_state=np.zeros(ts.num_sites, dtype=np.int8) + ord("T"),
derived_state_offset=np.arange(ts.num_sites + 1, dtype=np.uint32),
)
tsp = tables.tree_sequence()
H = [h.replace("0", "A").replace("1", "T") for h in ts.haplotypes()]
assert H == list(tsp.haplotypes())
def test_fails_multiletter_mutations(self):
ts = msprime.simulate(10, random_seed=2)
tables = ts.tables
tables.sites.add_row(0, "ACTG")
tsp = tables.tree_sequence()
with pytest.raises(TypeError):
list(tsp.haplotypes())
def test_fails_deletion_mutations(self):
ts = msprime.simulate(10, random_seed=2)
tables = ts.tables
tables.sites.add_row(0, "")
tsp = tables.tree_sequence()
with pytest.raises(TypeError):
list(tsp.haplotypes())
def test_nonascii_mutations(self):
ts = msprime.simulate(10, random_seed=2)
tables = ts.tables
tables.sites.add_row(0, chr(169)) # Copyright symbol
tsp = tables.tree_sequence()
with pytest.raises(TypeError):
list(tsp.haplotypes())
def test_recurrent_mutations_over_samples(self):
ts = msprime.simulate(10, random_seed=2)
num_sites = 5
tables = ts.dump_tables()
for j in range(num_sites):
tables.sites.add_row(
position=j * ts.sequence_length / num_sites, ancestral_state="0"
)
for u in range(ts.sample_size):
tables.mutations.add_row(site=j, node=u, derived_state="1")
ts_new = tables.tree_sequence()
ones = "1" * num_sites
for h in ts_new.haplotypes():
assert ones == h
def test_silent_mutations(self):
ts = msprime.simulate(10, random_seed=2)
tables = ts.dump_tables()
tree = next(ts.trees())
for u in tree.children(tree.root):
tables.sites.clear()
tables.mutations.clear()
site = tables.sites.add_row(position=0, ancestral_state="0")
tables.mutations.add_row(site=site, node=u, derived_state="1")
tables.mutations.add_row(site=site, node=tree.root, derived_state="1")
ts_new = tables.tree_sequence()
            assert all(h == "1" for h in ts_new.haplotypes())
def test_back_mutations(self):
base_ts = msprime.simulate(10, random_seed=2)
for j in [1, 2, 3]:
ts = tsutil.insert_branch_mutations(base_ts, mutations_per_branch=j)
self.verify_tree_sequence(ts)
def test_missing_data(self):
tables = tskit.TableCollection(1.0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.nodes.add_row(tskit.NODE_IS_SAMPLE, 0)
tables.sites.add_row(0.5, "A")
ts = tables.tree_sequence()
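        # Both samples are isolated over the site; a missing_data_character
        # that collides with an existing allele ("A") must be rejected.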
with pytest.raises(ValueError):
list(ts.haplotypes(missing_data_character="A"))
for c in ("-", ".", "a"):
h = list(ts.haplotypes(missing_data_character=c))
assert h == [c, c]
h = list(ts.haplotypes(isolated_as_missing=True))
assert h == ["N", "N"]
h = list(ts.haplotypes(isolated_as_missing=False))
assert h == ["A", "A"]
h = list(ts.haplotypes())
assert h == ["N", "N"]
# Test deprecated method
with pytest.warns(FutureWarning):
h = list(ts.haplotypes(impute_missing_data=True))
assert h == ["A", "A"]
with pytest.warns(FutureWarning):
h = list(ts.haplotypes(impute_missing_data=False))
assert h == ["N", "N"]
with pytest.warns(FutureWarning):
h = list(ts.haplotypes(isolated_as_missing=True, impute_missing_data=True))
assert h == ["N", "N"]
with pytest.warns(FutureWarning):
h = list(ts.haplotypes(isolated_as_missing=True, impute_missing_data=False))
assert h == ["N", "N"]
with pytest.warns(FutureWarning):
h = list(ts.haplotypes(isolated_as_missing=False, impute_missing_data=True))
assert h == ["A", "A"]
with pytest.warns(FutureWarning):
h = list(
ts.haplotypes(isolated_as_missing=False, impute_missing_data=False)
)
assert h == ["A", "A"]
class TestUserAlleles:
"""
Tests the functionality of providing a user-specified allele mapping.
"""
def test_simple_01(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=2)
assert ts.num_sites > 2
G1 = ts.genotype_matrix()
G2 = ts.genotype_matrix(alleles=("0", "1"))
assert np.array_equal(G1, G2)
for v1, v2 in itertools.zip_longest(
ts.variants(), ts.variants(alleles=("0", "1"))
):
assert v1.alleles == v2.alleles
assert v1.site == v2.site
assert np.array_equal(v1.genotypes, v2.genotypes)
def test_simple_01_trailing_alleles(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=2)
assert ts.num_sites > 2
G1 = ts.genotype_matrix()
alleles = ("0", "1", "2", "xxxxx")
G2 = ts.genotype_matrix(alleles=alleles)
assert np.array_equal(G1, G2)
for v1, v2 in itertools.zip_longest(
ts.variants(), ts.variants(alleles=alleles)
):
assert v2.alleles == alleles
assert v1.site == v2.site
assert np.array_equal(v1.genotypes, v2.genotypes)
def test_simple_01_leading_alleles(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=2)
assert ts.num_sites > 2
G1 = ts.genotype_matrix()
alleles = ("A", "B", "C", "0", "1")
G2 = ts.genotype_matrix(alleles=alleles)
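        # The three unused leading alleles shift the indices of "0" and "1"
        # up by 3, hence the offset in the comparison below.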
assert np.array_equal(G1 + 3, G2)
for v1, v2 in itertools.zip_longest(
ts.variants(), ts.variants(alleles=alleles)
):
assert v2.alleles == alleles
assert v1.site == v2.site
assert np.array_equal(v1.genotypes + 3, v2.genotypes)
def test_simple_01_duplicate_alleles(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=2)
assert ts.num_sites > 2
G1 = ts.genotype_matrix()
alleles = ("0", "0", "1")
G2 = ts.genotype_matrix(alleles=alleles)
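        # With the duplicate map ("0", "0", "1"), allele "1" sits at index 2,
        # so genotypes of 1 under the default encoding become 2 here.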
index = np.where(G1 == 1)
G1[index] = 2
assert np.array_equal(G1, G2)
for v1, v2 in itertools.zip_longest(
ts.variants(), ts.variants(alleles=alleles)
):
assert v2.alleles == alleles
assert v1.site == v2.site
g = v1.genotypes
index = np.where(g == 1)
g[index] = 2
assert np.array_equal(g, v2.genotypes)
def test_simple_acgt(self):
ts = msprime.simulate(10, random_seed=2)
ts = msprime.mutate(
ts, rate=4, random_seed=2, model=msprime.InfiniteSites(msprime.NUCLEOTIDES)
)
assert ts.num_sites > 2
alleles = tskit.ALLELES_ACGT
G = ts.genotype_matrix(alleles=alleles)
for v1, v2 in itertools.zip_longest(
ts.variants(), ts.variants(alleles=alleles)
):
assert v2.alleles == alleles
assert v1.site == v2.site
h1 = "".join(v1.alleles[g] for g in v1.genotypes)
h2 = "".join(v2.alleles[g] for g in v2.genotypes)
assert h1 == h2
assert np.array_equal(v2.genotypes, G[v1.site.id])
def test_missing_alleles(self):
ts = msprime.simulate(10, random_seed=2)
ts = msprime.mutate(
ts, rate=4, random_seed=2, model=msprime.InfiniteSites(msprime.NUCLEOTIDES)
)
assert ts.num_sites > 2
bad_allele_examples = [
tskit.ALLELES_01,
tuple(["A"]),
("C", "T", "G"),
("AA", "C", "T", "G"),
tuple(["ACTG"]),
]
for bad_alleles in bad_allele_examples:
with pytest.raises(exceptions.LibraryError):
ts.genotype_matrix(alleles=bad_alleles)
with pytest.raises(exceptions.LibraryError):
list(ts.variants(alleles=bad_alleles))
def test_too_many_alleles(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=2)
for n in range(128, 138):
bad_alleles = tuple(["0" for _ in range(n)])
with pytest.raises(exceptions.LibraryError):
ts.genotype_matrix(alleles=bad_alleles)
with pytest.raises(exceptions.LibraryError):
list(ts.variants(alleles=bad_alleles))
def test_zero_allele(self):
ts = msprime.simulate(10, mutation_rate=5, random_seed=2)
with pytest.raises(ValueError):
ts.genotype_matrix(alleles=tuple())
with pytest.raises(ValueError):
list(ts.variants(alleles=tuple()))
def test_missing_data(self):
tables = tskit.TableCollection(1)
tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
tables.sites.add_row(0.5, "0")
tables.mutations.add_row(0, 0, "1")
ts = tables.tree_sequence()
for isolated_as_missing in [True, False]:
G1 = ts.genotype_matrix(isolated_as_missing=isolated_as_missing)
G2 = ts.genotype_matrix(
isolated_as_missing=isolated_as_missing, alleles=tskit.ALLELES_01
)
assert np.array_equal(G1, G2)
vars1 = ts.variants(isolated_as_missing=isolated_as_missing)
vars2 = ts.variants(
isolated_as_missing=isolated_as_missing, alleles=tskit.ALLELES_01
)
for v1, v2 in itertools.zip_longest(vars1, vars2):
assert v2.alleles == v1.alleles
assert v1.site == v2.site
assert np.array_equal(v1.genotypes, v2.genotypes)
class TestUserAllelesRoundTrip:
"""
Tests that we correctly produce haplotypes in a variety of situations for
    the user-specified allele map encoding.
"""
def verify(self, ts, alleles):
for v1, v2 in itertools.zip_longest(
ts.variants(), ts.variants(alleles=alleles)
):
h1 = [v1.alleles[g] for g in v1.genotypes]
h2 = [v2.alleles[g] for g in v2.genotypes]
assert h1 == h2
def test_simple_01(self):
ts = msprime.simulate(5, mutation_rate=2, random_seed=3)
assert ts.num_sites > 3
valid_alleles = [
tskit.ALLELES_01,
("0", "1", "xry"),
("xry", "0", "1", "xry"),
tuple([str(j) for j in range(127)]),
tuple(["0" for j in range(126)] + ["1"]),
]
for alleles in valid_alleles:
self.verify(ts, alleles)
def test_simple_acgt(self):
ts = msprime.simulate(5, random_seed=3)
ts = msprime.mutate(
ts, rate=4, random_seed=3, model=msprime.InfiniteSites(msprime.NUCLEOTIDES)
)
assert ts.num_sites > 3
valid_alleles = [
tskit.ALLELES_ACGT,
("A", "C", "T", "G", "AAAAAAAAAAAAAA"),
("AA", "CC", "TT", "GG", "A", "C", "T", "G"),
]
for alleles in valid_alleles:
self.verify(ts, alleles)
def test_jukes_cantor(self):
ts = msprime.simulate(6, random_seed=1, mutation_rate=1)
ts = tsutil.jukes_cantor(ts, 20, 1, seed=10)
valid_alleles = [
tskit.ALLELES_ACGT,
("A", "C", "T", "G", "AAAAAAAAAAAAAA"),
("AA", "CC", "TT", "GG", "A", "C", "T", "G"),
]
for alleles in valid_alleles:
self.verify(ts, alleles)
def test_multichar_mutations(self):
ts = msprime.simulate(6, random_seed=1, recombination_rate=2)
ts = tsutil.insert_multichar_mutations(ts)
assert ts.num_sites > 5
all_alleles = set()
for var in ts.variants():
all_alleles.update(var.alleles)
all_alleles = tuple(all_alleles)
self.verify(ts, all_alleles)
self.verify(ts, all_alleles[::-1])
def test_simple_01_missing_data(self):
ts = msprime.simulate(6, mutation_rate=2, random_seed=3)
tables = ts.dump_tables()
# Add another sample node. This will be missing data everywhere.
tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
ts = tables.tree_sequence()
assert ts.num_sites > 3
valid_alleles = [
tskit.ALLELES_01,
("0", "1", "xry"),
("xry", "0", "1", "xry"),
tuple([str(j) for j in range(127)]),
tuple(["0" for j in range(126)] + ["1"]),
]
for alleles in valid_alleles:
self.verify(ts, alleles)
class TestBinaryTreeExample:
# 2.00┊ 4 ┊
# ┊ ┏━┻┓ ┊
# 1.00┊ ┃ 3 ┊
# ┊ ┃ ┏┻┓ ┊
# 0.00┊ 0 1 2 ┊
# 0 10
# | |
# pos 2 9
# anc A T
@tests.cached_example
def ts(self):
ts = tskit.Tree.generate_balanced(3, span=10).tree_sequence
tables = ts.dump_tables()
tables.sites.add_row(2, ancestral_state="A")
tables.sites.add_row(9, ancestral_state="T")
tables.mutations.add_row(site=0, node=0, derived_state="G")
tables.mutations.add_row(site=1, node=3, derived_state="C")
return tables.tree_sequence()
def test_haplotypes(self):
H = list(self.ts().haplotypes())
assert H[0] == "GT"
assert H[1] == "AC"
assert H[2] == "AC"
def test_genotypes(self):
G = self.ts().genotype_matrix()
Gp = [[1, 0, 0], [0, 1, 1]]
np.testing.assert_array_equal(G, Gp)
@pytest.mark.skip("Reference sequence not implemented #1888")
def test_alignments_default(self):
A = list(self.ts().alignments())
assert A[0] == "NNGNNNNNNT"
assert A[1] == "NNANNNNNNC"
assert A[2] == "NNANNNNNNC"
@pytest.mark.skip("Reference sequence not implemented #1888")
def test_alignments_missing_char(self):
A = list(self.ts().alignments(missing_data_character="z"))
assert A[0] == "zzGzzzzzzT"
assert A[1] == "zzAzzzzzzC"
assert A[2] == "zzAzzzzzzC"
def test_alignments_reference_sequence(self):
ref = "0123456789"
A = list(self.ts().alignments(reference_sequence=ref))
assert A[0] == "01G345678T"
assert A[1] == "01A345678C"
assert A[2] == "01A345678C"
def test_alignments_reference_sequence_embedded_null(self):
# This is a total corner case, but just want to make sure
# we do something sensible.
ref = "0123" + "\0" + "56789"
A = list(self.ts().alignments(reference_sequence=ref))
assert A[0] == "01G3\x005678T"
assert A[1] == "01A3\x005678C"
assert A[2] == "01A3\x005678C"
def test_fasta_reference_sequence(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
>n0
01G345678T
>n1
01A345678C
>n2
01A345678C
"""
)
assert expected == self.ts().as_fasta(reference_sequence=ref)
def test_nexus_reference_sequence(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
#NEXUS
BEGIN TAXA;
DIMENSIONS NTAX=3;
TAXLABELS n0 n1 n2;
END;
BEGIN DATA;
DIMENSIONS NCHAR=10;
FORMAT DATATYPE=DNA;
MATRIX
n0 01G345678T
n1 01A345678C
n2 01A345678C
;
END;
BEGIN TREES;
TREE t0^10 = [&R] (n0:2,(n1:1,n2:1):1);
END;
"""
)
assert expected == self.ts().as_nexus(reference_sequence=ref)
class TestMissingDataExample:
# 2.00┊ 4 ┊
# ┊ ┏━┻┓ ┊
# 1.00┊ ┃ 3 ┊
# ┊ ┃ ┏┻┓ ┊
# 0.00┊ 0 1 2 5 ┊
# 0 10
# | |
# pos 2 9
# anc A T
@tests.cached_example
def ts(self):
ts = tskit.Tree.generate_balanced(3, span=10).tree_sequence
tables = ts.dump_tables()
tables.nodes.add_row(flags=tskit.NODE_IS_SAMPLE, time=0)
tables.sites.add_row(2, ancestral_state="A")
tables.sites.add_row(9, ancestral_state="T")
tables.mutations.add_row(site=0, node=0, derived_state="G")
tables.mutations.add_row(site=1, node=3, derived_state="C")
return tables.tree_sequence()
def test_haplotypes(self):
H = list(self.ts().haplotypes())
assert H[0] == "GT"
assert H[1] == "AC"
assert H[2] == "AC"
assert H[3] == "NN"
def test_haplotypes_missing_data_char(self):
H = list(self.ts().haplotypes(missing_data_character="?"))
assert H[0] == "GT"
assert H[1] == "AC"
assert H[2] == "AC"
assert H[3] == "??"
def test_genotypes(self):
G = self.ts().genotype_matrix()
Gp = [[1, 0, 0, -1], [0, 1, 1, -1]]
np.testing.assert_array_equal(G, Gp)
@pytest.mark.skip("Reference sequence not implemented #1888")
def test_alignments_default(self):
A = list(self.ts().alignments())
assert A[0] == "NNGNNNNNNT"
assert A[1] == "NNANNNNNNC"
assert A[2] == "NNANNNNNNC"
assert A[3] == "NNNNNNNNNN"
def test_alignments_fails(self):
# https://github.com/tskit-dev/tskit/issues/1896
ref = "N" * 10
with pytest.raises(ValueError, match="1896"):
next(self.ts().alignments(reference_sequence=ref))
@pytest.mark.skip("Missing data in alignments: #1896")
def test_alignments_impute_missing(self):
ref = "N" * 10
A = list(
self.ts().alignments(reference_sequence=ref, isolated_as_missing=False)
)
assert A[0] == "NNGNNNNNNT"
assert A[1] == "NNANNNNNNC"
assert A[2] == "NNANNNNNNC"
assert A[3] == "NNANNNNNNT"
@pytest.mark.skip("Reference sequence not implemented #1888")
def test_alignments_missing_char(self):
A = list(self.ts().alignments(missing_data_character="z"))
assert A[0] == "zzGzzzzzzT"
assert A[1] == "zzAzzzzzzC"
assert A[2] == "zzAzzzzzzC"
assert A[3] == "zzzzzzzzzz"
@pytest.mark.skip("Reference sequence not implemented #1888")
def test_alignments_missing_char_ref(self):
A = list(self.ts().alignments(missing_data_character="z"))
assert A[0] == "NNGNNNNNNT"
assert A[1] == "NNANNNNNNC"
assert A[2] == "NNANNNNNNC"
assert A[3] == "zzzzzzzzzz"
@pytest.mark.skip("Missing data in alignments: #1896")
def test_alignments_reference_sequence(self):
ref = "0123456789"
A = list(self.ts().alignments(reference_sequence=ref))
assert A[0] == "01G345678T"
assert A[1] == "01A345678C"
assert A[2] == "01A345678C"
assert A[3] == "NNNNNNNNNN"
@pytest.mark.skip("Missing data in alignments: #1896")
def test_alignments_reference_sequence_missing_data_char(self):
ref = "0123456789"
A = list(
self.ts().alignments(reference_sequence=ref, missing_data_character="Q")
)
assert A[0] == "01G345678T"
assert A[1] == "01A345678C"
assert A[2] == "01A345678C"
assert A[3] == "QQQQQQQQQQ"
@pytest.mark.skip("Missing data in alignments: #1896")
def test_fasta_reference_sequence(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
>n0
01G345678T
>n1
01A345678C
>n2
01A345678C
>n5
NNNNNNNNNN
"""
)
assert expected == self.ts().as_fasta(reference_sequence=ref)
@pytest.mark.skip("Missing data in alignments: #1896")
def test_fasta_reference_sequence_missing_data_char(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
>n0
01G345678T
>n1
01A345678C
>n2
01A345678C
>n5
QQQQQQQQQQ
"""
)
assert expected == self.ts().as_fasta(
reference_sequence=ref, missing_data_character="Q"
)
@pytest.mark.skip("Missing data in alignments: #1896")
def test_fasta_impute_missing(self):
ref = "N" * 10
expected = textwrap.dedent(
"""\
>n0
NNGNNNNNNT
>n1
NNANNNNNNC
>n2
NNANNNNNNC
>n5
NNANNNNNNT
"""
)
assert expected == self.ts().as_fasta(
reference_sequence=ref, isolated_as_missing=False
)
# Note: the nexus tree output isn't compatible with our representation of
# missing data as trees with isolated roots (newick parsers won't accept
# this as valid input), so we set include_trees=False for these examples.
@pytest.mark.skip("Missing data in alignments: #1896")
def test_nexus_reference_sequence(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
#NEXUS
BEGIN TAXA;
DIMENSIONS NTAX=4;
TAXLABELS n0 n1 n2 n5;
END;
BEGIN DATA;
DIMENSIONS NCHAR=10;
FORMAT DATATYPE=DNA MISSING=?;
MATRIX
n0 01G345678T
n1 01A345678C
n2 01A345678C
n5 ??????????
;
END;
"""
)
assert expected == self.ts().as_nexus(
reference_sequence=ref, include_trees=False
)
@pytest.mark.skip("Missing data in alignments: #1896")
def test_nexus_reference_sequence_missing_data_char(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
#NEXUS
BEGIN TAXA;
DIMENSIONS NTAX=4;
TAXLABELS n0 n1 n2 n5;
END;
BEGIN DATA;
DIMENSIONS NCHAR=10;
FORMAT DATATYPE=DNA MISSING=Q;
MATRIX
n0 01G345678T
n1 01A345678C
n2 01A345678C
n5 QQQQQQQQQQ
;
END;
"""
)
assert expected == self.ts().as_nexus(
reference_sequence=ref,
missing_data_character="Q",
include_trees=False,
)
@pytest.mark.skip("Missing data in alignments: #1896")
def test_nexus_impute_missing(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
#NEXUS
BEGIN TAXA;
DIMENSIONS NTAX=4;
TAXLABELS n0 n1 n2 n5;
END;
BEGIN DATA;
DIMENSIONS NCHAR=10;
FORMAT DATATYPE=DNA MISSING=?;
MATRIX
n0 01G345678T
n1 01A345678C
n2 01A345678C
n5 01A345678T
;
END;
"""
)
assert expected == self.ts().as_nexus(
reference_sequence=ref,
isolated_as_missing=False,
include_trees=False,
)
class TestMultiRootExample:
# 1.00┊ 4 5 ┊
# ┊ ┏┻┓ ┏┻┓ ┊
# 0.00┊ 0 1 2 3 ┊
# 0 10
# | |
# pos 2 8
# anc G C
@tests.cached_example
def ts(self):
tree = tskit.Tree.generate_balanced(4, arity=2, span=10)
tables = tree.tree_sequence.dump_tables()
edges = tables.edges.copy()
tables.edges.clear()
for edge in edges:
if edge.parent != 6:
tables.edges.append(edge)
tables.sites.add_row(2, ancestral_state="G")
tables.sites.add_row(8, ancestral_state="C")
tables.mutations.add_row(site=0, node=0, derived_state="T")
tables.mutations.add_row(site=1, node=5, derived_state="A")
return tables.tree_sequence()
def test_haplotypes(self):
H = list(self.ts().haplotypes())
assert H[0] == "TC"
assert H[1] == "GC"
assert H[2] == "GA"
assert H[3] == "GA"
def test_genotypes(self):
G = self.ts().genotype_matrix()
Gp = [[1, 0, 0, 0], [0, 0, 1, 1]]
np.testing.assert_array_equal(G, Gp)
@pytest.mark.skip("Reference sequence not implemented #1888")
def test_alignments_default(self):
A = list(self.ts().alignments())
assert A[0] == "NNTNNNNNCN"
assert A[1] == "NNGNNNNNCN"
assert A[2] == "NNGNNNNNAN"
assert A[3] == "NNGNNNNNAN"
def test_alignments_N_ref(self):
A = list(self.ts().alignments(reference_sequence="N" * 10))
assert A[0] == "NNTNNNNNCN"
assert A[1] == "NNGNNNNNCN"
assert A[2] == "NNGNNNNNAN"
assert A[3] == "NNGNNNNNAN"
def test_fasta_reference_sequence(self):
ref = "0123456789"
expected = textwrap.dedent(
"""\
>n0
01T34567C9
>n1
01G34567C9
>n2
01G34567A9
>n3
01G34567A9
"""
)
assert expected == self.ts().as_fasta(reference_sequence=ref)
class TestAlignmentsErrors:
@tests.cached_example
def simplest_ts(self):
tables = tskit.TableCollection(1)
tables.nodes.add_row(flags=1, time=0)
return tables.tree_sequence()
def test_non_discrete_genome(self):
ts = tskit.TableCollection(1.1).tree_sequence()
assert not ts.discrete_genome
with pytest.raises(ValueError, match="defined for discrete genomes"):
list(ts.alignments())
def test_no_reference(self):
ts = tskit.TableCollection(1).tree_sequence()
with pytest.raises(ValueError, match="1888"):
list(ts.alignments())
@pytest.mark.parametrize("ref", ["", "xy"])
def test_reference_sequence_length_mismatch(self, ref):
ts = self.simplest_ts()
with pytest.raises(ValueError, match="same length"):
list(ts.alignments(reference_sequence=ref))
@pytest.mark.parametrize("ref", ["À", "┃", "α"])
def test_non_ascii_references(self, ref):
ts = self.simplest_ts()
with pytest.raises(UnicodeEncodeError):
list(ts.alignments(reference_sequence=ref))
@pytest.mark.skip("Missing data in alignments: #1896")
@pytest.mark.parametrize("missing_data_char", ["À", "┃", "α"])
def test_non_ascii_missing_data_char(self, missing_data_char):
ts = self.simplest_ts()
with pytest.raises(UnicodeEncodeError):
list(
ts.alignments(
reference_sequence="-", missing_data_character=missing_data_char
)
)
class TestAlignmentExamples:
@pytest.mark.skip("Reference sequence not implemented #1888")
@pytest.mark.parametrize("ts", get_example_discrete_genome_tree_sequences())
def test_defaults(self, ts):
A = list(ts.alignments())
assert len(A) == ts.num_samples
H = list(ts.haplotypes())
pos = ts.tables.sites.position.astype(int)
for a, h in map(np.array, zip(A, H)):
last = 0
for j, x in enumerate(pos):
assert a[last:x] == "-" * (x - last)
assert a[x] == h[j]
last = x + 1
@pytest.mark.parametrize("ts", get_example_discrete_genome_tree_sequences())
def test_reference_sequence(self, ts):
ref = tskit.random_nucleotides(ts.sequence_length, seed=1234)
if any(tree.num_roots > 1 for tree in ts.trees()):
with pytest.raises(ValueError, match="1896"):
list(ts.alignments(reference_sequence=ref))
else:
A = list(ts.alignments(reference_sequence=ref))
assert len(A) == ts.num_samples
H = list(ts.haplotypes())
pos = ts.tables.sites.position.astype(int)
for a, h in map(np.array, zip(A, H)):
last = 0
for j, x in enumerate(pos):
assert a[last:x] == ref[last:x]
assert a[x] == h[j]
last = x + 1
assert a[last:] == ref[last:]
|
[
"msprime.RecombinationMap.uniform_map",
"numpy.empty",
"numpy.ones",
"msprime.InfiniteSites",
"numpy.arange",
"pytest.mark.parametrize",
"pytest.mark.skip",
"numpy.zeros_like",
"random.randint",
"pytest.warns",
"tskit.TableCollection",
"itertools.permutations",
"itertools.zip_longest",
"pytest.raises",
"tests.tsutil.jukes_cantor",
"tskit.random_nucleotides",
"numpy.testing.assert_array_equal",
"tests.test_highlevel.get_example_tree_sequences",
"numpy.frombuffer",
"random.random",
"msprime.simulate",
"msprime.mutate",
"numpy.all",
"msprime.SimpleBottleneck",
"textwrap.dedent",
"tests.tsutil.insert_multichar_mutations",
"random.uniform",
"tests.test_wright_fisher.wf_sim",
"numpy.zeros",
"numpy.where",
"numpy.array",
"tests.tsutil.insert_branch_mutations",
"numpy.array_equal",
"tskit.Tree.generate_balanced"
] |
[((1739, 1767), 'tests.test_highlevel.get_example_tree_sequences', 'get_example_tree_sequences', ([], {}), '()\n', (1765, 1767), False, 'from tests.test_highlevel import get_example_tree_sequences\n'), ((2312, 2365), 'numpy.zeros', 'np.zeros', (['(ts.num_nodes, ts.num_sites)'], {'dtype': 'np.int8'}), '((ts.num_nodes, ts.num_sites), dtype=np.int8)\n', (2320, 2365), True, 'import numpy as np\n'), ((43203, 43263), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (43219, 43263), False, 'import pytest\n'), ((43458, 43518), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (43474, 43518), False, 'import pytest\n'), ((46689, 46749), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (46705, 46749), False, 'import pytest\n'), ((47215, 47268), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (47231, 47268), False, 'import pytest\n'), ((47600, 47660), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (47616, 47660), False, 'import pytest\n'), ((47922, 47982), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (47938, 47982), False, 'import pytest\n'), ((48248, 48301), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (48264, 48301), False, 'import pytest\n'), ((48592, 48645), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (48608, 48645), False, 'import pytest\n'), ((49004, 49057), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (49020, 49057), False, 'import pytest\n'), ((49441, 49494), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (49457, 49494), False, 'import pytest\n'), ((49946, 49999), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (49962, 49999), False, 'import pytest\n'), ((50657, 50710), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (50673, 50710), False, 'import pytest\n'), ((51389, 51442), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (51405, 51442), False, 'import pytest\n'), ((52192, 52245), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (52208, 52245), False, 'import pytest\n'), ((54099, 54159), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (54115, 54159), False, 'import pytest\n'), ((55650, 55692), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ref"""', "['', 'xy']"], {}), "('ref', ['', 'xy'])\n", (55673, 55692), False, 'import 
pytest\n'), ((55908, 55955), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ref"""', "['À', '┃', 'α']"], {}), "('ref', ['À', '┃', 'α'])\n", (55931, 55955), False, 'import pytest\n'), ((56144, 56197), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Missing data in alignments: #1896"""'], {}), "('Missing data in alignments: #1896')\n", (56160, 56197), False, 'import pytest\n'), ((56203, 56264), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""missing_data_char"""', "['À', '┃', 'α']"], {}), "('missing_data_char', ['À', '┃', 'α'])\n", (56226, 56264), False, 'import pytest\n'), ((56614, 56674), 'pytest.mark.skip', 'pytest.mark.skip', (['"""Reference sequence not implemented #1888"""'], {}), "('Reference sequence not implemented #1888')\n", (56630, 56674), False, 'import pytest\n'), ((3442, 3462), 'numpy.array_equal', 'np.array_equal', (['A', 'B'], {}), '(A, B)\n', (3456, 3462), True, 'import numpy as np\n'), ((3509, 3562), 'msprime.simulate', 'msprime.simulate', (['(5)'], {'mutation_rate': '(1)', 'random_seed': '(234)'}), '(5, mutation_rate=1, random_seed=234)\n', (3525, 3562), False, 'import msprime\n'), ((3632, 3709), 'msprime.simulate', 'msprime.simulate', (['(8)'], {'recombination_rate': '(10)', 'mutation_rate': '(10)', 'random_seed': '(234)'}), '(8, recombination_rate=10, mutation_rate=10, random_seed=234)\n', (3648, 3709), False, 'import msprime\n'), ((3879, 3930), 'msprime.simulate', 'msprime.simulate', (['(6)'], {'random_seed': '(1)', 'mutation_rate': '(1)'}), '(6, random_seed=1, mutation_rate=1)\n', (3895, 3930), False, 'import msprime\n'), ((3944, 3983), 'tests.tsutil.jukes_cantor', 'tsutil.jukes_cantor', (['ts', '(20)', '(1)'], {'seed': '(10)'}), '(ts, 20, 1, seed=10)\n', (3963, 3983), True, 'import tests.tsutil as tsutil\n'), ((4074, 4125), 'msprime.simulate', 'msprime.simulate', (['(6)'], {'random_seed': '(1)', 'mutation_rate': '(1)'}), '(6, random_seed=1, mutation_rate=1)\n', (4090, 4125), False, 'import msprime\n'), ((4139, 4176), 'tests.tsutil.insert_multichar_mutations', 'tsutil.insert_multichar_mutations', (['ts'], {}), '(ts)\n', (4172, 4176), True, 'import tests.tsutil as tsutil\n'), ((4261, 4334), 'msprime.simulate', 'msprime.simulate', (['(6)'], {'recombination_rate': '(2)', 'mutation_rate': '(2)', 'random_seed': '(1)'}), '(6, recombination_rate=2, mutation_rate=2, random_seed=1)\n', (4277, 4334), False, 'import msprime\n'), ((4494, 4585), 'tests.test_wright_fisher.wf_sim', 'wf.wf_sim', (['(6)', '(5)'], {'seed': '(3)', 'deep_history': '(True)', 'initial_generation_samples': '(True)', 'num_loci': '(2)'}), '(6, 5, seed=3, deep_history=True, initial_generation_samples=True,\n num_loci=2)\n', (4503, 4585), True, 'import tests.test_wright_fisher as wf\n'), ((4849, 4943), 'tests.test_wright_fisher.wf_sim', 'wf.wf_sim', (['(9)', '(10)'], {'seed': '(1)', 'deep_history': '(True)', 'initial_generation_samples': '(False)', 'num_loci': '(5)'}), '(9, 10, seed=1, deep_history=True, initial_generation_samples=\n False, num_loci=5)\n', (4858, 4943), True, 'import tests.test_wright_fisher as wf\n'), ((5104, 5150), 'msprime.mutate', 'msprime.mutate', (['ts'], {'rate': '(0.2)', 'random_seed': '(1234)'}), '(ts, rate=0.2, random_seed=1234)\n', (5118, 5150), False, 'import msprime\n'), ((5254, 5280), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (5275, 5280), False, 'import tskit\n'), ((5687, 5720), 'numpy.zeros', 'np.zeros', (['ts.num_nodes'], {'dtype': 'int'}), '(ts.num_nodes, dtype=int)\n', (5695, 5720), True, 'import numpy as np\n'), 
((6318, 6408), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'length': '(10)', 'recombination_rate': '(1)', 'mutation_rate': '(10)', 'random_seed': '(3)'}), '(10, length=10, recombination_rate=1, mutation_rate=10,\n random_seed=3)\n', (6334, 6408), False, 'import msprime\n'), ((6636, 6664), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': '"""u1"""'}), "((m, n), dtype='u1')\n", (6644, 6664), True, 'import numpy as np\n'), ((6677, 6705), 'numpy.zeros', 'np.zeros', (['(m, n)'], {'dtype': '"""u1"""'}), "((m, n), dtype='u1')\n", (6685, 6705), True, 'import numpy as np\n'), ((7000, 7014), 'numpy.all', 'np.all', (['(A == B)'], {}), '(A == B)\n', (7006, 7014), True, 'import numpy as np\n'), ((11715, 11735), 'msprime.simulate', 'msprime.simulate', (['(10)'], {}), '(10)\n', (11731, 11735), False, 'import msprime\n'), ((11939, 11994), 'numpy.empty', 'np.empty', (['(ts.num_sites, ts.num_samples)'], {'dtype': 'np.int8'}), '((ts.num_sites, ts.num_samples), dtype=np.int8)\n', (11947, 11994), True, 'import numpy as np\n'), ((12116, 12137), 'numpy.array_equal', 'np.array_equal', (['G', 'G2'], {}), '(G, G2)\n', (12130, 12137), True, 'import numpy as np\n'), ((14159, 14246), 'msprime.simulate', 'msprime.simulate', (['n'], {'length': '(5)', 'recombination_rate': '(1)', 'mutation_rate': '(5)', 'random_seed': '(2)'}), '(n, length=5, recombination_rate=1, mutation_rate=5,\n random_seed=2)\n', (14175, 14246), False, 'import msprime\n'), ((15005, 15092), 'msprime.simulate', 'msprime.simulate', (['n'], {'length': '(5)', 'recombination_rate': '(1)', 'mutation_rate': '(5)', 'random_seed': '(2)'}), '(n, length=5, recombination_rate=1, mutation_rate=5,\n random_seed=2)\n', (15021, 15092), False, 'import msprime\n'), ((16168, 16256), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'length': '(5)', 'recombination_rate': '(1)', 'mutation_rate': '(5)', 'random_seed': '(2)'}), '(10, length=5, recombination_rate=1, mutation_rate=5,\n random_seed=2)\n', (16184, 16256), False, 'import msprime\n'), ((17850, 17884), 'msprime.simulate', 'msprime.simulate', (['(5)'], {'random_seed': '(2)'}), '(5, random_seed=2)\n', (17866, 17884), False, 'import msprime\n'), ((17898, 17935), 'tests.tsutil.jukes_cantor', 'tsutil.jukes_cantor', (['ts', '(5)', '(1)'], {'seed': '(2)'}), '(ts, 5, 1, seed=2)\n', (17917, 17935), True, 'import tests.tsutil as tsutil\n'), ((18024, 18059), 'msprime.simulate', 'msprime.simulate', (['(20)'], {'random_seed': '(2)'}), '(20, random_seed=2)\n', (18040, 18059), False, 'import msprime\n'), ((18073, 18110), 'tests.tsutil.jukes_cantor', 'tsutil.jukes_cantor', (['ts', '(5)', '(1)'], {'seed': '(2)'}), '(ts, 5, 1, seed=2)\n', (18092, 18110), True, 'import tests.tsutil as tsutil\n'), ((18205, 18257), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)', 'mutation_rate': '(2)'}), '(10, random_seed=2, mutation_rate=2)\n', (18221, 18257), False, 'import msprime\n'), ((18675, 18694), 'numpy.all', 'np.all', (['(Gnm[0] == 0)'], {}), '(Gnm[0] == 0)\n', (18681, 18694), True, 'import numpy as np\n'), ((18710, 18729), 'numpy.all', 'np.all', (['(Gnm[1] == 0)'], {}), '(Gnm[1] == 0)\n', (18716, 18729), True, 'import numpy as np\n'), ((18745, 18765), 'numpy.all', 'np.all', (['(Gnm[-1] == 0)'], {}), '(Gnm[-1] == 0)\n', (18751, 18765), True, 'import numpy as np\n'), ((18781, 18801), 'numpy.all', 'np.all', (['(Gnm[-2] == 0)'], {}), '(Gnm[-2] == 0)\n', (18787, 18801), True, 'import numpy as np\n'), ((18867, 18886), 'numpy.all', 'np.all', (['(Gm[0] == -1)'], {}), '(Gm[0] == -1)\n', (18873, 18886), True, 
'import numpy as np\n'), ((18902, 18921), 'numpy.all', 'np.all', (['(Gm[1] == -1)'], {}), '(Gm[1] == -1)\n', (18908, 18921), True, 'import numpy as np\n'), ((18937, 18957), 'numpy.all', 'np.all', (['(Gm[-1] == -1)'], {}), '(Gm[-1] == -1)\n', (18943, 18957), True, 'import numpy as np\n'), ((18973, 18993), 'numpy.all', 'np.all', (['(Gm[-2] == -1)'], {}), '(Gm[-2] == -1)\n', (18979, 18993), True, 'import numpy as np\n'), ((19068, 19091), 'numpy.array_equal', 'np.array_equal', (['Gm', 'Gm2'], {}), '(Gm, Gm2)\n', (19082, 19091), True, 'import numpy as np\n'), ((19245, 19268), 'numpy.array_equal', 'np.array_equal', (['Gnm', 'Gi'], {}), '(Gnm, Gi)\n', (19259, 19268), True, 'import numpy as np\n'), ((19390, 19413), 'numpy.array_equal', 'np.array_equal', (['Gm', 'Gni'], {}), '(Gm, Gni)\n', (19404, 19413), True, 'import numpy as np\n'), ((19560, 19582), 'numpy.array_equal', 'np.array_equal', (['Gnm', 'G'], {}), '(Gnm, G)\n', (19574, 19582), True, 'import numpy as np\n'), ((19728, 19749), 'numpy.array_equal', 'np.array_equal', (['Gm', 'G'], {}), '(Gm, G)\n', (19742, 19749), True, 'import numpy as np\n'), ((19810, 19836), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (19831, 19836), False, 'import tskit\n'), ((20212, 20239), 'numpy.all', 'np.all', (['(var.genotypes == -1)'], {}), '(var.genotypes == -1)\n', (20218, 20239), True, 'import numpy as np\n'), ((20362, 20388), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (20383, 20388), False, 'import tskit\n'), ((20833, 20859), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (20854, 20859), False, 'import tskit\n'), ((22088, 22114), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (22109, 22114), False, 'import tskit\n'), ((22648, 22674), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (22669, 22674), False, 'import tskit\n'), ((23302, 23394), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'length': '(10)', 'recombination_rate': '(0.1)', 'mutation_rate': '(10)', 'random_seed': '(3)'}), '(10, length=10, recombination_rate=0.1, mutation_rate=10,\n random_seed=3)\n', (23318, 23394), False, 'import msprime\n'), ((24774, 24866), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'length': '(10)', 'recombination_rate': '(0.1)', 'mutation_rate': '(10)', 'random_seed': '(3)'}), '(10, length=10, recombination_rate=0.1, mutation_rate=10,\n random_seed=3)\n', (24790, 24866), False, 'import msprime\n'), ((27356, 27384), 'numpy.zeros', 'np.zeros', (['(n, m)'], {'dtype': '"""u1"""'}), "((n, m), dtype='u1')\n", (27364, 27384), True, 'import numpy as np\n'), ((27397, 27425), 'numpy.zeros', 'np.zeros', (['(n, m)'], {'dtype': '"""u1"""'}), "((n, m), dtype='u1')\n", (27405, 27425), True, 'import numpy as np\n'), ((27689, 27703), 'numpy.all', 'np.all', (['(A == B)'], {}), '(A == B)\n', (27695, 27703), True, 'import numpy as np\n'), ((27905, 27950), 'msprime.RecombinationMap.uniform_map', 'msprime.RecombinationMap.uniform_map', (['m', 'r', 'm'], {}), '(m, r, m)\n', (27941, 27950), False, 'import msprime\n'), ((27975, 28045), 'msprime.simulate', 'msprime.simulate', (['n'], {'recombination_map': 'recomb_map', 'mutation_rate': 'theta'}), '(n, recombination_map=recomb_map, mutation_rate=theta)\n', (27991, 28045), False, 'import msprime\n'), ((28701, 28807), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'length': '(100)', 'recombination_rate': '(1)', 'demographic_events': 'bottlenecks', 'random_seed': '(1)'}), '(10, 
length=100, recombination_rate=1, demographic_events=\n bottlenecks, random_seed=1)\n', (28717, 28807), False, 'import msprime\n'), ((28961, 28999), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(10)'}), '(10, mutation_rate=10)\n', (28977, 28999), False, 'import msprime\n'), ((29839, 29874), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (29855, 29874), False, 'import msprime\n'), ((30112, 30147), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (30128, 30147), False, 'import msprime\n'), ((30375, 30410), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (30391, 30410), False, 'import msprime\n'), ((30678, 30713), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (30694, 30713), False, 'import msprime\n'), ((31243, 31278), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (31259, 31278), False, 'import msprime\n'), ((31840, 31875), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (31856, 31875), False, 'import msprime\n'), ((32078, 32104), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.0)'], {}), '(1.0)\n', (32099, 32104), False, 'import tskit\n'), ((33900, 33952), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(5)', 'random_seed': '(2)'}), '(10, mutation_rate=5, random_seed=2)\n', (33916, 33952), False, 'import msprime\n'), ((34086, 34108), 'numpy.array_equal', 'np.array_equal', (['G1', 'G2'], {}), '(G1, G2)\n', (34100, 34108), True, 'import numpy as np\n'), ((34429, 34481), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(5)', 'random_seed': '(2)'}), '(10, mutation_rate=5, random_seed=2)\n', (34445, 34481), False, 'import msprime\n'), ((34655, 34677), 'numpy.array_equal', 'np.array_equal', (['G1', 'G2'], {}), '(G1, G2)\n', (34669, 34677), True, 'import numpy as np\n'), ((34991, 35043), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(5)', 'random_seed': '(2)'}), '(10, mutation_rate=5, random_seed=2)\n', (35007, 35043), False, 'import msprime\n'), ((35218, 35244), 'numpy.array_equal', 'np.array_equal', (['(G1 + 3)', 'G2'], {}), '(G1 + 3, G2)\n', (35232, 35244), True, 'import numpy as np\n'), ((35564, 35616), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(5)', 'random_seed': '(2)'}), '(10, mutation_rate=5, random_seed=2)\n', (35580, 35616), False, 'import msprime\n'), ((35782, 35799), 'numpy.where', 'np.where', (['(G1 == 1)'], {}), '(G1 == 1)\n', (35790, 35799), True, 'import numpy as np\n'), ((35837, 35859), 'numpy.array_equal', 'np.array_equal', (['G1', 'G2'], {}), '(G1, G2)\n', (35851, 35859), True, 'import numpy as np\n'), ((36239, 36274), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (36255, 36274), False, 'import msprime\n'), ((36975, 37010), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'random_seed': '(2)'}), '(10, random_seed=2)\n', (36991, 37010), False, 'import msprime\n'), ((37685, 37737), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(5)', 'random_seed': '(2)'}), '(10, mutation_rate=5, random_seed=2)\n', (37701, 37737), False, 'import msprime\n'), ((38100, 38152), 'msprime.simulate', 'msprime.simulate', (['(10)'], {'mutation_rate': '(5)', 'random_seed': '(2)'}), 
'(10, mutation_rate=5, random_seed=2)\n', (38116, 38152), False, 'import msprime\n'), ((38379, 38403), 'tskit.TableCollection', 'tskit.TableCollection', (['(1)'], {}), '(1)\n', (38400, 38403), False, 'import tskit\n'), ((39879, 39930), 'msprime.simulate', 'msprime.simulate', (['(5)'], {'mutation_rate': '(2)', 'random_seed': '(3)'}), '(5, mutation_rate=2, random_seed=3)\n', (39895, 39930), False, 'import msprime\n'), ((40322, 40356), 'msprime.simulate', 'msprime.simulate', (['(5)'], {'random_seed': '(3)'}), '(5, random_seed=3)\n', (40338, 40356), False, 'import msprime\n'), ((40816, 40867), 'msprime.simulate', 'msprime.simulate', (['(6)'], {'random_seed': '(1)', 'mutation_rate': '(1)'}), '(6, random_seed=1, mutation_rate=1)\n', (40832, 40867), False, 'import msprime\n'), ((40881, 40920), 'tests.tsutil.jukes_cantor', 'tsutil.jukes_cantor', (['ts', '(20)', '(1)'], {'seed': '(10)'}), '(ts, 20, 1, seed=10)\n', (40900, 40920), True, 'import tests.tsutil as tsutil\n'), ((41228, 41284), 'msprime.simulate', 'msprime.simulate', (['(6)'], {'random_seed': '(1)', 'recombination_rate': '(2)'}), '(6, random_seed=1, recombination_rate=2)\n', (41244, 41284), False, 'import msprime\n'), ((41298, 41335), 'tests.tsutil.insert_multichar_mutations', 'tsutil.insert_multichar_mutations', (['ts'], {}), '(ts)\n', (41331, 41335), True, 'import tests.tsutil as tsutil\n'), ((41652, 41703), 'msprime.simulate', 'msprime.simulate', (['(6)'], {'mutation_rate': '(2)', 'random_seed': '(3)'}), '(6, mutation_rate=2, random_seed=3)\n', (41668, 41703), False, 'import msprime\n'), ((43160, 43196), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['G', 'Gp'], {}), '(G, Gp)\n', (43189, 43196), True, 'import numpy as np\n'), ((44464, 44626), 'textwrap.dedent', 'textwrap.dedent', (['""" >n0\n 01G345678T\n >n1\n 01A345678C\n >n2\n 01A345678C\n """'], {}), '(\n """ >n0\n 01G345678T\n >n1\n 01A345678C\n >n2\n 01A345678C\n """\n )\n', (44479, 44626), False, 'import textwrap\n'), ((44803, 45309), 'textwrap.dedent', 'textwrap.dedent', (['""" #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=3;\n TAXLABELS n0 n1 n2;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n ;\n END;\n BEGIN TREES;\n TREE t0^10 = [&R] (n0:2,(n1:1,n2:1):1);\n END;\n """'], {}), '(\n """ #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=3;\n TAXLABELS n0 n1 n2;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n ;\n END;\n BEGIN TREES;\n TREE t0^10 = [&R] (n0:2,(n1:1,n2:1):1);\n END;\n """\n )\n', (44818, 45309), False, 'import textwrap\n'), ((46646, 46682), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['G', 'Gp'], {}), '(G, Gp)\n', (46675, 46682), True, 'import numpy as np\n'), ((49149, 49350), 'textwrap.dedent', 'textwrap.dedent', (['""" >n0\n 01G345678T\n >n1\n 01A345678C\n >n2\n 01A345678C\n >n5\n NNNNNNNNNN\n """'], {}), '(\n """ >n0\n 01G345678T\n >n1\n 01A345678C\n >n2\n 01A345678C\n >n5\n NNNNNNNNNN\n """\n )\n', (49164, 49350), False, 'import textwrap\n'), ((49604, 49805), 'textwrap.dedent', 'textwrap.dedent', (['""" >n0\n 01G345678T\n >n1\n 01A345678C\n >n2\n 01A345678C\n >n5\n QQQQQQQQQQ\n """'], {}), '(\n """ >n0\n 01G345678T\n >n1\n 01A345678C\n >n2\n 01A345678C\n >n5\n QQQQQQQQQQ\n """\n )\n', (49619, 49805), False, 'import textwrap\n'), ((50083, 50284), 'textwrap.dedent', 'textwrap.dedent', (['""" >n0\n NNGNNNNNNT\n >n1\n NNANNNNNNC\n >n2\n NNANNNNNNC\n >n5\n NNANNNNNNT\n """'], 
{}), '(\n """ >n0\n NNGNNNNNNT\n >n1\n NNANNNNNNC\n >n2\n NNANNNNNNC\n >n5\n NNANNNNNNT\n """\n )\n', (50098, 50284), False, 'import textwrap\n'), ((50802, 51255), 'textwrap.dedent', 'textwrap.dedent', (['""" #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=4;\n TAXLABELS n0 n1 n2 n5;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA MISSING=?;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n n5 ??????????\n ;\n END;\n """'], {}), '(\n """ #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=4;\n TAXLABELS n0 n1 n2 n5;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA MISSING=?;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n n5 ??????????\n ;\n END;\n """\n )\n', (50817, 51255), False, 'import textwrap\n'), ((51552, 52005), 'textwrap.dedent', 'textwrap.dedent', (['""" #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=4;\n TAXLABELS n0 n1 n2 n5;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA MISSING=Q;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n n5 QQQQQQQQQQ\n ;\n END;\n """'], {}), '(\n """ #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=4;\n TAXLABELS n0 n1 n2 n5;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA MISSING=Q;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n n5 QQQQQQQQQQ\n ;\n END;\n """\n )\n', (51567, 52005), False, 'import textwrap\n'), ((52333, 52786), 'textwrap.dedent', 'textwrap.dedent', (['""" #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=4;\n TAXLABELS n0 n1 n2 n5;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA MISSING=?;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n n5 01A345678T\n ;\n END;\n """'], {}), '(\n """ #NEXUS\n BEGIN TAXA;\n DIMENSIONS NTAX=4;\n TAXLABELS n0 n1 n2 n5;\n END;\n BEGIN DATA;\n DIMENSIONS NCHAR=10;\n FORMAT DATATYPE=DNA MISSING=?;\n MATRIX\n n0 01G345678T\n n1 01A345678C\n n2 01A345678C\n n5 01A345678T\n ;\n END;\n """\n )\n', (52348, 52786), False, 'import textwrap\n'), ((53203, 53252), 'tskit.Tree.generate_balanced', 'tskit.Tree.generate_balanced', (['(4)'], {'arity': '(2)', 'span': '(10)'}), '(4, arity=2, span=10)\n', (53231, 53252), False, 'import tskit\n'), ((54056, 54092), 'numpy.testing.assert_array_equal', 'np.testing.assert_array_equal', (['G', 'Gp'], {}), '(G, Gp)\n', (54085, 54092), True, 'import numpy as np\n'), ((54726, 54927), 'textwrap.dedent', 'textwrap.dedent', (['""" >n0\n 01T34567C9\n >n1\n 01G34567C9\n >n2\n 01G34567A9\n >n3\n 01G34567A9\n """'], {}), '(\n """ >n0\n 01T34567C9\n >n1\n 01G34567C9\n >n2\n 01G34567A9\n >n3\n 01G34567A9\n """\n )\n', (54741, 54927), False, 'import textwrap\n'), ((55112, 55136), 'tskit.TableCollection', 'tskit.TableCollection', (['(1)'], {}), '(1)\n', (55133, 55136), False, 'import tskit\n'), ((57312, 57367), 'tskit.random_nucleotides', 'tskit.random_nucleotides', (['ts.sequence_length'], {'seed': '(1234)'}), '(ts.sequence_length, seed=1234)\n', (57336, 57367), False, 'import tskit\n'), ((7255, 7274), 'numpy.all', 'np.all', (['(A[j] == row)'], {}), '(A[j] == row)\n', (7261, 7274), True, 'import numpy as np\n'), ((7397, 7422), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (7410, 7422), False, 'import pytest\n'), ((7951, 7994), 'numpy.array_equal', 'np.array_equal', (['G[var.index]', 'var.genotypes'], {}), '(G[var.index], var.genotypes)\n', (7965, 7994), True, 'import numpy as np\n'), ((8014, 8051), 'numpy.all', 'np.all', (['(G[var.index] == var.genotypes)'], {}), '(G[var.index] == var.genotypes)\n', (8020, 8051), True, 'import numpy as np\n'), ((8246, 8289), 
'numpy.array_equal', 'np.array_equal', (['G[var.index]', 'var.genotypes'], {}), '(G[var.index], var.genotypes)\n', (8260, 8289), True, 'import numpy as np\n'), ((14428, 14462), 'itertools.permutations', 'itertools.permutations', (['samples', 'j'], {}), '(samples, j)\n', (14450, 14462), False, 'import itertools\n'), ((15470, 15504), 'itertools.permutations', 'itertools.permutations', (['samples', 'j'], {}), '(samples, j)\n', (15492, 15504), False, 'import itertools\n'), ((16971, 17017), 'numpy.array_equal', 'np.array_equal', (['var1.genotypes', 'var2.genotypes'], {}), '(var1.genotypes, var2.genotypes)\n', (16985, 17017), True, 'import numpy as np\n'), ((19139, 19166), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (19151, 19166), False, 'import pytest\n'), ((19282, 19309), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (19294, 19309), False, 'import pytest\n'), ((19428, 19455), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (19440, 19455), False, 'import pytest\n'), ((19596, 19623), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (19608, 19623), False, 'import pytest\n'), ((24159, 24204), 'numpy.array_equal', 'np.array_equal', (['var.genotypes', 'G[var.site.id]'], {}), '(var.genotypes, G[var.site.id])\n', (24173, 24204), True, 'import numpy as np\n'), ((24648, 24693), 'numpy.array_equal', 'np.array_equal', (['var.genotypes', 'G[var.site.id]'], {}), '(var.genotypes, G[var.site.id])\n', (24662, 24693), True, 'import numpy as np\n'), ((26340, 26385), 'numpy.array_equal', 'np.array_equal', (['var.genotypes', 'G[var.site.id]'], {}), '(var.genotypes, G[var.site.id])\n', (26354, 26385), True, 'import numpy as np\n'), ((28242, 28263), 'random.randint', 'random.randint', (['(2)', '(50)'], {}), '(2, 50)\n', (28256, 28263), False, 'import random\n'), ((28280, 28303), 'random.randint', 'random.randint', (['(10)', '(200)'], {}), '(10, 200)\n', (28294, 28303), False, 'import random\n'), ((28320, 28335), 'random.random', 'random.random', ([], {}), '()\n', (28333, 28335), False, 'import random\n'), ((28356, 28376), 'random.uniform', 'random.uniform', (['(0)', '(2)'], {}), '(0, 2)\n', (28370, 28376), False, 'import random\n'), ((28501, 28551), 'msprime.SimpleBottleneck', 'msprime.SimpleBottleneck', (['(0.01)', '(0)'], {'proportion': '(0.05)'}), '(0.01, 0, proportion=0.05)\n', (28525, 28551), False, 'import msprime\n'), ((28565, 28615), 'msprime.SimpleBottleneck', 'msprime.SimpleBottleneck', (['(0.02)', '(0)'], {'proportion': '(0.25)'}), '(0.02, 0, proportion=0.25)\n', (28589, 28615), False, 'import msprime\n'), ((28629, 28676), 'msprime.SimpleBottleneck', 'msprime.SimpleBottleneck', (['(0.03)', '(0)'], {'proportion': '(1)'}), '(0.03, 0, proportion=1)\n', (28653, 28676), False, 'import msprime\n'), ((29992, 30016), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (30005, 30016), False, 'import pytest\n'), ((30261, 30285), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (30274, 30285), False, 'import pytest\n'), ((30550, 30574), 'pytest.raises', 'pytest.raises', (['TypeError'], {}), '(TypeError)\n', (30563, 30574), False, 'import pytest\n'), ((31921, 31984), 'tests.tsutil.insert_branch_mutations', 'tsutil.insert_branch_mutations', (['base_ts'], {'mutations_per_branch': 'j'}), '(base_ts, mutations_per_branch=j)\n', (31951, 31984), True, 'import tests.tsutil as tsutil\n'), ((32301, 32326), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), 
'(ValueError)\n', (32314, 32326), False, 'import pytest\n'), ((32805, 32832), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (32817, 32832), False, 'import pytest\n'), ((32940, 32967), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (32952, 32967), False, 'import pytest\n'), ((33076, 33103), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (33088, 33103), False, 'import pytest\n'), ((33237, 33264), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (33249, 33264), False, 'import pytest\n'), ((33399, 33426), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (33411, 33426), False, 'import pytest\n'), ((33561, 33588), 'pytest.warns', 'pytest.warns', (['FutureWarning'], {}), '(FutureWarning)\n', (33573, 33588), False, 'import pytest\n'), ((34325, 34367), 'numpy.array_equal', 'np.array_equal', (['v1.genotypes', 'v2.genotypes'], {}), '(v1.genotypes, v2.genotypes)\n', (34339, 34367), True, 'import numpy as np\n'), ((34888, 34930), 'numpy.array_equal', 'np.array_equal', (['v1.genotypes', 'v2.genotypes'], {}), '(v1.genotypes, v2.genotypes)\n', (34902, 34930), True, 'import numpy as np\n'), ((35455, 35501), 'numpy.array_equal', 'np.array_equal', (['(v1.genotypes + 3)', 'v2.genotypes'], {}), '(v1.genotypes + 3, v2.genotypes)\n', (35469, 35501), True, 'import numpy as np\n'), ((36100, 36116), 'numpy.where', 'np.where', (['(g == 1)'], {}), '(g == 1)\n', (36108, 36116), True, 'import numpy as np\n'), ((36161, 36192), 'numpy.array_equal', 'np.array_equal', (['g', 'v2.genotypes'], {}), '(g, v2.genotypes)\n', (36175, 36192), True, 'import numpy as np\n'), ((36881, 36924), 'numpy.array_equal', 'np.array_equal', (['v2.genotypes', 'G[v1.site.id]'], {}), '(v2.genotypes, G[v1.site.id])\n', (36895, 36924), True, 'import numpy as np\n'), ((38166, 38191), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (38179, 38191), False, 'import pytest\n'), ((38254, 38279), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (38267, 38279), False, 'import pytest\n'), ((38933, 38955), 'numpy.array_equal', 'np.array_equal', (['G1', 'G2'], {}), '(G1, G2)\n', (38947, 38955), True, 'import numpy as np\n'), ((39184, 39219), 'itertools.zip_longest', 'itertools.zip_longest', (['vars1', 'vars2'], {}), '(vars1, vars2)\n', (39205, 39219), False, 'import itertools\n'), ((42519, 42559), 'tskit.Tree.generate_balanced', 'tskit.Tree.generate_balanced', (['(3)'], {'span': '(10)'}), '(3, span=10)\n', (42547, 42559), False, 'import tskit\n'), ((45675, 45715), 'tskit.Tree.generate_balanced', 'tskit.Tree.generate_balanced', (['(3)'], {'span': '(10)'}), '(3, span=10)\n', (45703, 45715), False, 'import tskit\n'), ((47105, 47144), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""1896"""'}), "(ValueError, match='1896')\n", (47118, 47144), False, 'import pytest\n'), ((55369, 55432), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""defined for discrete genomes"""'}), "(ValueError, match='defined for discrete genomes')\n", (55382, 55432), False, 'import pytest\n'), ((55569, 55608), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""1888"""'}), "(ValueError, match='1888')\n", (55582, 55608), False, 'import pytest\n'), ((55798, 55844), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""same length"""'}), "(ValueError, match='same length')\n", (55811, 55844), False, 'import pytest\n'), ((56047, 56080), 
'pytest.raises', 'pytest.raises', (['UnicodeEncodeError'], {}), '(UnicodeEncodeError)\n', (56060, 56080), False, 'import pytest\n'), ((56377, 56410), 'pytest.raises', 'pytest.raises', (['UnicodeEncodeError'], {}), '(UnicodeEncodeError)\n', (56390, 56410), False, 'import pytest\n'), ((6931, 6973), 'numpy.frombuffer', 'np.frombuffer', (['variant.genotypes', 'np.uint8'], {}), '(variant.genotypes, np.uint8)\n', (6944, 6973), True, 'import numpy as np\n'), ((7182, 7224), 'numpy.frombuffer', 'np.frombuffer', (['variant.genotypes', 'np.uint8'], {}), '(variant.genotypes, np.uint8)\n', (7195, 7224), True, 'import numpy as np\n'), ((8955, 8990), 'itertools.permutations', 'itertools.permutations', (['"""ABCDEF"""', '(4)'], {}), "('ABCDEF', 4)\n", (8977, 8990), False, 'import itertools\n'), ((10302, 10337), 'itertools.permutations', 'itertools.permutations', (['"""ABCDEF"""', '(4)'], {}), "('ABCDEF', 4)\n", (10324, 10337), False, 'import itertools\n'), ((14484, 14511), 'numpy.array', 'np.array', (['s'], {'dtype': 'np.int32'}), '(s, dtype=np.int32)\n', (14492, 14511), True, 'import numpy as np\n'), ((15526, 15553), 'numpy.array', 'np.array', (['s'], {'dtype': 'np.int32'}), '(s, dtype=np.int32)\n', (15534, 15553), True, 'import numpy as np\n'), ((23930, 23973), 'numpy.all', 'np.all', (['(var.genotypes == tskit.MISSING_DATA)'], {}), '(var.genotypes == tskit.MISSING_DATA)\n', (23936, 23973), True, 'import numpy as np\n'), ((24096, 24139), 'numpy.all', 'np.all', (['(var.genotypes != tskit.MISSING_DATA)'], {}), '(var.genotypes != tskit.MISSING_DATA)\n', (24102, 24139), True, 'import numpy as np\n'), ((24469, 24495), 'numpy.all', 'np.all', (['(var.genotypes == 0)'], {}), '(var.genotypes == 0)\n', (24475, 24495), True, 'import numpy as np\n'), ((24585, 24628), 'numpy.all', 'np.all', (['(var.genotypes != tskit.MISSING_DATA)'], {}), '(var.genotypes != tskit.MISSING_DATA)\n', (24591, 24628), True, 'import numpy as np\n'), ((25928, 25975), 'numpy.all', 'np.all', (['(var.genotypes[1:] == tskit.MISSING_DATA)'], {}), '(var.genotypes[1:] == tskit.MISSING_DATA)\n', (25934, 25975), True, 'import numpy as np\n'), ((29302, 29346), 'numpy.arange', 'np.arange', (['(ts.num_sites + 1)'], {'dtype': 'np.uint32'}), '(ts.num_sites + 1, dtype=np.uint32)\n', (29311, 29346), True, 'import numpy as np\n'), ((29564, 29608), 'numpy.arange', 'np.arange', (['(ts.num_sites + 1)'], {'dtype': 'np.uint32'}), '(ts.num_sites + 1, dtype=np.uint32)\n', (29573, 29608), True, 'import numpy as np\n'), ((36349, 36391), 'msprime.InfiniteSites', 'msprime.InfiniteSites', (['msprime.NUCLEOTIDES'], {}), '(msprime.NUCLEOTIDES)\n', (36370, 36391), False, 'import msprime\n'), ((37085, 37127), 'msprime.InfiniteSites', 'msprime.InfiniteSites', (['msprime.NUCLEOTIDES'], {}), '(msprime.NUCLEOTIDES)\n', (37106, 37127), False, 'import msprime\n'), ((37426, 37464), 'pytest.raises', 'pytest.raises', (['exceptions.LibraryError'], {}), '(exceptions.LibraryError)\n', (37439, 37464), False, 'import pytest\n'), ((37539, 37577), 'pytest.raises', 'pytest.raises', (['exceptions.LibraryError'], {}), '(exceptions.LibraryError)\n', (37552, 37577), False, 'import pytest\n'), ((37846, 37884), 'pytest.raises', 'pytest.raises', (['exceptions.LibraryError'], {}), '(exceptions.LibraryError)\n', (37859, 37884), False, 'import pytest\n'), ((37959, 37997), 'pytest.raises', 'pytest.raises', (['exceptions.LibraryError'], {}), '(exceptions.LibraryError)\n', (37972, 37997), False, 'import pytest\n'), ((39334, 39376), 'numpy.array_equal', 'np.array_equal', (['v1.genotypes', 
'v2.genotypes'], {}), '(v1.genotypes, v2.genotypes)\n', (39348, 39376), True, 'import numpy as np\n'), ((40431, 40473), 'msprime.InfiniteSites', 'msprime.InfiniteSites', (['msprime.NUCLEOTIDES'], {}), '(msprime.NUCLEOTIDES)\n', (40452, 40473), False, 'import msprime\n'), ((55275, 55301), 'tskit.TableCollection', 'tskit.TableCollection', (['(1.1)'], {}), '(1.1)\n', (55296, 55301), False, 'import tskit\n'), ((55515, 55539), 'tskit.TableCollection', 'tskit.TableCollection', (['(1)'], {}), '(1)\n', (55536, 55539), False, 'import tskit\n'), ((57444, 57483), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""1896"""'}), "(ValueError, match='1896')\n", (57457, 57483), False, 'import pytest\n'), ((9245, 9283), 'pytest.raises', 'pytest.raises', (['exceptions.LibraryError'], {}), '(exceptions.LibraryError)\n', (9258, 9283), False, 'import pytest\n'), ((10592, 10630), 'pytest.raises', 'pytest.raises', (['exceptions.LibraryError'], {}), '(exceptions.LibraryError)\n', (10605, 10630), False, 'import pytest\n'), ((13073, 13096), 'numpy.ones', 'np.ones', (['ts.sample_size'], {}), '(ts.sample_size)\n', (13080, 13096), True, 'import numpy as np\n'), ((14810, 14859), 'numpy.array_equal', 'np.array_equal', (['var1.genotypes[s]', 'var2.genotypes'], {}), '(var1.genotypes[s], var2.genotypes)\n', (14824, 14859), True, 'import numpy as np\n'), ((15852, 15901), 'numpy.array_equal', 'np.array_equal', (['var1.genotypes[s]', 'var2.genotypes'], {}), '(var1.genotypes[s], var2.genotypes)\n', (15866, 15901), True, 'import numpy as np\n'), ((16361, 16394), 'numpy.zeros_like', 'np.zeros_like', (['tables.nodes.flags'], {}), '(tables.nodes.flags)\n', (16374, 16394), True, 'import numpy as np\n'), ((26123, 26149), 'numpy.all', 'np.all', (['(var.genotypes == 0)'], {}), '(var.genotypes == 0)\n', (26129, 26149), True, 'import numpy as np\n'), ((26277, 26320), 'numpy.all', 'np.all', (['(var.genotypes != tskit.MISSING_DATA)'], {}), '(var.genotypes != tskit.MISSING_DATA)\n', (26283, 26320), True, 'import numpy as np\n'), ((29217, 29254), 'numpy.zeros', 'np.zeros', (['ts.num_sites'], {'dtype': 'np.int8'}), '(ts.num_sites, dtype=np.int8)\n', (29225, 29254), True, 'import numpy as np\n'), ((29481, 29518), 'numpy.zeros', 'np.zeros', (['ts.num_sites'], {'dtype': 'np.int8'}), '(ts.num_sites, dtype=np.int8)\n', (29489, 29518), True, 'import numpy as np\n')]
|
# -*- coding: utf-8 -*-
"""
MIT License
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
# @file wrapper.py
# @author <NAME> (adityavaishampayan)
# @copyright MIT
# @brief Main file that calls all the other sub-functions of the pipeline
import sys
# noinspection PyBroadException
try:
sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')
except BaseException:
pass
import cv2
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import axes3d, Axes3D
import argparse
from scripts.LoadData import *
from scripts.bdl_adjst import *
from scripts.visibikity_matrx import *
from scripts.NonLinearPnP import *
from scripts.DrawCameras import camera_draw
from scripts.ExtractCameraPose import extract_cam_pose
from scripts.EssentialMatrixFromFundamentalMatrix import e_from_fundamental
from scripts.NonLinearTriangulation import *
from scripts.EstimateFundamentalMatrix import est_from_funda
from scripts.PnPRANSAC import *
from scripts.disambguate_camera_pose import *
from scripts.LinearTriangulation import *
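# Camera intrinsic matrix: focal lengths on the diagonal and the
# principal point in the last column.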
K = np.array([[568.996140852, 0, 643.21055941],
[0, 568.988362396, 477.982801038],
[0, 0, 1]])
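# W: auxiliary orthogonal matrix used in the standard SVD-based
# decomposition of the essential matrix into rotation and translation.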
W = np.array([[0, -1, 0],
[1, 0, 0],
[0, 0, 1]])
n_images = 6
img1 = 1
img2 = 4
Parser = argparse.ArgumentParser()
Parser.add_argument(
'--DataPath', default="./Data/", help='Folder of Images')
Parser.add_argument(
'--Visualize', default=False, help='Show correspondences')
Args = Parser.parse_args()
DataPath = Args.DataPath
visualize = Args.Visualize
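# Feature tracks: the x/y coordinate matrices are indexed by (track, image);
# M flags which images observe each track, with outlier tracks filtered out.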
x_coord_matrix, y_coord_matrix, M, Color = LoadData(DataPath)
M, outlier_indices = inlier_filter(x_coord_matrix, y_coord_matrix, M, n_images)
recon_bin = np.zeros((M.shape[0], 1))
X_3D = np.zeros((M.shape[0], 3))
opt = np.logical_and(M[:, img1 - 1], M[:, img2 - 1])
outlier_idx = np.where(np.logical_and(outlier_indices[:, img1 - 1],
outlier_indices[:, img2 - 1]) == True)
indices, = np.where(opt == True)
rgb_list = Color[indices]
best_F = est_from_funda(np.float32(np.hstack((x_coord_matrix[indices, img1 - 1].reshape((-1, 1)),
y_coord_matrix[indices, img1 - 1].reshape((-1, 1))))),
np.float32(np.hstack((x_coord_matrix[indices, img2 - 1].reshape((-1, 1)),
y_coord_matrix[indices, img2 - 1].reshape((-1, 1))))))
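# Epipolar geometry for the seed pair: the essential matrix follows from
# the fundamental matrix and the intrinsics (E = K^T F K), and decomposing
# it yields four candidate camera poses (R, C).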
E = e_from_fundamental(best_F, K)
R_set, C_set = extract_cam_pose(E, K)
X_set = []
for n in range(0, 4):
X1 = LinearTriangulation(K, np.zeros((3, 1)), np.identity(3),
C_set[n].T, R_set[n],
np.float32(np.hstack((x_coord_matrix[indices, img1 - 1].reshape((-1, 1)),
y_coord_matrix[indices, img1 - 1].reshape((-1, 1))))),
np.float32(np.hstack((x_coord_matrix[indices, img2 - 1].reshape((-1, 1)),
y_coord_matrix[indices, img2 - 1].reshape((-1, 1))))))
X_set.append(X1)
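# Cheirality check: keep the candidate pose that places the most
# triangulated points in front of both cameras.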
X, R, C = disambguate_camera_pose(C_set, R_set, X_set)
recon_bin = np.zeros((M.shape[0], 1))
X_3D = np.zeros((M.shape[0], 3))
Visibility = np.zeros((M.shape[0], n_images))
X = NonLinearTriangulation(K, np.float32(np.hstack((x_coord_matrix[indices, img1 - 1].reshape((-1, 1)),
y_coord_matrix[indices, img1 - 1].reshape((-1, 1))))),
np.float32(np.hstack((x_coord_matrix[indices, img2 - 1].reshape((-1, 1)),
y_coord_matrix[indices, img2 - 1].reshape((-1, 1))))), X, np.eye(3),
np.zeros((3, 1)), R, C)
recon_bin[indices] = 1
X_3D[indices, :] = X
Visibility[indices, img1 - 1] = 1
Visibility[indices, img2 - 1] = 1
Cset = []
Rset = []
Cset.append(C)
Rset.append(R)
r_indx = [img1, img2]
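# Incrementally register the remaining views: estimate each new camera with
# PnP + RANSAC, refine it nonlinearly, then triangulate the fresh points it
# shares with already-registered views.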
for i in range(0, n_images):
if (np.isin(r_indx, i)[0]):
continue
opt = np.logical_and(recon_bin, M[:, i].reshape((-1, 1)))
indices, _ = np.where(opt == True)
if (len(indices) < 8):
continue
x = np.transpose([x_coord_matrix[indices, i], y_coord_matrix[indices, i]])
X = X_3D[indices, :]
C, R = PnPRANSAC(X, x, K)
C, R = NonLinearPnP(X, x, K, C, R)
Cset.append(C)
Rset.append(R)
r_indx.append(i)
Visibility[indices, i] = 1
for j in range(0, len(r_indx) - 1):
opt = np.logical_and(
np.logical_and(1 - recon_bin, M[:, r_indx[j]].reshape(
(-1, 1))), M[:, i].reshape((-1, 1)))
indices, _ = np.where(opt == True)
if (len(indices) < 8):
continue
x1 = np.hstack((x_coord_matrix[indices, r_indx[j]].reshape((-1, 1)),
y_coord_matrix[indices, r_indx[j]].reshape((-1, 1))))
x2 = np.hstack((x_coord_matrix[indices, i].reshape((-1, 1)),
y_coord_matrix[indices, i].reshape((-1, 1))))
X = LinearTriangulation(K, Cset[j], Rset[j], C, R, x1, x2)
X_3D[indices, :] = X
recon_bin[indices] = 1
Visibility[indices, r_indx[j]] = 1
        Visibility[indices, i] = 1
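# Discard triangulated points that ended up behind the camera (negative z).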
for o in range(len(X_3D)):
if (X_3D[o, 2] < 0):
Visibility[o, :] = 0
recon_bin[o] = 0
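# Refine all registered poses and 3D points jointly with bundle adjustment,
# guided by the point-camera visibility matrix.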
V_bundle = visibikity_matrx(Visibility, r_indx)
point_indices, _ = np.where(recon_bin == 1)
camera_indices = i * np.ones((len(point_indices), 1))
points_2d = np.hstack((x_coord_matrix[point_indices, i].reshape((-1, 1)),
                       y_coord_matrix[point_indices, i].reshape((-1, 1))))
Rset, Cset, X_3D = bdl_adjst(Cset, Rset, X_3D, K, points_2d,
camera_indices, recon_bin,
V_bundle)
ind, _ = np.where(recon_bin == 1)
X_3D = X_3D[ind, :]
Color = Color[ind, :]
ax = plt.axes(projection='3d')
ax.scatter3D(
X_3D[:, 0], X_3D[:, 1], X_3D[:, 2], c=Color / 255.0,
s=1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
ax.set_xlim([-0.5, 1])
ax.set_ylim([-0.5, 1])
ax.set_zlim([0, 1.5])
plt.show()
plt.scatter(X_3D[:, 0], X_3D[:, 2], c=Color / 255.0, s=1)
camera_draw(Cset, Rset)
ax1 = plt.gca()
ax1.set_xlabel('x')
ax1.set_ylabel('z')
plt.show()
|
[
"numpy.isin",
"sys.path.remove",
"matplotlib.pyplot.show",
"argparse.ArgumentParser",
"numpy.logical_and",
"scripts.DrawCameras.camera_draw",
"matplotlib.pyplot.axes",
"matplotlib.pyplot.scatter",
"numpy.eye",
"numpy.zeros",
"numpy.transpose",
"numpy.identity",
"numpy.where",
"numpy.array",
"matplotlib.pyplot.gca",
"scripts.ExtractCameraPose.extract_cam_pose",
"scripts.EssentialMatrixFromFundamentalMatrix.e_from_fundamental"
] |
[((2049, 2144), 'numpy.array', 'np.array', (['[[568.996140852, 0, 643.21055941], [0, 568.988362396, 477.982801038], [0, 0, 1]\n ]'], {}), '([[568.996140852, 0, 643.21055941], [0, 568.988362396, \n 477.982801038], [0, 0, 1]])\n', (2057, 2144), True, 'import numpy as np\n'), ((2240, 2284), 'numpy.array', 'np.array', (['[[0, -1, 0], [1, 0, 0], [0, 0, 1]]'], {}), '([[0, -1, 0], [1, 0, 0], [0, 0, 1]])\n', (2248, 2284), True, 'import numpy as np\n'), ((2358, 2383), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (2381, 2383), False, 'import argparse\n'), ((2787, 2812), 'numpy.zeros', 'np.zeros', (['(M.shape[0], 1)'], {}), '((M.shape[0], 1))\n', (2795, 2812), True, 'import numpy as np\n'), ((2821, 2846), 'numpy.zeros', 'np.zeros', (['(M.shape[0], 3)'], {}), '((M.shape[0], 3))\n', (2829, 2846), True, 'import numpy as np\n'), ((2854, 2900), 'numpy.logical_and', 'np.logical_and', (['M[:, img1 - 1]', 'M[:, img2 - 1]'], {}), '(M[:, img1 - 1], M[:, img2 - 1])\n', (2868, 2900), True, 'import numpy as np\n'), ((3058, 3079), 'numpy.where', 'np.where', (['(opt == True)'], {}), '(opt == True)\n', (3066, 3079), True, 'import numpy as np\n'), ((3544, 3573), 'scripts.EssentialMatrixFromFundamentalMatrix.e_from_fundamental', 'e_from_fundamental', (['best_F', 'K'], {}), '(best_F, K)\n', (3562, 3573), False, 'from scripts.EssentialMatrixFromFundamentalMatrix import e_from_fundamental\n'), ((3589, 3611), 'scripts.ExtractCameraPose.extract_cam_pose', 'extract_cam_pose', (['E', 'K'], {}), '(E, K)\n', (3605, 3611), False, 'from scripts.ExtractCameraPose import extract_cam_pose\n'), ((4271, 4296), 'numpy.zeros', 'np.zeros', (['(M.shape[0], 1)'], {}), '((M.shape[0], 1))\n', (4279, 4296), True, 'import numpy as np\n'), ((4304, 4329), 'numpy.zeros', 'np.zeros', (['(M.shape[0], 3)'], {}), '((M.shape[0], 3))\n', (4312, 4329), True, 'import numpy as np\n'), ((4343, 4375), 'numpy.zeros', 'np.zeros', (['(M.shape[0], n_images)'], {}), '((M.shape[0], n_images))\n', (4351, 4375), True, 'import numpy as np\n'), ((6976, 7000), 'numpy.where', 'np.where', (['(recon_bin == 1)'], {}), '(recon_bin == 1)\n', (6984, 7000), True, 'import numpy as np\n'), ((7051, 7076), 'matplotlib.pyplot.axes', 'plt.axes', ([], {'projection': '"""3d"""'}), "(projection='3d')\n", (7059, 7076), True, 'import matplotlib.pyplot as plt\n'), ((7290, 7300), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7298, 7300), True, 'import matplotlib.pyplot as plt\n'), ((7302, 7359), 'matplotlib.pyplot.scatter', 'plt.scatter', (['X_3D[:, 0]', 'X_3D[:, 2]'], {'c': '(Color / 255.0)', 's': '(1)'}), '(X_3D[:, 0], X_3D[:, 2], c=Color / 255.0, s=1)\n', (7313, 7359), True, 'import matplotlib.pyplot as plt\n'), ((7361, 7386), 'scripts.DrawCameras.camera_draw', 'camera_draw', (['C_set', 'R_set'], {}), '(C_set, R_set)\n', (7372, 7386), False, 'from scripts.DrawCameras import camera_draw\n'), ((7394, 7403), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (7401, 7403), True, 'import matplotlib.pyplot as plt\n'), ((7447, 7457), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (7455, 7457), True, 'import matplotlib.pyplot as plt\n'), ((1284, 1347), 'sys.path.remove', 'sys.path.remove', (['"""/opt/ros/kinetic/lib/python2.7/dist-packages"""'], {}), "('/opt/ros/kinetic/lib/python2.7/dist-packages')\n", (1299, 1347), False, 'import sys\n'), ((4796, 4805), 'numpy.eye', 'np.eye', (['(3)'], {}), '(3)\n', (4802, 4805), True, 'import numpy as np\n'), ((4834, 4850), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (4842, 4850), True, 
'import numpy as np\n'), ((5210, 5231), 'numpy.where', 'np.where', (['(opt == True)'], {}), '(opt == True)\n', (5218, 5231), True, 'import numpy as np\n'), ((5286, 5356), 'numpy.transpose', 'np.transpose', (['[x_coord_matrix[indices, i], y_coord_matrix[indices, i]]'], {}), '([x_coord_matrix[indices, i], y_coord_matrix[indices, i]])\n', (5298, 5356), True, 'import numpy as np\n'), ((6542, 6566), 'numpy.where', 'np.where', (['(recon_bin == 1)'], {}), '(recon_bin == 1)\n', (6550, 6566), True, 'import numpy as np\n'), ((2925, 2999), 'numpy.logical_and', 'np.logical_and', (['outlier_indices[:, img1 - 1]', 'outlier_indices[:, img2 - 1]'], {}), '(outlier_indices[:, img1 - 1], outlier_indices[:, img2 - 1])\n', (2939, 2999), True, 'import numpy as np\n'), ((3678, 3694), 'numpy.zeros', 'np.zeros', (['(3, 1)'], {}), '((3, 1))\n', (3686, 3694), True, 'import numpy as np\n'), ((3696, 3710), 'numpy.identity', 'np.identity', (['(3)'], {}), '(3)\n', (3707, 3710), True, 'import numpy as np\n'), ((5088, 5106), 'numpy.isin', 'np.isin', (['r_indx', 'i'], {}), '(r_indx, i)\n', (5095, 5106), True, 'import numpy as np\n'), ((5762, 5783), 'numpy.where', 'np.where', (['(opt == True)'], {}), '(opt == True)\n', (5770, 5783), True, 'import numpy as np\n')]
|
"""Function for extracting tiff tiles."""
import os
from abc import abstractmethod
from math import ceil
import struct
from cogdumper.errors import TIFFError
from cogdumper.jpegreader import insert_tables
from cogdumper.tifftags import compression as CompressionType
from cogdumper.tifftags import sizes as TIFFSizes
from cogdumper.tifftags import tags as TIFFTags
class AbstractReader: # pragma: no cover
@abstractmethod
    def read(self, offset, length):
pass
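# Hedged sketch, not part of the original module: a minimal reader backed by
# a seekable binary file object, assuming the read(offset, length) contract
# that COGTiff relies on below (return `length` bytes starting at `offset`).
class LocalFileReader(AbstractReader):  # pragma: no cover
    def __init__(self, handle):
        self._handle = handle
    def read(self, offset, length):
        self._handle.seek(offset)
        return self._handle.read(length)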
class COGTiff:
"""
    Cloud Optimised GeoTIFF
    Format
    ------
TIFF / BigTIFF signature
IFD (Image File Directory) of full resolution image
Values of TIFF tags that don't fit inline in the IFD directory, such as TileOffsets, TileByteCounts and GeoTIFF keys
Optional: IFD (Image File Directory) of first overview (typically subsampled by a factor of 2), followed by the values of its tags that don't fit inline
Optional: IFD (Image File Directory) of second overview (typically subsampled by a factor of 4), followed by the values of its tags that don't fit inline
...
Optional: IFD (Image File Directory) of last overview (typically subsampled by a factor of 2N), followed by the values of its tags that don't fit inline
Optional: tile content of last overview level
...
Optional: tile content of first overview level
Tile content of full resolution image.
"""
def __init__(self, reader):
"""Parses a (Big)TIFF for image tiles.
Parameters
----------
reader:
A reader that implements the cogdumper.cog_tiles.AbstractReader methods
"""
self._endian = '<'
self._version = 42
self.read = reader
self._big_tiff = False
self.header = ''
self._offset = 0
self._image_ifds = []
self._mask_ifds = []
self.read_header()
def _ifds(self):
"""Reads TIFF image file directories from a COG recursively.
Parameters
-----------
offset:
number, offset into the tiff stream to read from, this is only
required for the first image file directory
overview:
number, an identifier that is the overview level in the COG
image pyramid
Yield
--------
dict: Image File Directory for the next IFD
"""
while self._offset != 0:
next_offset = 0
pos = 0
tags = []
fallback_size = 4096 if self._big_tiff else 1024
if self._offset > len(self.header):
byte_starts = len(self.header)
byte_ends = byte_starts + self._offset + fallback_size
self.header += self.read(byte_starts, byte_ends)
if self._big_tiff:
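                # BigTIFF IFD layout: 8-byte tag count, 20-byte tag entries
                # (2-byte code, 2-byte type, 8-byte count, 8-byte value or
                # offset), then an 8-byte pointer to the next IFD.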
bytes = self.header[self._offset: self._offset + 8]
num_tags = struct.unpack(f'{self._endian}Q', bytes)[0]
byte_starts = self._offset + 8
byte_ends = (num_tags * 20) + 8 + byte_starts
if byte_ends > len(self.header):
s = len(self.header)
self.header += self.read(s, byte_ends)
bytes = self.header[byte_starts: byte_ends]
for t in range(0, num_tags):
code = struct.unpack(
f'{self._endian}H',
bytes[pos: pos + 2]
)[0]
if code in TIFFTags:
dtype = struct.unpack(
f'{self._endian}H',
bytes[pos + 2: pos + 4]
)[0]
if dtype not in TIFFSizes: # pragma: no cover
raise TIFFError(f'Unrecognised data type {dtype}')
num_values = struct.unpack(
f'{self._endian}Q',
bytes[pos + 4: pos + 12]
)[0]
tag_len = num_values * TIFFSizes[dtype]['size']
if tag_len <= 8:
data = bytes[pos + 12: pos + 12 + tag_len]
else: # pragma: no cover
data_offset = struct.unpack(
f'{self._endian}Q',
bytes[pos + 12: pos + 20]
)[0]
byte_starts = data_offset
byte_ends = byte_starts + tag_len
if byte_ends > len(self.header):
s = len(self.header)
self.header += self.read(s, byte_ends)
data = self.header[byte_starts: byte_ends]
tags.append(
{
'code': code,
'dtype': TIFFSizes[dtype],
'num_values': num_values,
'data': data
}
)
pos = pos + 20
self._offset = self._offset + 8 + pos
next_offset = struct.unpack(
f'{self._endian}Q',
self.header[self._offset: self._offset + 8]
)[0]
else:
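                # Classic TIFF IFD layout: 2-byte tag count, 12-byte tag
                # entries (2-byte code, 2-byte type, 4-byte count, 4-byte
                # value or offset), then a 4-byte pointer to the next IFD.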
bytes = self.header[self._offset: self._offset + 2]
num_tags = struct.unpack(f'{self._endian}H', bytes)[0]
byte_starts = self._offset + 2
byte_ends = (num_tags * 12) + 2 + byte_starts
if byte_ends > len(self.header):
s = len(self.header)
self.header += self.read(s, byte_ends)
bytes = self.header[byte_starts: byte_ends]
for t in range(0, num_tags):
code = struct.unpack(
f'{self._endian}H',
bytes[pos: pos + 2]
)[0]
if code in TIFFTags:
dtype = struct.unpack(
f'{self._endian}H',
bytes[pos + 2: pos + 4]
)[0]
if dtype not in TIFFSizes: # pragma: no cover
raise TIFFError(f'Unrecognised data type {dtype}')
num_values = struct.unpack(
f'{self._endian}L',
bytes[pos + 4: pos + 8]
)[0]
tag_len = num_values * TIFFSizes[dtype]['size']
if tag_len <= 4:
data = bytes[pos + 8: pos + 8 + tag_len]
else:
data_offset = struct.unpack(
f'{self._endian}L',
bytes[pos + 8: pos + 12]
)[0]
byte_starts = data_offset
byte_ends = byte_starts + tag_len
if byte_ends > len(self.header):
s = len(self.header)
self.header += self.read(s, byte_ends)
data = self.header[byte_starts: byte_ends]
tags.append(
{
'code': code,
'dtype': TIFFSizes[dtype],
'num_values': num_values,
'data': data
}
)
pos = pos + 12
self._offset = self._offset + 2 + pos
next_offset = struct.unpack(
f'{self._endian}L',
self.header[self._offset: self._offset + 4]
)[0]
self._offset = next_offset
yield {
'tags': tags,
'next_offset': next_offset
}
def read_header(self):
"""Read and parse COG header."""
buff_size = int(os.environ.get('COG_INGESTED_BYTES_AT_OPEN', '16384'))
self.header = self.read(0, buff_size)
# read first 4 bytes to determine tiff or bigtiff and byte order
if self.header[:2] == b'MM':
self._endian = '>'
self._version = struct.unpack(f'{self._endian}H', self.header[2:4])[0]
if self._version == 42:
# TIFF
self._big_tiff = False
# read offset to first IFD
self._offset = struct.unpack(f'{self._endian}L', self.header[4:8])[0]
elif self._version == 43:
# BIGTIFF
self._big_tiff = True
bytes = self.header[4:16]
bytesize = struct.unpack(f'{self._endian}H', bytes[0:2])[0]
w = struct.unpack(f'{self._endian}H', bytes[2:4])[0]
self._offset = struct.unpack(f'{self._endian}Q', bytes[4:])[0]
if bytesize != 8 or w != 0: # pragma: no cover
raise TIFFError(f"Invalid BigTIFF with bytesize {bytesize} and word {w}")
else: # pragma: no cover
raise TIFFError(f"Invalid version {self._version} for TIFF file")
self._init = True
# for JPEG we need to read all IFDs, they are at the front of the file
for ifd in self._ifds():
mime_type = 'image/jpeg'
# tile offsets are an extension but if they aren't in the file then
# you can't get a tile back!
offsets = []
byte_counts = []
image_width = 0
image_height = 0
tile_width = 0
tile_height = 0
jpeg_tables = None
for t in ifd['tags']:
code = t['code']
fmt = t['dtype']['format']
if code == 256:
# image width
image_width = struct.unpack(
f'{self._endian}{fmt}',
t['data']
)[0]
elif code == 257:
# image height
image_height = struct.unpack(
f'{self._endian}{fmt}',
t['data']
)[0]
elif code == 259:
# compression
val = struct.unpack(
f'{self._endian}{fmt}',
t['data']
)[0]
if val in CompressionType:
mime_type = CompressionType[val]
else:
mime_type = 'application/octet-stream'
elif code == 322:
# tile width
tile_width = struct.unpack(
f'{self._endian}{fmt}',
t['data']
)[0]
elif code == 323:
# tile height
tile_height = struct.unpack(
f'{self._endian}{fmt}',
t['data']
)[0]
elif code == 324:
# tile offsets
offsets = struct.unpack(
f'{self._endian}{t["num_values"]}{fmt}',
t['data']
)
elif code == 325:
# tile byte counts
byte_counts = struct.unpack(
f'{self._endian}{t["num_values"]}{fmt}',
t['data']
)
elif code == 347:
# JPEG Tables
jpeg_tables = t['data']
if len(offsets) == 0:
                raise TIFFError('TIFF tiles not found in IFD')
ifd['image_width'] = image_width
ifd['image_height'] = image_height
ifd['compression'] = mime_type
ifd['tile_width'] = tile_width
ifd['tile_height'] = tile_height
ifd['offsets'] = offsets
ifd['byte_counts'] = byte_counts
ifd['jpeg_tables'] = jpeg_tables
ifd['nx_tiles'] = ceil(image_width / float(tile_width))
ifd['ny_tiles'] = ceil(image_height / float(tile_height))
if (ifd['compression'] == 'deflate'):
self._mask_ifds.append(ifd)
else:
self._image_ifds.append(ifd)
if len(self._image_ifds) == 0 and len(self._mask_ifds) > 0: # pragma: no cover
self._image_ifds = self._mask_ifds
self._mask_ifds = []
def get_tile(self, x, y, z):
"""Read tile data."""
if z < len(self._image_ifds):
image_ifd = self._image_ifds[z]
            idx = (y * image_ifd['nx_tiles']) + x
            if idx >= len(image_ifd['offsets']):
raise TIFFError(f'Tile {x} {y} {z} does not exist')
else:
offset = image_ifd['offsets'][idx]
byte_count = image_ifd['byte_counts'][idx]
tile = self.read(offset, byte_count)
if image_ifd['compression'] == 'image/jpeg':
# fix up jpeg tile with missing quantization tables
tile = insert_tables(tile, image_ifd['jpeg_tables'])
# look for a bit mask file
if z < len(self._mask_ifds):
mask_ifd = self._mask_ifds[z]
mask_offset = mask_ifd['offsets'][idx]
mask_byte_count = mask_ifd['byte_counts'][idx]
mask_tile = self.read(
mask_offset,
mask_byte_count
)
tile = tile + mask_tile
return image_ifd['compression'], tile
else:
return image_ifd['compression'], tile
else:
raise TIFFError(f'Overview {z} is out of bounds.')
@property
def version(self):
return self._version
|
[
"os.environ.get",
"cogdumper.jpegreader.insert_tables",
"cogdumper.errors.TIFFError",
"struct.unpack"
] |
[((8290, 8343), 'os.environ.get', 'os.environ.get', (['"""COG_INGESTED_BYTES_AT_OPEN"""', '"""16384"""'], {}), "('COG_INGESTED_BYTES_AT_OPEN', '16384')\n", (8304, 8343), False, 'import os\n'), ((8558, 8609), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'self.header[2:4]'], {}), "(f'{self._endian}H', self.header[2:4])\n", (8571, 8609), False, 'import struct\n'), ((14208, 14252), 'cogdumper.errors.TIFFError', 'TIFFError', (['f"""Overview {z} is out of bounds."""'], {}), "(f'Overview {z} is out of bounds.')\n", (14217, 14252), False, 'from cogdumper.errors import TIFFError\n'), ((8766, 8817), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}L"""', 'self.header[4:8]'], {}), "(f'{self._endian}L', self.header[4:8])\n", (8779, 8817), False, 'import struct\n'), ((9363, 9422), 'cogdumper.errors.TIFFError', 'TIFFError', (['f"""Invalid version {self._version} for TIFF file"""'], {}), "(f'Invalid version {self._version} for TIFF file')\n", (9372, 9422), False, 'from cogdumper.errors import TIFFError\n'), ((11979, 12027), 'cogdumper.errors.TIFFError', 'TIFFError', (['"""TIFF Tiles are not found in IFD {z}"""'], {}), "('TIFF Tiles are not found in IFD {z}')\n", (11988, 12027), False, 'from cogdumper.errors import TIFFError\n'), ((13111, 13156), 'cogdumper.errors.TIFFError', 'TIFFError', (['f"""Tile {x} {y} {z} does not exist"""'], {}), "(f'Tile {x} {y} {z} does not exist')\n", (13120, 13156), False, 'from cogdumper.errors import TIFFError\n'), ((2925, 2965), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}Q"""', 'bytes'], {}), "(f'{self._endian}Q', bytes)\n", (2938, 2965), False, 'import struct\n'), ((5303, 5380), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}Q"""', 'self.header[self._offset:self._offset + 8]'], {}), "(f'{self._endian}Q', self.header[self._offset:self._offset + 8])\n", (5316, 5380), False, 'import struct\n'), ((5556, 5596), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'bytes'], {}), "(f'{self._endian}H', bytes)\n", (5569, 5596), False, 'import struct\n'), ((7909, 7986), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}L"""', 'self.header[self._offset:self._offset + 4]'], {}), "(f'{self._endian}L', self.header[self._offset:self._offset + 4])\n", (7922, 7986), False, 'import struct\n'), ((8972, 9017), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'bytes[0:2]'], {}), "(f'{self._endian}H', bytes[0:2])\n", (8985, 9017), False, 'import struct\n'), ((9037, 9082), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'bytes[2:4]'], {}), "(f'{self._endian}H', bytes[2:4])\n", (9050, 9082), False, 'import struct\n'), ((9113, 9157), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}Q"""', 'bytes[4:]'], {}), "(f'{self._endian}Q', bytes[4:])\n", (9126, 9157), False, 'import struct\n'), ((9243, 9310), 'cogdumper.errors.TIFFError', 'TIFFError', (['f"""Invalid BigTIFF with bytesize {bytesize} and word {w}"""'], {}), "(f'Invalid BigTIFF with bytesize {bytesize} and word {w}')\n", (9252, 9310), False, 'from cogdumper.errors import TIFFError\n'), ((13498, 13543), 'cogdumper.jpegreader.insert_tables', 'insert_tables', (['tile', "image_ifd['jpeg_tables']"], {}), "(tile, image_ifd['jpeg_tables'])\n", (13511, 13543), False, 'from cogdumper.jpegreader import insert_tables\n'), ((3362, 3415), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'bytes[pos:pos + 2]'], {}), "(f'{self._endian}H', bytes[pos:pos + 2])\n", (3375, 3415), False, 'import struct\n'), ((5993, 6046), 'struct.unpack', 'struct.unpack', 
(['f"""{self._endian}H"""', 'bytes[pos:pos + 2]'], {}), "(f'{self._endian}H', bytes[pos:pos + 2])\n", (6006, 6046), False, 'import struct\n'), ((10129, 10177), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{fmt}"""', "t['data']"], {}), "(f'{self._endian}{fmt}', t['data'])\n", (10142, 10177), False, 'import struct\n'), ((3564, 3621), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'bytes[pos + 2:pos + 4]'], {}), "(f'{self._endian}H', bytes[pos + 2:pos + 4])\n", (3577, 3621), False, 'import struct\n'), ((3814, 3858), 'cogdumper.errors.TIFFError', 'TIFFError', (['f"""Unrecognised data type {dtype}"""'], {}), "(f'Unrecognised data type {dtype}')\n", (3823, 3858), False, 'from cogdumper.errors import TIFFError\n'), ((3897, 3955), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}Q"""', 'bytes[pos + 4:pos + 12]'], {}), "(f'{self._endian}Q', bytes[pos + 4:pos + 12])\n", (3910, 3955), False, 'import struct\n'), ((6195, 6252), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}H"""', 'bytes[pos + 2:pos + 4]'], {}), "(f'{self._endian}H', bytes[pos + 2:pos + 4])\n", (6208, 6252), False, 'import struct\n'), ((6445, 6489), 'cogdumper.errors.TIFFError', 'TIFFError', (['f"""Unrecognised data type {dtype}"""'], {}), "(f'Unrecognised data type {dtype}')\n", (6454, 6489), False, 'from cogdumper.errors import TIFFError\n'), ((6528, 6585), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}L"""', 'bytes[pos + 4:pos + 8]'], {}), "(f'{self._endian}L', bytes[pos + 4:pos + 8])\n", (6541, 6585), False, 'import struct\n'), ((10355, 10403), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{fmt}"""', "t['data']"], {}), "(f'{self._endian}{fmt}', t['data'])\n", (10368, 10403), False, 'import struct\n'), ((4318, 4377), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}Q"""', 'bytes[pos + 12:pos + 20]'], {}), "(f'{self._endian}Q', bytes[pos + 12:pos + 20])\n", (4331, 4377), False, 'import struct\n'), ((6926, 6984), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}L"""', 'bytes[pos + 8:pos + 12]'], {}), "(f'{self._endian}L', bytes[pos + 8:pos + 12])\n", (6939, 6984), False, 'import struct\n'), ((10571, 10619), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{fmt}"""', "t['data']"], {}), "(f'{self._endian}{fmt}', t['data'])\n", (10584, 10619), False, 'import struct\n'), ((10986, 11034), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{fmt}"""', "t['data']"], {}), "(f'{self._endian}{fmt}', t['data'])\n", (10999, 11034), False, 'import struct\n'), ((11210, 11258), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{fmt}"""', "t['data']"], {}), "(f'{self._endian}{fmt}', t['data'])\n", (11223, 11258), False, 'import struct\n'), ((11431, 11496), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{t[\'num_values\']}{fmt}"""', "t['data']"], {}), '(f"{self._endian}{t[\'num_values\']}{fmt}", t[\'data\'])\n', (11444, 11496), False, 'import struct\n'), ((11674, 11739), 'struct.unpack', 'struct.unpack', (['f"""{self._endian}{t[\'num_values\']}{fmt}"""', "t['data']"], {}), '(f"{self._endian}{t[\'num_values\']}{fmt}", t[\'data\'])\n', (11687, 11739), False, 'import struct\n')]
|
import errno
import gc
import mmap
import os
import time
pjoin = os.path.join
import pytest
from snakeoil import _fileutils, currying, fileutils
from snakeoil.fileutils import AtomicWriteFile, write_file
from snakeoil.test.fixtures import RandomPath, TempDir
class TestTouch(RandomPath):
def test_file_creation(self):
orig_um = os.umask(0o000)
try:
fileutils.touch(self.path)
finally:
exiting_umask = os.umask(orig_um)
assert exiting_umask == 0o000
assert os.path.exists(self.path)
assert os.stat(self.path).st_mode & 0o4777 == 0o644
def test_set_times(self):
fileutils.touch(self.path)
orig_stat = os.stat(self.path)
time.sleep(1)
fileutils.touch(self.path)
new_stat = os.stat(self.path)
assert orig_stat.st_atime != new_stat.st_atime
assert orig_stat.st_mtime != new_stat.st_mtime
def test_set_custom_times(self):
fileutils.touch(self.path)
orig_stat = os.stat(self.path)
times = (1, 1)
fileutils.touch(self.path, times=times)
new_stat = os.stat(self.path)
assert orig_stat != new_stat
assert 1 == new_stat.st_atime
assert 1 == new_stat.st_mtime
def test_set_custom_nstimes(self):
fileutils.touch(self.path)
orig_stat = os.stat(self.path)
ns = (1, 1)
fileutils.touch(self.path, ns=ns)
new_stat = os.stat(self.path)
# system doesn't have nanosecond precision, try microseconds
if new_stat.st_atime == 0:
ns = (1000, 1000)
fileutils.touch(self.path, ns=ns)
new_stat = os.stat(self.path)
assert orig_stat != new_stat
assert ns[0] == new_stat.st_atime_ns
assert ns[0] == new_stat.st_mtime_ns
class TestAtomicWriteFile(TempDir):
kls = AtomicWriteFile
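    # Behaviour exercised below: writes go to a temporary sibling file and
    # only replace the target atomically on close(); discard(), or garbage
    # collection before close, must leave the original target untouched.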
def test_normal_ops(self):
fp = pjoin(self.dir, "target")
write_file(fp, "w", "me")
af = self.kls(fp)
af.write("dar")
assert fileutils.readfile_ascii(fp) == "me"
af.close()
assert fileutils.readfile_ascii(fp) == "dar"
def test_perms(self):
fp = pjoin(self.dir, 'target')
orig_um = os.umask(0o777)
try:
af = self.kls(fp, perms=0o644)
af.write("dar")
af.close()
finally:
exiting_umask = os.umask(orig_um)
assert exiting_umask == 0o777
assert os.stat(fp).st_mode & 0o4777 == 0o644
def test_del(self):
fp = pjoin(self.dir, "target")
write_file(fp, "w", "me")
assert fileutils.readfile_ascii(fp) == "me"
af = self.kls(fp)
af.write("dar")
del af
gc.collect()
assert fileutils.readfile_ascii(fp) == "me"
assert len(os.listdir(self.dir)) == 1
def test_close(self):
# verify that we handle multiple closes; no exception is good.
af = self.kls(pjoin(self.dir, "target"))
af.close()
af.close()
def test_discard(self):
fp = pjoin(self.dir, "target")
write_file(fp, "w", "me")
assert fileutils.readfile_ascii(fp) == "me"
af = self.kls(fp)
af.write("dar")
af.discard()
assert not os.path.exists(af._temp_fp)
af.close()
assert fileutils.readfile_ascii(fp) == "me"
# finally validate that it handles multiple discards properly.
af = self.kls(fp)
af.write("dar")
af.discard()
af.discard()
af.close()
def cpy_setup_class(scope, func_name):
if getattr(fileutils, 'native_%s' % func_name) \
is getattr(fileutils, func_name):
scope['skip'] = 'extensions disabled'
else:
scope['func'] = staticmethod(getattr(fileutils, func_name))
class Test_readfile(TempDir):
func = staticmethod(fileutils.readfile)
test_cases = ['asdf\nfdasswer\1923', '', '987234']
default_encoding = 'ascii'
none_on_missing_ret_data = 'dar'
@staticmethod
def convert_data(data, encoding):
if isinstance(data, bytes):
return data
if encoding:
return data.encode(encoding)
return data
def test_it(self):
fp = pjoin(self.dir, 'testfile')
for expected in self.test_cases:
raised = None
encoding = self.default_encoding
if isinstance(expected, tuple):
if len(expected) == 3:
raised = expected[2]
if expected[1] is not None:
encoding = expected[1]
expected = expected[0]
write_file(fp, 'wb', self.convert_data(expected, encoding))
if raised:
with pytest.raises(raised):
self.assertFunc(fp, expected)
else:
self.assertFunc(fp, expected)
def assertFunc(self, path, expected):
assert self.func(path) == expected
def test_none_on_missing(self):
fp = pjoin(self.dir, 'nonexistent')
with pytest.raises(FileNotFoundError):
self.func(fp)
        assert self.func(fp, True) is None
write_file(fp, 'wb', self.convert_data('dar', 'ascii'))
assert self.func(fp, True) == self.none_on_missing_ret_data
        # ensure it handles paths that run through a file -
        # the error should still be suppressed
        assert self.func(pjoin(fp, 'extra'), True) is None
class Test_readfile_ascii(Test_readfile):
func = staticmethod(fileutils.readfile_ascii)
class Test_readfile_utf8(Test_readfile):
func = staticmethod(fileutils.readfile_utf8)
default_encoding = 'utf8'
class Test_readfile_bytes(Test_readfile):
func = staticmethod(fileutils.readfile_bytes)
default_encoding = None
test_cases = list(map(
currying.post_curry(Test_readfile.convert_data, 'ascii'),
Test_readfile.test_cases))
test_cases.append('\ua000fa'.encode("utf8"))
none_on_missing_ret_data = Test_readfile.convert_data(
Test_readfile.none_on_missing_ret_data, 'ascii')
class readlines_mixin(TempDir):
def assertFunc(self, path, expected):
expected = tuple(expected.split())
if expected == ('',):
expected = ()
if 'utf8' not in self.encoding_mode:
assert tuple(self.func(path)) == expected
return
data = tuple(self.func(path))
assert data == expected
def test_none_on_missing(self):
fp = pjoin(self.dir, 'nonexistent')
with pytest.raises(FileNotFoundError):
self.func(fp)
assert tuple(self.func(fp, False, True)) == ()
write_file(fp, 'wb', self.convert_data('dar', 'ascii'))
assert tuple(self.func(fp, True)) == (self.none_on_missing_ret_data,)
assert tuple(self.func(pjoin(fp, 'missing'), False, True)) == ()
def test_strip_whitespace(self):
fp = pjoin(self.dir, 'data')
write_file(fp, 'wb', self.convert_data(' dar1 \ndar2 \n dar3\n',
'ascii'))
results = tuple(self.func(fp, True))
expected = ('dar1', 'dar2', 'dar3')
if self.encoding_mode == 'bytes':
expected = tuple(x.encode("ascii") for x in expected)
assert results == expected
# this time without the trailing newline...
write_file(fp, 'wb', self.convert_data(' dar1 \ndar2 \n dar3',
'ascii'))
results = tuple(self.func(fp, True))
assert results == expected
        # test a couple of edge cases; the underlying C extension has
        # gotten these wrong before.
write_file(fp, 'wb', self.convert_data('0', 'ascii'))
results = tuple(self.func(fp, True))
expected = ('0',)
if self.encoding_mode == 'bytes':
expected = tuple(x.encode("ascii") for x in expected)
assert results == expected
write_file(fp, 'wb', self.convert_data('0\n', 'ascii'))
results = tuple(self.func(fp, True))
expected = ('0',)
if self.encoding_mode == 'bytes':
expected = tuple(x.encode("ascii") for x in expected)
assert results == expected
write_file(fp, 'wb', self.convert_data('0 ', 'ascii'))
results = tuple(self.func(fp, True))
expected = ('0',)
if self.encoding_mode == 'bytes':
expected = tuple(x.encode("ascii") for x in expected)
assert results == expected
def mk_readlines_test(scope, mode):
func_name = 'readlines_%s' % mode
base = globals()['Test_readfile_%s' % mode]
class kls(readlines_mixin, base):
func = staticmethod(getattr(fileutils, func_name))
encoding_mode = mode
kls.__name__ = "Test_%s" % func_name
scope["Test_%s" % func_name] = kls
for case in ("ascii", "bytes", "utf8"):
name = 'readlines_%s' % case
mk_readlines_test(locals(), case)
class TestBrokenStats:
test_cases = ['/proc/crypto', '/sys/devices/system/cpu/present']
def test_readfile(self):
for path in self.test_cases:
self._check_path(path, fileutils.readfile)
def test_readlines(self):
for path in self.test_cases:
self._check_path(path, fileutils.readlines, True)
def _check_path(self, path, func, split_it=False):
try:
with open(path, 'r') as handle:
data = handle.read()
except EnvironmentError as e:
if e.errno not in (errno.ENOENT, errno.EPERM):
raise
return
func_data = func(path)
if split_it:
func_data = list(func_data)
data = [x for x in data.split('\n') if x]
func_data = [x for x in func_data if x]
assert func_data == data
class Test_mmap_or_open_for_read(TempDir):
func = staticmethod(fileutils.mmap_or_open_for_read)
def test_zero_length(self):
path = pjoin(self.dir, 'target')
write_file(path, 'w', '')
m, f = self.func(path)
assert m is None
assert f.read() == b''
f.close()
def test_mmap(self, data=b'foonani'):
path = pjoin(self.dir, 'target')
write_file(path, 'wb', data)
m, f = self.func(path)
assert len(m) == len(data)
assert m.read(len(data)) == data
m.close()
assert f is None
class Test_mmap_and_close(TempDir):
def test_it(self):
path = pjoin(self.dir, 'target')
data = b'asdfasdf'
write_file(path, 'wb', [data])
fd, m = None, None
try:
fd = os.open(path, os.O_RDONLY)
m = _fileutils.mmap_and_close(
fd, len(data), mmap.MAP_PRIVATE, mmap.PROT_READ)
# and ensure it closed the fd...
with pytest.raises(EnvironmentError):
os.read(fd, 1)
fd = None
assert len(m) == len(data)
assert m.read(len(data)) == data
finally:
if m is not None:
m.close()
if fd is not None:
os.close(fd)
|
[
"os.read",
"os.open",
"os.stat",
"os.path.exists",
"time.sleep",
"os.umask",
"gc.collect",
"snakeoil.fileutils.touch",
"snakeoil.fileutils.write_file",
"snakeoil.fileutils.readfile_ascii",
"pytest.raises",
"snakeoil.currying.post_curry",
"os.close",
"os.listdir"
] |
[((345, 356), 'os.umask', 'os.umask', (['(0)'], {}), '(0)\n', (353, 356), False, 'import os\n'), ((529, 554), 'os.path.exists', 'os.path.exists', (['self.path'], {}), '(self.path)\n', (543, 554), False, 'import os\n'), ((654, 680), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {}), '(self.path)\n', (669, 680), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((701, 719), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (708, 719), False, 'import os\n'), ((728, 741), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (738, 741), False, 'import time\n'), ((750, 776), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {}), '(self.path)\n', (765, 776), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((796, 814), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (803, 814), False, 'import os\n'), ((971, 997), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {}), '(self.path)\n', (986, 997), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((1018, 1036), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (1025, 1036), False, 'import os\n'), ((1068, 1107), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {'times': 'times'}), '(self.path, times=times)\n', (1083, 1107), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((1127, 1145), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (1134, 1145), False, 'import os\n'), ((1307, 1333), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {}), '(self.path)\n', (1322, 1333), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((1354, 1372), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (1361, 1372), False, 'import os\n'), ((1401, 1434), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {'ns': 'ns'}), '(self.path, ns=ns)\n', (1416, 1434), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((1454, 1472), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (1461, 1472), False, 'import os\n'), ((1968, 1993), 'snakeoil.fileutils.write_file', 'write_file', (['fp', '"""w"""', '"""me"""'], {}), "(fp, 'w', 'me')\n", (1978, 1993), False, 'from snakeoil.fileutils import AtomicWriteFile, write_file\n'), ((2252, 2265), 'os.umask', 'os.umask', (['(511)'], {}), '(511)\n', (2260, 2265), False, 'import os\n'), ((2601, 2626), 'snakeoil.fileutils.write_file', 'write_file', (['fp', '"""w"""', '"""me"""'], {}), "(fp, 'w', 'me')\n", (2611, 2626), False, 'from snakeoil.fileutils import AtomicWriteFile, write_file\n'), ((2752, 2764), 'gc.collect', 'gc.collect', ([], {}), '()\n', (2762, 2764), False, 'import gc\n'), ((3124, 3149), 'snakeoil.fileutils.write_file', 'write_file', (['fp', '"""w"""', '"""me"""'], {}), "(fp, 'w', 'me')\n", (3134, 3149), False, 'from snakeoil.fileutils import AtomicWriteFile, write_file\n'), ((10039, 10064), 'snakeoil.fileutils.write_file', 'write_file', (['path', '"""w"""', '""""""'], {}), "(path, 'w', '')\n", (10049, 10064), False, 'from snakeoil.fileutils import AtomicWriteFile, write_file\n'), ((10262, 10290), 'snakeoil.fileutils.write_file', 'write_file', (['path', '"""wb"""', 'data'], {}), "(path, 'wb', data)\n", (10272, 10290), False, 'from snakeoil.fileutils import AtomicWriteFile, write_file\n'), ((10579, 10609), 'snakeoil.fileutils.write_file', 'write_file', (['path', '"""wb"""', '[data]'], {}), "(path, 'wb', [data])\n", (10589, 10609), False, 'from snakeoil.fileutils import AtomicWriteFile, write_file\n'), ((386, 412), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {}), '(self.path)\n', (401, 412), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((458, 475), 'os.umask', 'os.umask', (['orig_um'], {}), '(orig_um)\n', (466, 475), False, 'import os\n'), ((1620, 1653), 'snakeoil.fileutils.touch', 'fileutils.touch', (['self.path'], {'ns': 'ns'}), '(self.path, ns=ns)\n', (1635, 1653), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((1677, 1695), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (1684, 1695), False, 'import os\n'), ((2059, 2087), 'snakeoil.fileutils.readfile_ascii', 'fileutils.readfile_ascii', (['fp'], {}), '(fp)\n', (2083, 2087), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((2130, 2158), 'snakeoil.fileutils.readfile_ascii', 'fileutils.readfile_ascii', (['fp'], {}), '(fp)\n', (2154, 2158), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((2420, 2437), 'os.umask', 'os.umask', (['orig_um'], {}), '(orig_um)\n', (2428, 2437), False, 'import os\n'), ((2642, 2670), 'snakeoil.fileutils.readfile_ascii', 'fileutils.readfile_ascii', (['fp'], {}), '(fp)\n', (2666, 2670), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((2780, 2808), 'snakeoil.fileutils.readfile_ascii', 'fileutils.readfile_ascii', (['fp'], {}), '(fp)\n', (2804, 2808), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((3165, 3193), 'snakeoil.fileutils.readfile_ascii', 'fileutils.readfile_ascii', (['fp'], {}), '(fp)\n', (3189, 3193), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((3292, 3319), 'os.path.exists', 'os.path.exists', (['af._temp_fp'], {}), '(af._temp_fp)\n', (3306, 3319), False, 'import os\n'), ((3354, 3382), 'snakeoil.fileutils.readfile_ascii', 'fileutils.readfile_ascii', (['fp'], {}), '(fp)\n', (3378, 3382), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((5093, 5125), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (5106, 5125), False, 'import pytest\n'), ((5853, 5909), 'snakeoil.currying.post_curry', 'currying.post_curry', (['Test_readfile.convert_data', '"""ascii"""'], {}), "(Test_readfile.convert_data, 'ascii')\n", (5872, 5909), False, 'from snakeoil import _fileutils, currying, fileutils\n'), ((6570, 6602), 'pytest.raises', 'pytest.raises', (['FileNotFoundError'], {}), '(FileNotFoundError)\n', (6583, 6602), False, 'import pytest\n'), ((10667, 10693), 'os.open', 'os.open', (['path', 'os.O_RDONLY'], {}), '(path, os.O_RDONLY)\n', (10674, 10693), False, 'import os\n'), ((2836, 2856), 'os.listdir', 'os.listdir', (['self.dir'], {}), '(self.dir)\n', (2846, 2856), False, 'import os\n'), ((10864, 10895), 'pytest.raises', 'pytest.raises', (['EnvironmentError'], {}), '(EnvironmentError)\n', (10877, 10895), False, 'import pytest\n'), ((10913, 10927), 'os.read', 'os.read', (['fd', '(1)'], {}), '(fd, 1)\n', (10920, 10927), False, 'import os\n'), ((11154, 11166), 'os.close', 'os.close', (['fd'], {}), '(fd)\n', (11162, 11166), False, 'import os\n'), ((570, 588), 'os.stat', 'os.stat', (['self.path'], {}), '(self.path)\n', (577, 588), False, 'import os\n'), ((2491, 2502), 'os.stat', 'os.stat', (['fp'], {}), '(fp)\n', (2498, 2502), False, 'import os\n'), ((4776, 4797), 'pytest.raises', 'pytest.raises', (['raised'], {}), '(raised)\n', (4789, 4797), False, 'import pytest\n')]
|
import numpy as _np
from openpnm.utils import Docorator
__all__ = ["pore_coords"]
docstr = Docorator()
@docstr.dedent
def pore_coords(target):
r"""
Calculate throat centroid values by averaging adjacent pore coordinates
Parameters
----------
%(models.target.parameters)s
Returns
-------
values : ndarray
A numpy ndarray containing throat centroid values
"""
network = target.project.network
Ts = network.throats(target.name)
conns = network['throat.conns']
coords = network['pore.coords']
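    # coords[conns] has shape (Nt, 2, 3): the coordinates of the two pores
    # joined by each throat. Averaging over axis 1 gives each throat's
    # midpoint; indexing with Ts keeps only the throats of this target.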
return _np.mean(coords[conns], axis=1)[Ts]
|
[
"numpy.mean",
"openpnm.utils.Docorator"
] |
[((93, 104), 'openpnm.utils.Docorator', 'Docorator', ([], {}), '()\n', (102, 104), False, 'from openpnm.utils import Docorator\n'), ((567, 598), 'numpy.mean', '_np.mean', (['coords[conns]'], {'axis': '(1)'}), '(coords[conns], axis=1)\n', (575, 598), True, 'import numpy as _np\n')]
|
from typing import List, Dict
import pytrec_eval
def get_metric(qrels: str, run: str, metric: str = 'map') -> float:
# Read the qrel file
with open(qrels, 'r') as f_qrel:
qrel_dict = pytrec_eval.parse_qrel(f_qrel)
# Read the run file
with open(run, 'r') as f_run:
run_dict = pytrec_eval.parse_run(f_run)
# Evaluate
evaluator = pytrec_eval.RelevanceEvaluator(qrel_dict, pytrec_eval.supported_measures)
results = evaluator.evaluate(run_dict)
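    # results maps query id -> {measure: value}. compute_aggregated_measure
    # collapses the per-query scores for one measure into a single number;
    # the loops below only enumerate which measures exist, so the aggregate
    # is (redundantly) recomputed once per query.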
mes = {}
for _, query_measures in sorted(results.items()):
for measure, value in sorted(query_measures.items()):
mes[measure] = pytrec_eval.compute_aggregated_measure(measure,
[query_measures[measure]
for query_measures in results.values()])
return mes[metric]
def get_mrr(qrels: str, trec: str, metric: str = 'mrr_cut_10') -> float:
k = int(metric.split('_')[-1])
qrel = {}
with open(qrels, 'r') as f_qrel:
for line in f_qrel:
qid, _, did, label = line.strip().split()
if qid not in qrel:
qrel[qid] = {}
qrel[qid][did] = int(label)
run = {}
with open(trec, 'r') as f_run:
for line in f_run:
qid, _, did, _, _, _ = line.strip().split()
if qid not in run:
run[qid] = []
run[qid].append(did)
mrr = 0.0
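    # Reciprocal rank per query: 1/rank of the first relevant document in
    # the top-k results, 0 if none appears there (e.g. first hit at rank 3
    # gives rr = 1/3); MRR is the mean of rr over all queries in the run.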
for qid in run:
rr = 0.0
for i, did in enumerate(run[qid][:k]):
if qid in qrel and did in qrel[qid] and qrel[qid][did] > 0:
rr = 1 / (i + 1)
break
mrr += rr
mrr /= len(run)
return mrr
|
[
"pytrec_eval.parse_run",
"pytrec_eval.parse_qrel",
"pytrec_eval.RelevanceEvaluator"
] |
[((373, 446), 'pytrec_eval.RelevanceEvaluator', 'pytrec_eval.RelevanceEvaluator', (['qrel_dict', 'pytrec_eval.supported_measures'], {}), '(qrel_dict, pytrec_eval.supported_measures)\n', (403, 446), False, 'import pytrec_eval\n'), ((203, 233), 'pytrec_eval.parse_qrel', 'pytrec_eval.parse_qrel', (['f_qrel'], {}), '(f_qrel)\n', (225, 233), False, 'import pytrec_eval\n'), ((312, 340), 'pytrec_eval.parse_run', 'pytrec_eval.parse_run', (['f_run'], {}), '(f_run)\n', (333, 340), False, 'import pytrec_eval\n')]
|
from django.db import migrations
def remove_ct_from_source_locations(apps, schema_editor):
ConcordanceIdentifier = apps.get_model("core", "ConcordanceIdentifier")
ConcordanceIdentifier.source_locations.through.objects.filter(
concordanceidentifier__authority__in=("ct_covidvaccinefinder_gov", "ct_gov")
).delete()
ConcordanceIdentifier.objects.filter(
authority__in=("ct_covidvaccinefinder_gov", "ct_gov")
).delete()
class Migration(migrations.Migration):
dependencies = [
("core", "0145_add_and_backfill_is_pending_review"),
]
operations = [
migrations.RunPython(
remove_ct_from_source_locations,
reverse_code=lambda apps, schema_editor: None,
),
]
|
[
"django.db.migrations.RunPython"
] |
[((613, 717), 'django.db.migrations.RunPython', 'migrations.RunPython', (['remove_ct_from_source_locations'], {'reverse_code': '(lambda apps, schema_editor: None)'}), '(remove_ct_from_source_locations, reverse_code=lambda\n apps, schema_editor: None)\n', (633, 717), False, 'from django.db import migrations\n')]
|
import numpy as np
from ldpc import bposd_decoder
from panqec.codes import StabilizerCode
from panqec.error_models import BaseErrorModel
from panqec.decoders import BaseDecoder
class BeliefPropagationOSDDecoder(BaseDecoder):
label = 'BP-OSD decoder'
def __init__(self,
code: StabilizerCode,
error_model: BaseErrorModel,
error_rate: float,
max_bp_iter: int = 1000,
channel_update: bool = False,
osd_order: int = 10,
bp_method: str = 'msl'):
super().__init__(code, error_model, error_rate)
self._max_bp_iter = max_bp_iter
self._channel_update = channel_update
self._osd_order = osd_order
self._bp_method = bp_method
# Do not initialize the decoder until we call the decode method.
# This is required because during analysis, there is no need to
# initialize the decoder every time.
self._initialized = False
def get_probabilities(self):
pi, px, py, pz = self.error_model.probability_distribution(
self.code, self.error_rate
)
return pi, px, py, pz
def update_probabilities(self, correction: np.ndarray,
px: np.ndarray, py: np.ndarray, pz: np.ndarray,
direction: str = "x->z") -> np.ndarray:
"""Update X probabilities once a Z correction has been applied"""
n_qubits = correction.shape[0]
new_probs = np.zeros(n_qubits)
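        # Bayes-style update of the channel. For "z->x": where a Z-type flip
        # was applied at qubit i, the chance an X component remains is
        # P(Y)/(P(Z)+P(Y)); where no flip was applied it is
        # P(X)/(1 - P(Z) - P(Y)). The "x->z" branch is symmetric.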
if direction == "z->x":
for i in range(n_qubits):
if correction[i] == 1:
if pz[i] + py[i] != 0:
new_probs[i] = py[i] / (pz[i] + py[i])
else:
new_probs[i] = px[i] / (1 - pz[i] - py[i])
elif direction == "x->z":
for i in range(n_qubits):
if correction[i] == 1:
if px[i] + py[i] != 0:
new_probs[i] = py[i] / (px[i] + py[i])
else:
new_probs[i] = pz[i] / (1 - px[i] - py[i])
else:
raise ValueError(
f"Unrecognized direction {direction} when "
"updating probabilities"
)
return new_probs
def initialize_decoders(self):
is_css = self.code.is_css
if is_css:
self.z_decoder = bposd_decoder(
self.code.Hx,
error_rate=self.error_rate,
max_iter=self._max_bp_iter,
bp_method=self._bp_method,
ms_scaling_factor=0,
osd_method="osd_cs", # Choose from: "osd_e", "osd_cs", "osd0"
osd_order=self._osd_order
)
self.x_decoder = bposd_decoder(
self.code.Hz,
error_rate=self.error_rate,
max_iter=self._max_bp_iter,
bp_method=self._bp_method,
ms_scaling_factor=0,
osd_method="osd_cs", # Choose from: "osd_e", "osd_cs", "osd0"
osd_order=self._osd_order
)
else:
self.decoder = bposd_decoder(
self.code.stabilizer_matrix,
error_rate=self.error_rate,
max_iter=self._max_bp_iter,
bp_method=self._bp_method,
ms_scaling_factor=0,
osd_method="osd_cs", # Choose from: "osd_e", "osd_cs", "osd0"
osd_order=self._osd_order
            )
        self._initialized = True
def decode(self, syndrome: np.ndarray, **kwargs) -> np.ndarray:
"""Get X and Z corrections given code and measured syndrome."""
if not self._initialized:
self.initialize_decoders()
is_css = self.code.is_css
n_qubits = self.code.n
syndrome = np.array(syndrome, dtype=int)
if is_css:
syndrome_z = self.code.extract_z_syndrome(syndrome)
syndrome_x = self.code.extract_x_syndrome(syndrome)
pi, px, py, pz = self.get_probabilities()
probabilities_x = px + py
probabilities_z = pz + py
probabilities = np.hstack([probabilities_z, probabilities_x])
if is_css:
# Update probabilities (in case the distribution is new at each
# iteration)
self.x_decoder.update_channel_probs(probabilities_x)
self.z_decoder.update_channel_probs(probabilities_z)
# Decode Z errors
self.z_decoder.decode(syndrome_x)
z_correction = self.z_decoder.osdw_decoding
# Bayes update of the probability
if self._channel_update:
new_x_probs = self.update_probabilities(
z_correction, px, py, pz, direction="z->x"
)
self.x_decoder.update_channel_probs(new_x_probs)
# Decode X errors
self.x_decoder.decode(syndrome_z)
x_correction = self.x_decoder.osdw_decoding
correction = np.concatenate([x_correction, z_correction])
else:
# Update probabilities (in case the distribution is new at each
# iteration)
self.decoder.update_channel_probs(probabilities)
# Decode all errors
self.decoder.decode(syndrome)
correction = self.decoder.osdw_decoding
correction = np.concatenate(
[correction[n_qubits:], correction[:n_qubits]]
)
return correction
def test_decoder():
from panqec.codes import XCubeCode
from panqec.error_models import PauliErrorModel
import time
rng = np.random.default_rng()
L = 20
code = XCubeCode(L, L, L)
error_rate = 0.5
r_x, r_y, r_z = [0.15, 0.15, 0.7]
error_model = PauliErrorModel(r_x, r_y, r_z)
print("Create stabilizer matrix")
code.stabilizer_matrix
print("Create Hx and Hz")
code.Hx
code.Hz
print("Create logicals")
code.logicals_x
code.logicals_z
print("Instantiate BP-OSD")
decoder = BeliefPropagationOSDDecoder(
code, error_model, error_rate, osd_order=0, max_bp_iter=1000
)
# Start timer
start = time.time()
n_iter = 1
accuracy = 0
for i in range(n_iter):
print(f"\nRun {code.label} {i}...")
print("Generate errors")
error = error_model.generate(code, error_rate, rng=rng)
print("Calculate syndrome")
syndrome = code.measure_syndrome(error)
print("Decode")
correction = decoder.decode(syndrome)
print("Get total error")
total_error = (correction + error) % 2
codespace = code.in_codespace(total_error)
success = not code.is_logical_error(total_error) and codespace
print(success)
accuracy += success
accuracy /= n_iter
print("Average time per iteration", (time.time() - start) / n_iter)
print("Logical error rate", 1 - accuracy)
if __name__ == '__main__':
test_decoder()
|
[
"numpy.zeros",
"time.time",
"numpy.random.default_rng",
"numpy.hstack",
"numpy.array",
"panqec.error_models.PauliErrorModel",
"panqec.codes.XCubeCode",
"ldpc.bposd_decoder",
"numpy.concatenate"
] |
[((5722, 5745), 'numpy.random.default_rng', 'np.random.default_rng', ([], {}), '()\n', (5743, 5745), True, 'import numpy as np\n'), ((5769, 5787), 'panqec.codes.XCubeCode', 'XCubeCode', (['L', 'L', 'L'], {}), '(L, L, L)\n', (5778, 5787), False, 'from panqec.codes import XCubeCode\n'), ((5866, 5896), 'panqec.error_models.PauliErrorModel', 'PauliErrorModel', (['r_x', 'r_y', 'r_z'], {}), '(r_x, r_y, r_z)\n', (5881, 5896), False, 'from panqec.error_models import PauliErrorModel\n'), ((6270, 6281), 'time.time', 'time.time', ([], {}), '()\n', (6279, 6281), False, 'import time\n'), ((1532, 1550), 'numpy.zeros', 'np.zeros', (['n_qubits'], {}), '(n_qubits)\n', (1540, 1550), True, 'import numpy as np\n'), ((3891, 3920), 'numpy.array', 'np.array', (['syndrome'], {'dtype': 'int'}), '(syndrome, dtype=int)\n', (3899, 3920), True, 'import numpy as np\n'), ((4214, 4259), 'numpy.hstack', 'np.hstack', (['[probabilities_z, probabilities_x]'], {}), '([probabilities_z, probabilities_x])\n', (4223, 4259), True, 'import numpy as np\n'), ((2460, 2648), 'ldpc.bposd_decoder', 'bposd_decoder', (['self.code.Hx'], {'error_rate': 'self.error_rate', 'max_iter': 'self._max_bp_iter', 'bp_method': 'self._bp_method', 'ms_scaling_factor': '(0)', 'osd_method': '"""osd_cs"""', 'osd_order': 'self._osd_order'}), "(self.code.Hx, error_rate=self.error_rate, max_iter=self.\n _max_bp_iter, bp_method=self._bp_method, ms_scaling_factor=0,\n osd_method='osd_cs', osd_order=self._osd_order)\n", (2473, 2648), False, 'from ldpc import bposd_decoder\n'), ((2838, 3026), 'ldpc.bposd_decoder', 'bposd_decoder', (['self.code.Hz'], {'error_rate': 'self.error_rate', 'max_iter': 'self._max_bp_iter', 'bp_method': 'self._bp_method', 'ms_scaling_factor': '(0)', 'osd_method': '"""osd_cs"""', 'osd_order': 'self._osd_order'}), "(self.code.Hz, error_rate=self.error_rate, max_iter=self.\n _max_bp_iter, bp_method=self._bp_method, ms_scaling_factor=0,\n osd_method='osd_cs', osd_order=self._osd_order)\n", (2851, 3026), False, 'from ldpc import bposd_decoder\n'), ((3228, 3430), 'ldpc.bposd_decoder', 'bposd_decoder', (['self.code.stabilizer_matrix'], {'error_rate': 'self.error_rate', 'max_iter': 'self._max_bp_iter', 'bp_method': 'self._bp_method', 'ms_scaling_factor': '(0)', 'osd_method': '"""osd_cs"""', 'osd_order': 'self._osd_order'}), "(self.code.stabilizer_matrix, error_rate=self.error_rate,\n max_iter=self._max_bp_iter, bp_method=self._bp_method,\n ms_scaling_factor=0, osd_method='osd_cs', osd_order=self._osd_order)\n", (3241, 3430), False, 'from ldpc import bposd_decoder\n'), ((5090, 5134), 'numpy.concatenate', 'np.concatenate', (['[x_correction, z_correction]'], {}), '([x_correction, z_correction])\n', (5104, 5134), True, 'import numpy as np\n'), ((5463, 5525), 'numpy.concatenate', 'np.concatenate', (['[correction[n_qubits:], correction[:n_qubits]]'], {}), '([correction[n_qubits:], correction[:n_qubits]])\n', (5477, 5525), True, 'import numpy as np\n'), ((6957, 6968), 'time.time', 'time.time', ([], {}), '()\n', (6966, 6968), False, 'import time\n')]
|
import numpy as np
import pandas as pd
import random
from sklearn.model_selection import train_test_split
class SVM:
def __init__(self, max_iterations=1000, C=1, epsilon=0.001):
self.max_iterations = max_iterations
self.C = C
self.epsilon = epsilon
def fit(self, X, y):
# Ensure X and y are numpy arrays
X = np.array(X).astype('float')
y = np.array(y).astype('int')
# Initialize variables
n, d = X.shape
alpha = np.zeros(n)
        iteration = 0
# Calculate starting w & b values
self.compute_w_b(y, alpha, X)
for iteration in range(1, self.max_iterations + 1):
# print('Iteration ', iteration)
alpha_prev = np.copy(alpha)
for i in range(0, n):
# Get random j so that i != j
j = random.randint(0, n - 2)
if j >= i:
j += 1
# Prepare variables
x_i, y_i = X[i,:], y[i]
x_j, y_j = X[j,:], y[j]
# Calculate nu
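                # (the curvature of the dual objective along the constraint
                # line: eta = K(x_i, x_i) + K(x_j, x_j) - 2*K(x_i, x_j),
                # which is non-negative for any valid kernel)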
                nu = np.dot(x_i, x_i) + np.dot(x_j, x_j) - 2*np.dot(x_i, x_j)
# Calculate lower and upper bounds
L, H = self.L_H(alpha[i], alpha[j], y_i*y_j, self.C)
if L == H:
continue
# Compute E values
E_i = self.E(x_i, y_i, self.w, self.b)
E_j = self.E(x_j, y_j, self.w, self.b)
if nu == 0:
continue
"""
if self.obj_func(nu, alpha[j], L, y_i, E_i, E_j) > self.obj_func(nu, alpha[j], H, y_i, E_i, E_j):
new_alpha_j = L
else:
new_alpha_j = H
"""
else:
# Compute E values
E_i = self.E(x_i, y_i, self.w, self.b)
E_j = self.E(x_j, y_j, self.w, self.b)
# Compute new alpha j
new_alpha_j = alpha[j] + y_j*(E_i - E_j)/nu
new_alpha_j = max(min(new_alpha_j, H), L)
# Compute new alpha i & deltas
new_alpha_i = alpha[i] + y_i*y_j*(alpha[j] - new_alpha_j)
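                # (alpha_i moves so that y_i*alpha_i + y_j*alpha_j stays
                # constant, preserving the equality constraint of the dual)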
delta_i = (new_alpha_i - alpha[i])
delta_j = (new_alpha_j - alpha[j])
# Update w
self.w += delta_i*y_i*x_i + delta_j*y_j*x_j
# Update b
                b_i = self.b - E_i - y_i*delta_i*np.dot(x_i, x_i) - y_j*delta_j*np.dot(x_i, x_j)
                b_j = self.b - E_j - y_i*delta_i*np.dot(x_i, x_j) - y_j*delta_j*np.dot(x_j, x_j)
if 0 < new_alpha_i < self.C:
self.b = b_i
elif 0 < new_alpha_j < self.C:
self.b = b_j
else:
self.b = (b_i + b_j)/2
# Update alphas
alpha[i] = new_alpha_i
alpha[j] = new_alpha_j
"""
print('i: ', i, 'j: ', j)
print('f_i: ', self.f(x_i, self.w, self.b), 'y_i: ', y_i,
'f_j: ', self.f(x_j, self.w, self.b), 'y_j: ', y_j)
print('L: ', L, 'H: ', H, 'E_i: ', E_i, 'E_j: ', E_j)
print('New unbounded alpha j: ', new_alpha_j)
print('New alpha j: ', alpha[j])
"""
# End loop if convergence param is attained
if np.linalg.norm(alpha - alpha_prev) < self.epsilon:
break
# Save support vectors
        self.train_iterations = iteration
self.support_vectors = X[np.where(alpha > 0)[0], :]
def score(self, X, y):
        if not hasattr(self, 'b'):
print('SVM has not been trained yet')
else:
predictions = self.predict(X)
return np.sum(y == predictions)/y.shape[0]
def compute_w_b(self, y, alpha, X):
self.w = np.matmul(y*alpha, X).T
self.b = np.mean(y - np.matmul(self.w.T, X.T))
def f(self, x, w, b):
return np.sign(np.matmul(x.astype('float'), w) + b).astype(int)
def E(self, x, y, w, b):
return self.f(x, w, b) - y
def obj_func(self, nu, alpha_j, new_alpha_j, y_j, E_i, E_j):
return 0.5*nu*new_alpha_j**2 + (y_j*(E_i - E_j) - nu*alpha_j)*new_alpha_j
def L_H(self, alpha_i, alpha_j, s, C):
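        # Clipping bounds for the new alpha_j under the box constraints
        # 0 <= alpha <= C, where s = y_i*y_j: for opposite labels (s == -1)
        # the feasible segment is [max(0, a_j - a_i), min(C, C + a_j - a_i)];
        # for equal labels it is [max(0, a_i + a_j - C), min(C, a_i + a_j)].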
if s == -1:
return max(0, alpha_j - alpha_i), min(C, C + alpha_j - alpha_i)
else:
return max(0, alpha_i + alpha_j - C), min(C, alpha_i + alpha_j)
def predict(self, features):
return self.f(features, self.w, self.b)
def test_svm():
# Read data from text file
df = pd.read_csv('data/breast-cancer-wisconsin.data')
df = df.replace('?', -99999999).drop(['id'], axis=1)
# Prepare X and y inputs
X = df.drop(['class'], axis=1)
y = df['class'].replace(2, -1).replace(4, 1)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Train SVM
classifier = SVM()
classifier.fit(X_train, y_train)
# Print Model parameters
print('SVM parameters')
print('w: ', classifier.w, 'b: ', classifier.b)
print('Support vector count: ', len(classifier.support_vectors))
# Test SVM accuracy
accuracy = classifier.score(X_test, y_test)
print('SVM Accuracy: ', accuracy)
# Predict random examples
example_measures = np.array([[8,10,10,8,7,10,9,7,1], [6,1,1,1,2,1,3,1,1], [3,1,1,1,2,1,2,1,1]])
predictions = classifier.predict(example_measures)
print('SVM Predictions: ', predictions, '; Actual: ', [1, -1, -1])
test_svm()
|
[
"numpy.sum",
"random.randint",
"numpy.copy",
"pandas.read_csv",
"sklearn.model_selection.train_test_split",
"numpy.zeros",
"numpy.where",
"numpy.array",
"numpy.linalg.norm",
"numpy.matmul",
"numpy.dot"
] |
[((4792, 4840), 'pandas.read_csv', 'pd.read_csv', (['"""data/breast-cancer-wisconsin.data"""'], {}), "('data/breast-cancer-wisconsin.data')\n", (4803, 4840), True, 'import pandas as pd\n'), ((5051, 5088), 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.2)'}), '(X, y, test_size=0.2)\n', (5067, 5088), False, 'from sklearn.model_selection import train_test_split\n'), ((5510, 5615), 'numpy.array', 'np.array', (['[[8, 10, 10, 8, 7, 10, 9, 7, 1], [6, 1, 1, 1, 2, 1, 3, 1, 1], [3, 1, 1, 1, \n 2, 1, 2, 1, 1]]'], {}), '([[8, 10, 10, 8, 7, 10, 9, 7, 1], [6, 1, 1, 1, 2, 1, 3, 1, 1], [3, \n 1, 1, 1, 2, 1, 2, 1, 1]])\n', (5518, 5615), True, 'import numpy as np\n'), ((498, 509), 'numpy.zeros', 'np.zeros', (['n'], {}), '(n)\n', (506, 509), True, 'import numpy as np\n'), ((745, 759), 'numpy.copy', 'np.copy', (['alpha'], {}), '(alpha)\n', (752, 759), True, 'import numpy as np\n'), ((4014, 4037), 'numpy.matmul', 'np.matmul', (['(y * alpha)', 'X'], {}), '(y * alpha, X)\n', (4023, 4037), True, 'import numpy as np\n'), ((361, 372), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (369, 372), True, 'import numpy as np\n'), ((401, 412), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (409, 412), True, 'import numpy as np\n'), ((877, 901), 'random.randint', 'random.randint', (['(0)', '(n - 2)'], {}), '(0, n - 2)\n', (891, 901), False, 'import random\n'), ((3536, 3570), 'numpy.linalg.norm', 'np.linalg.norm', (['(alpha - alpha_prev)'], {}), '(alpha - alpha_prev)\n', (3550, 3570), True, 'import numpy as np\n'), ((3920, 3944), 'numpy.sum', 'np.sum', (['(y == predictions)'], {}), '(y == predictions)\n', (3926, 3944), True, 'import numpy as np\n'), ((4067, 4091), 'numpy.matmul', 'np.matmul', (['self.w.T', 'X.T'], {}), '(self.w.T, X.T)\n', (4076, 4091), True, 'import numpy as np\n'), ((1164, 1180), 'numpy.dot', 'np.dot', (['x_i', 'x_j'], {}), '(x_i, x_j)\n', (1170, 1180), True, 'import numpy as np\n'), ((3717, 3736), 'numpy.where', 'np.where', (['(alpha > 0)'], {}), '(alpha > 0)\n', (3725, 3736), True, 'import numpy as np\n'), ((1126, 1142), 'numpy.dot', 'np.dot', (['x_i', 'x_i'], {}), '(x_i, x_i)\n', (1132, 1142), True, 'import numpy as np\n'), ((1145, 1161), 'numpy.dot', 'np.dot', (['x_j', 'x_j'], {}), '(x_j, x_j)\n', (1151, 1161), True, 'import numpy as np\n'), ((2603, 2619), 'numpy.dot', 'np.dot', (['x_i', 'x_j'], {}), '(x_i, x_j)\n', (2609, 2619), True, 'import numpy as np\n'), ((2700, 2716), 'numpy.dot', 'np.dot', (['x_i', 'x_j'], {}), '(x_i, x_j)\n', (2706, 2716), True, 'import numpy as np\n'), ((2572, 2588), 'numpy.dot', 'np.dot', (['x_i', 'x_i'], {}), '(x_i, x_i)\n', (2578, 2588), True, 'import numpy as np\n'), ((2669, 2685), 'numpy.dot', 'np.dot', (['x_i', 'x_i'], {}), '(x_i, x_i)\n', (2675, 2685), True, 'import numpy as np\n')]
|
from phantasm import Parser
if __name__ == '__main__':
with open('add.wasm', 'rb') as f:
parser = Parser(f)
print(parser.parse())
|
[
"phantasm.Parser"
] |
[((111, 120), 'phantasm.Parser', 'Parser', (['f'], {}), '(f)\n', (117, 120), False, 'from phantasm import Parser\n')]
|
#!/usr/bin/env python
#-*- coding:utf-8 _*-
"""
@author:liruihui
@file: train.py
@time: 2019/09/17
@contact: <EMAIL>
@github: https://liruihui.github.io/
@description:
"""
import os
import pprint
pp = pprint.PrettyPrinter()
from datetime import datetime
from Generation.model_test import Model
from Generation.config import opts
if __name__ == '__main__':
opts.pretrain_model_G = "Chair_G.pth"
opts.log_dir = "train_models"
model = Model(opts)
    # model.draw_correspondense() # draw the correspondence between sphere and shape
    model.draw_shape_intepolate() # shape interpolation
    # model.draw_part_shape_inte() # shape interpolation vs part-wise shape interpolation
    #model.draw_part_shape_inte_detail() # shape interpolation vs multi-path part-wise shape interpolation
    #model.draw_part_edit() # randomly change the noise on a selected region
    #model.draw_part_flip() # negate the noise vector along the x, y, z axes
    #model.draw_edit_inte() # combined part edit & part/shape interpolation
    #model.draw_part_exchange() # exchange the noise vectors of two regions of two shapes
# model.latent_optimization()
|
[
"pprint.PrettyPrinter",
"Generation.model_test.Model"
] |
[((208, 230), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {}), '()\n', (228, 230), False, 'import pprint\n'), ((456, 467), 'Generation.model_test.Model', 'Model', (['opts'], {}), '(opts)\n', (461, 467), False, 'from Generation.model_test import Model\n')]
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class myResnet(nn.Module):
def __init__(self, resnet):
super(myResnet, self).__init__()
self.resnet = resnet
self.last_conv = nn.Conv2d(2048, 512, [1, 1])
def forward(self, img, att_size=7):
x = img # .unsqueeze(0) # 3x224x224
x = self.resnet.conv1(x) # 64x112x112
x = self.resnet.bn1(x) # 64x112x112
x = self.resnet.relu(x) # 64x112x112
x = self.resnet.maxpool(x) # 64x56x56
x = self.resnet.layer1(x) # 256x56x56
x = self.resnet.layer2(x) # 512x28x28
x = self.resnet.layer3(x) # 1024x14x14
x = self.resnet.layer4(x) # 2048x7x7
g = self.resnet.avgpool(x) # 2048x1x1
g = g.view(g.size(0), -1) # 2048
g = self.resnet.fc(g) # 1000
# BASIC
# fc = x.mean(3).mean(2) # .squeeze()
# # att = F.adaptive_avg_pool2d(x,[att_size,att_size]).permute(0, 2, 3, 1)
# att = x.permute(0, 2, 3, 1)
###
### AoANet - Method - 512 - 1x1conv
# fc = x.mean(3).mean(2) # .squeeze()
# att = self.last_conv(x)
# att = att.permute(0, 2, 3, 1)
# att = att.view(att.size(0), -1, att.size(-1)) # TODO add this line for AoANet code
###
### AoANet - Method - 512 - maxpooling
fc = x.mean(3).mean(2) # .squeeze()
att = x.permute(0, 2, 3, 1)
att = att.view(att.size(0), -1, att.size(-1)) # TODO add this line for AoANet code
att = F.adaptive_avg_pool1d(att, 512)
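        # adaptive_avg_pool1d treats att as (N, C, L) = (batch, 49, 2048)
        # and averages the last dimension down to 512, i.e. over
        # non-overlapping windows of 4 channels -> att becomes (batch, 49, 512)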
# att = F.adaptive_max_pool1d(att, 512)
###
        return fc, att  # fc: (batch, 2048); att: (batch, 49, 512) for the active pooling branch
|
[
"torch.nn.Conv2d",
"torch.nn.functional.adaptive_avg_pool1d"
] |
[((223, 251), 'torch.nn.Conv2d', 'nn.Conv2d', (['(2048)', '(512)', '[1, 1]'], {}), '(2048, 512, [1, 1])\n', (232, 251), True, 'import torch.nn as nn\n'), ((1546, 1577), 'torch.nn.functional.adaptive_avg_pool1d', 'F.adaptive_avg_pool1d', (['att', '(512)'], {}), '(att, 512)\n', (1567, 1577), True, 'import torch.nn.functional as F\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Apr 4 14:13:34 2020
Testing out the ADS1115 ADC with the raspberry pi
@author: nlourie
"""
import board
import busio
import time
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from datetime import datetime
import numpy as np
i2c = busio.I2C(board.SCL,board.SDA)
import adafruit_ads1x15.ads1115 as ADS
from adafruit_ads1x15.analog_in import AnalogIn
ads = ADS.ADS1115(i2c)
chan = AnalogIn(ads,ADS.P3)
xs = []
ys = []
fig = plt.figure()
ax = fig.add_subplot(111)
i = 0
dt = 10 # ms
T = 10 # seconds
Nmax = np.int(T*100/dt)
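# number of samples kept on screen: with dt = 10 ms this is 100 points,
# i.e. roughly one second of data (T*1000/dt would cover the full T seconds)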
def animate(i,xs,ys):
v = chan.voltage
t = datetime.utcnow()
xs.append(t)
ys.append(v)
    # limit the number of items in the vectors; trim in place so the shared
    # lists passed via fargs do not keep growing between frames
    del xs[:-Nmax]
    del ys[:-Nmax]
# draw x and y lists
ax.clear()
ax.plot(xs,ys)
# set up plot to call animate() function periodically
ani = animation.FuncAnimation(fig,animate,fargs = (xs,ys),interval = dt)
plt.show()
"""
while True:
print('loop = ',i, chan.voltage)
index.append(i)
v.append(chan.voltage)
ax.plot(index,v)
fig.canvas.draw()
#ax.set_xlim(left = max(0,i-50),right = i+50)
time.sleep(0.1)
i+=1
"""
|
[
"adafruit_ads1x15.ads1115.ADS1115",
"matplotlib.pyplot.show",
"busio.I2C",
"matplotlib.animation.FuncAnimation",
"datetime.datetime.utcnow",
"matplotlib.pyplot.figure",
"numpy.int",
"adafruit_ads1x15.analog_in.AnalogIn"
] |
[((333, 364), 'busio.I2C', 'busio.I2C', (['board.SCL', 'board.SDA'], {}), '(board.SCL, board.SDA)\n', (342, 364), False, 'import busio\n'), ((460, 476), 'adafruit_ads1x15.ads1115.ADS1115', 'ADS.ADS1115', (['i2c'], {}), '(i2c)\n', (471, 476), True, 'import adafruit_ads1x15.ads1115 as ADS\n'), ((485, 506), 'adafruit_ads1x15.analog_in.AnalogIn', 'AnalogIn', (['ads', 'ADS.P3'], {}), '(ads, ADS.P3)\n', (493, 506), False, 'from adafruit_ads1x15.analog_in import AnalogIn\n'), ((530, 542), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (540, 542), True, 'import matplotlib.pyplot as plt\n'), ((614, 634), 'numpy.int', 'np.int', (['(T * 100 / dt)'], {}), '(T * 100 / dt)\n', (620, 634), True, 'import numpy as np\n'), ((950, 1016), 'matplotlib.animation.FuncAnimation', 'animation.FuncAnimation', (['fig', 'animate'], {'fargs': '(xs, ys)', 'interval': 'dt'}), '(fig, animate, fargs=(xs, ys), interval=dt)\n', (973, 1016), True, 'import matplotlib.animation as animation\n'), ((1017, 1027), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1025, 1027), True, 'import matplotlib.pyplot as plt\n'), ((683, 700), 'datetime.datetime.utcnow', 'datetime.utcnow', ([], {}), '()\n', (698, 700), False, 'from datetime import datetime\n')]
|
from django.urls import path
from authors.apps.articles.share_articles import (
ShareEmailAPIView, ShareFacebookAPIView, ShareTwitterAPIView)
from .views import (
ArticlesListCreateAPIView, ArticleRetrieveUpdateDestroy,
ArticleRetrieveBySlugAPIView, CommentListCreateView,
ThreadListCreateView, CommentDeleteView, LikeAPIView,
ArticleRating, FavoriteStatusAPIView, GetFavoriteArticles,
BookmarkListCreateView, BookmarkDestroyView, TagsListAPIView,
ReportArticleAPIView, LikeCommentAPIView,
)
urlpatterns = [
# GET/POST api/articles
path('articles', ArticlesListCreateAPIView.as_view(), name='list_create'),
# GET api/articles/id
path('articles/<int:pk>', ArticleRetrieveUpdateDestroy.as_view(),
name='article_by_id'),
# GET api/articles/slug
path('articles/<slug:slug>', ArticleRetrieveBySlugAPIView.as_view(),
name='article_by_slug'),
path('articles/<int:article_id>/comments', CommentListCreateView.as_view(),
name='comment_on_article'),
path('articles/<int:article_id>/comments/<int:comment_id>',
CommentDeleteView.as_view(),
name='comment_by_id'),
path('articles/<int:article_id>/comments/<int:comment_id>/threads',
ThreadListCreateView.as_view(),
name='comment_on_comment'),
path('articles/<int:pk>/like_status', LikeAPIView.as_view(),
name='like_article'),
path('articles/<int:pk>/rating', ArticleRating.as_view(),
name='ratings_list'),
path('articles/<int:pk>/bookmarks', BookmarkListCreateView.as_view(),
name='create_bookmark'),
path('articles/<int:article_id>/bookmark', BookmarkDestroyView.as_view(),
name='un_bookmark'),
path('articles/<int:pk>/favorite_status', FavoriteStatusAPIView.as_view(),
name='favorite_article'),
path('articles/favorites/',
GetFavoriteArticles.as_view(),
name='favorites'),
path('tags', TagsListAPIView.as_view()),
path('articles/<int:article_id>/comments/<int:comment_id>/like_status',
LikeCommentAPIView.as_view(), name='like_comment'),
path('articles/<int:article_id>/email',
ShareEmailAPIView.as_view(), name='share_email'),
path('articles/<int:article_id>/facebook',
ShareFacebookAPIView.as_view(), name='share_facebook'),
path('articles/<int:article_id>/twitter',
ShareTwitterAPIView.as_view(), name='share_twitter'),
path('articles/<int:article_id>/report', ReportArticleAPIView.as_view(),
name='report_article')
]
|
[
"authors.apps.articles.share_articles.ShareFacebookAPIView.as_view",
"authors.apps.articles.share_articles.ShareTwitterAPIView.as_view",
"authors.apps.articles.share_articles.ShareEmailAPIView.as_view"
] |
[((2164, 2191), 'authors.apps.articles.share_articles.ShareEmailAPIView.as_view', 'ShareEmailAPIView.as_view', ([], {}), '()\n', (2189, 2191), False, 'from authors.apps.articles.share_articles import ShareEmailAPIView, ShareFacebookAPIView, ShareTwitterAPIView\n'), ((2270, 2300), 'authors.apps.articles.share_articles.ShareFacebookAPIView.as_view', 'ShareFacebookAPIView.as_view', ([], {}), '()\n', (2298, 2300), False, 'from authors.apps.articles.share_articles import ShareEmailAPIView, ShareFacebookAPIView, ShareTwitterAPIView\n'), ((2381, 2410), 'authors.apps.articles.share_articles.ShareTwitterAPIView.as_view', 'ShareTwitterAPIView.as_view', ([], {}), '()\n', (2408, 2410), False, 'from authors.apps.articles.share_articles import ShareEmailAPIView, ShareFacebookAPIView, ShareTwitterAPIView\n')]
|
from django.contrib.sessions.models import Session
from tracking.models import Visitor
from datetime import datetime
class UserRestrictMiddleware(object):
"""Prevents more than one user logging in at once from two different IPs.
"""
def __init__(self, get_response):
self.get_response = get_response
def __call__(self, request):
ip_address = request.META.get('REMOTE_ADDR', '')
try:
last_login = request.user.last_login
except Exception:
last_login = 0
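        # Django refreshes last_login at authentication time; if it matches
        # the current wall-clock second, treat this request as a fresh login
        # and evict this user's sessions that came from other IP addresses.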
if str(last_login) == str(datetime.now())[:19]:
previous_visitors = Visitor.objects.filter(
user=request.user).exclude(ip_address=ip_address)
for visitor in previous_visitors:
Session.objects.filter(
session_key=visitor.session_key).delete()
visitor.user = None
visitor.save()
return self.get_response(request)
|
[
"django.contrib.sessions.models.Session.objects.filter",
"datetime.datetime.now",
"tracking.models.Visitor.objects.filter"
] |
[((564, 578), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (576, 578), False, 'from datetime import datetime\n'), ((618, 659), 'tracking.models.Visitor.objects.filter', 'Visitor.objects.filter', ([], {'user': 'request.user'}), '(user=request.user)\n', (640, 659), False, 'from tracking.models import Visitor\n'), ((771, 826), 'django.contrib.sessions.models.Session.objects.filter', 'Session.objects.filter', ([], {'session_key': 'visitor.session_key'}), '(session_key=visitor.session_key)\n', (793, 826), False, 'from django.contrib.sessions.models import Session\n')]
|
#!/usr/bin/env python
#
# Copyright (c) 2017-2018, SyLabs, Inc. All rights reserved.
# Copyright (c) 2017, SingularityWare, LLC. All rights reserved.
# Copyright (c) 2017, <NAME>. All rights reserved.
#
# See the COPYRIGHT.md file at the top-level directory of this
# distribution and at https://github.com/singularityware/singularity
#
# This file is part of the Singularity Linux container project.
# It is subject to the license terms in the LICENSE.md file
# found in the top-level directory of this distribution and at
# https://github.com/singularityware/singularity. No part
# of Singularity, including this file, may be copied, modified,
# propagated, or distributed except according to the terms
# contained in the LICENSE.md file.
import os
import platform
import sys
import re
try:
    from urllib.request import urlopen, Request
    from urllib.error import HTTPError
except ImportError:
    from urllib2 import urlopen, Request, HTTPError
os_base, os_name, os_version = platform.linux_distribution()
os_base = os_base.lower()
os_names = "|".join([x.lower() for x in os_name.split('/')])
base = os.environ["SINGULARITY_ROOTFS"]
os.chdir(base)
##################################################################
# Common Vulnerabilities Database, High Risk
##################################################################
base = "https://security-tracker.debian.org/tracker/status/release"
filters = "?filter=1&filter=high_urgency"
release = "stable"
url = Request('%s/%s/%s' % (base, release, filters))
response = urlopen(url).read().decode('utf-8')
cve_codes = re.findall(">CVE-(.*?)<", response)
returncode = 0
# We are only testing debian
if os_base not in ['debian', 'ubuntu']:
print("OS not in debian/ubuntu family, skipping test.")
sys.exit(returncode)
# Iterate through the CVE codes, and assess if the distribution matches
print("Checking %s system for %s CVE vulnerabilities..." % (os_base,
len(cve_codes)))
for cve_code in cve_codes:
url = "https://security-tracker.debian.org/tracker/CVE-%s" % cve_code
request = Request(url)
try:
response = urlopen(request)
except HTTPError:
        # skip this CVE instead of falling through with a stale response
        continue
html = response.read().decode('utf-8')
table = html.replace('PTS', '').split('<table>')[2]
title = table.split('<tr>')[2]
title = re.findall('">(.*?)</a>', title)[0]
print("CVE-%s: %s" % (cve_code, title))
rows = table.replace('</td>', '').split('<tr>')
for row in rows:
if row:
if re.search(os_names, row):
print("PROBLEM: Vulnerability CVE-%s" % cve_code)
print("RESOLVE: %s" % url)
returncode = 1
sys.exit(returncode)
|
[
"urllib2.urlopen",
"urllib2.Request",
"re.findall",
"platform.linux_distribution",
"re.search",
"os.chdir",
"sys.exit"
] |
[((957, 986), 'platform.linux_distribution', 'platform.linux_distribution', ([], {}), '()\n', (984, 986), False, 'import platform\n'), ((1114, 1128), 'os.chdir', 'os.chdir', (['base'], {}), '(base)\n', (1122, 1128), False, 'import os\n'), ((1446, 1492), 'urllib2.Request', 'Request', (["('%s/%s/%s' % (base, release, filters))"], {}), "('%s/%s/%s' % (base, release, filters))\n", (1453, 1492), False, 'from urllib2 import urlopen, Request, HTTPError\n'), ((1552, 1587), 're.findall', 're.findall', (['""">CVE-(.*?)<"""', 'response'], {}), "('>CVE-(.*?)<', response)\n", (1562, 1587), False, 'import re\n'), ((2689, 2709), 'sys.exit', 'sys.exit', (['returncode'], {}), '(returncode)\n', (2697, 2709), False, 'import sys\n'), ((1738, 1758), 'sys.exit', 'sys.exit', (['returncode'], {}), '(returncode)\n', (1746, 1758), False, 'import sys\n'), ((2094, 2106), 'urllib2.Request', 'Request', (['url'], {}), '(url)\n', (2101, 2106), False, 'from urllib2 import urlopen, Request, HTTPError\n'), ((2135, 2151), 'urllib2.urlopen', 'urlopen', (['request'], {}), '(request)\n', (2142, 2151), False, 'from urllib2 import urlopen, Request, HTTPError\n'), ((2334, 2366), 're.findall', 're.findall', (['"""">(.*?)</a>"""', 'title'], {}), '(\'">(.*?)</a>\', title)\n', (2344, 2366), False, 'import re\n'), ((2520, 2544), 're.search', 're.search', (['os_names', 'row'], {}), '(os_names, row)\n', (2529, 2544), False, 'import re\n'), ((1504, 1516), 'urllib2.urlopen', 'urlopen', (['url'], {}), '(url)\n', (1511, 1516), False, 'from urllib2 import urlopen, Request, HTTPError\n')]
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# IMPORTS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
from __future__ import annotations;
from src.local.system import *;
from src.local.maths import *;
from src.local.typing import *;
from src.core.utils import PythonCommand;
from src.core.utils import getFullPath;
from src.customtypes.exports import *;
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# GLOBAL VARIABLES
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
_config_parameters: Dict[str, ConfigParameter] = {
'pattern_config': ConfigParameter[str]('patternConfig'),
'path_app': ConfigParameter[str]('pathApp'),
'path_root': ConfigParameter[str]('pathRoot'),
'file_start': ConfigParameter[str]('fileStart'),
'file_transpiled': ConfigParameter[str]('fileTranspiled'),
'file_output': ConfigParameter[str]('fileOutput'),
'file_stamp': ConfigParameter[str]('fileStamp'),
'with_file_stamp': ConfigParameter[bool]('withFileStamp').setValue(False),
'file_params_py': ConfigParameter[str]('fileParamsPy'),
'with_file_params_py': ConfigParameter[bool]('withFileParamsPy').setValue(False),
'import_param_py': ConfigParameter[str]('importParamPy'),
'param_module_name': ConfigParameter[str]('paramModuleName').setValue('MODULE_GLOBAL_PARAMS'),
'python_path': ConfigParameter[str]('pythonPath').setValue(PythonCommand()),
####
'option_legacy': ConfigParameter[bool]('optionLegacy').setValue(False),
'option_ignore': ConfigParameter[bool]('optionIgnore').setValue(False),
'option_debug': ConfigParameter[bool]('optionDebug').setValue(False),
'option_compile_latex': ConfigParameter[bool]('optionCompileLatex').setValue(False),
'option_show_tree': ConfigParameter[bool]('optionShowTree').setValue(True),
'option_comments_auto': ConfigParameter[bool]('optionCommentsAuto').setValue(True),
'option_comments_on': ConfigParameter[bool]('optionCommentsOn').setValue(True),
'option_insert_bib': ConfigParameter[bool]('optionInsertBib').setValue(False),
'option_overwrite_stamp': ConfigParameter[bool]('optionOverwriteStamp').setValue(True),
'option_overwrite_params': ConfigParameter[bool]('optionOverwriteParams').setValue(True),
'max_length': ConfigParameter[int]('maxLength').setValue(10000),
# <-- prevents transpiler from creating overlarge files
'seed': ConfigParameter[int]('seed'),
'indent_character': ConfigParameter[str]('indentCharacter'),
'indent_character_re': ConfigParameter[str]('indentCharacterRe'),
'censor_symbol': ConfigParameter[str]('censorSymbol').setValue('########'),
'offset_symbol': ConfigParameter[str]('offsetSymbol').setValue(''),
};
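# NB: ConfigParameter appears to be a typed holder whose setValue() returns
# the parameter itself; that fluent interface is what allows the defaults to
# be chained onto the constructor calls in the dict above.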
_dictionary_stamp: Dict[str, Any] = dict();
_dictionary_params: Dict[str, Any] = dict();
_project_tree: ProjectTree = ProjectTree();
_export_vars: Dict[str, Tuple[Any, str]] = dict();
_includes: List[str] = [];
_precompile_lines: List[Tuple[int, Any, str]] = [];
_document_structure: List[str];
_list_of_imports: List[str];
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# METHODS: get/set
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def getPatternConfig() -> str:
return _config_parameters['pattern_config'].value;
def setPatternConfig(value: str):
global _config_parameters;
_config_parameters['pattern_config'].value = value;
return;
def getPathApp() -> str:
return _config_parameters['path_app'].value;
def setPathApp(value: str):
global _config_parameters;
try:
path = getFullPath(value or '.');
except:
raise Exception('Path \033[1m{}\033[0m does not exist and cannot be used as the app path!'.format(value));
_config_parameters['path_app'].value = path;
return;
def getPathRoot() -> str:
return _config_parameters['path_root'].value;
def setPathRoot(value: str):
global _config_parameters;
try:
path = getFullPath(value or '.');
os.chdir(path);
except:
raise Exception('Path \033[1m{}\033[0m does not exist and cannot be used as the root path!'.format(value));
_config_parameters['path_root'].value = value;
return;
def getPythonPath() -> str:
return _config_parameters['python_path'].value;
def setPythonPath(value: str):
global _config_parameters;
_config_parameters['python_path'].value = value;
return;
def getOptionInsertBib() -> bool:
return _config_parameters['option_insert_bib'].value;
def setOptionInsertBib(value: bool):
global _config_parameters;
_config_parameters['option_insert_bib'].value = value;
return;
def getOptionOverwriteStamp() -> bool:
return _config_parameters['option_overwrite_stamp'].value;
def setOptionOverwriteStamp(value: bool):
global _config_parameters;
_config_parameters['option_overwrite_stamp'].value = value;
return;
def getOptionOverwriteParams() -> bool:
return _config_parameters['option_overwrite_params'].value;
def setOptionOverwriteParams(value: bool):
global _config_parameters;
_config_parameters['option_overwrite_params'].value = value;
return;
def getWithFileStamp() -> bool:
return _config_parameters['with_file_stamp'].value;
def setWithFileStamp(value: bool):
global _config_parameters;
_config_parameters['with_file_stamp'].value = value;
return;
def getFileStamp(rel: bool = True) -> str:
path = os.path.abspath(_config_parameters['file_stamp'].value);
return relativisePath(path) if rel else path;
def setFileStamp(value: str):
global _config_parameters;
if value == '':
return;
_config_parameters['file_stamp'].value = value;
return;
def getFileStart(rel: bool = True) -> str:
path = os.path.abspath(_config_parameters['file_start'].value);
return relativisePath(path) if rel else path;
def setFileStart(value: str):
global _config_parameters;
if value == '':
return;
_config_parameters['file_start'].value = value;
return;
def getFileTranspiled(rel: bool = True) -> str:
path = os.path.abspath(_config_parameters['file_transpiled'].value);
return relativisePath(path) if rel else path;
def setFileTranspiled(value: str):
global _config_parameters;
if value == '':
return;
_config_parameters['file_transpiled'].value = value;
return;
def getFileOutput(rel: bool = True) -> str:
path = os.path.abspath(_config_parameters['file_output'].value);
return relativisePath(path) if rel else path;
def getFileOutputBase() -> str:
path = _config_parameters['file_output'].value;
return os.path.splitext(os.path.basename(path))[0];
def setFileOutput(value: str):
global _config_parameters;
if value == '':
return;
_config_parameters['file_output'].value = value;
return;
def getOptionLegacy() -> bool:
return _config_parameters['option_legacy'].value;
def setOptionLegacy(value: bool):
global _config_parameters;
_config_parameters['option_legacy'].value = value;
return;
def getOptionIgnore() -> bool:
return _config_parameters['option_ignore'].value;
def setOptionIgnore(value: bool):
global _config_parameters;
_config_parameters['option_ignore'].value = value;
return;
def getOptionDebug() -> bool:
return _config_parameters['option_debug'].value;
def setOptionDebug(value: bool):
global _config_parameters;
_config_parameters['option_debug'].value = value;
return;
def getOptionCompileLatex() -> bool:
return _config_parameters['option_compile_latex'].value;
def setOptionCompileLatex(value: bool):
global _config_parameters;
_config_parameters['option_compile_latex'].value = value;
return;
def getOptionShowTree() -> bool:
return _config_parameters['option_show_tree'].value;
def setOptionShowTree(value: bool):
global _config_parameters;
_config_parameters['option_show_tree'].value = value;
return;
def getOptionCommentsAuto() -> bool:
return _config_parameters['option_comments_auto'].value;
def setOptionCommentsAuto(value: bool):
global _config_parameters;
_config_parameters['option_comments_auto'].value = value;
return;
def getOptionCommentsOn() -> bool:
return _config_parameters['option_comments_on'].value;
def setOptionCommentsOn(value: bool):
global _config_parameters;
_config_parameters['option_comments_on'].value = value;
return;
def getWithFileParamsPy() -> bool:
return _config_parameters['with_file_params_py'].value;
def setWithFileParamsPy(value: bool):
global _config_parameters;
_config_parameters['with_file_params_py'].value = value;
return;
def getFileParamsPy(rel: bool = True) -> str:
path = os.path.abspath(_config_parameters['file_params_py'].value);
return relativisePath(path) if rel else path;
def setFileParamsPy(value: str):
global _config_parameters;
_config_parameters['file_params_py'].value = value;
return;
def getImportParamsPy() -> str:
return _config_parameters['import_param_py'].value;
def setImportParamsPy(value: str):
global _config_parameters;
_config_parameters['import_param_py'].value = value;
return;
def getParamModuleName() -> str:
return _config_parameters['param_module_name'].value;
def setParamModuleName(value: str):
global _config_parameters;
_config_parameters['param_module_name'].value = value;
return;
def getMaxLengthOutput() -> int:
    return _config_parameters['max_length'].value;
getMaxLengthOuput = getMaxLengthOutput;  # alias kept for the original misspelled name
def setMaxLengthOutput(value: int):
global _config_parameters;
_config_parameters['max_length'].value = value;
return;
def hasSeed() -> bool:
return _config_parameters['seed'].hasValue;
def getSeed() -> int:
return _config_parameters['seed'].value;
def setSeed(value: int):
global _config_parameters;
_config_parameters['seed'].value = value;
return;
def reSeed():
global _config_parameters;
if _config_parameters['seed'].hasValue:
random.seed(_config_parameters['seed'].value);
return;
def getIndentCharacter() -> str:
return _config_parameters['indent_character'].value;
def setIndentCharacter(value: str):
global _config_parameters;
_config_parameters['indent_character'].value = value;
return;
def getIndentCharacterRe() -> str:
return _config_parameters['indent_character_re'].value;
def setIndentCharacterRe(value: str):
global _config_parameters;
_config_parameters['indent_character_re'].value = value;
return;
def getCensorSymbol() -> str:
return _config_parameters['censor_symbol'].value;
def setCensorSymbol(value: str):
global _config_parameters;
_config_parameters['censor_symbol'].value = value;
return;
def getOffsetSymbol() -> str:
return _config_parameters['offset_symbol'].value;
def setOffsetSymbol(value: str):
global _config_parameters;
_config_parameters['offset_symbol'].value = value;
return;
def getDictionaryStamp() -> Dict[str, Any]:
return _dictionary_stamp;
def setDictionaryStamp(value: Dict[str, Any]):
global _dictionary_stamp;
_dictionary_stamp = value;
return;
def getDictionaryParams() -> Dict[str, Any]:
    return _dictionary_params;
getDictionaryParms = getDictionaryParams;  # alias kept for the original misspelled name
def setDictionaryParams(value: Dict[str, Any]):
global _dictionary_params;
_dictionary_params = value;
return;
def getProjectTree() -> ProjectTree:
return _project_tree;
def setProjectTree(value: ProjectTree):
global _project_tree;
_project_tree = value;
return;
def getExportVars() -> Dict[str, Tuple[Any, str]]:
return _export_vars;
def setExportVars(value: Dict[str, Tuple[Any, str]]):
global _export_vars;
_export_vars = value;
return;
def setExportVarsKeyValue(key: str, value: Any, codedvalue: str):
global _export_vars;
_export_vars[key] = (value, codedvalue);
return;
def getIncludes() -> List[str]:
return _includes;
def setIncludes(value: List[str]):
global _includes;
_includes = value;
return;
def getPrecompileLines() -> List[Tuple[int, Any, str]]:
return _precompile_lines;
def setPrecompileLines(value: List[Tuple[int, Any, str]]):
global _precompile_lines;
_precompile_lines = value;
return;
def getDocumentStructure() -> List[str]:
return _document_structure;
def setDocumentStructure(value: List[str]):
global _document_structure;
_document_structure = value;
return;
def getListOfImports() -> List[str]:
return _list_of_imports;
def setListOfImports(value: List[str]):
global _list_of_imports;
_list_of_imports = value;
return;
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# AUXILIARY METHODS
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
def relativisePath(path: str):
return os.path.relpath(path=path, start=getPathRoot());
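# ------------------------------------------------------------------
# Usage sketch (illustration only, not part of the module): assumes
# _config_parameters and getFullPath are defined earlier in the file,
# as the accessors above require; the values shown are hypothetical.
#
# setPathRoot('.');             # resolve and chdir into the project root
# setFileOutput('main.tex');    # register the transpiler output file
# print(getFileOutputBase());   # -> 'main'
# print(getFileOutput());       # path relative to getPathRoot()
# ------------------------------------------------------------------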
|
[
"src.core.utils.getFullPath",
"src.core.utils.PythonCommand"
] |
[((1618, 1633), 'src.core.utils.PythonCommand', 'PythonCommand', ([], {}), '()\n', (1631, 1633), False, 'from src.core.utils import PythonCommand\n'), ((3951, 3976), 'src.core.utils.getFullPath', 'getFullPath', (["(value or '.')"], {}), "(value or '.')\n", (3962, 3976), False, 'from src.core.utils import getFullPath\n'), ((4328, 4353), 'src.core.utils.getFullPath', 'getFullPath', (["(value or '.')"], {}), "(value or '.')\n", (4339, 4353), False, 'from src.core.utils import getFullPath\n')]
|
import string
from itertools import product
import numpy as np
from matplotlib import pyplot as plt
import h5py
plt.rc('text', usetex=True)
plt.rc('text.latex', preamble=r'\usepackage[varg]{txfonts}')
plt.rc('axes', titlesize=54)
plt.rc('font', family='serif', size=12)
FOUT = "wavelength.pdf"
def main():
plt.figure(figsize=(8, 10))
plt.subplots_adjust(top=0.95, bottom=0.1)
lst = list(product([10, 12], [10, 20])) + [[0, 20]]
for i, (h, R) in enumerate(lst):
ax = plt.subplot(5, 1, i + 1)
if i == 4:
move_down(ax)
plot_panel(ax, h, R, letter=string.ascii_lowercase[i])
if i != 4:
noxticks(ax)
else:
ax.set_xlabel("pixel")
if i == 0:
ax.legend(["777 nm", "337 nm"])
ax.set_ylabel("brightness (a.u.)")
ax.set_xlim([472, 537])
ax.axvline(512, color='k', lw=0.75)
ax.grid()
plt.savefig(FOUT)
#plt.show()
def plot_panel(ax, h, R, letter):
plot_line(ax, h, R, 777, color='#ff7777')
plot_line(ax, h, R, 337, color='#7799bb')
    # raw f-strings: a plain "\bf" would be parsed as a backspace escape
    if h > 0:
        title = rf"\Large{{{{\bf {letter}.}} {h} km, {R} µm}}"
    else:
        title = rf"\Large{{{{\bf {letter}.}} 10-12 km, {R} µm}}"
    ax.text(0.02, 0.85, title, transform=ax.transAxes)
axins1 = ax.inset_axes([0.025, 0.1, 0.15, 0.6])
plot_map(axins1, h, R, 777)
axins2 = ax.inset_axes([0.18, 0.1, 0.15, 0.6])
plot_map(axins2, h, R, 337)
def plot_line(ax, h, R, lmbd, **kwargs):
if h != 0:
fname = f"wavelength_{lmbd}nm_{h}km_{R}um.h5"
else:
fname = f"wavelength_extended_{lmbd}nm_{R}um.h5"
fp = h5py.File(fname, "r")
obs = 1
# Note that the image is transposed wrt the julia array.
img = np.array(fp[f"obs{obs:05d}/image"])
width, height = img.shape
    x = np.arange(width)
v = img[:, height // 2]
ax.plot(x, v / np.amax(v), **kwargs)
def plot_map(ax, h, R, lmbd):
if h != 0:
fname = f"wavelength_{lmbd}nm_{h}km_{R}um.h5"
else:
fname = f"wavelength_extended_{lmbd}nm_{R}um.h5"
fp = h5py.File(fname, "r")
obs = 1
# Note that the image is transposed wrt the julia array.
img = np.array(fp[f"obs{obs:05d}/image"])
width, height = img.shape
ax.pcolormesh(img[492:532, 492:532], cmap="gnuplot2", rasterized=True)
noxticks(ax)
noyticks(ax)
ax.tick_params('both', length=2, width=0.5, which='major')
ax.axhline(512 - 492, lw=0.75, c="#777777")
    ax.text(0.03, 0.05, rf"\small{{{lmbd} nm}}", color="w",
transform=ax.transAxes)
def move_down(ax):
[left, bottom, width, height] = ax.get_position().bounds
ax.set_position([left, bottom - 0.05, width, height])
def noxticks(ax):
""" Remove xticks from the plot. """
loc = ax.get_xticks()
ax.set_xticklabels(['' for l in loc])
def noyticks(ax):
""" Remove xticks from the plot. """
loc = ax.get_yticks()
ax.set_yticklabels(['' for l in loc])
if __name__ == '__main__':
main()
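# Aside (sketch, not part of the original script): instead of blanking the
# tick labels as noxticks()/noyticks() do above, matplotlib can also hide
# the labels directly while keeping the tick positions untouched:
#
#     ax.tick_params(labelbottom=False)  # hide x tick labels
#     ax.tick_params(labelleft=False)    # hide y tick labels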
|
[
"matplotlib.pyplot.subplot",
"h5py.File",
"matplotlib.pyplot.gca",
"numpy.amax",
"matplotlib.pyplot.figure",
"numpy.array",
"matplotlib.pyplot.rc",
"numpy.arange",
"itertools.product",
"matplotlib.pyplot.subplots_adjust",
"matplotlib.pyplot.savefig"
] |
[((218, 245), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text"""'], {'usetex': '(True)'}), "('text', usetex=True)\n", (224, 245), True, 'from matplotlib import pyplot as plt\n'), ((246, 306), 'matplotlib.pyplot.rc', 'plt.rc', (['"""text.latex"""'], {'preamble': '"""\\\\usepackage[varg]{txfonts}"""'}), "('text.latex', preamble='\\\\usepackage[varg]{txfonts}')\n", (252, 306), True, 'from matplotlib import pyplot as plt\n'), ((307, 335), 'matplotlib.pyplot.rc', 'plt.rc', (['"""axes"""'], {'titlesize': '(54)'}), "('axes', titlesize=54)\n", (313, 335), True, 'from matplotlib import pyplot as plt\n'), ((336, 375), 'matplotlib.pyplot.rc', 'plt.rc', (['"""font"""'], {'family': '"""serif"""', 'size': '(12)'}), "('font', family='serif', size=12)\n", (342, 375), True, 'from matplotlib import pyplot as plt\n'), ((430, 457), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 10)'}), '(figsize=(8, 10))\n', (440, 457), True, 'from matplotlib import pyplot as plt\n'), ((462, 503), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'top': '(0.95)', 'bottom': '(0.1)'}), '(top=0.95, bottom=0.1)\n', (481, 503), True, 'from matplotlib import pyplot as plt\n'), ((1091, 1108), 'matplotlib.pyplot.savefig', 'plt.savefig', (['FOUT'], {}), '(FOUT)\n', (1102, 1108), True, 'from matplotlib import pyplot as plt\n'), ((1846, 1867), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (1855, 1867), False, 'import h5py\n'), ((1956, 1991), 'numpy.array', 'np.array', (["fp[f'obs{obs:05d}/image']"], {}), "(fp[f'obs{obs:05d}/image'])\n", (1964, 1991), True, 'import numpy as np\n'), ((2326, 2347), 'h5py.File', 'h5py.File', (['fname', '"""r"""'], {}), "(fname, 'r')\n", (2335, 2347), False, 'import h5py\n'), ((2436, 2471), 'numpy.array', 'np.array', (["fp[f'obs{obs:05d}/image']"], {}), "(fp[f'obs{obs:05d}/image'])\n", (2444, 2471), True, 'import numpy as np\n'), ((615, 639), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(1)', '(i + 1)'], {}), '(5, 1, i + 1)\n', (626, 639), True, 'from matplotlib import pyplot as plt\n'), ((2034, 2050), 'numpy.arange', 'np.arange', (['width'], {}), '(width)\n', (2043, 2050), True, 'import numpy as np\n'), ((2052, 2069), 'numpy.arange', 'np.arange', (['height'], {}), '(height)\n', (2061, 2069), True, 'import numpy as np\n'), ((519, 546), 'itertools.product', 'product', (['[10, 12]', '[10, 20]'], {}), '([10, 12], [10, 20])\n', (526, 546), False, 'from itertools import product\n'), ((2122, 2132), 'numpy.amax', 'np.amax', (['v'], {}), '(v)\n', (2129, 2132), True, 'import numpy as np\n'), ((1460, 1469), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (1467, 1469), True, 'from matplotlib import pyplot as plt\n')]
|
# -*- coding: utf-8 -*-
r"""
Created on Fri Nov 24 21:38:04 2017
@author: _Lantian
Instructions for pyinstaller:
after installing pyinstaller with pip,
run pyinstaller.exe from ...\Scripts the same way pip is run,
and pass -F plus the script path (-F bundles everything into one exe).
Example: pyinstaller.exe -F E:\xxxx
Then the exe file can be found in ...\Scripts\dist
"""
import os
import csv
import tkinter as tk
from tkinter import ttk
from tkinter import messagebox
def walk(dirname, file_list):
"""
This function is from a book called Think Python
written by <NAME>.
It walks through a directory, gets names of all files
and calls itself recursively on all the directories
"""
for name in os.listdir(dirname):
path=os.path.join(dirname,name)
if os.path.isfile(path):
file_list.append(path)
else:
walk(path, file_list)
return file_list
def save_csv(file,target_list):
"""write list into csv.
argument newline must be used,
or there will be empty lines between every two lines of codes"""
csvFile=open(file,'w', newline='',encoding='utf_8_sig')
writer=csv.writer(csvFile)
for i in range(len(target_list)):
writer.writerow(target_list[i])
csvFile.close()
def write_list(dirname):
file_list=[]
file_list = walk(dirname, file_list)
#split the folder names and file names into different columns
lists=[item.split('\\') for item in file_list]
#write data into csv files
save_csv(dirname+r'\file_names.csv',lists)
def get_path():
dirname=name.get()
write_list(dirname)
messagebox.showinfo(title='Succeed',message='output is in:'+dirname)
#GUI
win=tk.Tk()
#add a title
win.title('For Maggie: Put file names in a directory into csv')
#add a label
ttk.Label(win,text=r'Enter path (example: E:\programs\files)'
).grid(column=0,row=0)
#add a text box entry widget
name=tk.StringVar()
name_entered=ttk.Entry(win,width=70,textvariable=name)
name_entered.grid(column=1,row=0)
#add a click button
action=ttk.Button(win,text='Run',command=get_path)
action.grid(column=0,row=2)
#place cursor
name_entered.focus()
#start GUI
win.mainloop()
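# Aside (sketch, not part of the program): the recursive walk() above is
# equivalent to a comprehension over the standard-library os.walk generator:
#
# def walk_stdlib(dirname):
#     return [os.path.join(root, name)
#             for root, _dirs, names in os.walk(dirname)
#             for name in names]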
|
[
"tkinter.StringVar",
"os.listdir",
"tkinter.ttk.Label",
"tkinter.ttk.Entry",
"csv.writer",
"tkinter.messagebox.showinfo",
"os.path.isfile",
"tkinter.ttk.Button",
"os.path.join",
"tkinter.Tk"
] |
[((1743, 1750), 'tkinter.Tk', 'tk.Tk', ([], {}), '()\n', (1748, 1750), True, 'import tkinter as tk\n'), ((1970, 1984), 'tkinter.StringVar', 'tk.StringVar', ([], {}), '()\n', (1982, 1984), True, 'import tkinter as tk\n'), ((1998, 2041), 'tkinter.ttk.Entry', 'ttk.Entry', (['win'], {'width': '(70)', 'textvariable': 'name'}), '(win, width=70, textvariable=name)\n', (2007, 2041), False, 'from tkinter import ttk\n'), ((2101, 2146), 'tkinter.ttk.Button', 'ttk.Button', (['win'], {'text': '"""Run"""', 'command': 'get_path'}), "(win, text='Run', command=get_path)\n", (2111, 2146), False, 'from tkinter import ttk\n'), ((760, 779), 'os.listdir', 'os.listdir', (['dirname'], {}), '(dirname)\n', (770, 779), False, 'import os\n'), ((1196, 1215), 'csv.writer', 'csv.writer', (['csvFile'], {}), '(csvFile)\n', (1206, 1215), False, 'import csv\n'), ((1663, 1734), 'tkinter.messagebox.showinfo', 'messagebox.showinfo', ([], {'title': '"""Succeed"""', 'message': "('output is in:' + dirname)"}), "(title='Succeed', message='output is in:' + dirname)\n", (1682, 1734), False, 'from tkinter import messagebox\n'), ((794, 821), 'os.path.join', 'os.path.join', (['dirname', 'name'], {}), '(dirname, name)\n', (806, 821), False, 'import os\n'), ((832, 852), 'os.path.isfile', 'os.path.isfile', (['path'], {}), '(path)\n', (846, 852), False, 'import os\n'), ((1841, 1905), 'tkinter.ttk.Label', 'ttk.Label', (['win'], {'text': '"""Enter path (example: E:\\\\programs\\\\files)"""'}), "(win, text='Enter path (example: E:\\\\programs\\\\files)')\n", (1850, 1905), False, 'from tkinter import ttk\n')]
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.utils import is_valid_string, filter_model
from kubernetes.models.v1.KeyToPath import KeyToPath
class ConfigMapProjection(object):
"""
https://kubernetes.io/docs/api-reference/v1.8/#configmapprojection-v1-core
Adapts a ConfigMap into a projected volume.
The contents of the target ConfigMap's Data field will be presented in a projected volume as files using the keys
in the Data field as the file names, unless the items element is populated with specific mappings of keys to paths.
Note that this is identical to a configmap volume source without the default mode.
"""
def __init__(self, model=None):
super(ConfigMapProjection, self).__init__()
self._items = None
self._name = None
self._optional = None
if model is not None:
m = filter_model(model)
self._build_with_model(m)
def _build_with_model(self, model=None):
if 'items' in model:
self.items = model['items']
if 'name' in model:
self.name = model['name']
if 'optional' in model:
self.optional = model['optional']
# ------------------------------------------------------------------------------------- items
@property
def items(self):
return self._items
@items.setter
def items(self, items=None):
if not isinstance(items, list):
            raise SyntaxError('ConfigMapProjection: items: [ {0} ] is invalid.'.format(items))
modeled_items = list()
for i in items:
            tmp_item = KeyToPath(model=i)
            modeled_items.append(tmp_item)
        # store the validated KeyToPath objects, not the raw dicts, so that
        # serialize() below can assert isinstance(i, KeyToPath)
        self._items = modeled_items
# ------------------------------------------------------------------------------------- name
@property
def name(self):
return self._name
@name.setter
def name(self, name=None):
if not is_valid_string(name):
            raise SyntaxError('ConfigMapProjection: name: [ {0} ] is invalid.'.format(name))
self._name = name
# ------------------------------------------------------------------------------------- optional
@property
def optional(self):
return self._optional
@optional.setter
def optional(self, v=None):
if not isinstance(v, bool):
            raise SyntaxError('ConfigMapProjection: optional: [ {0} ] is invalid.'.format(v))
self._optional = v
# ------------------------------------------------------------------------------------- serialize
def serialize(self):
data = {}
if self.items is not None:
tmp_items = list()
for i in self.items:
assert isinstance(i, KeyToPath)
tmp_items.append(i.serialize())
data['items'] = tmp_items
if self.name is not None:
data['name'] = self.name
if self.optional is not None:
data['optional'] = self.optional
return data
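# Usage sketch (illustration only; field values are hypothetical and assume
# KeyToPath accepts 'key'/'path' fields as in the Kubernetes API):
#
# proj = ConfigMapProjection(model={
#     'name': 'my-config',
#     'optional': False,
#     'items': [{'key': 'config.yaml', 'path': 'etc/config.yaml'}],
# })
# print(proj.serialize())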
|
[
"kubernetes.utils.is_valid_string",
"kubernetes.utils.filter_model",
"kubernetes.models.v1.KeyToPath.KeyToPath"
] |
[((1017, 1036), 'kubernetes.utils.filter_model', 'filter_model', (['model'], {}), '(model)\n', (1029, 1036), False, 'from kubernetes.utils import is_valid_string, filter_model\n'), ((1763, 1781), 'kubernetes.models.v1.KeyToPath.KeyToPath', 'KeyToPath', ([], {'model': 'i'}), '(model=i)\n', (1772, 1781), False, 'from kubernetes.models.v1.KeyToPath import KeyToPath\n'), ((2076, 2097), 'kubernetes.utils.is_valid_string', 'is_valid_string', (['name'], {}), '(name)\n', (2091, 2097), False, 'from kubernetes.utils import is_valid_string, filter_model\n')]
|
from drf_problems import PROBLEM_CODE_CHOICES, PROBLEM_EXCEPTION_MAP
def register_exception(exc_cls):
code = getattr(exc_cls, 'code', exc_cls.default_code)
PROBLEM_EXCEPTION_MAP[code] = exc_cls
PROBLEM_CODE_CHOICES.append((code, code))
class register(object):
def __init__(self, cls):
self.cls = cls
register_exception(cls)
def __call__(self, *args, **kwargs):
return self.cls(*args, **kwargs)
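# Usage sketch (hypothetical exception class; assumes DRF's APIException
# interface with default_code/status_code attributes):
#
# from rest_framework.exceptions import APIException
#
# @register
# class QuotaExceeded(APIException):
#     status_code = 429
#     default_code = 'quota_exceeded'
#     default_detail = 'Request quota exceeded.'
#
# After decoration, 'quota_exceeded' appears in PROBLEM_CODE_CHOICES and
# maps back to QuotaExceeded in PROBLEM_EXCEPTION_MAP.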
|
[
"drf_problems.PROBLEM_CODE_CHOICES.append"
] |
[((208, 249), 'drf_problems.PROBLEM_CODE_CHOICES.append', 'PROBLEM_CODE_CHOICES.append', (['(code, code)'], {}), '((code, code))\n', (235, 249), False, 'from drf_problems import PROBLEM_CODE_CHOICES, PROBLEM_EXCEPTION_MAP\n')]
|
"""
A file for all models' weight initialization functions
"""
import torch
from torch import nn
import math
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
if type(m) in [nn.GRU, nn.LSTM, nn.RNN]:
for name, param in m.named_parameters():
if 'weight_ih' in name:
torch.nn.init.xavier_uniform_(param.data)
elif 'weight_hh' in name:
torch.nn.init.orthogonal_(param.data)
elif 'bias' in name:
param.data.fill_(0)
if isinstance(m, nn.Linear):
nn.init.xavier_uniform_(m.weight)
if m.bias is not None:
nn.init.constant_(m.bias, 0)
def disable_conv_bias(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.bias = None
def weights_init_normal(m):
"""
Initialize the weights of Convolution2D and BatchNorm2D with normal.
:param m:
:return:
"""
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, 0.02)
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
def init_model_weights(model):
    ### initialize
    for m in model.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
elif isinstance(m, nn.Linear):
m.bias.data.zero_()
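# Usage sketch (illustration only): nn.Module.apply walks every submodule,
# so the initializers above are typically applied to a whole model at once:
#
# model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16))
# model.apply(weights_init)        # Kaiming conv / normal BatchNorm init
# model.apply(disable_conv_bias)   # optionally drop conv biases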
|
[
"torch.nn.init.kaiming_normal_",
"math.sqrt",
"torch.nn.init.xavier_uniform_",
"torch.nn.init.constant_",
"torch.nn.init.orthogonal_"
] |
[((260, 330), 'torch.nn.init.kaiming_normal_', 'nn.init.kaiming_normal_', (['m.weight'], {'mode': '"""fan_out"""', 'nonlinearity': '"""relu"""'}), "(m.weight, mode='fan_out', nonlinearity='relu')\n", (283, 330), False, 'from torch import nn\n'), ((848, 881), 'torch.nn.init.xavier_uniform_', 'nn.init.xavier_uniform_', (['m.weight'], {}), '(m.weight)\n', (871, 881), False, 'from torch import nn\n'), ((927, 955), 'torch.nn.init.constant_', 'nn.init.constant_', (['m.bias', '(0)'], {}), '(m.bias, 0)\n', (944, 955), False, 'from torch import nn\n'), ((598, 639), 'torch.nn.init.xavier_uniform_', 'torch.nn.init.xavier_uniform_', (['param.data'], {}), '(param.data)\n', (627, 639), False, 'import torch\n'), ((1654, 1672), 'math.sqrt', 'math.sqrt', (['(2.0 / n)'], {}), '(2.0 / n)\n', (1663, 1672), False, 'import math\n'), ((696, 733), 'torch.nn.init.orthogonal_', 'torch.nn.init.orthogonal_', (['param.data'], {}), '(param.data)\n', (721, 733), False, 'import torch\n')]
|
"""
This setup file installs packages to test mypy's PEP 561 implementation
"""
from distutils.core import setup
setup(
name='typedpkg_ns_b-stubs',
author="The mypy team",
version='0.1',
namespace_packages=['typedpkg_ns-stubs'],
package_data={'typedpkg_ns-stubs.b': ['__init__.pyi', 'bbb.pyi']},
packages=['typedpkg_ns-stubs.b'],
)
|
[
"distutils.core.setup"
] |
[((121, 350), 'distutils.core.setup', 'setup', ([], {'name': '"""typedpkg_ns_b-stubs"""', 'author': '"""The mypy team"""', 'version': '"""0.1"""', 'namespace_packages': "['typedpkg_ns-stubs']", 'package_data': "{'typedpkg_ns-stubs.b': ['__init__.pyi', 'bbb.pyi']}", 'packages': "['typedpkg_ns-stubs.b']"}), "(name='typedpkg_ns_b-stubs', author='The mypy team', version='0.1',\n namespace_packages=['typedpkg_ns-stubs'], package_data={\n 'typedpkg_ns-stubs.b': ['__init__.pyi', 'bbb.pyi']}, packages=[\n 'typedpkg_ns-stubs.b'])\n", (126, 350), False, 'from distutils.core import setup\n')]
|
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
from tensorflow.contrib.tensorboard.plugins import projector
# useful libraries for data preprocessing
import tensorlayer as tl
from tensorlayer.layers import *
import numpy as np
import time
import os
from gen_data import max_len
from eunn import EUNNCell
PAD_ID = 0
UNK_ID = 1
GO_ID = 2
EOS_ID = 3
capacity = 8
# complex initialization
def complex_initializer(base_initializer):
f = base_initializer()
def initializer(*args, dtype=tf.complex64, **kwargs):
real = f(*args, **kwargs)
imag = f(*args, **kwargs)
return tf.complex(real, imag)
return initializer
# this is the complex seq2seq model
class seq2seq_model():
def __init__(self,
vocab_size,
embedding_size,
num_layer,
max_gradient_norm,
batch_size_num,
learning_rate,
dropout):
self.batch_size = batch_size_num
with tf.variable_scope('seq2seq') as scope:
self.encoder_input = tf.placeholder(tf.int32, [None, None])
self.decoder_output = tf.placeholder(tf.int32, [None, None])
self.decoder_input = tf.placeholder(tf.int32,[None, None])
self.target_weight = tf.placeholder(tf.float32, [None, None]) # for training or updating
self.encoder_length = retrieve_seq_length_op2(self.encoder_input)
self.decoder_length = retrieve_seq_length_op2(self.decoder_output)
batch_size = batch_size_num
decoder_output = self.decoder_output
target_weight = self.target_weight
self.embedding = tf.get_variable('embedding', [vocab_size, embedding_size])
# encoder and decoder share the same weight
encoder_embedded = tf.nn.embedding_lookup(self.embedding, self.encoder_input)
decoder_embedded = tf.nn.embedding_lookup(self.embedding, self.decoder_input)
with tf.variable_scope('encoder'):
encoder_cell = EUNNCell(num_units=embedding_size,capacity=capacity, fft=False, cplex=False)
encoder_output, encoder_state = tf.nn.dynamic_rnn(encoder_cell, encoder_embedded,
self.encoder_length, dtype=tf.float32)
with tf.variable_scope('decoder') as decoder_scope:
# train or evaluate
decoder_cell = EUNNCell(num_units=embedding_size,capacity=capacity, fft=False, cplex=False)
helper = tf.contrib.seq2seq.TrainingHelper(decoder_embedded, self.decoder_length)
projection_layer = layers_core.Dense(vocab_size)
decoder = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper, encoder_state, output_layer=projection_layer)
output, decoder_state, _ = tf.contrib.seq2seq.dynamic_decode(decoder, scope=decoder_scope, impute_finished=True)
logits = output.rnn_output
self.result_train = output.sample_id
self.decoder_state = decoder_state
# inference (sample decode)
helper_sample = tf.contrib.seq2seq.SampleEmbeddingHelper(self.embedding,
start_tokens=tf.fill([batch_size], GO_ID), end_token=EOS_ID)
decoder_sample = tf.contrib.seq2seq.BasicDecoder(decoder_cell, helper_sample, encoder_state,
output_layer=projection_layer)
output, _, _ = tf.contrib.seq2seq.dynamic_decode(decoder_sample, impute_finished=True,
scope=decoder_scope)
self.result_sample = output.sample_id
params = scope.trainable_variables()
# update for training
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=decoder_output, logits=logits) # max_len * batch
self.loss_train = tf.reduce_sum(target_weight * cross_entropy) / tf.cast(batch_size, tf.float32)
self.perplexity = tf.exp(tf.reduce_sum(target_weight * cross_entropy) / tf.reduce_sum(target_weight))
# gradient clipping
#optimizer = tf.train.AdamOptimizer(learning_rate)
#gvs = optimizer.compute_gradients(self.loss_train, params)
#capped_gvs = [(tf.clip_by_value(grad, -max_gradient_norm, max_gradient_norm), var) for grad, var in gvs]
#self.opt_train = optimizer.apply_gradients(capped_gvs)
optimizer = tf.train.AdamOptimizer(learning_rate)
self.opt_train = optimizer.minimize(self.loss_train)
# Save down the summary of the model
tf.summary.scalar('loss_wif_weight',self.loss_train)
tf.summary.scalar('perplexity',self.perplexity)
# Merge all summary into one
self.summary_op = tf.summary.merge_all()
def train(self, sess, encoder_input, decoder_output, decoder_input, mask):
feed_dict = {}
feed_dict[self.encoder_input] = encoder_input
feed_dict[self.decoder_output] = decoder_output
feed_dict[self.decoder_input] = decoder_input
feed_dict[self.target_weight] = mask
perplexity, result, loss, state, _ , summary= sess.run([self.perplexity, self.result_train, self.loss_train, self.decoder_state, self.opt_train, self.summary_op], feed_dict=feed_dict)
return(perplexity, loss, summary)
def test(self,sess, encoder_input, decoder_output, decoder_input, mask):
feed_dict = {}
feed_dict[self.encoder_input] = encoder_input
feed_dict[self.decoder_output] = decoder_output
feed_dict[self.decoder_input] = decoder_input
feed_dict[self.target_weight] = mask
perplexity, result, loss, stat= sess.run([self.perplexity, self.result_train, self.loss_train, self.decoder_state], feed_dict=feed_dict)
return(perplexity, loss)
def inference(self, sess, encoder_input, mode):
feed_dict = {}
feed_dict[self.encoder_input] = encoder_input
result = None
if mode == 'sample':
result = sess.run(self.result_sample, feed_dict=feed_dict)
return result
# functions for data processing
def load_vocab(filename):
'''
input : folder name
output : vocab dictionary in the form of {vocab : index}
'''
vocab = {}
with open(filename, encoding='utf-8', errors='ignore') as f:
for idx, line in enumerate(f):
vocab[line.strip()] = idx
return vocab
def sentence2idx(line, vocab, chinese=False):
'''
input : whole sentence, vocab dictionary
output : return a list of sentence with index, e.g. [24,199,256]
'''
# return unknown key if the token does not exist in the vocab folder
if not chinese:
return [vocab.get(token, UNK_ID) for token in line.split()]
else:
return [vocab.get(token, UNK_ID) for token in list(line)]
def get_rev_vocab(vocab):
return {idx: key for key, idx in vocab.items()}
if __name__ == "__main__":
batch_size = 64
model_name = 'complex_{}_{}capacity'.format(max_len,capacity)
chinese = False
inputs = open('data/reverse_{}/train/input.txt'.format(max_len), encoding='utf-8', errors='ignore').read().split('\n')
targets = open('data/reverse_{}/train/output.txt'.format(max_len), encoding='utf-8', errors='ignore').read().split('\n')
vocab = load_vocab('data/reverse_{}/train/vocab.txt'.format(max_len))
test_inputs = open('data/reverse_{}/test/input.txt'.format(max_len), encoding='utf-8', errors='ignore').read().split('\n')
test_targets = open('data/reverse_{}/test/output.txt'.format(max_len), encoding='utf-8', errors='ignore').read().split('\n')
#print(inputs[:5])
#print(targets[:5])
rev_vocab = get_rev_vocab(vocab)
trainX = [sentence2idx(x, vocab, chinese) for x in inputs]
trainY = [sentence2idx(y, vocab, chinese) for y in targets]
testX = [sentence2idx(x, vocab, chinese) for x in test_inputs]
testY = [sentence2idx(y, vocab, chinese) for y in test_targets]
#print(len(trainX))
#print(trainY[:5])
# training for the seq2seq model
seq2seq = seq2seq_model(vocab_size=len(vocab),
embedding_size=200,
num_layer=4,
max_gradient_norm=5,
batch_size_num=batch_size,
learning_rate=0.0001,
dropout=0.5
)
n_epoch = 100
n_step = int(len(trainX)/batch_size)
n_test_step = int(len(testX)/batch_size)
saver = tf.train.Saver(tf.global_variables(), keep_checkpoint_every_n_hours=1.0)
sess = tf.Session()
try:
loader = tf.train.import_meta_graph('{}/model.ckpt.meta'.format(model_name))
loader.restore(sess, tf.train.latest_checkpoint('{}'.format(model_name)))
print('load finished')
    except Exception:
sess.run(tf.global_variables_initializer())
print('load failed')
# initialize summary
summary_writer = tf.summary.FileWriter('{}'.format(model_name),graph=sess.graph)
with open('{}/metadata.tsv'.format(model_name),'w',encoding='utf-8') as f:
for word in vocab:
f.write('{}\n'.format(word))
f.write('\n')
# training
summary_time = 0
try:
for epoch in range(n_epoch):
n_iter = 0
perplexity, loss = 0, 0
for X, Y in tl.iterate.minibatches(inputs=trainX, targets=trainY, batch_size=batch_size, shuffle=False):
X = tl.prepro.pad_sequences(X)
_decoder_output = tl.prepro.sequences_add_end_id(Y, end_id=EOS_ID)
_decoder_output = tl.prepro.pad_sequences(_decoder_output)
_decoder_input = tl.prepro.sequences_add_start_id(Y, start_id=GO_ID, remove_last=False)
_decoder_input = tl.prepro.pad_sequences(_decoder_input)
_target_weight = tl.prepro.sequences_get_mask(_decoder_output)
train_perplexity,train_loss,summary = seq2seq.train(sess,X,_decoder_output, _decoder_input, _target_weight)
summary_writer.add_summary(summary,summary_time)
perplexity += train_perplexity
loss += train_loss
n_iter +=1
summary_time +=1
'''
# generate result during training
if n_iter % 100 == 0:
results = seq2seq.inference(sess, X ,'sample')
for inputs,targets in zip(X[:10],results[:10]):
try:
tmp_i = [rev_vocab[x] for x in inputs if rev_vocab[x]!='<PAD>']
tmp_o = [rev_vocab[x] for x in targets]
print('I : {}'.format(" ".join(tmp_i)))
print('O : {}'.format(" ".join(tmp_o[:tmp_o.index('<EOS>')])))
except:
print('error when decoding')
'''
test_iter = 0
test_perplexity, test_loss = 0,0
for X, Y in tl.iterate.minibatches(inputs=testX, targets=testY, batch_size=batch_size, shuffle=False):
X = tl.prepro.pad_sequences(X)
_decoder_output = tl.prepro.sequences_add_end_id(Y, end_id=EOS_ID)
_decoder_output = tl.prepro.pad_sequences(_decoder_output)
_decoder_input = tl.prepro.sequences_add_start_id(Y, start_id=GO_ID, remove_last=False)
_decoder_input = tl.prepro.pad_sequences(_decoder_input)
_target_weight = tl.prepro.sequences_get_mask(_decoder_output)
                # accumulate per-batch test metrics over the whole test set
                batch_perplexity, batch_loss = seq2seq.test(sess,X,_decoder_output, _decoder_input, _target_weight)
                test_perplexity += batch_perplexity
                test_loss += batch_loss
test_iter += 1
# generate result during training
if test_iter % 10 == 0:
results = seq2seq.inference(sess, X ,'sample')
for inputs,targets in zip(X[:10],results[:10]):
try:
tmp_i = [rev_vocab[x] for x in inputs if rev_vocab[x]!='<PAD>']
tmp_o = [rev_vocab[x] for x in targets]
print('I : {}'.format(" ".join(tmp_i)))
print('O : {}'.format(" ".join(tmp_o[:tmp_o.index('<EOS>')])))
                        except Exception:
print('error when decoding')
print("epoch perplexity : %.5f" % (perplexity/n_iter))
print("epoch loss: %.5f" % (loss/n_iter))
print("epoch test perplexity : %.5f" % (test_perplexity/test_iter))
print("epoch test loss: %.5f" % (test_loss/test_iter))
saver.save(sess, '{}/model.ckpt'.format(model_name))
config = projector.ProjectorConfig()
embedding_config = config.embeddings.add()
embedding_config.tensor_name = seq2seq.embedding.name
embedding_config.metadata_path = 'metadata.tsv'
projector.visualize_embeddings(summary_writer,config)
except KeyboardInterrupt:
saver.save(sess, '{}/model.ckpt'.format(model_name))
config = projector.ProjectorConfig()
embedding_config = config.embeddings.add()
embedding_config.tensor_name = seq2seq.embedding.name
embedding_config.metadata_path = 'metadata.tsv'
projector.visualize_embeddings(summary_writer,config)
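# Note (sketch): complex_initializer above is defined but never called in
# this script; the intended usage would be wrapping a real-valued TF1
# initializer into a complex-valued one (hypothetical example):
#
# init = complex_initializer(tf.orthogonal_initializer)
# w = tf.get_variable('w_cplx', [4, 4], dtype=tf.complex64, initializer=init)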
|
[
"tensorflow.reduce_sum",
"tensorflow.global_variables",
"tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig",
"tensorflow.contrib.seq2seq.BasicDecoder",
"tensorflow.complex",
"tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings",
"tensorflow.get_variable",
"tensorflow.python.layers.core.Dense",
"tensorflow.variable_scope",
"tensorflow.placeholder",
"tensorflow.cast",
"tensorflow.summary.merge_all",
"tensorlayer.iterate.minibatches",
"tensorflow.summary.scalar",
"tensorflow.nn.embedding_lookup",
"tensorflow.global_variables_initializer",
"tensorflow.Session",
"tensorlayer.prepro.sequences_add_end_id",
"tensorlayer.prepro.pad_sequences",
"tensorflow.contrib.seq2seq.TrainingHelper",
"eunn.EUNNCell",
"tensorlayer.prepro.sequences_add_start_id",
"tensorlayer.prepro.sequences_get_mask",
"tensorflow.nn.dynamic_rnn",
"tensorflow.fill",
"tensorflow.contrib.seq2seq.dynamic_decode",
"tensorflow.train.AdamOptimizer",
"tensorflow.nn.sparse_softmax_cross_entropy_with_logits"
] |
[((9037, 9049), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (9047, 9049), True, 'import tensorflow as tf\n'), ((13958, 14012), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['summary_writer', 'config'], {}), '(summary_writer, config)\n', (13988, 14012), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((666, 688), 'tensorflow.complex', 'tf.complex', (['real', 'imag'], {}), '(real, imag)\n', (676, 688), True, 'import tensorflow as tf\n'), ((8967, 8988), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (8986, 8988), True, 'import tensorflow as tf\n'), ((1080, 1108), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""seq2seq"""'], {}), "('seq2seq')\n", (1097, 1108), True, 'import tensorflow as tf\n'), ((1153, 1191), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (1167, 1191), True, 'import tensorflow as tf\n'), ((1227, 1265), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (1241, 1265), True, 'import tensorflow as tf\n'), ((1300, 1338), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[None, None]'], {}), '(tf.int32, [None, None])\n', (1314, 1338), True, 'import tensorflow as tf\n'), ((1372, 1412), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32', '[None, None]'], {}), '(tf.float32, [None, None])\n', (1386, 1412), True, 'import tensorflow as tf\n'), ((1798, 1856), 'tensorflow.get_variable', 'tf.get_variable', (['"""embedding"""', '[vocab_size, embedding_size]'], {}), "('embedding', [vocab_size, embedding_size])\n", (1813, 1856), True, 'import tensorflow as tf\n'), ((1960, 2018), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.embedding', 'self.encoder_input'], {}), '(self.embedding, self.encoder_input)\n', (1982, 2018), True, 'import tensorflow as tf\n'), ((2051, 2109), 'tensorflow.nn.embedding_lookup', 'tf.nn.embedding_lookup', (['self.embedding', 'self.decoder_input'], {}), '(self.embedding, self.decoder_input)\n', (2073, 2109), True, 'import tensorflow as tf\n'), ((3949, 4037), 'tensorflow.nn.sparse_softmax_cross_entropy_with_logits', 'tf.nn.sparse_softmax_cross_entropy_with_logits', ([], {'labels': 'decoder_output', 'logits': 'logits'}), '(labels=decoder_output,\n logits=logits)\n', (3995, 4037), True, 'import tensorflow as tf\n'), ((4662, 4699), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', (['learning_rate'], {}), '(learning_rate)\n', (4684, 4699), True, 'import tensorflow as tf\n'), ((4831, 4884), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss_wif_weight"""', 'self.loss_train'], {}), "('loss_wif_weight', self.loss_train)\n", (4848, 4884), True, 'import tensorflow as tf\n'), ((4897, 4945), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""perplexity"""', 'self.perplexity'], {}), "('perplexity', self.perplexity)\n", (4914, 4945), True, 'import tensorflow as tf\n'), ((5018, 5040), 'tensorflow.summary.merge_all', 'tf.summary.merge_all', ([], {}), '()\n', (5038, 5040), True, 'import tensorflow as tf\n'), ((9814, 9909), 'tensorlayer.iterate.minibatches', 'tl.iterate.minibatches', ([], {'inputs': 'trainX', 'targets': 'trainY', 'batch_size': 'batch_size', 'shuffle': '(False)'}), '(inputs=trainX, targets=trainY, batch_size=batch_size,\n shuffle=False)\n', (9836, 9909), True, 'import tensorlayer as tl\n'), ((11525, 11618), 'tensorlayer.iterate.minibatches', 'tl.iterate.minibatches', ([], {'inputs': 'testX', 'targets': 'testY', 'batch_size': 'batch_size', 'shuffle': '(False)'}), '(inputs=testX, targets=testY, batch_size=batch_size,\n shuffle=False)\n', (11547, 11618), True, 'import tensorlayer as tl\n'), ((13363, 13390), 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (13388, 13390), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((13588, 13642), 'tensorflow.contrib.tensorboard.plugins.projector.visualize_embeddings', 'projector.visualize_embeddings', (['summary_writer', 'config'], {}), '(summary_writer, config)\n', (13618, 13642), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((13753, 13780), 'tensorflow.contrib.tensorboard.plugins.projector.ProjectorConfig', 'projector.ProjectorConfig', ([], {}), '()\n', (13778, 13780), False, 'from tensorflow.contrib.tensorboard.plugins import projector\n'), ((2141, 2169), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""encoder"""'], {}), "('encoder')\n", (2158, 2169), True, 'import tensorflow as tf\n'), ((2203, 2280), 'eunn.EUNNCell', 'EUNNCell', ([], {'num_units': 'embedding_size', 'capacity': 'capacity', 'fft': '(False)', 'cplex': '(False)'}), '(num_units=embedding_size, capacity=capacity, fft=False, cplex=False)\n', (2211, 2280), False, 'from eunn import EUNNCell\n'), ((2329, 2421), 'tensorflow.nn.dynamic_rnn', 'tf.nn.dynamic_rnn', (['encoder_cell', 'encoder_embedded', 'self.encoder_length'], {'dtype': 'tf.float32'}), '(encoder_cell, encoder_embedded, self.encoder_length,\n dtype=tf.float32)\n', (2346, 2421), True, 'import tensorflow as tf\n'), ((2460, 2488), 'tensorflow.variable_scope', 'tf.variable_scope', (['"""decoder"""'], {}), "('decoder')\n", (2477, 2488), True, 'import tensorflow as tf\n'), ((2576, 2653), 'eunn.EUNNCell', 'EUNNCell', ([], {'num_units': 'embedding_size', 'capacity': 'capacity', 'fft': '(False)', 'cplex': '(False)'}), '(num_units=embedding_size, capacity=capacity, fft=False, cplex=False)\n', (2584, 2653), False, 'from eunn import EUNNCell\n'), ((2681, 2753), 'tensorflow.contrib.seq2seq.TrainingHelper', 'tf.contrib.seq2seq.TrainingHelper', (['decoder_embedded', 'self.decoder_length'], {}), '(decoder_embedded, self.decoder_length)\n', (2714, 2753), True, 'import tensorflow as tf\n'), ((2790, 2819), 'tensorflow.python.layers.core.Dense', 'layers_core.Dense', (['vocab_size'], {}), '(vocab_size)\n', (2807, 2819), True, 'from tensorflow.python.layers import core as layers_core\n'), ((2847, 2950), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', (['decoder_cell', 'helper', 'encoder_state'], {'output_layer': 'projection_layer'}), '(decoder_cell, helper, encoder_state,\n output_layer=projection_layer)\n', (2878, 2950), True, 'import tensorflow as tf\n'), ((2993, 3082), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', (['decoder'], {'scope': 'decoder_scope', 'impute_finished': '(True)'}), '(decoder, scope=decoder_scope,\n impute_finished=True)\n', (3026, 3082), True, 'import tensorflow as tf\n'), ((3501, 3611), 'tensorflow.contrib.seq2seq.BasicDecoder', 'tf.contrib.seq2seq.BasicDecoder', (['decoder_cell', 'helper_sample', 'encoder_state'], {'output_layer': 'projection_layer'}), '(decoder_cell, helper_sample, encoder_state,\n output_layer=projection_layer)\n', (3532, 3611), True, 'import tensorflow as tf\n'), ((3661, 3757), 'tensorflow.contrib.seq2seq.dynamic_decode', 'tf.contrib.seq2seq.dynamic_decode', (['decoder_sample'], {'impute_finished': '(True)', 'scope': 'decoder_scope'}), '(decoder_sample, impute_finished=True,\n scope=decoder_scope)\n', (3694, 3757), True, 'import tensorflow as tf\n'), ((4083, 4127), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(target_weight * cross_entropy)'], {}), '(target_weight * cross_entropy)\n', (4096, 4127), True, 'import tensorflow as tf\n'), ((4130, 4161), 'tensorflow.cast', 'tf.cast', (['batch_size', 'tf.float32'], {}), '(batch_size, tf.float32)\n', (4137, 4161), True, 'import tensorflow as tf\n'), ((9292, 9325), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (9323, 9325), True, 'import tensorflow as tf\n'), ((9928, 9954), 'tensorlayer.prepro.pad_sequences', 'tl.prepro.pad_sequences', (['X'], {}), '(X)\n', (9951, 9954), True, 'import tensorlayer as tl\n'), ((9990, 10038), 'tensorlayer.prepro.sequences_add_end_id', 'tl.prepro.sequences_add_end_id', (['Y'], {'end_id': 'EOS_ID'}), '(Y, end_id=EOS_ID)\n', (10020, 10038), True, 'import tensorlayer as tl\n'), ((10074, 10114), 'tensorlayer.prepro.pad_sequences', 'tl.prepro.pad_sequences', (['_decoder_output'], {}), '(_decoder_output)\n', (10097, 10114), True, 'import tensorlayer as tl\n'), ((10151, 10221), 'tensorlayer.prepro.sequences_add_start_id', 'tl.prepro.sequences_add_start_id', (['Y'], {'start_id': 'GO_ID', 'remove_last': '(False)'}), '(Y, start_id=GO_ID, remove_last=False)\n', (10183, 10221), True, 'import tensorlayer as tl\n'), ((10256, 10295), 'tensorlayer.prepro.pad_sequences', 'tl.prepro.pad_sequences', (['_decoder_input'], {}), '(_decoder_input)\n', (10279, 10295), True, 'import tensorlayer as tl\n'), ((10330, 10375), 'tensorlayer.prepro.sequences_get_mask', 'tl.prepro.sequences_get_mask', (['_decoder_output'], {}), '(_decoder_output)\n', (10358, 10375), True, 'import tensorlayer as tl\n'), ((11637, 11663), 'tensorlayer.prepro.pad_sequences', 'tl.prepro.pad_sequences', (['X'], {}), '(X)\n', (11660, 11663), True, 'import tensorlayer as tl\n'), ((11699, 11747), 'tensorlayer.prepro.sequences_add_end_id', 'tl.prepro.sequences_add_end_id', (['Y'], {'end_id': 'EOS_ID'}), '(Y, end_id=EOS_ID)\n', (11729, 11747), True, 'import tensorlayer as tl\n'), ((11783, 11823), 'tensorlayer.prepro.pad_sequences', 'tl.prepro.pad_sequences', (['_decoder_output'], {}), '(_decoder_output)\n', (11806, 11823), True, 'import tensorlayer as tl\n'), ((11860, 11930), 'tensorlayer.prepro.sequences_add_start_id', 'tl.prepro.sequences_add_start_id', (['Y'], {'start_id': 'GO_ID', 'remove_last': '(False)'}), '(Y, start_id=GO_ID, remove_last=False)\n', (11892, 11930), True, 'import tensorlayer as tl\n'), ((11965, 12004), 'tensorlayer.prepro.pad_sequences', 'tl.prepro.pad_sequences', (['_decoder_input'], {}), '(_decoder_input)\n', (11988, 12004), True, 'import tensorlayer as tl\n'), ((12039, 12084), 'tensorlayer.prepro.sequences_get_mask', 'tl.prepro.sequences_get_mask', (['_decoder_output'], {}), '(_decoder_output)\n', (12067, 12084), True, 'import tensorlayer as tl\n'), ((4200, 4244), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['(target_weight * cross_entropy)'], {}), '(target_weight * cross_entropy)\n', (4213, 4244), True, 'import tensorflow as tf\n'), ((4247, 4275), 'tensorflow.reduce_sum', 'tf.reduce_sum', (['target_weight'], {}), '(target_weight)\n', (4260, 4275), True, 'import tensorflow as tf\n'), ((3419, 3447), 'tensorflow.fill', 'tf.fill', (['[batch_size]', 'GO_ID'], {}), '([batch_size], GO_ID)\n', (3426, 3447), True, 'import tensorflow as tf\n')]
|
"""An instance of an FMOD Studio Event."""
from ctypes import byref, c_bool, c_float, c_int, c_void_p
from ..channel_group import ChannelGroup
from ..utils import prepare_str
from .enums import PLAYBACK_STATE
from .studio_object import StudioObject
class EventInstance(StudioObject):
"""An instance of an FMOD Studio Event."""
function_prefix = "FMOD_Studio_EventInstance"
def start(self):
"""Starts playback.
If the instance was already playing then calling this function will
restart the event.
"""
self._call("Start")
def stop(self):
"""Stops playback."""
self._call("Stop")
@property
def paused(self):
"""Tthe pause state.
True if the event instance is paused.
"""
paused = c_bool()
self._call("GetPaused", byref(paused))
return paused.value
@paused.setter
def paused(self, val):
"""Set the pause state.
:param bool val: The desired pause state. True = paused, False =
unpaused.
"""
self._call("SetPaused", c_bool(val))
@property
def playback_state(self):
"""The playback state.
If the instance is invalid, then the state will be STOPPED.
"""
state = c_int()
self._call("GetPlaybackState", byref(state))
return PLAYBACK_STATE(state.value)
def get_parameter_by_name(self, name):
"""A parameter value.
:param str name: Parameter name (case-insensitive)."""
val = c_float()
actual = c_float()
self._call("GetParameterByName", prepare_str(name), byref(val), byref(actual))
return (val.value, actual.value)
def set_parameter_by_name(self, name, value, ignoreseekspeed=False):
"""Set a parameter value by name.
:param str name: Parameter name (case-insensitive).
:param float value: Value for given name.
:param bool ignoreseekspeed: Specifies whether to ignore the
parameter's seek speed and set the value immediately.
"""
self._call(
"SetParameterByName", prepare_str(name), c_float(value), ignoreseekspeed
)
@property
def channel_group(self):
"""The core channel group corresponding to the master track.
Until the event instance has been fully created calling this property
will raise an :py:exc:`~pyfmodex.exceptions.FmodError` with code
:py:attr:`~pyfmodex.enums.RESULT.STUDIO_NOT_LOADED`.
"""
ptr = c_void_p()
self._call("GetChannelGroup", byref(ptr))
return ChannelGroup(ptr)
@property
def reverb_level(self):
"""Not Implemented."""
raise NotImplementedError
@reverb_level.setter
def reverb_level(self, level):
raise NotImplementedError
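# Usage sketch (hypothetical; requires a running FMOD Studio system and a
# loaded event description, neither of which is created in this module):
#
# instance = event_description.create_instance()
# instance.start()
# instance.set_parameter_by_name('Intensity', 0.8)
# if instance.playback_state is PLAYBACK_STATE.PLAYING:
#     instance.paused = True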
|
[
"ctypes.c_int",
"ctypes.byref",
"ctypes.c_bool",
"ctypes.c_float",
"ctypes.c_void_p"
] |
[((800, 808), 'ctypes.c_bool', 'c_bool', ([], {}), '()\n', (806, 808), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1289, 1296), 'ctypes.c_int', 'c_int', ([], {}), '()\n', (1294, 1296), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1545, 1554), 'ctypes.c_float', 'c_float', ([], {}), '()\n', (1552, 1554), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1572, 1581), 'ctypes.c_float', 'c_float', ([], {}), '()\n', (1579, 1581), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((2551, 2561), 'ctypes.c_void_p', 'c_void_p', ([], {}), '()\n', (2559, 2561), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((841, 854), 'ctypes.byref', 'byref', (['paused'], {}), '(paused)\n', (846, 854), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1103, 1114), 'ctypes.c_bool', 'c_bool', (['val'], {}), '(val)\n', (1109, 1114), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1336, 1348), 'ctypes.byref', 'byref', (['state'], {}), '(state)\n', (1341, 1348), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1642, 1652), 'ctypes.byref', 'byref', (['val'], {}), '(val)\n', (1647, 1652), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((1654, 1667), 'ctypes.byref', 'byref', (['actual'], {}), '(actual)\n', (1659, 1667), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((2157, 2171), 'ctypes.c_float', 'c_float', (['value'], {}), '(value)\n', (2164, 2171), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n'), ((2600, 2610), 'ctypes.byref', 'byref', (['ptr'], {}), '(ptr)\n', (2605, 2610), False, 'from ctypes import byref, c_bool, c_float, c_int, c_void_p\n')]
|
import yaml
import io
class Config:
CONFIG_PATH = 'config.yaml'
def __init__(self):
self.config = self._load_config()
self.access_token = self.config['access_token']
self.wall_id = self.config['wall_id']
self.telegram_token = self.config['telegram_token']
self.telegram_chat_id = self.config['telegram_chat_id']
self.last_date = self.config['last_date']
def set_last_date(self, last_date):
self.last_date = last_date
self.config['last_date'] = last_date
@staticmethod
def _load_config():
with open(Config.CONFIG_PATH, 'r') as stream:
            data = yaml.load(stream, Loader=yaml.SafeLoader)
return data
def save_config(self):
with io.open(Config.CONFIG_PATH, 'w', encoding='utf8') as outfile:
yaml.dump(self.config, outfile, default_flow_style=False, allow_unicode=True)
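# Usage sketch (illustration only; assumes config.yaml exists with the keys
# read in __init__):
#
# cfg = Config()
# cfg.set_last_date('2020-01-01')
# cfg.save_config()  # persists the updated last_date back to config.yaml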
|
[
"yaml.load",
"yaml.dump",
"io.open"
] |
[((667, 684), 'yaml.load', 'yaml.load', (['stream'], {}), '(stream)\n', (676, 684), False, 'import yaml\n'), ((750, 799), 'io.open', 'io.open', (['Config.CONFIG_PATH', '"""w"""'], {'encoding': '"""utf8"""'}), "(Config.CONFIG_PATH, 'w', encoding='utf8')\n", (757, 799), False, 'import io\n'), ((824, 901), 'yaml.dump', 'yaml.dump', (['self.config', 'outfile'], {'default_flow_style': '(False)', 'allow_unicode': '(True)'}), '(self.config, outfile, default_flow_style=False, allow_unicode=True)\n', (833, 901), False, 'import yaml\n')]
|
import os
# from catalogue.models import MediaUpload
def get_upload_media_name(instance, filename):
"""
Generic method to manage model media
- Use a uuid string to avoid name conflicts
- Added a 's' to media type to generate a plural folder
"""
dirname = "media/" + instance.media_type + "s/"
extension = os.path.splitext(filename)[1]
return dirname + str(instance.media_key) + extension
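# Worked example (illustration only; the instance fields are hypothetical):
# for an instance with media_type='image' and filename='photo.JPG', the
# function returns 'media/images/<media_key>.JPG' -- only the extension of
# the uploaded name is kept, which avoids collisions between user filenames.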
|
[
"os.path.splitext"
] |
[((336, 362), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (352, 362), False, 'import os\n')]
|
from __future__ import absolute_import
from sentry.api.serializers import Serializer, register, serialize
from sentry.models import Activity
@register(Activity)
class ActivitySerializer(Serializer):
def serialize(self, obj, attrs, user):
d = {
'id': str(obj.id),
'user': serialize(obj.user),
'type': obj.get_type_display(),
'data': obj.data,
'dateCreated': obj.datetime,
}
return d
|
[
"sentry.api.serializers.serialize",
"sentry.api.serializers.register"
] |
[((145, 163), 'sentry.api.serializers.register', 'register', (['Activity'], {}), '(Activity)\n', (153, 163), False, 'from sentry.api.serializers import Serializer, register, serialize\n'), ((310, 329), 'sentry.api.serializers.serialize', 'serialize', (['obj.user'], {}), '(obj.user)\n', (319, 329), False, 'from sentry.api.serializers import Serializer, register, serialize\n')]
|
from setuptools import setup, find_packages
NAME = 'linkml_model_enrichment'
DESCRIPTION = 'A Python library and set of command line utilities for exchanging Knowledge Graphs (KGs) that conform to or are aligned to the Biolink Model.'
URL = 'https://github.com/NCATS-Tangerine/linkml_model_enrichment'
AUTHOR = '<NAME>'
EMAIL = '<EMAIL>'
REQUIRES_PYTHON = '>=3.7.0'
VERSION = '1.0.0b0'
LICENSE = 'BSD'
with open("requirements.txt", "r") as FH:
REQUIREMENTS = FH.readlines()
EXTRAS = {}
setup(
name=NAME,
author=AUTHOR,
version=VERSION,
author_email=EMAIL,
python_requires=REQUIRES_PYTHON,
url=URL,
description=DESCRIPTION,
long_description=open('README.md').read(),
long_description_content_type='text/markdown',
license=LICENSE,
packages=find_packages(exclude=["*.tests", "*.tests.*", "tests.*", "tests"]),
package_data={'linkml_model_enrichment': ["config.yml"]},
keywords='linkml',
classifiers=[
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Bio-Informatics',
'License :: OSI Approved :: BSD License',
'Programming Language :: Python :: 3'
],
install_requires=[r for r in REQUIREMENTS if not r.startswith("#")],
extras_require=EXTRAS,
include_package_data=True,
entry_points={
'console_scripts': ['annotate-enums=linkml_model_enrichment.annotators.enum_annotator:clickmain',
'tsv2linkml=linkml_model_enrichment.importers.csv_import_engine:tsv2model',
'tsvs2linkml=linkml_model_enrichment.importers.csv_import_engine:tsvs2model',
'rdf2linkml=linkml_model_enrichment.importers.rdf_instance_import_engine:rdf2model',
'owl2linkml=linkml_model_enrichment.importers.owl_import_engine:owl2model',
'dosdp2linkml=linkml_model_enrichment.importers.owl_import_engine:dosdp2model',
'jsondata2linkml=linkml_model_enrichment.importers.json_instance_import_engine:json2model',
'jsonschema2linkml=linkml_model_enrichment.importers.jsonschema_import_engine:jsonschema2model',
]
}
)
|
[
"setuptools.find_packages"
] |
[((791, 858), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['*.tests', '*.tests.*', 'tests.*', 'tests']"}), "(exclude=['*.tests', '*.tests.*', 'tests.*', 'tests'])\n", (804, 858), False, 'from setuptools import setup, find_packages\n')]
|
import asyncio
from userbot import CMD_HANDLER as cmd
from userbot import CMD_HELP, StartTime, bot
from userbot.utils import bash, edit_or_reply, zelda_cmd
hpx_thumb = "https://telegra.ph/file/6443a8f61a0194b065221.mp4"
@zelda_cmd(pattern="hcm (.*)")
async def amireallycuan(cuan):
user = await bot.get_me()
reply_message = await cuan.get_reply_message()
capt = str(cuan.pattern_match.group(1).split(" ", 1)[1])
link = str(cuan.pattern_match.group(1).split(" ", 2)[0])
capti = capt.replace(".", " ")
thumb = hpx_thumb
output = (
f"**{capt}**\n\n"
f"**LINK VIDEO**\n"
f"{link}\n\n"
f"-----------------------------------\n"
f"📍**JOIN :**\n"
"@HCMutualism\n@VIPLiveRecords"
)
if thumb:
try:
logo = thumb
await cuan.delete()
msg = await bot.send_file(cuan.chat_id, logo, caption=output)
await asyncio.sleep(300)
except BaseException:
await cuan.edit(
output + "\n\n ***Tidak Ada Thumbnail!"
"\nHarap balas ke gambar untuk dijadikan thumbnail Content.**"
)
else:
await edit_or_reply(cuan, output)
CMD_HELP.update(
{
"ch_hpx": f"**Plugin : **`Content CH`\
\n\n • **Syntax :** `{cmd}hcm` <Link> <Caption>\
\n • **Function : **Untuk membuat Content Pada Channel.\
"
}
)
|
[
"asyncio.sleep",
"userbot.utils.edit_or_reply",
"userbot.bot.send_file",
"userbot.CMD_HELP.update",
"userbot.bot.get_me",
"userbot.utils.zelda_cmd"
] |
[((224, 253), 'userbot.utils.zelda_cmd', 'zelda_cmd', ([], {'pattern': '"""hcm (.*)"""'}), "(pattern='hcm (.*)')\n", (233, 253), False, 'from userbot.utils import bash, edit_or_reply, zelda_cmd\n'), ((1229, 1422), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'ch_hpx\':\n f"""**Plugin : **`Content CH` \n\n • **Syntax :** `{cmd}hcm` <Link> <Caption> \n • **Function : **Untuk membuat Content Pada Channel. """\n }'], {}), '({\'ch_hpx\':\n f"""**Plugin : **`Content CH` \n\n • **Syntax :** `{cmd}hcm` <Link> <Caption> \n • **Function : **Untuk membuat Content Pada Channel. """\n })\n', (1244, 1422), False, 'from userbot import CMD_HELP, StartTime, bot\n'), ((302, 314), 'userbot.bot.get_me', 'bot.get_me', ([], {}), '()\n', (312, 314), False, 'from userbot import CMD_HELP, StartTime, bot\n'), ((1183, 1210), 'userbot.utils.edit_or_reply', 'edit_or_reply', (['cuan', 'output'], {}), '(cuan, output)\n', (1196, 1210), False, 'from userbot.utils import bash, edit_or_reply, zelda_cmd\n'), ((864, 913), 'userbot.bot.send_file', 'bot.send_file', (['cuan.chat_id', 'logo'], {'caption': 'output'}), '(cuan.chat_id, logo, caption=output)\n', (877, 913), False, 'from userbot import CMD_HELP, StartTime, bot\n'), ((932, 950), 'asyncio.sleep', 'asyncio.sleep', (['(300)'], {}), '(300)\n', (945, 950), False, 'import asyncio\n')]
|
from django.core.management.base import BaseCommand, CommandError
import os
import concurrent.futures
import time
class Command(BaseCommand):
help = 'Load all data from csv file to database'
command_list = ['load_land', 'load_building', 'load_place', 'load_item']
def handle(self, *args, **options):
start = time.time()
try:
self.load_data(self.command_list)
end = time.time()
print(f"Loaded completed in: {round(end-start, 4)}s")
except Exception as e:
raise CommandError(e)
def load_command(self, command):
os.system(f'./manage.py {command}')
def load_data(self, commands):
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
executor.map(self.load_command, commands)
|
[
"django.core.management.base.CommandError",
"os.system",
"time.time"
] |
[((332, 343), 'time.time', 'time.time', ([], {}), '()\n', (341, 343), False, 'import time\n'), ((610, 645), 'os.system', 'os.system', (['f"""./manage.py {command}"""'], {}), "(f'./manage.py {command}')\n", (619, 645), False, 'import os\n'), ((421, 432), 'time.time', 'time.time', ([], {}), '()\n', (430, 432), False, 'import time\n'), ((548, 563), 'django.core.management.base.CommandError', 'CommandError', (['e'], {}), '(e)\n', (560, 563), False, 'from django.core.management.base import BaseCommand, CommandError\n')]
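A self-contained sketch of the executor.map fan-out used by load_data above, with a stub in place of os.system so it runs anywhere:

import concurrent.futures

def load_command(command):
    # Stand-in for os.system(f'./manage.py {command}').
    return f"./manage.py {command}"

commands = ["load_land", "load_building", "load_place", "load_item"]
with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
    results = list(executor.map(load_command, commands))
assert len(results) == 4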
|
import json as stdlib_json # Don't conflict with `corehq.util.json`
from traceback import format_exception_only
from django.utils.functional import Promise
from .couch import get_document_or_404 # noqa: F401
from .view_utils import reverse # noqa: F401
def flatten_list(elements):
return [item for sublist in elements for item in sublist]
def flatten_non_iterable_list(elements):
    # iterate explicitly and type-check each element so that strings are kept
    # whole instead of being split into characters: ['abc'] => ['a', 'b', 'c']
items = []
for element in elements:
if isinstance(element, list):
items.extend(flatten_non_iterable_list(element))
else:
items.append(element)
return items
def eval_lazy(value):
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def cmp(a, b):
"""Comparison function for Python 3
https://stackoverflow.com/a/22490617/10840
"""
return (a > b) - (a < b)
def as_text(value):
"""Safely convert object to text"""
if isinstance(value, str):
return value
if isinstance(value, bytes):
return value.decode("utf8", errors="backslashreplace")
if isinstance(value, BaseException):
lines = format_exception_only(type(value), value)
return "\n".join(x.rstrip("\n") for x in lines)
return repr(value)
def as_json_text(value):
if value is None:
return ''
if isinstance(value, dict):
try:
return stdlib_json.dumps(value, indent=2)
except TypeError:
pass
return as_text(value)
|
[
"json.dumps"
] |
[((1506, 1540), 'json.dumps', 'stdlib_json.dumps', (['value'], {'indent': '(2)'}), '(value, indent=2)\n', (1523, 1540), True, 'import json as stdlib_json\n')]
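Why flatten_non_iterable_list type-checks each element: a nested comprehension like flatten_list iterates whatever the sublists are, so a bare string would be split into characters. A standalone re-derivation of the recursive version showing the preserved behavior:

def flatten(elements):
    # Keep non-list elements whole; only recurse into real lists.
    items = []
    for element in elements:
        if isinstance(element, list):
            items.extend(flatten(element))
        else:
            items.append(element)
    return items

assert flatten([["a", ["b", "c"]], "abc"]) == ["a", "b", "c", "abc"]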
|
import torch
import torch.nn as nn
from models.layers_384 import Conv, Hourglass, Residual
class Convert(nn.Module):
def __init__(self, in_channel, out_channel):
super(Convert, self).__init__()
self.conv = nn.Conv2d(in_channel, out_channel, 1)
def forward(self, x):
return self.conv(x)
class PoseNet(nn.Module):
def __init__(self, nstack=8, layer=4, in_channel=256, out_channel=4, increase=0):
super(PoseNet, self).__init__()
self.nstack = nstack
self.pre = nn.Sequential(
Conv(3, 64, 7, 2, bn=True, relu=True),
Residual(64, 128),
nn.MaxPool2d(2, 2),
Residual(128, 128),
Residual(128, in_channel)
)
self.hourglass = nn.ModuleList([nn.Sequential(
Hourglass(layer, in_channel, inc=increase)) for _ in range(nstack)])
self.feature = nn.ModuleList([nn.Sequential(Residual(in_channel, in_channel), Conv(
in_channel, in_channel, 1, bn=True, relu=True)) for _ in range(nstack)])
self.outs = nn.ModuleList(
[Conv(in_channel, out_channel, 1, bn=False, relu=False) for _ in range(nstack)])
self.merge_feature = nn.ModuleList(
[Convert(in_channel, in_channel) for _ in range(nstack - 1)])
self.merge_pred = nn.ModuleList(
[Convert(out_channel, in_channel) for _ in range(nstack - 1)])
def forward(self, x):
x = self.pre(x)
heat_maps = []
for i in range(self.nstack):
hg = self.hourglass[i](x)
feature = self.feature[i](hg)
pred = self.outs[i](feature)
heat_maps.append(pred)
if i < self.nstack - 1:
x = x + self.merge_pred[i](pred) + \
self.merge_feature[i](feature)
return heat_maps
|
[
"models.layers_384.Conv",
"torch.nn.Conv2d",
"models.layers_384.Hourglass",
"models.layers_384.Residual",
"torch.nn.MaxPool2d"
] |
[((228, 265), 'torch.nn.Conv2d', 'nn.Conv2d', (['in_channel', 'out_channel', '(1)'], {}), '(in_channel, out_channel, 1)\n', (237, 265), True, 'import torch.nn as nn\n'), ((550, 587), 'models.layers_384.Conv', 'Conv', (['(3)', '(64)', '(7)', '(2)'], {'bn': '(True)', 'relu': '(True)'}), '(3, 64, 7, 2, bn=True, relu=True)\n', (554, 587), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((601, 618), 'models.layers_384.Residual', 'Residual', (['(64)', '(128)'], {}), '(64, 128)\n', (609, 618), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((632, 650), 'torch.nn.MaxPool2d', 'nn.MaxPool2d', (['(2)', '(2)'], {}), '(2, 2)\n', (644, 650), True, 'import torch.nn as nn\n'), ((664, 682), 'models.layers_384.Residual', 'Residual', (['(128)', '(128)'], {}), '(128, 128)\n', (672, 682), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((696, 721), 'models.layers_384.Residual', 'Residual', (['(128)', 'in_channel'], {}), '(128, in_channel)\n', (704, 721), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((1093, 1147), 'models.layers_384.Conv', 'Conv', (['in_channel', 'out_channel', '(1)'], {'bn': '(False)', 'relu': '(False)'}), '(in_channel, out_channel, 1, bn=False, relu=False)\n', (1097, 1147), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((799, 841), 'models.layers_384.Hourglass', 'Hourglass', (['layer', 'in_channel'], {'inc': 'increase'}), '(layer, in_channel, inc=increase)\n', (808, 841), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((920, 952), 'models.layers_384.Residual', 'Residual', (['in_channel', 'in_channel'], {}), '(in_channel, in_channel)\n', (928, 952), False, 'from models.layers_384 import Conv, Hourglass, Residual\n'), ((954, 1005), 'models.layers_384.Conv', 'Conv', (['in_channel', 'in_channel', '(1)'], {'bn': '(True)', 'relu': '(True)'}), '(in_channel, in_channel, 1, bn=True, relu=True)\n', (958, 1005), False, 'from models.layers_384 import Conv, Hourglass, Residual\n')]
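A minimal sketch of the per-stack merge step in PoseNet.forward, written with plain torch tensors so it does not depend on models.layers_384; channel widths are the defaults from the record and the spatial size is arbitrary:

import torch
import torch.nn as nn

x = torch.randn(1, 256, 96, 96)        # trunk features
pred = torch.randn(1, 4, 96, 96)       # heatmap prediction from one stack
feature = torch.randn(1, 256, 96, 96)  # refined features from the same stack
merge_pred = nn.Conv2d(4, 256, 1)     # plays the role of Convert(out_channel, in_channel)
merge_feature = nn.Conv2d(256, 256, 1)  # plays the role of Convert(in_channel, in_channel)
x = x + merge_pred(pred) + merge_feature(feature)
assert x.shape == (1, 256, 96, 96)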
|
from html.parser import HTMLParser
from bs4 import BeautifulSoup
import csv
data = []
class MyHTMLParser(HTMLParser):
def handle_starttag(self, tag, attrs):
print("Start tag:", tag)
for attr in attrs:
print(" attr:", attr)
def handle_endtag(self, tag):
print("End tag :", tag)
def handle_data(self, data):
print("Data :", data)
with open('sample.html') as f:
read_data = f.read()
soup = BeautifulSoup(read_data, 'html.parser')
for span in soup.find_all('li'):
    if span.header is None: continue
date = span.header.find_all('div', class_="date")[0].contents[0]
price = span.header.find_all('div', class_="amount")[0].find_all('strong')[0].contents[0].strip()
who = span.header.find_all('div', class_="description")[0].find_all('span', class_="label")[0].contents[0]
dataRow = dict()
price = float(price.replace(',', '.').replace(' ', '').replace(u'\xa0', u''))
dataRow['Date'] = date
dataRow['Payee'] = who
dataRow['Memo'] = ''
if price < 0:
dataRow['Outflow'] = abs(price)
dataRow['Inflow'] = ''
else:
dataRow['Inflow'] = abs(price)
dataRow['Outflow'] = ''
data.append(dataRow)
with open('mbank_html.csv', 'w') as csvfile:
fieldnames = ['Date', 'Payee', 'Memo', 'Outflow', 'Inflow']
spamwriter = csv.DictWriter(csvfile, fieldnames=fieldnames)
spamwriter.writeheader()
spamwriter.writerows(data)
|
[
"bs4.BeautifulSoup",
"csv.DictWriter"
] |
[((457, 496), 'bs4.BeautifulSoup', 'BeautifulSoup', (['read_data', '"""html.parser"""'], {}), "(read_data, 'html.parser')\n", (470, 496), False, 'from bs4 import BeautifulSoup\n'), ((1363, 1409), 'csv.DictWriter', 'csv.DictWriter', (['csvfile'], {'fieldnames': 'fieldnames'}), '(csvfile, fieldnames=fieldnames)\n', (1377, 1409), False, 'import csv\n')]
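The chained replaces above normalize Polish-locale amounts (comma decimal separator, non-breaking-space thousands separator) into floats; a standalone check of the same expression on a hypothetical scraped value:

raw = "1\xa0234,56"  # hypothetical amount string from the page
price = float(raw.replace(",", ".").replace(" ", "").replace("\xa0", ""))
assert price == 1234.56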
|
from datetime import datetime
from dataclasses import dataclass
from marshmallow import Schema, fields, post_load
class LogMessageCommandType:
INBOUND_SMS = "INBOUND_SMS"
STATUS_UPDATE = "STATUS_UPDATE"
OUTBOUND_SMS = "OUTBOUND_SMS"
@dataclass
class LogMessageCommand:
command_type: str
payload: dict
approximate_arrival: datetime
class LogMessageCommandSchema(Schema):
command_type = fields.Str(required=True)
payload = fields.Dict(required=True)
approximate_arrival = fields.DateTime(required=True)
@post_load
def make_sms(self, data, **kwargs):
return LogMessageCommand(**data)
|
[
"marshmallow.fields.Str",
"marshmallow.fields.DateTime",
"marshmallow.fields.Dict"
] |
[((419, 444), 'marshmallow.fields.Str', 'fields.Str', ([], {'required': '(True)'}), '(required=True)\n', (429, 444), False, 'from marshmallow import Schema, fields, post_load\n'), ((459, 485), 'marshmallow.fields.Dict', 'fields.Dict', ([], {'required': '(True)'}), '(required=True)\n', (470, 485), False, 'from marshmallow import Schema, fields, post_load\n'), ((512, 542), 'marshmallow.fields.DateTime', 'fields.DateTime', ([], {'required': '(True)'}), '(required=True)\n', (527, 542), False, 'from marshmallow import Schema, fields, post_load\n')]
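A round-trip through the schema above, assuming the classes are in scope; marshmallow parses the ISO timestamp, and the post_load hook returns a LogMessageCommand instance rather than a plain dict:

schema = LogMessageCommandSchema()
cmd = schema.load({
    "command_type": LogMessageCommandType.INBOUND_SMS,
    "payload": {"from": "+15550100", "body": "hello"},  # hypothetical message
    "approximate_arrival": "2021-01-01T00:00:00",
})
assert isinstance(cmd, LogMessageCommand)
assert cmd.command_type == "INBOUND_SMS"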
|
from pathlib import Path
from fastapi.testclient import TestClient
import deciphon_api.data as data
from deciphon_api.main import settings
def _upload(
client: TestClient, file_type: str, file_field: str, path: Path, with_api_key=True
):
api_prefix = settings.api_prefix
api_key = settings.api_key
if with_api_key:
headers = {"X-API-Key": f"{api_key}"}
else:
headers = {}
return client.post(
f"{api_prefix}/{file_type}/",
files={
file_field: (
path.name,
open(path, "rb"),
"application/octet-stream",
)
},
headers=headers,
)
def upload_minifam_hmm(client: TestClient, with_api_key=True):
minifam_hmm = data.filepath(data.FileName.minifam_hmm)
return _upload(client, "hmms", "hmm_file", minifam_hmm, with_api_key)
def upload_minifam_db(client: TestClient):
minifam_dcp = data.filepath(data.FileName.minifam_db)
return _upload(client, "dbs", "db_file", minifam_dcp)
def upload_minifam(client: TestClient):
response = upload_minifam_hmm(client)
assert response.status_code == 201
response = upload_minifam_db(client)
assert response.status_code == 201
def upload_pfam1_hmm(client: TestClient):
pfam1_hmm = data.filepath(data.FileName.pfam1_hmm)
return _upload(client, "hmms", "hmm_file", pfam1_hmm)
def upload_pfam1_db(client: TestClient):
pfam1_dcp = data.filepath(data.FileName.pfam1_db)
return _upload(client, "dbs", "db_file", pfam1_dcp)
def upload_pfam1(client: TestClient):
response = upload_pfam1_hmm(client)
assert response.status_code == 201
response = upload_pfam1_db(client)
assert response.status_code == 201
|
[
"deciphon_api.data.filepath"
] |
[((760, 800), 'deciphon_api.data.filepath', 'data.filepath', (['data.FileName.minifam_hmm'], {}), '(data.FileName.minifam_hmm)\n', (773, 800), True, 'import deciphon_api.data as data\n'), ((938, 977), 'deciphon_api.data.filepath', 'data.filepath', (['data.FileName.minifam_db'], {}), '(data.FileName.minifam_db)\n', (951, 977), True, 'import deciphon_api.data as data\n'), ((1299, 1337), 'deciphon_api.data.filepath', 'data.filepath', (['data.FileName.pfam1_hmm'], {}), '(data.FileName.pfam1_hmm)\n', (1312, 1337), True, 'import deciphon_api.data as data\n'), ((1455, 1492), 'deciphon_api.data.filepath', 'data.filepath', (['data.FileName.pfam1_db'], {}), '(data.FileName.pfam1_db)\n', (1468, 1492), True, 'import deciphon_api.data as data\n')]
|
import json
import os
from django.conf import settings
exam_data_path = os.path.join(settings.PROJECT_ROOT, "train\\tests.json")
exams_college = json.loads(open(exam_data_path).read())
college_data_path = os.path.join(
settings.PROJECT_ROOT, "train\\orignal_data.json")
college_data = json.loads(open(college_data_path).read())
def colleges_list_exam_india(exam_name):
return exams_college[exam_name]
def complete_detail(college_name):
return [i for i in college_data if i['institute_name'].title() == college_name.title()]
|
[
"os.path.join"
] |
[((74, 130), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', '"""train\\\\tests.json"""'], {}), "(settings.PROJECT_ROOT, 'train\\\\tests.json')\n", (86, 130), False, 'import os\n'), ((209, 272), 'os.path.join', 'os.path.join', (['settings.PROJECT_ROOT', '"""train\\\\orignal_data.json"""'], {}), "(settings.PROJECT_ROOT, 'train\\\\orignal_data.json')\n", (221, 272), False, 'import os\n')]
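Note that "train\\tests.json" hard-codes a Windows separator; a portable spelling passes the path components to os.path.join separately (PROJECT_ROOT below is a hypothetical stand-in for settings.PROJECT_ROOT):

import os

PROJECT_ROOT = "/srv/app"  # hypothetical; settings.PROJECT_ROOT in the record
exam_data_path = os.path.join(PROJECT_ROOT, "train", "tests.json")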
|
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import click
import nnabla as nn
import nnabla.functions as F
from layers import *
class ResidualBlock(object):
def __init__(self,
out_channels=None,
scale_shift_norm=False,
dropout=0,
conv_shortcut=False):
self.out_channels = out_channels
self.scale_shift_norm = scale_shift_norm
self.dropout = dropout
self.conv_shortcut = conv_shortcut
def in_layers(self, x):
h = normalize(x, name='norm_in')
h = nonlinearity(h)
h = conv(h, self.out_channels, name='conv_in')
return h
def emb_layers(self, emb):
out_channels = self.out_channels
if self.scale_shift_norm:
out_channels *= 2
return nin(emb, out_channels, name="emb_proj")
def out_layers(self, h, emb):
if self.scale_shift_norm:
scale, shift = chunk(emb, num_chunk=2, axis=1)
h = normalize(h, name="norm_out") * (scale + 1) + shift
else:
h += emb
h = normalize(h, name="norm_out")
h = nonlinearity(h)
if self.dropout > 0:
h = F.dropout(h, p=self.dropout)
h = conv(h, self.out_channels, name="conv_out", zeroing_w=True)
return h
def shortcut(self, x):
if self.out_channels == x.shape[1]:
return x
elif self.conv_shortcut:
return conv(x, self.out_channels, name="conv_shortcut")
else:
return nin(x, self.out_channels, name="conv_shortcut")
def __call__(self, x, temb, name):
C = x.shape[1]
if self.out_channels is None:
self.out_channels = C
with nn.parameter_scope(name):
# first block
h = self.in_layers(x)
# embed
emb = self.emb_layers(temb)
# second block
h = self.out_layers(h, emb)
# add residual
out = F.add2(h, self.shortcut(x), inplace=True)
return out
def attn_block(x, name, num_heads=4, fix_parameters=False):
"""Multihead attention block"""
B, C, H, W = x.shape
with nn.parameter_scope(name):
# Get query, key, value
h = normalize(x, name="norm")
# nin(3 * C) -> split is faster?
q = nin(h, C, name="q")
k = nin(h, C, name="k")
v = nin(h, C, name="v")
# Attention
w = F.batch_matmul(F.reshape(q, (B * num_heads, -1, H * W)),
F.reshape(k, (B * num_heads, -1, H * W)), transpose_a=True)
w = F.mul_scalar(w, int(C) ** (-0.5), inplace=True)
assert w.shape == (B * num_heads, H * W, H * W)
w = F.softmax(w, axis=-1)
h = F.reshape(v, (B * num_heads, -1, H * W))
h = F.batch_matmul(h, w)
h = F.reshape(h, (B, C, H, W))
# output projection
h = nin(h, C, name='proj_out', zeroing_w=True)
assert h.shape == x.shape
return F.add2(h, x, inplace=True)
class UNet(object):
def __init__(self,
num_classes,
model_channels,
output_channels,
num_res_blocks,
attention_resolutions,
attention_num_heads,
channel_mult=(1, 2, 4, 8),
dropout=0.,
scale_shift_norm=False,
conv_resample=True):
self.num_classes = num_classes
self.model_channels = model_channels
self.output_channels = output_channels
self.num_res_blocks = num_res_blocks
self.attention_resolutions = attention_resolutions
self.attention_num_heads = attention_num_heads
self.channel_mult = channel_mult
self.dropout = dropout
self.scale_shift_norm = scale_shift_norm
self.conv_resample = conv_resample
def timestep_embedding(self, t):
with nn.parameter_scope('timestep_embedding'):
# sinusoidal embedding
emb = sinusoidal_embedding(t, self.model_channels)
# reshape to use conv rather than affine
emb = F.reshape(emb, emb.shape + (1, 1))
# linear transforms
emb = nin(emb, self.model_channels * 4, name='dense0')
emb = nonlinearity(emb)
emb = nin(emb, self.model_channels * 4, name='dense1')
return emb
def resblock_with_attention(self, h, emb, out_channels, name):
with nn.parameter_scope(name):
block = ResidualBlock(out_channels,
scale_shift_norm=self.scale_shift_norm,
dropout=self.dropout)
h = block(h, emb, f"res_block")
res = h.shape[-1]
if self.attention_resolutions is not None and res in self.attention_resolutions:
h = attn_block(h, f"attention",
num_heads=self.attention_num_heads)
return h
def downsample_blocks(self, h, emb, out_channels, name):
hs = []
with nn.parameter_scope(name):
for i in range(self.num_res_blocks):
h = self.resblock_with_attention(
h, emb, out_channels, name=f"resblock_{i}")
hs.append(h)
return hs
def upsample_blocks(self, h, emb, hs_down, out_channels, name):
hs_up = []
with nn.parameter_scope(name):
for i in range(self.num_res_blocks + 1):
# concat skip
h = F.concatenate(h, hs_down.pop(), axis=1)
h = self.resblock_with_attention(
h, emb, out_channels, name=f"resblock_{i}")
hs_up.append(h)
return hs_up
def middle_block(self, h, emb):
ch = h.shape[1]
block = ResidualBlock(
ch, scale_shift_norm=self.scale_shift_norm, dropout=self.dropout)
h = block(h, emb, name="resblock_0")
res = h.shape[-1]
if self.attention_resolutions is not None and res in self.attention_resolutions:
h = attn_block(h, f"attention", num_heads=self.attention_num_heads)
h = block(h, emb, name="resblock_1")
return h
def output_block(self, h):
h = normalize(h, "last_norm")
h = nonlinearity(h)
h = conv(h, self.output_channels, name="last_conv", zeroing_w=True)
return h
def get_intermediates(self, x, t, name=None):
ret = dict()
with nn.auto_forward(True):
ch = self.model_channels
with nn.parameter_scope('UNet' if name is None else name):
h = conv(x, ch, name="first_conv")
emb = self.timestep_embedding(t)
ret["emb"] = emb
emb = nonlinearity(emb)
hs = [h]
# downsample block
with nn.parameter_scope("downsample_block"):
for level, mult in enumerate(self.channel_mult):
# apply resblock and attention for this resolution
outs = self.downsample_blocks(h, emb, ch * mult,
name=f"block_{level}")
hs += outs
h = outs[-1]
# downsample to lower resolution except last
if level < len(self.channel_mult) - 1:
h = downsample(h, name=f"downsample_{level}",
with_conv=self.conv_resample)
hs.append(h)
ret["down"] = hs.copy()
# middle block
with nn.parameter_scope("middle_block"):
h = self.middle_block(h, emb)
ret["middle"] = h
# upsample block
hs_up = []
with nn.parameter_scope("upsample_block"):
for level, mult in enumerate(reversed(self.channel_mult)):
# apply resblock and attention for this resolution
outs = self.upsample_blocks(h, emb, hs, ch * mult,
name=f"output_{level}")
h = outs[-1]
# downsample to lower resolution except last
if level < len(self.channel_mult) - 1:
h = upsample(h, name=f"upsample_{level}",
with_conv=self.conv_resample)
outs.pop()
outs.append(h)
hs_up += outs
assert len(hs) == 0
ret["up"] = hs_up.copy()
# output block
with nn.parameter_scope("output_block"):
out = self.output_block(h)
ret["out"] = out
assert out.shape == x.shape[:1] + \
(self.output_channels, ) + x.shape[2:]
return ret
def __call__(self, x, t, name=None):
ch = self.model_channels
with nn.parameter_scope('UNet' if name is None else name):
h = conv(x, ch, name="first_conv")
emb = self.timestep_embedding(t)
emb = nonlinearity(emb)
hs = [h]
# downsample block
with nn.parameter_scope("downsample_block"):
for level, mult in enumerate(self.channel_mult):
# apply resblock and attention for this resolution
outs = self.downsample_blocks(h, emb, ch * mult,
name=f"block_{level}")
hs += outs
h = outs[-1]
# downsample to lower resolution except last
if level < len(self.channel_mult) - 1:
h = downsample(h, name=f"downsample_{level}",
with_conv=self.conv_resample)
hs.append(h)
# middle block
with nn.parameter_scope("middle_block"):
h = self.middle_block(h, emb)
# upsample block
with nn.parameter_scope("upsample_block"):
for level, mult in enumerate(reversed(self.channel_mult)):
# apply resblock and attention for this resolution
outs = self.upsample_blocks(h, emb, hs, ch * mult,
name=f"output_{level}")
h = outs[-1]
# downsample to lower resolution except last
if level < len(self.channel_mult) - 1:
h = upsample(h, name=f"upsample_{level}",
with_conv=self.conv_resample)
assert len(hs) == 0
# output block
with nn.parameter_scope("output_block"):
out = self.output_block(h)
assert out.shape == x.shape[:1] + \
(self.output_channels, ) + x.shape[2:]
return out
# Functions below are for debugging UNet class.
def test_simple_loop():
nn.clear_parameters()
x = nn.Variable.from_numpy_array(np.random.randn(10, 3, 128, 128))
t = nn.Variable.from_numpy_array(np.random.randint(0, 100, (10, )))
unet = UNet(num_classes=1, model_channels=128, output_channels=3,
num_res_blocks=2,
attention_resolutions=(16, 8),
attention_num_heads=4,
channel_mult=(1, 1, 2, 2, 4, 4))
y = unet(x, t)
loss = F.mean(F.squared_error(y, x))
import nnabla.solvers as S
solver = S.Sgd()
solver.set_parameters(nn.get_parameters())
from tqdm import trange
tr = trange(100)
for i in tr:
loss.forward(clear_no_need_grad=True)
solver.zero_grad()
loss.backward(clear_buffer=True)
solver.update()
tr.set_description(f"diff: {loss.d.copy():.5f}")
def test_intermediate():
import os
os.environ["NNABLA_CUDNN_DETERMINISTIC"] = '1'
nn.clear_parameters()
x = nn.Variable.from_numpy_array(np.full((1, 3, 256, 256), 0.1))
t = nn.Variable.from_numpy_array([803])
unet = UNet(num_classes=1, model_channels=128, output_channels=3,
num_res_blocks=3,
attention_resolutions=(16, 8),
attention_num_heads=4,
channel_mult=(1, 1, 2, 2, 4, 4))
res = unet.get_intermediates(x, t)
print("[emb]")
dump(res["emb"])
print("")
print("[down]")
dump(res["down"])
print("")
print("[middle]")
dump(res["middle"])
print("")
print("[up]")
dump(res["up"])
print("")
print("[out]")
dump(res["out"])
print("")
def dump(var):
if isinstance(var, list):
for x in var:
dump(x)
return
arr = var.d
mean = arr.mean()
std = arr.std()
abs_sum = np.abs(arr).sum()
print("mean: {:-6.1g} std: {:-6.1g} abs_sum: {:-6.1g} size: {}".format(mean,
std, abs_sum, arr.size))
@click.command()
@click.option("--loop/--no-loop", default=True)
@click.option("--intermediate/--no-intermediate", default=False)
def test(loop, intermediate):
# This function is for a unit test of UNet.
from nnabla.ext_utils import get_extension_context
ctx = get_extension_context("cudnn")
nn.set_default_context(ctx)
from nnabla.logger import logger
if loop:
logger.info("Test Unet by simple training loop.")
test_simple_loop()
if intermediate:
logger.info("Test intermediate values of Unet.")
test_intermediate()
if __name__ == "__main__":
test()
|
[
"numpy.abs",
"nnabla.ext_utils.get_extension_context",
"nnabla.Variable.from_numpy_array",
"click.option",
"nnabla.get_parameters",
"numpy.random.randint",
"nnabla.functions.add2",
"nnabla.functions.dropout",
"numpy.full",
"nnabla.logger.logger.info",
"numpy.random.randn",
"nnabla.solvers.Sgd",
"click.command",
"nnabla.functions.reshape",
"nnabla.functions.squared_error",
"nnabla.functions.batch_matmul",
"tqdm.trange",
"nnabla.auto_forward",
"nnabla.set_default_context",
"nnabla.functions.softmax",
"nnabla.parameter_scope",
"nnabla.clear_parameters"
] |
[((13790, 13805), 'click.command', 'click.command', ([], {}), '()\n', (13803, 13805), False, 'import click\n'), ((13807, 13853), 'click.option', 'click.option', (['"""--loop/--no-loop"""'], {'default': '(True)'}), "('--loop/--no-loop', default=True)\n", (13819, 13853), False, 'import click\n'), ((13855, 13918), 'click.option', 'click.option', (['"""--intermediate/--no-intermediate"""'], {'default': '(False)'}), "('--intermediate/--no-intermediate', default=False)\n", (13867, 13918), False, 'import click\n'), ((3583, 3609), 'nnabla.functions.add2', 'F.add2', (['h', 'x'], {'inplace': '(True)'}), '(h, x, inplace=True)\n', (3589, 3609), True, 'import nnabla.functions as F\n'), ((11851, 11872), 'nnabla.clear_parameters', 'nn.clear_parameters', ([], {}), '()\n', (11870, 11872), True, 'import nnabla as nn\n'), ((12363, 12370), 'nnabla.solvers.Sgd', 'S.Sgd', ([], {}), '()\n', (12368, 12370), True, 'import nnabla.solvers as S\n'), ((12456, 12467), 'tqdm.trange', 'trange', (['(100)'], {}), '(100)\n', (12462, 12467), False, 'from tqdm import trange\n'), ((12779, 12800), 'nnabla.clear_parameters', 'nn.clear_parameters', ([], {}), '()\n', (12798, 12800), True, 'import nnabla as nn\n'), ((12879, 12914), 'nnabla.Variable.from_numpy_array', 'nn.Variable.from_numpy_array', (['[803]'], {}), '([803])\n', (12907, 12914), True, 'import nnabla as nn\n'), ((14062, 14092), 'nnabla.ext_utils.get_extension_context', 'get_extension_context', (['"""cudnn"""'], {}), "('cudnn')\n", (14083, 14092), False, 'from nnabla.ext_utils import get_extension_context\n'), ((14097, 14124), 'nnabla.set_default_context', 'nn.set_default_context', (['ctx'], {}), '(ctx)\n', (14119, 14124), True, 'import nnabla as nn\n'), ((2770, 2794), 'nnabla.parameter_scope', 'nn.parameter_scope', (['name'], {}), '(name)\n', (2788, 2794), True, 'import nnabla as nn\n'), ((3309, 3330), 'nnabla.functions.softmax', 'F.softmax', (['w'], {'axis': '(-1)'}), '(w, axis=-1)\n', (3318, 3330), True, 'import nnabla.functions as F\n'), ((3344, 3384), 'nnabla.functions.reshape', 'F.reshape', (['v', '(B * num_heads, -1, H * W)'], {}), '(v, (B * num_heads, -1, H * W))\n', (3353, 3384), True, 'import nnabla.functions as F\n'), ((3397, 3417), 'nnabla.functions.batch_matmul', 'F.batch_matmul', (['h', 'w'], {}), '(h, w)\n', (3411, 3417), True, 'import nnabla.functions as F\n'), ((3430, 3456), 'nnabla.functions.reshape', 'F.reshape', (['h', '(B, C, H, W)'], {}), '(h, (B, C, H, W))\n', (3439, 3456), True, 'import nnabla.functions as F\n'), ((11911, 11943), 'numpy.random.randn', 'np.random.randn', (['(10)', '(3)', '(128)', '(128)'], {}), '(10, 3, 128, 128)\n', (11926, 11943), True, 'import numpy as np\n'), ((11982, 12014), 'numpy.random.randint', 'np.random.randint', (['(0)', '(100)', '(10,)'], {}), '(0, 100, (10,))\n', (11999, 12014), True, 'import numpy as np\n'), ((12295, 12316), 'nnabla.functions.squared_error', 'F.squared_error', (['y', 'x'], {}), '(y, x)\n', (12310, 12316), True, 'import nnabla.functions as F\n'), ((12397, 12416), 'nnabla.get_parameters', 'nn.get_parameters', ([], {}), '()\n', (12414, 12416), True, 'import nnabla as nn\n'), ((12839, 12869), 'numpy.full', 'np.full', (['(1, 3, 256, 256)', '(0.1)'], {}), '((1, 3, 256, 256), 0.1)\n', (12846, 12869), True, 'import numpy as np\n'), ((14185, 14234), 'nnabla.logger.logger.info', 'logger.info', (['"""Test Unet by simple training loop."""'], {}), "('Test Unet by simple training loop.')\n", (14196, 14234), False, 'from nnabla.logger import logger\n'), ((14292, 14340), 'nnabla.logger.logger.info', 'logger.info', (['"""Test intermediate values of Unet."""'], {}), "('Test intermediate values of Unet.')\n", (14303, 14340), False, 'from nnabla.logger import logger\n'), ((1770, 1798), 'nnabla.functions.dropout', 'F.dropout', (['h'], {'p': 'self.dropout'}), '(h, p=self.dropout)\n', (1779, 1798), True, 'import nnabla.functions as F\n'), ((2314, 2338), 'nnabla.parameter_scope', 'nn.parameter_scope', (['name'], {}), '(name)\n', (2332, 2338), True, 'import nnabla as nn\n'), ((3051, 3091), 'nnabla.functions.reshape', 'F.reshape', (['q', '(B * num_heads, -1, H * W)'], {}), '(q, (B * num_heads, -1, H * W))\n', (3060, 3091), True, 'import nnabla.functions as F\n'), ((3120, 3160), 'nnabla.functions.reshape', 'F.reshape', (['k', '(B * num_heads, -1, H * W)'], {}), '(k, (B * num_heads, -1, H * W))\n', (3129, 3160), True, 'import nnabla.functions as F\n'), ((4520, 4560), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""timestep_embedding"""'], {}), "('timestep_embedding')\n", (4538, 4560), True, 'import nnabla as nn\n'), ((4732, 4766), 'nnabla.functions.reshape', 'F.reshape', (['emb', '(emb.shape + (1, 1))'], {}), '(emb, emb.shape + (1, 1))\n', (4741, 4766), True, 'import nnabla.functions as F\n'), ((5072, 5096), 'nnabla.parameter_scope', 'nn.parameter_scope', (['name'], {}), '(name)\n', (5090, 5096), True, 'import nnabla as nn\n'), ((5669, 5693), 'nnabla.parameter_scope', 'nn.parameter_scope', (['name'], {}), '(name)\n', (5687, 5693), True, 'import nnabla as nn\n'), ((6007, 6031), 'nnabla.parameter_scope', 'nn.parameter_scope', (['name'], {}), '(name)\n', (6025, 6031), True, 'import nnabla as nn\n'), ((7098, 7119), 'nnabla.auto_forward', 'nn.auto_forward', (['(True)'], {}), '(True)\n', (7113, 7119), True, 'import nnabla as nn\n'), ((9760, 9812), 'nnabla.parameter_scope', 'nn.parameter_scope', (["('UNet' if name is None else name)"], {}), "('UNet' if name is None else name)\n", (9778, 9812), True, 'import nnabla as nn\n'), ((13653, 13664), 'numpy.abs', 'np.abs', (['arr'], {}), '(arr)\n', (13659, 13664), True, 'import numpy as np\n'), ((7175, 7227), 'nnabla.parameter_scope', 'nn.parameter_scope', (["('UNet' if name is None else name)"], {}), "('UNet' if name is None else name)\n", (7193, 7227), True, 'import nnabla as nn\n'), ((10012, 10050), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""downsample_block"""'], {}), "('downsample_block')\n", (10030, 10050), True, 'import nnabla as nn\n'), ((10740, 10774), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""middle_block"""'], {}), "('middle_block')\n", (10758, 10774), True, 'import nnabla as nn\n'), ((10869, 10905), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""upsample_block"""'], {}), "('upsample_block')\n", (10887, 10905), True, 'import nnabla as nn\n'), ((11565, 11599), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""output_block"""'], {}), "('output_block')\n", (11583, 11599), True, 'import nnabla as nn\n'), ((7485, 7523), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""downsample_block"""'], {}), "('downsample_block')\n", (7503, 7523), True, 'import nnabla as nn\n'), ((8306, 8340), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""middle_block"""'], {}), "('middle_block')\n", (8324, 8340), True, 'import nnabla as nn\n'), ((8509, 8545), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""upsample_block"""'], {}), "('upsample_block')\n", (8527, 8545), True, 'import nnabla as nn\n'), ((9415, 9449), 'nnabla.parameter_scope', 'nn.parameter_scope', (['"""output_block"""'], {}), "('output_block')\n", (9433, 9449), True, 'import nnabla as nn\n')]
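The UNet above leans on a sinusoidal_embedding helper imported from layers, which is not part of this record; a standalone numpy sketch of the standard sinusoidal timestep embedding it presumably computes (an assumption, since layers.py is not shown):

import numpy as np

def sinusoidal_embedding(t, dim):
    # Frequencies 10000^(-i/half) for i in [0, half), the usual DDPM/transformer schedule.
    half = dim // 2
    freqs = np.exp(-np.log(10000.0) * np.arange(half) / half)
    args = t[:, None] * freqs[None, :]
    return np.concatenate([np.sin(args), np.cos(args)], axis=1)

emb = sinusoidal_embedding(np.array([0, 10, 100]), 128)
assert emb.shape == (3, 128)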
|
# Copyright (c) 2017-2020 Neogeo-Technologies.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.conf import settings
from django.contrib.auth.decorators import login_required
# from django.db import transaction
# from django.http import Http404
# from django.shortcuts import get_object_or_404
# from django.shortcuts import redirect
# from django.urls import reverse
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from django.views import View
# from idgo_admin.exceptions import ExceptionsHandler
# from idgo_admin.exceptions import ProfileHttp404
from idgo_admin.models import Dataset
from idgo_admin.shortcuts import get_object_or_404_extended
# from idgo_admin.shortcuts import on_profile_http404
from idgo_admin.shortcuts import render_with_info_profile
from idgo_admin.shortcuts import user_and_profile
# from idgo_admin.views.dataset import target as datasets_target
# from idgo_resource import logger
# from idgo_resource.models import Resource
# CKAN_URL = settings.CKAN_URL
#
# FTP_DIR = settings.FTP_DIR
# try:
# FTP_UPLOADS_DIR = settings.FTP_UPLOADS_DIR
# except AttributeError:
# FTP_UPLOADS_DIR = 'uploads'
RESOURCE_APPS = settings.RESOURCE_APPS
decorators = [csrf_exempt, login_required(login_url=settings.LOGIN_URL)]
@method_decorator(decorators, name='dispatch')
class NewResource(View):
def get(self, request, dataset_id=None, *args, **kwargs):
user, profile = user_and_profile(request)
dataset = get_object_or_404_extended(Dataset, user, include={'id': dataset_id})
context = {
'dataset': dataset,
}
return render_with_info_profile(request, 'resource/new.html', context)
# @method_decorator(decorators, name='dispatch')
# class ResourceManager(View):
# template = 'resource/resource.html'
#
# def get_context(self, form, user, dataset, resource=None):
# return {
# 'target': datasets_target(dataset, user),
# 'dataset': dataset,
# 'resource': resource,
# 'form': form,
# }
#
# def resource_router(self, dataset_id, app, instance_id=None, action='create'):
# from django.apps import apps
# from django.urls.exceptions import NoReverseMatch
# if apps.is_installed(app):
# url_pattern = "{0}:resource-{0}-{1}".format(app, action)
# kvp = {'dataset_id': dataset_id}
# if action == 'update':
# kvp['pk'] = instance_id
# return reverse(url_pattern, kwargs=kvp)
# else:
# raise NoReverseMatch
#
# @ExceptionsHandler(actions={ProfileHttp404: on_profile_http404})
# def get(self, request, dataset_id=None, *args, **kwargs):
#
# user, profile = user_and_profile(request)
#
# dataset = get_object_or_404_extended(
# Dataset, user, include={'id': dataset_id})
# # redirect to resource children app:
# app = request.POST.get('app', request.GET.get('app'))
# if app:
# return redirect(self.resource_router(dataset_id, app, action='create'))
# # Redirect to layer
# # _resource = request.GET.get('resource')
# # _layer = request.GET.get('layer')
# # if _resource and _layer:
# # return redirect(
# # reverse('idgo_admin:layer_editor', kwargs={
# # 'dataset_id': dataset.id,
# # 'resource_id': _resource,
# # 'layer_id': _layer}))
# #
# # resource = None
# # id = request.GET.get('id')
# # if id:
# # include = {'id': id, 'dataset_id': dataset.id}
# # resource = get_object_or_404_extended(Resource, user, include=include)
#
# # form = Form(instance=resource)
# context = self.get_context(None, user, dataset, resource=resource)
# return render_with_info_profile(request, self.template, context)
#
# @ExceptionsHandler(ignore=[Http404], actions={ProfileHttp404: on_profile_http404})
# @transaction.atomic
# def post(self, request, dataset_id=None, *args, **kwargs):
# user, profile = user_and_profile(request)
# dataset = get_object_or_404_extended(
# Dataset, user, include={'id': dataset_id})
# form = Form(request.POST, request.FILES)
# if form.is_valid():
#
# try:
# resource = form.save()
# except Exception:
#             logger.exception('ResourceManager:post')
# else:
# app = request.POST.get('app')
# return redirect(self.resource_router(dataset_id, app, action='create'))
#
# context = self.get_context(form, user, dataset, resource)
# return render_with_info_profile(request, self.template, context)
# @login_required(login_url=settings.LOGIN_URL)
# @csrf_exempt
# def resource(request, dataset_id=None, *args, **kwargs):
# user, profile = user_and_profile(request)
#
# id = request.GET.get('id', request.GET.get('ckan_id'))
# if not id:
# raise Http404()
#
# kvp = {}
# try:
# id = int(id)
# except ValueError:
# kvp['ckan_id'] = id
# else:
# kvp['id'] = id
# finally:
# resource = get_object_or_404(Resource, **kvp)
#
# # TODO:
# # return redirect(reverse('idgo_admin:resource_editor', kwargs={
# # 'dataset_id': resource.dataset.id, 'resource_id': resource.id}))
# return redirect(
# '{}?id={}'.format(
# reverse(
# 'resource:resource', kwargs={'dataset_id': resource.dataset.id}),
# resource.id))
|
[
"django.contrib.auth.decorators.login_required",
"idgo_admin.shortcuts.user_and_profile",
"idgo_admin.shortcuts.render_with_info_profile",
"django.utils.decorators.method_decorator",
"idgo_admin.shortcuts.get_object_or_404_extended"
] |
[((1844, 1889), 'django.utils.decorators.method_decorator', 'method_decorator', (['decorators'], {'name': '"""dispatch"""'}), "(decorators, name='dispatch')\n", (1860, 1889), False, 'from django.utils.decorators import method_decorator\n'), ((1795, 1839), 'django.contrib.auth.decorators.login_required', 'login_required', ([], {'login_url': 'settings.LOGIN_URL'}), '(login_url=settings.LOGIN_URL)\n', (1809, 1839), False, 'from django.contrib.auth.decorators import login_required\n'), ((2002, 2027), 'idgo_admin.shortcuts.user_and_profile', 'user_and_profile', (['request'], {}), '(request)\n', (2018, 2027), False, 'from idgo_admin.shortcuts import user_and_profile\n'), ((2046, 2115), 'idgo_admin.shortcuts.get_object_or_404_extended', 'get_object_or_404_extended', (['Dataset', 'user'], {'include': "{'id': dataset_id}"}), "(Dataset, user, include={'id': dataset_id})\n", (2072, 2115), False, 'from idgo_admin.shortcuts import get_object_or_404_extended\n'), ((2195, 2258), 'idgo_admin.shortcuts.render_with_info_profile', 'render_with_info_profile', (['request', '"""resource/new.html"""', 'context'], {}), "(request, 'resource/new.html', context)\n", (2219, 2258), False, 'from idgo_admin.shortcuts import render_with_info_profile\n')]
|
from unittest import TestCase
import unittest
from binary_tree import BinaryTree, BinaryTreeNode
class TestBinaryTree(TestCase):
def setUp(self):
self.root_value = "42"
self.some_value = "Gomu Gomu No!"
def test_instance(self):
# arrange/act
tree = BinaryTree()
# assert
self.assertIsInstance(tree, BinaryTree)
def test_insert_root(self):
# arrange
tree = BinaryTree()
# act
tree.insert(self.root_value)
# assert
self.assertIsInstance(tree.root, BinaryTreeNode)
self.assertEqual(tree.root.value, self.root_value)
def test_insert_first_root_child_in_level_order(self):
# arrange
tree = BinaryTree()
tree.insert(self.root_value)
# act
tree.insert(self.some_value)
# assert
self.assertIsInstance(tree.root.left, BinaryTreeNode)
self.assertEqual(tree.root.left.value, self.some_value)
def test_get_level_order_list(self):
# arrange
tree = BinaryTree()
tree.insert(self.root_value)
tree.insert(self.some_value)
tree.insert(f"{self.some_value}-2")
tree.insert(f"{self.some_value}-3")
tree.insert(f"{self.some_value}-4")
tree.insert(f"{self.some_value}-5")
expected = [
self.root_value,
self.some_value,
f"{self.some_value}-2",
f"{self.some_value}-3",
f"{self.some_value}-4",
f"{self.some_value}-5",
]
# act
level_ordered_list = tree.get_level_order_list()
# assert
self.assertListEqual(level_ordered_list, expected)
def test_find(self):
# arrange
tree = BinaryTree()
tree.insert(self.root_value)
tree.insert(self.some_value)
# act
node = tree.find(self.some_value)
# assert
self.assertIsInstance(node, BinaryTreeNode)
self.assertEqual(node.value, self.some_value)
def test_delete(self):
# arrange
tree = BinaryTree()
tree.insert(self.root_value)
tree.insert(self.some_value)
tree.insert(f"{self.some_value}-2")
tree.insert(f"{self.some_value}-3")
tree.insert(f"{self.some_value}-4")
tree.insert(f"{self.some_value}-5")
expected = [
self.root_value,
self.some_value,
f"{self.some_value}-2",
f"{self.some_value}-4",
f"{self.some_value}-5"
]
# act
tree.delete(f"{self.some_value}-3")
# arrange
self.assertListEqual(tree.get_level_order_list(), expected)
if __name__ == "__main__":
unittest.main()
|
[
"unittest.main",
"binary_tree.BinaryTree"
] |
[((3037, 3052), 'unittest.main', 'unittest.main', ([], {}), '()\n', (3050, 3052), False, 'import unittest\n'), ((293, 305), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (303, 305), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n'), ((437, 449), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (447, 449), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n'), ((727, 739), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (737, 739), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n'), ((1046, 1058), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (1056, 1058), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n'), ((1748, 1760), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (1758, 1760), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n'), ((2073, 2085), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (2083, 2085), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n'), ((2400, 2412), 'binary_tree.BinaryTree', 'BinaryTree', ([], {}), '()\n', (2410, 2412), False, 'from binary_tree import BinaryTree, BinaryTreeNode\n')]
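The assertions above assume level-order (breadth-first) insertion; a self-contained sketch of that insert with collections.deque, since binary_tree itself is not part of this record:

from collections import deque

class Node:
    def __init__(self, value):
        self.value, self.left, self.right = value, None, None

def insert_level_order(root, value):
    # Place value at the first free child slot found breadth-first.
    if root is None:
        return Node(value)
    queue = deque([root])
    while queue:
        node = queue.popleft()
        if node.left is None:
            node.left = Node(value)
            break
        if node.right is None:
            node.right = Node(value)
            break
        queue.extend([node.left, node.right])
    return root

root = None
for v in ["42", "a", "b"]:
    root = insert_level_order(root, v)
assert (root.value, root.left.value, root.right.value) == ("42", "a", "b")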
|
# author: <NAME>
# date: 2020-11-27
"""Load a csv / feather data file from a local input file and split into test and training data set and write to 2 separate local output files. The output file will be either a csv or a feather file format, which is determined by the extension.
Usage: src/cleanup_data.py --in_file=<in_file> --out_training_file=<out_training_file> --out_test_file=<out_test_file> [--random_state=<random_state>] [--test_size=<test_size>]
Options:
--in_file=<in_file> The path and the filename and the extension where we want to load from our disk
--out_training_file=<out_training_file> The path and the filename and the extension where we want to save the training data file in our disk
--out_test_file=<out_test_file> The path and the filename and the extension where we want to save the test data file in our disk
--random_state=<random_state> The random state that we want to use for splitting. [default: 2020]
--test_size=<test_size> The percentage of testing data split from the original dataframe [default: 0.25]
"""
import os
import pandas as pd
from sklearn.model_selection import train_test_split
from docopt import docopt
import feather
opt = docopt(__doc__)
def main(in_file, out_training_file, out_test_file, random_state, test_size):
print("Start cleanup script")
# Step 1: Read the data into Pandas data frame
in_extension = in_file[in_file.rindex(".")+1:]
print("Read in the file:", in_file)
if in_extension == "csv":
input = pd.read_csv(in_file)
elif in_extension == "feather":
input = feather.read_dataframe(in_file, )
else:
print("Unknown data type", in_file)
return
# Step 2: Split data into training and test data
train_df, test_df = train_test_split(input, test_size=float(test_size), random_state=int(random_state))
print("Split the data frame with test_size=", test_size, " and random_state=" , random_state, sep="")
# Step 3: Create the path if it does not exist
for out_file, df in [(out_training_file, train_df), (out_test_file, test_df)]:
dirpath = os.path.dirname(out_file)
if not os.path.exists(dirpath):
os.makedirs(dirpath)
# Step 4: Write the file locally based on the extension type
extension = out_file[out_file.rindex(".")+1:]
if extension == "csv":
            df.to_csv(out_file, index=False)
        elif extension == "feather":
            feather.write_dataframe(df, out_file)
        else:
            print("Unknown output data type", out_file)
return
print("Successfully created file", out_file, "with number of rows:", df.shape[0], "and columns:", df.shape[1])
print("End cleanup script")
if __name__ == "__main__":
main(opt["--in_file"], opt["--out_training_file"], opt["--out_test_file"], opt["--random_state"], opt["--test_size"])
|
[
"feather.read_dataframe",
"os.makedirs",
"feather.write_dataframe",
"docopt.docopt",
"pandas.read_csv",
"os.path.dirname",
"os.path.exists"
] |
[((1248, 1263), 'docopt.docopt', 'docopt', (['__doc__'], {}), '(__doc__)\n', (1254, 1263), False, 'from docopt import docopt\n'), ((1575, 1595), 'pandas.read_csv', 'pd.read_csv', (['in_file'], {}), '(in_file)\n', (1586, 1595), True, 'import pandas as pd\n'), ((2180, 2205), 'os.path.dirname', 'os.path.dirname', (['out_file'], {}), '(out_file)\n', (2195, 2205), False, 'import os\n'), ((1648, 1679), 'feather.read_dataframe', 'feather.read_dataframe', (['in_file'], {}), '(in_file)\n', (1670, 1679), False, 'import feather\n'), ((2221, 2244), 'os.path.exists', 'os.path.exists', (['dirpath'], {}), '(dirpath)\n', (2235, 2244), False, 'import os\n'), ((2258, 2278), 'os.makedirs', 'os.makedirs', (['dirpath'], {}), '(dirpath)\n', (2269, 2278), False, 'import os\n'), ((2533, 2570), 'feather.write_dataframe', 'feather.write_dataframe', (['df', 'out_file'], {}), '(df, out_file)\n', (2556, 2570), False, 'import feather\n')]
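docopt returns a plain dict keyed by option name, with every value delivered as a string (hence the float()/int() casts in main); a minimal check against a cut-down usage string:

from docopt import docopt

usage = """Usage: cleanup_data.py --in_file=<in_file> [--test_size=<test_size>]

Options:
  --in_file=<in_file>      Input path.
  --test_size=<test_size>  Test split fraction. [default: 0.25]
"""
opt = docopt(usage, argv=["--in_file=data/raw.csv"])
assert opt["--in_file"] == "data/raw.csv"
assert opt["--test_size"] == "0.25"  # defaults arrive as strings too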
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.template import Library
from django.utils.translation import gettext as _
from tasks.views import UserTasksView
register = Library()
# @register.inclusion_tag('dashboard/assets/top-bar/tasks.html', takes_context=True)
@register.inclusion_tag('tasks/tasks.html', takes_context=True)
def user_tasks(context):
request = context['request']
view = UserTasksView(user=request.user)
#objs = view.get_context_data()['object_list']
tasks = view.object_list
return {'request': request, 'tasks': tasks}
|
[
"django.template.Library",
"tasks.views.UserTasksView"
] |
[((212, 221), 'django.template.Library', 'Library', ([], {}), '()\n', (219, 221), False, 'from django.template import Library\n'), ((445, 477), 'tasks.views.UserTasksView', 'UserTasksView', ([], {'user': 'request.user'}), '(user=request.user)\n', (458, 477), False, 'from tasks.views import UserTasksView\n')]
|
import chainer
import chainer.functions as F
import chainer.links as L
class ResidualBlock(chainer.Chain):
def __init__(self, filter_size, dilation,
residual_channels, dilated_channels, skip_channels):
super(ResidualBlock, self).__init__()
with self.init_scope():
self.conv = L.Convolution1D(
residual_channels, dilated_channels,
ksize=filter_size,
pad=dilation * (filter_size - 1), dilate=dilation)
self.res = L.Convolution1D(
dilated_channels // 2, residual_channels, 1)
self.skip = L.Convolution1D(
dilated_channels // 2, skip_channels, 1)
self.filter_size = filter_size
self.dilation = dilation
self.residual_channels = residual_channels
def forward(self, x, condition):
length = x.shape[2]
h = self.conv(x)
h = h[:, :, :length] # crop
h += condition
tanh_z, sig_z = F.split_axis(h, 2, axis=1)
z = F.tanh(tanh_z) * F.sigmoid(sig_z)
if x.shape[2] == z.shape[2]:
residual = self.res(z) + x
else:
residual = self.res(z) + x[:, :, -1:] # crop
        skip_connection = self.skip(z)
        return residual, skip_connection
def initialize(self, n):
self.queue = chainer.Variable(self.xp.zeros((
n, self.residual_channels,
self.dilation * (self.filter_size - 1) + 1, 1),
dtype=self.conv.W.dtype))
self.conv.pad = (0, 0)
def pop(self, condition):
return self(self.queue, condition)
def push(self, x):
self.queue = F.concat((self.queue[:, :, 1:], x), axis=2)
class ResidualNet(chainer.ChainList):
def __init__(self, n_loop, n_layer, filter_size,
residual_channels, dilated_channels, skip_channels):
super(ResidualNet, self).__init__()
dilations = [2 ** i for i in range(n_layer)] * n_loop
for dilation in dilations:
self.add_link(ResidualBlock(
filter_size, dilation,
residual_channels, dilated_channels, skip_channels))
def forward(self, x, conditions):
for i, (func, cond) in enumerate(zip(self.children(), conditions)):
x, skip = func(x, cond)
if i == 0:
skip_connections = skip
else:
skip_connections += skip
return skip_connections
def initialize(self, n):
for block in self.children():
block.initialize(n)
def generate(self, x, conditions):
for i, (func, cond) in enumerate(zip(self.children(), conditions)):
func.push(x)
x, skip = func.pop(cond)
if i == 0:
skip_connections = skip
else:
skip_connections += skip
return skip_connections
|
[
"chainer.functions.split_axis",
"chainer.links.Convolution1D",
"chainer.functions.concat",
"chainer.functions.sigmoid",
"chainer.functions.tanh"
] |
[((997, 1023), 'chainer.functions.split_axis', 'F.split_axis', (['h', '(2)'], {'axis': '(1)'}), '(h, 2, axis=1)\n', (1009, 1023), True, 'import chainer.functions as F\n'), ((1671, 1714), 'chainer.functions.concat', 'F.concat', (['(self.queue[:, :, 1:], x)'], {'axis': '(2)'}), '((self.queue[:, :, 1:], x), axis=2)\n', (1679, 1714), True, 'import chainer.functions as F\n'), ((327, 454), 'chainer.links.Convolution1D', 'L.Convolution1D', (['residual_channels', 'dilated_channels'], {'ksize': 'filter_size', 'pad': '(dilation * (filter_size - 1))', 'dilate': 'dilation'}), '(residual_channels, dilated_channels, ksize=filter_size, pad\n =dilation * (filter_size - 1), dilate=dilation)\n', (342, 454), True, 'import chainer.links as L\n'), ((522, 582), 'chainer.links.Convolution1D', 'L.Convolution1D', (['(dilated_channels // 2)', 'residual_channels', '(1)'], {}), '(dilated_channels // 2, residual_channels, 1)\n', (537, 582), True, 'import chainer.links as L\n'), ((624, 680), 'chainer.links.Convolution1D', 'L.Convolution1D', (['(dilated_channels // 2)', 'skip_channels', '(1)'], {}), '(dilated_channels // 2, skip_channels, 1)\n', (639, 680), True, 'import chainer.links as L\n'), ((1036, 1050), 'chainer.functions.tanh', 'F.tanh', (['tanh_z'], {}), '(tanh_z)\n', (1042, 1050), True, 'import chainer.functions as F\n'), ((1053, 1069), 'chainer.functions.sigmoid', 'F.sigmoid', (['sig_z'], {}), '(sig_z)\n', (1062, 1069), True, 'import chainer.functions as F\n')]
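The split/tanh/sigmoid sequence in forward is WaveNet's gated activation unit, z = tanh(W_f * x) * sigmoid(W_g * x); a numpy sketch of the same gating on an already-split pair of feature maps:

import numpy as np

tanh_z = np.random.randn(1, 8, 100)  # "filter" half of the dilated conv output
sig_z = np.random.randn(1, 8, 100)   # "gate" half
z = np.tanh(tanh_z) * (1.0 / (1.0 + np.exp(-sig_z)))  # tanh(...) * sigmoid(...)
assert z.shape == (1, 8, 100)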
|
from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
def readme():
try:
with open('README.md') as f:
return f.read()
    except OSError:
        pass
setup(
name='iterapi',
version='1.2.2',
description='Python API to student portal of ITER',
long_description=readme(),
long_description_content_type='text/markdown',
url='https://github.com/SubhrajitPrusty/iterapi',
author='<NAME>',
author_email='<EMAIL>',
setup_requires=['setuptools>=38.6.0'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Topic :: Software Development :: Build Tools',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
],
keywords='api ITER development',
license = 'MIT',
packages=['iterapi'],
install_requires=['requests'],
)
|
[
"os.path.dirname",
"codecs.open"
] |
[((109, 131), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'from os import path\n'), ((170, 187), 'codecs.open', 'open', (['"""README.md"""'], {}), "('README.md')\n", (174, 187), False, 'from codecs import open\n')]
|
from collections import namedtuple
import raccoon as rc
def test_iterrows():
df = rc.DataFrame({'first': [1, 2, 3, 4, 5], 'second': ['a', 2, 'b', None, 5]})
expected = [{'index': 0, 'first': 1, 'second': 'a'},
{'index': 1, 'first': 2, 'second': 2},
{'index': 2, 'first': 3, 'second': 'b'},
{'index': 3, 'first': 4, 'second': None},
{'index': 4, 'first': 5, 'second': 5}]
actual = list()
for x in df.iterrows():
actual.append(x)
assert actual == expected
# index = False
df = rc.DataFrame({'first': [1, 2, 3, 4, 5], 'second': ['a', 2, 'b', None, 5]})
expected = [{'first': 1, 'second': 'a'},
{'first': 2, 'second': 2},
{'first': 3, 'second': 'b'},
{'first': 4, 'second': None},
{'first': 5, 'second': 5}]
actual = list()
for x in df.iterrows(index=False):
actual.append(x)
assert actual == expected
def test_itertuples():
df = rc.DataFrame({'first': [1, 2], 'second': ['a', 2]}, index=['hi', 'bye'], index_name='greet',
columns=['first', 'second'])
name_tup = namedtuple('Raccoon', ['greet', 'first', 'second'])
expected = [name_tup(greet='hi', first=1, second='a'),
name_tup(greet='bye', first=2, second=2)]
actual = list()
for x in df.itertuples():
actual.append(x)
assert actual == expected
# index == False
df = rc.DataFrame({'first': [1, 2], 'second': ['a', 2]}, index=['hi', 'bye'], index_name='greet',
columns=['first', 'second'])
name_tup = namedtuple('Raccoon', ['first', 'second'])
expected = [name_tup(first=1, second='a'),
name_tup(first=2, second=2)]
actual = list()
for x in df.itertuples(index=False):
actual.append(x)
assert actual == expected
|
[
"collections.namedtuple",
"raccoon.DataFrame"
] |
[((89, 163), 'raccoon.DataFrame', 'rc.DataFrame', (["{'first': [1, 2, 3, 4, 5], 'second': ['a', 2, 'b', None, 5]}"], {}), "({'first': [1, 2, 3, 4, 5], 'second': ['a', 2, 'b', None, 5]})\n", (101, 163), True, 'import raccoon as rc\n'), ((581, 655), 'raccoon.DataFrame', 'rc.DataFrame', (["{'first': [1, 2, 3, 4, 5], 'second': ['a', 2, 'b', None, 5]}"], {}), "({'first': [1, 2, 3, 4, 5], 'second': ['a', 2, 'b', None, 5]})\n", (593, 655), True, 'import raccoon as rc\n'), ((1028, 1153), 'raccoon.DataFrame', 'rc.DataFrame', (["{'first': [1, 2], 'second': ['a', 2]}"], {'index': "['hi', 'bye']", 'index_name': '"""greet"""', 'columns': "['first', 'second']"}), "({'first': [1, 2], 'second': ['a', 2]}, index=['hi', 'bye'],\n index_name='greet', columns=['first', 'second'])\n", (1040, 1153), True, 'import raccoon as rc\n'), ((1188, 1239), 'collections.namedtuple', 'namedtuple', (['"""Raccoon"""', "['greet', 'first', 'second']"], {}), "('Raccoon', ['greet', 'first', 'second'])\n", (1198, 1239), False, 'from collections import namedtuple\n'), ((1494, 1619), 'raccoon.DataFrame', 'rc.DataFrame', (["{'first': [1, 2], 'second': ['a', 2]}"], {'index': "['hi', 'bye']", 'index_name': '"""greet"""', 'columns': "['first', 'second']"}), "({'first': [1, 2], 'second': ['a', 2]}, index=['hi', 'bye'],\n index_name='greet', columns=['first', 'second'])\n", (1506, 1619), True, 'import raccoon as rc\n'), ((1654, 1696), 'collections.namedtuple', 'namedtuple', (['"""Raccoon"""', "['first', 'second']"], {}), "('Raccoon', ['first', 'second'])\n", (1664, 1696), False, 'from collections import namedtuple\n')]
|
import os
from PIL import Image
from django.db import models
from django.utils import timezone
from django.contrib.auth.models import User
from users.models import Profile
from django.urls import reverse
from django.conf import settings
def get_image_path(instance, filename):
return os.path.join('posts', str(instance.author), filename)
class Post(models.Model):
photo = models.ImageField(upload_to=get_image_path, null=True, blank=False)
caption = models.TextField(max_length=2200, null=True, blank=True)
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE, related_name='posts')
location = models.CharField(max_length=30, blank=True)
likes = models.ManyToManyField(User, blank=True, related_name='post_likes')
def __str__(self):
return self.caption
def get_absolute_url(self):
return reverse('photo_blog-detail', kwargs={'pk': self.pk})
def get_api_like_url(self):
return reverse('photo_blog-post_like_api', kwargs={"pk": self.pk})
# Save checks exif information for cellphone photos to see what orientation the
# photo was taken in, then rotates the image to be upright. images are reduced
# to a width of 450px, with proportionally reduced height to save room on the
# server.
def save(self, **kwargs):
        super().save(**kwargs)
img = Image.open(self.photo.path)
exif = img._getexif()
orientation_key = 274
if exif and orientation_key in exif:
orientation = exif[orientation_key]
rotate_values = {
3: Image.ROTATE_180,
6: Image.ROTATE_270,
8: Image.ROTATE_90
}
if orientation in rotate_values:
img = img.transpose(rotate_values[orientation])
output_size = (450, (img.height / img.width) * 450)
img.thumbnail(output_size)
img.save(self.photo.path)
class Comment(models.Model):
post = models.ForeignKey('photo_blog.Post', on_delete=models.CASCADE, related_name='comments')
author = models.ForeignKey(User, on_delete=models.CASCADE)
text = models.TextField(max_length=2200)
date_posted = models.DateTimeField(default=timezone.now)
def __str__(self):
return self.text
def save(self, **kwargs):
        super().save(**kwargs)
# Notification model is used for three different types of notifications: like,
# comment, and follow notifications.
class Notification(models.Model):
post = models.ForeignKey('photo_blog.Post', on_delete=models.CASCADE, null=True, blank=True)
comment = models.ForeignKey(Comment, on_delete=models.CASCADE, null=True, blank=True)
user = models.ForeignKey(User, on_delete=models.CASCADE, null=True)
profile = models.ForeignKey(Profile, on_delete=models.CASCADE, null=True)
liked = models.BooleanField(default=False, null=True)
followed = models.BooleanField(default=False, null=True)
date_posted = models.DateTimeField(null=True, blank=True)
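A standalone sketch of the EXIF-orientation fix-up in Post.save: tag 274 is the standard Orientation key, and Pillow >= 9.1 exposes the transpose codes under Image.Transpose (older releases, as in the code above, use module-level constants such as Image.ROTATE_270):

from PIL import Image

ROTATES = {
    3: Image.Transpose.ROTATE_180,
    6: Image.Transpose.ROTATE_270,
    8: Image.Transpose.ROTATE_90,
}
img = Image.new("RGB", (900, 600))
orientation = 6  # hypothetical value read from img._getexif()[274]
if orientation in ROTATES:
    img = img.transpose(ROTATES[orientation])
assert img.size == (600, 900)  # a 90/270-degree transpose swaps width and height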
|
[
"django.db.models.TextField",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"PIL.Image.open",
"django.db.models.ImageField",
"django.urls.reverse",
"django.db.models.DateTimeField"
] |
[((384, 451), 'django.db.models.ImageField', 'models.ImageField', ([], {'upload_to': 'get_image_path', 'null': '(True)', 'blank': '(False)'}), '(upload_to=get_image_path, null=True, blank=False)\n', (401, 451), False, 'from django.db import models\n'), ((466, 522), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(2200)', 'null': '(True)', 'blank': '(True)'}), '(max_length=2200, null=True, blank=True)\n', (482, 522), False, 'from django.db import models\n'), ((541, 583), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (561, 583), False, 'from django.db import models\n'), ((597, 668), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'related_name': '"""posts"""'}), "(User, on_delete=models.CASCADE, related_name='posts')\n", (614, 668), False, 'from django.db import models\n'), ((684, 727), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(30)', 'blank': '(True)'}), '(max_length=30, blank=True)\n', (700, 727), False, 'from django.db import models\n'), ((740, 807), 'django.db.models.ManyToManyField', 'models.ManyToManyField', (['User'], {'blank': '(True)', 'related_name': '"""post_likes"""'}), "(User, blank=True, related_name='post_likes')\n", (762, 807), False, 'from django.db import models\n'), ((2003, 2095), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""photo_blog.Post"""'], {'on_delete': 'models.CASCADE', 'related_name': '"""comments"""'}), "('photo_blog.Post', on_delete=models.CASCADE, related_name\n ='comments')\n", (2020, 2095), False, 'from django.db import models\n'), ((2104, 2153), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE'}), '(User, on_delete=models.CASCADE)\n', (2121, 2153), False, 'from django.db import models\n'), ((2165, 2198), 'django.db.models.TextField', 'models.TextField', ([], {'max_length': '(2200)'}), '(max_length=2200)\n', (2181, 2198), False, 'from django.db import models\n'), ((2217, 2259), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'timezone.now'}), '(default=timezone.now)\n', (2237, 2259), False, 'from django.db import models\n'), ((2526, 2615), 'django.db.models.ForeignKey', 'models.ForeignKey', (['"""photo_blog.Post"""'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'blank': '(True)'}), "('photo_blog.Post', on_delete=models.CASCADE, null=True,\n blank=True)\n", (2543, 2615), False, 'from django.db import models\n'), ((2626, 2701), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Comment'], {'on_delete': 'models.CASCADE', 'null': '(True)', 'blank': '(True)'}), '(Comment, on_delete=models.CASCADE, null=True, blank=True)\n', (2643, 2701), False, 'from django.db import models\n'), ((2713, 2773), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(User, on_delete=models.CASCADE, null=True)\n', (2730, 2773), False, 'from django.db import models\n'), ((2788, 2851), 'django.db.models.ForeignKey', 'models.ForeignKey', (['Profile'], {'on_delete': 'models.CASCADE', 'null': '(True)'}), '(Profile, on_delete=models.CASCADE, null=True)\n', (2805, 2851), False, 'from django.db import models\n'), ((2864, 2909), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'null': '(True)'}), '(default=False, null=True)\n', (2883, 2909), False, 'from django.db import models\n'), ((2925, 2970), 'django.db.models.BooleanField', 
'models.BooleanField', ([], {'default': '(False)', 'null': '(True)'}), '(default=False, null=True)\n', (2944, 2970), False, 'from django.db import models\n'), ((2989, 3032), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'null': '(True)', 'blank': '(True)'}), '(null=True, blank=True)\n', (3009, 3032), False, 'from django.db import models\n'), ((908, 960), 'django.urls.reverse', 'reverse', (['"""photo_blog-detail"""'], {'kwargs': "{'pk': self.pk}"}), "('photo_blog-detail', kwargs={'pk': self.pk})\n", (915, 960), False, 'from django.urls import reverse\n'), ((1009, 1068), 'django.urls.reverse', 'reverse', (['"""photo_blog-post_like_api"""'], {'kwargs': "{'pk': self.pk}"}), "('photo_blog-post_like_api', kwargs={'pk': self.pk})\n", (1016, 1068), False, 'from django.urls import reverse\n'), ((1385, 1412), 'PIL.Image.open', 'Image.open', (['self.photo.path'], {}), '(self.photo.path)\n', (1395, 1412), False, 'from PIL import Image\n')]
|
import torch
from torch import nn, Tensor
import torch.nn.functional as F
from torch.nn.modules import ModuleList
from util.misc import inverse_sigmoid
from modules.transformer import PreProccessor, TransformerEncoderLayer, TransformerDecoderLayer, TransformerEncoder
import copy
from typing import Optional, List
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
# hack implementation for iterative bounding box refinement and two-stage Deformable DETR
self.state_classifier = None
self.obj_classifier = None
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None,
reference_points: Optional[Tensor] = None):
output = tgt
intermediate = []
intermediate_attn = []
intermediate_reference_points = []
for lid, layer in enumerate(self.layers):
output, attn_maps = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.state_classifier is not None:
tmp = self.state_classifier[lid](output)
new_reference_point = tmp + inverse_sigmoid(reference_points)
new_reference_point = new_reference_point.sigmoid()
reference_points = new_reference_point.detach()
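                # As in Deformable DETR's iterative box refinement: the layer
                # predicts a delta in logit space, so
                # new_ref = sigmoid(delta + inverse_sigmoid(old_ref)); detach()
                # keeps gradients from flowing through the reference points
                # across decoder layers.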
if self.return_intermediate:
intermediate.append(self.norm(output))
intermediate_reference_points.append(reference_points)
intermediate_attn.append(attn_maps)
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate), torch.stack(intermediate_attn), torch.stack(intermediate_reference_points)
return output.unsqueeze(0), torch.stack(intermediate_attn), reference_points.unsqueeze(0)
# --------------------------------------------- #
# -------------- HELPER FUNCTIONS ------------- #
def _get_clones(module, N):
return ModuleList([copy.deepcopy(module) for i in range(N)])
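# e.g. _get_clones(decoder_layer, 6) returns a ModuleList of 6 independent
# deep-copied layers (weights are not shared across clones).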
def _get_activation_fn(activation):
if activation == "relu":
return F.relu
elif activation == "gelu":
return F.gelu
raise RuntimeError("activation should be relu/gelu, not {}".format(activation))
# --------------------------------------------- #
|
[
"util.misc.inverse_sigmoid",
"copy.deepcopy",
"torch.stack"
] |
[((2674, 2704), 'torch.stack', 'torch.stack', (['intermediate_attn'], {}), '(intermediate_attn)\n', (2685, 2704), False, 'import torch\n'), ((2889, 2910), 'copy.deepcopy', 'copy.deepcopy', (['module'], {}), '(module)\n', (2902, 2910), False, 'import copy\n'), ((2535, 2560), 'torch.stack', 'torch.stack', (['intermediate'], {}), '(intermediate)\n', (2546, 2560), False, 'import torch\n'), ((2562, 2592), 'torch.stack', 'torch.stack', (['intermediate_attn'], {}), '(intermediate_attn)\n', (2573, 2592), False, 'import torch\n'), ((2594, 2636), 'torch.stack', 'torch.stack', (['intermediate_reference_points'], {}), '(intermediate_reference_points)\n', (2605, 2636), False, 'import torch\n'), ((1901, 1934), 'util.misc.inverse_sigmoid', 'inverse_sigmoid', (['reference_points'], {}), '(reference_points)\n', (1916, 1934), False, 'from util.misc import inverse_sigmoid\n')]
|
# Copyright 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import socket
from unittest import mock
import netifaces
from oslotest import base as test_base
import six
from oslo_utils import netutils
class NetworkUtilsTest(test_base.BaseTestCase):
def test_no_host(self):
result = netutils.urlsplit('http://')
self.assertEqual('', result.netloc)
self.assertIsNone(result.port)
self.assertIsNone(result.hostname)
self.assertEqual('http', result.scheme)
def test_parse_host_port(self):
self.assertEqual(('server01', 80),
netutils.parse_host_port('server01:80'))
self.assertEqual(('server01', None),
netutils.parse_host_port('server01'))
self.assertEqual(('server01', 1234),
netutils.parse_host_port('server01',
default_port=1234))
self.assertEqual(('::1', 80),
netutils.parse_host_port('[::1]:80'))
self.assertEqual(('::1', None),
netutils.parse_host_port('[::1]'))
self.assertEqual(('::1', 1234),
netutils.parse_host_port('[::1]',
default_port=1234))
self.assertEqual(('2001:db8:85a3::8a2e:370:7334', 1234),
netutils.parse_host_port(
'2001:db8:85a3::8a2e:370:7334',
default_port=1234))
def test_urlsplit(self):
result = netutils.urlsplit('rpc://myhost?someparam#somefragment')
self.assertEqual(result.scheme, 'rpc')
self.assertEqual(result.netloc, 'myhost')
self.assertEqual(result.path, '')
self.assertEqual(result.query, 'someparam')
self.assertEqual(result.fragment, 'somefragment')
result = netutils.urlsplit(
'rpc://myhost/mypath?someparam#somefragment',
allow_fragments=False)
self.assertEqual(result.scheme, 'rpc')
self.assertEqual(result.netloc, 'myhost')
self.assertEqual(result.path, '/mypath')
self.assertEqual(result.query, 'someparam#somefragment')
self.assertEqual(result.fragment, '')
result = netutils.urlsplit(
'rpc://user:pass@myhost/mypath?someparam#somefragment',
allow_fragments=False)
self.assertEqual(result.scheme, 'rpc')
self.assertEqual(result.netloc, 'user:pass@myhost')
self.assertEqual(result.path, '/mypath')
self.assertEqual(result.query, 'someparam#somefragment')
self.assertEqual(result.fragment, '')
def test_urlsplit_ipv6(self):
ipv6_url = 'http://[::1]:443/v2.0/'
result = netutils.urlsplit(ipv6_url)
self.assertEqual(result.scheme, 'http')
self.assertEqual(result.netloc, '[::1]:443')
self.assertEqual(result.path, '/v2.0/')
self.assertEqual(result.hostname, '::1')
self.assertEqual(result.port, 443)
ipv6_url = 'http://user:pass@[::1]/v2.0/'
result = netutils.urlsplit(ipv6_url)
self.assertEqual(result.scheme, 'http')
self.assertEqual(result.netloc, 'user:pass@[::1]')
self.assertEqual(result.path, '/v2.0/')
self.assertEqual(result.hostname, '::1')
self.assertIsNone(result.port)
        ipv6_url = 'https://[2001:db8:85a3::8a2e:370:7334]:1234/v2.0/xy?ab#12'
result = netutils.urlsplit(ipv6_url)
self.assertEqual(result.scheme, 'https')
self.assertEqual(result.netloc, '[2001:db8:85a3::8a2e:370:7334]:1234')
self.assertEqual(result.path, '/v2.0/xy')
self.assertEqual(result.hostname, '2001:db8:85a3::8a2e:370:7334')
self.assertEqual(result.port, 1234)
self.assertEqual(result.query, 'ab')
self.assertEqual(result.fragment, '12')
def test_urlsplit_params(self):
test_url = "http://localhost/?a=b&c=d"
result = netutils.urlsplit(test_url)
self.assertEqual({'a': 'b', 'c': 'd'}, result.params())
self.assertEqual({'a': 'b', 'c': 'd'}, result.params(collapse=False))
test_url = "http://localhost/?a=b&a=c&a=d"
result = netutils.urlsplit(test_url)
self.assertEqual({'a': 'd'}, result.params())
self.assertEqual({'a': ['b', 'c', 'd']}, result.params(collapse=False))
test_url = "http://localhost"
result = netutils.urlsplit(test_url)
self.assertEqual({}, result.params())
test_url = "http://localhost?"
result = netutils.urlsplit(test_url)
self.assertEqual({}, result.params())
def test_set_tcp_keepalive(self):
mock_sock = mock.Mock()
netutils.set_tcp_keepalive(mock_sock, True, 100, 10, 5)
calls = [
mock.call.setsockopt(socket.SOL_SOCKET,
socket.SO_KEEPALIVE, True),
]
if hasattr(socket, 'TCP_KEEPIDLE'):
calls += [
mock.call.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPIDLE, 100)
]
if hasattr(socket, 'TCP_KEEPINTVL'):
calls += [
mock.call.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPINTVL, 10),
]
if hasattr(socket, 'TCP_KEEPCNT'):
calls += [
mock.call.setsockopt(socket.IPPROTO_TCP,
socket.TCP_KEEPCNT, 5)
]
mock_sock.assert_has_calls(calls)
mock_sock.reset_mock()
netutils.set_tcp_keepalive(mock_sock, False)
self.assertEqual(1, len(mock_sock.mock_calls))
def test_is_valid_ipv4(self):
self.assertTrue(netutils.is_valid_ipv4('172.16.58.3'))
self.assertFalse(netutils.is_valid_ipv4('-1.11.11.11'))
self.assertFalse(netutils.is_valid_ipv4(''))
def test_is_valid_ipv6(self):
self.assertTrue(netutils.is_valid_ipv6('::1'))
self.assertTrue(netutils.is_valid_ipv6('fe80::1%eth0'))
        self.assertFalse(netutils.is_valid_ipv6('fe%80::1%eth0'))
self.assertFalse(netutils.is_valid_ipv6(
'fc00:e968:6179::de52:7100:85a3::172.31.128.1'))
self.assertFalse(netutils.is_valid_ipv6(''))
def test_escape_ipv6(self):
        self.assertEqual('[1234::1234]', netutils.escape_ipv6('1234::1234'))
self.assertEqual('127.0.0.1', netutils.escape_ipv6('127.0.0.1'))
def test_is_valid_ip(self):
self.assertTrue(netutils.is_valid_ip('127.0.0.1'))
self.assertTrue(netutils.is_valid_ip('2001:db8::ff00:42:8329'))
self.assertTrue(netutils.is_valid_ip('fe80::1%eth0'))
self.assertFalse(netutils.is_valid_ip('256.0.0.0'))
self.assertFalse(netutils.is_valid_ip('::1.2.3.'))
self.assertFalse(netutils.is_valid_ip(''))
self.assertFalse(netutils.is_valid_ip(None))
def test_is_valid_mac(self):
self.assertTrue(netutils.is_valid_mac("52:54:00:cf:2d:31"))
self.assertTrue(netutils.is_valid_mac(u"52:54:00:cf:2d:31"))
self.assertFalse(netutils.is_valid_mac("127.0.0.1"))
self.assertFalse(netutils.is_valid_mac("not:a:mac:address"))
self.assertFalse(netutils.is_valid_mac("52-54-00-cf-2d-31"))
self.assertFalse(netutils.is_valid_mac("aa bb cc dd ee ff"))
self.assertTrue(netutils.is_valid_mac("AA:BB:CC:DD:EE:FF"))
self.assertFalse(netutils.is_valid_mac("AA BB CC DD EE FF"))
self.assertFalse(netutils.is_valid_mac("AA-BB-CC-DD-EE-FF"))
def test_is_valid_cidr(self):
self.assertTrue(netutils.is_valid_cidr('10.0.0.0/24'))
self.assertTrue(netutils.is_valid_cidr('10.0.0.1/32'))
self.assertTrue(netutils.is_valid_cidr('0.0.0.0/0'))
self.assertTrue(netutils.is_valid_cidr('2600::/64'))
self.assertTrue(netutils.is_valid_cidr(
'0000:0000:0000:0000:0000:0000:0000:0001/32'))
self.assertFalse(netutils.is_valid_cidr('10.0.0.1'))
self.assertFalse(netutils.is_valid_cidr('10.0.0.1/33'))
self.assertFalse(netutils.is_valid_cidr(10))
def test_is_valid_ipv6_cidr(self):
self.assertTrue(netutils.is_valid_ipv6_cidr("2600::/64"))
self.assertTrue(netutils.is_valid_ipv6_cidr(
"fc00:db20:35b:7399::5/48"))
self.assertTrue(netutils.is_valid_ipv6_cidr(
"0000:0000:0000:0000:0000:0000:0000:0001/32"))
self.assertTrue(netutils.is_valid_ipv6_cidr(
"0000:0000:0000:0000:0000:0000:0000:0001"))
self.assertFalse(netutils.is_valid_ipv6_cidr("foo"))
self.assertFalse(netutils.is_valid_ipv6_cidr("127.0.0.1"))
def test_valid_port(self):
valid_inputs = [0, '0', 1, '1', 2, '3', '5', 8, 13, 21,
'80', '3246', '65535']
for input_str in valid_inputs:
self.assertTrue(netutils.is_valid_port(input_str))
def test_valid_port_fail(self):
invalid_inputs = ['-32768', '65536', 528491, '528491',
'528.491', 'thirty-seven', None]
for input_str in invalid_inputs:
self.assertFalse(netutils.is_valid_port(input_str))
def test_get_my_ip(self):
sock_attrs = {
'return_value.getsockname.return_value': ['1.2.3.4', '']}
with mock.patch('socket.socket', **sock_attrs):
addr = netutils.get_my_ipv4()
self.assertEqual(addr, '1.2.3.4')
def test_is_int_in_range(self):
valid_inputs = [(1, -100, 100),
('1', -100, 100),
(100, -100, 100),
('100', -100, 100),
(-100, -100, 100),
('-100', -100, 100)]
for input_value in valid_inputs:
self.assertTrue(netutils._is_int_in_range(*input_value))
def test_is_int_not_in_range(self):
invalid_inputs = [(None, 1, 100),
('ten', 1, 100),
(-1, 0, 255),
('None', 1, 100)]
for input_value in invalid_inputs:
self.assertFalse(netutils._is_int_in_range(*input_value))
def test_valid_icmp_type(self):
valid_inputs = [1, '1', 0, '0', 255, '255']
for input_value in valid_inputs:
self.assertTrue(netutils.is_valid_icmp_type(input_value))
def test_invalid_icmp_type(self):
invalid_inputs = [-1, '-1', 256, '256', None, 'None', 'five']
for input_value in invalid_inputs:
self.assertFalse(netutils.is_valid_icmp_type(input_value))
def test_valid_icmp_code(self):
valid_inputs = [1, '1', 0, '0', 255, '255', None]
for input_value in valid_inputs:
self.assertTrue(netutils.is_valid_icmp_code(input_value))
def test_invalid_icmp_code(self):
invalid_inputs = [-1, '-1', 256, '256', 'None', 'zero']
for input_value in invalid_inputs:
self.assertFalse(netutils.is_valid_icmp_code(input_value))
@mock.patch('socket.socket')
@mock.patch('oslo_utils.netutils._get_my_ipv4_address')
def test_get_my_ip_socket_error(self, ip, mock_socket):
mock_socket.side_effect = socket.error
ip.return_value = '1.2.3.4'
addr = netutils.get_my_ipv4()
self.assertEqual(addr, '1.2.3.4')
@mock.patch('netifaces.gateways')
@mock.patch('netifaces.ifaddresses')
def test_get_my_ipv4_address_with_default_route(
self, ifaddr, gateways):
with mock.patch.dict(netifaces.__dict__, {'AF_INET': '0'}):
ifaddr.return_value = {'0': [{'addr': '172.18.204.1'}]}
addr = netutils._get_my_ipv4_address()
self.assertEqual('172.18.204.1', addr)
@mock.patch('netifaces.gateways')
@mock.patch('netifaces.ifaddresses')
def test_get_my_ipv4_address_without_default_route(
self, ifaddr, gateways):
with mock.patch.dict(netifaces.__dict__, {'AF_INET': '0'}):
ifaddr.return_value = {}
addr = netutils._get_my_ipv4_address()
self.assertEqual('127.0.0.1', addr)
@mock.patch('netifaces.gateways')
@mock.patch('netifaces.ifaddresses')
def test_get_my_ipv4_address_without_default_interface(
self, ifaddr, gateways):
gateways.return_value = {}
addr = netutils._get_my_ipv4_address()
self.assertEqual('127.0.0.1', addr)
self.assertFalse(ifaddr.called)
class IPv6byEUI64TestCase(test_base.BaseTestCase):
"""Unit tests to generate IPv6 by EUI-64 operations."""
def test_generate_IPv6_by_EUI64(self):
addr = netutils.get_ipv6_addr_by_EUI64('2001:db8::',
'00:16:3e:33:44:55')
self.assertEqual('2001:db8::216:3eff:fe33:4455', addr.format())
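    # EUI-64 interface ID: split the MAC in half, insert ff:fe in the middle,
    # and flip the universal/local bit (0x02) of the first octet:
    # 00:16:3e:33:44:55 -> 02:16:3e:ff:fe:33:44:55 -> ::216:3eff:fe33:4455.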
def test_generate_IPv6_with_IPv4_prefix(self):
ipv4_prefix = '10.0.8'
mac = '00:16:3e:33:44:55'
self.assertRaises(ValueError, lambda:
netutils.get_ipv6_addr_by_EUI64(ipv4_prefix, mac))
def test_generate_IPv6_with_bad_mac(self):
bad_mac = '00:16:3e:33:44:5Z'
prefix = '2001:db8::'
self.assertRaises(ValueError, lambda:
netutils.get_ipv6_addr_by_EUI64(prefix, bad_mac))
def test_generate_IPv6_with_bad_prefix(self):
mac = '00:16:3e:33:44:55'
bad_prefix = 'bb'
self.assertRaises(ValueError, lambda:
netutils.get_ipv6_addr_by_EUI64(bad_prefix, mac))
def test_generate_IPv6_with_error_prefix_type(self):
mac = '00:16:3e:33:44:55'
prefix = 123
self.assertRaises(TypeError, lambda:
netutils.get_ipv6_addr_by_EUI64(prefix, mac))
def test_generate_IPv6_with_empty_prefix(self):
mac = '00:16:3e:33:44:55'
prefix = ''
self.assertRaises(ValueError, lambda:
netutils.get_ipv6_addr_by_EUI64(prefix, mac))
@contextlib.contextmanager
def mock_file_content(content):
# Allows StringIO to act like a context manager-enabled file.
yield six.StringIO(content)
class TestIsIPv6Enabled(test_base.BaseTestCase):
def setUp(self):
super(TestIsIPv6Enabled, self).setUp()
def reset_detection_flag():
netutils._IS_IPV6_ENABLED = None
reset_detection_flag()
self.addCleanup(reset_detection_flag)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('six.moves.builtins.open', return_value=mock_file_content('0'))
def test_enabled(self, mock_open, exists):
enabled = netutils.is_ipv6_enabled()
self.assertTrue(enabled)
@mock.patch('os.path.exists', return_value=True)
@mock.patch('six.moves.builtins.open', return_value=mock_file_content('1'))
def test_disabled(self, mock_open, exists):
enabled = netutils.is_ipv6_enabled()
self.assertFalse(enabled)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('six.moves.builtins.open',
side_effect=AssertionError('should not read'))
def test_disabled_non_exists(self, mock_open, exists):
enabled = netutils.is_ipv6_enabled()
self.assertFalse(enabled)
@mock.patch('os.path.exists', return_value=True)
def test_memoize_enabled(self, exists):
# Reset the flag to appear that we haven't looked for it yet.
netutils._IS_IPV6_ENABLED = None
with mock.patch('six.moves.builtins.open',
return_value=mock_file_content('0')) as mock_open:
enabled = netutils.is_ipv6_enabled()
self.assertTrue(mock_open.called)
self.assertTrue(netutils._IS_IPV6_ENABLED)
self.assertTrue(enabled)
# The second call should not use open again
with mock.patch('six.moves.builtins.open',
side_effect=AssertionError('should not be called')):
enabled = netutils.is_ipv6_enabled()
self.assertTrue(enabled)
@mock.patch('os.path.exists', return_value=True)
def test_memoize_disabled(self, exists):
# Reset the flag to appear that we haven't looked for it yet.
netutils._IS_IPV6_ENABLED = None
with mock.patch('six.moves.builtins.open',
return_value=mock_file_content('1')):
enabled = netutils.is_ipv6_enabled()
self.assertFalse(enabled)
# The second call should not use open again
with mock.patch('six.moves.builtins.open',
side_effect=AssertionError('should not be called')):
enabled = netutils.is_ipv6_enabled()
self.assertFalse(enabled)
@mock.patch('os.path.exists', return_value=False)
@mock.patch('six.moves.builtins.open',
side_effect=AssertionError('should not read'))
def test_memoize_not_exists(self, mock_open, exists):
# Reset the flag to appear that we haven't looked for it yet.
netutils._IS_IPV6_ENABLED = None
enabled = netutils.is_ipv6_enabled()
self.assertFalse(enabled)
enabled = netutils.is_ipv6_enabled()
self.assertFalse(enabled)
|
[
"oslo_utils.netutils.is_valid_mac",
"oslo_utils.netutils._get_my_ipv4_address",
"oslo_utils.netutils.is_valid_port",
"oslo_utils.netutils.set_tcp_keepalive",
"oslo_utils.netutils.urlsplit",
"oslo_utils.netutils.is_valid_icmp_code",
"oslo_utils.netutils.escape_ipv6",
"oslo_utils.netutils.is_valid_cidr",
"oslo_utils.netutils.is_valid_ip",
"oslo_utils.netutils.is_valid_ipv6_cidr",
"oslo_utils.netutils._is_int_in_range",
"oslo_utils.netutils.get_my_ipv4",
"oslo_utils.netutils.get_ipv6_addr_by_EUI64",
"oslo_utils.netutils.is_valid_ipv6",
"oslo_utils.netutils.parse_host_port",
"unittest.mock.call.setsockopt",
"oslo_utils.netutils.is_ipv6_enabled",
"unittest.mock.patch.dict",
"unittest.mock.patch",
"oslo_utils.netutils.is_valid_ipv4",
"oslo_utils.netutils.is_valid_icmp_type",
"unittest.mock.Mock",
"six.StringIO"
] |
[((11680, 11707), 'unittest.mock.patch', 'mock.patch', (['"""socket.socket"""'], {}), "('socket.socket')\n", (11690, 11707), False, 'from unittest import mock\n'), ((11713, 11767), 'unittest.mock.patch', 'mock.patch', (['"""oslo_utils.netutils._get_my_ipv4_address"""'], {}), "('oslo_utils.netutils._get_my_ipv4_address')\n", (11723, 11767), False, 'from unittest import mock\n'), ((11997, 12029), 'unittest.mock.patch', 'mock.patch', (['"""netifaces.gateways"""'], {}), "('netifaces.gateways')\n", (12007, 12029), False, 'from unittest import mock\n'), ((12035, 12070), 'unittest.mock.patch', 'mock.patch', (['"""netifaces.ifaddresses"""'], {}), "('netifaces.ifaddresses')\n", (12045, 12070), False, 'from unittest import mock\n'), ((12401, 12433), 'unittest.mock.patch', 'mock.patch', (['"""netifaces.gateways"""'], {}), "('netifaces.gateways')\n", (12411, 12433), False, 'from unittest import mock\n'), ((12439, 12474), 'unittest.mock.patch', 'mock.patch', (['"""netifaces.ifaddresses"""'], {}), "('netifaces.ifaddresses')\n", (12449, 12474), False, 'from unittest import mock\n'), ((12774, 12806), 'unittest.mock.patch', 'mock.patch', (['"""netifaces.gateways"""'], {}), "('netifaces.gateways')\n", (12784, 12806), False, 'from unittest import mock\n'), ((12812, 12847), 'unittest.mock.patch', 'mock.patch', (['"""netifaces.ifaddresses"""'], {}), "('netifaces.ifaddresses')\n", (12822, 12847), False, 'from unittest import mock\n'), ((15079, 15126), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {'return_value': '(True)'}), "('os.path.exists', return_value=True)\n", (15089, 15126), False, 'from unittest import mock\n'), ((15338, 15385), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {'return_value': '(True)'}), "('os.path.exists', return_value=True)\n", (15348, 15385), False, 'from unittest import mock\n'), ((15599, 15647), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {'return_value': '(False)'}), "('os.path.exists', return_value=False)\n", (15609, 15647), False, 'from unittest import mock\n'), ((15898, 15945), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {'return_value': '(True)'}), "('os.path.exists', return_value=True)\n", (15908, 15945), False, 'from unittest import mock\n'), ((16686, 16733), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {'return_value': '(True)'}), "('os.path.exists', return_value=True)\n", (16696, 16733), False, 'from unittest import mock\n'), ((17363, 17411), 'unittest.mock.patch', 'mock.patch', (['"""os.path.exists"""'], {'return_value': '(False)'}), "('os.path.exists', return_value=False)\n", (17373, 17411), False, 'from unittest import mock\n'), ((892, 920), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['"""http://"""'], {}), "('http://')\n", (909, 920), False, 'from oslo_utils import netutils\n'), ((2119, 2175), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['"""rpc://myhost?someparam#somefragment"""'], {}), "('rpc://myhost?someparam#somefragment')\n", (2136, 2175), False, 'from oslo_utils import netutils\n'), ((2443, 2533), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['"""rpc://myhost/mypath?someparam#somefragment"""'], {'allow_fragments': '(False)'}), "('rpc://myhost/mypath?someparam#somefragment',\n allow_fragments=False)\n", (2460, 2533), False, 'from oslo_utils import netutils\n'), ((2830, 2930), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['"""rpc://user:pass@myhost/mypath?someparam#somefragment"""'], {'allow_fragments': '(False)'}), 
"('rpc://user:pass@myhost/mypath?someparam#somefragment',\n allow_fragments=False)\n", (2847, 2930), False, 'from oslo_utils import netutils\n'), ((3315, 3342), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['ipv6_url'], {}), '(ipv6_url)\n', (3332, 3342), False, 'from oslo_utils import netutils\n'), ((3652, 3679), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['ipv6_url'], {}), '(ipv6_url)\n', (3669, 3679), False, 'from oslo_utils import netutils\n'), ((4045, 4072), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['ipv6_url'], {}), '(ipv6_url)\n', (4062, 4072), False, 'from oslo_utils import netutils\n'), ((4563, 4590), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['test_url'], {}), '(test_url)\n', (4580, 4590), False, 'from oslo_utils import netutils\n'), ((4802, 4829), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['test_url'], {}), '(test_url)\n', (4819, 4829), False, 'from oslo_utils import netutils\n'), ((5020, 5047), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['test_url'], {}), '(test_url)\n', (5037, 5047), False, 'from oslo_utils import netutils\n'), ((5151, 5178), 'oslo_utils.netutils.urlsplit', 'netutils.urlsplit', (['test_url'], {}), '(test_url)\n', (5168, 5178), False, 'from oslo_utils import netutils\n'), ((5284, 5295), 'unittest.mock.Mock', 'mock.Mock', ([], {}), '()\n', (5293, 5295), False, 'from unittest import mock\n'), ((5304, 5359), 'oslo_utils.netutils.set_tcp_keepalive', 'netutils.set_tcp_keepalive', (['mock_sock', '(True)', '(100)', '(10)', '(5)'], {}), '(mock_sock, True, 100, 10, 5)\n', (5330, 5359), False, 'from oslo_utils import netutils\n'), ((6184, 6228), 'oslo_utils.netutils.set_tcp_keepalive', 'netutils.set_tcp_keepalive', (['mock_sock', '(False)'], {}), '(mock_sock, False)\n', (6210, 6228), False, 'from oslo_utils import netutils\n'), ((11926, 11948), 'oslo_utils.netutils.get_my_ipv4', 'netutils.get_my_ipv4', ([], {}), '()\n', (11946, 11948), False, 'from oslo_utils import netutils\n'), ((12995, 13026), 'oslo_utils.netutils._get_my_ipv4_address', 'netutils._get_my_ipv4_address', ([], {}), '()\n', (13024, 13026), False, 'from oslo_utils import netutils\n'), ((13283, 13349), 'oslo_utils.netutils.get_ipv6_addr_by_EUI64', 'netutils.get_ipv6_addr_by_EUI64', (['"""2001:db8::"""', '"""00:16:3e:33:44:55"""'], {}), "('2001:db8::', '00:16:3e:33:44:55')\n", (13314, 13349), False, 'from oslo_utils import netutils\n'), ((14772, 14793), 'six.StringIO', 'six.StringIO', (['content'], {}), '(content)\n', (14784, 14793), False, 'import six\n'), ((15272, 15298), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (15296, 15298), False, 'from oslo_utils import netutils\n'), ((15532, 15558), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (15556, 15558), False, 'from oslo_utils import netutils\n'), ((15831, 15857), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (15855, 15857), False, 'from oslo_utils import netutils\n'), ((17705, 17731), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (17729, 17731), False, 'from oslo_utils import netutils\n'), ((17784, 17810), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (17808, 17810), False, 'from oslo_utils import netutils\n'), ((1200, 1239), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""server01:80"""'], {}), "('server01:80')\n", (1224, 1239), False, 'from oslo_utils import 
netutils\n'), ((1311, 1347), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""server01"""'], {}), "('server01')\n", (1335, 1347), False, 'from oslo_utils import netutils\n'), ((1419, 1474), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""server01"""'], {'default_port': '(1234)'}), "('server01', default_port=1234)\n", (1443, 1474), False, 'from oslo_utils import netutils\n'), ((1564, 1600), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""[::1]:80"""'], {}), "('[::1]:80')\n", (1588, 1600), False, 'from oslo_utils import netutils\n'), ((1667, 1700), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""[::1]"""'], {}), "('[::1]')\n", (1691, 1700), False, 'from oslo_utils import netutils\n'), ((1767, 1819), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""[::1]"""'], {'default_port': '(1234)'}), "('[::1]', default_port=1234)\n", (1791, 1819), False, 'from oslo_utils import netutils\n'), ((1936, 2011), 'oslo_utils.netutils.parse_host_port', 'netutils.parse_host_port', (['"""2001:db8:85a3::8a2e:370:7334"""'], {'default_port': '(1234)'}), "('2001:db8:85a3::8a2e:370:7334', default_port=1234)\n", (1960, 2011), False, 'from oslo_utils import netutils\n'), ((5390, 5456), 'unittest.mock.call.setsockopt', 'mock.call.setsockopt', (['socket.SOL_SOCKET', 'socket.SO_KEEPALIVE', '(True)'], {}), '(socket.SOL_SOCKET, socket.SO_KEEPALIVE, True)\n', (5410, 5456), False, 'from unittest import mock\n'), ((6343, 6380), 'oslo_utils.netutils.is_valid_ipv4', 'netutils.is_valid_ipv4', (['"""172.16.58.3"""'], {}), "('172.16.58.3')\n", (6365, 6380), False, 'from oslo_utils import netutils\n'), ((6408, 6445), 'oslo_utils.netutils.is_valid_ipv4', 'netutils.is_valid_ipv4', (['"""-1.11.11.11"""'], {}), "('-1.11.11.11')\n", (6430, 6445), False, 'from oslo_utils import netutils\n'), ((6473, 6499), 'oslo_utils.netutils.is_valid_ipv4', 'netutils.is_valid_ipv4', (['""""""'], {}), "('')\n", (6495, 6499), False, 'from oslo_utils import netutils\n'), ((6560, 6589), 'oslo_utils.netutils.is_valid_ipv6', 'netutils.is_valid_ipv6', (['"""::1"""'], {}), "('::1')\n", (6582, 6589), False, 'from oslo_utils import netutils\n'), ((6616, 6654), 'oslo_utils.netutils.is_valid_ipv6', 'netutils.is_valid_ipv6', (['"""fe80::1%eth0"""'], {}), "('fe80::1%eth0')\n", (6638, 6654), False, 'from oslo_utils import netutils\n'), ((6682, 6719), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['"""fe%80::1%eth0"""'], {}), "('fe%80::1%eth0')\n", (6702, 6719), False, 'from oslo_utils import netutils\n'), ((6747, 6817), 'oslo_utils.netutils.is_valid_ipv6', 'netutils.is_valid_ipv6', (['"""fc00:e968:6179::de52:7100:85a3::172.31.128.1"""'], {}), "('fc00:e968:6179::de52:7100:85a3::172.31.128.1')\n", (6769, 6817), False, 'from oslo_utils import netutils\n'), ((6858, 6884), 'oslo_utils.netutils.is_valid_ipv6', 'netutils.is_valid_ipv6', (['""""""'], {}), "('')\n", (6880, 6884), False, 'from oslo_utils import netutils\n'), ((6960, 7024), 'oslo_utils.netutils.escape_ipv6', 'netutils.escape_ipv6', (['"""1fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b"""'], {}), "('1fd00:c2b6:b24b:be67:2827:688d:e6a1:6a3b')\n", (6980, 7024), False, 'from oslo_utils import netutils\n'), ((7064, 7097), 'oslo_utils.netutils.escape_ipv6', 'netutils.escape_ipv6', (['"""127.0.0.1"""'], {}), "('127.0.0.1')\n", (7084, 7097), False, 'from oslo_utils import netutils\n'), ((7156, 7189), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['"""127.0.0.1"""'], {}), 
"('127.0.0.1')\n", (7176, 7189), False, 'from oslo_utils import netutils\n'), ((7216, 7262), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['"""2001:db8::ff00:42:8329"""'], {}), "('2001:db8::ff00:42:8329')\n", (7236, 7262), False, 'from oslo_utils import netutils\n'), ((7289, 7325), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['"""fe80::1%eth0"""'], {}), "('fe80::1%eth0')\n", (7309, 7325), False, 'from oslo_utils import netutils\n'), ((7353, 7386), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['"""256.0.0.0"""'], {}), "('256.0.0.0')\n", (7373, 7386), False, 'from oslo_utils import netutils\n'), ((7414, 7446), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['"""::1.2.3."""'], {}), "('::1.2.3.')\n", (7434, 7446), False, 'from oslo_utils import netutils\n'), ((7474, 7498), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['""""""'], {}), "('')\n", (7494, 7498), False, 'from oslo_utils import netutils\n'), ((7526, 7552), 'oslo_utils.netutils.is_valid_ip', 'netutils.is_valid_ip', (['None'], {}), '(None)\n', (7546, 7552), False, 'from oslo_utils import netutils\n'), ((7612, 7654), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""52:54:00:cf:2d:31"""'], {}), "('52:54:00:cf:2d:31')\n", (7633, 7654), False, 'from oslo_utils import netutils\n'), ((7680, 7723), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['u"""52:54:00:cf:2d:31"""'], {}), "(u'52:54:00:cf:2d:31')\n", (7701, 7723), False, 'from oslo_utils import netutils\n'), ((7750, 7784), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""127.0.0.1"""'], {}), "('127.0.0.1')\n", (7771, 7784), False, 'from oslo_utils import netutils\n'), ((7811, 7853), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""not:a:mac:address"""'], {}), "('not:a:mac:address')\n", (7832, 7853), False, 'from oslo_utils import netutils\n'), ((7880, 7922), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""52-54-00-cf-2d-31"""'], {}), "('52-54-00-cf-2d-31')\n", (7901, 7922), False, 'from oslo_utils import netutils\n'), ((7949, 7991), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""aa bb cc dd ee ff"""'], {}), "('aa bb cc dd ee ff')\n", (7970, 7991), False, 'from oslo_utils import netutils\n'), ((8017, 8059), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""AA:BB:CC:DD:EE:FF"""'], {}), "('AA:BB:CC:DD:EE:FF')\n", (8038, 8059), False, 'from oslo_utils import netutils\n'), ((8086, 8128), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""AA BB CC DD EE FF"""'], {}), "('AA BB CC DD EE FF')\n", (8107, 8128), False, 'from oslo_utils import netutils\n'), ((8155, 8197), 'oslo_utils.netutils.is_valid_mac', 'netutils.is_valid_mac', (['"""AA-BB-CC-DD-EE-FF"""'], {}), "('AA-BB-CC-DD-EE-FF')\n", (8176, 8197), False, 'from oslo_utils import netutils\n'), ((8258, 8295), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['"""10.0.0.0/24"""'], {}), "('10.0.0.0/24')\n", (8280, 8295), False, 'from oslo_utils import netutils\n'), ((8321, 8358), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['"""10.0.0.1/32"""'], {}), "('10.0.0.1/32')\n", (8343, 8358), False, 'from oslo_utils import netutils\n'), ((8384, 8419), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['"""0.0.0.0/0"""'], {}), "('0.0.0.0/0')\n", (8406, 8419), False, 'from oslo_utils import netutils\n'), ((8445, 8480), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', 
(['"""2600::/64"""'], {}), "('2600::/64')\n", (8467, 8480), False, 'from oslo_utils import netutils\n'), ((8506, 8574), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['"""0000:0000:0000:0000:0000:0000:0000:0001/32"""'], {}), "('0000:0000:0000:0000:0000:0000:0000:0001/32')\n", (8528, 8574), False, 'from oslo_utils import netutils\n'), ((8627, 8661), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['"""10.0.0.1"""'], {}), "('10.0.0.1')\n", (8649, 8661), False, 'from oslo_utils import netutils\n'), ((8688, 8725), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['"""10.0.0.1/33"""'], {}), "('10.0.0.1/33')\n", (8710, 8725), False, 'from oslo_utils import netutils\n'), ((8752, 8778), 'oslo_utils.netutils.is_valid_cidr', 'netutils.is_valid_cidr', (['(10)'], {}), '(10)\n', (8774, 8778), False, 'from oslo_utils import netutils\n'), ((8844, 8884), 'oslo_utils.netutils.is_valid_ipv6_cidr', 'netutils.is_valid_ipv6_cidr', (['"""2600::/64"""'], {}), "('2600::/64')\n", (8871, 8884), False, 'from oslo_utils import netutils\n'), ((8910, 8965), 'oslo_utils.netutils.is_valid_ipv6_cidr', 'netutils.is_valid_ipv6_cidr', (['"""fc00:db20:35b:7399::5/48"""'], {}), "('fc00:db20:35b:7399::5/48')\n", (8937, 8965), False, 'from oslo_utils import netutils\n'), ((9004, 9077), 'oslo_utils.netutils.is_valid_ipv6_cidr', 'netutils.is_valid_ipv6_cidr', (['"""0000:0000:0000:0000:0000:0000:0000:0001/32"""'], {}), "('0000:0000:0000:0000:0000:0000:0000:0001/32')\n", (9031, 9077), False, 'from oslo_utils import netutils\n'), ((9116, 9186), 'oslo_utils.netutils.is_valid_ipv6_cidr', 'netutils.is_valid_ipv6_cidr', (['"""0000:0000:0000:0000:0000:0000:0000:0001"""'], {}), "('0000:0000:0000:0000:0000:0000:0000:0001')\n", (9143, 9186), False, 'from oslo_utils import netutils\n'), ((9226, 9260), 'oslo_utils.netutils.is_valid_ipv6_cidr', 'netutils.is_valid_ipv6_cidr', (['"""foo"""'], {}), "('foo')\n", (9253, 9260), False, 'from oslo_utils import netutils\n'), ((9287, 9327), 'oslo_utils.netutils.is_valid_ipv6_cidr', 'netutils.is_valid_ipv6_cidr', (['"""127.0.0.1"""'], {}), "('127.0.0.1')\n", (9314, 9327), False, 'from oslo_utils import netutils\n'), ((9975, 10016), 'unittest.mock.patch', 'mock.patch', (['"""socket.socket"""'], {}), "('socket.socket', **sock_attrs)\n", (9985, 10016), False, 'from unittest import mock\n'), ((10037, 10059), 'oslo_utils.netutils.get_my_ipv4', 'netutils.get_my_ipv4', ([], {}), '()\n', (10057, 10059), False, 'from oslo_utils import netutils\n'), ((12174, 12227), 'unittest.mock.patch.dict', 'mock.patch.dict', (['netifaces.__dict__', "{'AF_INET': '0'}"], {}), "(netifaces.__dict__, {'AF_INET': '0'})\n", (12189, 12227), False, 'from unittest import mock\n'), ((12316, 12347), 'oslo_utils.netutils._get_my_ipv4_address', 'netutils._get_my_ipv4_address', ([], {}), '()\n', (12345, 12347), False, 'from oslo_utils import netutils\n'), ((12581, 12634), 'unittest.mock.patch.dict', 'mock.patch.dict', (['netifaces.__dict__', "{'AF_INET': '0'}"], {}), "(netifaces.__dict__, {'AF_INET': '0'})\n", (12596, 12634), False, 'from unittest import mock\n'), ((12692, 12723), 'oslo_utils.netutils._get_my_ipv4_address', 'netutils._get_my_ipv4_address', ([], {}), '()\n', (12721, 12723), False, 'from oslo_utils import netutils\n'), ((16249, 16275), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (16273, 16275), False, 'from oslo_utils import netutils\n'), ((16616, 16642), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', 
(16640, 16642), False, 'from oslo_utils import netutils\n'), ((17025, 17051), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (17049, 17051), False, 'from oslo_utils import netutils\n'), ((17292, 17318), 'oslo_utils.netutils.is_ipv6_enabled', 'netutils.is_ipv6_enabled', ([], {}), '()\n', (17316, 17318), False, 'from oslo_utils import netutils\n'), ((5584, 5650), 'unittest.mock.call.setsockopt', 'mock.call.setsockopt', (['socket.IPPROTO_TCP', 'socket.TCP_KEEPIDLE', '(100)'], {}), '(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 100)\n', (5604, 5650), False, 'from unittest import mock\n'), ((5786, 5852), 'unittest.mock.call.setsockopt', 'mock.call.setsockopt', (['socket.IPPROTO_TCP', 'socket.TCP_KEEPINTVL', '(10)'], {}), '(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 10)\n', (5806, 5852), False, 'from unittest import mock\n'), ((5987, 6050), 'unittest.mock.call.setsockopt', 'mock.call.setsockopt', (['socket.IPPROTO_TCP', 'socket.TCP_KEEPCNT', '(5)'], {}), '(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 5)\n', (6007, 6050), False, 'from unittest import mock\n'), ((9539, 9572), 'oslo_utils.netutils.is_valid_port', 'netutils.is_valid_port', (['input_str'], {}), '(input_str)\n', (9561, 9572), False, 'from oslo_utils import netutils\n'), ((9803, 9836), 'oslo_utils.netutils.is_valid_port', 'netutils.is_valid_port', (['input_str'], {}), '(input_str)\n', (9825, 9836), False, 'from oslo_utils import netutils\n'), ((10464, 10503), 'oslo_utils.netutils._is_int_in_range', 'netutils._is_int_in_range', (['*input_value'], {}), '(*input_value)\n', (10489, 10503), False, 'from oslo_utils import netutils\n'), ((10787, 10826), 'oslo_utils.netutils._is_int_in_range', 'netutils._is_int_in_range', (['*input_value'], {}), '(*input_value)\n', (10812, 10826), False, 'from oslo_utils import netutils\n'), ((10986, 11026), 'oslo_utils.netutils.is_valid_icmp_type', 'netutils.is_valid_icmp_type', (['input_value'], {}), '(input_value)\n', (11013, 11026), False, 'from oslo_utils import netutils\n'), ((11209, 11249), 'oslo_utils.netutils.is_valid_icmp_type', 'netutils.is_valid_icmp_type', (['input_value'], {}), '(input_value)\n', (11236, 11249), False, 'from oslo_utils import netutils\n'), ((11415, 11455), 'oslo_utils.netutils.is_valid_icmp_code', 'netutils.is_valid_icmp_code', (['input_value'], {}), '(input_value)\n', (11442, 11455), False, 'from oslo_utils import netutils\n'), ((11632, 11672), 'oslo_utils.netutils.is_valid_icmp_code', 'netutils.is_valid_icmp_code', (['input_value'], {}), '(input_value)\n', (11659, 11672), False, 'from oslo_utils import netutils\n'), ((13658, 13707), 'oslo_utils.netutils.get_ipv6_addr_by_EUI64', 'netutils.get_ipv6_addr_by_EUI64', (['ipv4_prefix', 'mac'], {}), '(ipv4_prefix, mac)\n', (13689, 13707), False, 'from oslo_utils import netutils\n'), ((13897, 13945), 'oslo_utils.netutils.get_ipv6_addr_by_EUI64', 'netutils.get_ipv6_addr_by_EUI64', (['prefix', 'bad_mac'], {}), '(prefix, bad_mac)\n', (13928, 13945), False, 'from oslo_utils import netutils\n'), ((14130, 14178), 'oslo_utils.netutils.get_ipv6_addr_by_EUI64', 'netutils.get_ipv6_addr_by_EUI64', (['bad_prefix', 'mac'], {}), '(bad_prefix, mac)\n', (14161, 14178), False, 'from oslo_utils import netutils\n'), ((14364, 14408), 'oslo_utils.netutils.get_ipv6_addr_by_EUI64', 'netutils.get_ipv6_addr_by_EUI64', (['prefix', 'mac'], {}), '(prefix, mac)\n', (14395, 14408), False, 'from oslo_utils import netutils\n'), ((14589, 14633), 'oslo_utils.netutils.get_ipv6_addr_by_EUI64', 'netutils.get_ipv6_addr_by_EUI64', (['prefix', 
'mac'], {}), '(prefix, mac)\n', (14620, 14633), False, 'from oslo_utils import netutils\n')]
|
""" Extracts data from GPX file and writes data to CSV """
# Imports
import os
import pandas as pd
import mansfield_gpx as mfx
# Define relative path to GPX file
double_up_gpx_path = os.path.join(
"02-raw-data", "mansfield-double-up-course.gpx")
# Define list of GPX attributes
attribute_list = [
"latitude", "longitude", "elevation", "time",
"cadence", "distance", "altitude", "energy",
"speed", "verticalSpeed"
]
# Extract gpx data to dataframe
double_up_gpx_df = pd.DataFrame({
attribute: mfx.extract_gpx_data(double_up_gpx_path, attribute)
for attribute in attribute_list
})
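# Each attribute becomes one DataFrame column, e.g. double_up_gpx_df["elevation"];
# this assumes mansfield_gpx returns an equal-length list per attribute, since
# pd.DataFrame requires columns of matching length.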
# Write extracted GPX data to CSV
df_out_path = os.path.join(
"03-processed-data", "mansfield-double-up-course-data.csv")
try:
double_up_gpx_df.to_csv(
path_or_buf=df_out_path, sep=',', header=True, index=False)
except Exception as error:
print(f"Could not write to CSV. ERROR: {error}")
else:
print(f"Wrote GPX attributes to CSV: {df_out_path}")
|
[
"mansfield_gpx.extract_gpx_data",
"os.path.join"
] |
[((185, 246), 'os.path.join', 'os.path.join', (['"""02-raw-data"""', '"""mansfield-double-up-course.gpx"""'], {}), "('02-raw-data', 'mansfield-double-up-course.gpx')\n", (197, 246), False, 'import os\n'), ((656, 728), 'os.path.join', 'os.path.join', (['"""03-processed-data"""', '"""mansfield-double-up-course-data.csv"""'], {}), "('03-processed-data', 'mansfield-double-up-course-data.csv')\n", (668, 728), False, 'import os\n'), ((516, 567), 'mansfield_gpx.extract_gpx_data', 'mfx.extract_gpx_data', (['double_up_gpx_path', 'attribute'], {}), '(double_up_gpx_path, attribute)\n', (536, 567), True, 'import mansfield_gpx as mfx\n')]
|
#!/usr/bin/env python3
import networkx as nx
import sqlite3
import logging
import json
import argparse
import pandas as pd
from pathlib import Path
from cdlib.algorithms import leiden
def load_from_db(path):
logging.info(f"Loading from {path}")
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute('SELECT file, title, id FROM nodes')
titles = c.fetchall()
c.execute('SELECT source, dest FROM links')
links = c.fetchall()
t = {}
id_to_file = {}
for file, title, or_id in titles:
if 'private' in file:
continue
if title:
title = title[1:-1]
t[file] = title
else:
t[file] = file
id_to_file[or_id] = file
final_links = []
for source, dest in links:
if id_to_file.get(source, None) is None or id_to_file.get(dest, None) is None:
continue
final_links.append((id_to_file[source], id_to_file[dest]))
return t, final_links
def generate_url(file_name, replace_dict):
new_url = file_name
for key, val in replace_dict.items():
if val == "NONE":
val = ""
new_url = new_url.replace(key, val)
return new_url.replace('"', '')
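# Hypothetical example: with replace_dict = {'/home/me/org/': 'https://notes.example/',
# '.org': '.html'}, the file '/home/me/org/emacs.org' maps to
# 'https://notes.example/emacs.html' (stray double quotes are stripped).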
def parse_links(links, titles, top=None, replace_dict={}):
logging.info(f"Parsing links")
l = []
for file1, file2 in links:
        if 'private' in file1 or 'private' in file2:
continue
file1_name = titles.get(file1)
file2_name = titles.get(file2)
if file1_name and file2_name:
l.append({
"source": file1_name,
"source_url": generate_url(file1, replace_dict),
"target": file2_name,
"target_url": generate_url(file2, replace_dict),
})
    # TODO: honor the "top" argument by trimming to the top-N links
    # (e.g. via a pandas DataFrame); it is currently unused.
return l
def color_nodes(community_dictionary, links):
# community_dictionary is a mapping of 'title' -> 'community'
nodes = {}
for link in links:
source = link['source']
target = link['target']
if source not in nodes:
nodes[source] = {
'id': source,
'url': link['source_url'],
'group': community_dictionary[source]
}
if target not in nodes:
nodes[target] = {
'id': target,
'url': link['target_url'],
'group': community_dictionary[target]
}
return nodes
def generate_community_colors(links, community_algo=leiden):
logging.info(f"Generating community colors with algorithm {community_algo}")
G = nx.Graph()
for link in links:
source = link['source']
target = link['target']
if source not in G:
G.add_node(source)
if target not in G:
G.add_node(target)
G.add_edge(source, target)
community_sets = {}
community_list = community_algo(G).communities
for i, com in enumerate(community_list):
for note_name in com:
community_sets[note_name] = i
nodes = color_nodes(community_sets, links)
return nodes, G
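# leiden(G).communities is a list of node lists, one per detected community;
# the loop above flattens it into a node -> community-index mapping, which
# becomes the color "group" of each node in the JSON output.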
def dump(nodes, links, name):
logging.info(f"Writing json to {name}")
output = {}
for cur_link in links:
cur_link["x1"] = nodes[cur_link["source"]]['x']
cur_link["y1"] = nodes[cur_link["source"]]['y']
cur_link["x2"] = nodes[cur_link["target"]]['x']
cur_link["y2"] = nodes[cur_link["target"]]['y']
output['links'] = links
output['nodes'] = list(nodes.values())
with open(name, 'w') as f:
json.dump(output, f)
def generate_positions(G, nodes, iterations=50):
logging.info(f"Generating and iterating through spring layout with iterations {iterations}")
pos = nx.spring_layout(G, iterations=iterations)
for key, value in pos.items():
if key in nodes:
nodes[key]["x"] = value[0]
nodes[key]["y"] = value[1]
return nodes
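# nx.spring_layout returns {node: array([x, y])} with coordinates roughly in
# [-1, 1]; they are copied onto the node dicts for the front-end to consume.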
if __name__=="__main__":
parser = argparse.ArgumentParser(description="Generates a json file from your org-roam DB")
parser.add_argument("--org-db-location", help="Location of org-roam.db file. Defaults to $HOME/.emacs.d/org-roam.db", type=str, default=f"{Path.home()}/.emacs.d/org-roam.db", dest="db_location")
parser.add_argument("--output", "-o", help="File to output as. Defaults to './org-data.json'", type=str, default="./org-data.json", dest="output_location")
parser.add_argument("--replace", dest="replacements", nargs="+", help="Replacement to generate urls. Takes in <FILE_PATH> <REPLACEMENT_VALUE>")
parser.add_argument("--top", default=None, dest="top", help="Number of nodes to cut off by. Default is to generate all nodes")
args = parser.parse_args()
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler("debug.log"),
logging.StreamHandler()
]
)
    if args.replacements and len(args.replacements) % 2 != 0:
        print("Replacements must be in pairs")
        exit(1)
logging.info(f"Loading db from {args.db_location}")
titles, links = load_from_db(path=args.db_location)
    replacements = {}
    if args.replacements:
        replacements = {args.replacements[i]: args.replacements[i + 1] for i in range(0, len(args.replacements), 2)}
logging.info(f"Replacing according to {replacements}")
links = parse_links(links, titles, args.top, replacements)
nodes, G = generate_community_colors(links)
nodes = generate_positions(G, nodes, iterations=200)
dump(nodes, links, name=args.output_location)
|
[
"json.dump",
"argparse.ArgumentParser",
"logging.FileHandler",
"pathlib.Path.home",
"logging.StreamHandler",
"logging.info",
"networkx.spring_layout",
"networkx.Graph",
"sqlite3.connect"
] |
[((375, 411), 'logging.info', 'logging.info', (['f"""Loading from {path}"""'], {}), "(f'Loading from {path}')\n", (387, 411), False, 'import logging\n'), ((423, 444), 'sqlite3.connect', 'sqlite3.connect', (['path'], {}), '(path)\n', (438, 444), False, 'import sqlite3\n'), ((1446, 1476), 'logging.info', 'logging.info', (['f"""Parsing links"""'], {}), "(f'Parsing links')\n", (1458, 1476), False, 'import logging\n'), ((2714, 2790), 'logging.info', 'logging.info', (['f"""Generating community colors with algorithm {community_algo}"""'], {}), "(f'Generating community colors with algorithm {community_algo}')\n", (2726, 2790), False, 'import logging\n'), ((2799, 2809), 'networkx.Graph', 'nx.Graph', ([], {}), '()\n', (2807, 2809), True, 'import networkx as nx\n'), ((3355, 3394), 'logging.info', 'logging.info', (['f"""Writing json to {name}"""'], {}), "(f'Writing json to {name}')\n", (3367, 3394), False, 'import logging\n'), ((3851, 3953), 'logging.info', 'logging.info', (['f"""Generating and iterating through spring layout with iterations {iterations}"""'], {}), "(\n f'Generating and iterating through spring layout with iterations {iterations}'\n )\n", (3863, 3953), False, 'import logging\n'), ((3954, 3996), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {'iterations': 'iterations'}), '(G, iterations=iterations)\n', (3970, 3996), True, 'import networkx as nx\n'), ((4191, 4278), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Generates a json file from your org-roam DB"""'}), "(description=\n 'Generates a json file from your org-roam DB')\n", (4214, 4278), False, 'import argparse\n'), ((5282, 5333), 'logging.info', 'logging.info', (['f"""Loading db from {args.db_location}"""'], {}), "(f'Loading db from {args.db_location}')\n", (5294, 5333), False, 'import logging\n'), ((5505, 5559), 'logging.info', 'logging.info', (['f"""Replacing according to {replacements}"""'], {}), "(f'Replacing according to {replacements}')\n", (5517, 5559), False, 'import logging\n'), ((3775, 3795), 'json.dump', 'json.dump', (['output', 'f'], {}), '(output, f)\n', (3784, 3795), False, 'import json\n'), ((5088, 5120), 'logging.FileHandler', 'logging.FileHandler', (['"""debug.log"""'], {}), "('debug.log')\n", (5107, 5120), False, 'import logging\n'), ((5134, 5157), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (5155, 5157), False, 'import logging\n'), ((4418, 4429), 'pathlib.Path.home', 'Path.home', ([], {}), '()\n', (4427, 4429), False, 'from pathlib import Path\n')]
|
# Copyright 2017-2019 TensorHub, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
import logging
import sys
from guild import batch_util
from guild import op_util
from . import skopt_util
log = logging.getLogger("guild")
DEFAULT_MAX_TRIALS = 20
def main():
op_util.init_logging()
batch_run = batch_util.batch_run()
trials = _batch_trials(batch_run)
batch_util.handle_trials(batch_run, trials)
def _batch_trials(batch_run):
proto_flag_vals = batch_run.batch_proto.get("flags")
max_trials = batch_run.get("max_trials") or DEFAULT_MAX_TRIALS
random_seed = batch_run.get("random_seed")
try:
return skopt_util.random_trials_for_flags(
proto_flag_vals, max_trials, random_seed)
except skopt_util.MissingSearchDimension as e:
skopt_util.missing_search_dim_error(proto_flag_vals)
except skopt_util.InvalidSearchDimension as e:
_search_dim_error(e)
def _search_dim_error(e):
log.error(str(e))
sys.exit(1)
if __name__ == "__main__":
main()
|
[
"guild.op_util.init_logging",
"guild.batch_util.batch_run",
"logging.getLogger",
"guild.batch_util.handle_trials",
"sys.exit"
] |
[((768, 794), 'logging.getLogger', 'logging.getLogger', (['"""guild"""'], {}), "('guild')\n", (785, 794), False, 'import logging\n'), ((837, 859), 'guild.op_util.init_logging', 'op_util.init_logging', ([], {}), '()\n', (857, 859), False, 'from guild import op_util\n'), ((876, 898), 'guild.batch_util.batch_run', 'batch_util.batch_run', ([], {}), '()\n', (896, 898), False, 'from guild import batch_util\n'), ((941, 984), 'guild.batch_util.handle_trials', 'batch_util.handle_trials', (['batch_run', 'trials'], {}), '(batch_run, trials)\n', (965, 984), False, 'from guild import batch_util\n'), ((1089, 1111), 'guild.batch_util.batch_run', 'batch_util.batch_run', ([], {}), '()\n', (1109, 1111), False, 'from guild import batch_util\n'), ((1585, 1596), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1593, 1596), False, 'import sys\n')]
|
#!/usr/bin/python3
import socket
import random
from JIM import *
class User():
def __init__(self, socket, user_id):
self.socket = socket
self.user_id = user_id
self.status = True
def __repr__(self):
if self.status == True:
status_str = "online"
else:
status_str = "offline"
return 'This is objects %s with status %s\n' % (self.user_id, status_str)
def send_message(self, message):
jim_msg = JIM_msg()
        msg = encode.jim_msg(jim_msg.__dict__)
        self.socket.send(msg)
def recv_message(self):
rand = random.randint(1, 10)
if (rand == 4):
message = Message(self.user_id, "general", "Test message", "usual")
#print(message.__dict__)
return message
def set_status_on(self, socket):
self.status = True
self.socket = socket
def set_status_off(self):
self.status = False
self.socket.close()
    def is_online(self):
        return self.status
def user_socket(self):
return self.socket
class User_controller():
def __init__(self):
self.list_users = {}
def add_user(self, user):
self.list_users[user.user_id] = user
def users_is_on(self):
users = []
for key in self.list_users.keys():
user = self.list_users[key]
if user.status == True:
users.append(user)
return users
def users_in_list(self, list_user_ids):
users = []
list_user_ids = list(list_user_ids)
for user_id in list_user_ids:
if user_id in self.list_users.keys():
users.append(self.list_users[user_id])
return users
def get_number_users(self):
return len(self.list_users.keys())
if __name__ == '__main__':
user1 = User(None, 'Aina')
user2 = User(None, 'Vlad')
user3 = User(None, 'Gleb')
user4 = User(None, 'Ignat')
user5 = User(None, 'Vladik')
user2.set_status_off()
user5.set_status_off()
control = User_controller()
control.add_user(user1)
control.add_user(user2)
control.add_user(user3)
control.add_user(user4)
control.add_user(user5)
print(control.users_is_on())
user2.set_status_on(None)
print(control.users_is_on())
print(control.users_in_list(['Vladik', 'Ignat']))
|
[
"socket.send"
] |
[((454, 470), 'socket.send', 'socket.send', (['msg'], {}), '(msg)\n', (465, 470), False, 'import socket\n')]
|
"""Static files."""
from __future__ import absolute_import, unicode_literals
import os
def get_file(*args):
# type: (*str) -> str
"""Get filename for static file."""
return os.path.join(os.path.abspath(os.path.dirname(__file__)), *args)
def logo():
    # type: () -> str
    """Path to the Celery logo image."""
    return get_file('celery_128.png')
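# Usage sketch (added): logo() returns the bundled image's filesystem path,
# so callers that need the raw bytes can read the file themselves:
#     with open(logo(), 'rb') as f:
#         data = f.read()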
|
[
"os.path.dirname"
] |
[((217, 242), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (232, 242), False, 'import os\n')]
|
#importing required libraries
import heapq
import matplotlib.pyplot as plt
import numpy as np
import math
import time
start_time = time.time()
startx =int(input("please enter start point x coordinate: "))
starty =int(input("please enter start point y coordinate: "))
goalx =int(input("please enter goal point x coordinate: "))
goaly =int(input("please enter goal point y coordinate: "))
#res = int(input("enter grid resolution: "))
res = 1
start = (round(startx/res),round(starty/res))
goal = (round(goalx/res),round(goaly/res))
extra = 0
plt.plot(start[0], start[1], "Dr")
plt.plot(goal[0], goal[1], "Dr")
obstacle = np.zeros(shape=(int(151/res),int(251/res)))
plotx = []
ploty = []
####################################WALLS######################################
ox = []
oy = []
for i in range(round(251/res)):
ox.append(i)
oy.append(0)
obstacle[0][i] = 1
ox.append(i)
oy.append(round(150/res))
obstacle[round(150/res)][i] = 1
for i in range(round(151/res)):
ox.append(0)
oy.append(i)
obstacle[i][0] = 1
ox.append(round(250/res))
oy.append(i)
obstacle[i][round(250/res)] = 1
##############################RECTANGLE########################################
points1 = []
points2 = []
points3 = []
points4 = []
for x in range(round(251/res)):
for y in range(round(151/res)):
f1 = -y + (67.5/res) - extra
if f1<=0:
points1.append((x,y))
for x in range(round(251/res)):
for y in range(round(151/res)):
f1 = y - (112.5/res) - extra
if f1<=0:
points2.append((x,y))
for x in range(round(251/res)):
for y in range(round(151/res)):
f1 = -x + (50/res) - extra
if f1<=0:
points3.append((x,y))
for x in range(round(251/res)):
for y in range(round(151/res)):
f1 = x - (100/res) - extra
if f1<=0:
points4.append((x,y))
rectangle = list(set(points1) & set(points2) & set(points3) & set(points4))
for i in rectangle:
obstacle[i[1]][i[0]] = 1
xs = [x[0] for x in rectangle]
ys = [x[1] for x in rectangle]
plt.scatter(xs,ys,color = 'r')
################################CIRCLE#########################################
circle = []
for x in range(round(251/res)):
for y in range(round(151/res)):
f5 = (x - round(190/res))**2 + (y - round(130/res))**2 - ((15/res) + extra)**2
if f5<=0:
circle.append((x,y))
xcircle = [x[0] for x in circle]
ycircle = [x[1] for x in circle]
for i in circle:
obstacle[i[1]][i[0]] = 1
plt.scatter(xcircle,ycircle, color = 'g')
#############################ELLIPSE###########################################
ellipse = []
major_ax = (15/res) + extra
minor_ax = (6/res) + extra
for x in range(round(251/res)):
for y in range(round(151/res)):
f6 = (((x - (140/res))**2)/major_ax**2) + (((y - (120/res))**2)/minor_ax**2) -1
if f6<=0:
ellipse.append((x,y))
xellipse = [x[0] for x in ellipse]
yellipse = [x[1] for x in ellipse]
plt.scatter(xellipse,yellipse, color = 'b')
for i in ellipse:
obstacle[i[1]][i[0]] = 1
#####################################POLYGON###################################
shapeA1 = []
shapeA2 = []
shapeA3 = []
shapeB1 = []
shapeB2 = []
shapeB3 = []
shapeB4 = []
shapeC1 = []
shapeC2 = []
shapeC3 = []
shapeC4 = []
for x in range(round(251/res)):
for y in range(round(151/res)):
################################TRIANGLE1######################################
f1 = 38*x + 23*y - (8530/res) - (extra*math.sqrt(38**2 + 23**2))
if f1<=0:
shapeA1.append((x,y))
f2 = -y + (52/res) - extra
if f2<=0:
shapeA2.append((x,y))
f3 = -38*x + 7*y + (5830/res) - (extra*math.sqrt(38**2 + 7**2))
if f3<=0:
shapeA3.append((x,y))
#################################TRIANGLE2#####################################
f4 = 2*x + 19*y - (1314/res) - (extra*math.sqrt(2**2 + 19**2))
if f4<=0:
shapeB1.append((x,y))
f5 = -41*x - 25*y + (6525/res) - (extra*math.sqrt(41**2 + 25**2))
if f5<=0:
shapeB2.append((x,y))
f6 = 37*x - 13*y - (5355/res) - (extra*math.sqrt(37**2 + 13**2))
if f6<=0:
shapeB3.append((x,y))
################################QUADRILATERAL##################################
f7 = y - (52/res) - extra
if f7<=0:
shapeC1.append((x,y))
f8 = 37*x - 20*y - (6101/res) - extra*math.sqrt(37**2 + 20**2)
if f8<=0:
shapeC2.append((x,y))
f9 = -y + (15/res) - extra
if f9<=0:
shapeC3.append((x,y))
f10 = -37*x + 13*y + (5355/res) - extra*math.sqrt(37**2 + 13**2)
if f10<=0:
shapeC4.append((x,y))
poly1 = list(set(shapeA1) & set(shapeA2) & set(shapeA3))
poly2 = list(set(shapeB1) & set(shapeB2) & set(shapeB3))
poly3 = list(set(shapeC1) & set(shapeC2) & set(shapeC3) & set(shapeC4))
xpoly1 = [x[0] for x in poly1]
ypoly1 = [x[1] for x in poly1]
xpoly2 = [x[0] for x in poly2]
ypoly2 = [x[1] for x in poly2]
xpoly3 = [x[0] for x in poly3]
ypoly3 = [x[1] for x in poly3]
final_poly = list(set(poly1) | set(poly2) | set(poly3))
xpolyf = [x[0] for x in final_poly]
ypolyf = [x[1] for x in final_poly]
final_obstacle_space = set(rectangle) | set(circle) | set(ellipse) | set(final_poly) | set(zip(ox, oy))
for i in final_poly:
obstacle[i[1]][i[0]] = 1
plt.scatter(xpolyf,ypolyf, color = 'm')
obstacle_t = obstacle.T
obs = []
for i in range(round(250/res)):
obs.append(obstacle_t[i])
plt.scatter(ox,oy,color = 'k')
def get_motion_model():
steps = [[1,0,1],
[0,1,1],
[-1,0,1],
[0,-1,1],
[1,1,math.sqrt(2)],
[1,-1,math.sqrt(2)],
[-1,-1,math.sqrt(2)],
[-1,1,math.sqrt(2)]]
return steps
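# Note (added): each motion entry is [dx, dy, step_cost]; the four diagonal
# moves cost sqrt(2), so accumulated path costs match Euclidean distance on
# the 8-connected grid.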
def retrace(clist):
backtrack = []
l = len(clist)
current_pos = clist[l-1][1]
backtrack.append(current_pos)
parent = clist[l-1][2]
while parent != None:
for i in range(l):
X = clist[i]
if X[1] == parent:
parent = X[2]
current_pos = X[1]
backtrack.append(current_pos)
return backtrack[::-1]
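# Note (added): retrace starts from the last entry of the closed list (the
# goal node, which is always appended last), follows parent pointers back to
# the start node, and reverses the collected positions into a start-to-goal
# path.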
def dijkstra_algorithm(start,goal, obstacle):
start_vertex = (0,start,None)
goal_vertex = (0,goal,None)
motion = get_motion_model()
open_list = []
closed_list = []
heapq.heappush(open_list,(start_vertex))
obstacle[start_vertex[1][0]][start_vertex[1][1]] = 1
while len(open_list)>0:
current_node = heapq.heappop(open_list)
heapq.heappush(closed_list,current_node)
plotx.append(current_node[1][0])
ploty.append(current_node[1][1])
if len(ploty)%1000 == 0:
plt.plot(goal[0], goal[1], "Dr")
plt.plot(plotx,ploty, '.y')
plt.plot(goal[0], goal[1], "Dr")
plt.pause(0.001)
if current_node[1] == goal_vertex[1] :
print('goal coordinates found')
final_path = retrace(closed_list)
return final_path
neighbors = []
for new_position in motion:
# Get node position
node_position = (current_node[1][0] + new_position[0],
current_node[1][1] + new_position[1])
node_position_cost = current_node[0] + new_position[2]
node_parent = current_node[1]
minx = 0
miny = 0
maxy = (len(obstacle) - 1)
maxx = (len(obstacle[0]) -1)
# Make sure within range
if node_position[0] > maxy:
continue
if node_position[0] < miny:
continue
if node_position[1] > maxx:
continue
if node_position[1] < minx:
continue
# Make sure walkable terrain
if obstacle[node_position[0]][node_position[1]] != 0:
continue
#Creating cost_map
obstacle[node_position[0]][node_position[1]] = 1
new_node = (node_position_cost,node_position,node_parent)
neighbors.append(new_node)
heapq.heappush(open_list,(new_node))
if start in final_obstacle_space:
    print("start node in obstacle space")
elif goal in final_obstacle_space:
print("goal node in obstacle space")
else:
path = dijkstra_algorithm(start,goal, obs)
plt.plot(plotx,ploty, '.y')
if path!= None:
scatterx = [x[0] for x in path]
scattery = [x[1] for x in path]
plt.plot(scatterx,scattery,color = 'c',linewidth = 4)
elapsed_time = time.time() - start_time
print("time elapsed: ", elapsed_time)
else:
print("path not found")
|
[
"heapq.heappush",
"matplotlib.pyplot.plot",
"math.sqrt",
"matplotlib.pyplot.scatter",
"heapq.heappop",
"time.time",
"matplotlib.pyplot.pause"
] |
[((141, 152), 'time.time', 'time.time', ([], {}), '()\n', (150, 152), False, 'import time\n'), ((572, 606), 'matplotlib.pyplot.plot', 'plt.plot', (['start[0]', 'start[1]', '"""Dr"""'], {}), "(start[0], start[1], 'Dr')\n", (580, 606), True, 'import matplotlib.pyplot as plt\n'), ((608, 640), 'matplotlib.pyplot.plot', 'plt.plot', (['goal[0]', 'goal[1]', '"""Dr"""'], {}), "(goal[0], goal[1], 'Dr')\n", (616, 640), True, 'import matplotlib.pyplot as plt\n'), ((2282, 2312), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xs', 'ys'], {'color': '"""r"""'}), "(xs, ys, color='r')\n", (2293, 2312), True, 'import matplotlib.pyplot as plt\n'), ((2741, 2781), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xcircle', 'ycircle'], {'color': '"""g"""'}), "(xcircle, ycircle, color='g')\n", (2752, 2781), True, 'import matplotlib.pyplot as plt\n'), ((3237, 3279), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xellipse', 'yellipse'], {'color': '"""b"""'}), "(xellipse, yellipse, color='b')\n", (3248, 3279), True, 'import matplotlib.pyplot as plt\n'), ((5933, 5971), 'matplotlib.pyplot.scatter', 'plt.scatter', (['xpolyf', 'ypolyf'], {'color': '"""m"""'}), "(xpolyf, ypolyf, color='m')\n", (5944, 5971), True, 'import matplotlib.pyplot as plt\n'), ((6084, 6114), 'matplotlib.pyplot.scatter', 'plt.scatter', (['ox', 'oy'], {'color': '"""k"""'}), "(ox, oy, color='k')\n", (6095, 6114), True, 'import matplotlib.pyplot as plt\n'), ((7045, 7084), 'heapq.heappush', 'heapq.heappush', (['open_list', 'start_vertex'], {}), '(open_list, start_vertex)\n', (7059, 7084), False, 'import heapq\n'), ((7199, 7223), 'heapq.heappop', 'heapq.heappop', (['open_list'], {}), '(open_list)\n', (7212, 7223), False, 'import heapq\n'), ((7233, 7274), 'heapq.heappush', 'heapq.heappush', (['closed_list', 'current_node'], {}), '(closed_list, current_node)\n', (7247, 7274), False, 'import heapq\n'), ((9416, 9444), 'matplotlib.pyplot.plot', 'plt.plot', (['plotx', 'ploty', '""".y"""'], {}), "(plotx, ploty, '.y')\n", (9424, 9444), True, 'import matplotlib.pyplot as plt\n'), ((6258, 6270), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6267, 6270), False, 'import math\n'), ((6293, 6305), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6302, 6305), False, 'import math\n'), ((6329, 6341), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6338, 6341), False, 'import math\n'), ((6364, 6376), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (6373, 6376), False, 'import math\n'), ((7415, 7447), 'matplotlib.pyplot.plot', 'plt.plot', (['goal[0]', 'goal[1]', '"""Dr"""'], {}), "(goal[0], goal[1], 'Dr')\n", (7423, 7447), True, 'import matplotlib.pyplot as plt\n'), ((7461, 7489), 'matplotlib.pyplot.plot', 'plt.plot', (['plotx', 'ploty', '""".y"""'], {}), "(plotx, ploty, '.y')\n", (7469, 7489), True, 'import matplotlib.pyplot as plt\n'), ((7502, 7534), 'matplotlib.pyplot.plot', 'plt.plot', (['goal[0]', 'goal[1]', '"""Dr"""'], {}), "(goal[0], goal[1], 'Dr')\n", (7510, 7534), True, 'import matplotlib.pyplot as plt\n'), ((7548, 7564), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (7557, 7564), True, 'import matplotlib.pyplot as plt\n'), ((9122, 9157), 'heapq.heappush', 'heapq.heappush', (['open_list', 'new_node'], {}), '(open_list, new_node)\n', (9136, 9157), False, 'import heapq\n'), ((9556, 9608), 'matplotlib.pyplot.plot', 'plt.plot', (['scatterx', 'scattery'], {'color': '"""c"""', 'linewidth': '(4)'}), "(scatterx, scattery, color='c', linewidth=4)\n", (9564, 9608), True, 'import matplotlib.pyplot as plt\n'), ((3776, 3804), 'math.sqrt', 
'math.sqrt', (['(38 ** 2 + 23 ** 2)'], {}), '(38 ** 2 + 23 ** 2)\n', (3785, 3804), False, 'import math\n'), ((4025, 4052), 'math.sqrt', 'math.sqrt', (['(38 ** 2 + 7 ** 2)'], {}), '(38 ** 2 + 7 ** 2)\n', (4034, 4052), False, 'import math\n'), ((4246, 4273), 'math.sqrt', 'math.sqrt', (['(2 ** 2 + 19 ** 2)'], {}), '(2 ** 2 + 19 ** 2)\n', (4255, 4273), False, 'import math\n'), ((4402, 4430), 'math.sqrt', 'math.sqrt', (['(41 ** 2 + 25 ** 2)'], {}), '(41 ** 2 + 25 ** 2)\n', (4411, 4430), False, 'import math\n'), ((4556, 4584), 'math.sqrt', 'math.sqrt', (['(37 ** 2 + 13 ** 2)'], {}), '(37 ** 2 + 13 ** 2)\n', (4565, 4584), False, 'import math\n'), ((4893, 4921), 'math.sqrt', 'math.sqrt', (['(37 ** 2 + 20 ** 2)'], {}), '(37 ** 2 + 20 ** 2)\n', (4902, 4921), False, 'import math\n'), ((5135, 5163), 'math.sqrt', 'math.sqrt', (['(37 ** 2 + 13 ** 2)'], {}), '(37 ** 2 + 13 ** 2)\n', (5144, 5163), False, 'import math\n'), ((9634, 9645), 'time.time', 'time.time', ([], {}), '()\n', (9643, 9645), False, 'import time\n')]
|
from functools import wraps
import time
from hashlib import md5
import threading
class memoize(object):
""" Memoize the results of a function. Supports an optional timeout
for automatic cache expiration.
If the optional manual_flush argument is True, a function called
"flush_cache" will be added to the wrapped function. When
called, it will remove all the timed out values from the cache.
If you use this decorator as a class method, you must specify
instance_method=True otherwise you will have a single shared
cache for every instance of your class.
This decorator is thread safe.
"""
def __init__(self, timeout=None, manual_flush=False, instance_method=False):
self.timeout = timeout
self.manual_flush = manual_flush
self.instance_method = instance_method
self.cache = {}
self.cache_lock = threading.RLock()
def __call__(self, fn):
if self.instance_method:
@wraps(fn)
def rewrite_instance_method(instance, *args, **kwargs):
# the first time we are called we overwrite the method
# on the class instance with a new memoize instance
if hasattr(instance, fn.__name__):
bound_fn = fn.__get__(instance, instance.__class__)
new_memoizer = memoize(self.timeout, self.manual_flush)(bound_fn)
setattr(instance, fn.__name__, new_memoizer)
return getattr(instance, fn.__name__)(*args, **kwargs)
return rewrite_instance_method
def flush_cache():
with self.cache_lock:
removables = set()
for key in list(self.cache.keys()):
if self.timeout is not None and (time.time() - self.cache[key][1]) > self.timeout:
removables.add(key)
if self.timeout is None:
removables.add(key)
for key in removables:
self.cache.pop(key)
removables.clear()
@wraps(fn)
def wrapped(*args, **kwargs):
kw = list(kwargs.items())
kw.sort()
key_str = repr((fn, args, kw))
key = md5(key_str.encode('utf-8', errors='replace')).hexdigest()
with self.cache_lock:
try:
result, cache_time = self.cache[key]
if self.timeout is not None and (time.time() - cache_time) > self.timeout:
raise KeyError
except KeyError:
result, _ = self.cache[key] = (fn(*args, **kwargs), time.time())
if not self.manual_flush and self.timeout is not None:
flush_cache()
return result
if self.manual_flush:
wrapped.flush_cache = flush_cache
return wrapped
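# Hedged usage sketch (added): `slow_square` is a hypothetical function used
# only to demonstrate the decorator; it is not part of the original module.
if __name__ == '__main__':
    @memoize(timeout=5)
    def slow_square(x):
        time.sleep(0.1)  # simulate expensive work
        return x * x

    print(slow_square(3))  # computed and cached
    print(slow_square(3))  # served from the cache until the 5s timeout expires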
|
[
"threading.RLock",
"functools.wraps",
"time.time"
] |
[((918, 935), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (933, 935), False, 'import threading\n'), ((2133, 2142), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (2138, 2142), False, 'from functools import wraps\n'), ((1011, 1020), 'functools.wraps', 'wraps', (['fn'], {}), '(fn)\n', (1016, 1020), False, 'from functools import wraps\n'), ((2713, 2724), 'time.time', 'time.time', ([], {}), '()\n', (2722, 2724), False, 'import time\n'), ((1823, 1834), 'time.time', 'time.time', ([], {}), '()\n', (1832, 1834), False, 'import time\n'), ((2527, 2538), 'time.time', 'time.time', ([], {}), '()\n', (2536, 2538), False, 'import time\n')]
|
import unittest
import doctest
import str_util
class TestStrUtil(unittest.TestCase):
def test_side_effects(self):
list = ['B', 'A', 'C', 'C', '']
length = len(list)
new_list = str_util.trim(list)
new_list = str_util.sort(list)
new_list = str_util.unique(list)
new_list = str_util.unique(list, True)
new_list = str_util.lowercase(list)
new_list = str_util.replace(list, 'A', 'Abe')
new_list = str_util.replace_substring(list, 'A', 'Abe')
self.assertEqual(len(list), length) # Original list has the same length
self.assertEqual(['B', 'A', 'C', 'C', ''], list)
def test_to_list(self):
self.assertEqual(str_util.to_list("Hello"), ['Hello'])
self.assertEqual(str_util.to_list([]), [])
def test_to_string(self):
self.assertEqual(str_util.to_string(1), "1")
self.assertEqual(str_util.to_string("Hello"), "Hello")
self.assertEqual(str_util.to_string(None), "None")
self.assertEqual(str_util.to_string([1, 2, 3]), "[1, 2, 3]")
def test_is_string(self):
self.assertTrue(str_util.is_string("Hello"))
self.assertFalse(str_util.is_string(6))
self.assertFalse(str_util.is_string(None))
self.assertFalse(str_util.is_string(["A"]))
def test_is_list(self):
self.assertTrue(str_util.is_list(["A"]))
self.assertTrue(str_util.is_list([]))
self.assertFalse(str_util.is_list("Hello"))
self.assertFalse(str_util.is_list("Hello"))
def test_trim(self):
self.assertEqual(str_util.trim("A B C "), "A B C")
self.assertEqual(str_util.trim(["A B C ", "", "E "]), ["A B C", "E"])
string_with_newlines = """ A B
C """
self.assertEqual(str_util.trim(string_with_newlines), "A B C")
self.assertRaises(BaseException, str_util.trim, None)
self.assertRaises(BaseException, str_util.trim, 1)
def test_is_empty(self):
self.assertTrue(str_util.is_empty(" "))
self.assertTrue(str_util.is_empty(""))
self.assertTrue(str_util.is_empty(None))
def test_contains(self):
self.assertTrue(str_util.contains("Hello World", "Wo"))
self.assertFalse(str_util.contains("Hello World", "wo"))
self.assertTrue(str_util.contains("Hello World", "wo", True))
def test_replace_substring(self):
self.assertEqual(str_util.replace_substring('c:\\temp', '\\', '/'), "c:/temp")
self.assertEqual(str_util.replace_substring('c:/temp/*.*', '/', '\\'), "c:\\temp\\*.*")
def test_word(self):
self.assertEqual(str_util.word('a b c', 4), '')
def test_doctest(self):
suite = unittest.TestSuite()
suite.addTest(doctest.DocTestSuite("str_util"))
result = unittest.TextTestRunner().run(suite)
self.assertTrue(result.wasSuccessful())
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"str_util.replace_substring",
"str_util.to_string",
"unittest.TextTestRunner",
"unittest.TestSuite",
"str_util.is_empty",
"doctest.DocTestSuite",
"str_util.contains",
"str_util.is_string",
"str_util.unique",
"str_util.to_list",
"str_util.lowercase",
"str_util.sort",
"str_util.trim",
"str_util.word",
"str_util.replace",
"str_util.is_list"
] |
[((2931, 2946), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2944, 2946), False, 'import unittest\n'), ((206, 225), 'str_util.trim', 'str_util.trim', (['list'], {}), '(list)\n', (219, 225), False, 'import str_util\n'), ((245, 264), 'str_util.sort', 'str_util.sort', (['list'], {}), '(list)\n', (258, 264), False, 'import str_util\n'), ((284, 305), 'str_util.unique', 'str_util.unique', (['list'], {}), '(list)\n', (299, 305), False, 'import str_util\n'), ((325, 352), 'str_util.unique', 'str_util.unique', (['list', '(True)'], {}), '(list, True)\n', (340, 352), False, 'import str_util\n'), ((372, 396), 'str_util.lowercase', 'str_util.lowercase', (['list'], {}), '(list)\n', (390, 396), False, 'import str_util\n'), ((416, 450), 'str_util.replace', 'str_util.replace', (['list', '"""A"""', '"""Abe"""'], {}), "(list, 'A', 'Abe')\n", (432, 450), False, 'import str_util\n'), ((470, 514), 'str_util.replace_substring', 'str_util.replace_substring', (['list', '"""A"""', '"""Abe"""'], {}), "(list, 'A', 'Abe')\n", (496, 514), False, 'import str_util\n'), ((2719, 2739), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (2737, 2739), False, 'import unittest\n'), ((708, 733), 'str_util.to_list', 'str_util.to_list', (['"""Hello"""'], {}), "('Hello')\n", (724, 733), False, 'import str_util\n'), ((771, 791), 'str_util.to_list', 'str_util.to_list', (['[]'], {}), '([])\n', (787, 791), False, 'import str_util\n'), ((853, 874), 'str_util.to_string', 'str_util.to_string', (['(1)'], {}), '(1)\n', (871, 874), False, 'import str_util\n'), ((906, 933), 'str_util.to_string', 'str_util.to_string', (['"""Hello"""'], {}), "('Hello')\n", (924, 933), False, 'import str_util\n'), ((969, 993), 'str_util.to_string', 'str_util.to_string', (['None'], {}), '(None)\n', (987, 993), False, 'import str_util\n'), ((1028, 1057), 'str_util.to_string', 'str_util.to_string', (['[1, 2, 3]'], {}), '([1, 2, 3])\n', (1046, 1057), False, 'import str_util\n'), ((1127, 1154), 'str_util.is_string', 'str_util.is_string', (['"""Hello"""'], {}), "('Hello')\n", (1145, 1154), False, 'import str_util\n'), ((1181, 1202), 'str_util.is_string', 'str_util.is_string', (['(6)'], {}), '(6)\n', (1199, 1202), False, 'import str_util\n'), ((1229, 1253), 'str_util.is_string', 'str_util.is_string', (['None'], {}), '(None)\n', (1247, 1253), False, 'import str_util\n'), ((1280, 1305), 'str_util.is_string', 'str_util.is_string', (["['A']"], {}), "(['A'])\n", (1298, 1305), False, 'import str_util\n'), ((1360, 1383), 'str_util.is_list', 'str_util.is_list', (["['A']"], {}), "(['A'])\n", (1376, 1383), False, 'import str_util\n'), ((1409, 1429), 'str_util.is_list', 'str_util.is_list', (['[]'], {}), '([])\n', (1425, 1429), False, 'import str_util\n'), ((1456, 1481), 'str_util.is_list', 'str_util.is_list', (['"""Hello"""'], {}), "('Hello')\n", (1472, 1481), False, 'import str_util\n'), ((1508, 1533), 'str_util.is_list', 'str_util.is_list', (['"""Hello"""'], {}), "('Hello')\n", (1524, 1533), False, 'import str_util\n'), ((1586, 1612), 'str_util.trim', 'str_util.trim', (['"""A B C """'], {}), "('A B C ')\n", (1599, 1612), False, 'import str_util\n'), ((1648, 1688), 'str_util.trim', 'str_util.trim', (["['A B C ', '', 'E ']"], {}), "(['A B C ', '', 'E '])\n", (1661, 1688), False, 'import str_util\n'), ((1794, 1829), 'str_util.trim', 'str_util.trim', (['string_with_newlines'], {}), '(string_with_newlines)\n', (1807, 1829), False, 'import str_util\n'), ((2016, 2043), 'str_util.is_empty', 'str_util.is_empty', (['""" """'], {}), "(' ')\n", (2033, 2043), False, 
'import str_util\n'), ((2069, 2090), 'str_util.is_empty', 'str_util.is_empty', (['""""""'], {}), "('')\n", (2086, 2090), False, 'import str_util\n'), ((2116, 2139), 'str_util.is_empty', 'str_util.is_empty', (['None'], {}), '(None)\n', (2133, 2139), False, 'import str_util\n'), ((2195, 2233), 'str_util.contains', 'str_util.contains', (['"""Hello World"""', '"""Wo"""'], {}), "('Hello World', 'Wo')\n", (2212, 2233), False, 'import str_util\n'), ((2260, 2298), 'str_util.contains', 'str_util.contains', (['"""Hello World"""', '"""wo"""'], {}), "('Hello World', 'wo')\n", (2277, 2298), False, 'import str_util\n'), ((2324, 2368), 'str_util.contains', 'str_util.contains', (['"""Hello World"""', '"""wo"""', '(True)'], {}), "('Hello World', 'wo', True)\n", (2341, 2368), False, 'import str_util\n'), ((2434, 2483), 'str_util.replace_substring', 'str_util.replace_substring', (['"""c:\\\\temp"""', '"""\\\\"""', '"""/"""'], {}), "('c:\\\\temp', '\\\\', '/')\n", (2460, 2483), False, 'import str_util\n'), ((2521, 2573), 'str_util.replace_substring', 'str_util.replace_substring', (['"""c:/temp/*.*"""', '"""/"""', '"""\\\\"""'], {}), "('c:/temp/*.*', '/', '\\\\')\n", (2547, 2573), False, 'import str_util\n'), ((2643, 2668), 'str_util.word', 'str_util.word', (['"""a b c"""', '(4)'], {}), "('a b c', 4)\n", (2656, 2668), False, 'import str_util\n'), ((2762, 2794), 'doctest.DocTestSuite', 'doctest.DocTestSuite', (['"""str_util"""'], {}), "('str_util')\n", (2782, 2794), False, 'import doctest\n'), ((2813, 2838), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {}), '()\n', (2836, 2838), False, 'import unittest\n')]
|
# PyZX - Python library for quantum circuit rewriting
# and optimisation using the ZX-calculus
# Copyright (C) 2021 - <NAME> and <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains objective functions to guide local search over ZX-diagrams. The wgc function defines a measure of circuit complexity -- a weighted gate count in which two-qubit gates incur a higher cost. The g_wgc function takes a ZX-diagram as input and optionally applies various optimizations before measuring the complexity of the circuit obtained via extraction.
"""
import sys
if __name__ == '__main__':
sys.path.append('..')
from pyzx.extract import extract_circuit
from pyzx.simplify import full_reduce
from pyzx.optimize import basic_optimization
# Weighted gate count
def wgc(c, two_qb_weight=10):
"""A measure of the complexity of a given circuit. By default, 2-qubit
gates are treated as 10X more costly than single-qubit gates"""
c_tmp = c.to_basic_gates()
total = len(c_tmp.gates)
n2 = c_tmp.twoqubitcount()
single_qubit_count = total - n2
return two_qb_weight * n2 + single_qubit_count
# Weighted gate count of a ZX-diagram
def g_wgc(g, two_qb_weight=10, g_simplify=True, c_simplify=True):
"""A measure of the complexity of the circuit obtained from a a ZX-diagram
upon extraction using the above measure of circuit complexity"""
g_tmp = g.copy()
if g_simplify:
full_reduce(g_tmp)
c = extract_circuit(g_tmp.copy()).to_basic_gates()
if c_simplify:
c = basic_optimization(c)
return wgc(c, two_qb_weight=two_qb_weight)
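# Hedged usage sketch (added): assumes pyzx is importable and that
# pyzx.generate.cliffordT is available for building a random test diagram.
if __name__ == '__main__':
    import pyzx as zx
    g = zx.generate.cliffordT(3, 20)  # 3 qubits, depth-20 random Clifford+T circuit
    print(g_wgc(g, two_qb_weight=10))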
|
[
"sys.path.append",
"pyzx.optimize.basic_optimization",
"pyzx.simplify.full_reduce"
] |
[((1096, 1117), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (1111, 1117), False, 'import sys\n'), ((1919, 1937), 'pyzx.simplify.full_reduce', 'full_reduce', (['g_tmp'], {}), '(g_tmp)\n', (1930, 1937), False, 'from pyzx.simplify import full_reduce\n'), ((2026, 2047), 'pyzx.optimize.basic_optimization', 'basic_optimization', (['c'], {}), '(c)\n', (2044, 2047), False, 'from pyzx.optimize import basic_optimization\n')]
|
from struct import pack, unpack
# NS_ANDROID_URI = 'http://schemas.android.com/apk/res/android'
# AXML FORMAT ########################################
# Translated from
# http://code.google.com/p/android4me/source/browse/src/android/content/res/AXmlResourceParser.java
UTF8_FLAG = 0x00000100
CHUNK_STRINGPOOL_TYPE = 0x001C0001
CHUNK_NULL_TYPE = 0x00000000
class StringPoolChunk(object):
    '''
    Parse a String Pool Chunk.
    '''
def __init__(self, buff):
self.size_of_buff = buff.size()
self.start = buff.get_idx()
self._cache = {}
self.header_size, self.header = self.skipNullPadding(buff)
        # Chunk size
self.chunkSize = unpack('<i', buff.read(4))[0]
        # Number of strings
self.stringCount = unpack('<i', buff.read(4))[0]
        # Number of styles
self.styleCount = unpack('<i', buff.read(4))[0]
        # String format flags
self.flags = unpack('<i', buff.read(4))[0]
        # Strings are stored in one of two encodings: 16-bit (UTF-16) or UTF-8
self.m_isUTF8 = (self.flags & UTF8_FLAG) != 0
        # Offset where the string data starts
self.stringsStart = unpack('<i', buff.read(4))[0]
        # Note:
        # 1. When parsing a manifest, this value is always zero.
        # 2. It can never be larger than the file size (developers often abuse this field to defeat parsing tools).
self.stylesStart = unpack('<i', buff.read(4))[0]
if self.stylesStart > buff.size():
self.stylesStart = 0
        # String offset array
self.m_stringIndices = []
        # Style offset array
self.m_styleIndices = []
        # String pool
self.m_charbuff = ""
        # Style span pool
self.m_styles = []
for _ in range(0, self.stringCount):
tmp = buff.read(4)
self.m_stringIndices.append(unpack('<i', tmp)[0])
for _ in range(0, self.styleCount):
tmp = buff.read(4)
self.m_styleIndices.append(unpack('<i', tmp)[0])
        # 4-byte alignment
size = self.chunkSize - self.stringsStart
if self.stylesStart != 0:
size = self.stylesStart - self.stringsStart
        # String pool
self.m_charbuff = buff.read(size)
if self.stylesStart != 0:
size = self.chunkSize - self.stylesStart
for _ in range(0, int(size / 4) - 1):
tmp = buff.read(4)
self.m_styles.append(unpack('<i', tmp)[0])
def skipNullPadding(self, buff):
        '''
        Keep scanning for CHUNK_STRINGPOOL_TYPE; samples that actually need this padding skip have not been encountered yet.
        '''
def readNext(buff, first_run=True):
datas = unpack('<i', buff.read(4))
header = datas[0]
if header == CHUNK_NULL_TYPE and first_run:
print("Skipping null padding in StringPoolChunk header")
header = readNext(buff, first_run=False)
elif header != CHUNK_STRINGPOOL_TYPE:
print("Invalid StringPoolChunk header")
return header
header = readNext(buff)
return header >> 8, header & 0xFF
def getString(self, idx):
if idx in self._cache:
return self._cache[idx]
if idx < 0 or not self.m_stringIndices or idx >= len(
self.m_stringIndices):
return ""
offset = self.m_stringIndices[idx]
if self.m_isUTF8:
self._cache[idx] = self.decode8(offset)
else:
self._cache[idx] = self.decode16(offset)
return self._cache[idx]
def getStyle(self, idx):
return self.m_styles[idx]
def decode8(self, offset):
str_len, skip = self.decodeLength(offset, 1)
offset += skip
encoded_bytes, skip = self.decodeLength(offset, 1)
offset += skip
data = self.m_charbuff[offset: offset + encoded_bytes]
return self.decode_bytes(data, 'utf-8', str_len)
def decode16(self, offset):
str_len, skip = self.decodeLength(offset, 2)
offset += skip
encoded_bytes = str_len * 2
data = self.m_charbuff[offset: offset + encoded_bytes]
return self.decode_bytes(data, 'utf-16', str_len)
def decode_bytes(self, data, encoding, str_len):
string = data.decode(encoding, 'replace')
if len(string) != str_len:
raise Exception("invalid decoded string length")
return string
    def decodeLength(self, offset, sizeof_char):
        sizeof_2chars = sizeof_char << 1
        fmt_chr = 'B' if sizeof_char == 1 else 'H'
        fmt = "<2" + fmt_chr
        length1, length2 = unpack(
            fmt, self.m_charbuff[offset:(offset + sizeof_2chars)])
        # When the high bit of the first unit is set, the length spans two
        # units: the low bits of the first unit carry the high-order part.
        highbit = 0x80 << (8 * (sizeof_char - 1))
        if (length1 & highbit) != 0:
            return ((length1 & ~highbit) << (8 * sizeof_char)) | length2, sizeof_2chars
        return length1, sizeof_char
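    # Worked example of the length encoding (added): a UTF-8 string of length
    # 0x1234 stores its length as the byte pair 0x92 0x34 -- 0x92 = 0x80 | 0x12
    # sets the continuation bit, so (0x12 << 8) | 0x34 reconstructs 0x1234.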
def show(self, flag=False):
print("String Pool Chunk:")
print(" - start:", self.start)
print(" - header Size:", self.header_size)
print(" - chunkSize:", self.chunkSize)
print(" - stringCount:", self.stringCount)
print(" - styleCount:", self.styleCount)
print(" - stringsStart:", self.stringsStart)
print(" - stylesStart:", self.stylesStart)
print(" - flags:", self.flags)
print(" - size_of_buff:", self.size_of_buff)
if not flag:
return
for i in range(0, len(self.m_stringIndices)):
print((i, repr(self.getString(i))))
CHUNK_RESOURCEIDS_TYPE = 0x00080180
class ResourceIDChunk(object):
pass
class SV(object):
def __init__(self, size, buff):
self.__size = size
self.__value = unpack(self.__size, buff)[0]
def _get(self):
return pack(self.__size, self.__value)
def __str__(self):
return "0x%x" % self.__value
def __int__(self):
return self.__value
def get_value_buff(self):
return self._get()
def get_value(self):
return self.__value
def set_value(self, attr):
self.__value = attr
class BuffHandle(object):
def __init__(self, buff):
self.__buff = buff
self.__idx = 0
def size(self):
return len(self.__buff)
def set_idx(self, idx):
self.__idx = idx
def get_idx(self):
return self.__idx
def readNullString(self, size):
data = self.read(size)
return data
def read_b(self, size):
return self.__buff[self.__idx:self.__idx + size]
def read_at(self, offset, size):
return self.__buff[offset:offset + size]
def read(self, size):
if isinstance(size, SV):
size = size.value
buff = self.__buff[self.__idx:self.__idx + size]
self.__idx += size
return buff
def end(self):
return self.__idx == len(self.__buff)
# ATTRIBUTE_IX_NAMESPACE_URI = 0
# ATTRIBUTE_IX_NAME = 1
# ATTRIBUTE_IX_VALUE_STRING = 2
# ATTRIBUTE_IX_VALUE_TYPE = 3
# ATTRIBUTE_IX_VALUE_DATA = 4
# ATTRIBUTE_LENGHT = 5
#
# CHUNK_AXML_FILE = 0x00080003
# CHUNK_XML_FIRST = 0x00100100
# CHUNK_XML_START_NAMESPACE = 0x00100100
# CHUNK_XML_END_NAMESPACE = 0x00100101
# CHUNK_XML_START_TAG = 0x00100102
# CHUNK_XML_END_TAG = 0x00100103
# CHUNK_XML_TEXT = 0x00100104
# CHUNK_XML_LAST = 0x00100104
#
# START_DOCUMENT = 0
# END_DOCUMENT = 1
# START_TAG = 2
# END_TAG = 3
# TEXT = 4
#
# RADIX_MULTS = [0.00390625, 3.051758E-005, 1.192093E-007, 4.656613E-010]
# DIMENSION_UNITS = ["px", "dip", "sp", "pt", "in", "mm"]
# FRACTION_UNITS = ["%", "%p"]
#
# COMPLEX_UNIT_MASK = 15
|
[
"struct.unpack",
"struct.pack"
] |
[((4419, 4478), 'struct.unpack', 'unpack', (['fmt', 'self.m_charbuff[offset:offset + sizeof_2chars]'], {}), '(fmt, self.m_charbuff[offset:offset + sizeof_2chars])\n', (4425, 4478), False, 'from struct import pack, unpack\n'), ((5602, 5633), 'struct.pack', 'pack', (['self.__size', 'self.__value'], {}), '(self.__size, self.__value)\n', (5606, 5633), False, 'from struct import pack, unpack\n'), ((5537, 5562), 'struct.unpack', 'unpack', (['self.__size', 'buff'], {}), '(self.__size, buff)\n', (5543, 5562), False, 'from struct import pack, unpack\n'), ((1630, 1647), 'struct.unpack', 'unpack', (['"""<i"""', 'tmp'], {}), "('<i', tmp)\n", (1636, 1647), False, 'from struct import pack, unpack\n'), ((1767, 1784), 'struct.unpack', 'unpack', (['"""<i"""', 'tmp'], {}), "('<i', tmp)\n", (1773, 1784), False, 'from struct import pack, unpack\n'), ((2215, 2232), 'struct.unpack', 'unpack', (['"""<i"""', 'tmp'], {}), "('<i', tmp)\n", (2221, 2232), False, 'from struct import pack, unpack\n')]
|
import unittest
import uuid
import py3crdt
from py3crdt.gset import GSet
class TestGSet(unittest.TestCase):
def setUp(self):
# Create a GSet
self.gset1 = GSet(uuid.uuid4())
# Create another GSet
self.gset2 = GSet(uuid.uuid4())
# Add elements to gset1
self.gset1.add('a')
self.gset1.add('b')
        # Add elements to gset2
self.gset2.add('b')
self.gset2.add('c')
self.gset2.add('d')
def test_elements_add_correctly_gset(self):
self.assertEqual(self.gset1.payload, ['a', 'b'])
self.assertEqual(self.gset2.payload, ['b', 'c', 'd'])
def test_querying_gset_without_merging(self):
# Check gset1 querying
self.assertTrue(self.gset1.query('a'))
self.assertTrue(self.gset1.query('b'))
self.assertFalse(self.gset1.query('c'))
self.assertFalse(self.gset1.query('d'))
# Check gset2 querying
self.assertFalse(self.gset2.query('a'))
self.assertTrue(self.gset2.query('b'))
self.assertTrue(self.gset2.query('c'))
self.assertTrue(self.gset2.query('d'))
def test_merging_gset(self):
# Check gset1 merging
self.gset1.merge(self.gset2)
self.assertEqual(self.gset1.payload, ['a', 'b', 'c', 'd'])
# Check gset2 merging
self.gset2.merge(self.gset1)
self.assertEqual(self.gset2.payload, ['a', 'b', 'c', 'd'])
# Check if they are both equal
self.assertEqual(self.gset1.payload, self.gset2.payload)
def test_querying_gset_with_merging(self):
# Check gset2 merging
self.gset2.merge(self.gset1)
self.assertTrue(self.gset2.query('a'))
self.assertTrue(self.gset2.query('b'))
self.assertTrue(self.gset2.query('c'))
self.assertTrue(self.gset2.query('d'))
# Check gset1 merging
self.gset1.merge(self.gset2)
self.assertTrue(self.gset1.query('a'))
self.assertTrue(self.gset1.query('b'))
self.assertTrue(self.gset1.query('c'))
self.assertTrue(self.gset1.query('d'))
if __name__ == '__main__':
unittest.main()
|
[
"unittest.main",
"uuid.uuid4"
] |
[((2132, 2147), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2145, 2147), False, 'import unittest\n'), ((180, 192), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (190, 192), False, 'import uuid\n'), ((251, 263), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (261, 263), False, 'import uuid\n')]
|
# -*- coding: utf-8 -*-
""" Define the Attention Layer of the model.
"""
from __future__ import print_function, division
import torch
from torch.autograd import Variable
from torch.nn import Module
from torch.nn.parameter import Parameter
class Attention(Module):
"""
Computes a weighted average of the different channels across timesteps.
    Uses one parameter per channel to compute the attention value for a single timestep.
"""
def __init__(self, attention_size, return_attention=False):
""" Initialize the attention layer
# Arguments:
attention_size: Size of the attention vector.
return_attention: If true, output will include the weight for each input token
used for the prediction
"""
super(Attention, self).__init__()
self.return_attention = return_attention
self.attention_size = attention_size
self.attention_vector = Parameter(torch.FloatTensor(attention_size))
self.attention_vector.data.normal_(std=0.05) # Initialize attention vector
def __repr__(self):
s = '{name}({attention_size}, return attention={return_attention})'
return s.format(name=self.__class__.__name__, **self.__dict__)
def forward(self, inputs, input_lengths):
""" Forward pass.
# Arguments:
inputs (Torch.Variable): Tensor of input sequences
input_lengths (torch.LongTensor): Lengths of the sequences
# Return:
Tuple with (representations and attentions if self.return_attention else None).
"""
logits = inputs.matmul(self.attention_vector)
unnorm_ai = (logits - logits.max()).exp()
# Compute a mask for the attention on the padded sequences
# See e.g. https://discuss.pytorch.org/t/self-attention-on-words-and-masking/5671/5
max_len = unnorm_ai.size(1)
idxes = torch.arange(0, max_len, out=torch.LongTensor(max_len)).unsqueeze(0)
mask = Variable((idxes < input_lengths.unsqueeze(1)).float())
# apply mask and renormalize attention scores (weights)
if unnorm_ai.is_cuda:
mask = mask.cuda()
masked_weights = unnorm_ai * mask
att_sums = masked_weights.sum(dim=1, keepdim=True) # sums per sequence
attentions = masked_weights.div(att_sums)
# apply attention weights
weighted = torch.mul(inputs, attentions.unsqueeze(-1).expand_as(inputs))
# get the final fixed vector representations of the sentences
representations = weighted.sum(dim=1)
return (representations, attentions if self.return_attention else None)
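# Hedged usage sketch (added): the sizes below are illustrative, not from
# the original module.
if __name__ == '__main__':
    attn = Attention(attention_size=16, return_attention=True)
    inputs = Variable(torch.randn(2, 5, 16))   # (batch, timesteps, channels)
    lengths = torch.LongTensor([5, 3])         # valid timesteps per sequence
    reps, weights = attn(inputs, lengths)
    print(reps.size(), weights.size())       # torch.Size([2, 16]) torch.Size([2, 5])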
|
[
"torch.FloatTensor",
"torch.LongTensor"
] |
[((971, 1004), 'torch.FloatTensor', 'torch.FloatTensor', (['attention_size'], {}), '(attention_size)\n', (988, 1004), False, 'import torch\n'), ((1958, 1983), 'torch.LongTensor', 'torch.LongTensor', (['max_len'], {}), '(max_len)\n', (1974, 1983), False, 'import torch\n')]
|
# coding: utf-8
"""
Gitea API.
This documentation describes the Gitea API. # noqa: E501
OpenAPI spec version: 1.16.7
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class Release(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'assets': 'list[Attachment]',
'author': 'User',
'body': 'str',
'created_at': 'datetime',
'draft': 'bool',
'html_url': 'str',
'id': 'int',
'name': 'str',
'prerelease': 'bool',
'published_at': 'datetime',
'tag_name': 'str',
'tarball_url': 'str',
'target_commitish': 'str',
'url': 'str',
'zipball_url': 'str'
}
attribute_map = {
'assets': 'assets',
'author': 'author',
'body': 'body',
'created_at': 'created_at',
'draft': 'draft',
'html_url': 'html_url',
'id': 'id',
'name': 'name',
'prerelease': 'prerelease',
'published_at': 'published_at',
'tag_name': 'tag_name',
'tarball_url': 'tarball_url',
'target_commitish': 'target_commitish',
'url': 'url',
'zipball_url': 'zipball_url'
}
def __init__(self, assets=None, author=None, body=None, created_at=None, draft=None, html_url=None, id=None, name=None, prerelease=None, published_at=None, tag_name=None, tarball_url=None, target_commitish=None, url=None, zipball_url=None): # noqa: E501
"""Release - a model defined in Swagger""" # noqa: E501
self._assets = None
self._author = None
self._body = None
self._created_at = None
self._draft = None
self._html_url = None
self._id = None
self._name = None
self._prerelease = None
self._published_at = None
self._tag_name = None
self._tarball_url = None
self._target_commitish = None
self._url = None
self._zipball_url = None
self.discriminator = None
if assets is not None:
self.assets = assets
if author is not None:
self.author = author
if body is not None:
self.body = body
if created_at is not None:
self.created_at = created_at
if draft is not None:
self.draft = draft
if html_url is not None:
self.html_url = html_url
if id is not None:
self.id = id
if name is not None:
self.name = name
if prerelease is not None:
self.prerelease = prerelease
if published_at is not None:
self.published_at = published_at
if tag_name is not None:
self.tag_name = tag_name
if tarball_url is not None:
self.tarball_url = tarball_url
if target_commitish is not None:
self.target_commitish = target_commitish
if url is not None:
self.url = url
if zipball_url is not None:
self.zipball_url = zipball_url
@property
def assets(self):
"""Gets the assets of this Release. # noqa: E501
:return: The assets of this Release. # noqa: E501
:rtype: list[Attachment]
"""
return self._assets
@assets.setter
def assets(self, assets):
"""Sets the assets of this Release.
:param assets: The assets of this Release. # noqa: E501
:type: list[Attachment]
"""
self._assets = assets
@property
def author(self):
"""Gets the author of this Release. # noqa: E501
:return: The author of this Release. # noqa: E501
:rtype: User
"""
return self._author
@author.setter
def author(self, author):
"""Sets the author of this Release.
:param author: The author of this Release. # noqa: E501
:type: User
"""
self._author = author
@property
def body(self):
"""Gets the body of this Release. # noqa: E501
:return: The body of this Release. # noqa: E501
:rtype: str
"""
return self._body
@body.setter
def body(self, body):
"""Sets the body of this Release.
:param body: The body of this Release. # noqa: E501
:type: str
"""
self._body = body
@property
def created_at(self):
"""Gets the created_at of this Release. # noqa: E501
:return: The created_at of this Release. # noqa: E501
:rtype: datetime
"""
return self._created_at
@created_at.setter
def created_at(self, created_at):
"""Sets the created_at of this Release.
:param created_at: The created_at of this Release. # noqa: E501
:type: datetime
"""
self._created_at = created_at
@property
def draft(self):
"""Gets the draft of this Release. # noqa: E501
:return: The draft of this Release. # noqa: E501
:rtype: bool
"""
return self._draft
@draft.setter
def draft(self, draft):
"""Sets the draft of this Release.
:param draft: The draft of this Release. # noqa: E501
:type: bool
"""
self._draft = draft
@property
def html_url(self):
"""Gets the html_url of this Release. # noqa: E501
:return: The html_url of this Release. # noqa: E501
:rtype: str
"""
return self._html_url
@html_url.setter
def html_url(self, html_url):
"""Sets the html_url of this Release.
:param html_url: The html_url of this Release. # noqa: E501
:type: str
"""
self._html_url = html_url
@property
def id(self):
"""Gets the id of this Release. # noqa: E501
:return: The id of this Release. # noqa: E501
:rtype: int
"""
return self._id
@id.setter
def id(self, id):
"""Sets the id of this Release.
:param id: The id of this Release. # noqa: E501
:type: int
"""
self._id = id
@property
def name(self):
"""Gets the name of this Release. # noqa: E501
:return: The name of this Release. # noqa: E501
:rtype: str
"""
return self._name
@name.setter
def name(self, name):
"""Sets the name of this Release.
:param name: The name of this Release. # noqa: E501
:type: str
"""
self._name = name
@property
def prerelease(self):
"""Gets the prerelease of this Release. # noqa: E501
:return: The prerelease of this Release. # noqa: E501
:rtype: bool
"""
return self._prerelease
@prerelease.setter
def prerelease(self, prerelease):
"""Sets the prerelease of this Release.
:param prerelease: The prerelease of this Release. # noqa: E501
:type: bool
"""
self._prerelease = prerelease
@property
def published_at(self):
"""Gets the published_at of this Release. # noqa: E501
:return: The published_at of this Release. # noqa: E501
:rtype: datetime
"""
return self._published_at
@published_at.setter
def published_at(self, published_at):
"""Sets the published_at of this Release.
:param published_at: The published_at of this Release. # noqa: E501
:type: datetime
"""
self._published_at = published_at
@property
def tag_name(self):
"""Gets the tag_name of this Release. # noqa: E501
:return: The tag_name of this Release. # noqa: E501
:rtype: str
"""
return self._tag_name
@tag_name.setter
def tag_name(self, tag_name):
"""Sets the tag_name of this Release.
:param tag_name: The tag_name of this Release. # noqa: E501
:type: str
"""
self._tag_name = tag_name
@property
def tarball_url(self):
"""Gets the tarball_url of this Release. # noqa: E501
:return: The tarball_url of this Release. # noqa: E501
:rtype: str
"""
return self._tarball_url
@tarball_url.setter
def tarball_url(self, tarball_url):
"""Sets the tarball_url of this Release.
:param tarball_url: The tarball_url of this Release. # noqa: E501
:type: str
"""
self._tarball_url = tarball_url
@property
def target_commitish(self):
"""Gets the target_commitish of this Release. # noqa: E501
:return: The target_commitish of this Release. # noqa: E501
:rtype: str
"""
return self._target_commitish
@target_commitish.setter
def target_commitish(self, target_commitish):
"""Sets the target_commitish of this Release.
:param target_commitish: The target_commitish of this Release. # noqa: E501
:type: str
"""
self._target_commitish = target_commitish
@property
def url(self):
"""Gets the url of this Release. # noqa: E501
:return: The url of this Release. # noqa: E501
:rtype: str
"""
return self._url
@url.setter
def url(self, url):
"""Sets the url of this Release.
:param url: The url of this Release. # noqa: E501
:type: str
"""
self._url = url
@property
def zipball_url(self):
"""Gets the zipball_url of this Release. # noqa: E501
:return: The zipball_url of this Release. # noqa: E501
:rtype: str
"""
return self._zipball_url
@zipball_url.setter
def zipball_url(self, zipball_url):
"""Sets the zipball_url of this Release.
:param zipball_url: The zipball_url of this Release. # noqa: E501
:type: str
"""
self._zipball_url = zipball_url
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(Release, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, Release):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
|
[
"six.iteritems"
] |
[((10549, 10582), 'six.iteritems', 'six.iteritems', (['self.swagger_types'], {}), '(self.swagger_types)\n', (10562, 10582), False, 'import six\n')]
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
# Modified 2017 Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Generic training script that trains a model using a given dataset."""
import tensorflow as tf
import pandas as pd
import numpy as np
import os
import functools
from tensorflow.python.ops import control_flow_ops
from deployment import model_deploy
from nets import resnet_v1 # Needed to be modified, see https://github.com/tensorflow/models/issues/533
from tensorflow.contrib.training.python.training import evaluation
slim = tf.contrib.slim
''' Enumerate the flags '''
tf.app.flags.DEFINE_string('train_dir',
'D:\\tf\\models',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_string('dataset_name', 'aerial', 'The name of the dataset to load.')
tf.app.flags.DEFINE_string('dataset_dir',
'D:\\combined\\train_subsample',
'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string('checkpoint_path',
'D:\\tf\\resnet_v1_50.ckpt',
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string('checkpoint_exclude_scopes', 'resnet_v1_50/logits',
'Comma-separated list of scopes of variables to exclude when restoring '
'from a checkpoint.')
tf.app.flags.DEFINE_string('trainable_scopes', 'resnet_v1_50/logits',
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_integer('num_clones', 1, 'Number of model clones to deploy.')
tf.app.flags.DEFINE_boolean('clone_on_cpu', False, 'Use CPUs to deploy clones.')
tf.app.flags.DEFINE_integer('num_readers', 4, 'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_integer('num_preprocessing_threads', 4, 'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer('log_every_n_steps', 10, 'The frequency with which logs are printed.')
tf.app.flags.DEFINE_integer('save_summaries_secs', 600, 'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer('save_interval_secs', 600, 'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_float('weight_decay', 0.00004, 'The weight decay on the model weights.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_float('learning_rate', 0.02, 'Initial learning rate.')
tf.app.flags.DEFINE_float('label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_float('learning_rate_decay_factor', 0.9, 'Learning rate decay factor.')
tf.app.flags.DEFINE_float('num_epochs_per_decay', 2.0, 'Number of epochs after which learning rate decays.')
tf.app.flags.DEFINE_integer('replicas_to_aggregate', 1, 'The number of gradients to collect before updating params.')
tf.app.flags.DEFINE_integer('batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer('max_number_of_steps', 4000, 'The maximum number of training steps.')
FLAGS = tf.app.flags.FLAGS
def get_image_and_class_count(dataset_dir, split_name):
df = pd.read_csv(os.path.join(dataset_dir, 'dataset_split_info.csv'))
image_count = len(df.loc[df['split_name'] == split_name].index)
class_count = len(df['class_name'].unique())
return(image_count, class_count)
def read_label_file(dataset_dir, filename='labels.txt'):
labels_filename = os.path.join(dataset_dir, filename)
with tf.gfile.Open(labels_filename, 'r') as f:
lines = f.read()
lines = lines.split('\n')
lines = filter(None, lines)
labels_to_class_names = {}
for line in lines:
index = line.index(':')
labels_to_class_names[line[:index]] = line[index+1:]
return(labels_to_class_names)
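# Example labels file (added for clarity): each line maps "label:class_name",
# so a two-class labels.txt might contain (class names here are hypothetical):
#     0:barren
#     1:forest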
def mean_image_subtraction(image, means):
if image.get_shape().ndims != 3:
raise ValueError('Input must be of size [height, width, C>0]')
num_channels = image.get_shape().as_list()[-1]
if len(means) != num_channels:
raise ValueError('len(means) must match the number of channels')
channels = tf.split(axis=2, num_or_size_splits=num_channels, value=image)
for i in range(num_channels):
channels[i] -= means[i]
return(tf.concat(axis=2, values=channels))
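# Example (added): mean_image_subtraction(img, [123.68, 116.78, 103.94])
# shifts each RGB channel so the ImageNet channel means map to zero.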
def get_preprocessing():
def preprocessing_fn(image, output_height=224, output_width=224):
''' Resize the image and subtract "mean" RGB values '''
_R_MEAN = 123.68
_G_MEAN = 116.78
_B_MEAN = 103.94
        # Randomly crop a square patch (side between 175 and 222 px), then
        # resize it back to the network's input size below
        temp_dim = np.random.randint(175, 223)
        distorted_image = tf.random_crop(image, [temp_dim, temp_dim, 3])
distorted_image = tf.expand_dims(distorted_image, 0)
resized_image = tf.image.resize_bilinear(distorted_image, [output_height, output_width], align_corners=False)
resized_image = tf.squeeze(resized_image)
resized_image.set_shape([output_height, output_width, 3])
resized_image = tf.image.random_flip_left_right(resized_image)
image = tf.to_float(resized_image)
return(mean_image_subtraction(image, [_R_MEAN, _G_MEAN, _B_MEAN]))
return(preprocessing_fn)
def get_network_fn(num_classes, weight_decay=0.0):
arg_scope = resnet_v1.resnet_arg_scope(weight_decay=weight_decay)
func = resnet_v1.resnet_v1_50
@functools.wraps(func)
def network_fn(images):
with slim.arg_scope(arg_scope):
return func(images, num_classes)
if hasattr(func, 'default_image_size'):
network_fn.default_image_size = func.default_image_size
return(network_fn)
def _add_variables_summaries(learning_rate):
summaries = []
for variable in slim.get_model_variables():
        summaries.append(tf.summary.histogram(variable.op.name, variable))
    summaries.append(tf.summary.scalar('training/Learning_Rate', learning_rate))
return(summaries)
def _get_init_fn():
if (FLAGS.checkpoint_path is None) or (tf.train.latest_checkpoint(FLAGS.train_dir)):
return None
exclusions = []
if FLAGS.checkpoint_exclude_scopes:
exclusions = [scope.strip() for scope in FLAGS.checkpoint_exclude_scopes.split(',')]
variables_to_restore = []
for var in slim.get_model_variables():
excluded = False
for exclusion in exclusions:
if var.op.name.startswith(exclusion):
excluded = True
break
if not excluded:
variables_to_restore.append(var)
if tf.gfile.IsDirectory(FLAGS.checkpoint_path):
checkpoint_path = tf.train.latest_checkpoint(FLAGS.checkpoint_path)
else:
checkpoint_path = FLAGS.checkpoint_path
tf.logging.info('Fine-tuning from {}'.format(checkpoint_path))
return(slim.assign_from_checkpoint_fn(checkpoint_path,
variables_to_restore,
ignore_missing_vars=False))
def _get_variables_to_train():
scopes = [scope.strip() for scope in FLAGS.trainable_scopes.split(',')]
variables_to_train = []
for scope in scopes:
variables = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
variables_to_train.extend(variables)
return(variables_to_train)
def get_dataset(dataset_name, dataset_dir, image_count, class_count, split_name):
slim = tf.contrib.slim
items_to_descriptions = {'image': 'A color image.',
'label': 'An integer in range(0, class_count)'}
file_pattern = os.path.join(dataset_dir, '{}_{}_*.tfrecord'.format(dataset_name, split_name))
reader = tf.TFRecordReader
keys_to_features = {'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='png'),
'image/class/label': tf.FixedLenFeature([], tf.int64,
default_value=tf.zeros([], dtype=tf.int64))}
items_to_handlers = {'image': slim.tfexample_decoder.Image(),
'label': slim.tfexample_decoder.Tensor('image/class/label')}
decoder = slim.tfexample_decoder.TFExampleDecoder(keys_to_features, items_to_handlers)
labels_to_names = read_label_file(dataset_dir)
return(slim.dataset.Dataset(data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=image_count,
items_to_descriptions=items_to_descriptions,
num_classes=class_count,
labels_to_names=labels_to_names,
shuffle=True))
def main(_):
tf.logging.set_verbosity(tf.logging.INFO)
with tf.Graph().as_default():
deploy_config = model_deploy.DeploymentConfig(num_clones=FLAGS.num_clones,
clone_on_cpu=FLAGS.clone_on_cpu,
replica_id=0,
num_replicas=1,
num_ps_tasks=0)
with tf.device(deploy_config.variables_device()):
global_step = slim.create_global_step()
image_count, class_count = get_image_and_class_count(FLAGS.dataset_dir, 'train')
dataset = get_dataset('aerial', FLAGS.dataset_dir, image_count, class_count, 'train')
network_fn = get_network_fn(num_classes=(dataset.num_classes), weight_decay=FLAGS.weight_decay)
image_preprocessing_fn = get_preprocessing()
with tf.device(deploy_config.inputs_device()):
provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20 * FLAGS.batch_size,
common_queue_min=10 * FLAGS.batch_size)
[image, label] = provider.get(['image', 'label'])
image = image_preprocessing_fn(image, 224, 224)
images, labels = tf.train.batch([image, label],
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5 * FLAGS.batch_size)
labels = slim.one_hot_encoding(labels, dataset.num_classes)
batch_queue = slim.prefetch_queue.prefetch_queue([images, labels], capacity=2 * deploy_config.num_clones)
def clone_fn(batch_queue):
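            # One training clone: dequeue a preprocessed batch, run the network, and attach the softmax loss.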
images, labels = batch_queue.dequeue()
logits, end_points = network_fn(images)
            logits = tf.squeeze(logits)  # collapse singleton spatial dims so logits match the [batch, num_classes] labels
slim.losses.softmax_cross_entropy(logits, labels, label_smoothing=FLAGS.label_smoothing, weights=1.0)
return(end_points)
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
clones = model_deploy.create_clones(deploy_config, clone_fn, [batch_queue])
first_clone_scope = deploy_config.clone_scope(0)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, first_clone_scope)
end_points = clones[0].outputs
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram('activations/' + end_point, x))
summaries.add(tf.summary.scalar('sparsity/' + end_point, tf.nn.zero_fraction(x)))
for loss in tf.get_collection(tf.GraphKeys.LOSSES, first_clone_scope):
summaries.add(tf.summary.scalar('losses/%s' % loss.op.name, loss))
for variable in slim.get_model_variables():
summaries.add(tf.summary.histogram(variable.op.name, variable))
with tf.device(deploy_config.optimizer_device()):
decay_steps = int(dataset.num_samples / FLAGS.batch_size * FLAGS.num_epochs_per_decay)
learning_rate = tf.train.exponential_decay(FLAGS.learning_rate,
global_step,
decay_steps,
FLAGS.learning_rate_decay_factor,
staircase=True,
name='exponential_decay_learning_rate')
optimizer = tf.train.RMSPropOptimizer(learning_rate,
decay=FLAGS.rmsprop_decay,
momentum=FLAGS.rmsprop_momentum,
epsilon=FLAGS.opt_epsilon)
summaries.add(tf.summary.scalar('learning_rate', learning_rate))
variables_to_train = _get_variables_to_train()
total_loss, clones_gradients = model_deploy.optimize_clones(clones, optimizer, var_list=variables_to_train)
summaries.add(tf.summary.scalar('total_loss', total_loss))
grad_updates = optimizer.apply_gradients(clones_gradients, global_step=global_step)
update_ops.append(grad_updates)
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op], total_loss, name='train_op')
summaries |= set(tf.get_collection(tf.GraphKeys.SUMMARIES, first_clone_scope))
summary_op = tf.summary.merge(list(summaries), name='summary_op')
slim.learning.train(train_tensor,
logdir=FLAGS.train_dir,
master='',
is_chief=True,
init_fn=_get_init_fn(),
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
log_every_n_steps=FLAGS.log_every_n_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
sync_optimizer=None)
if __name__ == '__main__':
tf.app.run()
|
[
"tensorflow.app.flags.DEFINE_float",
"tensorflow.nn.zero_fraction",
"tensorflow.get_collection",
"tensorflow.train.RMSPropOptimizer",
"tensorflow.logging.set_verbosity",
"tensorflow.app.flags.DEFINE_boolean",
"numpy.random.randint",
"tensorflow.train.latest_checkpoint",
"tensorflow.python.ops.control_flow_ops.with_dependencies",
"tensorflow.split",
"tensorflow.app.flags.DEFINE_integer",
"os.path.join",
"tensorflow.train.batch",
"tensorflow.concat",
"nets.resnet_v1.resnet_arg_scope",
"tensorflow.summary.histogram",
"tensorflow.to_float",
"tensorflow.squeeze",
"tensorflow.random_crop",
"tensorflow.app.run",
"deployment.model_deploy.DeploymentConfig",
"tensorflow.summary.image",
"tensorflow.summary.scalar",
"tensorflow.gfile.IsDirectory",
"tensorflow.image.random_flip_left_right",
"tensorflow.group",
"functools.wraps",
"tensorflow.Graph",
"tensorflow.train.exponential_decay",
"tensorflow.expand_dims",
"deployment.model_deploy.create_clones",
"deployment.model_deploy.optimize_clones",
"tensorflow.zeros",
"tensorflow.app.flags.DEFINE_string",
"tensorflow.FixedLenFeature",
"tensorflow.gfile.Open",
"tensorflow.image.resize_bilinear"
] |
[((1201, 1324), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""train_dir"""', '"""D:\\\\tf\\\\models"""', '"""Directory where checkpoints and event logs are written to."""'], {}), "('train_dir', 'D:\\\\tf\\\\models',\n 'Directory where checkpoints and event logs are written to.')\n", (1227, 1324), True, 'import tensorflow as tf\n'), ((1375, 1467), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_name"""', '"""aerial"""', '"""The name of the dataset to load."""'], {}), "('dataset_name', 'aerial',\n 'The name of the dataset to load.')\n", (1401, 1467), True, 'import tensorflow as tf\n'), ((1464, 1595), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""dataset_dir"""', '"""D:\\\\combined\\\\train_subsample"""', '"""The directory where the dataset files are stored."""'], {}), "('dataset_dir', 'D:\\\\combined\\\\train_subsample',\n 'The directory where the dataset files are stored.')\n", (1490, 1595), True, 'import tensorflow as tf\n'), ((1646, 1777), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_path"""', '"""D:\\\\tf\\\\resnet_v1_50.ckpt"""', '"""The path to a checkpoint from which to fine-tune."""'], {}), "('checkpoint_path', 'D:\\\\tf\\\\resnet_v1_50.ckpt',\n 'The path to a checkpoint from which to fine-tune.')\n", (1672, 1777), True, 'import tensorflow as tf\n'), ((1829, 2012), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""checkpoint_exclude_scopes"""', '"""resnet_v1_50/logits"""', '"""Comma-separated list of scopes of variables to exclude when restoring from a checkpoint."""'], {}), "('checkpoint_exclude_scopes',\n 'resnet_v1_50/logits',\n 'Comma-separated list of scopes of variables to exclude when restoring from a checkpoint.'\n )\n", (1855, 2012), True, 'import tensorflow as tf\n'), ((2057, 2257), 'tensorflow.app.flags.DEFINE_string', 'tf.app.flags.DEFINE_string', (['"""trainable_scopes"""', '"""resnet_v1_50/logits"""', '"""Comma-separated list of scopes to filter the set of variables to train.By default, None would train all the variables."""'], {}), "('trainable_scopes', 'resnet_v1_50/logits',\n 'Comma-separated list of scopes to filter the set of variables to train.By default, None would train all the variables.'\n )\n", (2083, 2257), True, 'import tensorflow as tf\n'), ((2307, 2392), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_clones"""', '(1)', '"""Number of model clones to deploy."""'], {}), "('num_clones', 1,\n 'Number of model clones to deploy.')\n", (2334, 2392), True, 'import tensorflow as tf\n'), ((2389, 2474), 'tensorflow.app.flags.DEFINE_boolean', 'tf.app.flags.DEFINE_boolean', (['"""clone_on_cpu"""', '(False)', '"""Use CPUs to deploy clones."""'], {}), "('clone_on_cpu', False, 'Use CPUs to deploy clones.'\n )\n", (2416, 2474), True, 'import tensorflow as tf\n'), ((2470, 2586), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_readers"""', '(4)', '"""The number of parallel readers that read data from the dataset."""'], {}), "('num_readers', 4,\n 'The number of parallel readers that read data from the dataset.')\n", (2497, 2586), True, 'import tensorflow as tf\n'), ((2583, 2699), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""num_preprocessing_threads"""', '(4)', '"""The number of threads used to create the batches."""'], {}), "('num_preprocessing_threads', 4,\n 'The number of threads used to create the batches.')\n", (2610, 2699), True, 'import tensorflow as tf\n'), ((2696, 2798), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""log_every_n_steps"""', '(10)', '"""The frequency with which logs are printed."""'], {}), "('log_every_n_steps', 10,\n 'The frequency with which logs are printed.')\n", (2723, 2798), True, 'import tensorflow as tf\n'), ((2795, 2915), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_summaries_secs"""', '(600)', '"""The frequency with which summaries are saved, in seconds."""'], {}), "('save_summaries_secs', 600,\n 'The frequency with which summaries are saved, in seconds.')\n", (2822, 2915), True, 'import tensorflow as tf\n'), ((2912, 3030), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""save_interval_secs"""', '(600)', '"""The frequency with which the model is saved, in seconds."""'], {}), "('save_interval_secs', 600,\n 'The frequency with which the model is saved, in seconds.')\n", (2939, 3030), True, 'import tensorflow as tf\n'), ((3028, 3122), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""weight_decay"""', '(4e-05)', '"""The weight decay on the model weights."""'], {}), "('weight_decay', 4e-05,\n 'The weight decay on the model weights.')\n", (3053, 3122), True, 'import tensorflow as tf\n'), ((3121, 3206), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""opt_epsilon"""', '(1.0)', '"""Epsilon term for the optimizer."""'], {}), "('opt_epsilon', 1.0, 'Epsilon term for the optimizer.'\n )\n", (3146, 3206), True, 'import tensorflow as tf\n'), ((3202, 3265), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""rmsprop_momentum"""', '(0.9)', '"""Momentum."""'], {}), "('rmsprop_momentum', 0.9, 'Momentum.')\n", (3227, 3265), True, 'import tensorflow as tf\n'), ((3266, 3340), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""rmsprop_decay"""', '(0.9)', '"""Decay term for RMSProp."""'], {}), "('rmsprop_decay', 0.9, 'Decay term for RMSProp.')\n", (3291, 3340), True, 'import tensorflow as tf\n'), ((3341, 3415), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate"""', '(0.02)', '"""Initial learning rate."""'], {}), "('learning_rate', 0.02, 'Initial learning rate.')\n", (3366, 3415), True, 'import tensorflow as tf\n'), ((3416, 3503), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""label_smoothing"""', '(0.0)', '"""The amount of label smoothing."""'], {}), "('label_smoothing', 0.0,\n 'The amount of label smoothing.')\n", (3441, 3503), True, 'import tensorflow as tf\n'), ((3500, 3595), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""learning_rate_decay_factor"""', '(0.9)', '"""Learning rate decay factor."""'], {}), "('learning_rate_decay_factor', 0.9,\n 'Learning rate decay factor.')\n", (3525, 3595), True, 'import tensorflow as tf\n'), ((3592, 3704), 'tensorflow.app.flags.DEFINE_float', 'tf.app.flags.DEFINE_float', (['"""num_epochs_per_decay"""', '(2.0)', '"""Number of epochs after which learning rate decays."""'], {}), "('num_epochs_per_decay', 2.0,\n 'Number of epochs after which learning rate decays.')\n", (3617, 3704), True, 'import tensorflow as tf\n'), ((3701, 3822), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""replicas_to_aggregate"""', '(1)', '"""The number of gradients to collect before updating params."""'], {}), "('replicas_to_aggregate', 1,\n 'The number of gradients to collect before updating params.')\n", (3728, 3822), True, 'import tensorflow as tf\n'), ((3819, 3908), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""batch_size"""', '(32)', '"""The number of samples in each batch."""'], {}), "('batch_size', 32,\n 'The number of samples in each batch.')\n", (3846, 3908), True, 'import tensorflow as tf\n'), ((3905, 4006), 'tensorflow.app.flags.DEFINE_integer', 'tf.app.flags.DEFINE_integer', (['"""max_number_of_steps"""', '(4000)', '"""The maximum number of training steps."""'], {}), "('max_number_of_steps', 4000,\n 'The maximum number of training steps.')\n", (3932, 4006), True, 'import tensorflow as tf\n'), ((4396, 4431), 'os.path.join', 'os.path.join', (['dataset_dir', 'filename'], {}), '(dataset_dir, filename)\n', (4408, 4431), False, 'import os\n'), ((5078, 5140), 'tensorflow.split', 'tf.split', ([], {'axis': '(2)', 'num_or_size_splits': 'num_channels', 'value': 'image'}), '(axis=2, num_or_size_splits=num_channels, value=image)\n', (5086, 5140), True, 'import tensorflow as tf\n'), ((5218, 5252), 'tensorflow.concat', 'tf.concat', ([], {'axis': '(2)', 'values': 'channels'}), '(axis=2, values=channels)\n', (5227, 5252), True, 'import tensorflow as tf\n'), ((6243, 6296), 'nets.resnet_v1.resnet_arg_scope', 'resnet_v1.resnet_arg_scope', ([], {'weight_decay': 'weight_decay'}), '(weight_decay=weight_decay)\n', (6269, 6296), False, 'from nets import resnet_v1\n'), ((6336, 6357), 'functools.wraps', 'functools.wraps', (['func'], {}), '(func)\n', (6351, 6357), False, 'import functools\n'), ((7506, 7549), 'tensorflow.gfile.IsDirectory', 'tf.gfile.IsDirectory', (['FLAGS.checkpoint_path'], {}), '(FLAGS.checkpoint_path)\n', (7526, 7549), True, 'import tensorflow as tf\n'), ((9782, 9823), 'tensorflow.logging.set_verbosity', 'tf.logging.set_verbosity', (['tf.logging.INFO'], {}), '(tf.logging.INFO)\n', (9806, 9823), True, 'import tensorflow as tf\n'), ((15311, 15323), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (15321, 15323), True, 'import tensorflow as tf\n'), ((4109, 4160), 'os.path.join', 'os.path.join', (['dataset_dir', '"""dataset_split_info.csv"""'], {}), "(dataset_dir, 'dataset_split_info.csv')\n", (4121, 4160), False, 'import os\n'), ((4441, 4476), 'tensorflow.gfile.Open', 'tf.gfile.Open', (['labels_filename', '"""r"""'], {}), "(labels_filename, 'r')\n", (4454, 4476), True, 'import tensorflow as tf\n'), ((5551, 5578), 'numpy.random.randint', 'np.random.randint', (['(175)', '(223)'], {}), '(175, 223)\n', (5568, 5578), True, 'import numpy as np\n'), ((5605, 5660), 'tensorflow.random_crop', 'tf.random_crop', (['image', '[output_height, output_width, 3]'], {}), '(image, [output_height, output_width, 3])\n', (5619, 5660), True, 'import tensorflow as tf\n'), ((5687, 5721), 'tensorflow.expand_dims', 'tf.expand_dims', (['distorted_image', '(0)'], {}), '(distorted_image, 0)\n', (5701, 5721), True, 'import tensorflow as tf\n'), ((5746, 5843), 'tensorflow.image.resize_bilinear', 'tf.image.resize_bilinear', (['distorted_image', '[output_height, output_width]'], {'align_corners': '(False)'}), '(distorted_image, [output_height, output_width],\n align_corners=False)\n', (5770, 5843), True, 'import tensorflow as tf\n'), ((5864, 5889), 'tensorflow.squeeze', 'tf.squeeze', (['resized_image'], {}), '(resized_image)\n', (5874, 5889), True, 'import tensorflow as tf\n'), ((5980, 6026), 'tensorflow.image.random_flip_left_right', 'tf.image.random_flip_left_right', (['resized_image'], {}), '(resized_image)\n', (6011, 6026), True, 'import tensorflow as tf\n'), ((6044, 6070), 'tensorflow.to_float', 'tf.to_float', (['resized_image'], {}), '(resized_image)\n', (6055, 6070), True, 'import tensorflow as tf\n'), ((6807, 6870), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['learning_rate'], {'name': '"""training/Learning Rate"""'}), "(learning_rate, name='training/Learning Rate')\n", (6824, 6870), True, 'import tensorflow as tf\n'), ((6958, 7001), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.train_dir'], {}), '(FLAGS.train_dir)\n', (6984, 7001), True, 'import tensorflow as tf\n'), ((7577, 7626), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoint_path'], {}), '(FLAGS.checkpoint_path)\n', (7603, 7626), True, 'import tensorflow as tf\n'), ((8128, 8186), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.TRAINABLE_VARIABLES', 'scope'], {}), '(tf.GraphKeys.TRAINABLE_VARIABLES, scope)\n', (8145, 8186), True, 'import tensorflow as tf\n'), ((8676, 8727), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '""""""'}), "((), tf.string, default_value='')\n", (8694, 8727), True, 'import tensorflow as tf\n'), ((8769, 8823), 'tensorflow.FixedLenFeature', 'tf.FixedLenFeature', (['()', 'tf.string'], {'default_value': '"""png"""'}), "((), tf.string, default_value='png')\n", (8787, 8823), True, 'import tensorflow as tf\n'), ((9882, 10024), 'deployment.model_deploy.DeploymentConfig', 'model_deploy.DeploymentConfig', ([], {'num_clones': 'FLAGS.num_clones', 'clone_on_cpu': 'FLAGS.clone_on_cpu', 'replica_id': '(0)', 'num_replicas': '(1)', 'num_ps_tasks': '(0)'}), '(num_clones=FLAGS.num_clones, clone_on_cpu=\n FLAGS.clone_on_cpu, replica_id=0, num_replicas=1, num_ps_tasks=0)\n', (9911, 10024), False, 'from deployment import model_deploy\n'), ((12195, 12261), 'deployment.model_deploy.create_clones', 'model_deploy.create_clones', (['deploy_config', 'clone_fn', '[batch_queue]'], {}), '(deploy_config, clone_fn, [batch_queue])\n', (12221, 12261), False, 'from deployment import model_deploy\n'), ((12340, 12401), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.UPDATE_OPS', 'first_clone_scope'], {}), '(tf.GraphKeys.UPDATE_OPS, first_clone_scope)\n', (12357, 12401), True, 'import tensorflow as tf\n'), ((12710, 12767), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.LOSSES', 'first_clone_scope'], {}), '(tf.GraphKeys.LOSSES, first_clone_scope)\n', (12727, 12767), True, 'import tensorflow as tf\n'), ((14077, 14153), 'deployment.model_deploy.optimize_clones', 'model_deploy.optimize_clones', (['clones', 'optimizer'], {'var_list': 'variables_to_train'}), '(clones, optimizer, var_list=variables_to_train)\n', (14105, 14153), False, 'from deployment import model_deploy\n'), ((14375, 14396), 'tensorflow.group', 'tf.group', (['*update_ops'], {}), '(*update_ops)\n', (14383, 14396), True, 'import tensorflow as tf\n'), ((14420, 14496), 'tensorflow.python.ops.control_flow_ops.with_dependencies', 'control_flow_ops.with_dependencies', (['[update_op]', 'total_loss'], {'name': '"""train_op"""'}), "([update_op], total_loss, name='train_op')\n", (14454, 14496), False, 'from tensorflow.python.ops import control_flow_ops\n'), ((6740, 6784), 'tensorflow.summary.image', 'tf.summary.image', (['variable.op.name', 'variable'], {}), '(variable.op.name, variable)\n', (6756, 6784), True, 'import tensorflow as tf\n'), ((11300, 11440), 'tensorflow.train.batch', 'tf.train.batch', (['[image, label]'], {'batch_size': 'FLAGS.batch_size', 'num_threads': 'FLAGS.num_preprocessing_threads', 'capacity': '(5 * FLAGS.batch_size)'}), '([image, label], batch_size=FLAGS.batch_size, num_threads=\n FLAGS.num_preprocessing_threads, capacity=5 * FLAGS.batch_size)\n', (11314, 11440), True, 'import tensorflow as tf\n'), ((11918, 11936), 'tensorflow.squeeze', 'tf.squeeze', (['logits'], {}), '(logits)\n', (11928, 11936), True, 'import tensorflow as tf\n'), ((12134, 12175), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES'], {}), '(tf.GraphKeys.SUMMARIES)\n', (12151, 12175), True, 'import tensorflow as tf\n'), ((13162, 13334), 'tensorflow.train.exponential_decay', 'tf.train.exponential_decay', (['FLAGS.learning_rate', 'global_step', 'decay_steps', 'FLAGS.learning_rate_decay_factor'], {'staircase': '(True)', 'name': '"""exponential_decay_learning_rate"""'}), "(FLAGS.learning_rate, global_step, decay_steps,\n FLAGS.learning_rate_decay_factor, staircase=True, name=\n 'exponential_decay_learning_rate')\n", (13188, 13334), True, 'import tensorflow as tf\n'), ((13625, 13756), 'tensorflow.train.RMSPropOptimizer', 'tf.train.RMSPropOptimizer', (['learning_rate'], {'decay': 'FLAGS.rmsprop_decay', 'momentum': 'FLAGS.rmsprop_momentum', 'epsilon': 'FLAGS.opt_epsilon'}), '(learning_rate, decay=FLAGS.rmsprop_decay,\n momentum=FLAGS.rmsprop_momentum, epsilon=FLAGS.opt_epsilon)\n', (13650, 13756), True, 'import tensorflow as tf\n'), ((14176, 14219), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""total_loss"""', 'total_loss'], {}), "('total_loss', total_loss)\n", (14193, 14219), True, 'import tensorflow as tf\n'), ((14523, 14583), 'tensorflow.get_collection', 'tf.get_collection', (['tf.GraphKeys.SUMMARIES', 'first_clone_scope'], {}), '(tf.GraphKeys.SUMMARIES, first_clone_scope)\n', (14540, 14583), True, 'import tensorflow as tf\n'), ((8981, 9009), 'tensorflow.zeros', 'tf.zeros', (['[]'], {'dtype': 'tf.int64'}), '([], dtype=tf.int64)\n', (8989, 9009), True, 'import tensorflow as tf\n'), ((9833, 9843), 'tensorflow.Graph', 'tf.Graph', ([], {}), '()\n', (9841, 9843), True, 'import tensorflow as tf\n'), ((12543, 12594), 'tensorflow.summary.histogram', 'tf.summary.histogram', (["('activations/' + end_point)", 'x'], {}), "('activations/' + end_point, x)\n", (12563, 12594), True, 'import tensorflow as tf\n'), ((12795, 12846), 'tensorflow.summary.scalar', 'tf.summary.scalar', (["('losses/%s' % loss.op.name)", 'loss'], {}), "('losses/%s' % loss.op.name, loss)\n", (12812, 12846), True, 'import tensorflow as tf\n'), ((12926, 12974), 'tensorflow.summary.histogram', 'tf.summary.histogram', (['variable.op.name', 'variable'], {}), '(variable.op.name, variable)\n', (12946, 12974), True, 'import tensorflow as tf\n'), ((13929, 13978), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning_rate"""', 'learning_rate'], {}), "('learning_rate', learning_rate)\n", (13946, 13978), True, 'import tensorflow as tf\n'), ((12665, 12687), 'tensorflow.nn.zero_fraction', 'tf.nn.zero_fraction', (['x'], {}), '(x)\n', (12684, 12687), True, 'import tensorflow as tf\n')]
|
import datetime
from tornado.web import RequestHandler
from websdk.db_context import DBContext
from libs.aws.session import get_aws_session
from libs.base_handler import BaseHandler
from libs.web_logs import ins_log
from models.com_ami import ComAmi
from models.uncom_ec2 import UnComEc2
from settings import settings
class AmiHandler(BaseHandler):
    def post(self, *args, **kwargs):
        """Add a compliant AMI record"""
key = self.get_argument('key', default=None, strip=True)
with DBContext('w') as session:
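            # Only register AMIs that are not already recorded as compliant.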
if not session.query(ComAmi).filter(ComAmi.ami_id == key).first():
s = get_aws_session(**settings.get("aws_key"))
clients = s.client("ec2")
try:
resp_dict = clients.describe_images(ImageIds=[key])
ami_info = resp_dict.get("Images")[0]
except Exception as e:
ami_info = None
ins_log.read_log("info", e)
if ami_info:
try:
ami_name = ami_info["Name"]
except Exception as e:
ami_name = "UnKnown"
ins_log.read_log("info", e)
try:
tag = ami_info["Tags"]
except Exception as e:
tag = None
ins_log.read_log("info", e)
name = "UnKnown"
if tag:
for i in tag:
if i["Key"] == "Name":
name = i["Value"]
times = ami_info["CreationDate"]
utc_date = datetime.datetime.strptime(times, "%Y-%m-%dT%H:%M:%S.000Z")
local_date = utc_date + datetime.timedelta(hours=8)
created_times = datetime.datetime.strftime(local_date, '%Y-%m-%d %H:%M:%S')
describe = ami_info["Description"]
if not describe:
describe = "UnKnown"
new_ami = ComAmi(ami_id=key, ami_name=ami_name, name=name,
creation_date=created_times, describe=describe,
create_time=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
session.add(new_ami)
else:
new_ami = ComAmi(ami_id=key, ami_name="UnKnown", name="UnKnown", describe="UnKnown",
creation_date=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")),
create_time=str(datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")))
session.add(new_ami)
                # Remove the EC2 records that correspond to this AMI from the database
session.query(UnComEc2).filter(UnComEc2.ami_id == key).delete()
session.commit()
ami_host_urls = [
(r"/v1/cmdb/add_ami/", AmiHandler),
]
if __name__ == '__main__':
pass
|
[
"datetime.datetime.strftime",
"websdk.db_context.DBContext",
"datetime.datetime.strptime",
"libs.web_logs.ins_log.read_log",
"settings.settings.get",
"datetime.timedelta",
"datetime.datetime.now"
] |
[((494, 508), 'websdk.db_context.DBContext', 'DBContext', (['"""w"""'], {}), "('w')\n", (503, 508), False, 'from websdk.db_context import DBContext\n'), ((1716, 1775), 'datetime.datetime.strptime', 'datetime.datetime.strptime', (['times', '"""%Y-%m-%dT%H:%M:%S.000Z"""'], {}), "(times, '%Y-%m-%dT%H:%M:%S.000Z')\n", (1742, 1775), False, 'import datetime\n'), ((1884, 1943), 'datetime.datetime.strftime', 'datetime.datetime.strftime', (['local_date', '"""%Y-%m-%d %H:%M:%S"""'], {}), "(local_date, '%Y-%m-%d %H:%M:%S')\n", (1910, 1943), False, 'import datetime\n'), ((638, 661), 'settings.settings.get', 'settings.get', (['"""aws_key"""'], {}), "('aws_key')\n", (650, 661), False, 'from settings import settings\n'), ((951, 978), 'libs.web_logs.ins_log.read_log', 'ins_log.read_log', (['"""info"""', 'e'], {}), "('info', e)\n", (967, 978), False, 'from libs.web_logs import ins_log\n'), ((1820, 1847), 'datetime.timedelta', 'datetime.timedelta', ([], {'hours': '(8)'}), '(hours=8)\n', (1838, 1847), False, 'import datetime\n'), ((1197, 1224), 'libs.web_logs.ins_log.read_log', 'ins_log.read_log', (['"""info"""', 'e'], {}), "('info', e)\n", (1213, 1224), False, 'from libs.web_logs import ins_log\n'), ((1399, 1426), 'libs.web_logs.ins_log.read_log', 'ins_log.read_log', (['"""info"""', 'e'], {}), "('info', e)\n", (1415, 1426), False, 'from libs.web_logs import ins_log\n'), ((2298, 2321), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2319, 2321), False, 'import datetime\n'), ((2578, 2601), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2599, 2601), False, 'import datetime\n'), ((2687, 2710), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2708, 2710), False, 'import datetime\n')]
|
import jieba
import pandas as pd
import numpy as np
from sklearn import feature_extraction
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.feature_extraction.text import CountVectorizer
import time
import os
import re
def readExcel(url):
df=pd.read_excel(url,na_values='')
return df
def writeExcel(df,url=''):
write = pd.ExcelWriter(url)
df.to_excel(write, sheet_name='Sheet1',merge_cells=False)
write.save()
def genDF(df_Base,df_IPC):
columns=df_Base['合并后'].unique()
index=df_IPC['NAME'].unique()
return pd.DataFrame(None,index=index,columns=columns)
pass
def checkVect(df_Input,vectkey,vectipc):
coripc=__ipc(df_Input['IPC'])
corkey=__jiebabatch(df_Input['ABSTRACT'])
matkey=vectkey.transform(corkey)
matipc=vectipc.transform(coripc)
matkey = np.where(matkey.toarray() > 0, 1, 0)
matipc = np.where(matipc.toarray() > 0, 1, 0)
return matkey,matipc
def addNewPT(key,newkey,ipc,newipc,Cpy,Cat,df_result=[],s_Cat=[],corkeyNo=[]):
    '''Outputs two results: 1. each company's patent records per industry category, 2. the category labels assigned to each patent'''
if df_result==[]:
df_result=pd.DataFrame(None,columns=['Company','Cat','Val'])
for i in range(newkey.shape[0]):
print('Processing--------------',i)
onekey=newkey[i]
oneipc=newipc[i]
companyName=Cpy[i]
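        # A rule fires when its keyword hits, gated by its IPC hits, reach the rule's required keyword count (corkeyNo).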
mat=np.dot(key, onekey) * np.dot(ipc, oneipc)
matcat=Cat[(mat-corkeyNo)>=0].unique()
        cat=[]  # records the category labels assigned to this patent
for j in matcat:
df_result=df_result.append({'Company':companyName,'Cat':j,'Val':1},ignore_index=True)
cat.append(j)
s_Cat.append(' '.join(cat))
return df_result,s_Cat
pass
def __jiebabatch(summary):
    '''Add a "None" placeholder to every row so that rows with missing values can still be matched'''
cor=[]
for j,i in enumerate(summary):
if j%1000==0:
print('ABSTRACT@----------',j)
subcor = ['None']
if type(i)!=type(1.1):
b=jieba.cut(i)
for j in b:
subcor.append(j)
joint=' '
subcor=joint.join(subcor)
else:
subcor='None'
cor.append(subcor)
return cor
pass
def __ipc(ipc):
    '''Add a "none" placeholder to every row so that rows with missing values can still be matched'''
cor=[]
lv0=re.compile(u'\D+[0-9]{1,3}')
lv1=re.compile(u'\D+[0-9]{1,3}\D+')
lv2=re.compile(u'\D+[0-9]{1,3}\D+[0-9]{1,3}')
for j,i in enumerate(ipc):
if j%1000==0:#todo
print('IPC@---------',j)
subcor=['none']
        if type(i)!=type(1.1):  # record up to three IPC hierarchy levels; modified 0828
subcor.append(lv0.findall(i)[0])
subcor.append(lv1.findall(i)[0])
if lv2.findall(i):
subcor.append(lv2.findall(i)[0])
a = i.split(sep='_')
if len(a)>1:
subcor.append(i)
joint = ' '
subcor = joint.join(subcor)
cor.append(subcor)
return cor
pass
def addDict(df):
    '''Register the classification keywords as new words in the jieba dictionary'''
word=set(df['keywords'].unique())
word1=set(df['keywords1'].unique())
word2 = set(df['keyward2'].unique())
word3 = set(df['notkeywords'].unique())
word=word | word1 | word2 | word3
for i in word:
jieba.add_word(str(i),freq=100)
pass
def getBaseCor(df):
    '''Build the keyword and IPC corpora used for TF-IDF'''
corkey=[]
coripc = []
corkeyNo=[]
for i in df.index:
subcor=[]
no=0
if type(df.loc[i,'keywords'])!=type(1.1):
subcor.append(df.loc[i,'keywords'])
no=no+1
if type(df.loc[i, 'keywords1']) != type(1.1):
subcor.append(df.loc[i, 'keywords1'])
no=no+1
else:
subcor=['None']
no=1
joint=' '
subcor=joint.join(subcor)
corkey.append(subcor)
corkeyNo.append(no)
subcor=[]
if type(df.loc[i, 'IPC']) != type(1.1):
subcor.append(df.loc[i, 'IPC'])
# a=df.loc[i,'IPC'].split(sep='/')
# if len(a)>1:
# subcor.append(a[0])
else:
subcor=['None']
joint=' '
subcor=joint.join(subcor)
coripc.append(subcor)
return corkey,coripc,np.array(corkeyNo)
def TFtraintfidf(corkey,coripc):
    '''Compute the token-occurrence matrices'''
vectorizerkey = CountVectorizer(analyzer='word',token_pattern='(?<KEY>')
#CountVectorizer(analyzer='word',token_pattern='(?<KEY>')
key=vectorizerkey.fit_transform(corkey)
vectorizeripc = CountVectorizer()
ipc=vectorizeripc.fit_transform(coripc)
return np.where(key.toarray()>0,1,0),vectorizerkey,np.where(ipc.toarray()>0,1,0),vectorizeripc
def readFilesName(dir):
name=[]
for parent,dirnames,filenames in os.walk(dir):
for filename in filenames:
url=parent+filename
name.append(url)
return name
if __name__=="__main__":
    # 1. init
docdir = os.path.abspath('..') + '\\doc\\'
name=readFilesName(docdir+'raw\\')
IndustryFileName='industry_sep.xlsx'
print('Initialize Finished')#todo
#df_IPC=readExcel(docdir+IPCFileName)
#2.read file
for j,i in enumerate(name):
df_Input=readExcel(i)
df_Input['IPC']=df_Input['IPC'].str.replace('/','_')
df_Base=readExcel(docdir+IndustryFileName)
df_Base['IPC']=df_Base['IPC'].str.replace('/','_')
addDict(df_Base)
print('Data_Read_Finished')#todo
#3.run
start=time.clock()
corkey, coripc,corkeyNo=getBaseCor(df_Base)
print('Classify Finished')#todo
key, vectkey, ipc, vectipc=TFtraintfidf(corkey,coripc)
newkey,newipc=checkVect(df_Input,vectkey,vectipc)
print('Analysising new input finished')#todo
Cpy=df_Input['APPLICATION']
Cat=df_Base['合并后']
w,cat=addNewPT(key, newkey, ipc, newipc, Cpy, Cat, df_result=[],s_Cat=[],corkeyNo=corkeyNo)
end=time.clock()
dur=end-start
df_f=w.groupby(['Company','Cat'])['Cat'].count()
#4.export excel
#for check
df_Input['Cat']=cat
writeExcel(df_Input, docdir + 're\\check0828' + str(j) + '.xlsx')
writeExcel(df_f,docdir+'re\\result0828'+str(j)+'.xlsx')
print(dur)
|
[
"pandas.DataFrame",
"sklearn.feature_extraction.text.CountVectorizer",
"os.path.abspath",
"jieba.cut",
"os.walk",
"time.clock",
"pandas.read_excel",
"numpy.array",
"numpy.dot",
"pandas.ExcelWriter",
"re.compile"
] |
[((272, 304), 'pandas.read_excel', 'pd.read_excel', (['url'], {'na_values': '""""""'}), "(url, na_values='')\n", (285, 304), True, 'import pandas as pd\n'), ((358, 377), 'pandas.ExcelWriter', 'pd.ExcelWriter', (['url'], {}), '(url)\n', (372, 377), True, 'import pandas as pd\n'), ((566, 614), 'pandas.DataFrame', 'pd.DataFrame', (['None'], {'index': 'index', 'columns': 'columns'}), '(None, index=index, columns=columns)\n', (578, 614), True, 'import pandas as pd\n'), ((2190, 2219), 're.compile', 're.compile', (['u"""\\\\D+[0-9]{1,3}"""'], {}), "(u'\\\\D+[0-9]{1,3}')\n", (2200, 2219), False, 'import re\n'), ((2227, 2260), 're.compile', 're.compile', (['u"""\\\\D+[0-9]{1,3}\\\\D+"""'], {}), "(u'\\\\D+[0-9]{1,3}\\\\D+')\n", (2237, 2260), False, 'import re\n'), ((2267, 2310), 're.compile', 're.compile', (['u"""\\\\D+[0-9]{1,3}\\\\D+[0-9]{1,3}"""'], {}), "(u'\\\\D+[0-9]{1,3}\\\\D+[0-9]{1,3}')\n", (2277, 2310), False, 'import re\n'), ((4194, 4251), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {'analyzer': '"""word"""', 'token_pattern': '"""(?<KEY>"""'}), "(analyzer='word', token_pattern='(?<KEY>')\n", (4209, 4251), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4377, 4394), 'sklearn.feature_extraction.text.CountVectorizer', 'CountVectorizer', ([], {}), '()\n', (4392, 4394), False, 'from sklearn.feature_extraction.text import CountVectorizer\n'), ((4612, 4624), 'os.walk', 'os.walk', (['dir'], {}), '(dir)\n', (4619, 4624), False, 'import os\n'), ((1106, 1159), 'pandas.DataFrame', 'pd.DataFrame', (['None'], {'columns': "['Company', 'Cat', 'Val']"}), "(None, columns=['Company', 'Cat', 'Val'])\n", (1118, 1159), True, 'import pandas as pd\n'), ((4104, 4122), 'numpy.array', 'np.array', (['corkeyNo'], {}), '(corkeyNo)\n', (4112, 4122), True, 'import numpy as np\n'), ((4790, 4811), 'os.path.abspath', 'os.path.abspath', (['""".."""'], {}), "('..')\n", (4805, 4811), False, 'import os\n'), ((5331, 5343), 'time.clock', 'time.clock', ([], {}), '()\n', (5341, 5343), False, 'import time\n'), ((5786, 5798), 'time.clock', 'time.clock', ([], {}), '()\n', (5796, 5798), False, 'import time\n'), ((1327, 1346), 'numpy.dot', 'np.dot', (['key', 'onekey'], {}), '(key, onekey)\n', (1333, 1346), True, 'import numpy as np\n'), ((1349, 1368), 'numpy.dot', 'np.dot', (['ipc', 'oneipc'], {}), '(ipc, oneipc)\n', (1355, 1368), True, 'import numpy as np\n'), ((1903, 1915), 'jieba.cut', 'jieba.cut', (['i'], {}), '(i)\n', (1912, 1915), False, 'import jieba\n')]
|
# ============================================================================
# Chapter 10: Home Appliances and Cooking
# Ver. 04 (Energy Consumption Performance Calculation Program (residential version), Ver. 02 onward)
# ============================================================================
import numpy as np
from pyhees.section11_3 import load_schedule, get_schedule_app, get_schedule_cc
# ============================================================================
# 5. Primary energy consumption of home appliances
# ============================================================================
# ============================================================================
# 5.1 Electricity consumption
# ============================================================================
def calc_E_E_AP_d_t(n_p):
    """Hourly electricity consumption of home appliances
    Args:
        n_p(float): virtual number of occupants
    Returns:
        ndarray: hourly electricity consumption of home appliances
    """
schedule = load_schedule()
schedule_app = get_schedule_app(schedule)
if 1 <= n_p and n_p <= 2:
E_E_AP_1_d_t = get_E_E_AP_p_d_t(1, schedule_app)
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
return E_E_AP_1_d_t * (2 - n_p) / (2 - 1) + E_E_AP_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
E_E_AP_2_d_t = get_E_E_AP_p_d_t(2, schedule_app)
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
return E_E_AP_2_d_t * (3 - n_p) / (3 - 2) + E_E_AP_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
E_E_AP_3_d_t = get_E_E_AP_p_d_t(3, schedule_app)
E_E_AP_4_d_t = get_E_E_AP_p_d_t(4, schedule_app)
return E_E_AP_3_d_t * (4 - n_p) / (4 - 3) + E_E_AP_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
# ============================================================================
# 5.2 Gas consumption
# ============================================================================
def get_E_G_AP_d_t():
    """Hourly gas consumption of home appliances
    Args:
    Returns:
        ndarray: hourly gas consumption of home appliances
    """
return np.zeros(24 * 365)
# ============================================================================
# 5.3 Kerosene consumption
# ============================================================================
def get_E_K_AP_d_t():
    """Hourly kerosene consumption of home appliances
    Args:
    Returns:
        ndarray: hourly kerosene consumption of home appliances
    """
return np.zeros(24 * 365)
# ============================================================================
# 5.4 Primary energy consumption from other fuels
# ============================================================================
def get_E_M_AP_d_t():
    """Hourly primary energy consumption of home appliances from other fuels
    Args:
    Returns:
        ndarray: hourly primary energy consumption of home appliances from other fuels
    """
return np.zeros(24 * 365)
# ============================================================================
# 6. Primary energy consumption of cooking
# ============================================================================
# ============================================================================
# 6.1 Electricity consumption
# ============================================================================
def get_E_E_CC_d_t():
    """Hourly electricity consumption of cooking
    Args:
    Returns:
        ndarray: hourly electricity consumption of cooking
    """
return np.zeros(24 * 365)
# ============================================================================
# 6.2 Gas consumption
# ============================================================================
def calc_E_G_CC_d_t(n_p):
    """Hourly gas consumption of cooking
    Args:
        n_p(float): virtual number of occupants
    Returns:
        ndarray: hourly gas consumption of cooking
    """
schedule = load_schedule()
schedule_cc = get_schedule_cc(schedule)
if 1 <= n_p and n_p <= 2:
E_G_CC_1_d_t = get_E_G_CC_p_d_t(1, schedule_cc)
E_G_CC_2_d_t = get_E_G_CC_p_d_t(2, schedule_cc)
return E_G_CC_1_d_t * (2 - n_p) / (2 - 1) + E_G_CC_2_d_t * (n_p - 1) / (2 - 1)
elif 2 <= n_p and n_p <= 3:
E_G_CC_2_d_t = get_E_G_CC_p_d_t(2, schedule_cc)
E_G_CC_3_d_t = get_E_G_CC_p_d_t(3, schedule_cc)
return E_G_CC_2_d_t * (3 - n_p) / (3 - 2) + E_G_CC_3_d_t * (n_p - 2) / (3 - 2)
elif 3 <= n_p and n_p <= 4:
E_G_CC_3_d_t = get_E_G_CC_p_d_t(3, schedule_cc)
E_G_CC_4_d_t = get_E_G_CC_p_d_t(4, schedule_cc)
return E_G_CC_3_d_t * (4 - n_p) / (4 - 3) + E_G_CC_4_d_t * (n_p - 3) / (4 - 3)
else:
raise ValueError(n_p)
# ============================================================================
# 6.3 Kerosene consumption
# ============================================================================
def get_E_K_CC_d_t():
    """Hourly kerosene consumption of cooking
    Args:
    Returns:
        ndarray: hourly kerosene consumption of cooking
    """
return np.zeros(24 * 365)
# ============================================================================
# 6.4 Primary energy consumption from other fuels
# ============================================================================
def get_E_M_CC_d_t():
    """Hourly primary energy consumption of cooking from other fuels
    Args:
    Returns:
        ndarray: hourly primary energy consumption of cooking from other fuels
    """
return np.zeros(24 * 365)
# ============================================================================
# Appendix A: Calculation of hourly energy consumption
# ============================================================================
# ============================================================================
# A.1 Electricity consumption by home appliances
# ============================================================================
def get_E_E_AP_p_d_t(p, schedule_app):
    """Hourly electricity consumption of home appliances with p occupants
    Args:
        p(float): number of occupants
        schedule_app(ndarray): appliance schedule
    Returns:
        ndarray: hourly electricity consumption of home appliances with p occupants
    """
    # weekday
workday = np.tile([get_table_a_1()[i][(p - 1) * 3 + 0] for i in range(24)], 365)
    # holiday, out of the house
holiday_out = np.tile([get_table_a_1()[i][(p - 1) * 3 + 1] for i in range(24)], 365)
    # holiday, at home
holiday_in = np.tile([get_table_a_1()[i][(p - 1) * 3 + 2] for i in range(24)], 365)
    # expand the day-type schedule to hourly resolution
schedule = np.repeat(schedule_app, 24)
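    # Select each hour's value from the profile matching the day type: '平日' (weekday), '休日外' (holiday, out), '休日在' (holiday, at home).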
return (workday * (schedule == '平日')
+ holiday_out * (schedule == '休日外')
+ holiday_in * (schedule == '休日在'))
# ============================================================================
# A.2 Gas consumption by cooking
# ============================================================================
def get_E_G_CC_p_d_t(p, schedule_cc):
    """Hourly gas consumption of cooking with p occupants
    Args:
        p(float): number of occupants
        schedule_cc(ndarray): cooking schedule
    Returns:
        ndarray: hourly gas consumption of cooking with p occupants
    """
    # weekday
workday = np.tile([get_table_a_2()[i][(p - 1) * 3 + 0] for i in range(24)], 365)
    # holiday, out of the house
holiday_out = np.tile([get_table_a_2()[i][(p - 1) * 3 + 1] for i in range(24)], 365)
    # holiday, at home
holiday_in = np.tile([get_table_a_2()[i][(p - 1) * 3 + 2] for i in range(24)], 365)
    # expand the day-type schedule to hourly resolution
schedule = np.repeat(schedule_cc, 24)
return (workday * (schedule == '平日')
+ holiday_out * (schedule == '休日外')
+ holiday_in * (schedule == '休日在'))
def get_table_a_1():
    """Table A.1: Hourly electricity consumption by home appliances
    Args:
    Returns:
        list: Table A.1, hourly electricity consumption by home appliances
    """
table_a_1 = [
(0.1578, 0.1578, 0.1578, 0.1578, 0.1578, 0.1578, 0.1806, 0.1806, 0.1806, 0.1812, 0.1812, 0.1812),
(0.0483, 0.0483, 0.0483, 0.0483, 0.0483, 0.0483, 0.0711, 0.0711, 0.0711, 0.0717, 0.0717, 0.0717),
(0.0560, 0.0560, 0.0560, 0.0561, 0.0561, 0.0561, 0.0788, 0.0788, 0.0788, 0.0795, 0.0795, 0.0795),
(0.0560, 0.0560, 0.0560, 0.0561, 0.0561, 0.0561, 0.0788, 0.0788, 0.0788, 0.0795, 0.0795, 0.0795),
(0.0483, 0.0483, 0.0483, 0.0483, 0.0483, 0.0483, 0.0711, 0.0711, 0.0711, 0.0717, 0.0717, 0.0717),
(0.0560, 0.0560, 0.0560, 0.0561, 0.0561, 0.0561, 0.0788, 0.0788, 0.0788, 0.0795, 0.0795, 0.0795),
(0.1925, 0.0859, 0.0560, 0.2611, 0.1159, 0.0561, 0.3525, 0.1685, 0.0788, 0.3531, 0.1692, 0.0795),
(0.1524, 0.2346, 0.1168, 0.2480, 0.2703, 0.1854, 0.3662, 0.3287, 0.2767, 0.3669, 0.3294, 0.2774),
(0.1091, 0.2282, 0.2156, 0.1448, 0.3325, 0.2812, 0.2032, 0.4595, 0.3696, 0.2039, 0.4602, 0.3702),
(0.3011, 0.0560, 0.2163, 0.3368, 0.0561, 0.2520, 0.3953, 0.0788, 0.3265, 0.3960, 0.0795, 0.3272),
(0.0483, 0.0483, 0.1917, 0.0483, 0.0483, 0.2274, 0.0711, 0.0711, 0.3128, 0.0717, 0.0717, 0.3134),
(0.0560, 0.0560, 0.1994, 0.0561, 0.0561, 0.2352, 0.0788, 0.0788, 0.3150, 0.0795, 0.0795, 0.3157),
(0.1983, 0.0560, 0.1983, 0.2727, 0.0561, 0.2727, 0.3698, 0.0788, 0.3698, 0.3705, 0.0795, 0.3705),
(0.0483, 0.0483, 0.0483, 0.0483, 0.0483, 0.0483, 0.0711, 0.0711, 0.0711, 0.0717, 0.0717, 0.0717),
(0.0560, 0.0560, 0.0560, 0.0561, 0.0561, 0.0561, 0.0788, 0.0788, 0.0788, 0.0795, 0.0795, 0.0795),
(0.0560, 0.0560, 0.0560, 0.0561, 0.0561, 0.0561, 0.0788, 0.0788, 0.0788, 0.0795, 0.0795, 0.0795),
(0.0483, 0.0483, 0.2095, 0.0483, 0.0483, 0.2750, 0.0711, 0.0711, 0.3956, 0.0717, 0.0717, 0.5453),
(0.1304, 0.0560, 0.2423, 0.2048, 0.0561, 0.2781, 0.3019, 0.0788, 0.3689, 0.3026, 0.0795, 0.5187),
(0.3030, 0.0560, 0.1819, 0.3387, 0.0561, 0.2177, 0.3972, 0.0788, 0.2978, 0.5469, 0.0795, 0.4476),
(0.0991, 0.0483, 0.0998, 0.1348, 0.0483, 0.1355, 0.1932, 0.0711, 0.2049, 0.1939, 0.0717, 0.2056),
(0.0917, 0.1304, 0.0917, 0.1275, 0.2048, 0.1275, 0.2102, 0.3129, 0.2183, 0.2109, 0.4626, 0.2190),
(0.1216, 0.1755, 0.1755, 0.1873, 0.2411, 0.2411, 0.2837, 0.3322, 0.3458, 0.2844, 0.4447, 0.3465),
(0.1738, 0.0510, 0.1139, 0.1917, 0.0511, 0.1795, 0.2620, 0.0952, 0.3002, 0.2626, 0.0959, 0.3009),
(0.1877, 0.1877, 0.1578, 0.2176, 0.2176, 0.1578, 0.2756, 0.2703, 0.1806, 0.2763, 0.2709, 0.1812),
]
return table_a_1
def get_table_a_2():
    """Table A.2: Hourly gas consumption by cooking
    Args:
    Returns:
        list: Table A.2, hourly gas consumption by cooking
    """
table_a_2 = [
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 1.5935, 0, 0, 1.9235, 0, 0, 2.2536, 0, 0),
(1.0672, 0, 0, 0, 1.116, 1.116, 0, 0, 0, 0, 0, 0),
(0, 1.0672, 1.0672, 0, 0, 0, 0, 1.3472, 1.3472, 0, 1.5783, 1.5783),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 1.1401, 1.3762, 0, 1.3762, 1.6123, 0, 1.6123),
(0, 0, 1.0902, 1.1401, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 5.4175, 0, 0, 6.5395, 0, 0, 7.6615),
(0, 0, 5.1806, 5.4175, 0, 0, 6.5395, 0, 0, 7.6615, 0, 0),
(5.1806, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),
]
return table_a_2
|
[
"pyhees.section11_3.get_schedule_cc",
"pyhees.section11_3.get_schedule_app",
"numpy.zeros",
"pyhees.section11_3.load_schedule",
"numpy.repeat"
] |
[((823, 838), 'pyhees.section11_3.load_schedule', 'load_schedule', ([], {}), '()\n', (836, 838), False, 'from pyhees.section11_3 import load_schedule, get_schedule_app, get_schedule_cc\n'), ((858, 884), 'pyhees.section11_3.get_schedule_app', 'get_schedule_app', (['schedule'], {}), '(schedule)\n', (874, 884), False, 'from pyhees.section11_3 import load_schedule, get_schedule_app, get_schedule_cc\n'), ((1919, 1937), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (1927, 1937), True, 'import numpy as np\n'), ((2234, 2252), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (2242, 2252), True, 'import numpy as np\n'), ((2591, 2609), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (2599, 2609), True, 'import numpy as np\n'), ((3084, 3102), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (3092, 3102), True, 'import numpy as np\n'), ((3432, 3447), 'pyhees.section11_3.load_schedule', 'load_schedule', ([], {}), '()\n', (3445, 3447), False, 'from pyhees.section11_3 import load_schedule, get_schedule_app, get_schedule_cc\n'), ((3466, 3491), 'pyhees.section11_3.get_schedule_cc', 'get_schedule_cc', (['schedule'], {}), '(schedule)\n', (3481, 3491), False, 'from pyhees.section11_3 import load_schedule, get_schedule_app, get_schedule_cc\n'), ((4520, 4538), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (4528, 4538), True, 'import numpy as np\n'), ((4878, 4896), 'numpy.zeros', 'np.zeros', (['(24 * 365)'], {}), '(24 * 365)\n', (4886, 4896), True, 'import numpy as np\n'), ((5805, 5832), 'numpy.repeat', 'np.repeat', (['schedule_app', '(24)'], {}), '(schedule_app, 24)\n', (5814, 5832), True, 'import numpy as np\n'), ((6689, 6715), 'numpy.repeat', 'np.repeat', (['schedule_cc', '(24)'], {}), '(schedule_cc, 24)\n', (6698, 6715), True, 'import numpy as np\n')]
|
import pytest
import traceback
from flask_unchained.bundles.security.commands.roles import list_roles, create_role, delete_role
class TestRolesCommands:
@pytest.mark.roles(dict(name='role1'),
dict(name='role2'),
dict(name='role3'))
def test_list_roles(self, roles, cli_runner):
result = cli_runner.invoke(list_roles)
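        # The assert message is evaluated only on failure, so the captured traceback is printed only when the command errors.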
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
lines = result.output.strip().splitlines()
assert len(lines) == 5
assert lines[0] == 'ID Name '
assert lines[1] == '---------'
assert lines[-1] == f' {roles[-1].id} {roles[-1].name}'
assert lines[-2] == f' {roles[-2].id} {roles[-2].name}'
assert lines[-3] == f' {roles[-3].id} {roles[-3].name}'
def test_create_role(self, cli_runner):
result = cli_runner.invoke(create_role, args=['--name', 'new-role'], input='y\n')
assert result.exit_code == 0, traceback.print_exception(*result.exc_info)
assert result.output.strip().splitlines()[-1] == \
"Successfully created Role(id=1, name='new-role')"
@pytest.mark.role(name='role1')
def test_delete_role(self, role, cli_runner):
result = cli_runner.invoke(delete_role, args=['name=role1'], input='y\n')
assert result.exit_code == 0
assert result.output.strip().splitlines()[-1] == \
"Successfully deleted Role(id=1, name='role1')"
|
[
"pytest.mark.role",
"traceback.print_exception"
] |
[((1165, 1195), 'pytest.mark.role', 'pytest.mark.role', ([], {'name': '"""role1"""'}), "(name='role1')\n", (1181, 1195), False, 'import pytest\n'), ((420, 463), 'traceback.print_exception', 'traceback.print_exception', (['*result.exc_info'], {}), '(*result.exc_info)\n', (445, 463), False, 'import traceback\n'), ((993, 1036), 'traceback.print_exception', 'traceback.print_exception', (['*result.exc_info'], {}), '(*result.exc_info)\n', (1018, 1036), False, 'import traceback\n')]
|
#!/usr/bin/python
# This script loads a pretrained model and a raw .txt file. It then performs sentence splitting and tokenization and passes
# the input sentences to the model for tagging. Prints the tokens and the tags in a CoNLL format to stdout
# Usage: python RunModel.py modelPath inputPath
# For pretrained models see docs/Pretrained_Models.md
from __future__ import print_function
import nltk
from util.preprocessing import addCharInformation, createMatrices, addCasingInformation
from neuralnets.BiLSTM import BiLSTM
import sys
#if len(sys.argv) < 3:
# print("Usage: python RunModel.py modelPath inputPath")
# exit()
def cn_word_segmentation(text):
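    # Insert spaces between jieba tokens so the downstream whitespace-based NLTK tokenizers can split Chinese text.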
out_text = ''
import jieba
for line in text.splitlines():
line_out = ' '.join(list(jieba.cut(line)))
out_text += line_out + '\n'
return out_text
class InferenceHelper:
def __init__(self):
pass
def init(self, model_path, mode='cn', timeit=False, mute=True):
self.is_chinese = False
self.is_name_recognition = False
self.is_timeit_mode = False
if mode == 'cn':
self.is_chinese = True
if mode == 'name':
self.is_name_recognition = True
if timeit:
self.is_timeit_mode = True
self.mute = mute
# :: Load the model ::
self.lstmModel = BiLSTM.loadModel(model_path)
def infer(self, text):
if self.is_chinese:
text = cn_word_segmentation(text)
if not self.mute:
                print('Chinese mode. Segmented text:')
print(text)
if self.is_name_recognition:
if not self.mute:
print('Name mode.')
            text = ' '.join(text)  # insert spaces between characters so names are tagged character by character
# :: Prepare the input ::
sentences = [{'tokens': nltk.word_tokenize(sent)} for sent in nltk.sent_tokenize(text)]
addCharInformation(sentences)
addCasingInformation(sentences)
dataMatrix = createMatrices(sentences, self.lstmModel.mappings, True)
# :: Tag the input ::
tags = self.lstmModel.tagSentences(dataMatrix)
if self.is_timeit_mode:
print('timeit mode:')
import time
test_count = 100
start_time = time.time()
for i in range(test_count):
tags = self.lstmModel.tagSentences(dataMatrix)
elapsed = time.time() - start_time
print('test_count = {}, avg time = {}'.format(test_count, elapsed/test_count))
if not self.mute:
# :: Output to stdout ::
for sentenceIdx in range(len(sentences)):
tokens = sentences[sentenceIdx]['tokens']
for tokenIdx in range(len(tokens)):
tokenTags = []
for modelName in sorted(tags.keys()):
tokenTags.append(tags[modelName][sentenceIdx][tokenIdx])
print("%s\t%s" % (tokens[tokenIdx], "\t".join(tokenTags)))
print("")
return sentences, tags
|
[
"util.preprocessing.addCharInformation",
"util.preprocessing.createMatrices",
"jieba.cut",
"nltk.sent_tokenize",
"util.preprocessing.addCasingInformation",
"time.time",
"neuralnets.BiLSTM.BiLSTM.loadModel",
"nltk.word_tokenize"
] |
[((1356, 1384), 'neuralnets.BiLSTM.BiLSTM.loadModel', 'BiLSTM.loadModel', (['model_path'], {}), '(model_path)\n', (1372, 1384), False, 'from neuralnets.BiLSTM import BiLSTM\n'), ((1877, 1906), 'util.preprocessing.addCharInformation', 'addCharInformation', (['sentences'], {}), '(sentences)\n', (1895, 1906), False, 'from util.preprocessing import addCharInformation, createMatrices, addCasingInformation\n'), ((1915, 1946), 'util.preprocessing.addCasingInformation', 'addCasingInformation', (['sentences'], {}), '(sentences)\n', (1935, 1946), False, 'from util.preprocessing import addCharInformation, createMatrices, addCasingInformation\n'), ((1968, 2024), 'util.preprocessing.createMatrices', 'createMatrices', (['sentences', 'self.lstmModel.mappings', '(True)'], {}), '(sentences, self.lstmModel.mappings, True)\n', (1982, 2024), False, 'from util.preprocessing import addCharInformation, createMatrices, addCasingInformation\n'), ((2255, 2266), 'time.time', 'time.time', ([], {}), '()\n', (2264, 2266), False, 'import time\n'), ((772, 787), 'jieba.cut', 'jieba.cut', (['line'], {}), '(line)\n', (781, 787), False, 'import jieba\n'), ((1805, 1829), 'nltk.word_tokenize', 'nltk.word_tokenize', (['sent'], {}), '(sent)\n', (1823, 1829), False, 'import nltk\n'), ((1843, 1867), 'nltk.sent_tokenize', 'nltk.sent_tokenize', (['text'], {}), '(text)\n', (1861, 1867), False, 'import nltk\n'), ((2392, 2403), 'time.time', 'time.time', ([], {}), '()\n', (2401, 2403), False, 'import time\n')]
|
from web3 import Web3
from constants import Constants
w3 = Web3(Web3.HTTPProvider("https://api.avax.network/ext/bc/C/rpc"))
if not w3.isConnected():
    print("Error: web3 could not connect")
ipefi_contract = w3.eth.contract(address=Constants.IPEFI_ADDRESS, abi=Constants.IPEFI_ABI)
def getIPefiRatio():
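    # currentExchangeRate() returns a 1e18-scaled fixed-point value; fromWei(..., 'ether') converts it to a decimal ratio.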
return w3.fromWei(ipefi_contract.functions.currentExchangeRate().call(), 'ether')
if __name__ == '__main__':
getIPefiRatio()
|
[
"web3.Web3.HTTPProvider"
] |
[((66, 124), 'web3.Web3.HTTPProvider', 'Web3.HTTPProvider', (['"""https://api.avax.network/ext/bc/C/rpc"""'], {}), "('https://api.avax.network/ext/bc/C/rpc')\n", (83, 124), False, 'from web3 import Web3\n')]
|