hexsha stringlengths 40 40 | size int64 4 996k | ext stringclasses 8
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 4 245 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 40 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 245 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 40 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 245 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 40 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 4 996k | avg_line_length float64 1.33 58.2k | max_line_length int64 2 323k | alphanum_fraction float64 0 0.97 | content_no_comment stringlengths 0 946k | is_comment_constant_removed bool 2
classes | is_sharp_comment_removed bool 1
class |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f7f7ffce6617f4d71c3dacf5bee7fc9021e90a49 | 1,456 | py | Python | scibeam/tests/test_common.py | SuperYuLu/SciBeam | 80601adda9288fd32efeedf5b1de015761c1f8e5 | [
"MIT"
] | 3 | 2020-07-31T09:24:46.000Z | 2021-03-01T23:59:51.000Z | scibeam/tests/test_common.py | SuperYuLu/SciBeam | 80601adda9288fd32efeedf5b1de015761c1f8e5 | [
"MIT"
] | 12 | 2018-05-14T18:40:14.000Z | 2019-02-06T22:51:33.000Z | scibeam/tests/test_common.py | SuperYuLu/SciBeam | 80601adda9288fd32efeedf5b1de015761c1f8e5 | [
"MIT"
] | null | null | null | # test_core_common.py ---
#
# Filename: test_core_common.py
# Description:
#
# Author: Yu Lu
# Email: yulu@utexas.edu
# Github: https://github.com/SuperYuLu
#
# Created: Fri May 4 11:33:37 2018 (-0500)
# Version:
# Last-Updated: Sat Jul 28 15:00:42 2018 (-0500)
# By: yulu
# Update #: 24
#
import unittest
import os
import numpy as np
#test_data_root = '../examples/data/'
from scibeam.core.common import winPathHandler, loadFile
import pkg_resources
DATA_FILE = pkg_resources.resource_filename('scibeam', 'data/test/time_series_1D/single_time_series.lvm')
class TestFunctions(unittest.TestCase):
    """Unit tests for the path and file helpers in scibeam.core.common."""
    def test_winPathHandler(self):
        # Windows-style paths must come back with forward slashes;
        # POSIX-style paths and folder strings must pass through untouched.
        win_path = r'C:\Documents\MyFolder\Whatever.txt'
        posix_path = '/home/MyFolder/Whatever.txt'
        posix_folder = '../examples/data'
        converted_win = 'C:/Documents/MyFolder/Whatever.txt'
        self.assertEqual(winPathHandler(win_path), converted_win)
        self.assertEqual(winPathHandler(posix_path), posix_path)
        # A list of paths is handled element-wise.
        self.assertEqual(winPathHandler([win_path, posix_path]),
                         [converted_win, posix_path])
        self.assertEqual(winPathHandler(posix_folder), posix_folder)
    def test_loadFile(self):
        # The bundled single time series sample is 25000 rows x 2 columns.
        self.assertEqual(loadFile(DATA_FILE).shape, (25000, 2))
if __name__ == '__main__':
    unittest.main()
| 28 | 143 | 0.68544 |
port unittest
import os
import numpy as np
from scibeam.core.common import winPathHandler, loadFile
import pkg_resources
DATA_FILE = pkg_resources.resource_filename('scibeam', 'data/test/time_series_1D/single_time_series.lvm')
class TestFunctions(unittest.TestCase):
def test_winPathHandler(self):
test_path_win = r'C:\Documents\MyFolder\Whatever.txt'
test_path_linux = '/home/MyFolder/Whatever.txt'
test_folder_linux = '../examples/data'
self.assertEqual(winPathHandler(test_path_win), 'C:/Documents/MyFolder/Whatever.txt')
self.assertEqual(winPathHandler(test_path_linux), '/home/MyFolder/Whatever.txt')
self.assertEqual(winPathHandler([test_path_win, test_path_linux]),['C:/Documents/MyFolder/Whatever.txt','/home/MyFolder/Whatever.txt'])
self.assertEqual(winPathHandler(test_folder_linux), '../examples/data')
def test_loadFile(self):
self.assertEqual(loadFile(DATA_FILE).shape, (25000, 2))
if __name__ == '__main__':
unittest.main()
| true | true |
f7f800ac106a265213147c82ffe9cc092bebed51 | 2,886 | py | Python | source/datamanager.py | YeongHyeon/CVAE | 5db95ea6a1a01475cd0356e31bf593f09b5479c2 | [
"MIT"
] | 1 | 2021-07-21T11:47:52.000Z | 2021-07-21T11:47:52.000Z | source/datamanager.py | YeongHyeon/CVAE | 5db95ea6a1a01475cd0356e31bf593f09b5479c2 | [
"MIT"
] | null | null | null | source/datamanager.py | YeongHyeon/CVAE | 5db95ea6a1a01475cd0356e31bf593f09b5479c2 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
class Dataset(object):
    """MNIST wrapper that serves shuffled training batches and sequential
    test batches, with optional per-batch min-max normalization."""
    def __init__(self, normalize=True):
        """Load MNIST via Keras and record shape/class statistics.

        normalize: when True, every batch returned by next_train/next_test
        is min-max scaled to [0, 1] using that batch's own extrema.
        """
        print("\nInitializing Dataset...")
        self.normalize = normalize
        (x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
        self.x_tr, self.y_tr = x_tr, y_tr
        self.x_te, self.y_te = x_te, y_te
        # Type casting from uint8 to float32
        self.x_tr = np.ndarray.astype(self.x_tr, np.float32)
        self.x_te = np.ndarray.astype(self.x_te, np.float32)
        self.num_tr, self.num_te = self.x_tr.shape[0], self.x_te.shape[0]
        # Cursors into the training/test arrays used by the batch iterators.
        self.idx_tr, self.idx_te = 0, 0
        print("Number of data\nTraining: %d, Test: %d\n" %(self.num_tr, self.num_te))
        x_sample, y_sample = self.x_te[0], self.y_te[0]
        self.height = x_sample.shape[0]
        self.width = x_sample.shape[1]
        # MNIST samples are 2-D (no channel axis); default to 1 channel.
        try: self.channel = x_sample.shape[2]
        except: self.channel = 1
        # NOTE: extrema/statistics below are taken from a single sample,
        # not the whole dataset.
        self.min_val, self.max_val = x_sample.min(), x_sample.max()
        self.num_class = (y_te.max()+1)
        print("Information of data")
        print("Shape Height: %d, Width: %d, Channel: %d" %(self.height, self.width, self.channel))
        print("Value Min: %.3f, Max: %.3f" %(self.min_val, self.max_val))
        print("Class %d" %(self.num_class))
        print("Normalization: %r" %(self.normalize))
        if(self.normalize): print("(from %.3f-%.3f to %.3f-%.3f)" %(self.min_val, self.max_val, 0, 1))
    def reset_idx(self): self.idx_tr, self.idx_te = 0, 0
    def next_train(self, batch_size=1, fix=False):
        """Return (x, y, terminator) for the next training batch.

        x gains a trailing channel axis. terminator becomes True when the
        epoch wraps, at which point the training data is reshuffled. With
        fix=True the cursor is not advanced, so the same batch repeats.
        """
        start, end = self.idx_tr, self.idx_tr+batch_size
        x_tr, y_tr = self.x_tr[start:end], self.y_tr[start:end]
        x_tr = np.expand_dims(x_tr, axis=3)
        terminator = False
        if(end >= self.num_tr):
            terminator = True
            self.idx_tr = 0
            self.x_tr, self.y_tr = shuffle(self.x_tr, self.y_tr)
        else: self.idx_tr = end
        if(fix): self.idx_tr = start
        # Short tail batch: re-slice a full-size batch from near the end.
        # NOTE(review): [-1-batch_size:-1] excludes the very last sample —
        # looks like an off-by-one ([-batch_size:] would include it); confirm
        # before changing, since training depends on it only cosmetically.
        if(x_tr.shape[0] != batch_size):
            x_tr, y_tr = self.x_tr[-1-batch_size:-1], self.y_tr[-1-batch_size:-1]
            x_tr = np.expand_dims(x_tr, axis=3)
        if(self.normalize):
            # Per-batch min-max scaling (uses this batch's extrema).
            min_x, max_x = x_tr.min(), x_tr.max()
            x_tr = (x_tr - min_x) / (max_x - min_x)
        return x_tr, y_tr, terminator
    def next_test(self, batch_size=1):
        """Return (x, y, terminator) for the next sequential test batch.

        The test set is never shuffled; terminator flags the wrap-around.
        """
        start, end = self.idx_te, self.idx_te+batch_size
        x_te, y_te = self.x_te[start:end], self.y_te[start:end]
        x_te = np.expand_dims(x_te, axis=3)
        terminator = False
        if(end >= self.num_te):
            terminator = True
            self.idx_te = 0
        else: self.idx_te = end
        if(self.normalize):
            # Per-batch min-max scaling (uses this batch's extrema).
            min_x, max_x = x_te.min(), x_te.max()
            x_te = (x_te - min_x) / (max_x - min_x)
        return x_te, y_te, terminator
| 33.172414 | 102 | 0.586625 | import numpy as np
import tensorflow as tf
from sklearn.utils import shuffle
class Dataset(object):
def __init__(self, normalize=True):
print("\nInitializing Dataset...")
self.normalize = normalize
(x_tr, y_tr), (x_te, y_te) = tf.keras.datasets.mnist.load_data()
self.x_tr, self.y_tr = x_tr, y_tr
self.x_te, self.y_te = x_te, y_te
self.x_tr = np.ndarray.astype(self.x_tr, np.float32)
self.x_te = np.ndarray.astype(self.x_te, np.float32)
self.num_tr, self.num_te = self.x_tr.shape[0], self.x_te.shape[0]
self.idx_tr, self.idx_te = 0, 0
print("Number of data\nTraining: %d, Test: %d\n" %(self.num_tr, self.num_te))
x_sample, y_sample = self.x_te[0], self.y_te[0]
self.height = x_sample.shape[0]
self.width = x_sample.shape[1]
try: self.channel = x_sample.shape[2]
except: self.channel = 1
self.min_val, self.max_val = x_sample.min(), x_sample.max()
self.num_class = (y_te.max()+1)
print("Information of data")
print("Shape Height: %d, Width: %d, Channel: %d" %(self.height, self.width, self.channel))
print("Value Min: %.3f, Max: %.3f" %(self.min_val, self.max_val))
print("Class %d" %(self.num_class))
print("Normalization: %r" %(self.normalize))
if(self.normalize): print("(from %.3f-%.3f to %.3f-%.3f)" %(self.min_val, self.max_val, 0, 1))
def reset_idx(self): self.idx_tr, self.idx_te = 0, 0
def next_train(self, batch_size=1, fix=False):
start, end = self.idx_tr, self.idx_tr+batch_size
x_tr, y_tr = self.x_tr[start:end], self.y_tr[start:end]
x_tr = np.expand_dims(x_tr, axis=3)
terminator = False
if(end >= self.num_tr):
terminator = True
self.idx_tr = 0
self.x_tr, self.y_tr = shuffle(self.x_tr, self.y_tr)
else: self.idx_tr = end
if(fix): self.idx_tr = start
if(x_tr.shape[0] != batch_size):
x_tr, y_tr = self.x_tr[-1-batch_size:-1], self.y_tr[-1-batch_size:-1]
x_tr = np.expand_dims(x_tr, axis=3)
if(self.normalize):
min_x, max_x = x_tr.min(), x_tr.max()
x_tr = (x_tr - min_x) / (max_x - min_x)
return x_tr, y_tr, terminator
def next_test(self, batch_size=1):
start, end = self.idx_te, self.idx_te+batch_size
x_te, y_te = self.x_te[start:end], self.y_te[start:end]
x_te = np.expand_dims(x_te, axis=3)
terminator = False
if(end >= self.num_te):
terminator = True
self.idx_te = 0
else: self.idx_te = end
if(self.normalize):
min_x, max_x = x_te.min(), x_te.max()
x_te = (x_te - min_x) / (max_x - min_x)
return x_te, y_te, terminator
| true | true |
f7f800d72b7136be59be7a488a23e84fc16dde0e | 758 | py | Python | pyschematron/elementpath_extensions/xslt1_parser.py | Ionite/pyschematron | f535f8f5a580ff91b601bc56ac35d0e916e7ea7c | [
"MIT"
] | null | null | null | pyschematron/elementpath_extensions/xslt1_parser.py | Ionite/pyschematron | f535f8f5a580ff91b601bc56ac35d0e916e7ea7c | [
"MIT"
] | 2 | 2021-03-31T19:50:13.000Z | 2021-12-13T20:38:36.000Z | pyschematron/elementpath_extensions/xslt1_parser.py | Ionite/pyschematron | f535f8f5a580ff91b601bc56ac35d0e916e7ea7c | [
"MIT"
] | null | null | null | from elementpath.xpath1_parser import XPath1Parser, is_document_node
class XSLT1Parser(XPath1Parser):
    """XPath 1.0 parser extended with the XSLT-only current() function."""
    # Add 'current' to the symbols the base XPath 1.0 parser recognizes.
    SYMBOLS = XPath1Parser.SYMBOLS | {
        'current'
    }
# Re-export the parser's token-registration helpers under short module-level
# names, following the idiom used by elementpath's own parser modules.
register = XSLT1Parser.register
unregister = XSLT1Parser.unregister
literal = XSLT1Parser.literal
prefix = XSLT1Parser.prefix
infix = XSLT1Parser.infix
infixr = XSLT1Parser.infixr
method = XSLT1Parser.method
function = XSLT1Parser.function
register('current')
@method(function('current', nargs=0))
def select(self, context=None):
    # current(): yield the item the whole expression was originally applied
    # to (context.current_item), independent of the local predicate context.
    if context is None:
        self.missing_context()
    if context.current_item is not None:
        return [context.current_item]
    else:
        raise Exception("current() called in a context without an original context item")
# Finalize the token classes registered on this parser subclass.
XSLT1Parser.build()
| 22.969697 | 89 | 0.742744 | from elementpath.xpath1_parser import XPath1Parser, is_document_node
class XSLT1Parser(XPath1Parser):
SYMBOLS = XPath1Parser.SYMBOLS | {
'current'
}
register = XSLT1Parser.register
unregister = XSLT1Parser.unregister
literal = XSLT1Parser.literal
prefix = XSLT1Parser.prefix
infix = XSLT1Parser.infix
infixr = XSLT1Parser.infixr
method = XSLT1Parser.method
function = XSLT1Parser.function
register('current')
@method(function('current', nargs=0))
def select(self, context=None):
if context is None:
self.missing_context()
if context.current_item is not None:
return [context.current_item]
else:
raise Exception("current() called in a context without an original context item")
XSLT1Parser.build()
| true | true |
f7f80142682ee2eaa2d12695e92cc59eb4d8a700 | 486 | py | Python | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/runner/__init__.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 4 | 2016-03-30T14:31:52.000Z | 2019-02-02T05:01:32.000Z | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/runner/__init__.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 1 | 2020-03-06T04:49:42.000Z | 2020-03-06T04:49:42.000Z | IMU/VTK-6.2.0/ThirdParty/Twisted/twisted/runner/__init__.py | timkrentz/SunTracker | 9a189cc38f45e5fbc4e4c700d7295a871d022795 | [
"MIT"
] | 2 | 2019-08-30T23:36:13.000Z | 2019-11-08T16:52:01.000Z | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Twisted runner: run and monitor processes
Maintainer: Andrew Bennetts
classic inetd(8) support:
Future Plans: The basic design should be final. There are some bugs that need
fixing regarding UDP and Sun-RPC support. Perhaps some day xinetd
compatibility will be added.
procmon:monitor and restart processes
"""
from twisted.runner._version import version
__version__ = version.short()
| 25.578947 | 79 | 0.757202 |
from twisted.runner._version import version
__version__ = version.short()
| true | true |
f7f8015bd437d58dde37a5c2655524e3b43eaa47 | 19 | py | Python | permafrost/forms.py | jared-hardy/django-permafrost | 588c0783791ec10f683da0235162a90f6936110a | [
"MIT"
] | null | null | null | permafrost/forms.py | jared-hardy/django-permafrost | 588c0783791ec10f683da0235162a90f6936110a | [
"MIT"
] | null | null | null | permafrost/forms.py | jared-hardy/django-permafrost | 588c0783791ec10f683da0235162a90f6936110a | [
"MIT"
] | null | null | null | # Permafrost Forms
| 9.5 | 18 | 0.789474 | true | true | |
f7f801e0f6e63198c93660a6e99628bc9c66d514 | 332 | py | Python | tests/test_utils.py | bdpedigo/molesq | 297c08dc0a41390dda5e8e5fc1bda612d7c417c0 | [
"MIT"
] | 4 | 2021-04-08T20:35:32.000Z | 2021-12-29T12:08:28.000Z | tests/test_utils.py | bdpedigo/molesq | 297c08dc0a41390dda5e8e5fc1bda612d7c417c0 | [
"MIT"
] | 7 | 2021-04-08T15:04:31.000Z | 2021-09-10T08:50:49.000Z | tests/test_utils.py | bdpedigo/molesq | 297c08dc0a41390dda5e8e5fc1bda612d7c417c0 | [
"MIT"
] | 1 | 2021-04-12T13:56:08.000Z | 2021-04-12T13:56:08.000Z | import numpy as np
from molesq.utils import grid_field
def test_grid_field():
    """grid_field over a 2x2 window enumerates all integer coordinates."""
    coords, shape = grid_field([0, 0], [2, 2])
    # Row-major enumeration of every (row, col) pair in the window.
    expected = np.array([[0, 0], [0, 1], [1, 0], [1, 1]]).astype(coords.dtype)
    assert np.allclose(coords, expected)
    assert shape == (2, 2)
| 18.444444 | 44 | 0.46988 | import numpy as np
from molesq.utils import grid_field
def test_grid_field():
test, shape = grid_field([0, 0], [2, 2])
ref = np.array(
[
[0, 0],
[0, 1],
[1, 0],
[1, 1],
]
).astype(test.dtype)
assert np.allclose(test, ref)
assert shape == (2, 2)
| true | true |
f7f8024a32b0c75b5cb62b92ce0fa6a17fea3976 | 4,792 | py | Python | misc/acrn-config/kconfig/silentoldconfig.py | stanleyintel/acrn-hypervisor | 0461ac209f5f265c269b6d77415043b6f028598b | [
"BSD-3-Clause"
] | 1 | 2021-05-27T09:39:48.000Z | 2021-05-27T09:39:48.000Z | misc/acrn-config/kconfig/silentoldconfig.py | stanleyintel/acrn-hypervisor | 0461ac209f5f265c269b6d77415043b6f028598b | [
"BSD-3-Clause"
] | 1 | 2021-07-26T22:16:18.000Z | 2021-07-26T22:16:18.000Z | misc/acrn-config/kconfig/silentoldconfig.py | stanleyintel/acrn-hypervisor | 0461ac209f5f265c269b6d77415043b6f028598b | [
"BSD-3-Clause"
] | 1 | 2020-05-22T04:48:20.000Z | 2020-05-22T04:48:20.000Z | # Copyright (C) 2018 Intel Corporation.
# SPDX-License-Identifier: BSD-3-Clause
# This script
#
# 1. takes a Kconfig and a .config and an optional list of symbol-value pairs,
# 2. checks whether the specified symbols have the specified values in the
# given .config, and
# 3. reconstruct .config with the given list of symbol-value pairs if there
# is any disagreement.
import sys
import os
# Kconfiglib: Copyright (c) 2011-2018, Ulf Magnusson
# SPDX-License-Identifier: ISC
# Refer to scripts/kconfig/LICENSE.kconfiglib for the permission notice.
import kconfiglib
def usage():
    """Print a one-line usage synopsis for this script to stdout."""
    message = "%s: <Kconfig file> <.config file> [<symbol1>=<value1> ...]\n" % sys.argv[0]
    sys.stdout.write(message)
def main():
    """Reconcile a .config with its Kconfig and optional cmdline overrides.

    argv[1] is the Kconfig file, argv[2] the .config to (re)generate, and
    any further SYMBOL=VALUE pairs override symbols in the result. The
    .config is rewritten only when something actually changed.
    """
    if len(sys.argv) < 3:
        usage()
        sys.exit(1)
    kconfig_path = sys.argv[1]
    if not os.path.isfile(kconfig_path):
        sys.stderr.write("Cannot find file %s\n" % kconfig_path)
        sys.exit(1)
    kconfig = kconfiglib.Kconfig(kconfig_path)
    # Parse the configs specified on cmdline
    cmdline_conf = {}
    for sym_val in sys.argv[3:]:
        if sym_val.find("=") == -1:
            continue
        # Only the first '=' matters; unknown symbols and empty values are
        # silently ignored.
        sym_name, val = sym_val.split("=")[:2]
        if sym_name in kconfig.syms.keys() and val:
            cmdline_conf[sym_name] = val
    # Determine the base config.
    #
    # If either
    #
    #   1. no .config exists, or
    #   2. the BOARD in the existing .config is different from the BOARD
    #      specified in the environment variable
    #
    # the defconfig will be used as the base config. Otherwise the existing
    # .config is used as the base.
    #
    # If .config does not exist, it is required that Kconfig specifies an
    # existing defconfig, otherwise this script will refuse to generate a
    # .config.
    config_path = sys.argv[2]
    defconfig_path = kconfig.defconfig_filename
    if defconfig_path and os.path.isfile(defconfig_path):
        kdefconfig = kconfiglib.Kconfig(kconfig_path)
        kdefconfig.load_config(defconfig_path)
    else:
        kdefconfig = None
    need_update = False
    if os.path.isfile(config_path):
        kconfig.load_config(config_path)
        # The BOARD given by the environment variable may be different from what
        # is specified in the corresponding defconfig. So compare the value of
        # CONFIG_BOARD directly. This is applicable only when CONFIG_BOARD
        # exists in the Kconfig.
        if kdefconfig and 'BOARD' in kconfig.syms and \
           kconfig.syms['BOARD'].str_value != kdefconfig.syms['BOARD'].str_value:
            kconfig = kdefconfig
            sys.stdout.write("Overwrite with default configuration based on %s.\n" % defconfig_path)
            need_update = True
        else:
            # Use the existing .config as the base.
            #
            # Mark need_update if any visible symbol picks a different value
            # from what is specified in .config.
            for sym in [x for x in kconfig.unique_defined_syms if x.visibility]:
                if sym.type in [kconfiglib.BOOL, kconfiglib.TRISTATE]:
                    picked_value = sym.tri_value
                else:
                    picked_value = sym.str_value
                need_update = (picked_value != sym.user_value)
                if need_update:
                    break
    else:
        # base on a default configuration
        if kdefconfig:
            kconfig = kdefconfig
            sys.stdout.write("Default configuration based on %s.\n" % defconfig_path)
            need_update = True
        else:
            # report an error if no known defconfig exists
            sys.stderr.write(".config does not exist and no defconfig available for BOARD %s on SCENARIO %s.\n"
                             % (os.environ['BOARD'], os.environ['SCENARIO']))
            sys.exit(1)
    # Update the old .config with those specified on cmdline
    #
    # Note: the user shall be careful what configuration symbols to overwrite by
    # silentoldconfig. After changing a symbol value, the invisible symbols are
    # updated accordingly because they always use the default value, while
    # visible symbols keep their original value in the old .config. This may
    # lead to invalid .config for a specific platform.
    #
    # Currently it is recommended to use the following update only for
    # RELEASE. For PLATFORM reinvoke defconfig is preferred.
    for sym_name, val in cmdline_conf.items():
        sym = kconfig.syms[sym_name]
        if sym.str_value and sym.str_value != val:
            kconfig.syms[sym_name].set_value(val)
            need_update = True
    if need_update:
        kconfig.write_config(config_path)
        sys.stdout.write("Configuration written to %s.\n" % config_path)
if __name__ == "__main__":
    main()
| 37.732283 | 111 | 0.643573 |
import sys
import os
import kconfiglib
def usage():
sys.stdout.write("%s: <Kconfig file> <.config file> [<symbol1>=<value1> ...]\n" % sys.argv[0])
def main():
if len(sys.argv) < 3:
usage()
sys.exit(1)
kconfig_path = sys.argv[1]
if not os.path.isfile(kconfig_path):
sys.stderr.write("Cannot find file %s\n" % kconfig_path)
sys.exit(1)
kconfig = kconfiglib.Kconfig(kconfig_path)
cmdline_conf = {}
for sym_val in sys.argv[3:]:
if sym_val.find("=") == -1:
continue
sym_name, val = sym_val.split("=")[:2]
if sym_name in kconfig.syms.keys() and val:
cmdline_conf[sym_name] = val
config_path = sys.argv[2]
defconfig_path = kconfig.defconfig_filename
if defconfig_path and os.path.isfile(defconfig_path):
kdefconfig = kconfiglib.Kconfig(kconfig_path)
kdefconfig.load_config(defconfig_path)
else:
kdefconfig = None
need_update = False
if os.path.isfile(config_path):
kconfig.load_config(config_path)
if kdefconfig and 'BOARD' in kconfig.syms and \
kconfig.syms['BOARD'].str_value != kdefconfig.syms['BOARD'].str_value:
kconfig = kdefconfig
sys.stdout.write("Overwrite with default configuration based on %s.\n" % defconfig_path)
need_update = True
else:
for sym in [x for x in kconfig.unique_defined_syms if x.visibility]:
if sym.type in [kconfiglib.BOOL, kconfiglib.TRISTATE]:
picked_value = sym.tri_value
else:
picked_value = sym.str_value
need_update = (picked_value != sym.user_value)
if need_update:
break
else:
if kdefconfig:
kconfig = kdefconfig
sys.stdout.write("Default configuration based on %s.\n" % defconfig_path)
need_update = True
else:
sys.stderr.write(".config does not exist and no defconfig available for BOARD %s on SCENARIO %s.\n"
% (os.environ['BOARD'], os.environ['SCENARIO']))
sys.exit(1)
for sym_name, val in cmdline_conf.items():
sym = kconfig.syms[sym_name]
if sym.str_value and sym.str_value != val:
kconfig.syms[sym_name].set_value(val)
need_update = True
if need_update:
kconfig.write_config(config_path)
sys.stdout.write("Configuration written to %s.\n" % config_path)
if __name__ == "__main__":
main()
| true | true |
f7f8031de972c097f7ceec05150b7ba2550e9682 | 5,404 | py | Python | lapps-grid/tools/stats/grouping.py | CDCgov/DCPC | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | [
"Apache-2.0"
] | 6 | 2018-11-03T22:43:35.000Z | 2022-02-15T17:51:33.000Z | lapps-grid/tools/stats/grouping.py | CDCgov/DCPC | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | [
"Apache-2.0"
] | 3 | 2015-06-06T22:16:03.000Z | 2015-11-12T00:22:45.000Z | lapps-grid/tools/stats/grouping.py | CDCgov/DCPC | c3fadef1bd6345e01a58afef051491d8ef6a7f93 | [
"Apache-2.0"
] | 10 | 2017-04-10T21:40:22.000Z | 2022-02-21T16:50:10.000Z | #!/usr/bin/env python
# Guruprasad Ananda
# Refactored 2011 to use numpy instead of rpy, Kanwei Li
"""
This tool provides the SQL "group by" functionality.
"""
import commands
import random
import sys
import tempfile
from itertools import groupby
import numpy
def stop_err(msg):
    """Write *msg* to standard error, then terminate the script.

    Note: sys.exit() without an argument exits with status 0 after the
    message has been emitted.
    """
    sys.stderr.write(msg)
    sys.exit()
def mode(data):
    """Return the statistical mode(s) of *data* as a comma-joined string.

    All values tied for the highest frequency are included, in first-
    occurrence order, each converted with str(). An empty input yields an
    empty string (the previous hand-rolled version raised ValueError from
    max() on empty data).
    """
    # Local import keeps the module's top-level dependencies unchanged.
    from collections import Counter
    counts = Counter(data)
    if not counts:
        return ''
    maxcount = max(counts.values())
    # Counter preserves first-encounter order, matching the original
    # dict-scan behavior for tie reporting.
    return ','.join(str(x) for x in counts if counts[x] == maxcount)
def main():
    """Group rows of a tab-separated file by one column, aggregating others.

    Command line (driven by the Galaxy tool wrapper):
      argv[1]  output file path
      argv[2]  input TSV file path
      argv[3]  1-based index of the group-by column
      argv[4]  ignore-case flag (0/1)
      argv[5]  comma-separated ASCII codes of line-start chars to drop, or "None"
      argv[6:] one "op column round" triple per requested aggregation

    NOTE(review): this script targets Python 2 (print statement, `commands`
    module); `map(float, data)` would be lazy under Python 3.
    """
    inputfile = sys.argv[2]
    ignorecase = int(sys.argv[4])
    ops = []
    cols = []
    round_val = []
    if sys.argv[5] != "None":
        asciitodelete = sys.argv[5]
        if asciitodelete:
            # Pre-filter: drop every line whose first character is one of
            # the requested ASCII codes (e.g. comment markers).
            oldfile = open(inputfile, 'r')
            newinputfile = "input_cleaned.tsv"
            newfile = open(newinputfile, 'w')
            asciitodelete = asciitodelete.split(',')
            for i in range(len(asciitodelete)):
                asciitodelete[i] = chr(int(asciitodelete[i]))
            for line in oldfile:
                if line[0] not in asciitodelete:
                    newfile.write(line)
            oldfile.close()
            newfile.close()
            inputfile = newinputfile
    for var in sys.argv[6:]:
        op, col, do_round = var.split()
        ops.append(op)
        cols.append(col)
        round_val.append(do_round)
    """
    At this point, ops, cols and rounds will look something like this:
    ops: ['mean', 'min', 'c']
    cols: ['1', '3', '4']
    round_val: ['no', 'yes' 'no']
    """
    try:
        group_col = int(sys.argv[3]) - 1
    except:
        stop_err( "Group column not specified." )
    tmpfile = tempfile.NamedTemporaryFile()
    try:
        """
        The -k option for the Posix sort command is as follows:
        -k, --key=POS1[,POS2]
        start a key at POS1, end it at POS2 (origin 1)
        In other words, column positions start at 1 rather than 0, so
        we need to add 1 to group_col.
        if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
        """
        case = ''
        if ignorecase == 1:
            case = '-f'
        command_line = "sort -t ' ' %s -k%s,%s -o %s %s" % (case, group_col + 1, group_col + 1, tmpfile.name, inputfile)
    except Exception as exc:
        stop_err( 'Initialization error -> %s' % str(exc) )
    # Sort by the group column so groupby() sees each key contiguously.
    error_code, stdout = commands.getstatusoutput(command_line)
    if error_code != 0:
        stop_err( "Sorting input dataset resulted in error: %s: %s" % ( error_code, stdout ))
    fout = open(sys.argv[1], "w")
    def is_new_item(line):
        # groupby key: the group column's value (lowercased if requested).
        try:
            item = line.strip().split("\t")[group_col]
        except IndexError:
            stop_err( "The following line didn't have %s columns: %s" % (group_col + 1, line) )
        if ignorecase == 1:
            return item.lower()
        return item
    for key, line_list in groupby(tmpfile, key=is_new_item):
        # One value-accumulator list per requested aggregation.
        op_vals = [ [] for _ in ops ]
        out_str = key
        for line in line_list:
            fields = line.strip().split("\t")
            for i, col in enumerate(cols):
                col = int(col) - 1 # cXX from galaxy is 1-based
                try:
                    val = fields[col].strip()
                    op_vals[i].append(val)
                except IndexError:
                    sys.stderr.write( 'Could not access the value for column %s on line: "%s". Make sure file is tab-delimited.\n' % (col + 1, line) )
                    sys.exit( 1 )
        # Generate string for each op for this group
        for i, op in enumerate( ops ):
            data = op_vals[i]
            rval = ""
            if op == "mode":
                rval = mode( data )
            elif op == "length":
                rval = len( data )
            elif op == "random":
                rval = random.choice(data)
            elif op in ['cat', 'cat_uniq']:
                if op == 'cat_uniq':
                    data = numpy.unique(data)
                rval = ','.join(data)
            elif op == "unique":
                rval = len( numpy.unique(data) )
            else:
                # some kind of numpy fn
                try:
                    data = map(float, data)
                except ValueError:
                    sys.stderr.write( "Operation %s expected number values but got %s instead.\n" % (op, data) )
                    sys.exit( 1 )
                rval = getattr(numpy, op)( data )
                if round_val[i] == 'yes':
                    rval = int(round(rval))
                else:
                    rval = '%g' % rval
            out_str += "\t%s" % rval
        fout.write(out_str + "\n")
    # Generate a useful info message.
    msg = "--Group by c%d: " % (group_col + 1)
    for i, op in enumerate(ops):
        if op == 'cat':
            op = 'concat'
        elif op == 'cat_uniq':
            op = 'concat_distinct'
        elif op == 'length':
            op = 'count'
        elif op == 'unique':
            op = 'count_distinct'
        elif op == 'random':
            op = 'randomly_pick'
        msg += op + "[c" + cols[i] + "] "
    print msg
    fout.close()
    tmpfile.close()
if __name__ == "__main__":
    main()
| 30.531073 | 150 | 0.511288 |
"""
This tool provides the SQL "group by" functionality.
"""
import commands
import random
import sys
import tempfile
from itertools import groupby
import numpy
def stop_err(msg):
sys.stderr.write(msg)
sys.exit()
def mode(data):
counts = {}
for x in data:
counts[x] = counts.get(x, 0) + 1
maxcount = max(counts.values())
modelist = []
for x in counts:
if counts[x] == maxcount:
modelist.append( str(x) )
return ','.join(modelist)
def main():
inputfile = sys.argv[2]
ignorecase = int(sys.argv[4])
ops = []
cols = []
round_val = []
if sys.argv[5] != "None":
asciitodelete = sys.argv[5]
if asciitodelete:
oldfile = open(inputfile, 'r')
newinputfile = "input_cleaned.tsv"
newfile = open(newinputfile, 'w')
asciitodelete = asciitodelete.split(',')
for i in range(len(asciitodelete)):
asciitodelete[i] = chr(int(asciitodelete[i]))
for line in oldfile:
if line[0] not in asciitodelete:
newfile.write(line)
oldfile.close()
newfile.close()
inputfile = newinputfile
for var in sys.argv[6:]:
op, col, do_round = var.split()
ops.append(op)
cols.append(col)
round_val.append(do_round)
"""
At this point, ops, cols and rounds will look something like this:
ops: ['mean', 'min', 'c']
cols: ['1', '3', '4']
round_val: ['no', 'yes' 'no']
"""
try:
group_col = int(sys.argv[3]) - 1
except:
stop_err( "Group column not specified." )
tmpfile = tempfile.NamedTemporaryFile()
try:
"""
The -k option for the Posix sort command is as follows:
-k, --key=POS1[,POS2]
start a key at POS1, end it at POS2 (origin 1)
In other words, column positions start at 1 rather than 0, so
we need to add 1 to group_col.
if POS2 is not specified, the newer versions of sort will consider the entire line for sorting. To prevent this, we set POS2=POS1.
"""
case = ''
if ignorecase == 1:
case = '-f'
command_line = "sort -t ' ' %s -k%s,%s -o %s %s" % (case, group_col + 1, group_col + 1, tmpfile.name, inputfile)
except Exception as exc:
stop_err( 'Initialization error -> %s' % str(exc) )
error_code, stdout = commands.getstatusoutput(command_line)
if error_code != 0:
stop_err( "Sorting input dataset resulted in error: %s: %s" % ( error_code, stdout ))
fout = open(sys.argv[1], "w")
def is_new_item(line):
try:
item = line.strip().split("\t")[group_col]
except IndexError:
stop_err( "The following line didn't have %s columns: %s" % (group_col + 1, line) )
if ignorecase == 1:
return item.lower()
return item
for key, line_list in groupby(tmpfile, key=is_new_item):
op_vals = [ [] for _ in ops ]
out_str = key
for line in line_list:
fields = line.strip().split("\t")
for i, col in enumerate(cols):
col = int(col) - 1 # cXX from galaxy is 1-based
try:
val = fields[col].strip()
op_vals[i].append(val)
except IndexError:
sys.stderr.write( 'Could not access the value for column %s on line: "%s". Make sure file is tab-delimited.\n' % (col + 1, line) )
sys.exit( 1 )
# Generate string for each op for this group
for i, op in enumerate( ops ):
data = op_vals[i]
rval = ""
if op == "mode":
rval = mode( data )
elif op == "length":
rval = len( data )
elif op == "random":
rval = random.choice(data)
elif op in ['cat', 'cat_uniq']:
if op == 'cat_uniq':
data = numpy.unique(data)
rval = ','.join(data)
elif op == "unique":
rval = len( numpy.unique(data) )
else:
# some kind of numpy fn
try:
data = map(float, data)
except ValueError:
sys.stderr.write( "Operation %s expected number values but got %s instead.\n" % (op, data) )
sys.exit( 1 )
rval = getattr(numpy, op)( data )
if round_val[i] == 'yes':
rval = int(round(rval))
else:
rval = '%g' % rval
out_str += "\t%s" % rval
fout.write(out_str + "\n")
# Generate a useful info message.
msg = "--Group by c%d: " % (group_col + 1)
for i, op in enumerate(ops):
if op == 'cat':
op = 'concat'
elif op == 'cat_uniq':
op = 'concat_distinct'
elif op == 'length':
op = 'count'
elif op == 'unique':
op = 'count_distinct'
elif op == 'random':
op = 'randomly_pick'
msg += op + "[c" + cols[i] + "] "
print msg
fout.close()
tmpfile.close()
if __name__ == "__main__":
main()
| false | true |
f7f80538003027deb13f8bdc9fb62edef9688a8c | 1,879 | py | Python | 3rdparty/openmm/wrappers/python/tests/TestElement.py | merkys/MMB | 0531385b8367405e1188e31c3eef7aa4cc50170b | [
"MIT"
] | 5 | 2020-07-31T17:33:03.000Z | 2022-01-01T19:24:37.000Z | 3rdparty/openmm/wrappers/python/tests/TestElement.py | merkys/MMB | 0531385b8367405e1188e31c3eef7aa4cc50170b | [
"MIT"
] | 11 | 2020-06-16T05:05:42.000Z | 2022-03-30T09:59:14.000Z | 3rdparty/openmm/wrappers/python/tests/TestElement.py | merkys/MMB | 0531385b8367405e1188e31c3eef7aa4cc50170b | [
"MIT"
] | 9 | 2020-01-24T12:02:37.000Z | 2020-10-16T06:23:56.000Z | import pickle
import random
from simtk.unit import dalton, is_quantity
from simtk.openmm.app import element
import unittest
class TestElement(unittest.TestCase):
    """Behavioral tests for simtk.openmm.app.element."""
    def test_immutable(self):
        # Assigning to an Element attribute must be rejected.
        def attempt_write():
            element.sulfur.mass = 100*dalton
        self.assertRaises(AttributeError, attempt_write)
    def test_pickleable(self):
        restored = pickle.loads(pickle.dumps(element.sulfur))
        # The pickle round trip must hand back the original singleton,
        # not a freshly constructed Element.
        self.assertEqual(element.sulfur, restored)
        self.assertTrue(element.sulfur is restored)
    def test_attributes(self):
        hydrogen = element.hydrogen
        self.assertEqual(hydrogen.atomic_number, 1)
        self.assertEqual(hydrogen.symbol, 'H')
        self.assertEqual(hydrogen.name, 'hydrogen')
        self.assertEqual(hydrogen.mass, 1.007947 * dalton)
    def test_getByMass(self):
        """ Tests the getByMass method """
        def closest_by_scan(target):
            # Reference answer: linear scan over every element, keeping
            # the one with the smallest mass difference.
            best_gap, best = target, None
            for candidate in sorted(element.Element._elements_by_symbol.values(),
                                    key=lambda e: e.mass):
                gap = abs(candidate.mass._value - target)
                if gap < best_gap:
                    best_gap, best = gap, candidate
            return best
        # Spot-check 500 random masses in [0, 200).
        for _ in range(500):
            target = random.random() * 200
            self.assertTrue(element.Element.getByMass(target) is closest_by_scan(target))
if __name__ == '__main__':
    unittest.main()
| 35.45283 | 79 | 0.619478 | import pickle
import random
from simtk.unit import dalton, is_quantity
from simtk.openmm.app import element
import unittest
class TestElement(unittest.TestCase):
def test_immutable(self):
def modifyElement():
element.sulfur.mass = 100*dalton
self.assertRaises(AttributeError, modifyElement)
def test_pickleable(self):
newsulfur = pickle.loads(pickle.dumps(element.sulfur))
self.assertEqual(element.sulfur, newsulfur)
self.assertTrue(element.sulfur is newsulfur)
def test_attributes(self):
self.assertEqual(element.hydrogen.atomic_number, 1)
self.assertEqual(element.hydrogen.symbol, 'H')
self.assertEqual(element.hydrogen.name, 'hydrogen')
self.assertEqual(element.hydrogen.mass, 1.007947 * dalton)
def test_getByMass(self):
def exhaustive_search(mass):
min_diff = mass
closest_element = None
for elem in sorted(element.Element._elements_by_symbol.values(),
key=lambda x:x.mass):
diff = abs(elem.mass._value - mass)
if diff < min_diff:
min_diff = diff
closest_element = elem
return closest_element
for i in range(500):
mass = random.random() * 200
elem = element.Element.getByMass(mass)
self.assertTrue(elem is exhaustive_search(mass))
if __name__ == '__main__':
unittest.main()
| true | true |
f7f8054ffbb949d6a816f432ca63fe411102b096 | 5,278 | py | Python | qlib/contrib/data/highfreq_handler.py | SunsetWolf/qlib | 89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5 | [
"MIT"
] | 1 | 2021-12-14T13:48:38.000Z | 2021-12-14T13:48:38.000Z | qlib/contrib/data/highfreq_handler.py | SunsetWolf/qlib | 89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5 | [
"MIT"
] | null | null | null | qlib/contrib/data/highfreq_handler.py | SunsetWolf/qlib | 89972f6c6f9fa629b4f74093d4ba1e93c9f7a5e5 | [
"MIT"
] | null | null | null | from qlib.data.dataset.handler import DataHandler, DataHandlerLP
EPSILON = 1e-4
class HighFreqHandler(DataHandlerLP):
    """Day-normalized 1-minute price/volume feature handler.

    Prices ($open/$high/$low/$close/$vwap) for the current and previous
    trading day are normalized by the previous day's last close; volumes are
    normalized by a trailing rolling mean.  Bars where trading is paused are
    masked out via the ``$hx_paused_num`` flag.
    """

    def __init__(
        self,
        instruments="csi300",
        start_time=None,
        end_time=None,
        infer_processors=None,
        learn_processors=None,
        fit_start_time=None,
        fit_end_time=None,
        drop_raw=True,
    ):
        """Initialize the handler with a 1-minute QlibDataLoader.

        Parameters
        ----------
        instruments :
            instrument universe passed to the data loader.
        start_time, end_time :
            data range of the handler.
        infer_processors, learn_processors : list of dict, optional
            processor configurations; each entry's "kwargs" dict is updated
            in place with the fit window.  Defaults to an empty list.
        fit_start_time, fit_end_time :
            fit window injected into every processor's kwargs.
        drop_raw : bool
            whether to drop the raw data after processing.
        """
        # Fix: use None sentinels instead of mutable [] default arguments.
        # Python evaluates default values only once per process, so a shared
        # [] default is a latent aliasing bug (and check_transform_proc
        # mutates the dicts reachable from these lists).
        if infer_processors is None:
            infer_processors = []
        if learn_processors is None:
            learn_processors = []

        def check_transform_proc(proc_l):
            # Inject the fit window into each processor config.
            # NOTE: the per-processor "kwargs" dicts are modified in place.
            new_l = []
            for p in proc_l:
                p["kwargs"].update(
                    {
                        "fit_start_time": fit_start_time,
                        "fit_end_time": fit_end_time,
                    }
                )
                new_l.append(p)
            return new_l

        infer_processors = check_transform_proc(infer_processors)
        learn_processors = check_transform_proc(learn_processors)

        data_loader = {
            "class": "QlibDataLoader",
            "kwargs": {
                "config": self.get_feature_config(),
                "swap_level": False,
                "freq": "1min",
            },
        }
        super().__init__(
            instruments=instruments,
            start_time=start_time,
            end_time=end_time,
            data_loader=data_loader,
            infer_processors=infer_processors,
            learn_processors=learn_processors,
            drop_raw=drop_raw,
        )

    def get_feature_config(self):
        """Build the qlib expression strings for the high-frequency features.

        Returns
        -------
        tuple(list of str, list of str)
            the expression strings and the matching feature names.
        """
        fields = []
        names = []
        # Use field {1} when present, otherwise fall back to {0}.
        template_if = "If(IsNull({1}), {0}, {1})"
        # Mask out bars where trading is paused/suspended.
        template_paused = "Select(Gt($hx_paused_num, 1.001), {0})"

        def get_normalized_price_feature(price_field, shift=0):
            # norm with the close price of 237th minute of yesterday.
            # NOTE(review): the expression uses Ref(..., 243); confirm the
            # intended offset vs. this comment's "237th minute".
            if shift == 0:
                template_norm = "{0}/DayLast(Ref({1}, 243))"
            else:
                template_norm = "Ref({0}, " + str(shift) + ")/DayLast(Ref({1}, 243))"
            template_fillnan = "FFillNan({0})"
            # calculate -> ffill -> remove paused
            feature_ops = template_paused.format(
                template_fillnan.format(
                    template_norm.format(template_if.format("$close", price_field), template_fillnan.format("$close"))
                )
            )
            return feature_ops

        # Today's prices, normalized by yesterday's last close.
        fields += [get_normalized_price_feature("$open", 0)]
        fields += [get_normalized_price_feature("$high", 0)]
        fields += [get_normalized_price_feature("$low", 0)]
        fields += [get_normalized_price_feature("$close", 0)]
        fields += [get_normalized_price_feature("$vwap", 0)]
        names += ["$open", "$high", "$low", "$close", "$vwap"]

        # Yesterday's prices (shifted back one 240-bar trading day).
        fields += [get_normalized_price_feature("$open", 240)]
        fields += [get_normalized_price_feature("$high", 240)]
        fields += [get_normalized_price_feature("$low", 240)]
        fields += [get_normalized_price_feature("$close", 240)]
        fields += [get_normalized_price_feature("$vwap", 240)]
        names += ["$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1"]

        # Volume, normalized by yesterday's 7200-bar rolling mean; calculate
        # and fill nan with 0, then clamp negatives to 0.
        template_gzero = "If(Ge({0}, 0), {0}, 0)"
        fields += [
            template_gzero.format(
                template_paused.format(
                    "If(IsNull({0}), 0, {0})".format("{0}/Ref(DayLast(Mean({0}, 7200)), 240)".format("$volume"))
                )
            )
        ]
        names += ["$volume"]
        fields += [
            template_gzero.format(
                template_paused.format(
                    "If(IsNull({0}), 0, {0})".format(
                        "Ref({0}, 240)/Ref(DayLast(Mean({0}, 7200)), 240)".format("$volume")
                    )
                )
            )
        ]
        names += ["$volume_1"]
        return fields, names
class HighFreqBacktestHandler(DataHandler):
    """Raw (un-normalized) 1-minute price/volume handler for backtesting."""

    def __init__(
        self,
        instruments="csi300",
        start_time=None,
        end_time=None,
    ):
        """Create a handler backed by a 1-minute QlibDataLoader."""
        loader_config = {
            "class": "QlibDataLoader",
            "kwargs": {
                "config": self.get_feature_config(),
                "swap_level": False,
                "freq": "1min",
            },
        }
        super().__init__(
            instruments=instruments,
            start_time=start_time,
            end_time=end_time,
            data_loader=loader_config,
        )

    def get_feature_config(self):
        """Build the qlib expression strings for the raw backtest features.

        Returns a (fields, names) pair of parallel lists.
        """
        # Fall back to {0} when {1} is missing.
        fill_with = "If(IsNull({1}), {0}, {1})"
        # Drop bars where trading is paused/suspended.
        drop_paused = "Select(Gt($hx_paused_num, 1.001), {0})"
        ffill = "FFillNan({0})"

        fields = [
            # Forward-filled close with paused bars removed.
            ffill.format(drop_paused.format("$close")),
            # VWAP, falling back to the forward-filled close when missing.
            drop_paused.format(fill_with.format(ffill.format("$close"), "$vwap")),
            # Volume and adjustment factor, with NaNs replaced by 0.
            drop_paused.format("If(IsNull({0}), 0, {0})".format("$volume")),
            drop_paused.format("If(IsNull({0}), 0, {0})".format("$factor")),
        ]
        names = ["$close0", "$vwap0", "$volume0", "$factor0"]
        return fields, names
| 31.987879 | 118 | 0.504737 | from qlib.data.dataset.handler import DataHandler, DataHandlerLP
EPSILON = 1e-4
class HighFreqHandler(DataHandlerLP):
def __init__(
self,
instruments="csi300",
start_time=None,
end_time=None,
infer_processors=[],
learn_processors=[],
fit_start_time=None,
fit_end_time=None,
drop_raw=True,
):
def check_transform_proc(proc_l):
new_l = []
for p in proc_l:
p["kwargs"].update(
{
"fit_start_time": fit_start_time,
"fit_end_time": fit_end_time,
}
)
new_l.append(p)
return new_l
infer_processors = check_transform_proc(infer_processors)
learn_processors = check_transform_proc(learn_processors)
data_loader = {
"class": "QlibDataLoader",
"kwargs": {
"config": self.get_feature_config(),
"swap_level": False,
"freq": "1min",
},
}
super().__init__(
instruments=instruments,
start_time=start_time,
end_time=end_time,
data_loader=data_loader,
infer_processors=infer_processors,
learn_processors=learn_processors,
drop_raw=drop_raw,
)
def get_feature_config(self):
fields = []
names = []
template_if = "If(IsNull({1}), {0}, {1})"
template_paused = "Select(Gt($hx_paused_num, 1.001), {0})"
def get_normalized_price_feature(price_field, shift=0):
if shift == 0:
template_norm = "{0}/DayLast(Ref({1}, 243))"
else:
template_norm = "Ref({0}, " + str(shift) + ")/DayLast(Ref({1}, 243))"
template_fillnan = "FFillNan({0})"
feature_ops = template_paused.format(
template_fillnan.format(
template_norm.format(template_if.format("$close", price_field), template_fillnan.format("$close"))
)
)
return feature_ops
fields += [get_normalized_price_feature("$open", 0)]
fields += [get_normalized_price_feature("$high", 0)]
fields += [get_normalized_price_feature("$low", 0)]
fields += [get_normalized_price_feature("$close", 0)]
fields += [get_normalized_price_feature("$vwap", 0)]
names += ["$open", "$high", "$low", "$close", "$vwap"]
fields += [get_normalized_price_feature("$open", 240)]
fields += [get_normalized_price_feature("$high", 240)]
fields += [get_normalized_price_feature("$low", 240)]
fields += [get_normalized_price_feature("$close", 240)]
fields += [get_normalized_price_feature("$vwap", 240)]
names += ["$open_1", "$high_1", "$low_1", "$close_1", "$vwap_1"]
template_gzero = "If(Ge({0}, 0), {0}, 0)"
fields += [
template_gzero.format(
template_paused.format(
"If(IsNull({0}), 0, {0})".format("{0}/Ref(DayLast(Mean({0}, 7200)), 240)".format("$volume"))
)
)
]
names += ["$volume"]
fields += [
template_gzero.format(
template_paused.format(
"If(IsNull({0}), 0, {0})".format(
"Ref({0}, 240)/Ref(DayLast(Mean({0}, 7200)), 240)".format("$volume")
)
)
)
]
names += ["$volume_1"]
return fields, names
class HighFreqBacktestHandler(DataHandler):
def __init__(
self,
instruments="csi300",
start_time=None,
end_time=None,
):
data_loader = {
"class": "QlibDataLoader",
"kwargs": {
"config": self.get_feature_config(),
"swap_level": False,
"freq": "1min",
},
}
super().__init__(
instruments=instruments,
start_time=start_time,
end_time=end_time,
data_loader=data_loader,
)
def get_feature_config(self):
fields = []
names = []
template_if = "If(IsNull({1}), {0}, {1})"
template_paused = "Select(Gt($hx_paused_num, 1.001), {0})"
template_fillnan = "FFillNan({0})"
fields += [
template_fillnan.format(template_paused.format("$close")),
]
names += ["$close0"]
fields += [
template_paused.format(
template_if.format(
template_fillnan.format("$close"),
"$vwap",
)
)
]
names += ["$vwap0"]
fields += [template_paused.format("If(IsNull({0}), 0, {0})".format("$volume"))]
names += ["$volume0"]
fields += [template_paused.format("If(IsNull({0}), 0, {0})".format("$factor"))]
names += ["$factor0"]
return fields, names
| true | true |
f7f8061269ba51737ab58c9ec598582dd02691c2 | 124,434 | py | Python | tensorflow/python/ops/ragged/ragged_tensor.py | Saduf2019/tensorflow-1 | d36aab4a474da352accfca38edb5d0f1c584ed9b | [
"Apache-2.0"
] | 7 | 2022-03-04T21:14:47.000Z | 2022-03-22T23:07:39.000Z | tensorflow/python/ops/ragged/ragged_tensor.py | Martaw-code/tensorflow | f210b2b2f8489ffe97edac886238242288950439 | [
"Apache-2.0"
] | 1 | 2022-03-08T18:28:46.000Z | 2022-03-08T18:37:20.000Z | tensorflow/python/ops/ragged/ragged_tensor.py | Martaw-code/tensorflow | f210b2b2f8489ffe97edac886238242288950439 | [
"Apache-2.0"
] | null | null | null | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Classes for storing ragged tensors and their values."""
import functools
import operator
import typing
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.types import core as core_types
from tensorflow.python.types import internal as internal_types
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
# pylint: disable=protected-access
_convert_row_partition = RowPartition._convert_row_partition
# pylint: enable=protected-access
#===============================================================================
# RaggedTensor
#===============================================================================
@tf_export("RaggedTensor")
class RaggedTensor(composite_tensor.CompositeTensor,
internal_types.NativeObject):
"""Represents a ragged tensor.
A `RaggedTensor` is a tensor with one or more *ragged dimensions*, which are
dimensions whose slices may have different lengths. For example, the inner
(column) dimension of `rt=[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is ragged,
since the column slices (`rt[0, :]`, ..., `rt[4, :]`) have different lengths.
Dimensions whose slices all have the same length are called *uniform
dimensions*. The outermost dimension of a `RaggedTensor` is always uniform,
since it consists of a single slice (and so there is no possibility for
differing slice lengths).
The total number of dimensions in a `RaggedTensor` is called its *rank*,
and the number of ragged dimensions in a `RaggedTensor` is called its
*ragged-rank*. A `RaggedTensor`'s ragged-rank is fixed at graph creation
time: it can't depend on the runtime values of `Tensor`s, and can't vary
dynamically for different session runs.
Note that the `__init__` constructor is private. Please use one of the
following methods to construct a `RaggedTensor`:
* `tf.RaggedTensor.from_row_lengths`
* `tf.RaggedTensor.from_value_rowids`
* `tf.RaggedTensor.from_row_splits`
* `tf.RaggedTensor.from_row_starts`
* `tf.RaggedTensor.from_row_limits`
* `tf.RaggedTensor.from_nested_row_splits`
* `tf.RaggedTensor.from_nested_row_lengths`
* `tf.RaggedTensor.from_nested_value_rowids`
### Potentially Ragged Tensors
Many ops support both `Tensor`s and `RaggedTensor`s
(see [tf.ragged](https://www.tensorflow.org/api_docs/python/tf/ragged) for a
full listing). The term "potentially ragged tensor" may be used to refer to a
tensor that might be either a `Tensor` or a `RaggedTensor`. The ragged-rank
of a `Tensor` is zero.
### Documenting RaggedTensor Shapes
When documenting the shape of a RaggedTensor, ragged dimensions can be
indicated by enclosing them in parentheses. For example, the shape of
a 3-D `RaggedTensor` that stores the fixed-size word embedding for each
word in a sentence, for each sentence in a batch, could be written as
`[num_sentences, (num_words), embedding_size]`. The parentheses around
`(num_words)` indicate that dimension is ragged, and that the length
of each element list in that dimension may vary for each item.
### Component Tensors
Internally, a `RaggedTensor` consists of a concatenated list of values that
are partitioned into variable-length rows. In particular, each `RaggedTensor`
consists of:
* A `values` tensor, which concatenates the variable-length rows into a
flattened list. For example, the `values` tensor for
`[[3, 1, 4, 1], [], [5, 9, 2], [6], []]` is `[3, 1, 4, 1, 5, 9, 2, 6]`.
* A `row_splits` vector, which indicates how those flattened values are
divided into rows. In particular, the values for row `rt[i]` are stored
in the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
### Alternative Row-Partitioning Schemes
In addition to `row_splits`, ragged tensors provide support for five other
row-partitioning schemes:
* `row_lengths`: a vector with shape `[nrows]`, which specifies the length
of each row.
* `value_rowids` and `nrows`: `value_rowids` is a vector with shape
`[nvals]`, corresponding one-to-one with `values`, which specifies
each value's row index. In particular, the row `rt[row]` consists of the
values `rt.values[j]` where `value_rowids[j]==row`. `nrows` is an
integer scalar that specifies the number of rows in the
`RaggedTensor`. (`nrows` is used to indicate trailing empty rows.)
* `row_starts`: a vector with shape `[nrows]`, which specifies the start
offset of each row. Equivalent to `row_splits[:-1]`.
* `row_limits`: a vector with shape `[nrows]`, which specifies the stop
offset of each row. Equivalent to `row_splits[1:]`.
* `uniform_row_length`: A scalar tensor, specifying the length of every
row. This row-partitioning scheme may only be used if all rows have
the same length.
Example: The following ragged tensors are equivalent, and all represent the
nested list `[[3, 1, 4, 1], [], [5, 9, 2], [6], []]`.
>>> values = [3, 1, 4, 1, 5, 9, 2, 6]
>>> RaggedTensor.from_row_splits(values, row_splits=[0, 4, 4, 7, 8, 8])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_row_lengths(values, row_lengths=[4, 0, 3, 1, 0])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_value_rowids(
... values, value_rowids=[0, 0, 0, 0, 2, 2, 2, 3], nrows=5)
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_row_starts(values, row_starts=[0, 4, 4, 7, 8])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_row_limits(values, row_limits=[4, 4, 7, 8, 8])
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
>>> RaggedTensor.from_uniform_row_length(values, uniform_row_length=2)
<tf.RaggedTensor [[3, 1], [4, 1], [5, 9], [2, 6]]>
### Multiple Ragged Dimensions
`RaggedTensor`s with multiple ragged dimensions can be defined by using
a nested `RaggedTensor` for the `values` tensor. Each nested `RaggedTensor`
adds a single ragged dimension.
>>> inner_rt = RaggedTensor.from_row_splits( # =rt1 from above
... values=[3, 1, 4, 1, 5, 9, 2, 6], row_splits=[0, 4, 4, 7, 8, 8])
>>> outer_rt = RaggedTensor.from_row_splits(
... values=inner_rt, row_splits=[0, 3, 3, 5])
>>> print(outer_rt.to_list())
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
>>> print(outer_rt.ragged_rank)
2
The factory function `RaggedTensor.from_nested_row_splits` may be used to
construct a `RaggedTensor` with multiple ragged dimensions directly, by
providing a list of `row_splits` tensors:
>>> RaggedTensor.from_nested_row_splits(
... flat_values=[3, 1, 4, 1, 5, 9, 2, 6],
... nested_row_splits=([0, 3, 3, 5], [0, 4, 4, 7, 8, 8])).to_list()
[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]
### Uniform Inner Dimensions
`RaggedTensor`s with uniform inner dimensions can be defined
by using a multidimensional `Tensor` for `values`.
>>> rt = RaggedTensor.from_row_splits(values=tf.ones([5, 3], tf.int32),
... row_splits=[0, 2, 5])
>>> print(rt.to_list())
[[[1, 1, 1], [1, 1, 1]],
[[1, 1, 1], [1, 1, 1], [1, 1, 1]]]
>>> print(rt.shape)
(2, None, 3)
### Uniform Outer Dimensions
`RaggedTensor`s with uniform outer dimensions can be defined by using
one or more `RaggedTensor` with a `uniform_row_length` row-partitioning
tensor. For example, a `RaggedTensor` with shape `[2, 2, None]` can be
constructed with this method from a `RaggedTensor` values with shape
`[4, None]`:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> print(values.shape)
(4, None)
>>> rt6 = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> print(rt6)
<tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
>>> print(rt6.shape)
(2, 2, None)
Note that `rt6` only contains one ragged dimension (the innermost
dimension). In contrast, if `from_row_splits` is used to construct a similar
`RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
>>> rt7 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
>>> print(rt7.shape)
(2, None, None)
Uniform and ragged outer dimensions may be interleaved, meaning that a
tensor with any combination of ragged and uniform dimensions may be created.
For example, a RaggedTensor `t4` with shape `[3, None, 4, 8, None, 2]` could
be constructed as follows:
```python
t0 = tf.zeros([1000, 2]) # Shape: [1000, 2]
t1 = RaggedTensor.from_row_lengths(t0, [...]) # [160, None, 2]
t2 = RaggedTensor.from_uniform_row_length(t1, 8) # [20, 8, None, 2]
t3 = RaggedTensor.from_uniform_row_length(t2, 4) # [5, 4, 8, None, 2]
t4 = RaggedTensor.from_row_lengths(t3, [...]) # [3, None, 4, 8, None, 2]
```
"""
#=============================================================================
# Constructor (private)
#=============================================================================
  @doc_controls.do_not_generate_docs
  def __init__(self, values, row_partition, internal=False):
    """Creates a `RaggedTensor` with a specified partitioning for `values`.

    This constructor is private -- please use one of the following ops to
    build `RaggedTensor`s:

      * `tf.RaggedTensor.from_row_lengths`
      * `tf.RaggedTensor.from_value_rowids`
      * `tf.RaggedTensor.from_row_splits`
      * `tf.RaggedTensor.from_row_starts`
      * `tf.RaggedTensor.from_row_limits`
      * `tf.RaggedTensor.from_nested_row_splits`
      * `tf.RaggedTensor.from_nested_row_lengths`
      * `tf.RaggedTensor.from_nested_value_rowids`

    Args:
      values: A potentially ragged tensor of any dtype and shape `[nvals, ...]`.
      row_partition: A `RowPartition` object, representing the arrangement of
        the lists at the top level.
      internal: True if the constructor is being called by one of the factory
        methods.  If false, an exception will be raised.

    Raises:
      ValueError: If internal = False.  Note that this method is intended only
        for internal use.
      TypeError: If values is not a `RaggedTensor` or `Tensor`, or
        row_partition is not a `RowPartition`.
    """
    if not internal:
      raise ValueError("RaggedTensor constructor is private; please use one "
                       "of the factory methods instead (e.g., "
                       "RaggedTensor.from_row_lengths())")
    # Rejects values that are not an accepted ragged-values type (per the
    # TypeError contract documented above).
    _assert_is_supported_ragged_values_type(values)
    if not isinstance(row_partition, RowPartition):
      raise TypeError(f"Argument `row_partition` must be a RowPartition. "
                      f"Received {row_partition}.")
    # Validate shapes.
    values.shape.with_rank_at_least(1)
    if isinstance(values, RaggedTensor):
      # Nested RaggedTensors must use one consistent row-partitioning dtype
      # at every level.
      # pylint: disable=protected-access
      assert row_partition.dtype == values._row_partition.dtype
    self._values = values
    self._row_partition = row_partition
#=============================================================================
# Factory Methods
#=============================================================================
@classmethod
def _from_row_partition(cls, values, row_partition, validate=True):
"""Creates a `RaggedTensor` with a row partition.
This is used as a way for RaggedTensors to share row partitions.
The outer dimension of values must be equal to `partition.nvals()`.
Args:
values: A potentially ragged tensor.
row_partition: a `RowPartition`: can be shared between tensors.
validate: If true, then use assertions to check that the arguments form a
valid `RaggedTensor`.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If partition.nvals() != _nrows(values)
"""
if not isinstance(row_partition, RowPartition):
raise TypeError(f"Argument `row_partition` must be a RowPartition. "
f"Received {row_partition}.")
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
values, row_partition = cls._convert_values_and_partition(
values, row_partition, "partition")
if row_partition.has_precomputed_value_rowids():
value_rowids_shape = row_partition.value_rowids().shape
values.shape[:1].assert_is_compatible_with(value_rowids_shape)
if validate:
msg = "Arguments to _from_row_partition do not form a valid RaggedTensor"
nvals = _nrows(values, row_partition.dtype)
checks = [
check_ops.assert_equal(
math_ops.cast(row_partition.nvals(), row_partition.dtype),
nvals,
message=msg),
]
if not isinstance(values, RaggedTensor):
checks.append(check_ops.assert_rank_at_least(values, 1))
row_partition = row_partition.with_dependencies(checks)
return cls(values=values, internal=True, row_partition=row_partition)
@classmethod
@dispatch.add_dispatch_support
def from_value_rowids(cls,
values,
value_rowids,
nrows=None,
name=None,
validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `value_rowids`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values[i] for i in range(len(values)) if value_rowids[i] == row]
for row in range(nrows)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
value_rowids: A 1-D integer tensor with shape `[nvals]`, which corresponds
one-to-one with `values`, and specifies each value's row index. Must be
nonnegative, and must be sorted in ascending order.
nrows: An integer scalar specifying the number of rows. This should be
specified if the `RaggedTensor` may containing empty training rows. Must
be greater than `value_rowids[-1]` (or zero if `value_rowids` is empty).
Defaults to `value_rowids[-1] + 1` (or zero if `value_rowids` is empty).
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `nrows` is incompatible with `value_rowids`.
#### Example:
>>> print(tf.RaggedTensor.from_value_rowids(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... value_rowids=[0, 0, 0, 0, 2, 2, 2, 3],
... nrows=5))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromValueRowIds",
[values, value_rowids, nrows]):
row_partition = RowPartition.from_value_rowids(
value_rowids=value_rowids,
nrows=nrows,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_splits(cls, values, row_splits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_splits`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [values[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_splits: A 1-D integer tensor with shape `[nrows+1]`. Must not be
empty, and must be sorted in ascending order. `row_splits[0]` must be
zero and `row_splits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
Raises:
ValueError: If `row_splits` is an empty list.
#### Example:
>>> print(tf.RaggedTensor.from_row_splits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_splits=[0, 4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
row_partition = RowPartition.from_row_splits(
row_splits=row_splits,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_lengths`.
The returned `RaggedTensor` corresponds with the python list defined by:
```python
result = [[values.pop(0) for i in range(length)]
for length in row_lengths]
```
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_lengths: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative. `sum(row_lengths)` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_lengths(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_lengths=[4, 0, 3, 1, 0]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
row_partition = RowPartition.from_row_lengths(
row_lengths=row_lengths,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_starts(cls, values, row_starts, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_starts`.
Equivalent to: `from_row_splits(values, concat([row_starts, nvals]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_starts: A 1-D integer tensor with shape `[nrows]`. Must be
nonnegative and sorted in ascending order. If `nrows>0`, then
`row_starts[0]` must be zero.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_starts(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_starts=[0, 4, 4, 7, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
values = _convert_to_ragged_tensor_values(values)
row_partition = RowPartition.from_row_starts(
row_starts=row_starts,
nvals=_nrows(values),
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
@classmethod
@dispatch.add_dispatch_support
def from_row_limits(cls, values, row_limits, name=None, validate=True):
"""Creates a `RaggedTensor` with rows partitioned by `row_limits`.
Equivalent to: `from_row_splits(values, concat([0, row_limits]))`.
Args:
values: A potentially ragged tensor with shape `[nvals, ...]`.
row_limits: A 1-D integer tensor with shape `[nrows]`. Must be sorted in
ascending order. If `nrows>0`, then `row_limits[-1]` must be `nvals`.
name: A name prefix for the RaggedTensor (optional).
validate: If true, then use assertions to check that the arguments form
a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
since they must be checked for each tensor value.
Returns:
A `RaggedTensor`. `result.rank = values.rank + 1`.
`result.ragged_rank = values.ragged_rank + 1`.
#### Example:
>>> print(tf.RaggedTensor.from_row_limits(
... values=[3, 1, 4, 1, 5, 9, 2, 6],
... row_limits=[4, 4, 7, 8, 8]))
<tf.RaggedTensor [[3, 1, 4, 1], [], [5, 9, 2], [6], []]>
"""
if not isinstance(validate, bool):
raise TypeError(f"Argument `validate` must have type bool. "
f"Received {validate}.")
with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
values = _convert_to_ragged_tensor_values(values)
row_partition = RowPartition.from_row_limits(
row_limits=row_limits,
validate=validate,
dtype_hint=_get_optional_partition_dtype(values))
return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_uniform_row_length(cls,
                              values,
                              uniform_row_length,
                              nrows=None,
                              validate=True,
                              name=None):
    """Creates a `RaggedTensor` with rows partitioned by `uniform_row_length`.
    This method can be used to create `RaggedTensor`s with multiple uniform
    outer dimensions. For example, a `RaggedTensor` with shape `[2, 2, None]`
    can be constructed with this method from a `RaggedTensor` values with shape
    `[4, None]`:
    >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
    >>> print(values.shape)
    (4, None)
    >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
    >>> print(rt1)
    <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
    >>> print(rt1.shape)
    (2, 2, None)
    Note that `rt1` only contains one ragged dimension (the innermost
    dimension). In contrast, if `from_row_splits` is used to construct a similar
    `RaggedTensor`, then that `RaggedTensor` will have two ragged dimensions:
    >>> rt2 = tf.RaggedTensor.from_row_splits(values, [0, 2, 4])
    >>> print(rt2.shape)
    (2, None, None)
    Args:
      values: A potentially ragged tensor with shape `[nvals, ...]`.
      uniform_row_length: A scalar integer tensor. Must be nonnegative. The
        size of the outer axis of `values` must be evenly divisible by
        `uniform_row_length`.
      nrows: The number of rows in the constructed RaggedTensor. If not
        specified, then it defaults to `nvals/uniform_row_length` (or `0` if
        `uniform_row_length==0`). `nrows` only needs to be specified if
        `uniform_row_length` might be zero. `uniform_row_length*nrows` must be
        `nvals`.
      validate: If true, then use assertions to check that the arguments form
        a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
        since they must be checked for each tensor value.
      name: A name prefix for the RaggedTensor (optional).
    Returns:
      A `RaggedTensor` that corresponds with the python list defined by:
      ```python
      result = [[values.pop(0) for i in range(uniform_row_length)]
                for _ in range(nrows)]
      ```
      `result.rank = values.rank + 1`.
      `result.ragged_rank = values.ragged_rank + 1`.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromUniformRowLength",
                        [values, uniform_row_length, nrows]):
      values = _convert_to_ragged_tensor_values(values)
      # Coerce `uniform_row_length` to a tensor whose dtype matches the
      # row-partitioning dtype of `values` (when `values` is ragged).
      uniform_row_length = _convert_row_partition(
          uniform_row_length, "UniformRowLength",
          _get_optional_partition_dtype(values))
      # Size of the outer axis of `values`; RowPartition uses it to check
      # divisibility and to derive `nrows` when `nrows` is None.
      nvals = _nvals_uniform_row_length(values, uniform_row_length)
      row_partition = RowPartition.from_uniform_row_length(
          uniform_row_length=uniform_row_length,
          nvals=nvals,
          nrows=nrows,
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_nested_value_rowids(cls,
                               flat_values,
                               nested_value_rowids,
                               nested_nrows=None,
                               name=None,
                               validate=True):
    """Creates a `RaggedTensor` from a nested list of `value_rowids` tensors.
    Equivalent to:
    ```python
    result = flat_values
    for (rowids, nrows) in reversed(zip(nested_value_rowids, nested_nrows)):
      result = from_value_rowids(result, rowids, nrows)
    ```
    Args:
      flat_values: A potentially ragged tensor.
      nested_value_rowids: A list of 1-D integer tensors. The `i`th tensor is
        used as the `value_rowids` for the `i`th ragged dimension.
      nested_nrows: A list of integer scalars. The `i`th scalar is used as the
        `nrows` for the `i`th ragged dimension.
      name: A name prefix for the RaggedTensor (optional).
      validate: If true, then use assertions to check that the arguments form
        a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
        since they must be checked for each tensor value.
    Returns:
      A `RaggedTensor` (or `flat_values` if `nested_value_rowids` is empty).
    Raises:
      ValueError: If `len(nested_values_rowids) != len(nested_nrows)`.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    # A single Tensor here almost always means the caller forgot the list
    # wrapper; iterating over it would silently produce wrong results.
    if isinstance(nested_value_rowids, ops.Tensor):
      raise TypeError(f"Argument `nested_value_rowids` must be a list of "
                      f"Tensors. Received {nested_value_rowids}.")
    if nested_nrows is None:
      # Let each level infer its own nrows.
      nested_nrows = [None] * len(nested_value_rowids)
    else:
      if isinstance(nested_nrows, ops.Tensor):
        raise TypeError(f"Argument `nested_nrows` must be a list of "
                        f"Tensors. Received {nested_nrows}.")
      if len(nested_nrows) != len(nested_value_rowids):
        raise ValueError(
            f"Argument `nested_nrows` must have the same length as "
            f"argument `nested_value_rowids`. len(nested_nrows) = "
            f"{len(nested_nrows)} vs. len(nested_values_rowids) = "
            f"{len(nested_value_rowids)}.")
    with ops.name_scope(name, "RaggedFromNestedValueRowIds", [flat_values] +
                        list(nested_value_rowids) + list(nested_nrows)):
      # Wrap innermost-first: each iteration adds one outer ragged dimension
      # around the result of the previous one.
      result = flat_values
      for value_rowids, nrows in reversed(
          list(zip(nested_value_rowids, nested_nrows))):
        result = cls.from_value_rowids(
            result, value_rowids, nrows, validate=validate)
      return result
  @classmethod
  @dispatch.add_dispatch_support
  def from_nested_row_splits(cls,
                             flat_values,
                             nested_row_splits,
                             name=None,
                             validate=True):
    """Creates a `RaggedTensor` from a nested list of `row_splits` tensors.
    Equivalent to:
    ```python
    result = flat_values
    for row_splits in reversed(nested_row_splits):
      result = from_row_splits(result, row_splits)
    ```
    Args:
      flat_values: A potentially ragged tensor.
      nested_row_splits: A list of 1-D integer tensors. The `i`th tensor is
        used as the `row_splits` for the `i`th ragged dimension.
      name: A name prefix for the RaggedTensor (optional).
      validate: If true, then use assertions to check that the arguments form
        a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
        since they must be checked for each tensor value.
    Returns:
      A `RaggedTensor` (or `flat_values` if `nested_row_splits` is empty).
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    # Guard against a bare Tensor (a likely caller mistake) before iterating.
    if isinstance(nested_row_splits, ops.Tensor):
      raise TypeError(f"Argument `nested_row_splits` must be a list of "
                      f"Tensors. Received {nested_row_splits}.")
    with ops.name_scope(name, "RaggedFromNestedRowSplits",
                        [flat_values] + list(nested_row_splits)):
      # Wrap innermost-first so each splits tensor adds one outer dimension.
      result = flat_values
      for splits in reversed(nested_row_splits):
        result = cls.from_row_splits(result, splits, validate=validate)
      return result
  @classmethod
  @dispatch.add_dispatch_support
  def from_nested_row_lengths(cls,
                              flat_values,
                              nested_row_lengths,
                              name=None,
                              validate=True):
    """Creates a `RaggedTensor` from a nested list of `row_lengths` tensors.
    Equivalent to:
    ```python
    result = flat_values
    for row_lengths in reversed(nested_row_lengths):
      result = from_row_lengths(result, row_lengths)
    ```
    Args:
      flat_values: A potentially ragged tensor.
      nested_row_lengths: A list of 1-D integer tensors. The `i`th tensor is
        used as the `row_lengths` for the `i`th ragged dimension.
      name: A name prefix for the RaggedTensor (optional).
      validate: If true, then use assertions to check that the arguments form
        a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
        since they must be checked for each tensor value.
    Returns:
      A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    # Guard against a bare Tensor (a likely caller mistake) before iterating.
    if isinstance(nested_row_lengths, ops.Tensor):
      raise TypeError(f"Argument `nested_row_lengths` must be a list of "
                      f"Tensors. Received {nested_row_lengths}.")
    # NOTE(review): the scope name below uses lowercase "lengths", unlike the
    # sibling factories ("RaggedFromNestedRowSplits"); kept as-is because
    # changing it would alter generated op names.
    with ops.name_scope(name, "RaggedFromNestedRowlengths",
                        [flat_values] + list(nested_row_lengths)):
      # Wrap innermost-first so each lengths tensor adds one outer dimension.
      result = flat_values
      for lengths in reversed(nested_row_lengths):
        result = cls.from_row_lengths(result, lengths, validate=validate)
      return result
  @classmethod
  def _from_nested_row_partitions(cls,
                                  flat_values,
                                  nested_row_partitions,
                                  name=None,
                                  validate=True):
    """Creates a `RaggedTensor` from a nested list of row partitions.
    Equivalent to:
    ```python
    result = flat_values
    for row_partition in reversed(nested_row_partitions):
      result = _from_row_partition(result, row_partition)
    ```
    Args:
      flat_values: A potentially ragged tensor.
      nested_row_partitions: A list of row partitions. The `i`th element is
        used as the row partition for the `i`th ragged dimension.
      name: A name prefix for the RaggedTensor (optional).
      validate: If true, then use assertions to check that the arguments form
        a valid `RaggedTensor`. Note: these assertions incur a runtime cost,
        since they must be checked for each tensor value.
    Returns:
      A `RaggedTensor` (or `flat_values` if `nested_row_lengths` is empty).
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    # Reject the two most likely scalar-like mistakes (a single RowPartition
    # or a single Tensor) before attempting to iterate.
    if isinstance(nested_row_partitions, RowPartition):
      raise TypeError(f"Argument `nested_row_partitions` must be a list of "
                      f"RowPartitions. Received {nested_row_partitions}.")
    if isinstance(nested_row_partitions, ops.Tensor):
      raise TypeError(f"Argument `nested_row_partitions` must be a list of "
                      f"RowPartitions. Received {nested_row_partitions}.")
    with ops.name_scope(name, "RaggedFromNestedRowPartitions",
                        [flat_values] + list(nested_row_partitions)):
      # Wrap innermost-first so each partition adds one outer dimension.
      result = flat_values
      for partition in reversed(nested_row_partitions):
        result = cls._from_row_partition(result, partition, validate=validate)
      return result
  @classmethod
  def _convert_values_and_partition(cls, values, row_partition, name):
    """Converts `values` and `partition` to Tensors.
    If `values` is a `RaggedTensor`, then converts `values` and `partition`
    to have compatible row-partitioning dtypes. In particular, if any of the
    row partitioning tensors are `int64`, then all of the other row
    partitioning tensors wil be cast to `int64` (if auto_cast_partition_dtype()
    is true) or an error will be raised (if auto_cast_partition_dtype() is
    false).
    Args:
      values: The `values` for the `RaggedTensor` being constructed.
      row_partition: A RowPartition object for the `RaggedTensor` being
        constructed.
      name: The name of the RowPartition object.
    Returns:
      A tuple (values, partition).
    """
    if not isinstance(row_partition, RowPartition):
      raise TypeError(f"Argument `row_partition` must be a RowPartition. "
                      f"Received {row_partition}.")
    if isinstance(values, RaggedTensor):
      # pylint: disable=protected-access
      if values._row_partition.dtype != row_partition.dtype:
        # Harmonize the dtypes: either auto-cast `values` to the partition's
        # dtype, or fail loudly when auto-casting is disabled.
        if not ragged_config.auto_cast_partition_dtype():
          # pylint: disable=protected-access
          # TODO(edloper): get rid of the `name` parameter.
          raise ValueError(
              f"Argument `row_partition` of RaggedTensor with name: {name} "
              f"must have same dtype as Argument `values`. "
              f"({row_partition.dtype} vs. {values._row_partition.dtype}).")
        values = values.with_row_splits_dtype(row_partition.dtype)
    else:
      # Dense (or other) values: coerce to a tensor usable as ragged values.
      values = _convert_to_ragged_tensor_values(values)
    return (values, row_partition)
#=============================================================================
# Accessors
#=============================================================================
  @property
  def dtype(self):
    """The `DType` of values in this tensor."""
    # Delegates to the (possibly itself ragged) `values` tensor.
    return self._values.dtype
@property
def shape(self):
"""The statically known shape of this ragged tensor.
Returns:
A `TensorShape` containing the statically known shape of this ragged
tensor. Ragged dimensions have a size of `None`.
Examples:
>>> tf.ragged.constant([[0], [1, 2]]).shape
TensorShape([2, None])
>>> tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).shape
TensorShape([2, None, 2])
"""
nrows = self._row_partition.static_nrows
ncols = self._row_partition.static_uniform_row_length
value_shape = self._values.shape[1:]
return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)
  def get_shape(self):
    """The statically known shape of this ragged tensor.
    Returns:
      A `TensorShape` containing the statically known shape of this ragged
      tensor. Ragged dimensions have a size of `None`.
    Alias for `shape` property.
    Examples:
    >>> tf.ragged.constant([[0], [1, 2]]).get_shape()
    TensorShape([2, None])
    >>> tf.ragged.constant(
    ...    [[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1).get_shape()
    TensorShape([2, None, 2])
    """
    # Method form kept for parity with `tf.Tensor.get_shape()`.
    return self.shape
@property
def ragged_rank(self):
"""The number of times the RaggedTensor's flat_values is partitioned.
Examples:
>>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
>>> values.ragged_rank
1
>>> rt = tf.RaggedTensor.from_uniform_row_length(values, 2)
>>> rt.ragged_rank
2
Returns:
A Python `int` indicating the number of times the underlying `flat_values`
Tensor has been partitioned to add a new dimension.
I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
"""
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
  @property
  def values(self):
    """The concatenated rows for this ragged tensor.
    `rt.values` is a potentially ragged tensor formed by flattening the two
    outermost dimensions of `rt` into a single dimension.
    `rt.values.shape = [nvals] + rt.shape[2:]` (where `nvals` is the
    number of items in the outer two dimensions of `rt`).
    `rt.ragged_rank = self.ragged_rank - 1`
    Returns:
      A potentially ragged tensor.
    #### Example:
    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
    """
    # Direct accessor; one level of nesting (contrast with `flat_values`).
    return self._values
@property
def _nested_row_partitions(self):
"""Returns the row partitions for this `RaggedTensor`."""
partitions = [self._row_partition]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
# pylint: disable=protected-access
partitions.append(rt_values._row_partition)
rt_values = rt_values.values
return tuple(partitions)
  @property
  def row_splits(self):
    """The row-split indices for this ragged tensor's `values`.
    `rt.row_splits` specifies where the values for each row begin and end in
    `rt.values`. In particular, the values for row `rt[i]` are stored in
    the slice `rt.values[rt.row_splits[i]:rt.row_splits[i+1]]`.
    Returns:
      A 1-D integer `Tensor` with shape `[self.nrows+1]`.
      The returned tensor is non-empty, and is sorted in ascending order.
      `self.row_splits[0]` is zero, and `self.row_splits[-1]` is equal to
      `self.values.shape[0]`.
    #### Example:
    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.row_splits)  # indices of row splits in rt.values
    tf.Tensor([0 4 4 7 8 8], shape=(6,), dtype=int64)
    """
    # Delegates to the underlying RowPartition, which owns the encoding.
    return self._row_partition.row_splits()
  @property
  def uniform_row_length(self):
    """The length of each row in this ragged tensor, or None if rows are ragged.
    >>> rt1 = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
    >>> print(rt1.uniform_row_length)  # rows are ragged.
    None
    >>> rt2 = tf.RaggedTensor.from_uniform_row_length(
    ...     values=rt1, uniform_row_length=2)
    >>> print(rt2)
    <tf.RaggedTensor [[[1, 2, 3], [4]], [[5, 6], [7, 8, 9, 10]]]>
    >>> print(rt2.uniform_row_length)  # rows are not ragged (all have size 2).
    tf.Tensor(2, shape=(), dtype=int64)
    A RaggedTensor's rows are only considered to be uniform (i.e. non-ragged)
    if it can be determined statically (at graph construction time) that the
    rows all have the same length.
    Returns:
      A scalar integer `Tensor`, specifying the length of every row in this
      ragged tensor (for ragged tensors whose rows are uniform); or `None`
      (for ragged tensors whose rows are ragged).
    """
    # Delegates to the underlying RowPartition.
    return self._row_partition.uniform_row_length()
@property
def flat_values(self):
"""The innermost `values` tensor for this ragged tensor.
Concretely, if `rt.values` is a `Tensor`, then `rt.flat_values` is
`rt.values`; otherwise, `rt.flat_values` is `rt.values.flat_values`.
Conceptually, `flat_values` is the tensor formed by flattening the
outermost dimension and all of the ragged dimensions into a single
dimension.
`rt.flat_values.shape = [nvals] + rt.shape[rt.ragged_rank + 1:]`
(where `nvals` is the number of items in the flattened dimensions).
Returns:
A `Tensor`.
#### Example:
>>> rt = tf.ragged.constant([[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]])
>>> print(rt.flat_values)
tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
"""
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
"""A tuple containing the row_splits for all ragged dimensions.
`rt.nested_row_splits` is a tuple containing the `row_splits` tensors for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_row_splits = (rt.row_splits,) + value_splits` where:
* `value_splits = ()` if `rt.values` is a `Tensor`.
* `value_splits = rt.values.nested_row_splits` otherwise.
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, splits in enumerate(rt.nested_row_splits):
... print('Splits for dimension %d: %s' % (i+1, splits.numpy()))
Splits for dimension 1: [0 3]
Splits for dimension 2: [0 3 3 5]
Splits for dimension 3: [0 4 4 7 8 8]
"""
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
  def value_rowids(self, name=None):
    """Returns the row indices for the `values` in this ragged tensor.
    `rt.value_rowids()` corresponds one-to-one with the outermost dimension of
    `rt.values`, and specifies the row containing each value. In particular,
    the row `rt[row]` consists of the values `rt.values[j]` where
    `rt.value_rowids()[j] == row`.
    Args:
      name: A name prefix for the returned tensor (optional).
    Returns:
      A 1-D integer `Tensor` with shape `self.values.shape[:1]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    #### Example:
    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
    >>> print(rt.value_rowids())  # corresponds 1:1 with rt.values
    tf.Tensor([0 0 0 0 2 2 2 3], shape=(8,), dtype=int64)
    """
    # Delegates to the RowPartition, which may compute (and cache) the
    # rowids encoding from whichever encoding it already holds.
    with ops.name_scope(name, "RaggedValueRowIds", [self]):
      return self._row_partition.value_rowids()
def nested_value_rowids(self, name=None):
"""Returns a tuple containing the value_rowids for all ragged dimensions.
`rt.nested_value_rowids` is a tuple containing the `value_rowids` tensors
for
all ragged dimensions in `rt`, ordered from outermost to innermost. In
particular, `rt.nested_value_rowids = (rt.value_rowids(),) + value_ids`
where:
* `value_ids = ()` if `rt.values` is a `Tensor`.
* `value_ids = rt.values.nested_value_rowids` otherwise.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensor`s.
#### Example:
>>> rt = tf.ragged.constant(
... [[[[3, 1, 4, 1], [], [5, 9, 2]], [], [[6], []]]])
>>> for i, ids in enumerate(rt.nested_value_rowids()):
... print('row ids for dimension %d: %s' % (i+1, ids.numpy()))
row ids for dimension 1: [0 0 0]
row ids for dimension 2: [0 0 0 2 2]
row ids for dimension 3: [0 0 0 0 2 2 2 3]
"""
with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
rt_nested_ids = [self.value_rowids()]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_ids.append(rt_values.value_rowids())
rt_values = rt_values.values
return tuple(rt_nested_ids)
  def nrows(self, out_type=None, name=None):
    """Returns the number of rows in this ragged tensor.
    I.e., the size of the outermost dimension of the tensor.
    Args:
      out_type: `dtype` for the returned tensor. Defaults to
        `self.row_splits.dtype`.
      name: A name prefix for the returned tensor (optional).
    Returns:
      A scalar `Tensor` with dtype `out_type`.
    #### Example:
    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.nrows())  # rt has 5 rows.
    tf.Tensor(5, shape=(), dtype=int64)
    """
    with ops.name_scope(name, "RaggedNRows", [self]):
      # Only insert a cast when the caller asked for a specific dtype.
      if out_type is None:
        return self._row_partition.nrows()
      else:
        return math_ops.cast(self._row_partition.nrows(), dtype=out_type)
  def row_starts(self, name=None):
    """Returns the start indices for rows in this ragged tensor.
    These indices specify where the values for each row begin in
    `self.values`.  `rt.row_starts()` is equal to `rt.row_splits[:-1]`.
    Args:
      name: A name prefix for the returned tensor (optional).
    Returns:
      A 1-D integer Tensor with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    #### Example:
    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
    >>> print(rt.row_starts())  # indices of row starts in rt.values
    tf.Tensor([0 4 4 7 8], shape=(5,), dtype=int64)
    """
    # Delegates to the underlying RowPartition.
    with ops.name_scope(name, "RaggedRowStarts", [self]):
      return self._row_partition.row_starts()
  def row_limits(self, name=None):
    """Returns the limit indices for rows in this ragged tensor.
    These indices specify where the values for each row end in
    `self.values`.  `rt.row_limits()` is equal to `rt.row_splits[1:]`.
    Args:
      name: A name prefix for the returned tensor (optional).
    Returns:
      A 1-D integer Tensor with shape `[nrows]`.
      The returned tensor is nonnegative, and is sorted in ascending order.
    #### Example:
    >>> rt = tf.ragged.constant([[3, 1, 4, 1], [], [5, 9, 2], [6], []])
    >>> print(rt.values)
    tf.Tensor([3 1 4 1 5 9 2 6], shape=(8,), dtype=int32)
    >>> print(rt.row_limits())  # indices of row limits in rt.values
    tf.Tensor([4 4 7 8 8], shape=(5,), dtype=int64)
    """
    # Delegates to the underlying RowPartition.
    with ops.name_scope(name, "RaggedRowLimits", [self]):
      return self._row_partition.row_limits()
def row_lengths(self, axis=1, name=None):
"""Returns the lengths of the rows in this ragged tensor.
`rt.row_lengths()[i]` indicates the number of values in the
`i`th row of `rt`.
Args:
axis: An integer constant indicating the axis whose row lengths should be
returned.
name: A name prefix for the returned tensor (optional).
Returns:
A potentially ragged integer Tensor with shape `self.shape[:axis]`.
Raises:
ValueError: If `axis` is out of bounds.
#### Example:
>>> rt = tf.ragged.constant(
... [[[3, 1, 4], [1]], [], [[5, 9], [2]], [[6]], []])
>>> print(rt.row_lengths()) # lengths of rows in rt
tf.Tensor([2 0 2 1 0], shape=(5,), dtype=int64)
>>> print(rt.row_lengths(axis=2)) # lengths of axis=2 rows.
<tf.RaggedTensor [[3, 1], [], [2, 1], [1], []]>
"""
if axis == 0:
return self._row_partition.nrows()
if axis == 1:
return self._row_partition.row_lengths()
with ops.name_scope(name, "RaggedRowLengths", [self]):
axis = array_ops.get_positive_axis(
axis, self.shape.rank, ndims_name="rank(self)")
if axis == 0:
return self.nrows()
elif axis == 1:
splits = self.row_splits
return splits[1:] - splits[:-1]
elif isinstance(self.values, RaggedTensor):
return self.with_values(self.values.row_lengths(axis - 1))
else:
shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
return self.with_values(
array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
shape[axis - 1])
def nested_row_lengths(self, name=None):
"""Returns a tuple containing the row_lengths for all ragged dimensions.
`rt.nested_row_lengths()` is a tuple containing the `row_lengths` tensors
for all ragged dimensions in `rt`, ordered from outermost to innermost.
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A `tuple` of 1-D integer `Tensors`. The length of the tuple is equal to
`self.ragged_rank`.
"""
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
  def bounding_shape(self, axis=None, name=None, out_type=None):
    """Returns the tight bounding box shape for this `RaggedTensor`.
    Args:
      axis: An integer scalar or vector indicating which axes to return the
        bounding box for. If not specified, then the full bounding box is
        returned.
      name: A name prefix for the returned tensor (optional).
      out_type: `dtype` for the returned tensor. Defaults to
        `self.row_splits.dtype`.
    Returns:
      An integer `Tensor` (`dtype=self.row_splits.dtype`). If `axis` is not
      specified, then `output` is a vector with
      `output.shape=[self.shape.ndims]`. If `axis` is a scalar, then the
      `output` is a scalar. If `axis` is a vector, then `output` is a vector,
      where `output[i]` is the bounding size for dimension `axis[i]`.
    #### Example:
    >>> rt = tf.ragged.constant([[1, 2, 3, 4], [5], [], [6, 7, 8, 9], [10]])
    >>> rt.bounding_shape().numpy()
    array([5, 4])
    """
    if out_type is None:
      out_type = self._row_partition.dtype
    else:
      out_type = dtypes.as_dtype(out_type)
    with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
      nested_splits = self.nested_row_splits
      rt_flat_values = self.flat_values
      # Optimized special cases for when axis=0 or axis=1:
      if isinstance(axis, int):
        if axis == 0:
          # Number of rows: one less than the length of the outer splits.
          return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
        elif axis == 1:
          # Longest row; clamp with 0 so an empty tensor yields 0, not -inf.
          result = math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
          if out_type != self._row_partition.dtype:
            result = math_ops.cast(result, out_type)
          return result
      # General case: one bounding size per ragged dimension (the max row
      # length at that level), then the dense inner dimensions of flat_values.
      splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
      flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
      ragged_dimensions = [splits_shape[0] - 1] + [
          math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
          for splits in nested_splits
      ]
      inner_dimensions = flat_values_shape[1:]
      # The ragged dims were computed in the partition dtype; cast if needed.
      if out_type != self._row_partition.dtype:
        ragged_dimensions = [
            math_ops.cast(d, out_type) for d in ragged_dimensions
        ]
      bbox = array_ops.concat(
          [array_ops.stack(ragged_dimensions), inner_dimensions], axis=0)
      # With axis=None return the whole bounding shape; otherwise select the
      # requested axes (scalar or vector) via gather.
      return bbox if axis is None else array_ops.gather(bbox, axis)
#=============================================================================
# Transformation
#=============================================================================
  def with_values(self, new_values):
    """Returns a copy of `self` with `values` replaced by `new_value`.
    Preserves cached row-partitioning tensors such as `self.cached_nrows` and
    `self.cached_value_rowids` if they have values.
    Args:
      new_values: Potentially ragged tensor to use as the `values` for the
        returned `RaggedTensor`. Must have `rank > 0`, and must have the same
        number of rows as `self.values`.
    Returns:
      A `RaggedTensor`.  `result.rank = 1 + new_values.rank`.
      `result.ragged_rank = 1 + new_values.ragged_rank`
    """
    new_values = _convert_to_ragged_tensor_values(new_values)
    # Static sanity checks: values must be at least rank 1, and the new
    # values must have the same outer-dimension size as the old ones.
    new_values.shape.with_rank_at_least(1)
    self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
    if (isinstance(new_values, RaggedTensor) and
        self._row_partition.dtype != new_values.row_splits.dtype):
      if not ragged_config.auto_cast_partition_dtype():
        raise ValueError("self and new_values have mismatched row_splits "
                         "dtypes; use RaggedTensor.with_row_splits_dtype() to "
                         "convert them to compatible dtypes.")
      # Auto-cast path: unify both sides on int64, then retry.
      new_values = new_values.with_row_splits_dtype(dtypes.int64)
      return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
    return RaggedTensor(
        values=new_values, row_partition=self._row_partition, internal=True)
def with_flat_values(self, new_values):
"""Returns a copy of `self` with `flat_values` replaced by `new_value`.
Preserves cached row-partitioning tensors such as `self.cached_nrows` and
`self.cached_value_rowids` if they have values.
Args:
new_values: Potentially ragged tensor that should replace
`self.flat_values`. Must have `rank > 0`, and must have the same number
of rows as `self.flat_values`.
Returns:
A `RaggedTensor`.
`result.rank = self.ragged_rank + new_values.rank`.
`result.ragged_rank = self.ragged_rank + new_values.ragged_rank`.
"""
if isinstance(self._values, RaggedTensor):
return self.with_values(self.values.with_flat_values(new_values))
else:
new_values = _convert_to_ragged_tensor_values(new_values)
return self.with_values(new_values)
def with_row_splits_dtype(self, dtype):
"""Returns a copy of this RaggedTensor with the given `row_splits` dtype.
For RaggedTensors with multiple ragged dimensions, the `row_splits` for all
nested `RaggedTensor` objects are cast to the given dtype.
Args:
dtype: The dtype for `row_splits`. One of `tf.int32` or `tf.int64`.
Returns:
A copy of this RaggedTensor, with the `row_splits` cast to the given
type.
"""
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError(f"Argument `row_splits` dtype must be int32 or int64. "
f"Received {dtype}.")
if self._row_partition.dtype == dtype:
return self
current_values = self._values
if isinstance(current_values, RaggedTensor):
return RaggedTensor(
values=current_values.with_row_splits_dtype(dtype),
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
else:
return RaggedTensor(
values=current_values,
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
  def merge_dims(self, outer_axis, inner_axis):
    """Merges outer_axis...inner_axis into a single dimension.
    Returns a copy of this RaggedTensor with the specified range of dimensions
    flattened into a single dimension, with elements in row-major order.
    #### Examples:
    >>> rt = tf.ragged.constant([[[1, 2], [3]], [[4, 5, 6]]])
    >>> print(rt.merge_dims(0, 1))
    <tf.RaggedTensor [[1, 2], [3], [4, 5, 6]]>
    >>> print(rt.merge_dims(1, 2))
    <tf.RaggedTensor [[1, 2, 3], [4, 5, 6]]>
    >>> print(rt.merge_dims(0, 2))
    tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32)
    To mimic the behavior of `np.flatten` (which flattens all dimensions), use
    `rt.merge_dims(0, -1)`. To mimic the behavior of `tf.layers.Flatten` (which
    flattens all dimensions except the outermost batch dimension), use
    `rt.merge_dims(1, -1)`.
    Args:
      outer_axis: `int`: The first dimension in the range of dimensions to
        merge. May be negative if `self.shape.rank` is statically known.
      inner_axis: `int`: The last dimension in the range of dimensions to merge.
        May be negative if `self.shape.rank` is statically known.
    Returns:
      A copy of this tensor, with the specified dimensions merged into a
      single dimension. The shape of the returned tensor will be
      `self.shape[:outer_axis] + [N] + self.shape[inner_axis + 1:]`, where `N`
      is the total number of slices in the merged dimensions.
    """
    # Normalize possibly-negative axes before comparing them.
    outer_axis = array_ops.get_positive_axis(
        outer_axis,
        self.shape.rank,
        axis_name="outer_axis",
        ndims_name="rank(self)")
    inner_axis = array_ops.get_positive_axis(
        inner_axis,
        self.shape.rank,
        axis_name="inner_axis",
        ndims_name="rank(self)")
    if not outer_axis <= inner_axis:
      raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or "
                       f"equal to inner_axis ({inner_axis}).")
    # Delegate the actual merging to the module-level helper.
    return merge_dims(self, outer_axis, inner_axis)
  def _set_shape(self, shape):
    """Updates the static shape of `self` to be `shape`.
    * If a dimension of `shape` has known rank, and is encoded via
      partitioning, then this will update the corresponding partition to
      define `_uniform_row_length` and `nrows`.
    * If a dimension of `shape` has a known rank, and is encoded as one
      of the `flat_values` dimensions, then `flat_values.set_shape()` will
      be used to update its shape.
    Warning: Using this method to assert an incorrect shape for a RaggedTensor
    (i.e., one that's not consistent with its actual shape) can cause
    segmentation faults and very difficult-to-diagnose behavior.  Only use this
    method if you are certain that the shape is correct.
    Args:
      shape: `tf.TensorShape` specifying the shape for this `RaggedTensor`.
    """
    # TODO(edloper): Refactor this to not directly access private members
    # of RowPartition.
    # pylint: disable=protected-access
    shape = tensor_shape.as_shape(shape)
    if shape.rank is None:
      return  # Nothing to do.
    shape = shape.as_list()
    # Outermost dimension
    if shape[0] is not None:
      self._row_partition._row_splits.set_shape(shape[0] + 1)
    # Partitioned dimensions
    dtype = self._row_partition.dtype
    for i, partition in enumerate(self._nested_row_partitions):
      # shape[i + 1]: dimension i+1 of `self` is the row dimension of the
      # i-th nested partition.
      size = shape[i + 1]
      if size is not None:
        if partition._uniform_row_length is not None:
          old_row_length = tensor_util.constant_value(
              partition._uniform_row_length)
          if old_row_length is not None:
            if size == old_row_length:
              continue  # already have shape info for this axis.
            else:
              raise ValueError(f"Inconsistent size for axis {i + 1}: "
                               f"{old_row_length} vs. {size}.")
        # Record the now-known uniform row length, and backfill `nrows` from
        # the splits if it was never computed.
        partition._uniform_row_length = ops.convert_to_tensor(size, dtype)
        if partition._nrows is None:
          partition._nrows = array_ops.size(
              partition._row_splits, out_type=dtype) - 1
    # self.flat_values could be a CompositeTensor and doesn't have set_shape.
    if hasattr(self.flat_values, "set_shape"):
      # Inner dimensions
      flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])
      self.flat_values.set_shape(flat_shape)
#=============================================================================
# Tensor Type Conversions
#=============================================================================
  @classmethod
  @dispatch.add_dispatch_support
  def from_tensor(cls,
                  tensor,
                  lengths=None,
                  padding=None,
                  ragged_rank=1,
                  name=None,
                  row_splits_dtype=dtypes.int64):
    """Converts a `tf.Tensor` into a `RaggedTensor`.

    The set of absent/default values may be specified using a vector of lengths
    or a padding value (but not both). If `lengths` is specified, then the
    output tensor will satisfy `output[row] = tensor[row][:lengths[row]]`. If
    'lengths' is a list of lists or tuple of lists, those lists will be used
    as nested row lengths. If `padding` is specified, then any row *suffix*
    consisting entirely of `padding` will be excluded from the returned
    `RaggedTensor`. If neither `lengths` nor `padding` is specified, then the
    returned `RaggedTensor` will have no absent/default values.

    Examples:

    >>> dt = tf.constant([[5, 7, 0], [0, 3, 0], [6, 0, 0]])
    >>> tf.RaggedTensor.from_tensor(dt)
    <tf.RaggedTensor [[5, 7, 0], [0, 3, 0], [6, 0, 0]]>
    >>> tf.RaggedTensor.from_tensor(dt, lengths=[1, 0, 3])
    <tf.RaggedTensor [[5], [], [6, 0, 0]]>

    >>> tf.RaggedTensor.from_tensor(dt, padding=0)
    <tf.RaggedTensor [[5, 7], [0, 3], [6]]>

    >>> dt = tf.constant([[[5, 0], [7, 0], [0, 0]],
    ...                   [[0, 0], [3, 0], [0, 0]],
    ...                   [[6, 0], [0, 0], [0, 0]]])
    >>> tf.RaggedTensor.from_tensor(dt, lengths=([2, 0, 3], [1, 1, 2, 0, 1]))
    <tf.RaggedTensor [[[5], [7]], [], [[6, 0], [], [0]]]>

    Args:
      tensor: The `Tensor` to convert. Must have rank `ragged_rank + 1` or
        higher.
      lengths: An optional set of row lengths, specified using a 1-D integer
        `Tensor` whose length is equal to `tensor.shape[0]` (the number of rows
        in `tensor`). If specified, then `output[row]` will contain
        `tensor[row][:lengths[row]]`. Negative lengths are treated as zero. You
        may optionally pass a list or tuple of lengths to this argument, which
        will be used as nested row lengths to construct a ragged tensor with
        multiple ragged dimensions.
      padding: An optional padding value. If specified, then any row suffix
        consisting entirely of `padding` will be excluded from the returned
        RaggedTensor. `padding` is a `Tensor` with the same dtype as `tensor`
        and with `shape=tensor.shape[ragged_rank + 1:]`.
      ragged_rank: Integer specifying the ragged rank for the returned
        `RaggedTensor`. Must be greater than zero.
      name: A name prefix for the returned tensors (optional).
      row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
        tensor. One of `tf.int32` or `tf.int64`.

    Returns:
      A `RaggedTensor` with the specified `ragged_rank`. The shape of the
      returned ragged tensor is compatible with the shape of `tensor`.

    Raises:
      ValueError: If both `lengths` and `padding` are specified.
    """
    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
    # `lengths` and `padding` are two mutually-exclusive ways to describe the
    # absent values; reject a call that supplies both.
    if lengths is not None and padding is not None:
      raise ValueError("Specify argument `lengths` or `padding`, but not both.")
    if not isinstance(ragged_rank, int):
      raise TypeError(f"Argument `ragged_rank` must be an int. "
                      f"Received {ragged_rank}.")
    if ragged_rank <= 0:
      raise ValueError(f"Argument `ragged_rank` must be greater than 0. "
                       f"Received {ragged_rank}.")
    with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
      tensor.shape.with_rank_at_least(ragged_rank + 1)
      input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
      ncols = input_shape[1]
      # Handle nested row lengths (a list/tuple of per-dimension length lists).
      if (lengths is not None and isinstance(lengths, (list, tuple)) and
          len(lengths) and not isinstance(lengths[0], (int, float))):
        if ragged_rank not in (1, len(lengths)):
          # Note: we accept `ragged_rank=1` here because it's the default value;
          # i.e., if the user passes in a tuple of lengths, but doesn't specify
          # ragged_rank, then we should use that tuple to determine ragged_rank.
          # We only want to complain if they pass in an explicit ragged_rank
          # that doesn't match len(lengths).
          raise ValueError(f"If Argument `lengths` is a tuple of row_lengths, "
                           f"argument `ragged_rank` must be "
                           f"len(lengths): {len(lengths)}. Received "
                           f"ragged_rank: {ragged_rank}.")
        # Rather than reconstructing the tensor mask directly, we can
        # recreate it as a boolean RaggedTensor, then densify that and use
        # that as the mask to clear out the unused data in the passed tensor.
        tensor.shape.with_rank_at_least(len(lengths) + 1)
        num_tokens = math_ops.reduce_sum(lengths[-1])
        ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
        ragged_mask = cls.from_nested_row_lengths(
            ones_mask, lengths, validate=False)
        dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
        masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
        return cls.from_nested_row_lengths(masked_data, lengths, validate=False)
      # Handle ragged_rank>1 via recursion:
      # If the output should have multiple ragged dimensions, then first
      # flatten the tensor to eliminate all but the last ragged dimension,
      # and recursively convert that flattened tensor. Then add on the splits
      # for the dimensions that we flattened out.
      if ragged_rank > 1:
        if tensor.shape.is_fully_defined():
          input_shape = tensor.shape.as_list()
          # The total number of elements in each dimension. E.g., if
          # input_shape=[3, 4, 5, 6], then dim[2] has 3*4*5 elements in total.
          dim_size = np.cumprod(input_shape)
          new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:]
        else:
          # Shape only known at graph-run time: compute the same products
          # with ops instead of numpy.
          dim_size = math_ops.cumprod(input_shape)
          new_shape = array_ops.concat(
              [[dim_size[ragged_rank - 1]], input_shape[ragged_rank:]], axis=0)
        flattened = array_ops.reshape(tensor, new_shape)
        result = cls.from_tensor(
            flattened, lengths, padding, row_splits_dtype=row_splits_dtype)
        # Re-add the flattened-out dimensions, innermost first, as uniform
        # row lengths.
        for axis in range(ragged_rank - 1, 0, -1):
          dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value
          if dim_len is None:
            dim_len = input_shape[axis]
          else:
            dim_len = constant_op.constant(dim_len, row_splits_dtype)
          result = RaggedTensor.from_uniform_row_length(
              values=result,
              uniform_row_length=dim_len,
              nrows=dim_size[axis - 1],
              validate=False)
        return result
      # If padding was specified, then use it to find row lengths.
      if padding is not None:
        padding = ops.convert_to_tensor(
            padding, name="padding", dtype=tensor.dtype)
        padding.shape.assert_is_compatible_with(tensor.shape[2:])
        # Find places where the padding is equal to the tensor. (This will
        # broadcast `padding` across the outermost 2 dimensions of `tensor`,
        # so `has_default_value.shape = tensor.shape`.)
        has_default_value = math_ops.equal(padding, tensor)
        # If the padding isn't a scalar, then require that all values in the
        # padding match each item in the tensor. After this block of code,
        # `has_default.shape = tensor.shape[:2]`. (Unfortunately, we can't just
        # use reduce_all for both cases, because when you pass an empty `axis`
        # list to reduce_all, it reduces all axes; but we want it to reduce no
        # axes -- i.e., to be a no-op.)
        tensor_rank = array_ops.rank(tensor)
        reduce_axis = math_ops.range(2, tensor_rank)
        has_default = control_flow_ops.cond(
            tensor_rank > 2,
            lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
            lambda: has_default_value)
        has_default.set_shape(tensor_shape.TensorShape([None, None]))
        has_default.set_shape(tensor.shape[:2])
        # Use has_default to find the length of each row: for each
        # non-default item in a row, calculate the length that the row needs to
        # have to include that item; and then take the max of those values
        # (across each row).
        has_nondefault = math_ops.logical_not(has_default)
        has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
        length_for_nondefault_value = (
            has_nondefault *
            array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
        lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
      if lengths is not None:
        # If we have lengths (either directly supplied, or computed from
        # paddings), then use those to construct splits; and then use masking
        # to get the corresponding values.
        lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
                                                    row_splits_dtype)
        lengths.shape.assert_has_rank(1)
        # Clamp lengths into [0, ncols] before building row splits.
        lengths = math_ops.minimum(lengths, ncols)
        lengths = math_ops.maximum(lengths, 0)
        limits = math_ops.cumsum(lengths)
        splits = array_ops.concat(
            [array_ops.zeros([1], row_splits_dtype), limits], axis=0)
        mask = array_ops.sequence_mask(lengths, maxlen=ncols)
        values = array_ops.boolean_mask(tensor, mask)
        return cls.from_row_splits(values, splits, validate=False)
      # If neither padding nor lengths were specified, then create a splits
      # vector that contains no default values, and reshape the input tensor
      # to form the values for the RaggedTensor.
      values_shape = array_ops.concat(
          [[input_shape[0] * input_shape[1]], input_shape[2:]], axis=0)
      values = array_ops.reshape(tensor, values_shape)
      const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value
      const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value
      # Prefer statically-known sizes when available so downstream shape
      # inference can see them as constants.
      if const_nrows is not None:
        nrows = constant_op.constant(const_nrows, row_splits_dtype)
      else:
        nrows = input_shape[0]
      if const_ncols is not None:
        ncols = constant_op.constant(const_ncols, row_splits_dtype)
      else:
        ncols = input_shape[1]
      return RaggedTensor.from_uniform_row_length(
          values=values, uniform_row_length=ncols, nrows=nrows, validate=False)
  def to_tensor(self, default_value=None, name=None, shape=None):
    """Converts this `RaggedTensor` into a `tf.Tensor`.

    If `shape` is specified, then the result is padded and/or truncated to
    the specified shape.

    Examples:

    >>> rt = tf.ragged.constant([[9, 8, 7], [], [6, 5], [4]])
    >>> print(rt.to_tensor())
    tf.Tensor(
        [[9 8 7] [0 0 0] [6 5 0] [4 0 0]], shape=(4, 3), dtype=int32)
    >>> print(rt.to_tensor(shape=[5, 2]))
    tf.Tensor(
        [[9 8] [0 0] [6 5] [4 0] [0 0]], shape=(5, 2), dtype=int32)

    Args:
      default_value: Value to set for indices not specified in `self`. Defaults
        to zero. `default_value` must be broadcastable to
        `self.shape[self.ragged_rank + 1:]`.
      name: A name prefix for the returned tensors (optional).
      shape: The shape of the resulting dense tensor. In particular,
        `result.shape[i]` is `shape[i]` (if `shape[i]` is not None), or
        `self.bounding_shape(i)` (otherwise).`shape.rank` must be `None` or
        equal to `self.rank`.

    Returns:
      A `Tensor` with shape `ragged.bounding_shape(self)` and the
      values specified by the non-empty values in `self`. Empty values are
      assigned `default_value`.
    """
    with ops.name_scope(name, "RaggedToTensor", [self, default_value, shape]):
      if default_value is not None:
        default_value = ops.convert_to_tensor(
            default_value, name="default_value", dtype=self.dtype)
      # Decompose the row partitions into (type, tensor) pairs for the kernel.
      type_tensor_pairs = _get_row_partition_type_tensor_pairs(self)
      row_partition_types = [x[0] for x in type_tensor_pairs]
      row_partition_tensors = [x[1] for x in type_tensor_pairs]
      if default_value is None:
        default_value = array_ops.zeros((), self.dtype)
      # A shape given as a list/tuple that mixes Python ints with scalar
      # Tensors must be stacked into a single shape Tensor first.
      if (isinstance(shape, (list, tuple)) and
          any(isinstance(v, ops.Tensor) for v in shape) and
          all(isinstance(v, (int, ops.Tensor)) for v in shape)):
        shape = array_ops.stack(shape)
      shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype)
      tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
          shape=shape_tensor,
          values=self.flat_values,
          default_value=default_value,
          row_partition_types=row_partition_types,
          row_partition_tensors=row_partition_tensors)
      ragged_shape = self.shape
      if ragged_shape.rank is not None and not isinstance(shape, ops.Tensor):
        # Merge self.shape and shape, favoring the second one as it takes
        # into account potential padding added to the output.
        shape = tensor_shape.as_shape(shape)
        if shape.rank is None:
          output_shape = ragged_shape
        else:
          # At this point we can assume that shape.rank == ragged_shape.rank
          # because otherwise it would have failed earlier.
          output_shape = [
              s1 if s1 is not None else s2
              for (s1, s2) in zip(shape.as_list(), ragged_shape.as_list())
          ]
        tensor.set_shape(output_shape)
      return tensor
@classmethod
@dispatch.add_dispatch_support
def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
"""Converts a 2D `tf.sparse.SparseTensor` to a `RaggedTensor`.
Each row of the `output` `RaggedTensor` will contain the explicit values
from the same row in `st_input`. `st_input` must be ragged-right. If not
it is not ragged-right, then an error will be generated.
Example:
>>> indices = [[0, 0], [0, 1], [0, 2], [1, 0], [3, 0]]
>>> st = tf.sparse.SparseTensor(indices=indices,
... values=[1, 2, 3, 4, 5],
... dense_shape=[4, 3])
>>> tf.RaggedTensor.from_sparse(st).to_list()
[[1, 2, 3], [4], [], [5]]
Currently, only two-dimensional `SparseTensors` are supported.
Args:
st_input: The sparse tensor to convert. Must have rank 2.
name: A name prefix for the returned tensors (optional).
row_splits_dtype: `dtype` for the returned `RaggedTensor`'s `row_splits`
tensor. One of `tf.int32` or `tf.int64`.
Returns:
A `RaggedTensor` with the same values as `st_input`.
`output.ragged_rank = rank(st_input) - 1`.
`output.shape = [st_input.dense_shape[0], None]`.
Raises:
ValueError: If the number of dimensions in `st_input` is not known
statically, or is not two.
"""
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if not sparse_tensor.is_sparse(st_input):
raise TypeError(f"Argument `st_input` must be of type SparseTensor, but "
f"is of type {type(st_input).__name__}.")
with ops.name_scope(name, "RaggedFromSparse", [st_input]):
st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
st_input, name="st_input")
if st_input.dense_shape.shape.ndims is None:
static_rank_from_dense_shape = None
else:
static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
if st_input.indices.shape.ndims is None:
static_rank_from_indices = None
else:
static_rank_from_indices = st_input.indices.shape.dims[1].value
if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
raise ValueError("rank(st_input) must be 2.")
with ops.control_dependencies(
_assert_sparse_indices_are_ragged_right(st_input.indices)):
# Treat sparse row indices as segment ids to generate a splits tensor
# thta we can pair with the sparse tensor values. (Ignore sparse column
# indices.)
segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
return cls.from_value_rowids(
st_input.values, segment_ids, num_segments, validate=False)
def to_sparse(self, name=None):
"""Converts this `RaggedTensor` into a `tf.sparse.SparseTensor`.
Example:
>>> rt = tf.ragged.constant([[1, 2, 3], [4], [], [5, 6]])
>>> print(rt.to_sparse())
SparseTensor(indices=tf.Tensor(
[[0 0] [0 1] [0 2] [1 0] [3 0] [3 1]],
shape=(6, 2), dtype=int64),
values=tf.Tensor([1 2 3 4 5 6], shape=(6,), dtype=int32),
dense_shape=tf.Tensor([4 3], shape=(2,), dtype=int64))
Args:
name: A name prefix for the returned tensors (optional).
Returns:
A SparseTensor with the same values as `self`.
"""
with ops.name_scope(name, "RaggedToSparse", [self]):
result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
self.nested_row_splits, self.flat_values, name=name)
return sparse_tensor.SparseTensor(result.sparse_indices,
result.sparse_values,
result.sparse_dense_shape)
@classmethod
def _from_variant(cls,
variant,
dtype,
output_ragged_rank,
input_ragged_rank=None,
row_splits_dtype=dtypes.int64,
name=None):
"""Converts a `variant` Tensor into a `RaggedTensor`.
The input `variant` could be a scalar, meaning it encodes a single
`RaggedTensor` with ragged_rank `output_ragged_rank`. Alternatively it could
have an arbitrary rank, in which case each element is decoded into a
`RaggedTensor` with ragged_rank `input_ragged_rank` and these are then
stacked according to the input shape to output a single `RaggedTensor`
with ragged_rank `output_ragged_rank`. If `input_ragged_rank` is not
provided, it is inferred dynamically as `output_ragged_rank` -
`rank(variant)`. If `input_ragged_rank` is provided, the following must be
true: `output_ragged_rank` = `input_ragged_rank` + `rank(variant)`.
Example:
>>> rt = tf.ragged.constant([[0], [1, 2]])
>>> et = rt._to_variant()
>>> stacked_et = tf.stack([et, et])
>>> tf.RaggedTensor._from_variant( # scalar input.
... et, dtype=tf.int32, output_ragged_rank=1).to_list()
[[0], [1, 2]]
>>> tf.RaggedTensor._from_variant( # batched input.
... stacked_et, dtype=tf.int32, output_ragged_rank=2).to_list()
[[[0], [1, 2]], [[0], [1, 2]]]
Args:
variant: A `variant` Tensor representing an encoded (possibly
nested-batched) `RaggedTensor`.
dtype: The dtype of the encoded `RaggedTensor`.
output_ragged_rank: The expected ragged rank of the output `RaggedTensor`.
input_ragged_rank: The ragged rank of each encoded `RaggedTensor`. This is
optional and inferred dynamically if not provided.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
name: A name prefix for the returned tensors (optional).
Returns:
A `RaggedTensor` of dtype `dtype` and ragged rank `output_ragged_rank`.
Raises:
ValueError: If the input rank is known, `input_ragged_rank` is provided
and `output_ragged_rank` = `input_ragged_rank` + `rank(variant)` does
not hold.
"""
variant = ops.convert_to_tensor(
variant, name="variant", dtype=dtypes.variant)
if (variant.shape.ndims is not None and input_ragged_rank is not None and
output_ragged_rank != input_ragged_rank + variant.shape.ndims):
raise ValueError(
f"Argument `output_ragged_rank` ({output_ragged_rank}) must be equal "
f"to `input_ragged_rank` + `variant.shape.ndims` "
f"({input_ragged_rank} + {variant.shape.ndims}).")
input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
with ops.name_scope(
name, "RaggedFromVariant",
[variant, dtype, input_ragged_rank, output_ragged_rank]):
result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
variant, input_ragged_rank, output_ragged_rank, dtype,
row_splits_dtype, name)
return cls.from_nested_row_splits(
result.output_dense_values,
result.output_nested_splits,
validate=False)
def _to_variant(self, batched_input=False, name=None):
"""Converts this `RaggedTensor` into a `variant` Tensor.
If `batched_input` is `True`, then the `RaggedTensor` is unbatched along the
zero-th dimension, each component `RaggedTensor` is encoded into a scalar
`variant` Tensor, and these are stacked to return a 1-D `variant` Tensor.
If `batched_input` is `False`, then the `RaggedTensor` is encoded as is and
a scalar `variant` Tensor is returned.
Example:
>>> rt = tf.ragged.constant([[[0]], [[1]], [[2]]])
>>> rt._to_variant().shape.as_list()
[]
>>> rt._to_variant(batched_input=True).shape.as_list()
[3]
Args:
batched_input: If `True`, the `RaggedTensor` is unbatched and converted to
a `variant` vector. Set to `False` by default.
name: A name prefix for the returned tensors (optional).
Returns:
A `variant` Tensor that encodes this `RaggedTensor`.
"""
with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
return gen_ragged_conversion_ops.ragged_tensor_to_variant(
self.nested_row_splits, self.flat_values, batched_input, name)
#=============================================================================
# String Encoding
#=============================================================================
def __repr__(self):
if self._is_eager():
# The np.array2string in _formatter provides a separator argument, but
# doesn't handle recursive calls correctly. The np.printoptions handles
# recursive calls correctly, but doesn't provide a separator argument.
# Combines them together to print elements separated by comma, while
# avoiding the redundant array prefixes and dtypes. For example,
# the value of tf.ragged.constant([[1, 2], [3, 4]]) will look like
#
# [[1, 2],
# [3, 4]]
with np.printoptions(formatter={"all": _formatter}):
value_text = _formatter(self.numpy())
return f"<tf.RaggedTensor {value_text}>"
else:
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self.values,
self.row_splits)
#=============================================================================
# Eager Execution Mode
#=============================================================================
def numpy(self):
"""Returns a numpy `array` with the values for this `RaggedTensor`.
Requires that this `RaggedTensor` was constructed in eager execution mode.
Ragged dimensions are encoded using numpy `arrays` with `dtype=object` and
`rank=1`, where each element is a single row.
#### Examples
In the following example, the value returned by `RaggedTensor.numpy()`
contains three numpy `array` objects: one for each row (with `rank=1` and
`dtype=int64`), and one to combine them (with `rank=1` and `dtype=object`):
>>> tf.ragged.constant([[1, 2, 3], [4, 5]], dtype=tf.int64).numpy()
array([array([1, 2, 3]), array([4, 5])], dtype=object)
Uniform dimensions are encoded using multidimensional numpy `array`s. In
the following example, the value returned by `RaggedTensor.numpy()` contains
a single numpy `array` object, with `rank=2` and `dtype=int64`:
>>> tf.ragged.constant([[1, 2, 3], [4, 5, 6]], dtype=tf.int64).numpy()
array([[1, 2, 3], [4, 5, 6]])
Returns:
A numpy `array`.
"""
if not self._is_eager():
raise ValueError("RaggedTensor.numpy() is only supported in eager mode.")
values = self.values.numpy()
splits = self.row_splits.numpy()
rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
if not rows:
return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)
# Note: if `rows` have ragged lengths, then they will be stored in a
# np.ndarray with dtype=object and rank=1. If they have uniform lengths,
# they will be combined into a single np.ndarray with dtype=row.dtype and
# rank=row.rank+1.
#
# Manually set dtype as numpy now complains when given ragged rows.
has_variable_length_rows = any(len(row) != len(rows[0]) for row in rows)
dtype = np.object_ if has_variable_length_rows else None
return np.array(rows, dtype=dtype)
def to_list(self):
"""Returns a nested Python `list` with the values for this `RaggedTensor`.
Requires that `rt` was constructed in eager execution mode.
Returns:
A nested Python `list`.
"""
if not isinstance(self.row_splits, ops.EagerTensor):
raise ValueError("to_list can only be used in eager mode.")
row_splits = self.row_splits.numpy().tolist()
values = self.values
if isinstance(values, RaggedTensor):
return [
values[row_splits[i]:row_splits[i + 1]].to_list()
for i in range(len(row_splits) - 1)
]
else:
# Convert values to a Python list.
if hasattr(values, "numpy"):
values_as_list = values.numpy().tolist()
elif hasattr(values, "to_list"):
values_as_list = values.to_list()
else:
raise ValueError("values must be convertible to a list")
return [
values_as_list[row_splits[i]:row_splits[i + 1]]
for i in range(len(row_splits) - 1)
]
def _eager_value(self):
"""Returns a RaggedTensorValue for self. Requires self._is_eager()=true."""
value = self.flat_values.numpy()
for row_splits in reversed(self.nested_row_splits):
value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
return value
def _is_eager(self):
"""Returns True if values & row_splits Tensors are all `EagerTensor`s."""
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
#=============================================================================
# Operators
#=============================================================================
# To avoid circular dependencies, we define stub methods for operators here,
# and then override them when the ragged_operators module is imported.
  # Factory for placeholder operator implementations: each stub raises with a
  # pointer to the module that installs the real implementation.
  def _overloaded_operator(name):  # pylint: disable=no-self-argument
    """Returns a stub for operator `name` that raises until it is overridden."""
    def stub(*args, **kwargs):
      del args, kwargs
      raise ValueError(
          f"You must import 'tensorflow.python.ops.ragged.ragged_ops' "
          f"before using RaggedTensor.{name}.")
    return stub
  # Indexing and comparison operators.
  __getitem__ = _overloaded_operator("__getitem__")
  __ge__ = _overloaded_operator("__ge__")
  __gt__ = _overloaded_operator("__gt__")
  __le__ = _overloaded_operator("__le__")
  __lt__ = _overloaded_operator("__lt__")
  # Logical / bitwise operators.
  __and__ = _overloaded_operator("__and__")
  __rand__ = _overloaded_operator("__rand__")
  __invert__ = _overloaded_operator("__invert__")
  __ror__ = _overloaded_operator("__ror__")
  __or__ = _overloaded_operator("__or__")
  __xor__ = _overloaded_operator("__xor__")
  __rxor__ = _overloaded_operator("__rxor__")
  # Arithmetic operators.
  __abs__ = _overloaded_operator("__abs__")
  __add__ = _overloaded_operator("__add__")
  __radd__ = _overloaded_operator("__radd__")
  __div__ = _overloaded_operator("__div__")
  __rdiv__ = _overloaded_operator("__rdiv__")
  __floordiv__ = _overloaded_operator("__floordiv__")
  __rfloordiv__ = _overloaded_operator("__rfloordiv__")
  __mod__ = _overloaded_operator("__mod__")
  __rmod__ = _overloaded_operator("__rmod__")
  __mul__ = _overloaded_operator("__mul__")
  __rmul__ = _overloaded_operator("__rmul__")
  __neg__ = _overloaded_operator("__neg__")
  __pow__ = _overloaded_operator("__pow__")
  __rpow__ = _overloaded_operator("__rpow__")
  __sub__ = _overloaded_operator("__sub__")
  __rsub__ = _overloaded_operator("__rsub__")
  __truediv__ = _overloaded_operator("__truediv__")
  __rtruediv__ = _overloaded_operator("__rtruediv__")
  # Remove the factory so it doesn't linger as a class attribute.
  del _overloaded_operator
#=============================================================================
# Name Scope
#=============================================================================
# This private function is used by ops.name_scope to ensure that all of the
# input tensors for the scope belong to the same graph. Defining this means
# that you may include `RaggedTensor` objects in the name_scope `values`
# list.
def _as_graph_element(self):
"""Convert `self` to a graph element."""
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
#=============================================================================
# Composite Tensor
#=============================================================================
  @property
  def _type_spec(self):
    """The `RaggedTensorSpec` describing this value (CompositeTensor API)."""
    return RaggedTensorSpec.from_value(self)
  def _shape_invariant_to_type_spec(self, shape):
    """Returns a `RaggedTensorSpec` for `self` with the given shape invariant."""
    return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,
                            self.row_splits.dtype)
  def consumers(self):
    """Returns the consumers of this RaggedTensor; delegates to `_consumers()`."""
    return self._consumers()
def is_ragged(value):
  """Returns true if `value` is a ragged tensor or ragged tensor value."""
  ragged_types = (RaggedTensor, ragged_tensor_value.RaggedTensorValue)
  return isinstance(value, ragged_types)
def match_row_splits_dtypes(*tensors, **kwargs):
  """Return a copy of `tensors` with row_splits all having the same dtype.

  Args:
    *tensors: A list of Tensors or RaggedTensors.
    **kwargs: If 'return_dtype=True', then return a tuple (dtype, tensors),
      where `dtype` is the data type used by row-splits, and `tensors` is the
      converted list of `Tensors` and `RaggedTensors`.

  Returns:
    The converted list of `Tensors` and `RaggedTensors`.
  """
  return_dtype = kwargs.pop("return_dtype", False)
  if kwargs:
    raise ValueError(f"Unexpected keyword args {kwargs}.")
  # Collect the row_splits dtypes that actually occur among the inputs.
  splits_dtypes = [
      t.row_splits.dtype for t in tensors if isinstance(t, RaggedTensor)
  ]
  has_int32 = any(d == dtypes.int32 for d in splits_dtypes)
  has_int64 = any(d != dtypes.int32 for d in splits_dtypes)
  if has_int32 and has_int64:
    if not ragged_config.auto_cast_partition_dtype():
      raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
                       "use RaggedTensor.with_row_splits_dtype() to convert "
                       "them to compatible dtypes.")
    # Auto-casting is enabled: widen every RaggedTensor to int64 row splits.
    dtype = dtypes.int64
    tensors = tuple(
        t.with_row_splits_dtype(dtypes.int64)
        if isinstance(t, RaggedTensor) else t for t in tensors)
  elif has_int32:
    dtype = dtypes.int32
  else:
    dtype = dtypes.int64
  return (dtype, tensors) if return_dtype else tensors
#===============================================================================
# RaggedTensorSpec
#===============================================================================
@tf_export("RaggedTensorSpec")
@type_spec.register("tf.RaggedTensorSpec")
class RaggedTensorSpec(type_spec.BatchableTypeSpec):
"""Type specification for a `tf.RaggedTensor`."""
__slots__ = [
"_shape", "_dtype", "_ragged_rank", "_row_splits_dtype",
"_flat_values_spec"
]
  @property
  def dtype(self):
    """The `tf.dtypes.DType` specified by this type for the RaggedTensor.

    Examples:

    >>> rt = tf.ragged.constant([["a"], ["b", "c"]], dtype=tf.string)
    >>> tf.type_spec_from_value(rt).dtype
    tf.string

    Returns:
      A `tf.dtypes.DType` of the values in the RaggedTensor.
    """
    return self._dtype
  @property
  def shape(self):
    """The statically known shape of the RaggedTensor.

    Examples:

    >>> rt = tf.ragged.constant([[0], [1, 2]])
    >>> tf.type_spec_from_value(rt).shape
    TensorShape([2, None])

    >>> rt = tf.ragged.constant([[[0, 1]], [[1, 2], [3, 4]]], ragged_rank=1)
    >>> tf.type_spec_from_value(rt).shape
    TensorShape([2, None, 2])

    Returns:
      A `tf.TensorShape` containing the statically known shape of the
      RaggedTensor. Ragged dimensions have a size of `None`.
    """
    return self._shape
  @property
  def ragged_rank(self):
    """The number of times the RaggedTensor's flat_values is partitioned.

    Defaults to `shape.ndims - 1`.

    Examples:

    >>> values = tf.ragged.constant([[1, 2, 3], [4], [5, 6], [7, 8, 9, 10]])
    >>> tf.type_spec_from_value(values).ragged_rank
    1

    >>> rt1 = tf.RaggedTensor.from_uniform_row_length(values, 2)
    >>> tf.type_spec_from_value(rt1).ragged_rank
    2

    Returns:
      A Python `int` indicating the number of times the underlying `flat_values`
      Tensor has been partitioned to add a new dimension.
      I.e., `tf.rank(rt) = tf.rank(rt.flat_values) + rt.ragged_rank`.
    """
    return self._ragged_rank
  @property
  def row_splits_dtype(self):
    """The `tf.dtypes.DType` of the RaggedTensor's `row_splits`.

    Examples:

    >>> rt = tf.ragged.constant([[1, 2, 3], [4]], row_splits_dtype=tf.int64)
    >>> tf.type_spec_from_value(rt).row_splits_dtype
    tf.int64

    Returns:
      A `tf.dtypes.DType` for the RaggedTensor's `row_splits` tensor. One
      of `tf.int32` or `tf.int64`.
    """
    return self._row_splits_dtype
  @property
  def flat_values_spec(self):
    """The `TypeSpec` of the flat_values of RaggedTensor.

    Returns:
      - The TypeSpec of flat_values.
      - None when the flat_values is a Tensor.
    """
    return self._flat_values_spec
  @property
  def value_type(self):
    """The Python type represented by this spec.

    `RaggedTensor` when `ragged_rank > 0`; a ragged rank of zero describes a
    plain dense `Tensor`.
    """
    return RaggedTensor if self._ragged_rank > 0 else ops.Tensor
def __init__(self,
shape=None,
dtype=dtypes.float32,
ragged_rank=None,
row_splits_dtype=dtypes.int64,
flat_values_spec=None):
"""Constructs a type specification for a `tf.RaggedTensor`.
Args:
shape: The shape of the RaggedTensor, or `None` to allow any shape. If a
shape is specified, then all ragged dimensions must have size `None`.
dtype: `tf.DType` of values in the RaggedTensor.
ragged_rank: Python integer, the number of times the RaggedTensor's
flat_values is partitioned. Defaults to `shape.ndims - 1`.
row_splits_dtype: `dtype` for the RaggedTensor's `row_splits` tensor. One
of `tf.int32` or `tf.int64`.
flat_values_spec: TypeSpec for flat_value of the RaggedTensor. It shall be
provided when the flat_values is a CompositeTensor rather then Tensor.
If both `dtype` and `flat_values_spec` and are provided, `dtype` must
be the same as `flat_values_spec.dtype`. (experimental)
"""
self._shape = tensor_shape.as_shape(shape)
self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
if flat_values_spec is not None:
if dtype is None:
dtype = flat_values_spec.dtype
elif dtype != flat_values_spec.dtype:
raise ValueError("dtype must be the same as flat_values_spec.dtype")
elif dtype is None:
raise ValueError(
"At least one of dtype or flat_values_spec must be provided")
self._dtype = dtypes.as_dtype(dtype)
self._flat_values_spec = flat_values_spec
rank = self._shape.ndims
if ragged_rank is None:
if rank is None:
raise ValueError("Must specify ragged_rank or "
"a shape with a known rank.")
ragged_rank = rank - 1
self._ragged_rank = ragged_rank
if not isinstance(self._ragged_rank, int):
raise TypeError(f"Argument `ragged_rank` must be an int. "
f"Recieved {ragged_rank}.")
if rank is not None:
if ragged_rank >= rank:
raise ValueError(f"Argument `ragged_rank` ({ragged_rank}) must be less "
f"than rank ({rank}).")
  def is_compatible_with(self, spec_or_value):
    """Returns true if `spec_or_value` is compatible with this spec."""
    # RaggedTensor with ragged_rank 0 can be compatible with raw flat_values.
    if self._ragged_rank == 0:
      if self._flat_values_spec is None:
        # Compare against an equivalent dense TensorSpec.
        if isinstance(spec_or_value, (ops.Tensor, tensor_spec.TensorSpec)):
          return tensor_spec.TensorSpec(
              self._shape, self._dtype).is_compatible_with(spec_or_value)
      elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)):
        # Delegate to the flat_values spec for non-ragged operands.
        return self._flat_values_spec.is_compatible_with(spec_or_value)
    # Fall back to the generic structural compatibility check.
    return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value)
def _serialize(self):
if self._flat_values_spec is None:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype)
else:
return (self._shape, self._dtype, self._ragged_rank,
self._row_splits_dtype, self._flat_values_spec)
  @property
  def _component_specs(self):
    """Specs of the tensors encoding a value of this spec.

    Layout is `[flat_values, outer_row_splits, *inner_row_splits]`;
    for ragged_rank 0 it is just `[flat_values]`.
    """
    if self._ragged_rank == 0:
      if self._flat_values_spec is not None:
        return [self._flat_values_spec]
      else:
        # A ragged_rank-0 value is encoded as a single dense tensor.
        return [tensor_spec.TensorSpec(self._shape, self._dtype)]
    flat_values_spec = self._flat_values_spec
    if flat_values_spec is None:
      # The flat values have an unknown leading (row) dimension.
      flat_values_shape = tensor_shape.TensorShape([None]).concatenate(
          self._shape[self._ragged_rank + 1:])
      flat_values_spec = tensor_spec.TensorSpec(flat_values_shape, self._dtype)
    outer_dim = tensor_shape.dimension_at_index(self._shape, 0)
    # A row_splits vector has one more entry than there are rows.
    outer_splits_shape = [None if outer_dim is None else outer_dim + 1]
    inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype)
    specs = ([
        flat_values_spec,
        tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)
    ] + [inner_splits_spec for _ in range(self._ragged_rank - 1)])
    return specs
def _to_components(self, value):
if is_ragged(value):
return [value.flat_values] + list(value.nested_row_splits)
else:
return [value]
  def _from_components(self, tensor_list):
    """Reassembles a value from `[flat_values, *nested_row_splits]`."""
    result = tensor_list[0]
    if (all(isinstance(t, np.ndarray) for t in tensor_list) and
        not tf2.enabled()):
      # TF1 with numpy components: build a RaggedTensorValue instead of ops.
      for row_splits in reversed(tensor_list[1:]):
        result = ragged_tensor_value.RaggedTensorValue(result, row_splits)
    else:
      if isinstance(tensor_list[0], np.ndarray):
        tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]
        result = tensor_list[0]
      # Wrap from the innermost row_splits outward.
      for row_splits in reversed(tensor_list[1:]):
        result = RaggedTensor(
            result,
            RowPartition.from_row_splits(row_splits, validate=False),
            internal=True)
    if self._shape.ndims is not None:
      if isinstance(result, RaggedTensor):
        result._set_shape(self._shape)  # pylint: disable=protected-access
        # TODO(xjun): MaskedTensor doesn't implement set_shape.
        if self.flat_values_spec is not None and hasattr(result.flat_values,
                                                         "set_shape"):
          result.flat_values.set_shape(self.flat_values_spec.shape)
      elif isinstance(result, ops.Tensor):
        result.set_shape(self._shape)
    return result
  # The RaggedTensorSpec tensor_list encoding uses to/from_variant ops
  # to (un)box the component tensors in a way that allows for batching &
  # unbatching.
  @property
  def _flat_tensor_specs(self):
    """Spec for the single variant tensor that boxes a whole value."""
    # NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is
    # `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of
    # boxed `RaggedTensor` objects with shape `(...)` (and batches of batches,
    # etc.), so the flat shape must be unknown.
    return [tensor_spec.TensorSpec(None, dtypes.variant)]
  def _to_tensor_list(self, value):
    """Encodes `value` as a single (unbatched) variant tensor.

    Raises:
      ValueError: If this spec has a customized `flat_values_spec`, or if
        `value`'s ragged rank does not match this spec's.
    """
    # TODO(edloper): Update gen_ragged_conversion_ops that convert to and
    # from variant to include all of the row-partitioning tensors.
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
    if ragged_rank != self._ragged_rank:
      raise ValueError(f"Ragged rank of value {ragged_rank} does not match "
                       f"ragged rank of type {self._ragged_rank}.")
    if ragged_rank == 0:
      # Dense values are boxed directly (no row partitions).
      return [
          gen_ragged_conversion_ops.ragged_tensor_to_variant(
              (), value, batched_input=False)
      ]
    # pylint: disable=protected-access
    return [value._to_variant(batched_input=False)]
  def _to_batched_tensor_list(self, value):
    """Encodes `value` as a single variant tensor of batched elements.

    Raises:
      ValueError: If this spec has a customized `flat_values_spec`, if
        `value`'s ragged rank does not match this spec's, or if
        ragged_rank is 0 (not yet supported).
    """
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
    if ragged_rank != self._ragged_rank:
      raise ValueError(f"Ragged rank of value {ragged_rank} does not match "
                       f"ragged rank of type {self._ragged_rank}.")
    if ragged_rank == 0:
      # TODO(b/141789000) Update this to handle ragged_rank=0.
      raise ValueError(
          "_to_batched_tensor_list doesn't support ragged_rank=0 yet")
    # pylint: disable=protected-access
    return [value._to_variant(batched_input=True)]
def _from_compatible_tensor_list(self, tensor_list):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
if self._ragged_rank < 0:
raise ValueError(f"Argument `ragged_rank` must be non-negative. "
f"Received {self._ragged_rank}.")
result = RaggedTensor._from_variant( # pylint: disable=protected-access
tensor_list[0],
dtype=self._dtype,
row_splits_dtype=self._row_splits_dtype,
output_ragged_rank=self._ragged_rank)
if self._shape.ndims is not None:
if isinstance(result, RaggedTensor):
result._set_shape(self._shape) # pylint: disable=protected-access
# TODO(xjun): MaskedTensor doesn't implement set_shape.
if self.flat_values_spec is not None and hasattr(self.flat_values,
"set_shape"):
result.flat_values.set_shape(self.flat_values_spec.shape)
else:
result.set_shape(self._shape)
return result
def _batch(self, batch_size):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
return RaggedTensorSpec(
tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
self._dtype, self._ragged_rank + 1, self._row_splits_dtype)
def _unbatch(self):
if self._flat_values_spec is not None:
raise ValueError("Customized value_type is not supported.")
# Note: Negative ragged_rank is allowed here because the dataset could be
# subsequently batched again. If ragged_rank > 1, assume row_splits_dtype is
# consistent. Errors are handled in
# RaggedTensorSpec._from_compatible_tensor_list()
return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1,
self._row_splits_dtype)
  def _to_legacy_output_types(self):
    """Legacy (TF1 `tf.data`) output type for this spec."""
    return self._dtype
  def _to_legacy_output_shapes(self):
    """Legacy (TF1 `tf.data`) output shape for this spec."""
    return self._shape
  def _to_legacy_output_classes(self):
    """Legacy (TF1 `tf.data`) output class; the spec stands for itself."""
    return self
@classmethod
def from_value(cls, value):
if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or
isinstance(value.flat_values, ops.Tensor)):
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype)
else:
return cls(
shape=value.shape,
dtype=value.values.dtype,
ragged_rank=value.ragged_rank,
row_splits_dtype=value.row_splits.dtype,
flat_values_spec=type_spec.type_spec_from_value(value.flat_values))
# Map RaggedTensorValue instances to RaggedTensorSpec when building TypeSpecs.
type_spec.register_type_spec_from_value_converter(
    ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
                                       dtype=None,
                                       preferred_dtype=None,
                                       name=None):
  """Converts value to a `RaggedTensor` or `Tensor`.

  * If `value` is a `RaggedTensor`, then return it as-is.
  * If `value` is a `RaggedTensorValue`, return a corresponding constant
    `RaggedTensor`.
  * Otherwise, use `convert_to_tensor` to convert `value` to a `Tensor`.

  Args:
    value: A `RaggedTensor`, a `RaggedTensorValue`, or an object whose type has
      a registered `Tensor` conversion function.
    dtype: Optional element type for the returned tensor. If missing the type
      is inferred from the type of `value`.
    preferred_dtype: Optional element type for the returned tensor, used when
      dtype is None. This argument has no effect if `value` is already a
      tensor, or when conversion is not possible.
    name: Optional name to use if a new `Tensor` is created.

  Returns:
    A `Tensor` or `RaggedTensor`.

  Raises:
    ValueError: If `value` is a `RaggedTensor` whose dtype is incompatible
      with the requested `dtype`.
  """
  if isinstance(value, RaggedTensor):
    # A RaggedTensor is returned unchanged; only validate the requested dtype.
    if dtype and not dtype.is_compatible_with(value.dtype):
      raise ValueError(f"Tensor conversion requested dtype {dtype.name} for "
                       f"RaggedTensor with dtype {value.dtype.name}: {value}.")
    return value
  elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
    with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
      flat_values = ops.convert_to_tensor(
          value=value.flat_values,
          dtype=dtype,
          dtype_hint=preferred_dtype,
          name="flat_values")
      # The numpy row splits were already validated when `value` was built.
      return RaggedTensor.from_nested_row_splits(
          flat_values, value.nested_row_splits, validate=False)
  else:
    return ops.convert_to_tensor_v2_with_dispatch(
        value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)
def _convert_to_ragged_tensor_values(value):
  """Converts `value` to a supported RaggedTensor value type.

  * If `value` is already an object of a supported value type, it is
    returned as-is.
  * Otherwise it is converted to a `Tensor` or `RaggedTensor`.

  Args:
    value: An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
      value types, or an object whose type has a registered `Tensor`
      conversion function.

  Returns:
    An object of `Tensor`, `RaggedTensor` or registered RaggedTensor
    value types.
  """
  if _is_supported_ragged_values_type(value):
    return value
  return convert_to_tensor_or_ragged_tensor(value, name="values")
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
  """Rebuilds a RaggedTensorValue from `[*row_splits, flat_values]`."""
  *splits, value = list(components)
  # Wrap from the innermost (last) row_splits outward.
  for row_splits in reversed(splits):
    value = ragged_tensor_value.RaggedTensorValue(value, row_splits)
  return value
def _ragged_tensor_session_fetch(rt):
  """Session-fetch conversion: returns (component_tensors, rebuild_fn)."""
  components = rt.nested_row_splits + (rt.flat_values,)
  return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
  """Returns the component tensors used to feed `feed_key` in partial_run."""
  return feed_key.nested_row_splits + (feed_key.flat_values,)
# Teach tf.compat.v1.Session.run how to fetch and feed RaggedTensors.
session.register_session_run_conversion_functions(
    RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
    _ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType:
  """Encoding of a static type for a `RaggedTensor`.

  Use this type to express/declare that an output must have the type of
  `RaggedTensor`.
  """

  def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
    """Initializes a RaggedTensorType object.

    Args:
      dtype: data type of the `RaggedTensor`'s inner values.
      ragged_rank: ragged_rank of the declared `RaggedTensor`.
      row_splits_dtype: data type for the `RaggedTensor`'s row splits.
        One of: `tf.int32` or `tf.int64`.
    """
    self._dtype = dtype
    self._ragged_rank = ragged_rank
    self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)

  @property
  def dtype(self):
    """Data type of the `RaggedTensor`'s inner values."""
    return self._dtype

  @property
  def ragged_rank(self):
    """Declared ragged rank."""
    return self._ragged_rank

  @property
  def row_splits_dtype(self):
    """Data type of the row splits (`tf.int32` or `tf.int64`)."""
    return self._row_splits_dtype

  def __repr__(self):
    return (f"RaggedTensorType({self.dtype!r}, {self.ragged_rank!r}, "
            f"{self.row_splits_dtype!r})")
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
  """Checks that the given SparseTensor.indices tensor is ragged-right.

  Example: `indices = [[0, 0], [0, 1], [2, 0], [3, 1]]` is not ragged right
  because the entry `[3, 1]` skips a cell.

  Args:
    indices: The SparseTensor indices to check.

  Returns:
    A list of control dependency op tensors.
  """
  # Split each index into its outer dimensions and innermost position.
  index_prefix = indices[:, :-1]
  index_suffix = indices[:, -1]
  # Check whether each index is starting a new row in the innermost dimension
  # (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
  # (Note: this skips the first index; we will check that separately below.)
  index_prefix_changed = math_ops.reduce_any(
      math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
  # Check two cases:
  #   * For indices that start a new row: index_suffix[i] must be zero.
  #   * For indices that continue a row: index_suffix[i] must be equal to
  #     index_suffix[i-1]+1.
  index_ok = array_ops.where(
      index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
      math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
  # Also check that the very first index didn't skip any cells.  The first
  # index starts a new row (by definition), so its suffix should be zero.
  sparse_indices_are_ragged_right = math_ops.logical_and(
      math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
      math_ops.reduce_all(index_ok))
  message = [
      "SparseTensor is not right-ragged", "SparseTensor.indices =", indices
  ]
  return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
                                      sparse_values_grad,
                                      unused_sparse_shape_grad):
  """Gradient for RaggedTensorToSparse.

  Args:
    op: The forward RaggedTensorToSparse op.
    unused_sparse_indices_grad: Ignored (indices are not differentiable).
    sparse_values_grad: Gradient w.r.t. the SparseTensor's values.
    unused_sparse_shape_grad: Ignored (shape is not differentiable).

  Returns:
    Gradients for the op's inputs: `None` for each row-splits input and a
    reshaped values gradient for the flat_values input.
  """
  op_inputs_nested_row_splits = op.inputs[:-1]
  op_inputs_flat_values = op.inputs[-1]
  # No gradient for the RaggedTensor's nested_row_splits.
  nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)
  # Gradient for the RaggedTensor's flat_values is formed by reshaping
  # the gradient for the SparseTensor's values.
  flat_values_shape = array_ops.shape(op_inputs_flat_values)
  flat_values_gradient = array_ops.reshape(sparse_values_grad,
                                           flat_values_shape)
  return nested_row_splits_gradient + [flat_values_gradient]
def _assert_monotonic_increasing(tensor, message=None):
  """Asserts that the 1-D `tensor` is non-decreasing."""
  return check_ops.assert_non_negative(
      tensor[1:] - tensor[:-1], message=message)
def _assert_zero(tensor, message=None):
  """Asserts that every element of `tensor` equals zero."""
  return check_ops.assert_equal(
      tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)
def _nrows(tensor, out_type=dtypes.int32):
  """Returns the number of rows (size of dimension 0) of `tensor`."""
  if isinstance(tensor, RaggedTensor):
    return tensor.nrows(out_type=out_type)
  else:
    return array_ops.shape(tensor, out_type=out_type)[0]
def merge_dims(value, outer_axis, inner_axis):
  """Merges value[outer_axis...inner_axis] into a single dimension.

  See `RaggedTensor.merge_dims()` for more details.  This helper differs from
  `RaggedTensor.merge_dims()` in that `value` may be a dense or ragged tensor.

  Args:
    value: A `RaggedTensor` or `Tensor`
    outer_axis: `int`
    inner_axis: `int`

  Returns:
    A flattened `RaggedTensor` or `Tensor`.
  """
  if outer_axis == inner_axis:
    return value

  # Flatten outer dimensions of a RaggedTensor by just taking its values.
  while outer_axis == 0 and isinstance(value, RaggedTensor):
    value = value.values
    inner_axis -= 1
    if inner_axis == 0:
      return value

  # Flatten non-Ragged tensors using tf.reshape().
  if not isinstance(value, RaggedTensor):
    if value.shape.is_fully_defined():
      old_shape = value.shape.as_list()
      new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]
    else:
      old_shape = array_ops.shape(value)
      new_shape = array_ops.concat(
          [old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)
    return array_ops.reshape(value, new_shape)

  # Handle outer_axis>1 via recursion.
  if outer_axis > 1:
    return value.with_values(
        merge_dims(value.values, outer_axis - 1, inner_axis - 1))

  # At this point, we know outer_axis == 1, and value is a RaggedTensor.
  # So we need to flatten the values and build a corresponding splits tensor.
  new_values = value.values
  new_splits = value.row_splits
  for axis in range(outer_axis, inner_axis):
    if isinstance(new_values, RaggedTensor):
      # Flatten a single ragged dimension.
      # Composing row_splits: gather maps outer split offsets through the
      # inner row_splits, yielding splits into the inner values.
      new_splits = array_ops.gather(new_values.row_splits, new_splits)
      new_values = new_values.values
    else:
      # Flatten all remaining dense dimensions.
      shape_split = inner_axis - axis + 1
      if new_values.shape.is_fully_defined():
        old_shape = new_values.shape.as_list()
        new_shape = [-1] + old_shape[shape_split:]
        flat_size = _prod(old_shape[1:shape_split])
      else:
        old_shape = array_ops.shape(new_values)
        new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)
        flat_size = math_ops.cast(
            math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)
      # Scale the splits by the number of flattened elements per row.
      new_values = array_ops.reshape(new_values, new_shape)
      new_splits = new_splits * flat_size
      break
  return RaggedTensor.from_row_splits(new_values, new_splits)
def _prod(lst):
"""Returns the product of the numbers in a list."""
return functools.reduce(operator.mul, lst, 1)
def _get_row_partition_type_tensor_pairs_tail(partition):
  """Gets a row partition type tensor pair for the tail.

  If value_rowid is defined, then it is used. Otherwise, row_splits
  are used.

  Args:
    partition: a RowPartition.

  Returns:
    A single (row_partition_type, row_partition_tensor) pair.
  """
  if partition.has_precomputed_value_rowids():
    return ("VALUE_ROWIDS", partition.value_rowids())
  else:
    return ("ROW_SPLITS", partition.row_splits())
def _get_row_partition_type_tensor_pairs(rt_input):
  """Gets a list of the row partitions for rt_input.

  If value_rowids are defined, then they are used. Otherwise, row_splits
  are used. If the outermost level has value_rowids defined, then nrows is
  also added.

  Args:
    rt_input: a ragged tensor.

  Returns:
    A list of (row_partition_type, row_partition_tensor) pairs.
  """
  partitions = rt_input._nested_row_partitions  # pylint: disable=protected-access
  tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]
  # Only the outermost partition encodes its FIRST_DIM_SIZE explicitly.
  if partitions[0]._value_rowids is not None:  # pylint: disable=protected-access
    return [("FIRST_DIM_SIZE", partitions[0].nrows()),
            ("VALUE_ROWIDS", partitions[0].value_rowids())] + tail
  else:
    return [("ROW_SPLITS", partitions[0].row_splits())] + tail
def _shape_as_tensor(shape, dtype):
  """Takes shape and coerces it to a shape as a tensor.

  If the object is already a tensor, simply passes it on (result is guaranteed
  to be int64 or int32, but not necessarily dtype).
  If not, creates a tensor of type dtype.

  Result is either a scalar equal to -1 if the shape is unknown_rank.
  Otherwise, it is a vector, where unknown dimensions are represented with a
  value of -1.

  In C++, see TensorShapeFromTensor for parsing shapes in kernels, and
  InferenceContext::MakeShapeFromShapeTensorTreatScalarAsUnknownShape, for
  use in the shape inference function.

  Args:
    shape: input to coerce from TensorShape, Tensor, None, List[Optional[Int]],
      Tuple[Optional[Int]].
    dtype: tf.int64 or tf.int32

  Returns:
    a scalar or vector tensor of dtype tf.int32 or tf.int64.
  """
  if dtype not in (dtypes.int64, dtypes.int32):
    raise ValueError(f"Expected int64 or int32 for dtype: got {dtype}.")

  if isinstance(shape, ops.Tensor):
    # Pass integer tensors through unchanged; cast anything else.
    if shape.dtype in (dtypes.int64, dtypes.int32):
      return shape
    return math_ops.cast(shape, dtype)

  shape = tensor_shape.as_shape(shape)
  if not shape:
    # Imply rank is unknown using a -1 scalar.
    return constant_op.constant(-1, dtype=dtype)

  # Represent each unknown dimension with -1; shape is now List[Int].
  dims = [-1 if dim is None else dim for dim in shape.as_list()]
  return constant_op.constant(dims, dtype=dtype)
def _nvals_uniform_row_length(values, uniform_row_length):
  """Get the number of values for uniform row length constructor."""
  # Prefer a statically known leading dimension when available.
  const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value
  if const_nvals is not None:
    nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)
  elif isinstance(values, RaggedTensor):
    nvals = values.nrows(out_type=uniform_row_length.dtype)
  else:
    nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]
  return nvals
def _get_optional_partition_dtype(values):
  """Returns the partition dtype, or None if None exists."""
  if not isinstance(values, RaggedTensor):
    return None
  return values._row_partition.dtype  # pylint: disable=protected-access
# Registry of types that may appear as a RaggedTensor's `values`;
# extended at runtime via `_add_supported_value_type`.
_SUPPORTED_RAGGED_VALUE_TYPES = (ops.Tensor, RaggedTensor)


# TODO(edloper): Consider whether we should change the registry to be on
# TypeSpecs rather than ValueTypes.
def _add_supported_value_type(cls):
  """Register the `cls` as supported value type of RaggedTensor.

  The cls must be a subclass of CompositeTensor, and must support:
   - Properties:
     - x.shape
     - x.dtype
   - Methods:
     - x.__getitem__(idx) (method: returns a supported value type)
     - x.set_shape(shape)
   - Ops:
     - tf.shape(x) -- tf.shape(x)[0] must be a tf.Tensor.
     - tf.tile(x)
     - assert_rank_at_least(x)
     - tf.ones_like(x)
     - tf.gather(params=x, indices=Tensor)
     - tf.add(x, y)
     - tf.boolean_mask(x, ...)
     - @TODO(edloper): Complete this list

  Note: the following RaggedTensor, RaggedTensorSpec methods & ops are not
  currently supported unless `rt.values` is a RaggedTensor or a tf.Tensor:
    - rt.to_tensor()
    - rt.to_sparse_tensor()
    - rt._to_variant()
    - rt._from_variant()
    - tf.ragged.cross([rt])
    - tf.gather(params=x, indices=rt)  # rt used for indices
    - RaggedTensorSpec methods:
      - _batch
      - _unbatch
      - _to_tensor_list
      - _to_batched_tensor_list
      - _from_compatible_tensor_list

  Args:
    cls: The type to be added to supported value types.

  Raises:
    ValueError: If `cls` is not a CompositeTensor subclass or lacks the
      required `shape` / `dtype` properties.
  """
  if not issubclass(cls, composite_tensor.CompositeTensor):
    raise ValueError(f"cls ({cls}) must be a subclass of CompositeTensor.")
  if not hasattr(cls, "shape"):
    raise ValueError("cls must support the `shape` property.")
  if not hasattr(cls, "dtype"):
    raise ValueError("cls must support the `dtype` property.")
  global _SUPPORTED_RAGGED_VALUE_TYPES
  _SUPPORTED_RAGGED_VALUE_TYPES += (cls,)
def _is_supported_ragged_values_type(value):
  """Returns True if `value` may be used as a RaggedTensor's `values`."""
  return isinstance(value, _SUPPORTED_RAGGED_VALUE_TYPES)
def _assert_is_supported_ragged_values_type(value):
  """Raises TypeError unless `value` is a supported ragged values type."""
  if _is_supported_ragged_values_type(value):
    return
  ok_types = ", ".join(cls.__name__ for cls in _SUPPORTED_RAGGED_VALUE_TYPES)
  raise TypeError(f"type(values) must be one of: {ok_types}, got {value}.")
def _formatter(x):
"""Separate Numpy array elements with comma."""
if isinstance(x, np.ndarray):
return np.array2string(x, separator=", ")
else:
return str(x)
# Type annotation indicating that a value is ragged.  Includes RaggedTensor
# as well as the (deprecated) RaggedTensorValue class from TF 1.x.
Ragged = typing.Union[RaggedTensor, ragged_tensor_value.RaggedTensorValue]

# Type annotation indicating that a value is a ragged tensor, a dense tensor,
# or a value that can be converted to a tensor (e.g. np.array).
# TODO(edloper): Add Variable to TensorLike, and remove it from here.
RaggedOrDense = typing.Union[Ragged, core_types.TensorLike]
import functools
import operator
import typing
import numpy as np
from tensorflow.python import tf2
from tensorflow.python.client import session
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_config
from tensorflow.python.ops.ragged import ragged_tensor_value
from tensorflow.python.ops.ragged import ragged_util
from tensorflow.python.ops.ragged.row_partition import RowPartition
from tensorflow.python.types import core as core_types
from tensorflow.python.types import internal as internal_types
from tensorflow.python.util import dispatch
from tensorflow.python.util.tf_export import tf_export
from tensorflow.tools.docs import doc_controls
_convert_row_partition = RowPartition._convert_row_partition
@tf_export("RaggedTensor")
class RaggedTensor(composite_tensor.CompositeTensor,
internal_types.NativeObject):
  @doc_controls.do_not_generate_docs
  def __init__(self, values, row_partition, internal=False):
    """Private constructor; use the `from_*` factory methods instead.

    Args:
      values: The values of the ragged tensor (a supported value type with
        rank >= 1).
      row_partition: A `RowPartition` describing how `values` is split into
        rows.
      internal: Guard flag; must be True (set only by the factory methods).

    Raises:
      ValueError: If called directly (internal=False).
      TypeError: If `row_partition` is not a `RowPartition`.
    """
    if not internal:
      raise ValueError("RaggedTensor constructor is private; please use one "
                       "of the factory methods instead (e.g., "
                       "RaggedTensor.from_row_lengths())")
    _assert_is_supported_ragged_values_type(values)
    if not isinstance(row_partition, RowPartition):
      raise TypeError(f"Argument `row_partition` must be a RowPartition. "
                      f"Received {row_partition}.")
    # Validate shape and (for nested ragged values) row-splits dtype.
    values.shape.with_rank_at_least(1)
    if isinstance(values, RaggedTensor):
      # Factory methods are expected to reconcile dtypes before reaching here.
      assert row_partition.dtype == values._row_partition.dtype
    self._values = values
    self._row_partition = row_partition
  @classmethod
  def _from_row_partition(cls, values, row_partition, validate=True):
    """Creates a `RaggedTensor` from `values` and a `RowPartition`.

    Args:
      values: The values of the ragged tensor.
      row_partition: A `RowPartition` partitioning `values` into rows.
      validate: If True, attach runtime checks that `row_partition` is a
        valid partition of `values`.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `row_partition` is not a `RowPartition`, or `validate`
        is not a bool.
    """
    if not isinstance(row_partition, RowPartition):
      raise TypeError(f"Argument `row_partition` must be a RowPartition. "
                      f"Received {row_partition}.")
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    values, row_partition = cls._convert_values_and_partition(
        values, row_partition, "partition")
    if row_partition.has_precomputed_value_rowids():
      # Static check: one rowid per value.
      value_rowids_shape = row_partition.value_rowids().shape
      values.shape[:1].assert_is_compatible_with(value_rowids_shape)
    if validate:
      msg = "Arguments to _from_row_partition do not form a valid RaggedTensor"
      nvals = _nrows(values, row_partition.dtype)
      checks = [
          check_ops.assert_equal(
              math_ops.cast(row_partition.nvals(), row_partition.dtype),
              nvals,
              message=msg),
      ]
      if not isinstance(values, RaggedTensor):
        checks.append(check_ops.assert_rank_at_least(values, 1))
      row_partition = row_partition.with_dependencies(checks)
    return cls(values=values, internal=True, row_partition=row_partition)
  @classmethod
  @dispatch.add_dispatch_support
  def from_value_rowids(cls,
                        values,
                        value_rowids,
                        nrows=None,
                        name=None,
                        validate=True):
    """Creates a `RaggedTensor` with rows partitioned by `value_rowids`.

    Args:
      values: The values of the ragged tensor.
      value_rowids: 1-D integer tensor mapping each value to its row index.
      nrows: Optional number of rows (inferred by
        `RowPartition.from_value_rowids` when omitted).
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `validate` is not a bool.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromValueRowIds",
                        [values, value_rowids, nrows]):
      row_partition = RowPartition.from_value_rowids(
          value_rowids=value_rowids,
          nrows=nrows,
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_row_splits(cls, values, row_splits, name=None, validate=True):
    """Creates a `RaggedTensor` with rows partitioned by `row_splits`.

    Args:
      values: The values of the ragged tensor.
      row_splits: 1-D integer tensor of row-boundary offsets into `values`.
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `validate` is not a bool.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromRowSplits", [values, row_splits]):
      row_partition = RowPartition.from_row_splits(
          row_splits=row_splits,
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_row_lengths(cls, values, row_lengths, name=None, validate=True):
    """Creates a `RaggedTensor` with rows partitioned by `row_lengths`.

    Args:
      values: The values of the ragged tensor.
      row_lengths: 1-D integer tensor giving the length of each row.
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `validate` is not a bool.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromRowLengths", [values, row_lengths]):
      row_partition = RowPartition.from_row_lengths(
          row_lengths=row_lengths,
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_row_starts(cls, values, row_starts, name=None, validate=True):
    """Creates a `RaggedTensor` with rows partitioned by `row_starts`.

    Args:
      values: The values of the ragged tensor.
      row_starts: 1-D integer tensor of the start offset of each row.
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `validate` is not a bool.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromRowStarts", [values, row_starts]):
      values = _convert_to_ragged_tensor_values(values)
      row_partition = RowPartition.from_row_starts(
          row_starts=row_starts,
          nvals=_nrows(values),
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_row_limits(cls, values, row_limits, name=None, validate=True):
    """Creates a `RaggedTensor` with rows partitioned by `row_limits`.

    Args:
      values: The values of the ragged tensor.
      row_limits: 1-D integer tensor of the exclusive end offset of each row.
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `validate` is not a bool.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromRowLimits", [values, row_limits]):
      values = _convert_to_ragged_tensor_values(values)
      row_partition = RowPartition.from_row_limits(
          row_limits=row_limits,
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_uniform_row_length(cls,
                              values,
                              uniform_row_length,
                              nrows=None,
                              validate=True,
                              name=None):
    """Creates a `RaggedTensor` whose rows all have the same length.

    Args:
      values: The values of the ragged tensor.
      uniform_row_length: Scalar integer tensor giving the common row length.
      nrows: Optional number of rows (inferred by
        `RowPartition.from_uniform_row_length` when omitted).
      validate: If True, attach runtime validation checks.
      name: Optional name scope for the ops.

    Returns:
      A `RaggedTensor`.

    Raises:
      TypeError: If `validate` is not a bool.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    with ops.name_scope(name, "RaggedFromUniformRowLength",
                        [values, uniform_row_length, nrows]):
      values = _convert_to_ragged_tensor_values(values)
      uniform_row_length = _convert_row_partition(
          uniform_row_length, "UniformRowLength",
          _get_optional_partition_dtype(values))
      nvals = _nvals_uniform_row_length(values, uniform_row_length)
      row_partition = RowPartition.from_uniform_row_length(
          uniform_row_length=uniform_row_length,
          nvals=nvals,
          nrows=nrows,
          validate=validate,
          dtype_hint=_get_optional_partition_dtype(values))
      return cls._from_row_partition(values, row_partition, validate=validate)
  @classmethod
  @dispatch.add_dispatch_support
  def from_nested_value_rowids(cls,
                               flat_values,
                               nested_value_rowids,
                               nested_nrows=None,
                               name=None,
                               validate=True):
    """Creates a nested `RaggedTensor` from a list of `value_rowids` tensors.

    Equivalent to repeated `from_value_rowids` calls, applied from the
    innermost (last) list entry outward.

    Args:
      flat_values: The innermost values of the ragged tensor.
      nested_value_rowids: List of 1-D integer tensors, one per ragged
        dimension (outermost first).
      nested_nrows: Optional list of row counts, parallel to
        `nested_value_rowids`.
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor` (or `flat_values` if the list is empty).

    Raises:
      TypeError: If `validate` is not a bool, or a list argument is a single
        Tensor instead of a list.
      ValueError: If `nested_nrows` and `nested_value_rowids` have different
        lengths.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    if isinstance(nested_value_rowids, ops.Tensor):
      raise TypeError(f"Argument `nested_value_rowids` must be a list of "
                      f"Tensors. Received {nested_value_rowids}.")
    if nested_nrows is None:
      nested_nrows = [None] * len(nested_value_rowids)
    else:
      if isinstance(nested_nrows, ops.Tensor):
        raise TypeError(f"Argument `nested_nrows` must be a list of "
                        f"Tensors. Received {nested_nrows}.")
      if len(nested_nrows) != len(nested_value_rowids):
        raise ValueError(
            f"Argument `nested_nrows` must have the same length as "
            f"argument `nested_value_rowids`. len(nested_nrows) = "
            f"{len(nested_nrows)} vs. len(nested_values_rowids) = "
            f"{len(nested_value_rowids)}.")
    with ops.name_scope(name, "RaggedFromNestedValueRowIds", [flat_values] +
                        list(nested_value_rowids) + list(nested_nrows)):
      result = flat_values
      # Build from the innermost partition outward.
      for value_rowids, nrows in reversed(
          list(zip(nested_value_rowids, nested_nrows))):
        result = cls.from_value_rowids(
            result, value_rowids, nrows, validate=validate)
      return result
  @classmethod
  @dispatch.add_dispatch_support
  def from_nested_row_splits(cls,
                             flat_values,
                             nested_row_splits,
                             name=None,
                             validate=True):
    """Creates a nested `RaggedTensor` from a list of `row_splits` tensors.

    Args:
      flat_values: The innermost values of the ragged tensor.
      nested_row_splits: List of 1-D integer row-splits tensors, one per
        ragged dimension (outermost first).
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor` (or `flat_values` if the list is empty).

    Raises:
      TypeError: If `validate` is not a bool, or `nested_row_splits` is a
        single Tensor instead of a list.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    if isinstance(nested_row_splits, ops.Tensor):
      raise TypeError(f"Argument `nested_row_splits` must be a list of "
                      f"Tensors. Received {nested_row_splits}.")
    with ops.name_scope(name, "RaggedFromNestedRowSplits",
                        [flat_values] + list(nested_row_splits)):
      result = flat_values
      # Build from the innermost partition outward.
      for splits in reversed(nested_row_splits):
        result = cls.from_row_splits(result, splits, validate=validate)
      return result
  @classmethod
  @dispatch.add_dispatch_support
  def from_nested_row_lengths(cls,
                              flat_values,
                              nested_row_lengths,
                              name=None,
                              validate=True):
    """Creates a nested `RaggedTensor` from a list of `row_lengths` tensors.

    Args:
      flat_values: The innermost values of the ragged tensor.
      nested_row_lengths: List of 1-D integer row-lengths tensors, one per
        ragged dimension (outermost first).
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor` (or `flat_values` if the list is empty).

    Raises:
      TypeError: If `validate` is not a bool, or `nested_row_lengths` is a
        single Tensor instead of a list.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    if isinstance(nested_row_lengths, ops.Tensor):
      raise TypeError(f"Argument `nested_row_lengths` must be a list of "
                      f"Tensors. Received {nested_row_lengths}.")
    with ops.name_scope(name, "RaggedFromNestedRowlengths",
                        [flat_values] + list(nested_row_lengths)):
      result = flat_values
      # Build from the innermost partition outward.
      for lengths in reversed(nested_row_lengths):
        result = cls.from_row_lengths(result, lengths, validate=validate)
      return result
  @classmethod
  def _from_nested_row_partitions(cls,
                                  flat_values,
                                  nested_row_partitions,
                                  name=None,
                                  validate=True):
    """Creates a nested `RaggedTensor` from a list of `RowPartition`s.

    Args:
      flat_values: The innermost values of the ragged tensor.
      nested_row_partitions: List of `RowPartition`s, one per ragged
        dimension (outermost first).
      name: Optional name scope for the ops.
      validate: If True, attach runtime validation checks.

    Returns:
      A `RaggedTensor` (or `flat_values` if the list is empty).

    Raises:
      TypeError: If `validate` is not a bool, or `nested_row_partitions` is
        a single RowPartition/Tensor instead of a list.
    """
    if not isinstance(validate, bool):
      raise TypeError(f"Argument `validate` must have type bool. "
                      f"Received {validate}.")
    if isinstance(nested_row_partitions, RowPartition):
      raise TypeError(f"Argument `nested_row_partitions` must be a list of "
                      f"RowPartitions. Received {nested_row_partitions}.")
    if isinstance(nested_row_partitions, ops.Tensor):
      raise TypeError(f"Argument `nested_row_partitions` must be a list of "
                      f"RowPartitions. Received {nested_row_partitions}.")
    with ops.name_scope(name, "RaggedFromNestedRowPartitions",
                        [flat_values] + list(nested_row_partitions)):
      result = flat_values
      # Build from the innermost partition outward.
      for partition in reversed(nested_row_partitions):
        result = cls._from_row_partition(result, partition, validate=validate)
      return result
  @classmethod
  def _convert_values_and_partition(cls, values, row_partition, name):
    """Converts `values` and checks dtype consistency with `row_partition`.

    Args:
      values: The values tensor (converted if not already a supported type).
      row_partition: The `RowPartition` that will partition `values`.
      name: Name of the argument being converted (used in error messages).

    Returns:
      A `(values, row_partition)` tuple with consistent row_splits dtypes.

    Raises:
      TypeError: If `row_partition` is not a `RowPartition`.
      ValueError: If the dtypes mismatch and auto-casting is disabled.
    """
    if not isinstance(row_partition, RowPartition):
      raise TypeError(f"Argument `row_partition` must be a RowPartition. "
                      f"Received {row_partition}.")
    if isinstance(values, RaggedTensor):
      # A nested RaggedTensor must use the same row_splits dtype as the
      # partition; auto-cast it when the config allows, otherwise fail.
      if values._row_partition.dtype != row_partition.dtype:
        if not ragged_config.auto_cast_partition_dtype():
          raise ValueError(
              f"Argument `row_partition` of RaggedTensor with name: {name} "
              f"must have same dtype as Argument `values`. "
              f"({row_partition.dtype} vs. {values._row_partition.dtype}).")
        values = values.with_row_splits_dtype(row_partition.dtype)
    else:
      values = _convert_to_ragged_tensor_values(values)
    return (values, row_partition)
@property
def dtype(self):
return self._values.dtype
@property
def shape(self):
nrows = self._row_partition.static_nrows
ncols = self._row_partition.static_uniform_row_length
value_shape = self._values.shape[1:]
return tensor_shape.TensorShape([nrows, ncols]).concatenate(value_shape)
  def get_shape(self):
    """Returns the statically known shape; alias for the `shape` property."""
    return self.shape
@property
def ragged_rank(self):
values_is_ragged = isinstance(self._values, RaggedTensor)
return self._values.ragged_rank + 1 if values_is_ragged else 1
  @property
  def values(self):
    """The values tensor that this RaggedTensor partitions into rows."""
    return self._values
@property
def _nested_row_partitions(self):
partitions = [self._row_partition]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
partitions.append(rt_values._row_partition)
rt_values = rt_values.values
return tuple(partitions)
  @property
  def row_splits(self):
    """The row-splits tensor for this tensor's outermost ragged dimension."""
    return self._row_partition.row_splits()
  @property
  def uniform_row_length(self):
    """The uniform row length reported by the underlying `RowPartition`."""
    return self._row_partition.uniform_row_length()
@property
def flat_values(self):
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_values = rt_values.values
return rt_values
@property
def nested_row_splits(self):
rt_nested_splits = [self.row_splits]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_splits.append(rt_values.row_splits)
rt_values = rt_values.values
return tuple(rt_nested_splits)
  def value_rowids(self, name=None):
    """Returns the row index of each value (delegates to the row partition).

    Args:
      name: A name prefix for the returned op (optional).
    """
    with ops.name_scope(name, "RaggedValueRowIds", [self]):
      return self._row_partition.value_rowids()
def nested_value_rowids(self, name=None):
with ops.name_scope(name, "RaggedNestedValueRowIds", [self]):
rt_nested_ids = [self.value_rowids()]
rt_values = self.values
while isinstance(rt_values, RaggedTensor):
rt_nested_ids.append(rt_values.value_rowids())
rt_values = rt_values.values
return tuple(rt_nested_ids)
def nrows(self, out_type=None, name=None):
with ops.name_scope(name, "RaggedNRows", [self]):
if out_type is None:
return self._row_partition.nrows()
else:
return math_ops.cast(self._row_partition.nrows(), dtype=out_type)
  def row_starts(self, name=None):
    """Returns each row's start offset (delegates to the row partition).

    Args:
      name: A name prefix for the returned op (optional).
    """
    with ops.name_scope(name, "RaggedRowStarts", [self]):
      return self._row_partition.row_starts()
  def row_limits(self, name=None):
    """Returns each row's limit offset (delegates to the row partition).

    Args:
      name: A name prefix for the returned op (optional).
    """
    with ops.name_scope(name, "RaggedRowLimits", [self]):
      return self._row_partition.row_limits()
  def row_lengths(self, axis=1, name=None):
    """Returns the lengths of the rows along the given `axis`.

    Args:
      axis: Integer axis whose row lengths should be returned (default 1).
      name: A name prefix for the returned ops (optional).

    Returns:
      An integer tensor (potentially ragged for axis > 1).
    """
    # Fast paths for the common literal axis values; these skip the
    # name_scope and axis normalization below.
    if axis == 0:
      return self._row_partition.nrows()
    if axis == 1:
      return self._row_partition.row_lengths()
    with ops.name_scope(name, "RaggedRowLengths", [self]):
      axis = array_ops.get_positive_axis(
          axis, self.shape.rank, ndims_name="rank(self)")
      if axis == 0:
        return self.nrows()
      elif axis == 1:
        splits = self.row_splits
        return splits[1:] - splits[:-1]
      elif isinstance(self.values, RaggedTensor):
        # Recurse into the nested ragged dimension.
        return self.with_values(self.values.row_lengths(axis - 1))
      else:
        # Uniform inner dimension: every row has the same length
        # (shape[axis - 1] of the dense values).
        shape = array_ops.shape(self.values, out_type=self._row_partition.dtype)
        return self.with_values(
            array_ops.ones(shape[:axis - 1], self._row_partition.dtype) *
            shape[axis - 1])
def nested_row_lengths(self, name=None):
with ops.name_scope(name, "RaggedNestedRowLengths", [self]):
rt_nested_row_lengths = []
rt = self
while isinstance(rt, RaggedTensor):
rt_nested_row_lengths.append(rt.row_lengths())
rt = rt.values
return tuple(rt_nested_row_lengths)
  def bounding_shape(self, axis=None, name=None, out_type=None):
    """Returns the tight bounding-box shape for this `RaggedTensor`.

    Args:
      axis: Optional int; if given, only that axis of the bounding shape is
        returned.
      name: A name prefix for the returned ops (optional).
      out_type: dtype of the result; defaults to the row_splits dtype.

    Returns:
      An integer tensor: the full bounding shape, or a single axis of it.
    """
    if out_type is None:
      out_type = self._row_partition.dtype
    else:
      out_type = dtypes.as_dtype(out_type)
    with ops.name_scope(name, "RaggedBoundingBox", [self, axis]):
      nested_splits = self.nested_row_splits
      rt_flat_values = self.flat_values
      # Fast paths for single-axis queries on the two outermost dimensions.
      if isinstance(axis, int):
        if axis == 0:
          return array_ops.shape(nested_splits[0], out_type=out_type)[0] - 1
        elif axis == 1:
          result = math_ops.maximum(math_ops.reduce_max(self.row_lengths()), 0)
          if out_type != self._row_partition.dtype:
            result = math_ops.cast(result, out_type)
          return result
      splits_shape = array_ops.shape(self.row_splits, out_type=out_type)
      flat_values_shape = array_ops.shape(rt_flat_values, out_type=out_type)
      # Each ragged dimension's bound is its maximum row length (at least 0).
      ragged_dimensions = [splits_shape[0] - 1] + [
          math_ops.maximum(math_ops.reduce_max(splits[1:] - splits[:-1]), 0)
          for splits in nested_splits
      ]
      inner_dimensions = flat_values_shape[1:]
      if out_type != self._row_partition.dtype:
        ragged_dimensions = [
            math_ops.cast(d, out_type) for d in ragged_dimensions
        ]
      bbox = array_ops.concat(
          [array_ops.stack(ragged_dimensions), inner_dimensions], axis=0)
      return bbox if axis is None else array_ops.gather(bbox, axis)
  def with_values(self, new_values):
    """Returns a copy of `self` with `values` replaced by `new_values`.

    Args:
      new_values: The replacement values; its outer dimension must be
        compatible with the current values'.

    Returns:
      A `RaggedTensor` reusing this tensor's row partition.

    Raises:
      ValueError: If the row_splits dtypes mismatch and auto-casting is
        disabled.
    """
    new_values = _convert_to_ragged_tensor_values(new_values)
    new_values.shape.with_rank_at_least(1)
    self.values.shape[:1].assert_is_compatible_with(new_values.shape[:1])
    if (isinstance(new_values, RaggedTensor) and
        self._row_partition.dtype != new_values.row_splits.dtype):
      if not ragged_config.auto_cast_partition_dtype():
        raise ValueError("self and new_values have mismatched row_splits "
                         "dtypes; use RaggedTensor.with_row_splits_dtype() to "
                         "convert them to compatible dtypes.")
      # Auto-cast both sides to int64 before combining.
      new_values = new_values.with_row_splits_dtype(dtypes.int64)
      return self.with_row_splits_dtype(dtypes.int64).with_values(new_values)
    return RaggedTensor(
        values=new_values, row_partition=self._row_partition, internal=True)
def with_flat_values(self, new_values):
if isinstance(self._values, RaggedTensor):
return self.with_values(self.values.with_flat_values(new_values))
else:
new_values = _convert_to_ragged_tensor_values(new_values)
return self.with_values(new_values)
def with_row_splits_dtype(self, dtype):
dtype = dtypes.as_dtype(dtype)
if dtype not in (dtypes.int32, dtypes.int64):
raise ValueError(f"Argument `row_splits` dtype must be int32 or int64. "
f"Received {dtype}.")
if self._row_partition.dtype == dtype:
return self
current_values = self._values
if isinstance(current_values, RaggedTensor):
return RaggedTensor(
values=current_values.with_row_splits_dtype(dtype),
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
else:
return RaggedTensor(
values=current_values,
row_partition=self._row_partition.with_row_splits_dtype(dtype),
internal=True)
  def merge_dims(self, outer_axis, inner_axis):
    """Merges dimensions `outer_axis` through `inner_axis` into one dimension.

    Args:
      outer_axis: int, the first dimension to merge (negative values are
        normalized against rank(self)).
      inner_axis: int, the last dimension to merge (normalized the same way).

    Returns:
      The merged tensor (delegates to the module-level `merge_dims`).

    Raises:
      ValueError: If the normalized `outer_axis` exceeds `inner_axis`.
    """
    outer_axis = array_ops.get_positive_axis(
        outer_axis,
        self.shape.rank,
        axis_name="outer_axis",
        ndims_name="rank(self)")
    inner_axis = array_ops.get_positive_axis(
        inner_axis,
        self.shape.rank,
        axis_name="inner_axis",
        ndims_name="rank(self)")
    if not outer_axis <= inner_axis:
      raise ValueError(f"Expected outer_axis ({outer_axis}) to be less than or "
                       f"equal to inner_axis ({inner_axis}).")
    return merge_dims(self, outer_axis, inner_axis)
  def _set_shape(self, shape):
    """Refines this tensor's static shape in place from `shape`.

    Mutates the private state of the nested `RowPartition`s: sets the outer
    row_splits' static length, records uniform row lengths for dimensions
    whose size becomes known, and refines the flat_values' inner shape.

    Args:
      shape: A `TensorShape` (or convertible) to merge into this tensor's
        statically known shape.

    Raises:
      ValueError: If a known uniform row length conflicts with `shape`.
    """
    shape = tensor_shape.as_shape(shape)
    if shape.rank is None:
      # Nothing statically known; nothing to refine.
      return
    shape = shape.as_list()
    if shape[0] is not None:
      # row_splits has one more entry than there are rows.
      self._row_partition._row_splits.set_shape(shape[0] + 1)
    dtype = self._row_partition.dtype
    for i, partition in enumerate(self._nested_row_partitions):
      size = shape[i + 1]
      if size is not None:
        if partition._uniform_row_length is not None:
          old_row_length = tensor_util.constant_value(
              partition._uniform_row_length)
          if old_row_length is not None:
            if size == old_row_length:
              # Already known and consistent; leave the partition alone.
              continue
            else:
              raise ValueError(f"Inconsistent size for axis {i + 1}: "
                               f"{old_row_length} vs. {size}.")
        partition._uniform_row_length = ops.convert_to_tensor(size, dtype)
        if partition._nrows is None:
          partition._nrows = array_ops.size(
              partition._row_splits, out_type=dtype) - 1
    if hasattr(self.flat_values, "set_shape"):
      # Refine the inner (non-ragged) dimensions of flat_values.
      flat_shape = tensor_shape.as_shape([None] + shape[self.ragged_rank + 1:])
      self.flat_values.set_shape(flat_shape)
#=============================================================================
# Tensor Type Conversions
#=============================================================================
  @classmethod
  @dispatch.add_dispatch_support
  def from_tensor(cls,
                  tensor,
                  lengths=None,
                  padding=None,
                  ragged_rank=1,
                  name=None,
                  row_splits_dtype=dtypes.int64):
    """Converts a dense `tf.Tensor` into a `RaggedTensor`.

    Row boundaries come from `lengths` (explicit row lengths, or a tuple of
    nested row lengths), from `padding` (rows are trimmed after the last
    non-padding value), or — when neither is given — every row is full.

    Args:
      tensor: The dense tensor to convert; rank must be > `ragged_rank`.
      lengths: Optional row lengths (1-D tensor), or a list/tuple of nested
        row-lengths tensors. Mutually exclusive with `padding`.
      padding: Optional padding value; its shape must be compatible with
        `tensor.shape[2:]`. Mutually exclusive with `lengths`.
      ragged_rank: Positive int; number of ragged dimensions in the result.
      name: A name prefix for the returned ops (optional).
      row_splits_dtype: dtype for the result's row_splits (int32 or int64).

    Returns:
      A `RaggedTensor` with the requested `ragged_rank`.

    Raises:
      ValueError: If both `lengths` and `padding` are given, if
        `ragged_rank <= 0`, or if `ragged_rank` conflicts with nested lengths.
      TypeError: If `ragged_rank` is not an int.
    """
    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
    if lengths is not None and padding is not None:
      raise ValueError("Specify argument `lengths` or `padding`, but not both.")
    if not isinstance(ragged_rank, int):
      raise TypeError(f"Argument `ragged_rank` must be an int. "
                      f"Received {ragged_rank}.")
    if ragged_rank <= 0:
      raise ValueError(f"Argument `ragged_rank` must be greater than 0. "
                       f"Received {ragged_rank}.")
    with ops.name_scope(name, "RaggedFromTensor", [tensor, lengths, padding]):
      tensor = ops.convert_to_tensor(tensor, name="tensor")
      tensor.shape.with_rank_at_least(ragged_rank + 1)
      input_shape = array_ops.shape(tensor, out_type=row_splits_dtype)
      ncols = input_shape[1]
      # Handle nested row lengths: mask out values past each row length and
      # rebuild from the nested lengths directly.
      if (lengths is not None and isinstance(lengths, (list, tuple)) and
          len(lengths) and not isinstance(lengths[0], (int, float))):
        if ragged_rank not in (1, len(lengths)):
          # Note: `ragged_rank=1` is accepted here because it is the default
          # value; when `lengths` is a tuple of row-lengths tensors,
          # len(lengths) determines the ragged rank.  We only complain if an
          # explicit ragged_rank disagrees with len(lengths).
          raise ValueError(f"If Argument `lengths` is a tuple of row_lengths, "
                           f"argument `ragged_rank` must be "
                           f"len(lengths): {len(lengths)}. Received "
                           f"ragged_rank: {ragged_rank}.")
        tensor.shape.with_rank_at_least(len(lengths) + 1)
        num_tokens = math_ops.reduce_sum(lengths[-1])
        ones_mask = array_ops.ones([num_tokens], dtype=dtypes.bool)
        ragged_mask = cls.from_nested_row_lengths(
            ones_mask, lengths, validate=False)
        dense_ragged_mask = ragged_mask.to_tensor(default_value=False)
        masked_data = array_ops.boolean_mask(tensor, dense_ragged_mask)
        return cls.from_nested_row_lengths(masked_data, lengths, validate=False)
      # Handle ragged_rank > 1: flatten the leading `ragged_rank` dimensions,
      # recurse with ragged_rank=1, then re-add uniform partitions.
      if ragged_rank > 1:
        if tensor.shape.is_fully_defined():
          input_shape = tensor.shape.as_list()
          # dim_size[i] is the product of the first i+1 dimension sizes.
          dim_size = np.cumprod(input_shape)
          new_shape = [dim_size[ragged_rank - 1]] + input_shape[ragged_rank:]
        else:
          dim_size = math_ops.cumprod(input_shape)
          new_shape = array_ops.concat(
              [[dim_size[ragged_rank - 1]], input_shape[ragged_rank:]], axis=0)
        flattened = array_ops.reshape(tensor, new_shape)
        result = cls.from_tensor(
            flattened, lengths, padding, row_splits_dtype=row_splits_dtype)
        for axis in range(ragged_rank - 1, 0, -1):
          dim_len = tensor_shape.dimension_at_index(tensor.shape, axis).value
          if dim_len is None:
            dim_len = input_shape[axis]
          else:
            dim_len = constant_op.constant(dim_len, row_splits_dtype)
          result = RaggedTensor.from_uniform_row_length(
              values=result,
              uniform_row_length=dim_len,
              nrows=dim_size[axis - 1],
              validate=False)
        return result
      # Handle `padding`: derive per-row lengths from the position of the
      # last non-padding cell, then fall through to the `lengths` branch.
      if padding is not None:
        padding = ops.convert_to_tensor(
            padding, name="padding", dtype=tensor.dtype)
        padding.shape.assert_is_compatible_with(tensor.shape[2:])
        # Element-wise comparison of each value against the padding value.
        has_default_value = math_ops.equal(padding, tensor)
        # Reduce the trailing dimensions (if any) so a cell counts as padding
        # only if all of its components match; afterwards
        # `has_default.shape == tensor.shape[:2]`.
        tensor_rank = array_ops.rank(tensor)
        reduce_axis = math_ops.range(2, tensor_rank)
        has_default = control_flow_ops.cond(
            tensor_rank > 2,
            lambda: math_ops.reduce_all(has_default_value, axis=reduce_axis),
            lambda: has_default_value)
        has_default.set_shape(tensor_shape.TensorShape([None, None]))
        has_default.set_shape(tensor.shape[:2])
        # A row's length is one past the index of its last non-padding cell
        # (0 if the whole row is padding).
        has_nondefault = math_ops.logical_not(has_default)
        has_nondefault = math_ops.cast(has_nondefault, row_splits_dtype)
        length_for_nondefault_value = (
            has_nondefault *
            array_ops.expand_dims(math_ops.range(1, ncols + 1), 0))
        lengths = math_ops.reduce_max(length_for_nondefault_value, axis=1)
      # Handle explicit (or padding-derived) row lengths: build row_splits
      # from the cumulative lengths and mask out trailing values.
      if lengths is not None:
        lengths = ragged_util.convert_to_int_tensor(lengths, "lengths",
                                                    row_splits_dtype)
        lengths.shape.assert_has_rank(1)
        # Clamp lengths into [0, ncols].
        lengths = math_ops.minimum(lengths, ncols)
        lengths = math_ops.maximum(lengths, 0)
        limits = math_ops.cumsum(lengths)
        splits = array_ops.concat(
            [array_ops.zeros([1], row_splits_dtype), limits], axis=0)
        mask = array_ops.sequence_mask(lengths, maxlen=ncols)
        values = array_ops.boolean_mask(tensor, mask)
        return cls.from_row_splits(values, splits, validate=False)
      # No `lengths`/`padding`: every row is full; merge the first two
      # dimensions and use a uniform row length.
      values_shape = array_ops.concat(
          [[input_shape[0] * input_shape[1]], input_shape[2:]], axis=0)
      values = array_ops.reshape(tensor, values_shape)
      const_nrows = tensor_shape.dimension_at_index(tensor.shape, 0).value
      const_ncols = tensor_shape.dimension_at_index(tensor.shape, 1).value
      if const_nrows is not None:
        nrows = constant_op.constant(const_nrows, row_splits_dtype)
      else:
        nrows = input_shape[0]
      if const_ncols is not None:
        ncols = constant_op.constant(const_ncols, row_splits_dtype)
      else:
        ncols = input_shape[1]
      return RaggedTensor.from_uniform_row_length(
          values=values, uniform_row_length=ncols, nrows=nrows, validate=False)
  def to_tensor(self, default_value=None, name=None, shape=None):
    """Converts this `RaggedTensor` into a dense `tf.Tensor`.

    Args:
      default_value: Value used to pad shorter rows (defaults to zero of this
        tensor's dtype).
      name: A name prefix for the returned ops (optional).
      shape: Optional shape for the output tensor.

    Returns:
      A dense `Tensor` with this tensor's values, padded with
      `default_value`.
    """
    with ops.name_scope(name, "RaggedToTensor", [self, default_value, shape]):
      if default_value is not None:
        default_value = ops.convert_to_tensor(
            default_value, name="default_value", dtype=self.dtype)
      type_tensor_pairs = _get_row_partition_type_tensor_pairs(self)
      row_partition_types = [x[0] for x in type_tensor_pairs]
      row_partition_tensors = [x[1] for x in type_tensor_pairs]
      if default_value is None:
        default_value = array_ops.zeros((), self.dtype)
      # A shape list mixing ints and Tensors must be packed into one Tensor.
      if (isinstance(shape, (list, tuple)) and
          any(isinstance(v, ops.Tensor) for v in shape) and
          all(isinstance(v, (int, ops.Tensor)) for v in shape)):
        shape = array_ops.stack(shape)
      shape_tensor = _shape_as_tensor(shape, row_partition_tensors[0].dtype)
      tensor = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
          shape=shape_tensor,
          values=self.flat_values,
          default_value=default_value,
          row_partition_types=row_partition_types,
          row_partition_tensors=row_partition_tensors)
      ragged_shape = self.shape
      if ragged_shape.rank is not None and not isinstance(shape, ops.Tensor):
        # Statically refine the output shape: requested dimensions win where
        # given, otherwise fall back to what is known about `self`.
        shape = tensor_shape.as_shape(shape)
        if shape.rank is None:
          output_shape = ragged_shape
        else:
          output_shape = [
              s1 if s1 is not None else s2
              for (s1, s2) in zip(shape.as_list(), ragged_shape.as_list())
          ]
        tensor.set_shape(output_shape)
      return tensor
  @classmethod
  @dispatch.add_dispatch_support
  def from_sparse(cls, st_input, name=None, row_splits_dtype=dtypes.int64):
    """Converts a 2-D ragged-right `SparseTensor` into a `RaggedTensor`.

    Args:
      st_input: The input `SparseTensor`; must be statically rank 2, with
        ragged-right indices (checked via
        `_assert_sparse_indices_are_ragged_right`).
      name: A name prefix for the returned ops (optional).
      row_splits_dtype: dtype for the result's row_splits (int32 or int64).

    Returns:
      A `RaggedTensor` with the same values as `st_input`.

    Raises:
      TypeError: If `st_input` is not a SparseTensor.
      ValueError: If `st_input` is not statically rank 2.
    """
    row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
    if not sparse_tensor.is_sparse(st_input):
      raise TypeError(f"Argument `st_input` must be of type SparseTensor, but "
                      f"is of type {type(st_input).__name__}.")
    with ops.name_scope(name, "RaggedFromSparse", [st_input]):
      st_input = sparse_tensor.convert_to_tensor_or_sparse_tensor(
          st_input, name="st_input")
      # Try to determine the static rank from dense_shape or from indices.
      if st_input.dense_shape.shape.ndims is None:
        static_rank_from_dense_shape = None
      else:
        static_rank_from_dense_shape = st_input.dense_shape.shape.dims[0].value
      if st_input.indices.shape.ndims is None:
        static_rank_from_indices = None
      else:
        static_rank_from_indices = st_input.indices.shape.dims[1].value
      if static_rank_from_dense_shape != 2 and static_rank_from_indices != 2:
        raise ValueError("rank(st_input) must be 2.")
      with ops.control_dependencies(
          _assert_sparse_indices_are_ragged_right(st_input.indices)):
        # Each value's row is its first index coordinate.
        segment_ids = math_ops.cast(st_input.indices[:, 0], row_splits_dtype)
        num_segments = math_ops.cast(st_input.dense_shape[0], row_splits_dtype)
        return cls.from_value_rowids(
            st_input.values, segment_ids, num_segments, validate=False)
  def to_sparse(self, name=None):
    """Converts this `RaggedTensor` into a `SparseTensor`.

    Args:
      name: A name prefix for the returned ops (optional).

    Returns:
      A `SparseTensor` with the same values as `self`.
    """
    with ops.name_scope(name, "RaggedToSparse", [self]):
      result = gen_ragged_conversion_ops.ragged_tensor_to_sparse(
          self.nested_row_splits, self.flat_values, name=name)
      return sparse_tensor.SparseTensor(result.sparse_indices,
                                        result.sparse_values,
                                        result.sparse_dense_shape)
  @classmethod
  def _from_variant(cls,
                    variant,
                    dtype,
                    output_ragged_rank,
                    input_ragged_rank=None,
                    row_splits_dtype=dtypes.int64,
                    name=None):
    """Decodes a variant tensor back into a `RaggedTensor`.

    Args:
      variant: The variant tensor produced by `_to_variant`.
      dtype: dtype of the encoded values.
      output_ragged_rank: Ragged rank of the decoded result.
      input_ragged_rank: Optional ragged rank of each encoded element; -1 is
        passed to the kernel when unknown.
      row_splits_dtype: dtype for the result's row_splits.
      name: A name prefix for the returned ops (optional).

    Returns:
      A `RaggedTensor` of rank `output_ragged_rank`.

    Raises:
      ValueError: If the ranks are statically known and inconsistent.
    """
    variant = ops.convert_to_tensor(
        variant, name="variant", dtype=dtypes.variant)
    if (variant.shape.ndims is not None and input_ragged_rank is not None and
        output_ragged_rank != input_ragged_rank + variant.shape.ndims):
      raise ValueError(
          f"Argument `output_ragged_rank` ({output_ragged_rank}) must be equal "
          f"to `input_ragged_rank` + `variant.shape.ndims` "
          f"({input_ragged_rank} + {variant.shape.ndims}).")
    input_ragged_rank = -1 if input_ragged_rank is None else input_ragged_rank
    with ops.name_scope(
        name, "RaggedFromVariant",
        [variant, dtype, input_ragged_rank, output_ragged_rank]):
      result = gen_ragged_conversion_ops.ragged_tensor_from_variant(
          variant, input_ragged_rank, output_ragged_rank, dtype,
          row_splits_dtype, name)
      return cls.from_nested_row_splits(
          result.output_dense_values,
          result.output_nested_splits,
          validate=False)
  def _to_variant(self, batched_input=False, name=None):
    """Encodes this `RaggedTensor` as a variant tensor.

    Args:
      batched_input: Passed through to `ragged_tensor_to_variant`.
      name: A name prefix for the returned op (optional).
    """
    with ops.name_scope(name, "RaggedToVariant", [self, batched_input]):
      return gen_ragged_conversion_ops.ragged_tensor_to_variant(
          self.nested_row_splits, self.flat_values, batched_input, name)
def __repr__(self):
if self._is_eager():
# recursive calls correctly, but doesn't provide a separator argument.
with np.printoptions(formatter={"all": _formatter}):
value_text = _formatter(self.numpy())
return f"<tf.RaggedTensor {value_text}>"
else:
return "tf.RaggedTensor(values=%s, row_splits=%s)" % (self.values,
self.row_splits)
def numpy(self):
if not self._is_eager():
raise ValueError("RaggedTensor.numpy() is only supported in eager mode.")
values = self.values.numpy()
splits = self.row_splits.numpy()
rows = [values[splits[i]:splits[i + 1]] for i in range(len(splits) - 1)]
if not rows:
return np.zeros((0, 0) + values.shape[1:], dtype=values.dtype)
has_variable_length_rows = any(len(row) != len(rows[0]) for row in rows)
dtype = np.object_ if has_variable_length_rows else None
return np.array(rows, dtype=dtype)
  def to_list(self):
    """Returns a nested Python `list` with this tensor's values (eager only).

    Returns:
      A nested list of values.

    Raises:
      ValueError: If called in graph mode, or if `values` is neither a
        Tensor-like object nor something with a `to_list` method.
    """
    if not isinstance(self.row_splits, ops.EagerTensor):
      raise ValueError("to_list can only be used in eager mode.")
    row_splits = self.row_splits.numpy().tolist()
    values = self.values
    if isinstance(values, RaggedTensor):
      # Nested ragged dimension: recurse per row.
      return [
          values[row_splits[i]:row_splits[i + 1]].to_list()
          for i in range(len(row_splits) - 1)
      ]
    else:
      # `values` is either Tensor-like (has .numpy) or a custom values type
      # that provides its own .to_list.
      if hasattr(values, "numpy"):
        values_as_list = values.numpy().tolist()
      elif hasattr(values, "to_list"):
        values_as_list = values.to_list()
      else:
        raise ValueError("values must be convertible to a list")
      return [
          values_as_list[row_splits[i]:row_splits[i + 1]]
          for i in range(len(row_splits) - 1)
      ]
  def _eager_value(self):
    """Returns this tensor's value as a `RaggedTensorValue` (eager only).

    Rebuilds the value from the numpy contents of `flat_values`, wrapping
    with each nested `row_splits` from the innermost outward.
    """
    value = self.flat_values.numpy()
    for row_splits in reversed(self.nested_row_splits):
      value = ragged_tensor_value.RaggedTensorValue(value, row_splits.numpy())
    return value
def _is_eager(self):
rt = self
while isinstance(rt, RaggedTensor):
if not isinstance(rt.row_splits, ops.EagerTensor):
return False
rt = rt.values
return isinstance(rt, ops.EagerTensor)
  # Operator overloads are installed by `ragged_ops` at import time; until
  # then, each dunder is bound to a stub that raises with a helpful message.
  def _overloaded_operator(name):
    """Returns a stub that raises, telling the user to import ragged_ops."""
    def stub(*args, **kwargs):
      del args, kwargs
      raise ValueError(
          f"You must import 'tensorflow.python.ops.ragged.ragged_ops' "
          f"before using RaggedTensor.{name}.")
    return stub
  __getitem__ = _overloaded_operator("__getitem__")
  __ge__ = _overloaded_operator("__ge__")
  __gt__ = _overloaded_operator("__gt__")
  __le__ = _overloaded_operator("__le__")
  __lt__ = _overloaded_operator("__lt__")
  __and__ = _overloaded_operator("__and__")
  __rand__ = _overloaded_operator("__rand__")
  __invert__ = _overloaded_operator("__invert__")
  __ror__ = _overloaded_operator("__ror__")
  __or__ = _overloaded_operator("__or__")
  __xor__ = _overloaded_operator("__xor__")
  __rxor__ = _overloaded_operator("__rxor__")
  __abs__ = _overloaded_operator("__abs__")
  __add__ = _overloaded_operator("__add__")
  __radd__ = _overloaded_operator("__radd__")
  __div__ = _overloaded_operator("__div__")
  __rdiv__ = _overloaded_operator("__rdiv__")
  __floordiv__ = _overloaded_operator("__floordiv__")
  __rfloordiv__ = _overloaded_operator("__rfloordiv__")
  __mod__ = _overloaded_operator("__mod__")
  __rmod__ = _overloaded_operator("__rmod__")
  __mul__ = _overloaded_operator("__mul__")
  __rmul__ = _overloaded_operator("__rmul__")
  __neg__ = _overloaded_operator("__neg__")
  __pow__ = _overloaded_operator("__pow__")
  __rpow__ = _overloaded_operator("__rpow__")
  __sub__ = _overloaded_operator("__sub__")
  __rsub__ = _overloaded_operator("__rsub__")
  __truediv__ = _overloaded_operator("__truediv__")
  __rtruediv__ = _overloaded_operator("__rtruediv__")
  # Remove the helper so it does not become a RaggedTensor method.
  del _overloaded_operator
def _as_graph_element(self):
values = self.values
while isinstance(values, RaggedTensor):
values = values.values
return values
  @property
  def _type_spec(self):
    """The `RaggedTensorSpec` describing this value."""
    return RaggedTensorSpec.from_value(self)
  def _shape_invariant_to_type_spec(self, shape):
    """Returns a `RaggedTensorSpec` with the given `shape` and this tensor's
    dtype, ragged_rank, and row_splits dtype."""
    return RaggedTensorSpec(shape, self.dtype, self.ragged_rank,
                            self.row_splits.dtype)
  def consumers(self):
    """Returns the consumers of this tensor (delegates to `_consumers`,
    which is defined outside this class body)."""
    return self._consumers()
def is_ragged(value):
  """Returns True if `value` is a `RaggedTensor` or `RaggedTensorValue`."""
  ragged_types = (RaggedTensor, ragged_tensor_value.RaggedTensorValue)
  return isinstance(value, ragged_types)
def match_row_splits_dtypes(*tensors, **kwargs):
  """Casts `tensors` so all RaggedTensors share one row_splits dtype.

  When both int32 and int64 row_splits are present, all RaggedTensors are
  cast to int64 (if `ragged_config.auto_cast_partition_dtype()` allows it);
  non-ragged inputs are passed through unchanged.

  Args:
    *tensors: The tensors to harmonize (ragged or not).
    **kwargs: Only `return_dtype=True` is accepted; when set, the result is
      `(dtype, tensors)` instead of just `tensors`.

  Returns:
    The tuple of tensors (optionally preceded by the common dtype).

  Raises:
    ValueError: On unexpected kwargs, or on mixed dtypes when auto-casting
      is disabled.
  """
  return_dtype = kwargs.pop("return_dtype", False)
  if kwargs:
    raise ValueError(f"Unexpected keyword args {kwargs}.")
  has_int32 = False
  has_int64 = False
  for tensor in tensors:
    if isinstance(tensor, RaggedTensor):
      if tensor.row_splits.dtype == dtypes.int32:
        has_int32 = True
      else:
        has_int64 = True
  if has_int32 and has_int64:
    if not ragged_config.auto_cast_partition_dtype():
      raise ValueError("Input RaggedTensors have mismatched row_splits dtypes; "
                       "use RaggedTensor.with_row_splits_dtype() to convert "
                       "them to compatible dtypes.")
    dtype = dtypes.int64
    tensors = tuple(
        t.with_row_splits_dtype(dtypes.int64) if isinstance(t, RaggedTensor
                                                           ) else t
        for t in tensors)
  elif has_int32:
    dtype = dtypes.int32
  else:
    dtype = dtypes.int64
  if return_dtype:
    return (dtype, tensors)
  else:
    return tensors
@tf_export("RaggedTensorSpec")
@type_spec.register("tf.RaggedTensorSpec")
class RaggedTensorSpec(type_spec.BatchableTypeSpec):
  """Type specification for a `tf.RaggedTensor`."""

  __slots__ = [
      "_shape", "_dtype", "_ragged_rank", "_row_splits_dtype",
      "_flat_values_spec"
  ]

  @property
  def dtype(self):
    """The `DType` of values described by this spec."""
    return self._dtype

  @property
  def shape(self):
    """The statically known shape described by this spec."""
    return self._shape

  @property
  def ragged_rank(self):
    """The number of ragged dimensions described by this spec."""
    return self._ragged_rank

  @property
  def row_splits_dtype(self):
    """The `DType` of the described tensor's `row_splits`."""
    return self._row_splits_dtype

  @property
  def flat_values_spec(self):
    """The `TypeSpec` of the described tensor's `flat_values`, or None."""
    return self._flat_values_spec

  @property
  def value_type(self):
    """The value type: `RaggedTensor`, or `Tensor` when ragged_rank is 0."""
    return RaggedTensor if self._ragged_rank > 0 else ops.Tensor

  def __init__(self,
               shape=None,
               dtype=dtypes.float32,
               ragged_rank=None,
               row_splits_dtype=dtypes.int64,
               flat_values_spec=None):
    """Constructs a type specification for a `tf.RaggedTensor`.

    Args:
      shape: The shape of the described RaggedTensor, or None.
      dtype: `DType` of values; may be None only if `flat_values_spec`
        supplies the dtype.
      ragged_rank: int number of ragged dimensions; defaults to
        `shape.ndims - 1` when a shape with known rank is given.
      row_splits_dtype: `DType` for the row_splits tensors.
      flat_values_spec: Optional `TypeSpec` for `flat_values`; its dtype must
        agree with `dtype` when both are given.

    Raises:
      ValueError: If `dtype`/`flat_values_spec` are inconsistent or both
        None, if neither `ragged_rank` nor a ranked shape is given, or if
        `ragged_rank >= rank`.
      TypeError: If `ragged_rank` is not an int.
    """
    self._shape = tensor_shape.as_shape(shape)
    self._row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
    if flat_values_spec is not None:
      if dtype is None:
        dtype = flat_values_spec.dtype
      elif dtype != flat_values_spec.dtype:
        raise ValueError("dtype must be the same as flat_values_spec.dtype")
    elif dtype is None:
      raise ValueError(
          "At least one of dtype or flat_values_spec must be provided")
    self._dtype = dtypes.as_dtype(dtype)
    self._flat_values_spec = flat_values_spec

    rank = self._shape.ndims
    if ragged_rank is None:
      if rank is None:
        raise ValueError("Must specify ragged_rank or "
                         "a shape with a known rank.")
      ragged_rank = rank - 1
    self._ragged_rank = ragged_rank
    if not isinstance(self._ragged_rank, int):
      # Fixed typo: "Recieved" -> "Received" (matches the rest of the file).
      raise TypeError(f"Argument `ragged_rank` must be an int. "
                      f"Received {ragged_rank}.")

    if rank is not None:
      if ragged_rank >= rank:
        raise ValueError(f"Argument `ragged_rank` ({ragged_rank}) must be less "
                         f"than rank ({rank}).")

  def is_compatible_with(self, spec_or_value):
    """Returns True if `spec_or_value` is compatible with this spec."""
    # ragged_rank 0 describes a plain Tensor (or a custom flat-values type);
    # handle those cases before falling back to the generic check.
    if self._ragged_rank == 0:
      if self._flat_values_spec is None:
        if isinstance(spec_or_value, (ops.Tensor, tensor_spec.TensorSpec)):
          return tensor_spec.TensorSpec(
              self._shape, self._dtype).is_compatible_with(spec_or_value)
      elif not isinstance(spec_or_value, (RaggedTensor, RaggedTensorSpec)):
        return self._flat_values_spec.is_compatible_with(spec_or_value)
    return super(RaggedTensorSpec, self).is_compatible_with(spec_or_value)

  def _serialize(self):
    """Returns a serializable tuple of constructor arguments."""
    if self._flat_values_spec is None:
      return (self._shape, self._dtype, self._ragged_rank,
              self._row_splits_dtype)
    else:
      return (self._shape, self._dtype, self._ragged_rank,
              self._row_splits_dtype, self._flat_values_spec)

  @property
  def _component_specs(self):
    """TypeSpecs for the component tensors: flat_values, then each splits."""
    if self._ragged_rank == 0:
      if self._flat_values_spec is not None:
        return [self._flat_values_spec]
      else:
        return [tensor_spec.TensorSpec(self._shape, self._dtype)]

    flat_values_spec = self._flat_values_spec
    if flat_values_spec is None:
      flat_values_shape = tensor_shape.TensorShape([None]).concatenate(
          self._shape[self._ragged_rank + 1:])
      flat_values_spec = tensor_spec.TensorSpec(flat_values_shape, self._dtype)
    # The outermost row_splits has a statically known length when the outer
    # dimension is known (nrows + 1); inner splits' lengths are unknown.
    outer_dim = tensor_shape.dimension_at_index(self._shape, 0)
    outer_splits_shape = [None if outer_dim is None else outer_dim + 1]
    inner_splits_spec = tensor_spec.TensorSpec([None], self._row_splits_dtype)

    specs = ([
        flat_values_spec,
        tensor_spec.TensorSpec(outer_splits_shape, self._row_splits_dtype)
    ] + [inner_splits_spec for _ in range(self._ragged_rank - 1)])
    return specs

  def _to_components(self, value):
    """Decomposes `value` into [flat_values, *nested_row_splits]."""
    if is_ragged(value):
      return [value.flat_values] + list(value.nested_row_splits)
    else:
      return [value]

  def _from_components(self, tensor_list):
    """Rebuilds a (Ragged)Tensor value from its component list."""
    result = tensor_list[0]
    # In graph mode (TF1), numpy component arrays are rebuilt into a
    # RaggedTensorValue rather than a symbolic RaggedTensor.
    if (all(isinstance(t, np.ndarray) for t in tensor_list) and
        not tf2.enabled()):
      for row_splits in reversed(tensor_list[1:]):
        result = ragged_tensor_value.RaggedTensorValue(result, row_splits)
    else:
      if isinstance(tensor_list[0], np.ndarray):
        tensor_list = [ops.convert_to_tensor(t) for t in tensor_list]
        result = tensor_list[0]
      for row_splits in reversed(tensor_list[1:]):
        result = RaggedTensor(
            result,
            RowPartition.from_row_splits(row_splits, validate=False),
            internal=True)
    if self._shape.ndims is not None:
      if isinstance(result, RaggedTensor):
        result._set_shape(self._shape)
        # TODO(xjun): MaskedTensor doesn't implement set_shape.
        if self.flat_values_spec is not None and hasattr(result.flat_values,
                                                         "set_shape"):
          result.flat_values.set_shape(self.flat_values_spec.shape)
      elif isinstance(result, ops.Tensor):
        result.set_shape(self._shape)
    return result

  # The RaggedTensorSpec tensor_list encoding uses to/from_variant ops
  # to (un)box the component tensors in a way that allows for batching &
  # unbatching.
  @property
  def _flat_tensor_specs(self):
    """Spec for the flat (variant-encoded) tensor representation."""
    # NOTE(mishragaurav): The default flat shape of a boxed `RaggedTensor` is
    # `[]` (scalar), but a `RaggedTensorSpec` can also represent a batch of
    # boxed `RaggedTensor` objects with shape `(...)` (and batches of batches,
    # etc.), so the flat shape must be unknown.
    return [tensor_spec.TensorSpec(None, dtypes.variant)]

  def _to_tensor_list(self, value):
    """Encodes `value` as a single (non-batched) variant tensor."""
    # TODO(edloper): Update gen_ragged_conversion_ops that convert to and
    # from variant to include all of the row-partitioning tensors.
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
    if ragged_rank != self._ragged_rank:
      raise ValueError(f"Ragged rank of value {ragged_rank} does not match "
                       f"ragged rank of type {self._ragged_rank}.")
    if ragged_rank == 0:
      return [
          gen_ragged_conversion_ops.ragged_tensor_to_variant(
              (), value, batched_input=False)
      ]
    # pylint: disable=protected-access
    return [value._to_variant(batched_input=False)]

  def _to_batched_tensor_list(self, value):
    """Encodes `value` as a batched variant tensor."""
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    ragged_rank = value.ragged_rank if isinstance(value, RaggedTensor) else 0
    if ragged_rank != self._ragged_rank:
      raise ValueError(f"Ragged rank of value {ragged_rank} does not match "
                       f"ragged rank of type {self._ragged_rank}.")
    if ragged_rank == 0:
      # TODO(b/141789000) Update this to handle ragged_rank=0.
      raise ValueError(
          "_to_batched_tensor_list doesn't support ragged_rank=0 yet")
    return [value._to_variant(batched_input=True)]

  def _from_compatible_tensor_list(self, tensor_list):
    """Decodes a variant tensor list back into a (Ragged)Tensor."""
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    if self._ragged_rank < 0:
      raise ValueError(f"Argument `ragged_rank` must be non-negative. "
                       f"Received {self._ragged_rank}.")
    result = RaggedTensor._from_variant(
        tensor_list[0],
        dtype=self._dtype,
        row_splits_dtype=self._row_splits_dtype,
        output_ragged_rank=self._ragged_rank)
    if self._shape.ndims is not None:
      if isinstance(result, RaggedTensor):
        result._set_shape(self._shape)
        # Bug fix: test `result.flat_values` (the decoded tensor), not
        # `self.flat_values` -- the spec has no `flat_values` attribute, so
        # the hasattr check was always False.  Mirrors `_from_components`.
        # (Currently unreachable: `flat_values_spec is not None` is rejected
        # above.)
        if self.flat_values_spec is not None and hasattr(result.flat_values,
                                                         "set_shape"):
          result.flat_values.set_shape(self.flat_values_spec.shape)
      else:
        result.set_shape(self._shape)
    return result

  def _batch(self, batch_size):
    """Returns a spec for a batch of `batch_size` values of this spec."""
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    return RaggedTensorSpec(
        tensor_shape.TensorShape([batch_size]).concatenate(self._shape),
        self._dtype, self._ragged_rank + 1, self._row_splits_dtype)

  def _unbatch(self):
    """Returns a spec for a single element of this (batched) spec."""
    if self._flat_values_spec is not None:
      raise ValueError("Customized value_type is not supported.")
    # Note: Negative ragged_rank is allowed here because the dataset could be
    # subsequently batched again. If ragged_rank > 1, assume row_splits_dtype
    # is consistent. Errors are handled in
    # RaggedTensorSpec._from_compatible_tensor_list()
    return RaggedTensorSpec(self._shape[1:], self._dtype, self._ragged_rank - 1,
                            self._row_splits_dtype)

  def _to_legacy_output_types(self):
    """Legacy (TF1 dataset) API: the element dtype."""
    return self._dtype

  def _to_legacy_output_shapes(self):
    """Legacy (TF1 dataset) API: the element shape."""
    return self._shape

  def _to_legacy_output_classes(self):
    """Legacy (TF1 dataset) API: the spec stands in for the class."""
    return self

  @classmethod
  def from_value(cls, value):
    """Builds a `RaggedTensorSpec` describing `value`.

    Args:
      value: A `RaggedTensor` or `RaggedTensorValue`.

    Returns:
      A `RaggedTensorSpec`; a `flat_values_spec` is included only when the
      value's flat_values is not a plain `Tensor`.
    """
    if (isinstance(value, ragged_tensor_value.RaggedTensorValue) or
        isinstance(value.flat_values, ops.Tensor)):
      return cls(
          shape=value.shape,
          dtype=value.values.dtype,
          ragged_rank=value.ragged_rank,
          row_splits_dtype=value.row_splits.dtype)
    else:
      return cls(
          shape=value.shape,
          dtype=value.values.dtype,
          ragged_rank=value.ragged_rank,
          row_splits_dtype=value.row_splits.dtype,
          flat_values_spec=type_spec.type_spec_from_value(value.flat_values))
# Register a converter so `type_spec.type_spec_from_value` can build a
# `RaggedTensorSpec` from a `RaggedTensorValue`.
type_spec.register_type_spec_from_value_converter(
    ragged_tensor_value.RaggedTensorValue, RaggedTensorSpec.from_value)
#===============================================================================
# Convert value -> tensor
#===============================================================================
def convert_to_tensor_or_ragged_tensor(value,
                                       dtype=None,
                                       preferred_dtype=None,
                                       name=None):
  """Converts `value` to a `Tensor` or `RaggedTensor`.

  Args:
    value: A `RaggedTensor`, `RaggedTensorValue`, or anything
      `ops.convert_to_tensor_v2_with_dispatch` accepts.
    dtype: Optional required dtype for the result.
    preferred_dtype: Optional dtype hint used during conversion.
    name: A name prefix for the returned ops (optional).

  Returns:
    A `Tensor` or `RaggedTensor`.

  Raises:
    ValueError: If `value` is a `RaggedTensor` whose dtype is incompatible
      with the requested `dtype`.
  """
  if isinstance(value, RaggedTensor):
    # Already ragged: only verify the dtype, do not convert.
    if dtype and not dtype.is_compatible_with(value.dtype):
      raise ValueError(f"Tensor conversion requested dtype {dtype.name} for "
                       f"RaggedTensor with dtype {value.dtype.name}: {value}.")
    return value
  elif isinstance(value, ragged_tensor_value.RaggedTensorValue):
    with ops.name_scope(name, "ConvertToTensorOrRaggedTensor", []):
      flat_values = ops.convert_to_tensor(
          value=value.flat_values,
          dtype=dtype,
          dtype_hint=preferred_dtype,
          name="flat_values")
      return RaggedTensor.from_nested_row_splits(
          flat_values, value.nested_row_splits, validate=False)
  else:
    return ops.convert_to_tensor_v2_with_dispatch(
        value=value, dtype=dtype, dtype_hint=preferred_dtype, name=name)
def _convert_to_ragged_tensor_values(value):
  """Converts `value` into a type usable as RaggedTensor `values`."""
  if _is_supported_ragged_values_type(value):
    return value
  return convert_to_tensor_or_ragged_tensor(value, name="values")
#===============================================================================
# Register RaggedTensor for use with session.run.
#===============================================================================
def _ragged_tensor_value_from_components(components):
  """Rebuilds a `RaggedTensorValue` from (*nested_row_splits, flat_values).

  `components` lists the row-splits outermost first followed by the
  flat values, so we consume the list from the end.
  """
  stack = list(components)
  result = stack.pop()
  while stack:
    result = ragged_tensor_value.RaggedTensorValue(result, stack.pop())
  return result
def _ragged_tensor_session_fetch(rt):
  """Session-fetch hook: (component tensors, rebuild function) for `rt`."""
  components = rt.nested_row_splits + (rt.flat_values,)
  return (components, _ragged_tensor_value_from_components)
def _ragged_tensor_session_feed(feed_key, feed_val):
key_components = feed_key.nested_row_splits + (feed_key.flat_values,)
val_components = feed_val.nested_row_splits + (feed_val.flat_values,)
return zip(key_components, val_components)
def _ragged_tensor_session_feed_for_partial_run(feed_key):
return feed_key.nested_row_splits + (feed_key.flat_values,)
session.register_session_run_conversion_functions(
RaggedTensor, _ragged_tensor_session_fetch, _ragged_tensor_session_feed,
_ragged_tensor_session_feed_for_partial_run)
#===============================================================================
# RaggedTensorType
#===============================================================================
class RaggedTensorType:
def __init__(self, dtype, ragged_rank, row_splits_dtype=dtypes.int64):
row_splits_dtype = dtypes.as_dtype(row_splits_dtype)
self._dtype = dtype
self._ragged_rank = ragged_rank
self._row_splits_dtype = row_splits_dtype
dtype = property(lambda self: self._dtype)
ragged_rank = property(lambda self: self._ragged_rank)
row_splits_dtype = property(lambda self: self._row_splits_dtype)
def __repr__(self):
return "RaggedTensorType(%r, %r, %r)" % (self.dtype, self.ragged_rank,
self.row_splits_dtype)
#===============================================================================
# Helper Functions
#===============================================================================
def _assert_sparse_indices_are_ragged_right(indices):
index_prefix = indices[:, :-1]
index_suffix = indices[:, -1]
# Check whether each index is starting a new row in the innermost dimension
# (prefix[i] != prefix[i-1]) or continuing a row (prefix[i] == prefix[i-1]).
# (Note: this skips the first index; we will check that separately below.)
index_prefix_changed = math_ops.reduce_any(
math_ops.not_equal(index_prefix[1:], index_prefix[:-1]), axis=1)
# Check two cases:
# * For indices that start a new row: index_suffix[i] must be zero.
# * For indices that continue a row: index_suffix[i] must be equal to
# index_suffix[i-1]+1.
index_ok = array_ops.where(
index_prefix_changed, math_ops.equal(index_suffix[1:], 0),
math_ops.equal(index_suffix[1:], index_suffix[:-1] + 1))
# Also check that the very first index didn't skip any cells. The first
sparse_indices_are_ragged_right = math_ops.logical_and(
math_ops.reduce_all(math_ops.equal(index_suffix[:1], 0)),
math_ops.reduce_all(index_ok))
message = [
"SparseTensor is not right-ragged", "SparseTensor.indices =", indices
]
return [control_flow_ops.Assert(sparse_indices_are_ragged_right, message)]
@ops.RegisterGradient("RaggedTensorToSparse")
def _ragged_tensor_to_sparse_gradient(op, unused_sparse_indices_grad,
sparse_values_grad,
unused_sparse_shape_grad):
op_inputs_nested_row_splits = op.inputs[:-1]
op_inputs_flat_values = op.inputs[-1]
nested_row_splits_gradient = [None] * len(op_inputs_nested_row_splits)
# Gradient for the RaggedTensor's flat_values is formed by reshaping
flat_values_shape = array_ops.shape(op_inputs_flat_values)
flat_values_gradient = array_ops.reshape(sparse_values_grad,
flat_values_shape)
return nested_row_splits_gradient + [flat_values_gradient]
def _assert_monotonic_increasing(tensor, message=None):
return check_ops.assert_non_negative(
tensor[1:] - tensor[:-1], message=message)
def _assert_zero(tensor, message=None):
return check_ops.assert_equal(
tensor, constant_op.constant(0, dtype=tensor.dtype), message=message)
def _nrows(tensor, out_type=dtypes.int32):
if isinstance(tensor, RaggedTensor):
return tensor.nrows(out_type=out_type)
else:
return array_ops.shape(tensor, out_type=out_type)[0]
def merge_dims(value, outer_axis, inner_axis):
if outer_axis == inner_axis:
return value
# Flatten outer dimensions of a RaggedTensor by just taking its values.
while outer_axis == 0 and isinstance(value, RaggedTensor):
value = value.values
inner_axis -= 1
if inner_axis == 0:
return value
# Flatten non-Ragged tensors using tf.reshape().
if not isinstance(value, RaggedTensor):
if value.shape.is_fully_defined():
old_shape = value.shape.as_list()
new_shape = old_shape[:outer_axis] + [-1] + old_shape[inner_axis + 1:]
else:
old_shape = array_ops.shape(value)
new_shape = array_ops.concat(
[old_shape[:outer_axis], [-1], old_shape[inner_axis + 1:]], axis=0)
return array_ops.reshape(value, new_shape)
# Handle outer_axis>1 via recursion.
if outer_axis > 1:
return value.with_values(
merge_dims(value.values, outer_axis - 1, inner_axis - 1))
# At this point, we know outer_axis == 1, and value is a RaggedTensor.
# So we need to flatten the values and build a corresponding splits tensor.
new_values = value.values
new_splits = value.row_splits
for axis in range(outer_axis, inner_axis):
if isinstance(new_values, RaggedTensor):
# Flatten a single ragged dimension.
new_splits = array_ops.gather(new_values.row_splits, new_splits)
new_values = new_values.values
else:
# Flatten all remaining dense dimensions.
shape_split = inner_axis - axis + 1
if new_values.shape.is_fully_defined():
old_shape = new_values.shape.as_list()
new_shape = [-1] + old_shape[shape_split:]
flat_size = _prod(old_shape[1:shape_split])
else:
old_shape = array_ops.shape(new_values)
new_shape = array_ops.concat([[-1], old_shape[shape_split:]], axis=0)
flat_size = math_ops.cast(
math_ops.reduce_prod(old_shape[1:shape_split]), new_splits.dtype)
new_values = array_ops.reshape(new_values, new_shape)
new_splits = new_splits * flat_size
break
return RaggedTensor.from_row_splits(new_values, new_splits)
def _prod(lst):
return functools.reduce(operator.mul, lst, 1)
def _get_row_partition_type_tensor_pairs_tail(partition):
if partition.has_precomputed_value_rowids():
return ("VALUE_ROWIDS", partition.value_rowids())
else:
return ("ROW_SPLITS", partition.row_splits())
def _get_row_partition_type_tensor_pairs(rt_input):
partitions = rt_input._nested_row_partitions # pylint: disable=protected-access
tail = [_get_row_partition_type_tensor_pairs_tail(x) for x in partitions[1:]]
if partitions[0]._value_rowids is not None: # pylint: disable=protected-access
return [("FIRST_DIM_SIZE", partitions[0].nrows()),
("VALUE_ROWIDS", partitions[0].value_rowids())] + tail
else:
return [("ROW_SPLITS", partitions[0].row_splits())] + tail
def _shape_as_tensor(shape, dtype):
if dtype != dtypes.int64 and dtype != dtypes.int32:
raise ValueError(f"Expected int64 or int32 for dtype: got {dtype}.")
if isinstance(shape, ops.Tensor):
if shape.dtype != dtypes.int64 and shape.dtype != dtypes.int32:
return math_ops.cast(shape, dtype)
return shape
shape = tensor_shape.as_shape(shape)
if not shape:
# Imply rank is unknown using a -1 scalar.
return constant_op.constant(-1, dtype=dtype)
shape = [(-1 if x is None else x) for x in shape.as_list()]
# At this point, shape is List[Int].
return constant_op.constant(shape, dtype=dtype)
def _nvals_uniform_row_length(values, uniform_row_length):
const_nvals = tensor_shape.dimension_at_index(values.shape, 0).value
if const_nvals is not None:
nvals = constant_op.constant(const_nvals, uniform_row_length.dtype)
elif isinstance(values, RaggedTensor):
nvals = values.nrows(out_type=uniform_row_length.dtype)
else:
nvals = array_ops.shape(values, out_type=uniform_row_length.dtype)[0]
return nvals
def _get_optional_partition_dtype(values):
if isinstance(values, RaggedTensor):
# pylint: disable=protected-access
return values._row_partition.dtype
return None
_SUPPORTED_RAGGED_VALUE_TYPES = (ops.Tensor, RaggedTensor)
# TODO(edloper): Consider whether we should change the registry to be on
# TypeSpecs rather than ValueTypes.
def _add_supported_value_type(cls):
if not issubclass(cls, composite_tensor.CompositeTensor):
raise ValueError(f"cls ({cls}) must be a subclass of CompositeTensor.")
if not hasattr(cls, "shape"):
raise ValueError("cls must support the `shape` property.")
if not hasattr(cls, "dtype"):
raise ValueError("cls must support the `dtype` property.")
global _SUPPORTED_RAGGED_VALUE_TYPES
_SUPPORTED_RAGGED_VALUE_TYPES += (cls,)
def _is_supported_ragged_values_type(value):
return isinstance(value, _SUPPORTED_RAGGED_VALUE_TYPES)
def _assert_is_supported_ragged_values_type(value):
if not _is_supported_ragged_values_type(value):
ok_types = ", ".join(cls.__name__ for cls in _SUPPORTED_RAGGED_VALUE_TYPES)
raise TypeError(f"type(values) must be one of: {ok_types}, got {value}.")
def _formatter(x):
if isinstance(x, np.ndarray):
return np.array2string(x, separator=", ")
else:
return str(x)
# Type annotation indicating that a value is ragged. Includes RaggedTensor
# as well as the (deprecated) RaggedTensorValue class from TF 1.x.
Ragged = typing.Union[RaggedTensor, ragged_tensor_value.RaggedTensorValue]
# Type annotation indicating that a value is a ragged tensor, a dense tensor,
# or a value that can be converted to a tensor (e.g. np.array).
# TODO(edloper): Add Variable to TensorLike, and remove it from here.
RaggedOrDense = typing.Union[Ragged, core_types.TensorLike]
| true | true |
f7f80656cbe77cd51aa948310feab649c559e78c | 5,548 | py | Python | tests/runtimes/test_base.py | jillnogold/mlrun | beff7da359b697156890e4eb45cb9a1bc9f16631 | [
"Apache-2.0"
] | null | null | null | tests/runtimes/test_base.py | jillnogold/mlrun | beff7da359b697156890e4eb45cb9a1bc9f16631 | [
"Apache-2.0"
] | null | null | null | tests/runtimes/test_base.py | jillnogold/mlrun | beff7da359b697156890e4eb45cb9a1bc9f16631 | [
"Apache-2.0"
] | null | null | null | import base64
import json
import os
import pytest
import mlrun.errors
from mlrun.config import config as mlconf
from mlrun.runtimes import KubejobRuntime
from mlrun.runtimes.pod import AutoMountType
class TestAutoMount:
def setup_method(self, method):
# set auto-mount to work as if this is an Iguazio system (otherwise it may try to mount PVC)
mlconf.igz_version = "1.1.1"
mlconf.storage.auto_mount_type = "auto"
mlconf.storage.auto_mount_params = ""
self.project = "test-project"
self.name = "test-function"
self.image_name = "mlrun/mlrun:latest"
self.artifact_path = "/tmp"
os.environ["V3IO_ACCESS_KEY"] = self.v3io_access_key = "1111-2222-3333-4444"
os.environ["V3IO_USERNAME"] = self.v3io_user = "test-user"
def _generate_runtime(self, disable_auto_mount=False):
runtime = KubejobRuntime()
runtime.spec.image = self.image_name
runtime.spec.disable_auto_mount = disable_auto_mount
return runtime
def _execute_run(self, runtime):
runtime.run(
name=self.name,
project=self.project,
artifact_path=self.artifact_path,
watch=False,
)
@pytest.mark.parametrize("cred_only", [True, False])
def test_auto_mount_v3io(self, cred_only, rundb_mock):
mlconf.storage.auto_mount_type = (
"v3io_credentials" if cred_only else "v3io_fuse"
)
runtime = self._generate_runtime()
self._execute_run(runtime)
rundb_mock.assert_v3io_mount_or_creds_configured(
self.v3io_user, self.v3io_access_key, cred_only=cred_only
)
# Check that disable-auto-mount works. Need a fresh runtime, to reset its mount-applied indication.
rundb_mock.reset()
runtime = self._generate_runtime(disable_auto_mount=True)
self._execute_run(runtime)
rundb_mock.assert_no_mount_or_creds_configured()
def test_fill_credentials(self, rundb_mock):
os.environ[
mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session
] = "some-access-key"
runtime = self._generate_runtime()
self._execute_run(runtime)
assert (
runtime.metadata.credentials.access_key
== os.environ[
mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session
]
)
del os.environ[
mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session
]
def test_auto_mount_invalid_value(self):
# When invalid value is used, we explode
mlconf.storage.auto_mount_type = "something_wrong"
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
# When it's missing, we just use auto
mlconf.storage.auto_mount_type = None
auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
assert auto_mount_type == AutoMountType.auto
@staticmethod
def _setup_pvc_mount():
mlconf.storage.auto_mount_type = "pvc"
return {
"pvc_name": "test_pvc",
"volume_name": "test_volume",
"volume_mount_path": "/mnt/test/path",
}
def test_run_with_automount_pvc(self, rundb_mock):
pvc_params = self._setup_pvc_mount()
# Verify that extra parameters get filtered out
pvc_params["invalid_param"] = "blublu"
# Try with a simple string
pvc_params_str = ",".join(
[f"{key}={value}" for key, value in pvc_params.items()]
)
mlconf.storage.auto_mount_params = pvc_params_str
runtime = self._generate_runtime()
self._execute_run(runtime)
rundb_mock.assert_pvc_mount_configured(pvc_params)
rundb_mock.reset()
# Try with a base64 json dictionary
pvc_params_str = base64.b64encode(json.dumps(pvc_params).encode())
mlconf.storage.auto_mount_params = pvc_params_str
runtime = self._generate_runtime()
self._execute_run(runtime)
rundb_mock.assert_pvc_mount_configured(pvc_params)
# Try with disable-auto-mount
rundb_mock.reset()
runtime = self._generate_runtime(disable_auto_mount=True)
self._execute_run(runtime)
rundb_mock.assert_no_mount_or_creds_configured()
# Try something that does not translate to a dictionary
bad_params_str = base64.b64encode(
json.dumps(["I'm", "not", "a", "dictionary"]).encode()
)
mlconf.storage.auto_mount_params = bad_params_str
with pytest.raises(TypeError):
mlconf.get_storage_auto_mount_params()
def test_auto_mount_function_with_pvc_config(self, rundb_mock):
pvc_params = self._setup_pvc_mount()
pvc_params_str = base64.b64encode(json.dumps(pvc_params).encode())
mlconf.storage.auto_mount_params = pvc_params_str
runtime = self._generate_runtime()
runtime.apply(mlrun.auto_mount())
assert runtime.spec.disable_auto_mount
self._execute_run(runtime)
rundb_mock.assert_pvc_mount_configured(pvc_params)
os.environ.pop("V3IO_ACCESS_KEY", None)
# This won't work if mount type is not pvc
mlconf.storage.auto_mount_type = "auto"
with pytest.raises(
ValueError, match="failed to auto mount, need to set env vars"
):
runtime.apply(mlrun.auto_mount())
| 34.893082 | 107 | 0.665826 | import base64
import json
import os
import pytest
import mlrun.errors
from mlrun.config import config as mlconf
from mlrun.runtimes import KubejobRuntime
from mlrun.runtimes.pod import AutoMountType
class TestAutoMount:
def setup_method(self, method):
mlconf.igz_version = "1.1.1"
mlconf.storage.auto_mount_type = "auto"
mlconf.storage.auto_mount_params = ""
self.project = "test-project"
self.name = "test-function"
self.image_name = "mlrun/mlrun:latest"
self.artifact_path = "/tmp"
os.environ["V3IO_ACCESS_KEY"] = self.v3io_access_key = "1111-2222-3333-4444"
os.environ["V3IO_USERNAME"] = self.v3io_user = "test-user"
def _generate_runtime(self, disable_auto_mount=False):
runtime = KubejobRuntime()
runtime.spec.image = self.image_name
runtime.spec.disable_auto_mount = disable_auto_mount
return runtime
def _execute_run(self, runtime):
runtime.run(
name=self.name,
project=self.project,
artifact_path=self.artifact_path,
watch=False,
)
@pytest.mark.parametrize("cred_only", [True, False])
def test_auto_mount_v3io(self, cred_only, rundb_mock):
mlconf.storage.auto_mount_type = (
"v3io_credentials" if cred_only else "v3io_fuse"
)
runtime = self._generate_runtime()
self._execute_run(runtime)
rundb_mock.assert_v3io_mount_or_creds_configured(
self.v3io_user, self.v3io_access_key, cred_only=cred_only
)
rundb_mock.reset()
runtime = self._generate_runtime(disable_auto_mount=True)
self._execute_run(runtime)
rundb_mock.assert_no_mount_or_creds_configured()
def test_fill_credentials(self, rundb_mock):
os.environ[
mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session
] = "some-access-key"
runtime = self._generate_runtime()
self._execute_run(runtime)
assert (
runtime.metadata.credentials.access_key
== os.environ[
mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session
]
)
del os.environ[
mlrun.runtimes.constants.FunctionEnvironmentVariables.auth_session
]
def test_auto_mount_invalid_value(self):
mlconf.storage.auto_mount_type = "something_wrong"
with pytest.raises(mlrun.errors.MLRunInvalidArgumentError):
auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
mlconf.storage.auto_mount_type = None
auto_mount_type = AutoMountType(mlconf.storage.auto_mount_type)
assert auto_mount_type == AutoMountType.auto
@staticmethod
def _setup_pvc_mount():
mlconf.storage.auto_mount_type = "pvc"
return {
"pvc_name": "test_pvc",
"volume_name": "test_volume",
"volume_mount_path": "/mnt/test/path",
}
def test_run_with_automount_pvc(self, rundb_mock):
pvc_params = self._setup_pvc_mount()
# Verify that extra parameters get filtered out
pvc_params["invalid_param"] = "blublu"
# Try with a simple string
pvc_params_str = ",".join(
[f"{key}={value}" for key, value in pvc_params.items()]
)
mlconf.storage.auto_mount_params = pvc_params_str
runtime = self._generate_runtime()
self._execute_run(runtime)
rundb_mock.assert_pvc_mount_configured(pvc_params)
rundb_mock.reset()
# Try with a base64 json dictionary
pvc_params_str = base64.b64encode(json.dumps(pvc_params).encode())
mlconf.storage.auto_mount_params = pvc_params_str
runtime = self._generate_runtime()
self._execute_run(runtime)
rundb_mock.assert_pvc_mount_configured(pvc_params)
# Try with disable-auto-mount
rundb_mock.reset()
runtime = self._generate_runtime(disable_auto_mount=True)
self._execute_run(runtime)
rundb_mock.assert_no_mount_or_creds_configured()
# Try something that does not translate to a dictionary
bad_params_str = base64.b64encode(
json.dumps(["I'm", "not", "a", "dictionary"]).encode()
)
mlconf.storage.auto_mount_params = bad_params_str
with pytest.raises(TypeError):
mlconf.get_storage_auto_mount_params()
def test_auto_mount_function_with_pvc_config(self, rundb_mock):
pvc_params = self._setup_pvc_mount()
pvc_params_str = base64.b64encode(json.dumps(pvc_params).encode())
mlconf.storage.auto_mount_params = pvc_params_str
runtime = self._generate_runtime()
runtime.apply(mlrun.auto_mount())
assert runtime.spec.disable_auto_mount
self._execute_run(runtime)
rundb_mock.assert_pvc_mount_configured(pvc_params)
os.environ.pop("V3IO_ACCESS_KEY", None)
mlconf.storage.auto_mount_type = "auto"
with pytest.raises(
ValueError, match="failed to auto mount, need to set env vars"
):
runtime.apply(mlrun.auto_mount())
| true | true |
f7f806f6320e69b58b10203ee8f6c15991f14131 | 1,032 | py | Python | src/lambda_codebase/account/handler.py | ottokruse/aws-deployment-framework | b8fa70f792ee36af137edd23c35c86db53999497 | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/account/handler.py | ottokruse/aws-deployment-framework | b8fa70f792ee36af137edd23c35c86db53999497 | [
"Apache-2.0"
] | null | null | null | src/lambda_codebase/account/handler.py | ottokruse/aws-deployment-framework | b8fa70f792ee36af137edd23c35c86db53999497 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
"""
The Account handler that is called when ADF is installed to initially create the deployment account if required
"""
try:
from main import lambda_handler # pylint: disable=unused-import
except Exception as err: # pylint: disable=broad-except
from urllib.request import Request, urlopen
import json
def lambda_handler(event, _context, prior_error=err):
response = dict(
LogicalResourceId=event["LogicalResourceId"],
PhysicalResourceId=event.get("PhysicalResourceId", "NOT_YET_CREATED"),
Status="FAILED",
RequestId=event["RequestId"],
StackId=event["StackId"],
Reason=str(prior_error),
)
urlopen(
Request(
event["ResponseURL"],
data=json.dumps(response).encode(),
headers={"content-type": ""},
method="PUT",
)
)
| 33.290323 | 111 | 0.61531 |
try:
from main import lambda_handler
except Exception as err:
from urllib.request import Request, urlopen
import json
def lambda_handler(event, _context, prior_error=err):
response = dict(
LogicalResourceId=event["LogicalResourceId"],
PhysicalResourceId=event.get("PhysicalResourceId", "NOT_YET_CREATED"),
Status="FAILED",
RequestId=event["RequestId"],
StackId=event["StackId"],
Reason=str(prior_error),
)
urlopen(
Request(
event["ResponseURL"],
data=json.dumps(response).encode(),
headers={"content-type": ""},
method="PUT",
)
)
| true | true |
f7f807b5891545a7d194856b5f69bf83b6db5cbb | 949 | py | Python | nb_third_party/jinja2/_ipysupport.py | djprmf/namebench | ffbd716edf957d3788390444d2ef475d8828391a | [
"Apache-2.0"
] | 460 | 2016-01-13T12:49:34.000Z | 2022-02-20T04:10:40.000Z | nb_third_party/jinja2/_ipysupport.py | djprmf/namebench | ffbd716edf957d3788390444d2ef475d8828391a | [
"Apache-2.0"
] | 24 | 2016-11-07T04:59:49.000Z | 2022-03-14T06:34:12.000Z | nb_third_party/jinja2/_ipysupport.py | djprmf/namebench | ffbd716edf957d3788390444d2ef475d8828391a | [
"Apache-2.0"
] | 148 | 2016-01-17T03:16:43.000Z | 2022-03-17T12:20:36.000Z | # -*- coding: utf-8 -*-
"""
jinja2._ipysupport
~~~~~~~~~~~~~~~~~~
IronPython support library. This library exports functionality from
the CLR to Python that is normally available in the standard library.
:copyright: (c) 2009 by the Jinja Team.
:license: BSD.
"""
from System import DateTime
from System.IO import Path, File, FileInfo
epoch = DateTime(1970, 1, 1)
class _PathModule(object):
"""A minimal path module."""
sep = str(Path.DirectorySeparatorChar)
altsep = str(Path.AltDirectorySeparatorChar)
pardir = '..'
def join(self, path, *args):
args = list(args[::-1])
while args:
path = Path.Combine(path, args.pop())
return path
def isfile(self, filename):
return File.Exists(filename)
def getmtime(self, filename):
info = FileInfo(filename)
return int((info.LastAccessTimeUtc - epoch).TotalSeconds)
path = _PathModule()
| 23.146341 | 73 | 0.633298 |
from System import DateTime
from System.IO import Path, File, FileInfo
epoch = DateTime(1970, 1, 1)
class _PathModule(object):
sep = str(Path.DirectorySeparatorChar)
altsep = str(Path.AltDirectorySeparatorChar)
pardir = '..'
def join(self, path, *args):
args = list(args[::-1])
while args:
path = Path.Combine(path, args.pop())
return path
def isfile(self, filename):
return File.Exists(filename)
def getmtime(self, filename):
info = FileInfo(filename)
return int((info.LastAccessTimeUtc - epoch).TotalSeconds)
path = _PathModule()
| true | true |
f7f809f35afb3f02db78e329db8de0b62d5678e0 | 910 | py | Python | seam_erasure/points_in_triangle.py | zfergus2/Seam-Minimization-Service | 2cfce0e4e29d3b40b0b8626994a6e6b89d5303cd | [
"MIT"
] | 98 | 2017-08-09T01:10:33.000Z | 2022-02-25T14:31:06.000Z | seam_erasure/points_in_triangle.py | zfergus2/Seam-Minimization-Service | 2cfce0e4e29d3b40b0b8626994a6e6b89d5303cd | [
"MIT"
] | 2 | 2019-10-10T17:50:12.000Z | 2020-01-26T11:30:33.000Z | seam_erasure/points_in_triangle.py | zfergus2/Seam-Minimization-Service | 2cfce0e4e29d3b40b0b8626994a6e6b89d5303cd | [
"MIT"
] | 16 | 2017-09-15T09:08:37.000Z | 2021-02-07T22:44:58.000Z | """
Utility file for testing if points are in a given triangle.
Written by Zachary Ferguson
"""
import numpy
def points_in_triangle(tri, points, tol=1e-8):
"""
Test if the points are inside the triangle.
Input:
tri - the triangle as a matrix where the rows are the xy points.
points - the points as a matrix where the rows are the xy points.
Returns a vector of boolean values.
"""
# B is the transformation from xy to barycentric coordinates
B = numpy.vstack([tri.T, numpy.ones(3)])
vecs = numpy.vstack([points.T, numpy.ones((1, points.shape[0]))])
# Convert the grid from XY locations to barycentric coordinates.
# This will only fail of the triangle is degenerate.
try:
coords = numpy.linalg.solve(B, vecs)
except Exception:
return numpy.zeros(points.shape[0]).astype(bool)
return numpy.all(coords >= -tol, axis=0)
| 28.4375 | 73 | 0.672527 |
import numpy
def points_in_triangle(tri, points, tol=1e-8):
B = numpy.vstack([tri.T, numpy.ones(3)])
vecs = numpy.vstack([points.T, numpy.ones((1, points.shape[0]))])
try:
coords = numpy.linalg.solve(B, vecs)
except Exception:
return numpy.zeros(points.shape[0]).astype(bool)
return numpy.all(coords >= -tol, axis=0)
| true | true |
f7f80a29f3606c4eea4afc31cd571abf27025986 | 860 | py | Python | tests/big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/test_big_map_ooP8uo.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2021-05-20T16:52:08.000Z | 2021-05-20T16:52:08.000Z | tests/big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/test_big_map_ooP8uo.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2020-12-30T16:44:56.000Z | 2020-12-30T16:44:56.000Z | tests/big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/test_big_map_ooP8uo.py | juztin/pytezos-1 | 7e608ff599d934bdcf129e47db43dbdb8fef9027 | [
"MIT"
] | 1 | 2022-03-20T19:01:00.000Z | 2022-03-20T19:01:00.000Z | from unittest import TestCase
from tests import get_data
from pytezos.michelson.contract import ContractStorage
class BigMapCodingTestooP8uo(TestCase):
def setUp(self):
self.maxDiff = None
def test_big_map_ooP8uo(self):
section = get_data(
path='big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/storage_section.json')
storage = ContractStorage(section)
big_map_diff = get_data(
path='big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/big_map_diff.json')
expected = [
dict(key=item['key'], value=item.get('value'))
for item in big_map_diff
]
big_map = storage.big_map_diff_decode(expected)
actual = storage.big_map_diff_encode(big_map)
self.assertEqual(expected, actual)
| 31.851852 | 105 | 0.683721 | from unittest import TestCase
from tests import get_data
from pytezos.michelson.contract import ContractStorage
class BigMapCodingTestooP8uo(TestCase):
def setUp(self):
self.maxDiff = None
def test_big_map_ooP8uo(self):
section = get_data(
path='big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/storage_section.json')
storage = ContractStorage(section)
big_map_diff = get_data(
path='big_map_diff/ooP8uoN2eMv1dcC8wdMgKvgHJBuQYARFXH4KMURfJdGgMZofC79/big_map_diff.json')
expected = [
dict(key=item['key'], value=item.get('value'))
for item in big_map_diff
]
big_map = storage.big_map_diff_decode(expected)
actual = storage.big_map_diff_encode(big_map)
self.assertEqual(expected, actual)
| true | true |
f7f80aa49a5b169520d0cefb8838f618904c93d2 | 8,607 | py | Python | preprocessing/her2_split_and_imbalance.py | minotm/NTA | cc8aba4ea46fe40ce621f1314d9798f54de41d5b | [
"MIT"
] | 1 | 2022-03-24T08:30:30.000Z | 2022-03-24T08:30:30.000Z | preprocessing/her2_split_and_imbalance.py | minotm/NTA | cc8aba4ea46fe40ce621f1314d9798f54de41d5b | [
"MIT"
] | null | null | null | preprocessing/her2_split_and_imbalance.py | minotm/NTA | cc8aba4ea46fe40ce621f1314d9798f54de41d5b | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created 2022
@author: Mason Minot
"""
from Levenshtein import distance as levenshtein_distance
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
print('Now Executing Trastuzumab Train/Val/Test Splitting...')
"""
This script serves to split the trastuzumab (her2) data into training, validation, and testing sets. The train
and validation sets are selected to be an edit distance of 7 or less from the wild type trastuzumab. The
test set is an edit distance of 8 or greater from the wild type
Inputs
----------
labeled trastuzumab data from Mason et. al 2021 github repo: https://github.com/dahjan/DMS_opt
mHER_H3_AgPos.csv
mHER_H3_AgNeg.csv
Outputs
-------
csv files containing training, validation, and testing sets
"""
def add_LD_to_df(antigen_ID, data_frame):
'''
Function to add Edit Distance (Levenshtein Distance) from wt for each sequence to dataframe
Parameters
----------
antigen_ID : str
corresponds to the antigen identity. in this case, her2
data_frame : pandas.DataFrame
dataframe containing all sequence & label data
Returns
-------
data_frame : pandas.DataFrame
input dataframe with an added column containing the Levenshtein Distance from the wild type
for each sequence in the dataframe
'''
if antigen_ID == 'her2':
wt_str = 'WGGDGFYAMK'
LD_arr = []
for i in range(len(data_frame)):
LD_arr.append( levenshtein_distance(wt_str, data_frame['AASeq'].iloc[i]) )
data_frame['LD'] = LD_arr
return data_frame
def class_balance_binary(data_frame):
'''
Function to class balance dataset
Parameters
----------
data_frame : pandas.DataFrame
dataframe containing all sequence & label data
Returns
-------
data_frame : pandas.DataFrame
class balanced dataframe. number of positive examples is equal to the number of negatives
'''
positives = data_frame[data_frame['AgClass'] == 1].copy()
negatives = data_frame[data_frame['AgClass'] == 0].copy()
min_list = min([len(ls) for ls in [positives, negatives]])
positives = positives[: int(np.round(min_list))]
negatives = negatives[: int(np.round(min_list))]
return positives, negatives
her2_path_local = '../data/her2/'
pos = pd.read_csv(her2_path_local + 'mHER_H3_AgPos.csv')
neg = pd.read_csv(her2_path_local + 'mHER_H3_AgNeg.csv')
def combine_df_list_and_shuffle(df_list, keep = False):
'''
combines two dataframes, drops duplicates, & shuffles
Parameters
----------
data_frame : pandas.DataFrame
dataframe containing all sequence & label data
keep: bool
whether or not to keep duplicates
Returns
-------
data_frame : pandas.DataFrame
combined, shuffled dataframe
'''
frames = df_list
common_cols = list(set.intersection(*(set(df.columns) for df in frames)))
combined_df = pd.concat([df[common_cols] for df in frames], ignore_index=True).drop_duplicates(subset='AASeq', keep=keep)
np.random.seed(0)
combined_df = combined_df.reindex(np.random.permutation(combined_df.index))
return combined_df
all_data_frames = [pos.copy(), neg.copy()]
data_frame = combine_df_list_and_shuffle(all_data_frames, keep = False)
data_frame = add_LD_to_df('her2', data_frame)
selected_LD_split = 7
train_df = data_frame[data_frame['LD'] <= selected_LD_split]
test_df_initial = data_frame[data_frame['LD'] > selected_LD_split]
#Function to drop duplicates from two dataframes
def drop_test_seqs(train_df, test_df, seq_name):
'''
Function serves as a check to prevent dataleakage between training & test or training & val sets
Parameters
----------
train_df : pandas.DataFrame
train dataframe
test_df : pandas.DataFrame
test dataframe
seq_name : str
corresponds to the dataframe column name containing sequences.
Returns
-------
out_df : TYPE
train dataframe without test sequences
'''
train_df = train_df.copy()
train_df['df'] = 'train'
test_df_copy = test_df.copy()
test_df_copy['df'] = 'test'
frames = [train_df.copy(),test_df_copy]
common_cols = list(set.intersection(*(set(df.columns) for df in frames)))
concat_df = pd.concat([df[common_cols] for df in frames], ignore_index=True)
concat_df = concat_df.drop_duplicates(subset=[seq_name],keep=False)
out_df = concat_df[concat_df['df'] == 'train']
return out_df
train_df = drop_test_seqs(train_df, test_df_initial, 'AASeq')
def drop_and_rename_columns(df):
df = df.copy()
df = df.rename(columns = {'AASeq': 'aaseq', 'AgClass': 'target'})
df = df.drop(columns = ['Unnamed: 0', 'Fraction', 'NucSeq', 'Count', 'df'])
return df
#Balance test set & save to csv
test_df = test_df_initial.copy()
test_df['df'] = 'test' #add to df to facilitate using the function below
test_df = drop_and_rename_columns(test_df)
test_positives = test_df[test_df['target'] == 1]
test_negs = test_df[test_df['target'] == 0].sample(n = int(len(test_positives)), random_state = 1)
test_df = test_positives.append(test_negs,ignore_index = True)
test_df = test_df.reindex(np.random.permutation(test_df.index))
out_path = '../data/her2/'
test_df.to_csv(out_path + 'her2_test.csv', index=False)
train_df = drop_and_rename_columns(train_df)
# Class balance the full training data set & shuffle the dataframe.
initial_train_pos = train_df[train_df['target'] == 1]
initial_train_neg = train_df[train_df['target'] == 0]
initial_train_neg = initial_train_neg[initial_train_neg['LD'] != 3] #drop the single LD 3 seq from df. required for sklearn train test stratifying later in script
initial_train_pos = initial_train_pos[initial_train_pos['LD'] != 2] #drop the two LD 2 seq from df. required for sklearn train test stratifying later in script
# Downsample the majority class to the size of the minority class.
minlen = min([len(initial_train_pos),len(initial_train_neg) ])
initial_train_pos = initial_train_pos.sample(n = minlen, random_state = 1)
initial_train_neg = initial_train_neg.sample(n = minlen, random_state = 1)
# (A dead `train_df = pd.DataFrame()` that was immediately overwritten has
# been removed.)  DataFrame.append was deprecated in pandas 1.4 and removed
# in 2.0; pd.concat is the drop-in replacement.
train_df = pd.concat([initial_train_pos, initial_train_neg], ignore_index = True)
# sample(n=len(df)) with a fixed seed is a reproducible full shuffle.
train_df = train_df.sample(n = int(len(train_df)), random_state = 1)
# Batch training & val sets with different quantities of class imbalance,
# using positives as the minority class.
class_imbalance_qty_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
train_df_master_copy = train_df.copy()
for imbal_qty in class_imbalance_qty_list:
    # Artificially increase class imbalance by downsampling positives.
    train_positives = train_df_master_copy[train_df_master_copy['target'] == 1]
    train_negs = train_df_master_copy[train_df_master_copy['target'] == 0]
    # Downsample with sklearn, stratifying on LD so the edit-distance
    # distribution of the kept positives matches the original.
    if imbal_qty != 1.0:
        train_positives, x_discard, y_train, y_discard = train_test_split(train_positives, train_positives['target'], test_size = 1 - imbal_qty,
                                                            random_state = 1, shuffle = True, stratify = train_positives['LD'])
    elif imbal_qty == 1.0:
        # NOTE(review): dead assignment kept for parity with the original;
        # train_truncated is never read afterwards.
        train_truncated = train_positives
    # Split a val set from training (80/20), maintaining LD distribution per class.
    train_positives, val_positives, y_train, y_val = train_test_split(train_positives, train_positives['target'], test_size = 1 - 0.8,
                                                        random_state = 1, shuffle = True, stratify = train_positives['LD'])
    train_negs, val_negs, y_train, y_val = train_test_split(train_negs, train_negs['target'], test_size = 1 - 0.8,
                                                        random_state = 1, shuffle = True, stratify = train_negs['LD'])
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0;
    # pd.concat with ignore_index=True is the drop-in replacement.
    train_df = pd.concat([train_positives, train_negs], ignore_index = True)
    train_df = train_df.reindex(np.random.permutation(train_df.index))
    val_df = pd.concat([val_positives, val_negs], ignore_index = True)
    val_df = val_df.reindex(np.random.permutation(val_df.index))
    # Prevent train/val leakage, then drop bookkeeping columns before writing.
    train_df = drop_test_seqs(train_df, val_df, 'aaseq')
    train_df = train_df.drop(columns = ['df'])
    train_df = train_df.drop(columns = ['LD'])
    val_df = val_df.drop(columns = ['LD'])
    out_str_train = out_path + 'her2_train_imbal_' + str(imbal_qty) + '.csv'
    out_str_val = out_path + 'her2_val_imbal_' + str(imbal_qty) + '.csv'
    train_df.to_csv(out_str_train, index=False)
    val_df.to_csv(out_str_val, index=False)
| 37.585153 | 162 | 0.694551 |
from Levenshtein import distance as levenshtein_distance
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
print('Now Executing Trastuzumab Train/Val/Test Splitting...')
def add_LD_to_df(antigen_ID, data_frame):
    """Add an 'LD' column: Levenshtein distance of each 'AASeq' entry from wild type.

    Parameters
    ----------
    antigen_ID : str
        Identifier for the antigen data set; only 'her2' is supported.
    data_frame : pandas.DataFrame
        Frame with an 'AASeq' column of sequence strings; mutated in place.

    Returns
    -------
    pandas.DataFrame
        The same frame with the new 'LD' column.

    Raises
    ------
    ValueError
        If antigen_ID is not 'her2'.  (Previously an unknown antigen fell
        through to a confusing NameError on the undefined wt_str.)
    """
    if antigen_ID == 'her2':
        wt_str = 'WGGDGFYAMK'  # her2 wild-type reference sequence
    else:
        raise ValueError("Unsupported antigen_ID: %r" % (antigen_ID,))
    # Comprehension over the column replaces the per-row iloc loop
    # (same values, less indexing overhead).
    data_frame['LD'] = [
        levenshtein_distance(wt_str, seq) for seq in data_frame['AASeq']
    ]
    return data_frame
def class_balance_binary(data_frame):
    """Split a labelled frame into equally sized positive and negative halves.

    Rows are partitioned on the binary 'AgClass' column and each partition is
    truncated (head-first, not randomly) to the size of the smaller one.

    Returns a (positives, negatives) tuple of copies.
    """
    pos = data_frame[data_frame['AgClass'] == 1].copy()
    neg = data_frame[data_frame['AgClass'] == 0].copy()
    cutoff = min(len(pos), len(neg))
    return pos[:cutoff], neg[:cutoff]
# Load the antigen-positive and antigen-negative mHER H3 sequence files.
her2_path_local = '../data/her2/'
pos = pd.read_csv(her2_path_local + 'mHER_H3_AgPos.csv')
neg = pd.read_csv(her2_path_local + 'mHER_H3_AgNeg.csv')
def combine_df_list_and_shuffle(df_list, keep = False, subset = 'AASeq', seed = 0):
    """Concatenate frames on their shared columns, de-duplicate, and shuffle.

    Parameters
    ----------
    df_list : list of pandas.DataFrame
        Frames to merge; only columns common to all of them are kept.
    keep : bool or str, default False
        Forwarded to DataFrame.drop_duplicates.  False drops every row whose
        *subset* value occurs more than once (removes cross-class duplicates).
    subset : str, default 'AASeq'
        Column used for duplicate detection.  Generalized from the previous
        hard-coded 'AASeq'; the default preserves the old behavior.
    seed : int, default 0
        Seed for the numpy RNG used by the reproducible shuffle.  Generalized
        from the previous hard-coded 0.

    Returns
    -------
    pandas.DataFrame
        Shuffled, de-duplicated concatenation (original index labels kept).
    """
    frames = df_list
    common_cols = list(set.intersection(*(set(df.columns) for df in frames)))
    combined_df = pd.concat(
        [df[common_cols] for df in frames], ignore_index=True
    ).drop_duplicates(subset=subset, keep=keep)
    # Seeding the *global* numpy RNG keeps the shuffle reproducible, but also
    # affects any later np.random call in the process.
    np.random.seed(seed)
    combined_df = combined_df.reindex(np.random.permutation(combined_df.index))
    return combined_df
# Combine both classes, drop duplicate sequences, and tag each with its
# Levenshtein distance (LD) from wild type.
all_data_frames = [pos.copy(), neg.copy()]
data_frame = combine_df_list_and_shuffle(all_data_frames, keep = False)
data_frame = add_LD_to_df('her2', data_frame)
# Split by edit distance from wild type: LD <= 7 -> train pool, LD > 7 -> held-out test.
selected_LD_split = 7
train_df = data_frame[data_frame['LD'] <= selected_LD_split]
test_df_initial = data_frame[data_frame['LD'] > selected_LD_split]
def drop_test_seqs(train_df, test_df, seq_name):
    """Remove from train_df any sequence (column *seq_name*) that also appears in test_df.

    Both frames are tagged with a helper 'df' column, concatenated over their
    shared columns, and every row whose sequence occurs more than once is
    dropped (keep=False) — so sequences duplicated across, or within, the
    frames are eliminated.  Only the surviving 'train' rows are returned,
    with the 'df' tag column still present.
    """
    train_df = train_df.copy()
    train_df['df'] = 'train'
    test_df_copy = test_df.copy()
    test_df_copy['df'] = 'test'
    frames = [train_df.copy(),test_df_copy]
    common_cols = list(set.intersection(*(set(df.columns) for df in frames)))
    concat_df = pd.concat([df[common_cols] for df in frames], ignore_index=True)
    concat_df = concat_df.drop_duplicates(subset=[seq_name],keep=False)
    out_df = concat_df[concat_df['df'] == 'train']
    return out_df
# Remove any training sequence that also appears in the held-out test split.
train_df = drop_test_seqs(train_df, test_df_initial, 'AASeq')
def drop_and_rename_columns(df):
    """Return a copy with ML-ready names ('aaseq', 'target') and bookkeeping columns removed.

    NOTE(review): drop() raises KeyError if any listed column is absent, so the
    frame must already carry the helper 'df' column when this is called.
    """
    df = df.copy()
    df = df.rename(columns = {'AASeq': 'aaseq', 'AgClass': 'target'})
    df = df.drop(columns = ['Unnamed: 0', 'Fraction', 'NucSeq', 'Count', 'df'])
    return df
# --- Build the class-balanced held-out test set and write it to disk ---
test_df = test_df_initial.copy()
test_df['df'] = 'test'  # tag so drop_and_rename_columns can drop the helper column
test_df = drop_and_rename_columns(test_df)
test_positives = test_df[test_df['target'] == 1]
# Downsample negatives to the positive count (fixed seed for reproducibility).
test_negs = test_df[test_df['target'] == 0].sample(n = int(len(test_positives)), random_state = 1)
# NOTE(review): DataFrame.append was removed in pandas 2.0 — pd.concat is the replacement.
test_df = test_positives.append(test_negs,ignore_index = True)
test_df = test_df.reindex(np.random.permutation(test_df.index))
out_path = '../data/her2/'
test_df.to_csv(out_path + 'her2_test.csv', index=False)
# --- Class balance the full training set and shuffle ---
train_df = drop_and_rename_columns(train_df)
initial_train_pos = train_df[train_df['target'] == 1]
initial_train_neg = train_df[train_df['target'] == 0]
# Drop the lone LD-3 negative / two LD-2 positives so sklearn LD-stratification works below.
initial_train_neg = initial_train_neg[initial_train_neg['LD'] != 3]
initial_train_pos = initial_train_pos[initial_train_pos['LD'] != 2]
minlen = min([len(initial_train_pos),len(initial_train_neg) ])
initial_train_pos = initial_train_pos.sample(n = minlen, random_state = 1)
initial_train_neg = initial_train_neg.sample(n = minlen, random_state = 1)
train_df = pd.DataFrame()
train_df = initial_train_pos.append(initial_train_neg, ignore_index = True)
train_df = train_df.sample(n = int(len(train_df)), random_state = 1)
# --- Emit train/val splits at several class-imbalance ratios (positives = minority) ---
class_imbalance_qty_list = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 0.95, 1.0]
train_df_master_copy = train_df.copy()
for imbal_qty in class_imbalance_qty_list:
    train_positives = train_df_master_copy[train_df_master_copy['target'] == 1]
    train_negs = train_df_master_copy[train_df_master_copy['target'] == 0]
    # Downsample positives, stratifying on LD to preserve the edit-distance distribution.
    if imbal_qty != 1.0:
        train_positives, x_discard, y_train, y_discard = train_test_split(train_positives, train_positives['target'], test_size = 1 - imbal_qty,
                                                            random_state = 1, shuffle = True, stratify = train_positives['LD'])
    elif imbal_qty == 1.0:
        # NOTE(review): train_truncated is assigned but never read (dead code).
        train_truncated = train_positives
    # 80/20 train/val split per class, keeping LD distribution via stratify.
    train_positives, val_positives, y_train, y_val = train_test_split(train_positives, train_positives['target'], test_size = 1 - 0.8,
                                                        random_state = 1, shuffle = True, stratify = train_positives['LD'])
    train_negs, val_negs, y_train, y_val = train_test_split(train_negs, train_negs['target'], test_size = 1 - 0.8,
                                                        random_state = 1, shuffle = True, stratify = train_negs['LD'])
    train_df = train_positives.append(train_negs,ignore_index = True)
    train_df = train_df.reindex(np.random.permutation(train_df.index))
    val_df = val_positives.append(val_negs,ignore_index = True)
    val_df = val_df.reindex(np.random.permutation(val_df.index))
    # Prevent train/val leakage, then drop bookkeeping columns before writing.
    train_df = drop_test_seqs(train_df, val_df, 'aaseq')
    train_df = train_df.drop(columns = ['df'])
    train_df = train_df.drop(columns = ['LD'])
    val_df = val_df.drop(columns = ['LD'])
    out_str_train = out_path + 'her2_train_imbal_' + str(imbal_qty) + '.csv'
    out_str_val = out_path + 'her2_val_imbal_' + str(imbal_qty) + '.csv'
    train_df.to_csv(out_str_train, index=False)
    val_df.to_csv(out_str_val, index=False)
| true | true |
f7f80ad4ff5d6463387ea412e948855ae4b5eeef | 27,337 | py | Python | ingestion/tests/unit/test_source_connection.py | ShaileshParmar11/OpenMetadata | 994a83bb9598facd9b9a4575f42b5a3c4a212f8f | [
"Apache-2.0"
] | null | null | null | ingestion/tests/unit/test_source_connection.py | ShaileshParmar11/OpenMetadata | 994a83bb9598facd9b9a4575f42b5a3c4a212f8f | [
"Apache-2.0"
] | null | null | null | ingestion/tests/unit/test_source_connection.py | ShaileshParmar11/OpenMetadata | 994a83bb9598facd9b9a4575f42b5a3c4a212f8f | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Collate
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from metadata.generated.schema.entity.services.connections.database.athenaConnection import (
AthenaConnection,
AthenaScheme,
)
from metadata.generated.schema.entity.services.connections.database.clickhouseConnection import (
ClickhouseConnection,
ClickhouseScheme,
)
from metadata.generated.schema.entity.services.connections.database.databricksConnection import (
DatabricksConnection,
DatabricksScheme,
)
from metadata.generated.schema.entity.services.connections.database.db2Connection import (
Db2Connection,
Db2Scheme,
)
from metadata.generated.schema.entity.services.connections.database.druidConnection import (
DruidConnection,
DruidScheme,
)
from metadata.generated.schema.entity.services.connections.database.hiveConnection import (
HiveConnection,
HiveScheme,
)
from metadata.generated.schema.entity.services.connections.database.mariaDBConnection import (
MariaDBConnection,
MariaDBScheme,
)
from metadata.generated.schema.entity.services.connections.database.mssqlConnection import (
MssqlConnection,
MssqlScheme,
)
from metadata.generated.schema.entity.services.connections.database.mysqlConnection import (
MysqlConnection,
MySQLScheme,
)
from metadata.generated.schema.entity.services.connections.database.pinotDBConnection import (
PinotDBConnection,
PinotDBScheme,
)
from metadata.generated.schema.entity.services.connections.database.postgresConnection import (
PostgresConnection,
PostgresScheme,
)
from metadata.generated.schema.entity.services.connections.database.prestoConnection import (
PrestoConnection,
PrestoScheme,
)
from metadata.generated.schema.entity.services.connections.database.redshiftConnection import (
RedshiftConnection,
RedshiftScheme,
)
from metadata.generated.schema.entity.services.connections.database.singleStoreConnection import (
SingleStoreConnection,
SingleStoreScheme,
)
from metadata.generated.schema.entity.services.connections.database.snowflakeConnection import (
SnowflakeConnection,
SnowflakeScheme,
)
from metadata.generated.schema.entity.services.connections.database.trinoConnection import (
TrinoConnection,
TrinoScheme,
)
from metadata.generated.schema.entity.services.connections.database.verticaConnection import (
VerticaConnection,
VerticaScheme,
)
from metadata.generated.schema.security.credentials import awsCredentials
from metadata.utils.source_connections import get_connection_args, get_connection_url
class SouceConnectionTest(TestCase):
    """Unit tests for the connection-URL / connection-argument builders.

    Each test constructs a typed connection configuration object (Databricks,
    Hive, Trino, MySQL, Snowflake, ...) and asserts that get_connection_url()
    returns the expected SQLAlchemy-style URL string, or that
    get_connection_args() returns the expected driver-argument dict.

    NOTE(review): the class name is missing an 'r' ("Souce"); it is kept
    as-is here to avoid breaking any external references to the test class.
    """
    def test_databricks_url_without_db(self):
        expected_result = (
            "databricks+connector://token:KlivDTACWXKmZVfN1qIM@1.1.1.1:443"
        )
        databricks_conn_obj = DatabricksConnection(
            scheme=DatabricksScheme.databricks_connector,
            hostPort="1.1.1.1:443",
            token="KlivDTACWXKmZVfN1qIM",
        )
        assert expected_result == get_connection_url(databricks_conn_obj)
    def test_databricks_url_with_db(self):
        expected_result = (
            "databricks+connector://token:KlivDTACWXKmZVfN1qIM@1.1.1.1:443/default"
        )
        databricks_conn_obj = DatabricksConnection(
            scheme=DatabricksScheme.databricks_connector,
            hostPort="1.1.1.1:443",
            token="KlivDTACWXKmZVfN1qIM",
            database="default",
        )
        assert expected_result == get_connection_url(databricks_conn_obj)
    def test_hive_url(self):
        expected_result = "hive://localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive, hostPort="localhost:10000", database="default"
        )
        assert expected_result == get_connection_url(hive_conn_obj)
    def test_hive_url_custom_auth(self):
        expected_result = "hive://username:password@localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive.value,
            username="username",
            password="password",
            hostPort="localhost:10000",
            database="default",
            connectionArguments={"auth": "CUSTOM"},
        )
        assert expected_result == get_connection_url(hive_conn_obj)
    def test_hive_url_with_kerberos_auth(self):
        # Kerberos auth: credentials are not embedded in the URL.
        expected_result = "hive://localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive.value,
            hostPort="localhost:10000",
            database="default",
            connectionArguments={
                "auth": "KERBEROS",
                "kerberos_service_name": "hive",
            },
        )
        assert expected_result == get_connection_url(hive_conn_obj)
    def test_hive_url_with_ldap_auth(self):
        expected_result = "hive://username:password@localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive.value,
            username="username",
            password="password",
            hostPort="localhost:10000",
            database="default",
            connectionArguments={"auth": "LDAP"},
        )
        assert expected_result == get_connection_url(hive_conn_obj)
    def test_trino_url_without_params(self):
        expected_url = "trino://username:pass@localhost:443/catalog"
        trino_conn_obj = TrinoConnection(
            scheme=TrinoScheme.trino,
            hostPort="localhost:443",
            username="username",
            password="pass",
            catalog="catalog",
        )
        assert expected_url == get_connection_url(trino_conn_obj)
    def test_trino_conn_arguments(self):
        # connection arguments without connectionArguments and without proxies
        expected_args = {}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments=None,
            scheme=TrinoScheme.trino,
        )
        assert expected_args == get_connection_args(trino_conn_obj)
        # connection arguments with connectionArguments and without proxies
        expected_args = {"user": "user-to-be-impersonated"}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=TrinoScheme.trino,
        )
        assert expected_args == get_connection_args(trino_conn_obj)
        # connection arguments without connectionArguments and with proxies
        expected_args = {}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments=None,
            proxies={"http": "foo.bar:3128", "http://host.name": "foo.bar:4012"},
            scheme=TrinoScheme.trino,
        )
        conn_args = get_connection_args(trino_conn_obj)
        # proxies materialize as an http_session entry; drop it before comparing
        assert "http_session" in conn_args
        conn_args.pop("http_session")
        assert expected_args == conn_args
        # connection arguments with connectionArguments and with proxies
        expected_args = {"user": "user-to-be-impersonated"}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            proxies={"http": "foo.bar:3128", "http://host.name": "foo.bar:4012"},
            scheme=TrinoScheme.trino,
        )
        conn_args = get_connection_args(trino_conn_obj)
        assert "http_session" in conn_args
        conn_args.pop("http_session")
        assert expected_args == conn_args
    def test_trino_url_with_params(self):
        expected_url = "trino://username:pass@localhost:443/catalog?param=value"
        trino_conn_obj = TrinoConnection(
            scheme=TrinoScheme.trino,
            hostPort="localhost:443",
            username="username",
            password="pass",
            catalog="catalog",
            params={"param": "value"},
        )
        assert expected_url == get_connection_url(trino_conn_obj)
    def test_trino_with_proxies(self):
        test_proxies = {"http": "http_proxy", "https": "https_proxy"}
        trino_conn_obj = TrinoConnection(
            scheme=TrinoScheme.trino,
            hostPort="localhost:443",
            username="username",
            password="pass",
            catalog="catalog",
            proxies=test_proxies,
        )
        assert (
            test_proxies
            == get_connection_args(trino_conn_obj).get("http_session").proxies
        )
    def test_vertica_url(self):
        expected_url = (
            "vertica+vertica_python://username:password@localhost:5443/database"
        )
        vertica_conn_obj = VerticaConnection(
            scheme=VerticaScheme.vertica_vertica_python,
            hostPort="localhost:5443",
            username="username",
            password="password",
            database="database",
        )
        assert expected_url == get_connection_url(vertica_conn_obj)
    def test_druid_url(self):
        expected_url = "druid://localhost:8082/druid/v2/sql"
        druid_conn_obj = DruidConnection(
            scheme=DruidScheme.druid, hostPort="localhost:8082"
        )
        assert expected_url == get_connection_url(druid_conn_obj)
    def test_pinotdb_url(self):
        expected_url = (
            "pinot://localhost:8099/query/sql?controller=http://localhost:9000/"
        )
        pinot_conn_obj = PinotDBConnection(
            scheme=PinotDBScheme.pinot,
            hostPort="localhost:8099",
            pinotControllerHost="http://localhost:9000/",
        )
        assert expected_url == get_connection_url(pinot_conn_obj)
    def test_mysql_url(self):
        # connection arguments without db
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306"
        mysql_conn_obj = MysqlConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MySQLScheme.mysql_pymysql,
            database=None,
        )
        assert expected_url == get_connection_url(mysql_conn_obj)
        # connection arguments with db
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306/default"
        mysql_conn_obj = MysqlConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MySQLScheme.mysql_pymysql,
            database="default",
        )
        assert expected_url == get_connection_url(mysql_conn_obj)
    def test_clickhouse_url(self):
        # connection arguments without db
        expected_url = "clickhouse+http://username:@localhost:8123"
        clickhouse_conn_obj = ClickhouseConnection(
            username="username",
            hostPort="localhost:8123",
            scheme=ClickhouseScheme.clickhouse_http,
            database=None,
        )
        assert expected_url == get_connection_url(clickhouse_conn_obj)
        # connection arguments with db
        expected_url = "clickhouse+http://username:@localhost:8123/default"
        clickhouse_conn_obj = ClickhouseConnection(
            username="username",
            hostPort="localhost:8123",
            scheme=ClickhouseScheme.clickhouse_http,
            database="default",
        )
        assert expected_url == get_connection_url(clickhouse_conn_obj)
    def test_mariadb_url(self):
        # connection arguments without db
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306"
        mariadb_conn_obj = MariaDBConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MariaDBScheme.mysql_pymysql,
            database=None,
        )
        assert expected_url == get_connection_url(mariadb_conn_obj)
        # connection arguments with db
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306/default"
        mariadb_conn_obj = MariaDBConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MariaDBScheme.mysql_pymysql,
            database="default",
        )
        assert expected_url == get_connection_url(mariadb_conn_obj)
    def test_postgres_url(self):
        # connection arguments without db
        expected_url = "postgresql+psycopg2://openmetadata_user:@localhost:5432"
        postgres_conn_obj = PostgresConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=PostgresScheme.postgresql_psycopg2,
            database=None,
        )
        assert expected_url == get_connection_url(postgres_conn_obj)
        # connection arguments with db
        expected_url = "postgresql+psycopg2://openmetadata_user:@localhost:5432/default"
        postgres_conn_obj = PostgresConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=PostgresScheme.postgresql_psycopg2,
            database="default",
        )
        assert expected_url == get_connection_url(postgres_conn_obj)
    def test_redshift_url(self):
        # connection arguments with db
        expected_url = "redshift+psycopg2://username:strong_password@cluster.name.region.redshift.amazonaws.com:5439/dev"
        redshift_conn_obj = RedshiftConnection(
            username="username",
            password="strong_password",
            hostPort="cluster.name.region.redshift.amazonaws.com:5439",
            scheme=RedshiftScheme.redshift_psycopg2,
            database="dev",
        )
        assert expected_url == get_connection_url(redshift_conn_obj)
    def test_singleStore_url(self):
        # connection arguments without db
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:5432"
        singleStore_conn_obj = SingleStoreConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=SingleStoreScheme.mysql_pymysql,
            database=None,
        )
        assert expected_url == get_connection_url(singleStore_conn_obj)
        # connection arguments with db
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:5432/default"
        singleStore_conn_obj = SingleStoreConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=SingleStoreScheme.mysql_pymysql,
            database="default",
        )
        assert expected_url == get_connection_url(singleStore_conn_obj)
    def test_db2_url(self):
        # connection arguments without db
        expected_url = "db2+ibm_db://openmetadata_user:@localhost:50000"
        db2_conn_obj = Db2Connection(
            scheme=Db2Scheme.db2_ibm_db,
            username="openmetadata_user",
            hostPort="localhost:50000",
            database=None,
        )
        assert expected_url == get_connection_url(db2_conn_obj)
        # connection arguments with db
        expected_url = "db2+ibm_db://openmetadata_user:@localhost:50000/default"
        db2_conn_obj = Db2Connection(
            username="openmetadata_user",
            hostPort="localhost:50000",
            scheme=Db2Scheme.db2_ibm_db,
            database="default",
        )
        assert expected_url == get_connection_url(db2_conn_obj)
    def test_snowflake_url(self):
        # connection arguments without db
        expected_url = "snowflake://coding:Abhi@ue18849.us-east-2.aws?account=ue18849.us-east-2.aws&warehouse=COMPUTE_WH"
        snowflake_conn_obj = SnowflakeConnection(
            scheme=SnowflakeScheme.snowflake,
            username="coding",
            password="Abhi",
            warehouse="COMPUTE_WH",
            account="ue18849.us-east-2.aws",
        )
        assert expected_url == get_connection_url(snowflake_conn_obj)
        # connection arguments with db
        expected_url = "snowflake://coding:Abhi@ue18849.us-east-2.aws/testdb?account=ue18849.us-east-2.aws&warehouse=COMPUTE_WH"
        snowflake_conn_obj = SnowflakeConnection(
            scheme=SnowflakeScheme.snowflake,
            username="coding",
            password="Abhi",
            database="testdb",
            warehouse="COMPUTE_WH",
            account="ue18849.us-east-2.aws",
        )
        assert expected_url == get_connection_url(snowflake_conn_obj)
    def test_mysql_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        mysql_conn_obj = MysqlConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=MySQLScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mysql_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        mysql_conn_obj = MysqlConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=MySQLScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mysql_conn_obj)
    def test_clickhouse_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        clickhouse_conn_obj = ClickhouseConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=ClickhouseScheme.clickhouse_http,
        )
        assert expected_args == get_connection_args(clickhouse_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        clickhouse_conn_obj = ClickhouseConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=ClickhouseScheme.clickhouse_http,
        )
        assert expected_args == get_connection_args(clickhouse_conn_obj)
    def test_mariadb_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        mariadb_conn_obj = MariaDBConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=MariaDBScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mariadb_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        mariadb_conn_obj = MariaDBConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=MariaDBScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mariadb_conn_obj)
    def test_postgres_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        postgres_conn_obj = PostgresConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=PostgresScheme.postgresql_psycopg2,
        )
        assert expected_args == get_connection_args(postgres_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        postgres_conn_obj = PostgresConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=PostgresScheme.postgresql_psycopg2,
        )
        assert expected_args == get_connection_args(postgres_conn_obj)
    def test_redshift_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        redshift_conn_obj = RedshiftConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments=None,
            scheme=RedshiftScheme.redshift_psycopg2,
        )
        assert expected_args == get_connection_args(redshift_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        redshift_conn_obj = RedshiftConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=RedshiftScheme.redshift_psycopg2,
        )
        assert expected_args == get_connection_args(redshift_conn_obj)
    def test_singleStore_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        singleStore_conn_obj = SingleStoreConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments=None,
            scheme=SingleStoreScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(singleStore_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        singleStore_conn_obj = SingleStoreConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=SingleStoreScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(singleStore_conn_obj)
    def test_db2_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        db2_conn_obj = Db2Connection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments=None,
            scheme=Db2Scheme.db2_ibm_db,
        )
        assert expected_args == get_connection_args(db2_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        db2_conn_obj = Db2Connection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=Db2Scheme.db2_ibm_db,
        )
        assert expected_args == get_connection_args(db2_conn_obj)
    def test_snowflake_conn_arguments(self):
        # connection arguments without connectionArguments
        expected_args = {}
        snowflake_conn_obj = SnowflakeConnection(
            username="user",
            password=None,
            database="tiny",
            connectionArguments=None,
            scheme=SnowflakeScheme.snowflake,
            account="account.region_name.cloud_service",
        )
        assert expected_args == get_connection_args(snowflake_conn_obj)
        # connection arguments with connectionArguments
        expected_args = {"user": "user-to-be-impersonated"}
        snowflake_conn_obj = SnowflakeConnection(
            username="user",
            password=None,
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=SnowflakeScheme.snowflake,
            account="account.region_name.cloud_service",
        )
        assert expected_args == get_connection_args(snowflake_conn_obj)
    def test_athena_url(self):
        # connection arguments without db
        awsCreds = awsCredentials.AWSCredentials(
            awsAccessKeyId="key", awsRegion="us-east-2", awsSecretAccessKey="secret_key"
        )
        expected_url = "awsathena+rest://key:secret_key@athena.us-east-2.amazonaws.com:443?s3_staging_dir=s3athena-postgres&work_group=primary"
        athena_conn_obj = AthenaConnection(
            awsConfig=awsCreds,
            s3StagingDir="s3athena-postgres",
            workgroup="primary",
            scheme=AthenaScheme.awsathena_rest,
            database=None,
        )
        assert expected_url == get_connection_url(athena_conn_obj)
        # connection arguments with db
        expected_url = "awsathena+rest://key:secret_key@athena.us-east-2.amazonaws.com:443/test?s3_staging_dir=s3athena-postgres&work_group=primary"
        athena_conn_obj = AthenaConnection(
            awsConfig=awsCreds,
            s3StagingDir="s3athena-postgres",
            workgroup="primary",
            scheme=AthenaScheme.awsathena_rest,
            database="test",
        )
        assert expected_url == get_connection_url(athena_conn_obj)
    def test_mssql_url(self):
        # connection arguments without db
        expected_url = "mssql+pytds://sa:password@localhost:1433"
        mssql_conn_obj = MssqlConnection(
            username="sa",
            password="password",
            hostPort="localhost:1433",
            scheme=MssqlScheme.mssql_pytds,
            database=None,
        )
        assert expected_url == get_connection_url(mssql_conn_obj)
        # connection arguments with db
        expected_url = "mssql+pytds://sa:password@localhost:1433/catalog_test"
        mssql_conn_obj = MssqlConnection(
            username="sa",
            password="password",
            hostPort="localhost:1433",
            scheme=MssqlScheme.mssql_pytds,
            database="catalog_test",
        )
        assert expected_url == get_connection_url(mssql_conn_obj)
    def test_presto_url(self):
        # connection arguments without db
        expected_url = "presto://admin@localhost:8080/test_catalog"
        presto_conn_obj = PrestoConnection(
            username="admin",
            hostPort="localhost:8080",
            scheme=PrestoScheme.presto,
            catalog="test_catalog",
        )
        assert expected_url == get_connection_url(presto_conn_obj)
| 38.502817 | 148 | 0.64934 |
from unittest import TestCase
from metadata.generated.schema.entity.services.connections.database.athenaConnection import (
AthenaConnection,
AthenaScheme,
)
from metadata.generated.schema.entity.services.connections.database.clickhouseConnection import (
ClickhouseConnection,
ClickhouseScheme,
)
from metadata.generated.schema.entity.services.connections.database.databricksConnection import (
DatabricksConnection,
DatabricksScheme,
)
from metadata.generated.schema.entity.services.connections.database.db2Connection import (
Db2Connection,
Db2Scheme,
)
from metadata.generated.schema.entity.services.connections.database.druidConnection import (
DruidConnection,
DruidScheme,
)
from metadata.generated.schema.entity.services.connections.database.hiveConnection import (
HiveConnection,
HiveScheme,
)
from metadata.generated.schema.entity.services.connections.database.mariaDBConnection import (
MariaDBConnection,
MariaDBScheme,
)
from metadata.generated.schema.entity.services.connections.database.mssqlConnection import (
MssqlConnection,
MssqlScheme,
)
from metadata.generated.schema.entity.services.connections.database.mysqlConnection import (
MysqlConnection,
MySQLScheme,
)
from metadata.generated.schema.entity.services.connections.database.pinotDBConnection import (
PinotDBConnection,
PinotDBScheme,
)
from metadata.generated.schema.entity.services.connections.database.postgresConnection import (
PostgresConnection,
PostgresScheme,
)
from metadata.generated.schema.entity.services.connections.database.prestoConnection import (
PrestoConnection,
PrestoScheme,
)
from metadata.generated.schema.entity.services.connections.database.redshiftConnection import (
RedshiftConnection,
RedshiftScheme,
)
from metadata.generated.schema.entity.services.connections.database.singleStoreConnection import (
SingleStoreConnection,
SingleStoreScheme,
)
from metadata.generated.schema.entity.services.connections.database.snowflakeConnection import (
SnowflakeConnection,
SnowflakeScheme,
)
from metadata.generated.schema.entity.services.connections.database.trinoConnection import (
TrinoConnection,
TrinoScheme,
)
from metadata.generated.schema.entity.services.connections.database.verticaConnection import (
VerticaConnection,
VerticaScheme,
)
from metadata.generated.schema.security.credentials import awsCredentials
from metadata.utils.source_connections import get_connection_args, get_connection_url
class SouceConnectionTest(TestCase):
    """Unit tests for the ``get_connection_url``/``get_connection_args`` helpers.

    Each test builds a typed connection object and pins the exact SQLAlchemy
    URL string (or the connection-arguments dict) the helper must produce.

    NOTE(review): class name looks like a typo ("Souce" -> "Source"); kept
    as-is to avoid breaking any external references to this test class.
    """

    def test_databricks_url_without_db(self):
        """Databricks URL has no trailing database segment when none is set."""
        expected_result = (
            "databricks+connector://token:KlivDTACWXKmZVfN1qIM@1.1.1.1:443"
        )
        databricks_conn_obj = DatabricksConnection(
            scheme=DatabricksScheme.databricks_connector,
            hostPort="1.1.1.1:443",
            token="KlivDTACWXKmZVfN1qIM",
        )
        assert expected_result == get_connection_url(databricks_conn_obj)

    def test_databricks_url_with_db(self):
        """Databricks URL appends ``/<database>`` when a database is set."""
        expected_result = (
            "databricks+connector://token:KlivDTACWXKmZVfN1qIM@1.1.1.1:443/default"
        )
        databricks_conn_obj = DatabricksConnection(
            scheme=DatabricksScheme.databricks_connector,
            hostPort="1.1.1.1:443",
            token="KlivDTACWXKmZVfN1qIM",
            database="default",
        )
        assert expected_result == get_connection_url(databricks_conn_obj)

    def test_hive_url(self):
        """Plain Hive URL without credentials."""
        expected_result = "hive://localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive, hostPort="localhost:10000", database="default"
        )
        assert expected_result == get_connection_url(hive_conn_obj)

    def test_hive_url_custom_auth(self):
        """Hive URL embeds user:password when auth mode is CUSTOM."""
        expected_result = "hive://username:password@localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive.value,
            username="username",
            password="password",
            hostPort="localhost:10000",
            database="default",
            connectionArguments={"auth": "CUSTOM"},
        )
        assert expected_result == get_connection_url(hive_conn_obj)

    def test_hive_url_with_kerberos_auth(self):
        """Hive URL omits credentials when auth mode is KERBEROS."""
        expected_result = "hive://localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive.value,
            hostPort="localhost:10000",
            database="default",
            connectionArguments={
                "auth": "KERBEROS",
                "kerberos_service_name": "hive",
            },
        )
        assert expected_result == get_connection_url(hive_conn_obj)

    def test_hive_url_with_ldap_auth(self):
        """Hive URL embeds user:password when auth mode is LDAP."""
        expected_result = "hive://username:password@localhost:10000/default"
        hive_conn_obj = HiveConnection(
            scheme=HiveScheme.hive.value,
            username="username",
            password="password",
            hostPort="localhost:10000",
            database="default",
            connectionArguments={"auth": "LDAP"},
        )
        assert expected_result == get_connection_url(hive_conn_obj)

    def test_trino_url_without_params(self):
        """Trino URL without extra query parameters."""
        expected_url = "trino://username:pass@localhost:443/catalog"
        trino_conn_obj = TrinoConnection(
            scheme=TrinoScheme.trino,
            hostPort="localhost:443",
            username="username",
            password="pass",
            catalog="catalog",
        )
        assert expected_url == get_connection_url(trino_conn_obj)

    def test_trino_conn_arguments(self):
        """Trino connection args: passthrough of connectionArguments, proxies add an http_session."""
        # no connectionArguments -> empty dict
        expected_args = {}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments=None,
            scheme=TrinoScheme.trino,
        )
        assert expected_args == get_connection_args(trino_conn_obj)
        # connectionArguments are passed through untouched
        expected_args = {"user": "user-to-be-impersonated"}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=TrinoScheme.trino,
        )
        assert expected_args == get_connection_args(trino_conn_obj)
        # proxies only: an "http_session" key is injected on top
        expected_args = {}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments=None,
            proxies={"http": "foo.bar:3128", "http://host.name": "foo.bar:4012"},
            scheme=TrinoScheme.trino,
        )
        conn_args = get_connection_args(trino_conn_obj)
        assert "http_session" in conn_args
        conn_args.pop("http_session")
        assert expected_args == conn_args
        # proxies + connectionArguments combine
        expected_args = {"user": "user-to-be-impersonated"}
        trino_conn_obj = TrinoConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            catalog="tpcds",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            proxies={"http": "foo.bar:3128", "http://host.name": "foo.bar:4012"},
            scheme=TrinoScheme.trino,
        )
        conn_args = get_connection_args(trino_conn_obj)
        assert "http_session" in conn_args
        conn_args.pop("http_session")
        assert expected_args == conn_args

    def test_trino_url_with_params(self):
        """Trino URL appends ``params`` as query-string key/value pairs."""
        expected_url = "trino://username:pass@localhost:443/catalog?param=value"
        trino_conn_obj = TrinoConnection(
            scheme=TrinoScheme.trino,
            hostPort="localhost:443",
            username="username",
            password="pass",
            catalog="catalog",
            params={"param": "value"},
        )
        assert expected_url == get_connection_url(trino_conn_obj)

    def test_trino_with_proxies(self):
        """Proxies end up on the requests session stored under ``http_session``."""
        test_proxies = {"http": "http_proxy", "https": "https_proxy"}
        trino_conn_obj = TrinoConnection(
            scheme=TrinoScheme.trino,
            hostPort="localhost:443",
            username="username",
            password="pass",
            catalog="catalog",
            proxies=test_proxies,
        )
        assert (
            test_proxies
            == get_connection_args(trino_conn_obj).get("http_session").proxies
        )

    def test_vertica_url(self):
        """Vertica URL via the vertica_python dialect."""
        expected_url = (
            "vertica+vertica_python://username:password@localhost:5443/database"
        )
        vertica_conn_obj = VerticaConnection(
            scheme=VerticaScheme.vertica_vertica_python,
            hostPort="localhost:5443",
            username="username",
            password="password",
            database="database",
        )
        assert expected_url == get_connection_url(vertica_conn_obj)

    def test_druid_url(self):
        """Druid URL includes the fixed ``druid/v2/sql`` endpoint path."""
        expected_url = "druid://localhost:8082/druid/v2/sql"
        druid_conn_obj = DruidConnection(
            scheme=DruidScheme.druid, hostPort="localhost:8082"
        )
        assert expected_url == get_connection_url(druid_conn_obj)

    def test_pinotdb_url(self):
        """Pinot URL carries the controller host as a ``controller`` query param."""
        expected_url = (
            "pinot://localhost:8099/query/sql?controller=http://localhost:9000/"
        )
        pinot_conn_obj = PinotDBConnection(
            scheme=PinotDBScheme.pinot,
            hostPort="localhost:8099",
            pinotControllerHost="http://localhost:9000/",
        )
        assert expected_url == get_connection_url(pinot_conn_obj)

    def test_mysql_url(self):
        """MySQL URL with and without a database segment."""
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306"
        mysql_conn_obj = MysqlConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MySQLScheme.mysql_pymysql,
            database=None,
        )
        assert expected_url == get_connection_url(mysql_conn_obj)
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306/default"
        mysql_conn_obj = MysqlConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MySQLScheme.mysql_pymysql,
            database="default",
        )
        assert expected_url == get_connection_url(mysql_conn_obj)

    def test_clickhouse_url(self):
        """Clickhouse URL with and without a database segment."""
        expected_url = "clickhouse+http://username:@localhost:8123"
        clickhouse_conn_obj = ClickhouseConnection(
            username="username",
            hostPort="localhost:8123",
            scheme=ClickhouseScheme.clickhouse_http,
            database=None,
        )
        assert expected_url == get_connection_url(clickhouse_conn_obj)
        expected_url = "clickhouse+http://username:@localhost:8123/default"
        clickhouse_conn_obj = ClickhouseConnection(
            username="username",
            hostPort="localhost:8123",
            scheme=ClickhouseScheme.clickhouse_http,
            database="default",
        )
        assert expected_url == get_connection_url(clickhouse_conn_obj)

    def test_mariadb_url(self):
        """MariaDB reuses the pymysql dialect; with and without a database."""
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306"
        mariadb_conn_obj = MariaDBConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MariaDBScheme.mysql_pymysql,
            database=None,
        )
        assert expected_url == get_connection_url(mariadb_conn_obj)
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:3306/default"
        mariadb_conn_obj = MariaDBConnection(
            username="openmetadata_user",
            hostPort="localhost:3306",
            scheme=MariaDBScheme.mysql_pymysql,
            database="default",
        )
        assert expected_url == get_connection_url(mariadb_conn_obj)

    def test_postgres_url(self):
        """Postgres URL with and without a database segment."""
        expected_url = "postgresql+psycopg2://openmetadata_user:@localhost:5432"
        postgres_conn_obj = PostgresConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=PostgresScheme.postgresql_psycopg2,
            database=None,
        )
        assert expected_url == get_connection_url(postgres_conn_obj)
        expected_url = "postgresql+psycopg2://openmetadata_user:@localhost:5432/default"
        postgres_conn_obj = PostgresConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=PostgresScheme.postgresql_psycopg2,
            database="default",
        )
        assert expected_url == get_connection_url(postgres_conn_obj)

    def test_redshift_url(self):
        """Redshift URL with credentials, cluster host and database."""
        expected_url = "redshift+psycopg2://username:strong_password@cluster.name.region.redshift.amazonaws.com:5439/dev"
        redshift_conn_obj = RedshiftConnection(
            username="username",
            password="strong_password",
            hostPort="cluster.name.region.redshift.amazonaws.com:5439",
            scheme=RedshiftScheme.redshift_psycopg2,
            database="dev",
        )
        assert expected_url == get_connection_url(redshift_conn_obj)

    def test_singleStore_url(self):
        """SingleStore reuses the pymysql dialect; with and without a database."""
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:5432"
        singleStore_conn_obj = SingleStoreConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=SingleStoreScheme.mysql_pymysql,
            database=None,
        )
        assert expected_url == get_connection_url(singleStore_conn_obj)
        expected_url = "mysql+pymysql://openmetadata_user:@localhost:5432/default"
        singleStore_conn_obj = SingleStoreConnection(
            username="openmetadata_user",
            hostPort="localhost:5432",
            scheme=SingleStoreScheme.mysql_pymysql,
            database="default",
        )
        assert expected_url == get_connection_url(singleStore_conn_obj)

    def test_db2_url(self):
        """DB2 URL with and without a database segment."""
        expected_url = "db2+ibm_db://openmetadata_user:@localhost:50000"
        db2_conn_obj = Db2Connection(
            scheme=Db2Scheme.db2_ibm_db,
            username="openmetadata_user",
            hostPort="localhost:50000",
            database=None,
        )
        assert expected_url == get_connection_url(db2_conn_obj)
        expected_url = "db2+ibm_db://openmetadata_user:@localhost:50000/default"
        db2_conn_obj = Db2Connection(
            username="openmetadata_user",
            hostPort="localhost:50000",
            scheme=Db2Scheme.db2_ibm_db,
            database="default",
        )
        assert expected_url == get_connection_url(db2_conn_obj)

    def test_snowflake_url(self):
        """Snowflake URL carries account/warehouse as query params; optional db."""
        expected_url = "snowflake://coding:Abhi@ue18849.us-east-2.aws?account=ue18849.us-east-2.aws&warehouse=COMPUTE_WH"
        snowflake_conn_obj = SnowflakeConnection(
            scheme=SnowflakeScheme.snowflake,
            username="coding",
            password="Abhi",
            warehouse="COMPUTE_WH",
            account="ue18849.us-east-2.aws",
        )
        assert expected_url == get_connection_url(snowflake_conn_obj)
        expected_url = "snowflake://coding:Abhi@ue18849.us-east-2.aws/testdb?account=ue18849.us-east-2.aws&warehouse=COMPUTE_WH"
        snowflake_conn_obj = SnowflakeConnection(
            scheme=SnowflakeScheme.snowflake,
            username="coding",
            password="Abhi",
            database="testdb",
            warehouse="COMPUTE_WH",
            account="ue18849.us-east-2.aws",
        )
        assert expected_url == get_connection_url(snowflake_conn_obj)

    def test_mysql_conn_arguments(self):
        """MySQL connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        mysql_conn_obj = MysqlConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=MySQLScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mysql_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        mysql_conn_obj = MysqlConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=MySQLScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mysql_conn_obj)

    def test_clickhouse_conn_arguments(self):
        """Clickhouse connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        clickhouse_conn_obj = ClickhouseConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=ClickhouseScheme.clickhouse_http,
        )
        assert expected_args == get_connection_args(clickhouse_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        clickhouse_conn_obj = ClickhouseConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=ClickhouseScheme.clickhouse_http,
        )
        assert expected_args == get_connection_args(clickhouse_conn_obj)

    def test_mariadb_conn_arguments(self):
        """MariaDB connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        mariadb_conn_obj = MariaDBConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=MariaDBScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mariadb_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        mariadb_conn_obj = MariaDBConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=MariaDBScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(mariadb_conn_obj)

    def test_postgres_conn_arguments(self):
        """Postgres connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        postgres_conn_obj = PostgresConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database=None,
            connectionArguments=None,
            scheme=PostgresScheme.postgresql_psycopg2,
        )
        assert expected_args == get_connection_args(postgres_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        postgres_conn_obj = PostgresConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=PostgresScheme.postgresql_psycopg2,
        )
        assert expected_args == get_connection_args(postgres_conn_obj)

    def test_redshift_conn_arguments(self):
        """Redshift connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        redshift_conn_obj = RedshiftConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments=None,
            scheme=RedshiftScheme.redshift_psycopg2,
        )
        assert expected_args == get_connection_args(redshift_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        redshift_conn_obj = RedshiftConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=RedshiftScheme.redshift_psycopg2,
        )
        assert expected_args == get_connection_args(redshift_conn_obj)

    def test_singleStore_conn_arguments(self):
        """SingleStore connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        singleStore_conn_obj = SingleStoreConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments=None,
            scheme=SingleStoreScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(singleStore_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        singleStore_conn_obj = SingleStoreConnection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=SingleStoreScheme.mysql_pymysql,
        )
        assert expected_args == get_connection_args(singleStore_conn_obj)

    def test_db2_conn_arguments(self):
        """DB2 connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        db2_conn_obj = Db2Connection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments=None,
            scheme=Db2Scheme.db2_ibm_db,
        )
        assert expected_args == get_connection_args(db2_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        db2_conn_obj = Db2Connection(
            username="user",
            password=None,
            hostPort="localhost:443",
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=Db2Scheme.db2_ibm_db,
        )
        assert expected_args == get_connection_args(db2_conn_obj)

    def test_snowflake_conn_arguments(self):
        """Snowflake connection args: empty by default, passthrough otherwise."""
        expected_args = {}
        snowflake_conn_obj = SnowflakeConnection(
            username="user",
            password=None,
            database="tiny",
            connectionArguments=None,
            scheme=SnowflakeScheme.snowflake,
            account="account.region_name.cloud_service",
        )
        assert expected_args == get_connection_args(snowflake_conn_obj)
        expected_args = {"user": "user-to-be-impersonated"}
        snowflake_conn_obj = SnowflakeConnection(
            username="user",
            password=None,
            database="tiny",
            connectionArguments={"user": "user-to-be-impersonated"},
            scheme=SnowflakeScheme.snowflake,
            account="account.region_name.cloud_service",
        )
        assert expected_args == get_connection_args(snowflake_conn_obj)

    def test_athena_url(self):
        """Athena URL derives the host from awsRegion; db segment optional."""
        awsCreds = awsCredentials.AWSCredentials(
            awsAccessKeyId="key", awsRegion="us-east-2", awsSecretAccessKey="secret_key"
        )
        expected_url = "awsathena+rest://key:secret_key@athena.us-east-2.amazonaws.com:443?s3_staging_dir=s3athena-postgres&work_group=primary"
        athena_conn_obj = AthenaConnection(
            awsConfig=awsCreds,
            s3StagingDir="s3athena-postgres",
            workgroup="primary",
            scheme=AthenaScheme.awsathena_rest,
            database=None,
        )
        assert expected_url == get_connection_url(athena_conn_obj)
        expected_url = "awsathena+rest://key:secret_key@athena.us-east-2.amazonaws.com:443/test?s3_staging_dir=s3athena-postgres&work_group=primary"
        athena_conn_obj = AthenaConnection(
            awsConfig=awsCreds,
            s3StagingDir="s3athena-postgres",
            workgroup="primary",
            scheme=AthenaScheme.awsathena_rest,
            database="test",
        )
        assert expected_url == get_connection_url(athena_conn_obj)

    def test_mssql_url(self):
        """MSSQL URL via pytds; with and without a database segment."""
        expected_url = "mssql+pytds://sa:password@localhost:1433"
        mssql_conn_obj = MssqlConnection(
            username="sa",
            password="password",
            hostPort="localhost:1433",
            scheme=MssqlScheme.mssql_pytds,
            database=None,
        )
        assert expected_url == get_connection_url(mssql_conn_obj)
        expected_url = "mssql+pytds://sa:password@localhost:1433/catalog_test"
        mssql_conn_obj = MssqlConnection(
            username="sa",
            password="password",
            hostPort="localhost:1433",
            scheme=MssqlScheme.mssql_pytds,
            database="catalog_test",
        )
        assert expected_url == get_connection_url(mssql_conn_obj)

    def test_presto_url(self):
        """Presto URL without a password; catalog becomes the path segment."""
        expected_url = "presto://admin@localhost:8080/test_catalog"
        presto_conn_obj = PrestoConnection(
            username="admin",
            hostPort="localhost:8080",
            scheme=PrestoScheme.presto,
            catalog="test_catalog",
        )
        assert expected_url == get_connection_url(presto_conn_obj)
| true | true |
f7f80aec02b08289770e63c9425a37283bb4fbab | 2,044 | py | Python | ThirdParty/pybluez2-macos_fix/examples/simple/asynchronous-inquiry.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | 2 | 2021-11-13T14:16:06.000Z | 2022-01-12T06:07:32.000Z | ThirdParty/pybluez-master/examples/simple/asynchronous-inquiry.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | null | null | null | ThirdParty/pybluez-master/examples/simple/asynchronous-inquiry.py | zhaocy14/SmartWalker | b025a7b4a2b305838a22fe4e6116ddb951c4d7bf | [
"MIT"
] | 3 | 2021-08-30T04:40:39.000Z | 2022-01-09T11:34:04.000Z | #!/usr/bin/env python3
"""PyBluez simple example asyncronous-inquiry.py
Demonstration of how to do asynchronous device discovery by subclassing
the DeviceDiscoverer class
Linux only (5/5/2006)
Author: Albert Huang <albert@csail.mit.edu>
$Id: asynchronous-inquiry.py 405 2006-05-06 00:39:50Z albert $
"""
import select
import bluetooth
class MyDiscoverer(bluetooth.DeviceDiscoverer):
    """Asynchronous Bluetooth inquiry: prints each device and its class bits.

    ``self.done`` is the completion flag polled by the driving select() loop.
    """

    def pre_inquiry(self):
        """Reset the completion flag before an inquiry starts."""
        self.done = False

    def device_discovered(self, address, device_class, rssi, name):
        """Print address/name plus decoded major class and service bits."""
        print("{} - {}".format(address, name))

        # get some information out of the device class and display it.
        # voodoo magic specified at:
        # https://www.bluetooth.org/foundry/assignnumb/document/baseband
        major_classes = ("Miscellaneous",
                         "Computer",
                         "Phone",
                         "LAN/Network Access Point",
                         "Audio/Video",
                         "Peripheral",
                         "Imaging")
        # major device class lives in bits 8-12 of the CoD field
        major_class = (device_class >> 8) & 0xf
        if major_class < 7:
            print("  " + major_classes[major_class])
        else:
            print("  Uncategorized")

        print("  Services:")
        # each service class is a single bit in the upper CoD bits
        service_classes = ((16, "positioning"),
                           (17, "networking"),
                           (18, "rendering"),
                           (19, "capturing"),
                           (20, "object transfer"),
                           (21, "audio"),
                           (22, "telephony"),
                           (23, "information"))

        for bitpos, classname in service_classes:
            if device_class & (1 << (bitpos-1)):
                print("  ", classname)

        print("  RSSI:", rssi)

    def inquiry_complete(self):
        """Mark the inquiry as finished so the event loop can exit."""
        self.done = True
# Drive the discoverer with a select() loop: the discoverer object itself is
# selectable, so we wait until it has an event ready, process it, and stop
# once inquiry_complete() has set the done flag.
d = MyDiscoverer()
d.find_devices(lookup_names=True)

readfiles = [d, ]

while True:
    rfds = select.select(readfiles, [], [])[0]

    if d in rfds:
        d.process_event()

    if d.done:
        break
| 28 | 72 | 0.526908 |
import select
import bluetooth
class MyDiscoverer(bluetooth.DeviceDiscoverer):
    """Asynchronous Bluetooth inquiry: prints each device and its class bits.

    ``self.done`` is the completion flag polled by the driving select() loop.
    """

    def pre_inquiry(self):
        """Reset the completion flag before an inquiry starts."""
        self.done = False

    def device_discovered(self, address, device_class, rssi, name):
        """Print address/name plus decoded major class and service bits.

        The class-of-device decoding follows the Bluetooth SIG baseband
        assigned-numbers document.
        """
        print("{} - {}".format(address, name))
        major_classes = ("Miscellaneous",
                         "Computer",
                         "Phone",
                         "LAN/Network Access Point",
                         "Audio/Video",
                         "Peripheral",
                         "Imaging")
        # major device class lives in bits 8-12 of the CoD field
        major_class = (device_class >> 8) & 0xf
        if major_class < 7:
            print("  " + major_classes[major_class])
        else:
            print("  Uncategorized")
        print("  Services:")
        # each service class is a single bit in the upper CoD bits
        service_classes = ((16, "positioning"),
                           (17, "networking"),
                           (18, "rendering"),
                           (19, "capturing"),
                           (20, "object transfer"),
                           (21, "audio"),
                           (22, "telephony"),
                           (23, "information"))
        for bitpos, classname in service_classes:
            if device_class & (1 << (bitpos-1)):
                print("  ", classname)
        print("  RSSI:", rssi)

    def inquiry_complete(self):
        """Mark the inquiry as finished so the event loop can exit."""
        self.done = True
# Kick off an asynchronous inquiry and pump its events with select() until
# inquiry_complete() flips the done flag.
discoverer = MyDiscoverer()
discoverer.find_devices(lookup_names=True)

watched = [discoverer]
while True:
    ready, _, _ = select.select(watched, [], [])
    if discoverer in ready:
        discoverer.process_event()
    if discoverer.done:
        break
| true | true |
f7f80b7f66713349e306f8d26cffecfe7c33203c | 341 | py | Python | test_utils/__init__.py | nedbat-test-external/api-doc-tools | 54a0c81a676b075d372e15705b6d0a133b098e1c | [
"Apache-2.0"
] | 4 | 2020-04-10T03:07:41.000Z | 2021-12-02T04:47:19.000Z | test_utils/__init__.py | nedbat-test-external/api-doc-tools | 54a0c81a676b075d372e15705b6d0a133b098e1c | [
"Apache-2.0"
] | 68 | 2019-10-29T20:22:28.000Z | 2021-12-09T08:07:44.000Z | test_utils/__init__.py | openedx/api-doc-tools | 65e2989324f079dc9b9c652042730cd01d7b3709 | [
"Apache-2.0"
] | 6 | 2020-04-30T19:04:26.000Z | 2021-06-08T08:25:25.000Z | """
Test utilities.

Since pytest discourages putting an __init__.py in the tests directory
(i.e. making tests a package), test modules cannot import helpers located
under the tests folder. However, some utilities (e.g. factoryboy factories,
base test classes) are useful across multiple test modules, so this package
is the place to put them.
"""
| 34.1 | 85 | 0.782991 | true | true | |
f7f80c2ebf3335ee369f21e6a5be40468a97f63a | 6,328 | py | Python | tests/components/knx/test_select.py | EuleMitKeule/core | 3af54d96c7dcc3f7087d1196e6ab0db029301ee7 | [
"Apache-2.0"
] | 6 | 2017-11-15T09:56:41.000Z | 2021-01-24T15:12:09.000Z | tests/components/knx/test_select.py | EuleMitKeule/core | 3af54d96c7dcc3f7087d1196e6ab0db029301ee7 | [
"Apache-2.0"
] | 87 | 2020-07-15T13:43:35.000Z | 2022-03-23T07:43:10.000Z | tests/components/knx/test_select.py | EuleMitKeule/core | 3af54d96c7dcc3f7087d1196e6ab0db029301ee7 | [
"Apache-2.0"
] | 2 | 2020-06-06T21:55:32.000Z | 2022-03-06T04:18:21.000Z | """Test KNX select."""
from unittest.mock import patch
import pytest
from homeassistant.components.knx.const import (
CONF_PAYLOAD,
CONF_PAYLOAD_LENGTH,
CONF_RESPOND_TO_READ,
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
KNX_ADDRESS,
)
from homeassistant.components.knx.schema import SelectSchema
from homeassistant.const import CONF_NAME, STATE_UNKNOWN
from homeassistant.core import HomeAssistant, State
from .conftest import KNXTestKit
async def test_select_dpt_2_simple(hass: HomeAssistant, knx: KNXTestKit):
    """Test simple KNX select: service calls write to the bus, bus writes
    update HA state, unknown payloads reset state to unknown."""
    _options = [
        {CONF_PAYLOAD: 0b00, SelectSchema.CONF_OPTION: "No control"},
        {CONF_PAYLOAD: 0b10, SelectSchema.CONF_OPTION: "Control - Off"},
        {CONF_PAYLOAD: 0b11, SelectSchema.CONF_OPTION: "Control - On"},
    ]
    test_address = "1/1/1"
    await knx.setup_integration(
        {
            SelectSchema.PLATFORM_NAME: {
                CONF_NAME: "test",
                KNX_ADDRESS: test_address,
                CONF_SYNC_STATE: False,
                CONF_PAYLOAD_LENGTH: 0,
                SelectSchema.CONF_OPTIONS: _options,
            }
        }
    )
    assert len(hass.states.async_all()) == 1
    state = hass.states.get("select.test")
    assert state.state is STATE_UNKNOWN
    # select an option
    await hass.services.async_call(
        "select",
        "select_option",
        {"entity_id": "select.test", "option": "Control - Off"},
        blocking=True,
    )
    await knx.assert_write(test_address, 0b10)
    state = hass.states.get("select.test")
    assert state.state == "Control - Off"
    # select another option
    await hass.services.async_call(
        "select",
        "select_option",
        {"entity_id": "select.test", "option": "No control"},
        blocking=True,
    )
    await knx.assert_write(test_address, 0b00)
    state = hass.states.get("select.test")
    assert state.state == "No control"
    # don't answer to GroupValueRead requests by default
    await knx.receive_read(test_address)
    await knx.assert_no_telegram()
    # update from KNX
    await knx.receive_write(test_address, 0b11)
    state = hass.states.get("select.test")
    assert state.state == "Control - On"
    # update from KNX with undefined value (0b01 is not a configured payload)
    await knx.receive_write(test_address, 0b01)
    state = hass.states.get("select.test")
    assert state.state is STATE_UNKNOWN
    # select invalid option raises and must not send a telegram
    with pytest.raises(ValueError):
        await hass.services.async_call(
            "select",
            "select_option",
            {"entity_id": "select.test", "option": "invalid"},
            blocking=True,
        )
    await knx.assert_no_telegram()
async def test_select_dpt_2_restore(hass: HomeAssistant, knx: KNXTestKit):
    """Test KNX select with passive_address and respond_to_read restoring state."""
    _options = [
        {CONF_PAYLOAD: 0b00, SelectSchema.CONF_OPTION: "No control"},
        {CONF_PAYLOAD: 0b10, SelectSchema.CONF_OPTION: "Control - Off"},
        {CONF_PAYLOAD: 0b11, SelectSchema.CONF_OPTION: "Control - On"},
    ]
    test_address = "1/1/1"
    test_passive_address = "3/3/3"
    # fake a previously stored HA state so RestoreEntity picks it up on setup
    fake_state = State("select.test", "Control - On")
    with patch(
        "homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
        return_value=fake_state,
    ):
        await knx.setup_integration(
            {
                SelectSchema.PLATFORM_NAME: {
                    CONF_NAME: "test",
                    KNX_ADDRESS: [test_address, test_passive_address],
                    CONF_RESPOND_TO_READ: True,
                    CONF_PAYLOAD_LENGTH: 0,
                    SelectSchema.CONF_OPTIONS: _options,
                }
            }
        )
    # restored state - doesn't send telegram
    state = hass.states.get("select.test")
    assert state.state == "Control - On"
    await knx.assert_telegram_count(0)
    # respond with restored state (payload 3 == 0b11 == "Control - On")
    await knx.receive_read(test_address)
    await knx.assert_response(test_address, 3)
    # don't respond to passive address
    await knx.receive_read(test_passive_address)
    await knx.assert_no_telegram()
async def test_select_dpt_20_103_all_options(hass: HomeAssistant, knx: KNXTestKit):
    """Test KNX select with state_address, passive_address and respond_to_read."""
    _options = [
        {CONF_PAYLOAD: 0, SelectSchema.CONF_OPTION: "Auto"},
        {CONF_PAYLOAD: 1, SelectSchema.CONF_OPTION: "Legio protect"},
        {CONF_PAYLOAD: 2, SelectSchema.CONF_OPTION: "Normal"},
        {CONF_PAYLOAD: 3, SelectSchema.CONF_OPTION: "Reduced"},
        {CONF_PAYLOAD: 4, SelectSchema.CONF_OPTION: "Off"},
    ]
    test_address = "1/1/1"
    test_state_address = "2/2/2"
    test_passive_address = "3/3/3"
    await knx.setup_integration(
        {
            SelectSchema.PLATFORM_NAME: {
                CONF_NAME: "test",
                KNX_ADDRESS: [test_address, test_passive_address],
                CONF_STATE_ADDRESS: test_state_address,
                CONF_RESPOND_TO_READ: True,
                CONF_PAYLOAD_LENGTH: 1,
                SelectSchema.CONF_OPTIONS: _options,
            }
        }
    )
    assert len(hass.states.async_all()) == 1
    state = hass.states.get("select.test")
    assert state.state is STATE_UNKNOWN
    # StateUpdater initialize state (reads the state address once on startup)
    await knx.assert_read(test_state_address)
    await knx.receive_response(test_state_address, (2,))
    state = hass.states.get("select.test")
    assert state.state == "Normal"
    # select an option
    await hass.services.async_call(
        "select",
        "select_option",
        {"entity_id": "select.test", "option": "Legio protect"},
        blocking=True,
    )
    await knx.assert_write(test_address, (1,))
    state = hass.states.get("select.test")
    assert state.state == "Legio protect"
    # answer to GroupValueRead requests
    await knx.receive_read(test_address)
    await knx.assert_response(test_address, (1,))
    # update from KNX state_address
    await knx.receive_write(test_state_address, (3,))
    state = hass.states.get("select.test")
    assert state.state == "Reduced"
    # update from KNX passive_address
    await knx.receive_write(test_passive_address, (4,))
    state = hass.states.get("select.test")
    assert state.state == "Off"
| 33.305263 | 83 | 0.645702 | from unittest.mock import patch
import pytest
from homeassistant.components.knx.const import (
CONF_PAYLOAD,
CONF_PAYLOAD_LENGTH,
CONF_RESPOND_TO_READ,
CONF_STATE_ADDRESS,
CONF_SYNC_STATE,
KNX_ADDRESS,
)
from homeassistant.components.knx.schema import SelectSchema
from homeassistant.const import CONF_NAME, STATE_UNKNOWN
from homeassistant.core import HomeAssistant, State
from .conftest import KNXTestKit
async def test_select_dpt_2_simple(hass: HomeAssistant, knx: KNXTestKit):
    """Test simple KNX select without state sync."""
    _options = [
        {CONF_PAYLOAD: 0b00, SelectSchema.CONF_OPTION: "No control"},
        {CONF_PAYLOAD: 0b10, SelectSchema.CONF_OPTION: "Control - Off"},
        {CONF_PAYLOAD: 0b11, SelectSchema.CONF_OPTION: "Control - On"},
    ]
    test_address = "1/1/1"
    await knx.setup_integration(
        {
            SelectSchema.PLATFORM_NAME: {
                CONF_NAME: "test",
                KNX_ADDRESS: test_address,
                CONF_SYNC_STATE: False,
                CONF_PAYLOAD_LENGTH: 0,
                SelectSchema.CONF_OPTIONS: _options,
            }
        }
    )
    assert len(hass.states.async_all()) == 1
    state = hass.states.get("select.test")
    assert state.state is STATE_UNKNOWN
    # select an option via HA service -> expect a bus write
    await hass.services.async_call(
        "select",
        "select_option",
        {"entity_id": "select.test", "option": "Control - Off"},
        blocking=True,
    )
    await knx.assert_write(test_address, 0b10)
    state = hass.states.get("select.test")
    assert state.state == "Control - Off"
    # select another option
    await hass.services.async_call(
        "select",
        "select_option",
        {"entity_id": "select.test", "option": "No control"},
        blocking=True,
    )
    await knx.assert_write(test_address, 0b00)
    state = hass.states.get("select.test")
    assert state.state == "No control"
    # don't answer to GroupValueRead requests by default
    await knx.receive_read(test_address)
    await knx.assert_no_telegram()
    # update from KNX
    await knx.receive_write(test_address, 0b11)
    state = hass.states.get("select.test")
    assert state.state == "Control - On"
    # update from KNX with undefined value
    await knx.receive_write(test_address, 0b01)
    state = hass.states.get("select.test")
    assert state.state is STATE_UNKNOWN
    # select invalid option
    with pytest.raises(ValueError):
        await hass.services.async_call(
            "select",
            "select_option",
            {"entity_id": "select.test", "option": "invalid"},
            blocking=True,
        )
    await knx.assert_no_telegram()
async def test_select_dpt_2_restore(hass: HomeAssistant, knx: KNXTestKit):
    """Test KNX select restoring its state, with passive address and respond-to-read."""
    _options = [
        {CONF_PAYLOAD: 0b00, SelectSchema.CONF_OPTION: "No control"},
        {CONF_PAYLOAD: 0b10, SelectSchema.CONF_OPTION: "Control - Off"},
        {CONF_PAYLOAD: 0b11, SelectSchema.CONF_OPTION: "Control - On"},
    ]
    test_address = "1/1/1"
    test_passive_address = "3/3/3"
    # fake a previously stored HA state so RestoreEntity picks it up on setup
    fake_state = State("select.test", "Control - On")
    with patch(
        "homeassistant.helpers.restore_state.RestoreEntity.async_get_last_state",
        return_value=fake_state,
    ):
        await knx.setup_integration(
            {
                SelectSchema.PLATFORM_NAME: {
                    CONF_NAME: "test",
                    KNX_ADDRESS: [test_address, test_passive_address],
                    CONF_RESPOND_TO_READ: True,
                    CONF_PAYLOAD_LENGTH: 0,
                    SelectSchema.CONF_OPTIONS: _options,
                }
            }
        )
    # restored state - doesn't send telegram
    state = hass.states.get("select.test")
    assert state.state == "Control - On"
    await knx.assert_telegram_count(0)
    # respond with restored state (payload 3 == 0b11 == "Control - On")
    await knx.receive_read(test_address)
    await knx.assert_response(test_address, 3)
    # don't respond to a read on the passive address
    await knx.receive_read(test_passive_address)
    await knx.assert_no_telegram()
async def test_select_dpt_20_103_all_options(hass: HomeAssistant, knx: KNXTestKit):
    """Test KNX select with state_address, passive_address and respond_to_read."""
    _options = [
        {CONF_PAYLOAD: 0, SelectSchema.CONF_OPTION: "Auto"},
        {CONF_PAYLOAD: 1, SelectSchema.CONF_OPTION: "Legio protect"},
        {CONF_PAYLOAD: 2, SelectSchema.CONF_OPTION: "Normal"},
        {CONF_PAYLOAD: 3, SelectSchema.CONF_OPTION: "Reduced"},
        {CONF_PAYLOAD: 4, SelectSchema.CONF_OPTION: "Off"},
    ]
    test_address = "1/1/1"
    test_state_address = "2/2/2"
    test_passive_address = "3/3/3"
    await knx.setup_integration(
        {
            SelectSchema.PLATFORM_NAME: {
                CONF_NAME: "test",
                KNX_ADDRESS: [test_address, test_passive_address],
                CONF_STATE_ADDRESS: test_state_address,
                CONF_RESPOND_TO_READ: True,
                CONF_PAYLOAD_LENGTH: 1,
                SelectSchema.CONF_OPTIONS: _options,
            }
        }
    )
    assert len(hass.states.async_all()) == 1
    state = hass.states.get("select.test")
    assert state.state is STATE_UNKNOWN
    # StateUpdater initialize state
    await knx.assert_read(test_state_address)
    await knx.receive_response(test_state_address, (2,))
    state = hass.states.get("select.test")
    assert state.state == "Normal"
    # select an option
    await hass.services.async_call(
        "select",
        "select_option",
        {"entity_id": "select.test", "option": "Legio protect"},
        blocking=True,
    )
    await knx.assert_write(test_address, (1,))
    state = hass.states.get("select.test")
    assert state.state == "Legio protect"
    # answer to GroupValueRead requests
    await knx.receive_read(test_address)
    await knx.assert_response(test_address, (1,))
    # update from KNX state_address
    await knx.receive_write(test_state_address, (3,))
    state = hass.states.get("select.test")
    assert state.state == "Reduced"
    # update from KNX passive_address
    await knx.receive_write(test_passive_address, (4,))
    state = hass.states.get("select.test")
    assert state.state == "Off"
| true | true |
f7f80d337861a1cd114f639b31d30d328e3a00e8 | 319 | py | Python | shopyo/modules/box__default/settings/helpers.py | Attakay78/shopyo | a9f6e4a56e36edc9f5588323ebb5f9103c343b1f | [
"MIT"
] | 235 | 2019-06-30T22:21:29.000Z | 2022-03-31T06:12:12.000Z | shopyo/modules/box__default/settings/helpers.py | Attakay78/shopyo | a9f6e4a56e36edc9f5588323ebb5f9103c343b1f | [
"MIT"
] | 441 | 2019-06-26T20:07:58.000Z | 2021-05-05T17:44:23.000Z | shopyo/modules/box__default/settings/helpers.py | Attakay78/shopyo | a9f6e4a56e36edc9f5588323ebb5f9103c343b1f | [
"MIT"
] | 157 | 2019-06-26T22:30:39.000Z | 2022-03-22T09:06:24.000Z | from modules.box__default.settings.models import Settings
def get_setting(name):
    """Key-value lookup against the Settings table.

    Parameters
    ----------
    name: str
        name of the key to look up

    Returns
    -------
    str
        value stored under that key

    NOTE(review): Settings.query.get returns None for an unknown key, which
    makes the attribute access below raise AttributeError -- confirm that
    callers only ever pass existing keys.
    """
    record = Settings.query.get(name)
    return record.value
| 15.95 | 57 | 0.579937 | from modules.box__default.settings.models import Settings
def get_setting(name):
    """Return the value stored under *name* in the Settings table.

    NOTE(review): Settings.query.get returns None for an unknown key, so the
    attribute access below would raise AttributeError -- confirm callers only
    pass existing keys.
    """
    s = Settings.query.get(name)
    return s.value
| true | true |
f7f80d43df1289d0b16648e52eebd8018d919380 | 4,657 | py | Python | techminer2/slope_chart.py | jdvelasq/techminer-api | d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da | [
"MIT"
] | null | null | null | techminer2/slope_chart.py | jdvelasq/techminer-api | d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da | [
"MIT"
] | null | null | null | techminer2/slope_chart.py | jdvelasq/techminer-api | d2bb7d20c326f2fe7cc06d7005dfb3f2053ea1da | [
"MIT"
] | null | null | null | """
Slope Chart
===============================================================================
"""
import textwrap
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
# from .plots.multiindex2text import multindex2text
TEXTLEN = 35  # label wrap width; not referenced anywhere in this chunk -- presumably used via textwrap elsewhere, confirm
def slope_chart(
    matrix,
    figsize=(6, 6),
    cmap="Greys",
    cmap_by="Reds",
    fontsize=9,
):
    """Draw a slope chart linking the rows of *matrix* (left axis) to its
    columns (right axis).

    Every row/column label is expected to end in a " <num>:<num>" token
    (e.g. "fintech 12:130"): the number before the colon drives the marker
    size, the one after it drives the marker colour. The width and opacity
    of each connecting line encode the value of the corresponding cell;
    cells equal to 0 draw no line.

    :param matrix: pandas DataFrame of link strengths (rows -> columns).
    :param figsize: figure size in inches.
    :param cmap: colormap name for the right-hand markers.
    :param cmap_by: colormap name for the left-hand markers.
    :param fontsize: base font size for the figure.
    :returns: the matplotlib Figure object.
    """
    matrix = matrix.copy()
    # NOTE(review): multindex2text is referenced below but its import is
    # commented out at module level, so a MultiIndex input raises NameError.
    if isinstance(matrix.columns, pd.MultiIndex):
        matrix.columns = multindex2text(matrix.columns)
    if isinstance(matrix.index, pd.MultiIndex):
        matrix.index = multindex2text(matrix.index)
    matplotlib.rc("font", size=fontsize)
    fig = plt.Figure(figsize=figsize)
    ax = fig.subplots()
    cmap = plt.cm.get_cmap(cmap)
    cmap_by = plt.cm.get_cmap(cmap_by)
    m = len(matrix.index)
    n = len(matrix.columns)
    maxmn = max(m, n)
    # Vertical marker positions; the shorter axis is centred on the longer.
    yleft = (maxmn - m) / 2.0 + np.linspace(0, m, m)
    yright = (maxmn - n) / 2.0 + np.linspace(0, n, n)
    # Dotted guides for the two vertical axes (left at x=1, right at x=3).
    ax.vlines(
        x=1,
        ymin=-1,
        ymax=maxmn + 1,
        color="gray",
        alpha=0.7,
        linewidth=1,
        linestyles="dotted",
    )
    ax.vlines(
        x=3,
        ymin=-1,
        ymax=maxmn + 1,
        color="gray",
        alpha=0.7,
        linewidth=1,
        linestyles="dotted",
    )
    # Anchor points for the connections.
    ax.scatter(x=[1] * m, y=yleft, s=1)
    ax.scatter(x=[3] * n, y=yright, s=1)
    # Draw the connections; width/alpha scale with the cell value.
    # NOTE(review): if all non-zero cells are equal, maxlink == minlink and
    # the scaling below divides by zero.
    maxlink = matrix.max().max()
    minlink = matrix.values.ravel()
    minlink = min([v for v in minlink if v > 0])
    for idx, index in enumerate(matrix.index):
        for icol, col in enumerate(matrix.columns):
            link = matrix.loc[index, col]
            if link > 0:
                ax.plot(
                    [1, 3],
                    [yleft[idx], yright[icol]],
                    c="k",
                    linewidth=0.5 + 4 * (link - minlink) / (maxlink - minlink),
                    alpha=0.5 + 0.5 * (link - minlink) / (maxlink - minlink),
                )
    # Marker sizes, parsed from the "<size>:" part of the last label token
    # and rescaled to [150, 2150] points^2.
    # NOTE(review): all-equal sizes (or colours below) also divide by zero.
    left_sizes = [int(t.split(" ")[-1].split(":")[0]) for t in matrix.index]
    right_sizes = [int(t.split(" ")[-1].split(":")[0]) for t in matrix.columns]
    min_size = min(left_sizes + right_sizes)
    max_size = max(left_sizes + right_sizes)
    left_sizes = [
        150 + 2000 * (t - min_size) / (max_size - min_size) for t in left_sizes
    ]
    right_sizes = [
        150 + 2000 * (t - min_size) / (max_size - min_size) for t in right_sizes
    ]
    # Marker colours, parsed from the ":<colour>" part of the last label token
    # and mapped onto the 0.1-1.0 range of the colormaps.
    left_colors = [int(t.split(" ")[-1].split(":")[1]) for t in matrix.index]
    right_colors = [int(t.split(" ")[-1].split(":")[1]) for t in matrix.columns]
    min_color = min(left_colors + right_colors)
    max_color = max(left_colors + right_colors)
    left_colors = [
        cmap_by(0.1 + 0.9 * (t - min_color) / (max_color - min_color))
        for t in left_colors
    ]
    right_colors = [
        cmap(0.1 + 0.9 * (t - min_color) / (max_color - min_color))
        for t in right_colors
    ]
    # Left axis: markers, tick dashes and boxed labels.
    ax.scatter(
        x=[1] * m,
        y=yleft,
        s=left_sizes,
        c=left_colors,
        zorder=10,
        linewidths=1,
        edgecolors="k",
    )
    for idx, text in enumerate(matrix.index):
        ax.plot([0.7, 1.0], [yleft[idx], yleft[idx]], "-", c="grey")
    for idx, text in enumerate(matrix.index):
        ax.text(
            0.7,
            yleft[idx],
            text,
            fontsize=10,
            ha="right",
            va="center",
            zorder=10,
            bbox=dict(
                facecolor="w",
                alpha=1.0,
                edgecolor="gray",
                boxstyle="round,pad=0.5",
            ),
        )
    # Right axis: markers, tick dashes and boxed labels.
    ax.scatter(
        x=[3] * n,
        y=yright,
        s=right_sizes,
        c=right_colors,
        zorder=10,
        linewidths=1,
        edgecolors="k",
    )
    for idx, text in enumerate(matrix.columns):
        ax.plot([3.0, 3.3], [yright[idx], yright[idx]], "-", c="grey")
    for idx, text in enumerate(matrix.columns):
        ax.text(
            3.3,
            yright[idx],
            text,
            fontsize=10,
            ha="left",
            va="center",
            bbox=dict(
                facecolor="w",
                alpha=1.0,
                edgecolor="gray",
                boxstyle="round,pad=0.5",
            ),
            zorder=11,
        )
    # Final cosmetics.
    # expand_ax_limits(ax)  # left disabled in the original
    ax.invert_yaxis()
    ax.axis("off")
    fig.set_tight_layout(True)
    return fig
| 23.760204 | 80 | 0.49388 | import textwrap
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
TEXTLEN = 35  # label wrap width; not referenced anywhere in this chunk -- presumably used via textwrap elsewhere, confirm
def slope_chart(
    matrix,
    figsize=(6, 6),
    cmap="Greys",
    cmap_by="Reds",
    fontsize=9,
):
    """Draw a slope chart linking the rows of *matrix* (left axis) to its
    columns (right axis).

    Every row/column label is expected to end in a " <num>:<num>" token
    (e.g. "fintech 12:130"): the number before the colon drives the marker
    size, the one after it drives the marker colour. The width and opacity
    of each connecting line encode the value of the corresponding cell;
    cells equal to 0 draw no line.

    :param matrix: pandas DataFrame of link strengths (rows -> columns).
    :param figsize: figure size in inches.
    :param cmap: colormap name for the right-hand markers.
    :param cmap_by: colormap name for the left-hand markers.
    :param fontsize: base font size for the figure.
    :returns: the matplotlib Figure object.
    """
    matrix = matrix.copy()
    # NOTE(review): multindex2text is referenced below but is not imported in
    # this chunk, so a MultiIndex input raises NameError.
    if isinstance(matrix.columns, pd.MultiIndex):
        matrix.columns = multindex2text(matrix.columns)
    if isinstance(matrix.index, pd.MultiIndex):
        matrix.index = multindex2text(matrix.index)
    matplotlib.rc("font", size=fontsize)
    fig = plt.Figure(figsize=figsize)
    ax = fig.subplots()
    cmap = plt.cm.get_cmap(cmap)
    cmap_by = plt.cm.get_cmap(cmap_by)
    m = len(matrix.index)
    n = len(matrix.columns)
    maxmn = max(m, n)
    # Vertical marker positions; the shorter axis is centred on the longer.
    yleft = (maxmn - m) / 2.0 + np.linspace(0, m, m)
    yright = (maxmn - n) / 2.0 + np.linspace(0, n, n)
    # Dotted guides for the two vertical axes (left at x=1, right at x=3).
    ax.vlines(
        x=1,
        ymin=-1,
        ymax=maxmn + 1,
        color="gray",
        alpha=0.7,
        linewidth=1,
        linestyles="dotted",
    )
    ax.vlines(
        x=3,
        ymin=-1,
        ymax=maxmn + 1,
        color="gray",
        alpha=0.7,
        linewidth=1,
        linestyles="dotted",
    )
    # Anchor points for the connections.
    ax.scatter(x=[1] * m, y=yleft, s=1)
    ax.scatter(x=[3] * n, y=yright, s=1)
    # Draw the connections; width/alpha scale with the cell value.
    # NOTE(review): if all non-zero cells are equal, maxlink == minlink and
    # the scaling below divides by zero.
    maxlink = matrix.max().max()
    minlink = matrix.values.ravel()
    minlink = min([v for v in minlink if v > 0])
    for idx, index in enumerate(matrix.index):
        for icol, col in enumerate(matrix.columns):
            link = matrix.loc[index, col]
            if link > 0:
                ax.plot(
                    [1, 3],
                    [yleft[idx], yright[icol]],
                    c="k",
                    linewidth=0.5 + 4 * (link - minlink) / (maxlink - minlink),
                    alpha=0.5 + 0.5 * (link - minlink) / (maxlink - minlink),
                )
    # Marker sizes, parsed from the "<size>:" part of the last label token
    # and rescaled to [150, 2150] points^2.
    # NOTE(review): all-equal sizes (or colours below) also divide by zero.
    left_sizes = [int(t.split(" ")[-1].split(":")[0]) for t in matrix.index]
    right_sizes = [int(t.split(" ")[-1].split(":")[0]) for t in matrix.columns]
    min_size = min(left_sizes + right_sizes)
    max_size = max(left_sizes + right_sizes)
    left_sizes = [
        150 + 2000 * (t - min_size) / (max_size - min_size) for t in left_sizes
    ]
    right_sizes = [
        150 + 2000 * (t - min_size) / (max_size - min_size) for t in right_sizes
    ]
    # Marker colours, parsed from the ":<colour>" part of the last label token
    # and mapped onto the 0.1-1.0 range of the colormaps.
    left_colors = [int(t.split(" ")[-1].split(":")[1]) for t in matrix.index]
    right_colors = [int(t.split(" ")[-1].split(":")[1]) for t in matrix.columns]
    min_color = min(left_colors + right_colors)
    max_color = max(left_colors + right_colors)
    left_colors = [
        cmap_by(0.1 + 0.9 * (t - min_color) / (max_color - min_color))
        for t in left_colors
    ]
    right_colors = [
        cmap(0.1 + 0.9 * (t - min_color) / (max_color - min_color))
        for t in right_colors
    ]
    # Left axis: markers, tick dashes and boxed labels.
    ax.scatter(
        x=[1] * m,
        y=yleft,
        s=left_sizes,
        c=left_colors,
        zorder=10,
        linewidths=1,
        edgecolors="k",
    )
    for idx, text in enumerate(matrix.index):
        ax.plot([0.7, 1.0], [yleft[idx], yleft[idx]], "-", c="grey")
    for idx, text in enumerate(matrix.index):
        ax.text(
            0.7,
            yleft[idx],
            text,
            fontsize=10,
            ha="right",
            va="center",
            zorder=10,
            bbox=dict(
                facecolor="w",
                alpha=1.0,
                edgecolor="gray",
                boxstyle="round,pad=0.5",
            ),
        )
    # Right axis: markers, tick dashes and boxed labels.
    ax.scatter(
        x=[3] * n,
        y=yright,
        s=right_sizes,
        c=right_colors,
        zorder=10,
        linewidths=1,
        edgecolors="k",
    )
    for idx, text in enumerate(matrix.columns):
        ax.plot([3.0, 3.3], [yright[idx], yright[idx]], "-", c="grey")
    for idx, text in enumerate(matrix.columns):
        ax.text(
            3.3,
            yright[idx],
            text,
            fontsize=10,
            ha="left",
            va="center",
            bbox=dict(
                facecolor="w",
                alpha=1.0,
                edgecolor="gray",
                boxstyle="round,pad=0.5",
            ),
            zorder=11,
        )
    # Final cosmetics.
    ax.invert_yaxis()
    ax.axis("off")
    fig.set_tight_layout(True)
    return fig
| true | true |
f7f80d7b69236ed01043222376908325e591ac85 | 2,761 | py | Python | src/math_py/imo_shl/imo_shl_2015_n2.py | kapsitis/math | f21b172d4a58ec8ba25003626de02bfdda946cdc | [
"BSD-3-Clause"
] | null | null | null | src/math_py/imo_shl/imo_shl_2015_n2.py | kapsitis/math | f21b172d4a58ec8ba25003626de02bfdda946cdc | [
"BSD-3-Clause"
] | 3 | 2020-07-20T03:40:52.000Z | 2022-02-10T21:50:18.000Z | src/math_py/imo_shl/imo_shl_2015_n2.py | kapsitis/math | f21b172d4a58ec8ba25003626de02bfdda946cdc | [
"BSD-3-Clause"
] | null | null | null | '''
Creative Commons, CC-BY 4.0
@author: kalvis
'''
import math
from imo_shl import numtheory_utils
def prod_all(m, n):
    """Return the product of all the integers in the interval [m+1, n].

    An empty interval (n <= m) yields 1, the multiplicative identity.
    """
    product = 1
    for factor in range(m + 1, n + 1):
        product = product * factor
    return product
def fact_factorial(n):
    """Return the factorisation of n! as a list of (prime, exponent) pairs,
    with primes in increasing order."""
    if n == 2:
        return [(2, 1)]
    # Primes up to n, in increasing order.
    plist = [p for p in range(2, n + 1) if numtheory_utils.is_prime(p)]
    # The exponent of each prime in n! is delegated to numtheory_utils.lagrange
    # (presumably Legendre's formula -- not verified here).
    return [(p, numtheory_utils.lagrange(n, p)) for p in plist]
def mul_merge(l1, l2):
    """Multiply two numbers given as factorisations [(p1, k1), ...].

    The result contains every prime occurring in either input, sorted in
    increasing order, with the exponents of shared primes added together.
    """
    primes = sorted(set(pair[0] for pair in l1) | set(pair[0] for pair in l2))
    return [(p, numtheory_utils.lookup(l1, p) + numtheory_utils.lookup(l2, p))
            for p in primes]
def is_divisible(lst, d):
    """Return 'T' when d divides the number whose factorisation is lst,
    'F' otherwise.

    lst is a list of (prime, exponent) pairs; each prime may be divided out
    of d at most `exponent` times, so d divides the product iff the quotient
    reaches 1.
    """
    quotient = d
    for prime, exponent in lst:
        while exponent > 0 and quotient % prime == 0:
            quotient //= prime
            exponent -= 1
    return 'T' if quotient == 1 else 'F'
def main():
    """Numerical experiment for IMO shortlist 2015 N2.

    For each a in [1, 1000) and b = a + 6, tests whether
    d = (a+1)*(a+2)*...*b + 1 divides b!, counts the hits per gap b - a in
    `vect`, prints the pairs found with gap > 1, and finally checks one
    specific candidate factorisation by hand.
    """
    minA = 1
    maxA = 1000
    max_diff = 0
    # vect[g] counts the (a, b) pairs with gap g = b - a for which d | b!.
    vect = [0,0,0,0,0,0,0,0,0,0,0]
    for a in range(minA,maxA):
        found = False
        mylst = list()
        lower = a+6
        upper = a+7
        # NOTE(review): upper = lower + 1, so only b = a + 6 is actually tested.
        for b in range(lower,upper):
            d = prod_all(a,b)+1
            bb = fact_factorial(b)
            divisible = is_divisible(bb,d)
            mylst.append(divisible)
            if divisible == 'T':
                vect[b-a] = vect[b-a] + 1
            if b-a > 1 and divisible == 'T':
                found = True
                if b - a > max_diff:
                    max_diff = b - a
        if found:
            for b in range(lower,upper):
                is_div = mylst[b-lower]
                print('(%3d,%3d)-%s ' % (a,b,is_div), end='')
            print('')
    print('max_diff = %d' % max_diff)
    print('vect is %s' % vect)
    # Hand check: 13092*13093*13094*13095*13096 + 1 against the guessed
    # prime factorisation ddd, then primality of the large guessed factors.
    ddd = 7 * 79 * 1109 * 3119 * 3821 * 5381 * 9787
    ss = (1 + 13092*13093*13094*13095*13096) // ddd
    print('ss = %d' % ss)
    b1 = numtheory_utils.is_prime(1109)
    b2 = numtheory_utils.is_prime(3119)
    b3 = numtheory_utils.is_prime(3821)
    b4 = numtheory_utils.is_prime(5381)
    b5 = numtheory_utils.is_prime(9787)
    print('(%s,%s,%s,%s,%s)' % (b1,b2,b3,b4,b5))
if __name__ == '__main__':
    main()
| 24.433628 | 77 | 0.505614 |
import math
from imo_shl import numtheory_utils
def prod_all(m, n):
    """Product of the consecutive integers m+1, m+2, ..., n (1 for an empty range)."""
    acc = 1
    k = m + 1
    while k <= n:
        acc = acc * k
        k = k + 1
    return acc
def fact_factorial(n):
    """Prime factorisation of n! as (prime, exponent) pairs, primes ascending."""
    if n == 2:
        return [(2, 1)]
    # Walk the primes up to n and attach each prime's exponent in n!
    # (numtheory_utils.lagrange; presumably Legendre's formula -- unverified here).
    factorisation = []
    for candidate in range(2, n + 1):
        if numtheory_utils.is_prime(candidate):
            factorisation.append((candidate, numtheory_utils.lagrange(n, candidate)))
    return factorisation
def mul_merge(l1, l2):
    """Merge two factorisations [(p, k), ...]: exponents of common primes add."""
    all_primes = {pair[0] for pair in l1}
    all_primes.update(pair[0] for pair in l2)
    merged = []
    for p in sorted(all_primes):
        merged.append((p, numtheory_utils.lookup(l1, p) + numtheory_utils.lookup(l2, p)))
    return merged
def is_divisible(lst, d):
    """'T' if d divides the number whose prime factorisation is lst, else 'F'."""
    rest = d
    for prime, power in lst:
        # Divide out this prime at most `power` times.
        while power > 0 and rest % prime == 0:
            rest = rest // prime
            power -= 1
    if rest == 1:
        return 'T'
    return 'F'
def main():
    """Numerical experiment for IMO shortlist 2015 N2.

    For each a in [1, 1000) and b = a + 6, tests whether
    d = (a+1)*(a+2)*...*b + 1 divides b!, counts the hits per gap b - a in
    `vect`, prints the pairs found with gap > 1, and finally checks one
    specific candidate factorisation by hand.
    """
    minA = 1
    maxA = 1000
    max_diff = 0
    # vect[g] counts the (a, b) pairs with gap g = b - a for which d | b!.
    vect = [0,0,0,0,0,0,0,0,0,0,0]
    for a in range(minA,maxA):
        found = False
        mylst = list()
        lower = a+6
        upper = a+7
        # NOTE(review): upper = lower + 1, so only b = a + 6 is actually tested.
        for b in range(lower,upper):
            d = prod_all(a,b)+1
            bb = fact_factorial(b)
            divisible = is_divisible(bb,d)
            mylst.append(divisible)
            if divisible == 'T':
                vect[b-a] = vect[b-a] + 1
            if b-a > 1 and divisible == 'T':
                found = True
                if b - a > max_diff:
                    max_diff = b - a
        if found:
            for b in range(lower,upper):
                is_div = mylst[b-lower]
                print('(%3d,%3d)-%s ' % (a,b,is_div), end='')
            print('')
    print('max_diff = %d' % max_diff)
    print('vect is %s' % vect)
    # Hand check: 13092*13093*13094*13095*13096 + 1 against the guessed
    # prime factorisation ddd, then primality of the large guessed factors.
    ddd = 7 * 79 * 1109 * 3119 * 3821 * 5381 * 9787
    ss = (1 + 13092*13093*13094*13095*13096) // ddd
    print('ss = %d' % ss)
    b1 = numtheory_utils.is_prime(1109)
    b2 = numtheory_utils.is_prime(3119)
    b3 = numtheory_utils.is_prime(3821)
    b4 = numtheory_utils.is_prime(5381)
    b5 = numtheory_utils.is_prime(9787)
    print('(%s,%s,%s,%s,%s)' % (b1,b2,b3,b4,b5))
if __name__ == '__main__':
    main()
| true | true |
f7f80d82fc95e7673867310865f45088387388fa | 1,841 | py | Python | nucypher/tests/crypto/test_api.py | kanzeparov/NuCypher | 0d7e349872909d0cacfd66583d018d722587b2e7 | [
"FTL",
"CNRI-Python"
] | null | null | null | nucypher/tests/crypto/test_api.py | kanzeparov/NuCypher | 0d7e349872909d0cacfd66583d018d722587b2e7 | [
"FTL",
"CNRI-Python"
] | null | null | null | nucypher/tests/crypto/test_api.py | kanzeparov/NuCypher | 0d7e349872909d0cacfd66583d018d722587b2e7 | [
"FTL",
"CNRI-Python"
] | null | null | null | """
This file is part of nucypher.
nucypher is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
nucypher is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with nucypher. If not, see <https://www.gnu.org/licenses/>.
"""
import unittest
import sha3
from nucypher.crypto import api
class TestCrypto(unittest.TestCase):
    """Exercise the randomness and hashing helpers in nucypher.crypto.api."""
    def test_secure_random(self):
        """Two draws differ, are bytes, and have the requested length."""
        first = api.secure_random(10)
        second = api.secure_random(10)
        self.assertNotEqual(first, second)
        self.assertEqual(bytes, type(first))
        self.assertEqual(bytes, type(second))
        self.assertEqual(10, len(first))
        self.assertEqual(10, len(second))
    def test_secure_random_range(self):
        """The upper bound is exclusive; the lower bound is reachable."""
        draws = [api.secure_random_range(1, 3) for _ in range(20)]
        # The maximum must never appear: outputs live in [min, max).
        self.assertNotIn(3, draws)
        draws = [api.secure_random_range(1, 2) for _ in range(20)]
        # With the range [1, 2) the only possible output is 1.
        self.assertNotIn(2, draws)
        self.assertIn(1, draws)
    def test_keccak_digest(self):
        """keccak_digest agrees with sha3.keccak_256, for bytes and iterables."""
        payload = b'this is a test'
        expected = sha3.keccak_256(payload).digest()
        self.assertEqual(expected, api.keccak_digest(payload))
        # Several arguments are hashed as their concatenation.
        pieces = payload.split()
        expected = sha3.keccak_256(b''.join(pieces)).digest()
        self.assertEqual(expected, api.keccak_digest(*pieces))
| 29.222222 | 68 | 0.688213 |
import unittest
import sha3
from nucypher.crypto import api
class TestCrypto(unittest.TestCase):
    """Tests for the randomness and hashing helpers in nucypher.crypto.api."""
    def test_secure_random(self):
        """Two 10-byte draws differ, are bytes and have the requested length."""
        rand1 = api.secure_random(10)
        rand2 = api.secure_random(10)
        self.assertNotEqual(rand1, rand2)
        self.assertEqual(bytes, type(rand1))
        self.assertEqual(bytes, type(rand2))
        self.assertEqual(10, len(rand1))
        self.assertEqual(10, len(rand2))
    def test_secure_random_range(self):
        """The upper bound is exclusive and the lower bound is reachable."""
        # The maximum (3) must never be produced for the range [1, 3).
        output = [api.secure_random_range(1, 3) for _ in range(20)]
        self.assertNotIn(3, output)
        # For [1, 2) only 1 can occur, so it must be present.
        output = [api.secure_random_range(1, 2) for _ in range(20)]
        self.assertNotIn(2, output)
        self.assertIn(1, output)
    def test_keccak_digest(self):
        """keccak_digest matches sha3.keccak_256, for bytes and for iterables."""
        data = b'this is a test'
        digest1 = sha3.keccak_256(data).digest()
        digest2 = api.keccak_digest(data)
        self.assertEqual(digest1, digest2)
        # Multiple arguments are hashed as their concatenation.
        data = data.split()
        digest1 = sha3.keccak_256(b''.join(data)).digest()
        digest2 = api.keccak_digest(*data)
        self.assertEqual(digest1, digest2)
| true | true |
f7f80e0618a4abd812854dc0db8f17851032284c | 16,057 | py | Python | programmes_python/CommandeEnTension.py | 3sigma/T-Quad-Commande-Moteurs-en-Tension | 4d302f2a4a0a1a12264b967a991b1824830c5556 | [
"MIT"
] | null | null | null | programmes_python/CommandeEnTension.py | 3sigma/T-Quad-Commande-Moteurs-en-Tension | 4d302f2a4a0a1a12264b967a991b1824830c5556 | [
"MIT"
] | null | null | null | programmes_python/CommandeEnTension.py | 3sigma/T-Quad-Commande-Moteurs-en-Tension | 4d302f2a4a0a1a12264b967a991b1824830c5556 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# -*- coding: utf-8 -*-
##################################################################################
# Programme de commande en tension des moteurs du robot T-Quad,
# disponible à l'adresse:
# http://boutique.3sigma.fr/12-robots
#
# Auteur: 3Sigma
# Version 1.1.2 - 30/01/2017
##################################################################################
# Importe les fonctions Arduino pour Python
from pyduino import *
# Imports Généraux
import time, sched
import os
import threading
import signal
import json
import sys
# Pour la détection d'adresse IP
import socket
import fcntl
import struct
# Pour le serveur de socket
import tornado.httpserver
import tornado.ioloop
from tornado.ioloop import PeriodicCallback
import tornado.web
import tornado.websocket
import tornado.template
# Hostname (used further down to know which system this program runs on).
hostname = socket.gethostname()
# i2c communication with the Arduino Mega motor/sensor board.
from mega import Mega
mega = Mega(hostname = hostname)
# Motors: measured wheel speeds (rad/s) and raw encoder deltas; the "Prec"
# variables keep the previous sample for glitch rejection in CalculVitesse.
Nmoy = 1
omegaArriereDroit = 0.
codeurArriereDroitDeltaPos = 0
codeurArriereDroitDeltaPosPrec = 0
omegaArriereGauche = 0.
codeurArriereGaucheDeltaPos = 0
codeurArriereGaucheDeltaPosPrec = 0
omegaAvantDroit = 0.
codeurAvantDroitDeltaPos = 0
codeurAvantDroitDeltaPosPrec = 0
omegaAvantGauche = 0.
codeurAvantGaucheDeltaPos = 0
codeurAvantGaucheDeltaPosPrec = 0
# Variables needed to drive the motors.
# Voltage setpoints (global reference plus one per wheel).
vref = 0.
vrefArriereDroit = 0.
vrefArriereGauche = 0.
vrefAvantDroit = 0.
vrefAvantGauche = 0.
# Voltage actually applied (after saturation).
commandeArriereDroit = 0.
commandeArriereGauche = 0.
commandeAvantDroit = 0.
commandeAvantGauche = 0.
# Saturations
umax = 6. # max motor command voltage (V)
umin = -6. # min motor command voltage (V)
# Excitation parameters received over the websocket.
typeSignal = 0
offset = 0.
amplitude = 0.
frequence = 0.
moteur1 = 0
moteur2 = 0
moteur3 = 0
moteur4 = 0
# Data-reception timeout (s): setpoints are zeroed when nothing arrives.
timeout = 2
timeLastReceived = 0
timedOut = False
T0 = time.time()
dt = 0.01
i = 0
tdebut = 0
# Scheduler used to run the control law at a fixed rate.
s = sched.scheduler(time.time, time.sleep)
idecimLectureTension = 0
decimLectureTension = 6000
decimErreurLectureTension = 100
# Battery voltage measurement. Constrained to stay above 7 V so a later
# division by this value can never be a division by zero.
lectureTensionOK = False
tensionAlim = 7.4
while not lectureTensionOK:
    try:
        tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
        lectureTensionOK = True
    except:
        # NOTE(review): bare except also hides Ctrl-C during this startup
        # loop -- consider except Exception.
        print("Erreur lecture tension")
#--- setup ---
def setup():
    """Initial state: all four motors stopped (0 V command)."""
    CommandeMoteurs(0, 0, 0, 0)
# -- fin setup --
# -- loop --
def loop():
    """Schedule and run the next control iteration at a fixed period dt."""
    global i, T0
    i = i+1
    # Absolute-time scheduling avoids drift: iteration i runs at T0 + i*dt.
    s.enterabs( T0 + (i * dt), 1, CalculVitesse, ())
    s.run()
# -- fin loop --
def CalculVitesse():
    """One control period (dt): read the wheel encoders, derive the angular
    speeds, build the excitation setpoint, saturate it to [umin, umax] and
    send it to the motors; every decimLectureTension periods, refresh the
    battery voltage reading.
    """
    global omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, timeLastReceived, timeout, timedOut, \
        tdebut, codeurArriereDroitDeltaPos, codeurArriereGaucheDeltaPos, codeurAvantDroitDeltaPos, codeurAvantGaucheDeltaPos, \
        commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche, \
        vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche, vref, \
        codeurArriereDroitDeltaPosPrec, codeurArriereGaucheDeltaPosPrec, codeurAvantDroitDeltaPosPrec, codeurAvantGaucheDeltaPosPrec, \
        idecimLectureTension, decimLectureTension, decimErreurLectureTension, tensionAlim, \
        typeSignal, offset, amplitude, frequence, moteur1, moteur2, moteur3, moteur4
    tdebut = time.time()
    # Motor speed measurement from the incremental encoders.
    try:
        codeursDeltaPos = mega.read_codeursDeltaPos()
        codeurArriereDroitDeltaPos = codeursDeltaPos[0]
        codeurArriereGaucheDeltaPos = codeursDeltaPos[1]
        codeurAvantDroitDeltaPos = codeursDeltaPos[2]
        codeurAvantGaucheDeltaPos = codeursDeltaPos[3]
        # Glitch rejection: a jump of more than 20 counts on any wheel is
        # treated as an outlier and the previous sample is reused instead.
        if (abs(codeurArriereDroitDeltaPos - codeurArriereDroitDeltaPosPrec) > 20) or (abs(codeurArriereGaucheDeltaPos - codeurArriereGaucheDeltaPosPrec) > 20) or (abs(codeurAvantDroitDeltaPos - codeurAvantDroitDeltaPosPrec) > 20) or (abs(codeurAvantGaucheDeltaPos - codeurAvantGaucheDeltaPosPrec) > 20):
            codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
            codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
            codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
            codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec
        codeurArriereDroitDeltaPosPrec = codeurArriereDroitDeltaPos
        codeurArriereGaucheDeltaPosPrec = codeurArriereGaucheDeltaPos
        codeurAvantDroitDeltaPosPrec = codeurAvantDroitDeltaPos
        codeurAvantGaucheDeltaPosPrec = codeurAvantGaucheDeltaPos
    except:
        # i2c read failed: fall back to the previous encoder deltas.
        codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
        codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
        codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
        codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec
    # Angular speeds; 1200 counts per revolution, right-hand side negated.
    # NOTE(review): the leading factor 2 presumably compensates an effective
    # sampling period of 2*dt -- confirm against the Mega firmware.
    omegaArriereDroit = -2 * ((2 * 3.141592 * codeurArriereDroitDeltaPos) / 1200) / (Nmoy * dt) # rad/s
    omegaArriereGauche = 2 * ((2 * 3.141592 * codeurArriereGaucheDeltaPos) / 1200) / (Nmoy * dt) # rad/s
    omegaAvantDroit = -2 * ((2 * 3.141592 * codeurAvantDroitDeltaPos) / 1200) / (Nmoy * dt) # rad/s
    omegaAvantGauche = 2 * ((2 * 3.141592 * codeurAvantGaucheDeltaPos) / 1200) / (Nmoy * dt) # rad/s
    tcourant = time.time() - T0
    # Setpoint computed from the parameters received over the websocket.
    if typeSignal == 0: # square wave
        if frequence > 0:
            if (tcourant - (float(int(tcourant*frequence)))/frequence < 1/(2*frequence)):
                vref = offset + amplitude
            else:
                vref = offset
        else:
            vref = offset + amplitude
    else: # sine wave
        if frequence > 0:
            # NOTE(review): sin is expected to come from the pyduino star
            # import -- confirm.
            vref = offset + amplitude * sin(2*3.141592*frequence*tcourant)
        else:
            vref = offset + amplitude
    # Apply the setpoint to each motor (moteurN acts as a per-motor gain).
    vrefArriereDroit = moteur1 * vref
    vrefArriereGauche = moteur2 * vref
    vrefAvantDroit = moteur3 * vref
    vrefAvantGauche = moteur4 * vref
    # Pre-saturation command; if no data has been received for `timeout`
    # seconds, the setpoints are zeroed once.
    if (time.time()-timeLastReceived) > timeout and not timedOut:
        timedOut = True
        vrefArriereDroit = 0
        vrefArriereGauche = 0
        vrefAvantDroit = 0
        vrefAvantGauche = 0
    commande_avant_sat_ArriereDroit = vrefArriereDroit
    commande_avant_sat_ArriereGauche = vrefArriereGauche
    commande_avant_sat_AvantDroit = vrefAvantDroit
    commande_avant_sat_AvantGauche = vrefAvantGauche
    # Saturate each command to [umin, umax].
    if (commande_avant_sat_ArriereDroit > umax):
        commandeArriereDroit = umax
    elif (commande_avant_sat_ArriereDroit < umin):
        commandeArriereDroit = umin
    else:
        commandeArriereDroit = commande_avant_sat_ArriereDroit
    if (commande_avant_sat_ArriereGauche > umax) :
        commandeArriereGauche = umax
    elif (commande_avant_sat_ArriereGauche < umin):
        commandeArriereGauche = umin
    else:
        commandeArriereGauche = commande_avant_sat_ArriereGauche
    if (commande_avant_sat_AvantDroit > umax):
        commandeAvantDroit = umax
    elif (commande_avant_sat_AvantDroit < umin):
        commandeAvantDroit = umin
    else:
        commandeAvantDroit = commande_avant_sat_AvantDroit
    if (commande_avant_sat_AvantGauche > umax) :
        commandeAvantGauche = umax
    elif (commande_avant_sat_AvantGauche < umin):
        commandeAvantGauche = umin
    else:
        commandeAvantGauche = commande_avant_sat_AvantGauche
    CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche)
    # Battery voltage read, decimated to once every decimLectureTension cycles.
    if idecimLectureTension >= decimLectureTension:
        try:
            tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
            idecimLectureTension = 0
        except:
            # Retry the read decimErreurLectureTension * dt from now.
            idecimLectureTension = idecimLectureTension - decimErreurLectureTension
            #print("Erreur lecture tension dans Loop")
    else:
        idecimLectureTension = idecimLectureTension + 1
    #print time.time() - tdebut
def CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche):
    """Convert the four motor voltage commands (V) into signed 8-bit PWM duty
    cycles and send them to the H-bridges through the Arduino Mega (i2c).

    Parameters are the commanded voltages for the rear-right, rear-left,
    front-right and front-left motors. The global tensionAlim (battery
    voltage, V, kept >= 7.0 at read time) normalises the PWM ratio.
    """
    global tensionAlim
    def _pwm(tension):
        # Normalise by the supply voltage, then clamp to the signed 8-bit
        # PWM range as a safety net (replaces the original's four copies of
        # the same manual if/elif saturation).
        valeur = int(255 * tension / tensionAlim)
        return max(-255, min(255, valeur))
    pwmArriereDroit = _pwm(commandeArriereDroit)
    pwmArriereGauche = _pwm(commandeArriereGauche)
    pwmAvantDroit = _pwm(commandeAvantDroit)
    pwmAvantGauche = _pwm(commandeAvantGauche)
    # PWM command; the right-hand values are negated (presumably mirrored
    # motor mounting -- matches the sign handling in CalculVitesse).
    try:
        mega.moteursArriere(-pwmArriereDroit, pwmArriereGauche)
        mega.moteursAvant(-pwmAvantDroit, pwmAvantGauche)
        mega.moteursCRC(-pwmArriereDroit + pwmArriereGauche, -pwmAvantDroit + pwmAvantGauche)
    except Exception:
        # Best effort: an occasional i2c failure is tolerated, the command is
        # refreshed at the next control period. Narrowed from a bare except
        # so Ctrl-C (KeyboardInterrupt) is no longer swallowed here.
        pass
def emitData():
    # Give the websocket server time to start before entering the loop.
    delay(5000)
    # NOTE(review): noLoop is expected to come from the pyduino star import
    # (Arduino-style API) -- confirm; it is defined nowhere in this file.
    while not noLoop: loop() # run the control loop forever
class WSHandler(tornado.websocket.WebSocketHandler):
def open(self):
global socketOK
print 'connection opened...'
socketOK = True
self.callback = PeriodicCallback(self.sendToSocket, 20)
self.callback.start()
def on_message(self, message):
global vref, vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche, typeSignal, offset, amplitude, frequence, \
moteur1, moteur2, moteur3, moteur4, timeLastReceived, timedOut
jsonMessage = json.loads(message)
# Annulation du timeout de réception des données
timeLastReceived = time.time()
timedOut = False;
if jsonMessage.get('typeSignal') != None:
typeSignal = int(jsonMessage.get('typeSignal'))
if jsonMessage.get('offset') != None:
offset = float(jsonMessage.get('offset'))
if jsonMessage.get('amplitude') != None:
amplitude = float(jsonMessage.get('amplitude'))
if jsonMessage.get('frequence') != None:
frequence = float(jsonMessage.get('frequence'))
if jsonMessage.get('moteur1') != None:
moteur1 = float(jsonMessage.get('moteur1'))
if jsonMessage.get('moteur2') != None:
moteur2 = float(jsonMessage.get('moteur2'))
if jsonMessage.get('moteur3') != None:
moteur3 = float(jsonMessage.get('moteur3'))
if jsonMessage.get('moteur4') != None:
moteur4 = float(jsonMessage.get('moteur4'))
if not socketOK:
typeSignal = 0
offset = 0.
amplitude = 0.
frequence = 0.
def on_close(self):
global socketOK, vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche
print 'connection closed...'
socketOK = False
vrefArriereDroit = 0.
vrefArriereGauche = 0.
vrefAvantDroit = 0.
vrefAvantGauche = 0.
def sendToSocket(self):
global omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, vref, \
socketOK, commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche
tcourant = time.time() - T0
aEnvoyer = json.dumps({'Temps':("%.2f" % tcourant), \
'Consigne':("%.2f" % vref), \
'omegaArriereDroit':("%.2f" % omegaArriereDroit), \
'omegaArriereGauche':("%.2f" % omegaArriereGauche), \
'omegaAvantDroit':("%.2f" % omegaAvantDroit), \
'omegaAvantGauche':("%.2f" % omegaAvantGauche), \
'Raw':("%.2f" % tcourant) \
+ "," + ("%.2f" % vref) \
+ "," + ("%.2f" % omegaArriereDroit) \
+ "," + ("%.2f" % omegaArriereGauche) \
+ "," + ("%.2f" % omegaAvantDroit) \
+ "," + ("%.2f" % omegaAvantGauche) \
})
if socketOK:
try:
self.write_message(aEnvoyer)
except:
pass
def check_origin(self, origin):
# Voir http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
# et http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
return True
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
# Tornado application: a single websocket endpoint at /ws.
application = tornado.web.Application([
    (r'/ws', WSHandler)
])
def startTornado():
    """Start the websocket server on port 9090 (blocking call)."""
    http_server = tornado.httpserver.HTTPServer(application)
    http_server.listen(9090)
    tornado.ioloop.IOLoop.instance().start()
# Ctrl-C / SIGTERM handling: stop the motors cleanly before exiting.
def signal_handler(signal, frame):
    """Zero the voltage setpoints, stop all motors and leave the program.

    NOTE(review): the `signal` parameter shadows the signal module inside
    this function (harmless, nothing from the module is used here), and
    `device` in the global list is defined nowhere in this file -- likely a
    leftover from another program.
    """
    global vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche, device
    print 'Sortie du programme'
    vrefArriereDroit = 0.
    vrefArriereGauche = 0.
    vrefAvantDroit = 0.
    vrefAvantGauche = 0.
    CommandeMoteurs(0, 0, 0, 0)
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
# --- script entry point ---
if __name__=="__main__": # makes the file runnable as a script
    setup() # initial state: motors stopped
    print "Setup done."
    # Run the control loop in a daemon thread so Tornado can own the main thread.
    th = threading.Thread(None, emitData, None, (), {})
    th.daemon = True
    th.start()
    print "Starting Tornado."
    # Best effort: print the websocket URL for each available interface.
    try:
        print "Connect to ws://" + get_ip_address('eth0') + ":9090/ws with Ethernet."
    except:
        pass
    try:
        print "Connect to ws://" + get_ip_address('wlan0') + ":9090/ws with Wifi."
    except:
        pass
    socketOK = False
    startTornado()
| 34.982571 | 304 | 0.668058 |
GaucheDeltaPos = codeursDeltaPos[1]
codeurAvantDroitDeltaPos = codeursDeltaPos[2]
codeurAvantGaucheDeltaPos = codeursDeltaPos[3]
# Suppression de mesures aberrantes
if (abs(codeurArriereDroitDeltaPos - codeurArriereDroitDeltaPosPrec) > 20) or (abs(codeurArriereGaucheDeltaPos - codeurArriereGaucheDeltaPosPrec) > 20) or (abs(codeurAvantDroitDeltaPos - codeurAvantDroitDeltaPosPrec) > 20) or (abs(codeurAvantGaucheDeltaPos - codeurAvantGaucheDeltaPosPrec) > 20):
codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec
codeurArriereDroitDeltaPosPrec = codeurArriereDroitDeltaPos
codeurArriereGaucheDeltaPosPrec = codeurArriereGaucheDeltaPos
codeurAvantDroitDeltaPosPrec = codeurAvantDroitDeltaPos
codeurAvantGaucheDeltaPosPrec = codeurAvantGaucheDeltaPos
except:
#print "Error getting data"
codeurArriereDroitDeltaPos = codeurArriereDroitDeltaPosPrec
codeurArriereGaucheDeltaPos = codeurArriereGaucheDeltaPosPrec
codeurAvantDroitDeltaPos = codeurAvantDroitDeltaPosPrec
codeurAvantGaucheDeltaPos = codeurAvantGaucheDeltaPosPrec
omegaArriereDroit = -2 * ((2 * 3.141592 * codeurArriereDroitDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
omegaArriereGauche = 2 * ((2 * 3.141592 * codeurArriereGaucheDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
omegaAvantDroit = -2 * ((2 * 3.141592 * codeurAvantDroitDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
omegaAvantGauche = 2 * ((2 * 3.141592 * codeurAvantGaucheDeltaPos) / 1200) / (Nmoy * dt) # en rad/s
tcourant = time.time() - T0
# Calcul de la consigne en fonction des données reçues sur la liaison série
if typeSignal == 0: # signal carré
if frequence > 0:
if (tcourant - (float(int(tcourant*frequence)))/frequence < 1/(2*frequence)):
vref = offset + amplitude
else:
vref = offset
else:
vref = offset + amplitude
else: # sinus
if frequence > 0:
vref = offset + amplitude * sin(2*3.141592*frequence*tcourant)
else:
vref = offset + amplitude
# Application de la consigne sur chaque moteur
vrefArriereDroit = moteur1 * vref
vrefArriereGauche = moteur2 * vref
vrefAvantDroit = moteur3 * vref
vrefAvantGauche = moteur4 * vref
# Calcul de la commande avant saturation
# Si on n'a pas reçu de données depuis un certain temps, celles-ci sont annulées
if (time.time()-timeLastReceived) > timeout and not timedOut:
timedOut = True
vrefArriereDroit = 0
vrefArriereGauche = 0
vrefAvantDroit = 0
vrefAvantGauche = 0
commande_avant_sat_ArriereDroit = vrefArriereDroit
commande_avant_sat_ArriereGauche = vrefArriereGauche
commande_avant_sat_AvantDroit = vrefAvantDroit
commande_avant_sat_AvantGauche = vrefAvantGauche
if (commande_avant_sat_ArriereDroit > umax):
commandeArriereDroit = umax
elif (commande_avant_sat_ArriereDroit < umin):
commandeArriereDroit = umin
else:
commandeArriereDroit = commande_avant_sat_ArriereDroit
if (commande_avant_sat_ArriereGauche > umax) :
commandeArriereGauche = umax
elif (commande_avant_sat_ArriereGauche < umin):
commandeArriereGauche = umin
else:
commandeArriereGauche = commande_avant_sat_ArriereGauche
if (commande_avant_sat_AvantDroit > umax):
commandeAvantDroit = umax
elif (commande_avant_sat_AvantDroit < umin):
commandeAvantDroit = umin
else:
commandeAvantDroit = commande_avant_sat_AvantDroit
if (commande_avant_sat_AvantGauche > umax) :
commandeAvantGauche = umax
elif (commande_avant_sat_AvantGauche < umin):
commandeAvantGauche = umin
else:
commandeAvantGauche = commande_avant_sat_AvantGauche
CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche)
if idecimLectureTension >= decimLectureTension:
try:
tensionAlim = max(7.0, float(mega.read_battery_millivolts()) / 1000.)
idecimLectureTension = 0
except:
# On recommence la lecture dans decimErreurLectureTension * dt
idecimLectureTension = idecimLectureTension - decimErreurLectureTension
#print("Erreur lecture tension dans Loop")
else:
idecimLectureTension = idecimLectureTension + 1
#print time.time() - tdebut
def CommandeMoteurs(commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche):
    """Convert the four motor voltage commands into PWM values and send them to the H-bridge.

    Each command (volts) is normalised by the measured supply voltage
    ``tensionAlim``, scaled to the 8-bit PWM range and saturated to
    [-255, 255] before being written to the Mega board.
    """
    global tensionAlim

    def _pwm(tension):
        # Normalise by the supply voltage, then saturate to the PWM range.
        # (Replaces four copies of the same int()/if-chain saturation code.)
        valeur = int(255 * tension / tensionAlim)
        return max(-255, min(255, valeur))

    tension_int_ArriereDroit = _pwm(commandeArriereDroit)
    tension_int_ArriereGauche = _pwm(commandeArriereGauche)
    tension_int_AvantDroit = _pwm(commandeAvantDroit)
    tension_int_AvantGauche = _pwm(commandeAvantGauche)

    # Send the PWM commands; board communication errors are deliberately
    # ignored (best effort - the next control cycle retries).  The original
    # bare "except:" also swallowed KeyboardInterrupt; narrowed to Exception.
    try:
        mega.moteursArriere(-tension_int_ArriereDroit, tension_int_ArriereGauche)
        mega.moteursAvant(-tension_int_AvantDroit, tension_int_AvantGauche)
        mega.moteursCRC(-tension_int_ArriereDroit + tension_int_ArriereGauche, -tension_int_AvantDroit + tension_int_AvantGauche)
    except Exception:
        pass
def emitData():
    # Give the web server some time to start before entering the control loop.
    delay(5000)
    # Run the control loop forever (until noLoop is set elsewhere).
    while not noLoop:
        loop()
class WSHandler(tornado.websocket.WebSocketHandler):
    # Websocket handler: receives motor set-points (JSON) from the web UI and
    # periodically pushes the measured wheel speeds back to the client.
    def open(self):
        # New client connection: start pushing telemetry every 20 ms.
        global socketOK
        print 'connection opened...'
        socketOK = True
        self.callback = PeriodicCallback(self.sendToSocket, 20)
        self.callback.start()
    def on_message(self, message):
        # Incoming JSON message: update the reference-signal parameters and the
        # per-motor multipliers.  Any received message resets the data timeout.
        global vref, vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche, typeSignal, offset, amplitude, frequence, \
            moteur1, moteur2, moteur3, moteur4, timeLastReceived, timedOut
        jsonMessage = json.loads(message)
        # Cancel the reception timeout.
        timeLastReceived = time.time()
        timedOut = False;
        # Each field is optional; only update the parameters present in the message.
        if jsonMessage.get('typeSignal') != None:
            typeSignal = int(jsonMessage.get('typeSignal'))
        if jsonMessage.get('offset') != None:
            offset = float(jsonMessage.get('offset'))
        if jsonMessage.get('amplitude') != None:
            amplitude = float(jsonMessage.get('amplitude'))
        if jsonMessage.get('frequence') != None:
            frequence = float(jsonMessage.get('frequence'))
        if jsonMessage.get('moteur1') != None:
            moteur1 = float(jsonMessage.get('moteur1'))
        if jsonMessage.get('moteur2') != None:
            moteur2 = float(jsonMessage.get('moteur2'))
        if jsonMessage.get('moteur3') != None:
            moteur3 = float(jsonMessage.get('moteur3'))
        if jsonMessage.get('moteur4') != None:
            moteur4 = float(jsonMessage.get('moteur4'))
        # Without an open socket, force a neutral (zero) reference signal.
        if not socketOK:
            typeSignal = 0
            offset = 0.
            amplitude = 0.
            frequence = 0.
    def on_close(self):
        # Client disconnected: stop driving the motors.
        global socketOK, vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche
        print 'connection closed...'
        socketOK = False
        vrefArriereDroit = 0.
        vrefArriereGauche = 0.
        vrefAvantDroit = 0.
        vrefAvantGauche = 0.
    def sendToSocket(self):
        # Periodic callback: push the current telemetry (time, set-point and the
        # four measured wheel speeds) to the client as JSON.  The 'Raw' field
        # duplicates the data as a single CSV string.
        global omegaArriereDroit, omegaArriereGauche, omegaAvantDroit, omegaAvantGauche, vref, \
            socketOK, commandeArriereDroit, commandeArriereGauche, commandeAvantDroit, commandeAvantGauche
        tcourant = time.time() - T0
        aEnvoyer = json.dumps({'Temps':("%.2f" % tcourant), \
                               'Consigne':("%.2f" % vref), \
                               'omegaArriereDroit':("%.2f" % omegaArriereDroit), \
                               'omegaArriereGauche':("%.2f" % omegaArriereGauche), \
                               'omegaAvantDroit':("%.2f" % omegaAvantDroit), \
                               'omegaAvantGauche':("%.2f" % omegaAvantGauche), \
                               'Raw':("%.2f" % tcourant) \
                               + "," + ("%.2f" % vref) \
                               + "," + ("%.2f" % omegaArriereDroit) \
                               + "," + ("%.2f" % omegaArriereGauche) \
                               + "," + ("%.2f" % omegaAvantDroit) \
                               + "," + ("%.2f" % omegaAvantGauche) \
                               })
        # Sending may fail if the socket closed between callbacks; best effort.
        if socketOK:
            try:
                self.write_message(aEnvoyer)
            except:
                pass
    def check_origin(self, origin):
        # Accept cross-origin websocket connections.
        # See http://www.tornadoweb.org/en/stable/websocket.html#tornado.websocket.WebSocketHandler.check_origin
        # and http://www.arundhaj.com/blog/tornado-error-during-websocket-handshake.html
        return True
def get_ip_address(ifname):
    """Return the IPv4 address bound to network interface *ifname* (e.g. 'eth0').

    Uses the SIOCGIFADDR ioctl on a throw-away UDP socket.  Raises an OS/IO
    error if the interface does not exist or has no IPv4 address.
    """
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        # ioctl wants the interface name padded into a 256-byte buffer; accept
        # both text and byte names for Python 2/3 portability.
        name = ifname[:15]
        if not isinstance(name, bytes):
            name = name.encode('utf-8')
        return socket.inet_ntoa(fcntl.ioctl(
            s.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', name)
        )[20:24])
    finally:
        # The original version leaked this socket on every call.
        s.close()
# Tornado application exposing a single websocket endpoint at /ws.
application = tornado.web.Application([
    (r'/ws', WSHandler)
])
def startTornado():
    """Serve the websocket application on port 9090 and block in the IO loop."""
    server = tornado.httpserver.HTTPServer(application)
    server.listen(9090)
    tornado.ioloop.IOLoop.instance().start()
# CTRL-C / SIGTERM handling: stop the motors cleanly before exiting.
def signal_handler(signum, frame):
    """Zero all motor set-points, stop the motors and exit the program.

    Note: the parameter was renamed from ``signal`` to ``signum`` because the
    old name shadowed the :mod:`signal` module (handlers are invoked
    positionally, so this is caller-compatible).
    """
    global vrefArriereDroit, vrefArriereGauche, vrefAvantDroit, vrefAvantGauche, device
    print('Sortie du programme')
    vrefArriereDroit = 0.
    vrefArriereGauche = 0.
    vrefAvantDroit = 0.
    vrefAvantGauche = 0.
    CommandeMoteurs(0, 0, 0, 0)
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
#--- program entry point --
if __name__=="__main__": # only runs when the file is executed as a script
    setup() # hardware / controller initialisation
    print "Setup done."
    # Run the control loop in a background daemon thread so Tornado can own
    # the main thread.
    th = threading.Thread(None, emitData, None, (), {})
    th.daemon = True
    th.start()
    print "Starting Tornado."
    # Best effort: print the websocket URL for each known interface, ignoring
    # interfaces that are absent or unconfigured.
    try:
        print "Connect to ws://" + get_ip_address('eth0') + ":9090/ws with Ethernet."
    except:
        pass
    try:
        print "Connect to ws://" + get_ip_address('wlan0') + ":9090/ws with Wifi."
    except:
        pass
    socketOK = False
    startTornado()
| false | true |
f7f80e998b2c4664f87931467c1aa6bc136bb29a | 4,019 | py | Python | storm_control/sc_hardware/yokogawa/w1SpinningDisk.py | BoettigerLab/microscope-control | 6cfa570d52ad1f14871bef94721dd51d14f61671 | [
"MIT"
] | 1 | 2021-03-17T20:25:59.000Z | 2021-03-17T20:25:59.000Z | storm_control/sc_hardware/yokogawa/w1SpinningDisk.py | BoettigerLab/microscope-control | 6cfa570d52ad1f14871bef94721dd51d14f61671 | [
"MIT"
] | null | null | null | storm_control/sc_hardware/yokogawa/w1SpinningDisk.py | BoettigerLab/microscope-control | 6cfa570d52ad1f14871bef94721dd51d14f61671 | [
"MIT"
] | 1 | 2021-03-17T21:24:35.000Z | 2021-03-17T21:24:35.000Z | #!/usr/bin/env python
"""
A serial interface to the W1 Spinning Disk from Yokogawa/Andor.
Jeffrey Moffitt 5/16
Hazen Babcock 5/17
"""
import storm_control.sc_hardware.serial.RS232 as RS232
class W1SpinningDisk(RS232.RS232):
    """A serial (RS-232) interface to the W1 Spinning Disk from Yokogawa/Andor."""

    def __init__(self, **kwds):
        super().__init__(**kwds)

        # Map of W1 numeric status codes to human readable error messages.
        self.error_codes = {"30005": "Command name error",
                            "30006": "Command argument number error",
                            "30007": "Command argument value error",
                            "30141": "Command argument value error",
                            "30012": "Interlock alarm is on",
                            "30133": "Interlock alarm is on",
                            "30014": "Electricity alarm is on",
                            "30015": "Shutter alarm is on",
                            "30016": "Actuator alarm is on",
                            "30017": "Disk alarm is on",
                            "30018": "Data error alarm is on",
                            "30019": "Other alarm is on",
                            "30021": "Designated system is not defined",
                            "30022": "Designated system does not exist",
                            "30023": "Designated system is not detected",
                            "30031": "Waiting for initialization to complete",
                            "30032": "Under maintenance mode",
                            "30201": "External SYNC signal is under use",
                            "30204": "Disk rotation stopped",
                            "30301": "Shutter error",
                            "30302": "Shutter unopenable error",
                            "1": "Unknown serial communication error"}

    def commandResponse(self, command, timeout = 0.1):
        """Send *command* and return the W1's response value, or None on error/timeout.

        Responses have the form "<value>:<code>"; a code containing "N" signals
        an error whose numeric value is translated via self.error_codes.
        """
        # Clear buffer of old responses.
        self.tty.timeout = 0
        while (len(self.readline()) > 0):
            pass

        # Set timeout, then send the command and wait for a response.
        self.tty.timeout = timeout
        self.writeline(command)
        response = self.readline()

        # Check that we got a message within the timeout.
        if (len(response) > 0):
            parts = response.split(":")
            if (len(parts) < 2):
                # Fixed: a response without a ":" separator used to raise
                # ValueError on unpacking; treat it as a protocol error instead.
                print(">> Warning malformed w1 response", response)
                return None
            [value, code] = parts[:2]

            # Did we get an error?
            if "N" in code:  # Fixed bug in original version: if (code == "N\r"):
                try:
                    print(">> Warning w1 error", self.error_codes[value])
                except KeyError:
                    print(">> Warning unknown w1 error", value)
                return None
            else:
                print(value)
                print(code)
                return value
        else:
            return None
#
# The MIT License
#
# Copyright (c) 2017 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
| 39.792079 | 83 | 0.55362 |
import storm_control.sc_hardware.serial.RS232 as RS232
class W1SpinningDisk(RS232.RS232):
    """A serial (RS-232) interface to the W1 Spinning Disk from Yokogawa/Andor."""
    def __init__(self, **kwds):
        super().__init__(**kwds)
        # Map of W1 numeric status codes to human readable error messages.
        self.error_codes = {"30005": "Command name error",
                            "30006": "Command argument number error",
                            "30007": "Command argument value error",
                            "30141": "Command argument value error",
                            "30012": "Interlock alarm is on",
                            "30133": "Interlock alarm is on",
                            "30014": "Electricity alarm is on",
                            "30015": "Shutter alarm is on",
                            "30016": "Actuator alarm is on",
                            "30017": "Disk alarm is on",
                            "30018": "Data error alarm is on",
                            "30019": "Other alarm is on",
                            "30021": "Designated system is not defined",
                            "30022": "Designated system does not exist",
                            "30023": "Designated system is not detected",
                            "30031": "Waiting for initialization to complete",
                            "30032": "Under maintenance mode",
                            "30201": "External SYNC signal is under use",
                            "30204": "Disk rotation stopped",
                            "30301": "Shutter error",
                            "30302": "Shutter unopenable error",
                            "1": "Unknown serial communication error"}
    def commandResponse(self, command, timeout = 0.1):
        """Send *command* and return the W1's response value, or None on error/timeout."""
        # Drain any stale responses from the serial buffer first.
        self.tty.timeout = 0
        while (len(self.readline()) > 0):
            pass
        # Wait up to *timeout* seconds for the reply to this command.
        self.tty.timeout = timeout
        self.writeline(command)
        response = self.readline()
        if (len(response) > 0):
            # Responses look like "<value>:<code>"; a code containing "N"
            # indicates an error, looked up in self.error_codes.
            [value, code] = response.split(":")[:2]
            if "N" in code:
                try:
                    print(">> Warning w1 error", self.error_codes[value])
                except KeyError:
                    print(">> Warning unknown w1 error", value)
                return None
            else:
                # Successful response; echo value and code for debugging.
                print(value)
                print(code)
                return value
        else:
            # No response within the timeout.
            return None
| true | true |
f7f810c613e8337ac0f0288efca9ad1b3807e795 | 1,627 | py | Python | saleor/graphql/api.py | ballon3/PlanB | 890735d0c09f68e7dd603f577e3d5bcbf818a2ab | [
"CC-BY-4.0"
] | 2 | 2019-10-16T13:41:57.000Z | 2020-03-07T07:34:05.000Z | saleor/graphql/api.py | ysaakpr/saleor | 8060fa88b9f8e0f78db4adb69396a835e3fd03d8 | [
"CC-BY-4.0"
] | 14 | 2020-03-24T17:54:18.000Z | 2022-02-10T19:43:59.000Z | saleor/graphql/api.py | ysaakpr/saleor | 8060fa88b9f8e0f78db4adb69396a835e3fd03d8 | [
"CC-BY-4.0"
] | 2 | 2019-10-12T09:35:02.000Z | 2019-10-15T07:18:25.000Z | import graphene
from .account.schema import AccountMutations, AccountQueries
from .checkout.schema import CheckoutMutations, CheckoutQueries
from .core.schema import CoreMutations, CoreQueries
from .discount.schema import DiscountMutations, DiscountQueries
from .extensions.schema import ExtensionsMutations, ExtensionsQueries
from .giftcard.schema import GiftCardMutations, GiftCardQueries
from .menu.schema import MenuMutations, MenuQueries
from .order.schema import OrderMutations, OrderQueries
from .page.schema import PageMutations, PageQueries
from .payment.schema import PaymentMutations, PaymentQueries
from .product.schema import ProductMutations, ProductQueries
from .shipping.schema import ShippingMutations, ShippingQueries
from .shop.schema import ShopMutations, ShopQueries
from .translations.schema import TranslationQueries
from .webhook.schema import WebhookMutations, WebhookQueries
# Root GraphQL query type, aggregating the query mixins of every domain app.
# NOTE: a class docstring is intentionally avoided here - graphene would expose
# it as the GraphQL type description.
class Query(
    AccountQueries,
    CheckoutQueries,
    CoreQueries,
    DiscountQueries,
    ExtensionsQueries,
    GiftCardQueries,
    MenuQueries,
    OrderQueries,
    PageQueries,
    PaymentQueries,
    ProductQueries,
    ShippingQueries,
    ShopQueries,
    TranslationQueries,
    WebhookQueries,
):
    # Relay-style global node field for refetching any object by its global ID.
    node = graphene.Node.Field()
# Root GraphQL mutation type, aggregating the mutation mixins of every domain app.
class Mutations(
    AccountMutations,
    CheckoutMutations,
    CoreMutations,
    DiscountMutations,
    ExtensionsMutations,
    GiftCardMutations,
    MenuMutations,
    OrderMutations,
    PageMutations,
    PaymentMutations,
    ProductMutations,
    ShippingMutations,
    ShopMutations,
    WebhookMutations,
):
    pass
# The executable GraphQL schema combining all queries and mutations.
schema = graphene.Schema(Query, Mutations)
| 27.116667 | 69 | 0.791641 | import graphene
from .account.schema import AccountMutations, AccountQueries
from .checkout.schema import CheckoutMutations, CheckoutQueries
from .core.schema import CoreMutations, CoreQueries
from .discount.schema import DiscountMutations, DiscountQueries
from .extensions.schema import ExtensionsMutations, ExtensionsQueries
from .giftcard.schema import GiftCardMutations, GiftCardQueries
from .menu.schema import MenuMutations, MenuQueries
from .order.schema import OrderMutations, OrderQueries
from .page.schema import PageMutations, PageQueries
from .payment.schema import PaymentMutations, PaymentQueries
from .product.schema import ProductMutations, ProductQueries
from .shipping.schema import ShippingMutations, ShippingQueries
from .shop.schema import ShopMutations, ShopQueries
from .translations.schema import TranslationQueries
from .webhook.schema import WebhookMutations, WebhookQueries
# Root GraphQL query type, aggregating the query mixins of every domain app.
# NOTE: a class docstring is intentionally avoided here - graphene would expose
# it as the GraphQL type description.
class Query(
    AccountQueries,
    CheckoutQueries,
    CoreQueries,
    DiscountQueries,
    ExtensionsQueries,
    GiftCardQueries,
    MenuQueries,
    OrderQueries,
    PageQueries,
    PaymentQueries,
    ProductQueries,
    ShippingQueries,
    ShopQueries,
    TranslationQueries,
    WebhookQueries,
):
    # Relay-style global node field for refetching any object by its global ID.
    node = graphene.Node.Field()
# Root GraphQL mutation type, aggregating the mutation mixins of every domain app.
class Mutations(
    AccountMutations,
    CheckoutMutations,
    CoreMutations,
    DiscountMutations,
    ExtensionsMutations,
    GiftCardMutations,
    MenuMutations,
    OrderMutations,
    PageMutations,
    PaymentMutations,
    ProductMutations,
    ShippingMutations,
    ShopMutations,
    WebhookMutations,
):
    pass
# The executable GraphQL schema combining all queries and mutations.
schema = graphene.Schema(Query, Mutations)
| true | true |
f7f81101600db7bb9121ab36da21e5ca8e166a41 | 7,812 | py | Python | tbx/log.py | ronhanson/python-tbx | 7f5015bcc231b42617bdc3537fb39e5b05d4f7af | [
"MIT"
] | 2 | 2016-05-27T06:21:27.000Z | 2018-12-01T15:02:42.000Z | tbx/log.py | ronhanson/python-tbx | 7f5015bcc231b42617bdc3537fb39e5b05d4f7af | [
"MIT"
] | null | null | null | tbx/log.py | ronhanson/python-tbx | 7f5015bcc231b42617bdc3537fb39e5b05d4f7af | [
"MIT"
] | 2 | 2018-12-01T15:02:43.000Z | 2020-11-23T07:57:09.000Z | # coding: utf-8
from __future__ import print_function
"""
(c) 2013 - Ronan Delacroix
Logging Utils
:author: Ronan Delacroix
"""
import sys
import os
import socket
import logging
import logging.handlers
from . import code
def configure_logging_to_screen(debug=False):
    """Configure the root logger for console-only output.

    :param debug: when True, show DEBUG messages instead of INFO and above.
    """
    level = 'DEBUG' if debug else 'INFO'
    script_folder, app_name = code.get_app_name()
    screen_settings = {
        'LOGGING_SCREEN_LEVEL': level,
        'LOGGING_METHODS': ['SCREEN'],
        'LOGGING_SCREEN_FORMAT': '%(levelname)-8s| %(message)s',
    }
    configure_logger(logging.getLogger(), app_name, settings=screen_settings)
def configure_logging(log_name, settings={}, application_name=None, force=False):
    """Configure the *root* logger (thin wrapper around configure_logger)."""
    configure_logger(logging.getLogger(), log_name, settings=settings, application_name=application_name, force=force)
def configure_logger(logger, log_name, settings=None, application_name=None, force=False):
    """Install the configured logging handlers on *logger*.

    Settings used here: ``LOGGING_LEVEL`` (default ``'DEBUG'``) and
    ``LOGGING_METHODS``, a list drawn from ``'SCREEN'``, ``'SYSLOG'``,
    ``'FILE'`` and ``'MONGO'`` (default ``['SCREEN', 'FILE']``).  Handlers are
    installed once per logger unless *force* is True, in which case existing
    handlers are replaced.

    :param logger: the logging.Logger instance to configure.
    :param log_name: base name used for the file/syslog/mongo destinations.
    :param settings: configuration dict (default changed from a shared mutable
        ``{}`` to ``None`` - behaviour is identical for callers).
    :param application_name: overrides the auto-detected application name.
    :param force: re-install handlers even if this logger was already configured.
    """
    if settings is None:
        settings = {}
    log_level = settings.get('LOGGING_LEVEL', 'DEBUG')
    log_methods = settings.get('LOGGING_METHODS', ['SCREEN', 'FILE'])

    logger.setLevel(log_level)

    if not hasattr(logger, 'handlers_added') or force:
        # Remove all existing handlers first so we never double-log.
        # Fixed: iterating logger.handlers while removing from it skipped
        # every other handler; iterate over a copy instead.
        for handler in list(logger.handlers):
            logger.removeHandler(handler)

        # Install handlers that write the logs to screen, syslog, file, mongo.
        if 'SCREEN' in log_methods:
            add_screen_logging(logger, settings)

        if 'SYSLOG' in log_methods:
            add_syslog_logging(logger, log_name, settings)

        if 'FILE' in log_methods:
            add_file_logging(logger, log_name, application_name, settings)

        if 'MONGO' in log_methods:
            add_mongo_logging(logger, log_name, application_name, settings)

        logger.propagate = True
        logger.handlers_added = True
def add_screen_logging(logger, settings=None):
    """Attach a console StreamHandler to *logger*.

    Settings used: ``LOGGING_SCREEN_FORMAT`` and optional
    ``LOGGING_SCREEN_LEVEL``.  (Mutable default argument ``settings={}``
    replaced with ``None`` - behaviour unchanged.)
    """
    if settings is None:
        settings = {}
    screen_format = settings.get('LOGGING_SCREEN_FORMAT', '%(levelname)-8s| %(message)s')
    level = settings.get('LOGGING_SCREEN_LEVEL', None)
    write_to_screen_handler = logging.StreamHandler()
    screen_formatter = logging.Formatter(screen_format, '%Y-%m-%dT%H:%M:%S')
    write_to_screen_handler.setFormatter(screen_formatter)
    if level:
        write_to_screen_handler.setLevel(level)
    logger.addHandler(write_to_screen_handler)
def add_syslog_logging(logger, log_name, settings=None):
    """Attach a SysLogHandler to *logger*.

    The syslog socket comes from ``LOGGING_SYSLOG_ADDRESS``, defaulting to the
    platform's local socket (``/var/run/syslog`` on macOS, ``/dev/log``
    elsewhere).  Optional settings: ``LOGGING_SYSLOG_FORMAT`` and
    ``LOGGING_SYSLOG_LEVEL``.  (Mutable default ``settings={}`` replaced with
    ``None`` - behaviour unchanged.)
    """
    if settings is None:
        settings = {}
    # Guess the local syslog socket when none is configured.
    syslog_address = settings.get('LOGGING_SYSLOG_ADDRESS', None)
    level = settings.get('LOGGING_SYSLOG_LEVEL', None)
    if not syslog_address:
        if sys.platform == 'darwin':
            syslog_address = '/var/run/syslog'
        else:
            syslog_address = '/dev/log'
    # The message is truncated (%.1900s) to stay within syslog datagram limits.
    syslog_format = settings.get('LOGGING_SYSLOG_FORMAT', log_name+': [%(filename)s:%(funcName)s:%(lineno)d]\t%(levelname)s - %(message).1900s')
    write_to_syslog_handler = logging.handlers.SysLogHandler(address=syslog_address)
    syslog_formatter = logging.Formatter(syslog_format, '%Y-%m-%dT%H:%M:%S')
    write_to_syslog_handler.setFormatter(syslog_formatter)
    if level:
        write_to_syslog_handler.setLevel(level)
    logger.addHandler(write_to_syslog_handler)
def add_file_logging(logger, log_name, application_name, settings=None):
    """Attach a rotating file handler writing to ``<log_folder>/<log_name>.txt``.

    The folder comes from ``LOGGING_FILE_FOLDER`` (supporting ``<app_name>``
    and ``<name>`` placeholders) and falls back to a ``log`` directory next to
    the script when it cannot be created or written.  File logging is silently
    abandoned (with a console warning) if no folder is usable.
    """
    if settings is None:
        settings = {}
    (script_folder, app_name) = code.get_app_name()
    if not application_name:
        application_name = app_name

    log_folder = settings.get('LOGGING_FILE_FOLDER', None)
    if not log_folder:
        log_folder = os.path.join(script_folder, 'log')
    log_folder = log_folder.replace('<app_name>', application_name).replace('<name>', log_name)
    log_folder = os.path.abspath(log_folder)

    fallback_folder = os.path.join(script_folder, 'log')

    if not os.path.isdir(log_folder):
        try:
            os.makedirs(log_folder)
        except OSError:
            print("Warning : permission denied to log in folder '%s'. Will attempt to log in '%s'" %
                  (log_folder, fallback_folder))
            log_folder = fallback_folder
            if not os.path.isdir(log_folder):
                try:
                    os.makedirs(log_folder)
                except OSError:
                    print("Impossible to log with FILE handler to %s either, abandoning file logging." % log_folder)
                    return

    if not os.access(log_folder, os.W_OK):
        print("Warning : permission denied to log in folder '%s'. Will attempt to log in '%s'" %
              (log_folder, fallback_folder))
        log_folder = fallback_folder
        # Fixed: the original unconditionally called os.makedirs() here, which
        # raises (and aborted file logging) when the fallback folder already
        # existed even though it may have been perfectly usable.
        if not os.path.isdir(log_folder):
            try:
                os.makedirs(log_folder)
            except OSError:
                print("Impossible to log with FILE handler to %s either, abandoning file logging." % log_folder)
                return

    file_format = settings.get('LOGGING_FILE_FORMAT', None)
    log_file = os.path.join(log_folder, log_name + ".txt")
    level = settings.get('LOGGING_FILE_LEVEL', 'DEBUG')
    add_logging_file_handler(logger, log_file, file_format, level=level)
def add_logging_file_handler(logger, log_file, format=None, level=None):
    """Attach a rotating file handler (10 MB x 10 backups, UTF-8) to *logger*."""
    fmt = format or '[%(asctime)s] [%(filename)s:%(funcName)s:%(lineno)d]\t%(levelname)-8s - %(message)s'
    handler = logging.handlers.RotatingFileHandler(
        log_file, maxBytes=10000000, backupCount=10, encoding='UTF-8')
    handler.setFormatter(logging.Formatter(fmt, '%Y-%m-%dT%H:%M:%S'))
    if level:
        handler.setLevel(level)
    logger.addHandler(handler)
def add_mongo_logging(logger, log_name, application_name, settings=None):
    """Attach a (possibly buffered) log4mongo MongoDB handler to *logger*.

    Requires the optional ``log4mongo`` package; when it is missing a warning
    is printed to stderr and the function is a no-op.  Connection and
    collection parameters are read from the ``LOGGING_MONGO_*`` settings.
    (Mutable default ``settings={}`` replaced with ``None`` for consistency
    with the other ``add_*_logging`` helpers - behaviour unchanged.)
    """
    if settings is None:
        settings = {}
    (script_folder, app_name) = code.get_app_name()
    if not application_name:
        application_name = app_name

    try:
        import log4mongo.handlers
    except ImportError:
        print("Impossible to log with MONGO handler as log4mongo library is not available.", file=sys.stderr)
        return

    class MyMongoFormatter(log4mongo.handlers.MongoFormatter):
        # Trim noisy fields from the default document and tag every entry
        # with the log name and the host it came from.
        def format(self, record):
            document = super(MyMongoFormatter, self).format(record)
            del document['threadName']
            del document['thread']
            del document['loggerName']
            del document['module']
            del document['method']
            document['log_name'] = log_name
            document['hostname'] = socket.gethostname()
            return document

    mongo_handler_class = log4mongo.handlers.MongoHandler
    mongo_handler_args = {
        'host': settings.get('LOGGING_MONGO_HOST', "localhost"),
        'port': settings.get('LOGGING_MONGO_PORT', 27017),
        'database_name': settings.get('LOGGING_MONGO_DATABASE', application_name),
        'collection': settings.get('LOGGING_MONGO_COLLECTION', log_name+"_logs"),
        'capped': settings.get('LOGGING_MONGO_CAPPED', True),
        'capped_max': settings.get('LOGGING_MONGO_CAPPED_MAX', 100000),
        'capped_size': settings.get('LOGGING_MONGO_CAPPED_SIZE', 10000000),
        'formatter': MyMongoFormatter()
    }

    # Optional write buffering: flush every N records, on a timer, or
    # immediately for records at/above the early-flush level.
    if settings.get('LOGGING_MONGO_BUFFER_SIZE', False):
        mongo_handler_class = log4mongo.handlers.BufferedMongoHandler
        mongo_handler_args.update({
            'buffer_size': settings.get('LOGGING_MONGO_BUFFER_SIZE', 20),
            'buffer_early_flush_level': settings.get('LOGGING_MONGO_BUFFER_FLUSH_LEVEL', logging.CRITICAL),
            'buffer_periodical_flush_timing': settings.get('LOGGING_MONGO_BUFFER_FLUSH_TIMER', 5.0)
        })

    log4mongo_handler = mongo_handler_class(**mongo_handler_args)
    log4mongo_handler.setLevel(settings.get('LOGGING_MONGO_LEVEL', 'DEBUG'))
    logger.addHandler(log4mongo_handler)
| 38.673267 | 144 | 0.684588 |
from __future__ import print_function
import sys
import os
import socket
import logging
import logging.handlers
from . import code
def configure_logging_to_screen(debug=False):
    """Configure the root logger for console-only output (DEBUG when *debug* is True)."""
    level = 'INFO'
    if debug:
        level = 'DEBUG'
    (script_folder, app_name) = code.get_app_name()
    settings = {'LOGGING_SCREEN_LEVEL': level, 'LOGGING_METHODS': ['SCREEN'], 'LOGGING_SCREEN_FORMAT': '%(levelname)-8s| %(message)s'}
    configure_logger(logging.getLogger(), app_name, settings=settings)
def configure_logging(log_name, settings={}, application_name=None, force=False):
    """Configure the *root* logger (thin wrapper around configure_logger)."""
    configure_logger(logging.getLogger(), log_name, settings=settings, application_name=application_name, force=force)
def configure_logger(logger, log_name, settings={}, application_name=None, force=False):
    """Install the configured logging handlers on *logger*.

    Settings used: LOGGING_LEVEL (default 'DEBUG') and LOGGING_METHODS, a
    list drawn from 'SCREEN', 'SYSLOG', 'FILE' and 'MONGO' (default
    ['SCREEN', 'FILE']).  Handlers are installed once per logger unless
    *force* is True.
    """
    log_level = settings.get('LOGGING_LEVEL', 'DEBUG')
    log_methods = settings.get('LOGGING_METHODS', ['SCREEN', 'FILE'])
    logger.setLevel(log_level)
    if not hasattr(logger, 'handlers_added') or force:
        # Remove existing handlers first to avoid double logging.
        # NOTE(review): removing while iterating logger.handlers can skip
        # entries - confirm and iterate over a copy if that matters here.
        for handler in logger.handlers:
            logger.removeHandler(handler)
        # Install the requested handler types.
        if 'SCREEN' in log_methods:
            add_screen_logging(logger, settings)
        if 'SYSLOG' in log_methods:
            add_syslog_logging(logger, log_name, settings)
        if 'FILE' in log_methods:
            add_file_logging(logger, log_name, application_name, settings)
        if 'MONGO' in log_methods:
            add_mongo_logging(logger, log_name, application_name, settings)
        logger.propagate = True
        logger.handlers_added = True
def add_screen_logging(logger, settings={}):
    """Attach a console StreamHandler to *logger* (LOGGING_SCREEN_* settings)."""
    screen_format = settings.get('LOGGING_SCREEN_FORMAT', '%(levelname)-8s| %(message)s')
    level = settings.get('LOGGING_SCREEN_LEVEL', None)
    write_to_screen_handler = logging.StreamHandler()
    screen_formatter = logging.Formatter(screen_format, '%Y-%m-%dT%H:%M:%S')
    write_to_screen_handler.setFormatter(screen_formatter)
    if level:
        write_to_screen_handler.setLevel(level)
    logger.addHandler(write_to_screen_handler)
def add_syslog_logging(logger, log_name, settings={}):
    """Attach a SysLogHandler to *logger* (LOGGING_SYSLOG_* settings).

    Defaults to the platform's local syslog socket: /var/run/syslog on macOS,
    /dev/log elsewhere.
    """
    syslog_address = settings.get('LOGGING_SYSLOG_ADDRESS', None)
    level = settings.get('LOGGING_SYSLOG_LEVEL', None)
    if not syslog_address:
        # Guess the local syslog socket when none is configured.
        if sys.platform == 'darwin':
            syslog_address = '/var/run/syslog'
        else:
            syslog_address = '/dev/log'
    # The message is truncated (%.1900s) to stay within syslog datagram limits.
    syslog_format = settings.get('LOGGING_SYSLOG_FORMAT', log_name+': [%(filename)s:%(funcName)s:%(lineno)d]\t%(levelname)s - %(message).1900s')
    write_to_syslog_handler = logging.handlers.SysLogHandler(address=syslog_address)
    syslog_formatter = logging.Formatter(syslog_format, '%Y-%m-%dT%H:%M:%S')
    write_to_syslog_handler.setFormatter(syslog_formatter)
    if level:
        write_to_syslog_handler.setLevel(level)
    logger.addHandler(write_to_syslog_handler)
def add_file_logging(logger, log_name, application_name, settings={}):
    """Attach a rotating file handler writing to <log_folder>/<log_name>.txt.

    The folder comes from LOGGING_FILE_FOLDER (supporting <app_name> and
    <name> placeholders) and falls back to a 'log' directory next to the
    script when it cannot be created or written to.
    """
    (script_folder, app_name) = code.get_app_name()
    if not application_name:
        application_name = app_name
    log_folder = settings.get('LOGGING_FILE_FOLDER', None)
    if not log_folder:
        log_folder = os.path.join(script_folder, 'log')
    log_folder = log_folder.replace('<app_name>', application_name).replace('<name>', log_name)
    log_folder = os.path.abspath(log_folder)
    # Try to create the configured folder; on failure fall back to the
    # script-local 'log' directory.
    if not os.path.isdir(log_folder):
        try:
            os.makedirs(log_folder)
        except:
            print("Warning : permission denied to log in folder '%s'. Will attempt to log in '%s'" %
                  (log_folder, os.path.join(script_folder, 'log')))
            log_folder = os.path.join(script_folder, 'log')
            if not os.path.isdir(log_folder):
                try:
                    os.makedirs(log_folder)
                except:
                    print("Impossible to log with FILE handler to %s either, abandoning file logging." % log_folder)
                    return
    # The folder may exist but be read-only; fall back in that case too.
    # NOTE(review): os.makedirs() below raises when the fallback folder
    # already exists, aborting file logging - confirm whether intended.
    if not os.access(log_folder, os.W_OK):
        print("Warning : permission denied to log in folder '%s'. Will attempt to log in '%s'" %
              (log_folder, os.path.join(script_folder, 'log')))
        log_folder = os.path.join(script_folder, 'log')
        try:
            os.makedirs(log_folder)
        except:
            print("Impossible to log with FILE handler to %s either, abandoning file logging." % log_folder)
            return
    file_format = settings.get('LOGGING_FILE_FORMAT', None)
    log_file = os.path.join(log_folder, log_name + ".txt")
    level = settings.get('LOGGING_FILE_LEVEL', 'DEBUG')
    add_logging_file_handler(logger, log_file, file_format, level=level)
def add_logging_file_handler(logger, log_file, format=None, level=None):
    """Attach a rotating file handler (10 MB x 10 backups, UTF-8) to *logger*."""
    if not format:
        format = '[%(asctime)s] [%(filename)s:%(funcName)s:%(lineno)d]\t%(levelname)-8s - %(message)s'
    write_to_file_handler = logging.handlers.RotatingFileHandler(log_file, maxBytes=10000000, backupCount=10, encoding='UTF-8')
    file_formatter = logging.Formatter(format, '%Y-%m-%dT%H:%M:%S')
    write_to_file_handler.setFormatter(file_formatter)
    if level:
        write_to_file_handler.setLevel(level)
    logger.addHandler(write_to_file_handler)
def add_mongo_logging(logger, log_name, application_name, settings={}):
    """Attach a (possibly buffered) log4mongo MongoDB handler to *logger*.

    No-op (with a warning on stderr) when the optional log4mongo package is
    not installed.  Connection parameters come from LOGGING_MONGO_* settings.
    """
    (script_folder, app_name) = code.get_app_name()
    if not application_name:
        application_name = app_name
    try:
        import log4mongo.handlers
    except ImportError:
        print("Impossible to log with MONGO handler as log4mongo library is not available.", file=sys.stderr)
        return
    class MyMongoFormatter(log4mongo.handlers.MongoFormatter):
        # Trim noisy fields from the default document and tag each entry with
        # the log name and the originating host.
        def format(self, record):
            document = super(MyMongoFormatter, self).format(record)
            del document['threadName']
            del document['thread']
            del document['loggerName']
            del document['module']
            del document['method']
            document['log_name'] = log_name
            document['hostname'] = socket.gethostname()
            return document
    mongo_handler_class = log4mongo.handlers.MongoHandler
    mongo_handler_args = {
        'host': settings.get('LOGGING_MONGO_HOST', "localhost"),
        'port': settings.get('LOGGING_MONGO_PORT', 27017),
        'database_name': settings.get('LOGGING_MONGO_DATABASE', application_name),
        'collection': settings.get('LOGGING_MONGO_COLLECTION', log_name+"_logs"),
        'capped': settings.get('LOGGING_MONGO_CAPPED', True),
        'capped_max': settings.get('LOGGING_MONGO_CAPPED_MAX', 100000),
        'capped_size': settings.get('LOGGING_MONGO_CAPPED_SIZE', 10000000),
        'formatter' : MyMongoFormatter()
    }
    # Optional write buffering: flush every N records, on a timer, or
    # immediately for records at/above the early-flush level.
    if settings.get('LOGGING_MONGO_BUFFER_SIZE', False):
        mongo_handler_class = log4mongo.handlers.BufferedMongoHandler
        mongo_handler_args.update({
            'buffer_size': settings.get('LOGGING_MONGO_BUFFER_SIZE', 20),
            'buffer_early_flush_level': settings.get('LOGGING_MONGO_BUFFER_FLUSH_LEVEL', logging.CRITICAL),
            'buffer_periodical_flush_timing': settings.get('LOGGING_MONGO_BUFFER_FLUSH_TIMER', 5.0)
        })
    log4mongo_handler = mongo_handler_class(**mongo_handler_args)
    log4mongo_handler.setLevel(settings.get('LOGGING_MONGO_LEVEL', 'DEBUG'))
    logger.addHandler(log4mongo_handler)
| true | true |
f7f8111b364f25e2d551da6532929bb85b43b524 | 4,831 | py | Python | tests/_ntlm_raw/test_security.py | dhirschfeld/pyspnego | 5ce9494e7f582fbcc533c8bf6c3dce4a946c8138 | [
"MIT"
] | null | null | null | tests/_ntlm_raw/test_security.py | dhirschfeld/pyspnego | 5ce9494e7f582fbcc533c8bf6c3dce4a946c8138 | [
"MIT"
] | 1 | 2020-08-27T03:48:49.000Z | 2020-08-27T03:48:49.000Z | tests/_ntlm_raw/test_security.py | dhirschfeld/pyspnego | 5ce9494e7f582fbcc533c8bf6c3dce4a946c8138 | [
"MIT"
] | null | null | null | # Copyright: (c) 2020, Jordan Borean (@jborean93) <jborean93@gmail.com>
# MIT License (see LICENSE or https://opensource.org/licenses/MIT)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type # noqa (fixes E402 for the imports below)
import re
import pytest
from spnego._ntlm_raw.crypto import (
compute_response_v1,
lmowfv1,
ntowfv1,
rc4init,
sealkey,
signkey,
)
from spnego._ntlm_raw.messages import (
NegotiateFlags,
)
from spnego._ntlm_raw.security import (
seal,
sign,
)
from spnego._text import (
to_bytes,
)
from spnego.exceptions import (
OperationNotAvailableError,
)
from .._ntlm_raw import (
TEST_CLIENT_CHALLENGE,
TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS,
TEST_NTLMV1_FLAGS,
TEST_PASSWD,
TEST_RANDOM_SESSION_KEY,
TEST_SERVER_CHALLENGE,
)
def test_seal_ntlmv1():
    # Verifies SEAL() for plain NTLMv1 against the worked example in MS-NLMP:
    # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/9e2b483e-d185-4feb-aa4f-db6e2c0c49d9
    seal_key = sealkey(TEST_NTLMV1_FLAGS, TEST_RANDOM_SESSION_KEY, usage='initiate')
    seal_handle = rc4init(seal_key)
    sign_key = signkey(TEST_NTLMV1_FLAGS, TEST_RANDOM_SESSION_KEY, usage='initiate')
    b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
    actual_msg, actual_signature = seal(TEST_NTLMV1_FLAGS, seal_handle, sign_key, 0, b_data)
    # Expected RC4 ciphertext from the spec example.
    assert actual_msg == b"\x56\xFE\x04\xD8\x61\xF9\x31\x9A\xF0\xD7\x23\x8A\x2E\x3B\x4D\x45" \
                         b"\x7F\xB8"
    # The docs example seems to keep the random pad in the signature even though the actual function definition sets
    # that to 0x00000000. Assert the actual working implementation that has been tested against MS servers.
    assert actual_signature == b"\x01\x00\x00\x00\x00\x00\x00\x00\x09\xDC\xD1\xDF\x2E\x45\x9D\x36"
def test_seal_ntlmv1_with_ess():
    # Verifies SEAL() for NTLMv1 with extended session security against the
    # worked example in MS-NLMP:
    # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/052aef59-b55b-4800-b4a8-e93eca1600d6
    # With ESS the key exchange key is derived from the computed response
    # rather than used directly.
    key_exchange_key = compute_response_v1(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, ntowfv1(TEST_PASSWD),
                                           lmowfv1(TEST_PASSWD), TEST_SERVER_CHALLENGE, TEST_CLIENT_CHALLENGE)[2]
    seal_key = sealkey(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, key_exchange_key, usage='initiate')
    seal_handle = rc4init(seal_key)
    sign_key = signkey(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, key_exchange_key, usage='initiate')
    b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
    actual_msg, actual_signature = seal(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, seal_handle, sign_key, 0, b_data)
    # Expected ciphertext and signature from the spec example.
    assert actual_msg == b"\xA0\x23\x72\xF6\x53\x02\x73\xF3\xAA\x1E\xB9\x01\x90\xCE\x52\x00" \
                         b"\xC9\x9D"
    assert actual_signature == b"\x01\x00\x00\x00\xFF\x2A\xEB\x52\xF6\x81\x79\x3A\x00\x00\x00\x00"
def test_seal_ntlmv2():
    # Verifies SEAL() for NTLMv2 (128-bit keys with key exchange) against the
    # worked example in MS-NLMP:
    # https://docs.microsoft.com/en-us/openspecs/windows_protocols/ms-nlmp/54973495-20d2-49e8-9925-c399a403ed4a
    flags = NegotiateFlags.seal | NegotiateFlags.sign | NegotiateFlags.extended_session_security | \
        NegotiateFlags.key_exch | NegotiateFlags.key_128
    seal_key = sealkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
    seal_handle = rc4init(seal_key)
    sign_key = signkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
    b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
    actual_msg, actual_signature = seal(flags, seal_handle, sign_key, 0, b_data)
    # Expected ciphertext and signature from the spec example.
    assert actual_msg == b"\x54\xE5\x01\x65\xBF\x19\x36\xDC\x99\x60\x20\xC1\x81\x1B\x0F\x06" \
                         b"\xFB\x5F"
    assert actual_signature == b"\x01\x00\x00\x00\x7F\xB3\x8E\xC5\xC5\x5D\x49\x76\x00\x00\x00\x00"
def test_seal_ntlmv2_no_key_exch():
flags = NegotiateFlags.seal | NegotiateFlags.sign | NegotiateFlags.extended_session_security | \
NegotiateFlags.key_128
seal_key = sealkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
seal_handle = rc4init(seal_key)
sign_key = signkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
actual_msg, actual_signature = seal(flags, seal_handle, sign_key, 0, b_data)
assert actual_msg == b"\x54\xE5\x01\x65\xBF\x19\x36\xDC\x99\x60\x20\xC1\x81\x1B\x0F\x06" \
b"\xFB\x5F"
assert actual_signature == b"\x01\x00\x00\x00\x70\x35\x28\x51\xF2\x56\x43\x09\x00\x00\x00\x00"
def test_sign_with_always_sign():
actual = sign(NegotiateFlags.always_sign, None, b"", 0, b"data")
assert actual == b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
def test_sign_no_integrity():
expected = "SpnegoError (16): Operation not supported or available, Context: Signing without integrity."
with pytest.raises(OperationNotAvailableError, match=re.escape(expected)):
sign(0, None, b"", 0, b"data")
| 38.959677 | 116 | 0.723867 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
import pytest
from spnego._ntlm_raw.crypto import (
compute_response_v1,
lmowfv1,
ntowfv1,
rc4init,
sealkey,
signkey,
)
from spnego._ntlm_raw.messages import (
NegotiateFlags,
)
from spnego._ntlm_raw.security import (
seal,
sign,
)
from spnego._text import (
to_bytes,
)
from spnego.exceptions import (
OperationNotAvailableError,
)
from .._ntlm_raw import (
TEST_CLIENT_CHALLENGE,
TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS,
TEST_NTLMV1_FLAGS,
TEST_PASSWD,
TEST_RANDOM_SESSION_KEY,
TEST_SERVER_CHALLENGE,
)
def test_seal_ntlmv1():
seal_key = sealkey(TEST_NTLMV1_FLAGS, TEST_RANDOM_SESSION_KEY, usage='initiate')
seal_handle = rc4init(seal_key)
sign_key = signkey(TEST_NTLMV1_FLAGS, TEST_RANDOM_SESSION_KEY, usage='initiate')
b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
actual_msg, actual_signature = seal(TEST_NTLMV1_FLAGS, seal_handle, sign_key, 0, b_data)
assert actual_msg == b"\x56\xFE\x04\xD8\x61\xF9\x31\x9A\xF0\xD7\x23\x8A\x2E\x3B\x4D\x45" \
b"\x7F\xB8"
assert actual_signature == b"\x01\x00\x00\x00\x00\x00\x00\x00\x09\xDC\xD1\xDF\x2E\x45\x9D\x36"
def test_seal_ntlmv1_with_ess():
key_exchange_key = compute_response_v1(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, ntowfv1(TEST_PASSWD),
lmowfv1(TEST_PASSWD), TEST_SERVER_CHALLENGE, TEST_CLIENT_CHALLENGE)[2]
seal_key = sealkey(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, key_exchange_key, usage='initiate')
seal_handle = rc4init(seal_key)
sign_key = signkey(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, key_exchange_key, usage='initiate')
b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
actual_msg, actual_signature = seal(TEST_NTLMV1_CLIENT_CHALLENGE_FLAGS, seal_handle, sign_key, 0, b_data)
assert actual_msg == b"\xA0\x23\x72\xF6\x53\x02\x73\xF3\xAA\x1E\xB9\x01\x90\xCE\x52\x00" \
b"\xC9\x9D"
assert actual_signature == b"\x01\x00\x00\x00\xFF\x2A\xEB\x52\xF6\x81\x79\x3A\x00\x00\x00\x00"
def test_seal_ntlmv2():
flags = NegotiateFlags.seal | NegotiateFlags.sign | NegotiateFlags.extended_session_security | \
NegotiateFlags.key_exch | NegotiateFlags.key_128
seal_key = sealkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
seal_handle = rc4init(seal_key)
sign_key = signkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
actual_msg, actual_signature = seal(flags, seal_handle, sign_key, 0, b_data)
assert actual_msg == b"\x54\xE5\x01\x65\xBF\x19\x36\xDC\x99\x60\x20\xC1\x81\x1B\x0F\x06" \
b"\xFB\x5F"
assert actual_signature == b"\x01\x00\x00\x00\x7F\xB3\x8E\xC5\xC5\x5D\x49\x76\x00\x00\x00\x00"
def test_seal_ntlmv2_no_key_exch():
flags = NegotiateFlags.seal | NegotiateFlags.sign | NegotiateFlags.extended_session_security | \
NegotiateFlags.key_128
seal_key = sealkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
seal_handle = rc4init(seal_key)
sign_key = signkey(flags, TEST_RANDOM_SESSION_KEY, usage='initiate')
b_data = to_bytes(u"Plaintext", encoding='utf-16-le')
actual_msg, actual_signature = seal(flags, seal_handle, sign_key, 0, b_data)
assert actual_msg == b"\x54\xE5\x01\x65\xBF\x19\x36\xDC\x99\x60\x20\xC1\x81\x1B\x0F\x06" \
b"\xFB\x5F"
assert actual_signature == b"\x01\x00\x00\x00\x70\x35\x28\x51\xF2\x56\x43\x09\x00\x00\x00\x00"
def test_sign_with_always_sign():
actual = sign(NegotiateFlags.always_sign, None, b"", 0, b"data")
assert actual == b"\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
def test_sign_no_integrity():
expected = "SpnegoError (16): Operation not supported or available, Context: Signing without integrity."
with pytest.raises(OperationNotAvailableError, match=re.escape(expected)):
sign(0, None, b"", 0, b"data")
| true | true |
f7f8129e746e63705f1dee8a105616c1e0b1cb4d | 15,100 | py | Python | train/tasks/semantic/dataset/kitti/parser.py | andrewkouri/lidar-bonnetal | a0b5c6aba530701084ac66a02532689ed580f934 | [
"MIT"
] | null | null | null | train/tasks/semantic/dataset/kitti/parser.py | andrewkouri/lidar-bonnetal | a0b5c6aba530701084ac66a02532689ed580f934 | [
"MIT"
] | null | null | null | train/tasks/semantic/dataset/kitti/parser.py | andrewkouri/lidar-bonnetal | a0b5c6aba530701084ac66a02532689ed580f934 | [
"MIT"
] | 2 | 2020-08-29T06:24:13.000Z | 2021-06-25T22:13:22.000Z | import os
import numpy as np
import torch
from torch.utils.data import Dataset
from common.laserscan import LaserScan, SemLaserScan
EXTENSIONS_SCAN = ['.bin']
EXTENSIONS_LABEL = ['.label']
def is_scan(filename):
return any(filename.endswith(ext) for ext in EXTENSIONS_SCAN)
def is_label(filename):
return any(filename.endswith(ext) for ext in EXTENSIONS_LABEL)
class SemanticKitti(Dataset):
def __init__(self, root, # directory where data is
sequences, # sequences for this data (e.g. [1,3,4,6])
labels, # label dict: (e.g 10: "car")
color_map, # colors dict bgr (e.g 10: [255, 0, 0])
learning_map, # classes to learn (0 to N-1 for xentropy)
learning_map_inv, # inverse of previous (recover labels)
sensor, # sensor to parse scans from
max_points=150000, # max number of points present in dataset
gt=True): # send ground truth?
# save deats
self.root = os.path.join(root, "sequences")
self.sequences = sequences
self.labels = labels
self.color_map = color_map
self.learning_map = learning_map
self.learning_map_inv = learning_map_inv
self.sensor = sensor
self.sensor_img_H = sensor["img_prop"]["height"]
self.sensor_img_W = sensor["img_prop"]["width"]
self.sensor_img_means = torch.tensor(sensor["img_means"],
dtype=torch.float)
self.sensor_img_stds = torch.tensor(sensor["img_stds"],
dtype=torch.float)
self.sensor_fov_up = sensor["fov_up"]
self.sensor_fov_down = sensor["fov_down"]
self.max_points = max_points
self.gt = gt
# get number of classes (can't be len(self.learning_map) because there
# are multiple repeated entries, so the number that matters is how many
# there are for the xentropy)
self.nclasses = len(self.learning_map_inv)
# sanity checks
# make sure directory exists
if os.path.isdir(self.root):
print("Sequences folder exists! Using sequences from %s" % self.root)
else:
raise ValueError("Sequences folder doesn't exist! Exiting...")
# make sure labels is a dict
assert (isinstance(self.labels, dict))
# make sure color_map is a dict
assert (isinstance(self.color_map, dict))
# make sure learning_map is a dict
assert (isinstance(self.learning_map, dict))
# make sure sequences is a list
assert (isinstance(self.sequences, list))
# placeholder for filenames
self.scan_files = []
self.label_files = []
# fill in with names, checking that all sequences are complete
for seq in self.sequences:
# to string
seq = '{0:02d}'.format(int(seq))
print("parsing seq {}".format(seq))
# get paths for each
scan_path = os.path.join(self.root, seq, "velodyne")
label_path = os.path.join(self.root, seq, "labels")
# get files
scan_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(scan_path)) for f in fn if is_scan(f)]
label_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(label_path)) for f in fn if is_label(f)]
# check all scans have labels
if self.gt:
assert (len(scan_files) == len(label_files))
# extend list
self.scan_files.extend(scan_files)
self.label_files.extend(label_files)
# sort for correspondance
self.scan_files.sort()
self.label_files.sort()
print("Using {} scans from sequences {}".format(len(self.scan_files),
self.sequences))
def __getitem__(self, index):
# get item in tensor shape
scan_file = self.scan_files[index]
if self.gt:
label_file = self.label_files[index]
# open a semantic laserscan
if self.gt:
scan = SemLaserScan(self.color_map,
project=True,
H=self.sensor_img_H,
W=self.sensor_img_W,
fov_up=self.sensor_fov_up,
fov_down=self.sensor_fov_down)
else:
scan = LaserScan(project=True,
H=self.sensor_img_H,
W=self.sensor_img_W,
fov_up=self.sensor_fov_up,
fov_down=self.sensor_fov_down)
# open and obtain scan
scan.open_scan(scan_file)
if self.gt:
scan.open_label(label_file)
# map unused classes to used classes (also for projection)
scan.sem_label = self.map(scan.sem_label, self.learning_map)
scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)
# make a tensor of the uncompressed data (with the max num points)
unproj_n_points = scan.points.shape[0]
unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)
unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
if self.gt:
unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)
unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
else:
unproj_labels = []
# get points and labels
proj_range = torch.from_numpy(scan.proj_range).clone()
proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
proj_remission = torch.from_numpy(scan.proj_remission).clone()
proj_mask = torch.from_numpy(scan.proj_mask)
if self.gt:
proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
proj_labels = proj_labels * proj_mask
else:
proj_labels = []
proj_x = torch.full([self.max_points], -1, dtype=torch.long)
proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
proj_y = torch.full([self.max_points], -1, dtype=torch.long)
proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
proj = torch.cat([proj_range.unsqueeze(0).clone(),
proj_xyz.clone().permute(2, 0, 1),
proj_remission.unsqueeze(0).clone()])
proj = (proj - self.sensor_img_means[:, None, None]
) / self.sensor_img_stds[:, None, None]
proj = proj * proj_mask.float()
# get name and sequence
path_norm = os.path.normpath(scan_file)
path_split = path_norm.split(os.sep)
path_seq = path_split[-3]
path_name = path_split[-1].replace(".bin", ".label")
# print("path_norm: ", path_norm)
# print("path_seq", path_seq)
# print("path_name", path_name)
# return
return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
def __len__(self):
return len(self.scan_files)
@staticmethod
def map(label, mapdict):
# put label from original values to xentropy
# or vice-versa, depending on dictionary values
# make learning map a lookup table
maxkey = 0
for key, data in mapdict.items():
if isinstance(data, list):
nel = len(data)
else:
nel = 1
if key > maxkey:
maxkey = key
# +100 hack making lut bigger just in case there are unknown labels
if nel > 1:
lut = np.zeros((maxkey + 100, nel), dtype=np.int32)
else:
lut = np.zeros((maxkey + 100), dtype=np.int32)
for key, data in mapdict.items():
try:
lut[key] = data
except IndexError:
print("Wrong key ", key)
# do the mapping
return lut[label]
class Parser:
# standard conv, BN, relu
def __init__(self,
root, # directory for data
train_sequences, # sequences to train
valid_sequences, # sequences to validate.
test_sequences, # sequences to test (if none, don't get)
labels, # labels in data
color_map, # color for each label
learning_map, # mapping for training labels
learning_map_inv, # recover labels from xentropy
sensor, # sensor to use
max_points, # max points in each scan in entire dataset
batch_size, # batch size for train and val
workers, # threads to load data
gt=True, # get gt?
shuffle_train=True): # shuffle training set?
super(Parser, self).__init__()
# if I am training, get the dataset
self.root = root
self.train_sequences = train_sequences
self.valid_sequences = valid_sequences
self.test_sequences = test_sequences
self.labels = labels
self.color_map = color_map
self.learning_map = learning_map
self.learning_map_inv = learning_map_inv
self.sensor = sensor
self.max_points = max_points
self.batch_size = batch_size
self.workers = workers
self.gt = gt
self.shuffle_train = shuffle_train
# number of classes that matters is the one for xentropy
self.nclasses = len(self.learning_map_inv)
# Data loading code
self.train_dataset = SemanticKitti(root=self.root,
sequences=self.train_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=self.gt)
self.trainloader = torch.utils.data.DataLoader(self.train_dataset,
batch_size=self.batch_size,
shuffle=self.shuffle_train,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.trainloader) > 0
self.trainiter = iter(self.trainloader)
self.valid_dataset = SemanticKitti(root=self.root,
sequences=self.valid_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=self.gt)
self.validloader = torch.utils.data.DataLoader(self.valid_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.validloader) > 0
self.validiter = iter(self.validloader)
if self.test_sequences:
self.test_dataset = SemanticKitti(root=self.root,
sequences=self.test_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=False)
self.testloader = torch.utils.data.DataLoader(self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.testloader) > 0
self.testiter = iter(self.testloader)
def get_train_batch(self):
scans = self.trainiter.next()
return scans
def get_train_set(self):
return self.trainloader
def get_valid_batch(self):
scans = self.validiter.next()
return scans
def get_valid_set(self):
return self.validloader
def get_test_batch(self):
scans = self.testiter.next()
return scans
def get_test_set(self):
return self.testloader
def get_train_size(self):
return len(self.trainloader)
def get_valid_size(self):
return len(self.validloader)
def get_test_size(self):
return len(self.testloader)
def get_n_classes(self):
return self.nclasses
def get_original_class_string(self, idx):
return self.labels[idx]
def get_xentropy_class_string(self, idx):
return self.labels[self.learning_map_inv[idx]]
def to_original(self, label):
# put label in original values
return SemanticKitti.map(label, self.learning_map_inv)
def to_xentropy(self, label):
# put label in xentropy values
return SemanticKitti.map(label, self.learning_map)
def to_color(self, label):
# put label in original values
label = SemanticKitti.map(label, self.learning_map_inv)
# put label in color
return SemanticKitti.map(label, self.color_map)
| 41.369863 | 195 | 0.536358 | import os
import numpy as np
import torch
from torch.utils.data import Dataset
from common.laserscan import LaserScan, SemLaserScan
EXTENSIONS_SCAN = ['.bin']
EXTENSIONS_LABEL = ['.label']
def is_scan(filename):
return any(filename.endswith(ext) for ext in EXTENSIONS_SCAN)
def is_label(filename):
return any(filename.endswith(ext) for ext in EXTENSIONS_LABEL)
class SemanticKitti(Dataset):
def __init__(self, root,
sequences,
labels,
color_map,
learning_map,
learning_map_inv,
sensor,
max_points=150000,
gt=True):
self.root = os.path.join(root, "sequences")
self.sequences = sequences
self.labels = labels
self.color_map = color_map
self.learning_map = learning_map
self.learning_map_inv = learning_map_inv
self.sensor = sensor
self.sensor_img_H = sensor["img_prop"]["height"]
self.sensor_img_W = sensor["img_prop"]["width"]
self.sensor_img_means = torch.tensor(sensor["img_means"],
dtype=torch.float)
self.sensor_img_stds = torch.tensor(sensor["img_stds"],
dtype=torch.float)
self.sensor_fov_up = sensor["fov_up"]
self.sensor_fov_down = sensor["fov_down"]
self.max_points = max_points
self.gt = gt
# are multiple repeated entries, so the number that matters is how many
# there are for the xentropy)
self.nclasses = len(self.learning_map_inv)
# sanity checks
# make sure directory exists
if os.path.isdir(self.root):
print("Sequences folder exists! Using sequences from %s" % self.root)
else:
raise ValueError("Sequences folder doesn't exist! Exiting...")
assert (isinstance(self.labels, dict))
assert (isinstance(self.color_map, dict))
assert (isinstance(self.learning_map, dict))
assert (isinstance(self.sequences, list))
self.scan_files = []
self.label_files = []
for seq in self.sequences:
seq = '{0:02d}'.format(int(seq))
print("parsing seq {}".format(seq))
scan_path = os.path.join(self.root, seq, "velodyne")
label_path = os.path.join(self.root, seq, "labels")
scan_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(scan_path)) for f in fn if is_scan(f)]
label_files = [os.path.join(dp, f) for dp, dn, fn in os.walk(
os.path.expanduser(label_path)) for f in fn if is_label(f)]
if self.gt:
assert (len(scan_files) == len(label_files))
self.scan_files.extend(scan_files)
self.label_files.extend(label_files)
self.scan_files.sort()
self.label_files.sort()
print("Using {} scans from sequences {}".format(len(self.scan_files),
self.sequences))
def __getitem__(self, index):
scan_file = self.scan_files[index]
if self.gt:
label_file = self.label_files[index]
if self.gt:
scan = SemLaserScan(self.color_map,
project=True,
H=self.sensor_img_H,
W=self.sensor_img_W,
fov_up=self.sensor_fov_up,
fov_down=self.sensor_fov_down)
else:
scan = LaserScan(project=True,
H=self.sensor_img_H,
W=self.sensor_img_W,
fov_up=self.sensor_fov_up,
fov_down=self.sensor_fov_down)
scan.open_scan(scan_file)
if self.gt:
scan.open_label(label_file)
scan.sem_label = self.map(scan.sem_label, self.learning_map)
scan.proj_sem_label = self.map(scan.proj_sem_label, self.learning_map)
unproj_n_points = scan.points.shape[0]
unproj_xyz = torch.full((self.max_points, 3), -1.0, dtype=torch.float)
unproj_xyz[:unproj_n_points] = torch.from_numpy(scan.points)
unproj_range = torch.full([self.max_points], -1.0, dtype=torch.float)
unproj_range[:unproj_n_points] = torch.from_numpy(scan.unproj_range)
unproj_remissions = torch.full([self.max_points], -1.0, dtype=torch.float)
unproj_remissions[:unproj_n_points] = torch.from_numpy(scan.remissions)
if self.gt:
unproj_labels = torch.full([self.max_points], -1.0, dtype=torch.int32)
unproj_labels[:unproj_n_points] = torch.from_numpy(scan.sem_label)
else:
unproj_labels = []
proj_range = torch.from_numpy(scan.proj_range).clone()
proj_xyz = torch.from_numpy(scan.proj_xyz).clone()
proj_remission = torch.from_numpy(scan.proj_remission).clone()
proj_mask = torch.from_numpy(scan.proj_mask)
if self.gt:
proj_labels = torch.from_numpy(scan.proj_sem_label).clone()
proj_labels = proj_labels * proj_mask
else:
proj_labels = []
proj_x = torch.full([self.max_points], -1, dtype=torch.long)
proj_x[:unproj_n_points] = torch.from_numpy(scan.proj_x)
proj_y = torch.full([self.max_points], -1, dtype=torch.long)
proj_y[:unproj_n_points] = torch.from_numpy(scan.proj_y)
proj = torch.cat([proj_range.unsqueeze(0).clone(),
proj_xyz.clone().permute(2, 0, 1),
proj_remission.unsqueeze(0).clone()])
proj = (proj - self.sensor_img_means[:, None, None]
) / self.sensor_img_stds[:, None, None]
proj = proj * proj_mask.float()
path_norm = os.path.normpath(scan_file)
path_split = path_norm.split(os.sep)
path_seq = path_split[-3]
path_name = path_split[-1].replace(".bin", ".label")
return proj, proj_mask, proj_labels, unproj_labels, path_seq, path_name, proj_x, proj_y, proj_range, unproj_range, proj_xyz, unproj_xyz, proj_remission, unproj_remissions, unproj_n_points
def __len__(self):
return len(self.scan_files)
@staticmethod
def map(label, mapdict):
maxkey = 0
for key, data in mapdict.items():
if isinstance(data, list):
nel = len(data)
else:
nel = 1
if key > maxkey:
maxkey = key
if nel > 1:
lut = np.zeros((maxkey + 100, nel), dtype=np.int32)
else:
lut = np.zeros((maxkey + 100), dtype=np.int32)
for key, data in mapdict.items():
try:
lut[key] = data
except IndexError:
print("Wrong key ", key)
return lut[label]
class Parser:
def __init__(self,
root,
train_sequences,
valid_sequences,
test_sequences,
labels, # labels in data
color_map, # color for each label
learning_map, # mapping for training labels
learning_map_inv, # recover labels from xentropy
sensor, # sensor to use
max_points, # max points in each scan in entire dataset
batch_size, # batch size for train and val
workers, # threads to load data
gt=True, # get gt?
shuffle_train=True): # shuffle training set?
super(Parser, self).__init__()
# if I am training, get the dataset
self.root = root
self.train_sequences = train_sequences
self.valid_sequences = valid_sequences
self.test_sequences = test_sequences
self.labels = labels
self.color_map = color_map
self.learning_map = learning_map
self.learning_map_inv = learning_map_inv
self.sensor = sensor
self.max_points = max_points
self.batch_size = batch_size
self.workers = workers
self.gt = gt
self.shuffle_train = shuffle_train
# number of classes that matters is the one for xentropy
self.nclasses = len(self.learning_map_inv)
# Data loading code
self.train_dataset = SemanticKitti(root=self.root,
sequences=self.train_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=self.gt)
self.trainloader = torch.utils.data.DataLoader(self.train_dataset,
batch_size=self.batch_size,
shuffle=self.shuffle_train,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.trainloader) > 0
self.trainiter = iter(self.trainloader)
self.valid_dataset = SemanticKitti(root=self.root,
sequences=self.valid_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=self.gt)
self.validloader = torch.utils.data.DataLoader(self.valid_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.validloader) > 0
self.validiter = iter(self.validloader)
if self.test_sequences:
self.test_dataset = SemanticKitti(root=self.root,
sequences=self.test_sequences,
labels=self.labels,
color_map=self.color_map,
learning_map=self.learning_map,
learning_map_inv=self.learning_map_inv,
sensor=self.sensor,
max_points=max_points,
gt=False)
self.testloader = torch.utils.data.DataLoader(self.test_dataset,
batch_size=self.batch_size,
shuffle=False,
num_workers=self.workers,
pin_memory=True,
drop_last=True)
assert len(self.testloader) > 0
self.testiter = iter(self.testloader)
def get_train_batch(self):
scans = self.trainiter.next()
return scans
def get_train_set(self):
return self.trainloader
def get_valid_batch(self):
scans = self.validiter.next()
return scans
def get_valid_set(self):
return self.validloader
def get_test_batch(self):
scans = self.testiter.next()
return scans
def get_test_set(self):
return self.testloader
def get_train_size(self):
return len(self.trainloader)
def get_valid_size(self):
return len(self.validloader)
def get_test_size(self):
return len(self.testloader)
def get_n_classes(self):
return self.nclasses
def get_original_class_string(self, idx):
return self.labels[idx]
def get_xentropy_class_string(self, idx):
return self.labels[self.learning_map_inv[idx]]
def to_original(self, label):
# put label in original values
return SemanticKitti.map(label, self.learning_map_inv)
def to_xentropy(self, label):
# put label in xentropy values
return SemanticKitti.map(label, self.learning_map)
def to_color(self, label):
# put label in original values
label = SemanticKitti.map(label, self.learning_map_inv)
# put label in color
return SemanticKitti.map(label, self.color_map)
| true | true |
f7f8133e4f86d23d1c2ffa7cf3cc69596e7c8612 | 506 | py | Python | setup.py | tylerhether/Flip2BeRAD | f16ee1873f4e58cb1fc516ccd6d6a6ff2b90cc4c | [
"MIT"
] | 6 | 2016-05-04T21:32:01.000Z | 2022-02-26T22:33:28.000Z | setup.py | tylerhether/Flip2BeRAD | f16ee1873f4e58cb1fc516ccd6d6a6ff2b90cc4c | [
"MIT"
] | null | null | null | setup.py | tylerhether/Flip2BeRAD | f16ee1873f4e58cb1fc516ccd6d6a6ff2b90cc4c | [
"MIT"
] | 1 | 2019-06-18T04:06:28.000Z | 2019-06-18T04:06:28.000Z | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Flip2BeRAD',
'author': 'Tyler Hether',
'url': 'https://github.com/tylerhether/Flip2BeRAD',
'download_url': 'https://github.com/tylerhether/Flip2BeRAD',
'author_email': 'tyler [dot] hether [at] gmail [dot, ...]',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['Flip2BeRAD'],
'scripts': [],
'name': 'Flip2BeRAD'
}
setup(**config)
| 25.3 | 64 | 0.626482 | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
config = {
'description': 'Flip2BeRAD',
'author': 'Tyler Hether',
'url': 'https://github.com/tylerhether/Flip2BeRAD',
'download_url': 'https://github.com/tylerhether/Flip2BeRAD',
'author_email': 'tyler [dot] hether [at] gmail [dot, ...]',
'version': '0.1',
'install_requires': ['nose'],
'packages': ['Flip2BeRAD'],
'scripts': [],
'name': 'Flip2BeRAD'
}
setup(**config)
| true | true |
f7f813c493976cfdfc39651e3c3851a2bae9bcfb | 2,244 | py | Python | tutorials/mechanisms/tutorial_convenience.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 13 | 2020-11-05T10:59:13.000Z | 2022-03-21T01:38:31.000Z | tutorials/mechanisms/tutorial_convenience.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 4 | 2022-01-27T10:23:40.000Z | 2022-03-10T18:16:06.000Z | tutorials/mechanisms/tutorial_convenience.py | AQ18/skimpy | 435fc50244f2ca815bbb39d525a82a4692f5c0ac | [
"Apache-2.0"
] | 6 | 2020-08-04T17:01:33.000Z | 2022-03-21T01:38:32.000Z | # -*- coding: utf-8 -*-
"""
.. module:: skimpy
:platform: Unix, Windows
:synopsis: Simple Kinetic Models in Python
.. moduleauthor:: SKiMPy team
[---------]
Copyright 2017 Laboratory of Computational Systems Biotechnology (LCSB),
Ecole Polytechnique Federale de Lausanne (EPFL), Switzerland
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import numpy as np
# Test models
from skimpy.core import *
from skimpy.mechanisms import *
name = 'pfk'
SpecificConvenience = make_convenience([-2, -1, 3])
metabolites = SpecificConvenience.Reactants(substrate1 = 'A',
substrate2 = 'B',
product1 = 'C' )
# thermo_data = {'S': 1e-2,
# 'P': 1e-2,
# 'sig_S': 0.1,
# 'sig_P': 0.1,
# 'gamma': 0.1,
# 'flux': 1.0,
# 'E_tot': 1e-5}
## QSSA Method
parameters = SpecificConvenience.Parameters(
vmax_forward = 1.0,
k_equilibrium=2.0,
km_substrate1 = 10.0,
km_substrate2 = 10.0,
km_product1 = 10.0)
pfk = Reaction(name=name,
mechanism=SpecificConvenience,
reactants=metabolites,
)
this_model = KineticModel()
this_model.add_reaction(pfk)
this_model.parametrize_by_reaction({pfk.name:parameters})
this_model.compile_ode(sim_type = QSSA)
this_model.initial_conditions['A'] = 10.0
this_model.initial_conditions['B'] = 10.0
this_model.initial_conditions['C'] = 10.0
this_sol_qssa = this_model.solve_ode(np.linspace(0.0, 50.0, 500),solver_type = 'cvode')
this_sol_qssa.plot('output/base_out_qssa.html') | 30.739726 | 87 | 0.61943 |
import numpy as np
from skimpy.core import *
from skimpy.mechanisms import *
name = 'pfk'
SpecificConvenience = make_convenience([-2, -1, 3])
metabolites = SpecificConvenience.Reactants(substrate1 = 'A',
substrate2 = 'B',
product1 = 'C' )
SpecificConvenience.Parameters(
vmax_forward = 1.0,
k_equilibrium=2.0,
km_substrate1 = 10.0,
km_substrate2 = 10.0,
km_product1 = 10.0)
pfk = Reaction(name=name,
mechanism=SpecificConvenience,
reactants=metabolites,
)
this_model = KineticModel()
this_model.add_reaction(pfk)
this_model.parametrize_by_reaction({pfk.name:parameters})
this_model.compile_ode(sim_type = QSSA)
this_model.initial_conditions['A'] = 10.0
this_model.initial_conditions['B'] = 10.0
this_model.initial_conditions['C'] = 10.0
this_sol_qssa = this_model.solve_ode(np.linspace(0.0, 50.0, 500),solver_type = 'cvode')
this_sol_qssa.plot('output/base_out_qssa.html') | true | true |
f7f8144b656a6265d8951bf78bd6667ebc8314e4 | 939 | py | Python | thevoid/urls.py | CBR0MS/telematicEnvironment | 6b3130347cad06c6b3aa453010c91d9990bc9cb8 | [
"MIT"
] | null | null | null | thevoid/urls.py | CBR0MS/telematicEnvironment | 6b3130347cad06c6b3aa453010c91d9990bc9cb8 | [
"MIT"
] | 2 | 2020-06-05T19:00:38.000Z | 2021-06-10T20:51:00.000Z | thevoid/urls.py | cbroms/telematicEnvironment | 6b3130347cad06c6b3aa453010c91d9990bc9cb8 | [
"MIT"
] | null | null | null | """thevoid URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('whispers.urls')),
path('admin/', admin.site.urls),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 37.56 | 77 | 0.72524 | from django.contrib import admin
from django.urls import include, path
from django.conf import settings
from django.conf.urls.static import static
urlpatterns = [
path('', include('whispers.urls')),
path('admin/', admin.site.urls),
]+ static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| true | true |
f7f814d2708edec9c91c5aec436d7420ee06b70a | 7,436 | py | Python | src/teacher_school_allocation.py | sarahsester/q_hackathon | 963dcfbe8e3fa8bda954f4fc6db8a238f1d8a720 | [
"MIT"
] | null | null | null | src/teacher_school_allocation.py | sarahsester/q_hackathon | 963dcfbe8e3fa8bda954f4fc6db8a238f1d8a720 | [
"MIT"
] | null | null | null | src/teacher_school_allocation.py | sarahsester/q_hackathon | 963dcfbe8e3fa8bda954f4fc6db8a238f1d8a720 | [
"MIT"
] | 1 | 2021-11-30T18:27:56.000Z | 2021-11-30T18:27:56.000Z | from ortools.linear_solver import pywraplp
import pandas as pd
import numpy as np
def create_cost_matrix(distances, pref_big_school, pref_rural):
cost_matrix = distances + 10 * pref_big_school + 10 * pref_rural
return cost_matrix
def find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools):
    """First allocation pass: assign each teacher to exactly one school via a MIP.

    Arguments:
        df_schools: DataFrame with a 'min_number_of_teachers' column (one row per school).
        distances: [teacher][school] matrix of distances.
        pref_big_school: [teacher][school] matrix, nonzero where the school-size preference is unsatisfied.
        pref_rural: [teacher][school] matrix, nonzero where the urban/rural preference is unsatisfied.
        number_teachers: number of teachers (rows of the matrices).
        number_schools: number of schools (columns of the matrices).

    Returns:
        Tuple of (assignment DataFrame for iteration 1, adapted cost matrix in
        which each teacher's row is scaled by the cost that teacher incurred in
        this pass -- consumed by find_optimal_allocation_it2).
    """
    # Combined cost of assigning teacher t to school s.
    cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
    # Create the mip solver with the SCIP backend.
    solver = pywraplp.Solver.CreateSolver('SCIP')
    # x[t, s] is a 0-1 variable which will be 1 iff teacher t is assigned to school s.
    x = {}
    for t in range(number_teachers):
        for s in range(number_schools):
            x[t, s] = solver.IntVar(0, 1, '')
    # Constraint 1: each teacher is assigned to exactly one school.
    for t in range(number_teachers):
        solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
    # Constraint 2: each school receives at least its minimum number of teachers.
    for s in range(number_schools):
        solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
    # Constraint 3: each school receives at most its minimum + 20 teachers.
    for s in range(number_schools):
        solver.Add(
            solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
    # Constraint 4: no teacher may incur a total cost above 100.
    for t in range(number_teachers):
        solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
    # Objective: minimize the total assignment cost.
    objective_terms = []
    for t in range(number_teachers):
        for s in range(number_schools):
            objective_terms.append(cost_matrix[t][s] * x[t, s])
    solver.Minimize(solver.Sum(objective_terms))
    # Solve
    status = solver.Solve()
    # Collect rows in a plain list and build the DataFrame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
    # appending row-by-row is quadratic anyway.
    columns = ['iteration', 'teacher', 'school', 'cost', 'dist',
               'pref_school_size_unsatisfied', 'pref_urban_rural_unsatisfied']
    rows = []
    # Realised cost per teacher, in teacher order, for the rescaling below.
    costs_per_teacher = []
    if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
        print(f'Total cost = {solver.Objective().Value()}\n')
        for t in range(number_teachers):
            for s in range(number_schools):
                # Test if x[t, s] is 1 (with tolerance for floating point arithmetic).
                if x[t, s].solution_value() > 0.5:
                    print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
                    rows.append({'iteration': 1, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
                                 'dist': distances[t][s],
                                 'pref_school_size_unsatisfied': pref_big_school[t][s],
                                 'pref_urban_rural_unsatisfied': pref_rural[t][s]})
                    costs_per_teacher.append(cost_matrix[t][s])
    df = pd.DataFrame(rows, columns=columns)
    # Scale each teacher's row by what that teacher paid this round so that
    # teachers with a bad first-round outcome are favoured in iteration 2.
    adapted_costs = cost_matrix * np.array(costs_per_teacher)[:, np.newaxis] / 10
    return df, adapted_costs
def find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools,
                                adapted_cost_matrix):
    """Second allocation pass: same constraints as the first pass, but the
    objective minimises the fairness-adjusted costs from iteration 1.

    Arguments:
        df_schools: DataFrame with a 'min_number_of_teachers' column (one row per school).
        distances: [teacher][school] matrix of distances.
        pref_big_school: [teacher][school] matrix, nonzero where the school-size preference is unsatisfied.
        pref_rural: [teacher][school] matrix, nonzero where the urban/rural preference is unsatisfied.
        number_teachers: number of teachers.
        number_schools: number of schools.
        adapted_cost_matrix: per-teacher rescaled costs from find_optimal_allocation.

    Returns:
        Assignment DataFrame for iteration 2; reported costs are the real,
        unadjusted costs.
    """
    # Real (unadjusted) costs still drive the per-teacher cap and the report.
    cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
    # Create the mip solver with the SCIP backend.
    solver = pywraplp.Solver.CreateSolver('SCIP')
    # x[t, s] is a 0-1 variable which will be 1 iff teacher t is assigned to school s.
    x = {}
    for t in range(number_teachers):
        for s in range(number_schools):
            x[t, s] = solver.IntVar(0, 1, '')
    # Constraint 1: each teacher is assigned to exactly one school.
    for t in range(number_teachers):
        solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
    # Constraint 2: each school receives at least its minimum number of teachers.
    for s in range(number_schools):
        solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
    # Constraint 3: each school receives at most its minimum + 20 teachers.
    for s in range(number_schools):
        solver.Add(
            solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
    # Constraint 4: no teacher may incur a real cost above 100.
    for t in range(number_teachers):
        solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
    # Objective: minimize the *adapted* cost so teachers who fared badly in
    # iteration 1 are favoured this time.
    objective_terms = []
    for t in range(number_teachers):
        for s in range(number_schools):
            objective_terms.append(adapted_cost_matrix[t][s] * x[t, s])
    solver.Minimize(solver.Sum(objective_terms))
    # Solve
    status = solver.Solve()
    # Build the DataFrame once from collected rows: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and row-wise append is quadratic.
    columns = ['iteration', 'teacher', 'school', 'cost', 'dist',
               'pref_school_size_unsatisfied', 'pref_urban_rural_unsatisfied']
    rows = []
    if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
        print(f'Total cost = {solver.Objective().Value()}\n')
        for t in range(number_teachers):
            for s in range(number_schools):
                # Test if x[t, s] is 1 (with tolerance for floating point arithmetic).
                if x[t, s].solution_value() > 0.5:
                    print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
                    rows.append({'iteration': 2, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
                                 'dist': distances[t][s],
                                 'pref_school_size_unsatisfied': pref_big_school[t][s],
                                 'pref_urban_rural_unsatisfied': pref_rural[t][s]})
    return pd.DataFrame(rows, columns=columns)
if __name__ == '__main__':
    # Problem size is fixed to the Waldorfschule dataset shipped in ../data.
    nb_of_teachers = 761
    nb_of_schools = 58
    # Get school data (minimum staffing per school).
    df_schools = pd.read_csv('../data/school_dataset.csv')
    # Get precomputed distance and preference matrices.
    distances = pd.read_pickle('../data/geopy_distance_matrix_Waldorfschule.pkl')
    # distances = np.random.rand(nb_of_teachers, nb_of_schools) * 200
    pref_big_school = pd.read_pickle(r'../data/preference_big_school_Waldorfschule.pkl')
    pref_rural = pd.read_pickle(r'../data/preference_rural_Waldorfschule.pkl')
    # Iteration 1: plain cost minimisation; also yields the fairness-adjusted costs.
    df, adapted_costs = find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural,
                                                number_teachers=nb_of_teachers, number_schools=nb_of_schools)
    print(df)
    print(df.groupby(['school']).count()['teacher'])
    print(f'Average costs: {df["cost"].mean()}.')
    print(f'Teacher {df["cost"].argmin()} has minimum costs ({df["cost"].min()}).')
    print(f'Teacher {df["cost"].argmax()} has maximal costs ({df["cost"].max()}).')
    print(adapted_costs)
    # Iteration 2: rerun with the fairness-adjusted cost matrix.
    df2 = find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers=nb_of_teachers,
                                      number_schools=nb_of_schools, adapted_cost_matrix=adapted_costs)
    print(df2)
    print(df2.groupby(['school']).count()['teacher'])
    print(f'Average costs: {df2["cost"].mean()}.')
    print(f'Teacher {df2["cost"].argmin()} has minimum costs ({df2["cost"].min()}).')
    print(f'Teacher {df2["cost"].argmax()} has maximal costs ({df2["cost"].max()}).')
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    df_all = pd.concat([df, df2])
    df_all.to_csv('../data/results.csv', index=False)
| 44 | 121 | 0.630312 | from ortools.linear_solver import pywraplp
import pandas as pd
import numpy as np
def create_cost_matrix(distances, pref_big_school, pref_rural, preference_weight=10):
    """Combine distance and unsatisfied-preference penalties into one cost matrix.

    Arguments:
        distances: [teacher][school] matrix of distances.
        pref_big_school: [teacher][school] matrix, nonzero where the
            school-size preference is unsatisfied.
        pref_rural: [teacher][school] matrix, nonzero where the urban/rural
            preference is unsatisfied.
        preference_weight: penalty added per unsatisfied preference.
            Defaults to 10, matching the previously hard-coded weight.

    Returns:
        Element-wise cost matrix: distance plus weighted preference penalties.
    """
    cost_matrix = distances + preference_weight * pref_big_school + preference_weight * pref_rural
    return cost_matrix
def find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools):
    """First allocation pass: assign each teacher to exactly one school via a MIP.

    Arguments:
        df_schools: DataFrame with a 'min_number_of_teachers' column (one row per school).
        distances: [teacher][school] matrix of distances.
        pref_big_school: [teacher][school] matrix, nonzero where the school-size preference is unsatisfied.
        pref_rural: [teacher][school] matrix, nonzero where the urban/rural preference is unsatisfied.
        number_teachers: number of teachers (rows of the matrices).
        number_schools: number of schools (columns of the matrices).

    Returns:
        Tuple of (assignment DataFrame for iteration 1, adapted cost matrix in
        which each teacher's row is scaled by the cost that teacher incurred in
        this pass -- consumed by find_optimal_allocation_it2).
    """
    # Combined cost of assigning teacher t to school s.
    cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
    # Create the mip solver with the SCIP backend.
    solver = pywraplp.Solver.CreateSolver('SCIP')
    # x[t, s] is a 0-1 variable which will be 1 iff teacher t is assigned to school s.
    x = {}
    for t in range(number_teachers):
        for s in range(number_schools):
            x[t, s] = solver.IntVar(0, 1, '')
    # Constraint 1: each teacher is assigned to exactly one school.
    for t in range(number_teachers):
        solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
    # Constraint 2: each school receives at least its minimum number of teachers.
    for s in range(number_schools):
        solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
    # Constraint 3: each school receives at most its minimum + 20 teachers.
    for s in range(number_schools):
        solver.Add(
            solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
    # Constraint 4: no teacher may incur a total cost above 100.
    for t in range(number_teachers):
        solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
    # Objective: minimize the total assignment cost.
    objective_terms = []
    for t in range(number_teachers):
        for s in range(number_schools):
            objective_terms.append(cost_matrix[t][s] * x[t, s])
    solver.Minimize(solver.Sum(objective_terms))
    status = solver.Solve()
    # Collect rows in a plain list and build the DataFrame once at the end:
    # DataFrame.append was deprecated in pandas 1.4 and removed in 2.0, and
    # appending row-by-row is quadratic anyway.
    columns = ['iteration', 'teacher', 'school', 'cost', 'dist',
               'pref_school_size_unsatisfied', 'pref_urban_rural_unsatisfied']
    rows = []
    # Realised cost per teacher, in teacher order, for the rescaling below.
    costs_per_teacher = []
    if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
        print(f'Total cost = {solver.Objective().Value()}\n')
        for t in range(number_teachers):
            for s in range(number_schools):
                if x[t, s].solution_value() > 0.5:
                    print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
                    rows.append({'iteration': 1, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
                                 'dist': distances[t][s],
                                 'pref_school_size_unsatisfied': pref_big_school[t][s],
                                 'pref_urban_rural_unsatisfied': pref_rural[t][s]})
                    costs_per_teacher.append(cost_matrix[t][s])
    df = pd.DataFrame(rows, columns=columns)
    # Scale each teacher's row by what that teacher paid this round so that
    # teachers with a bad first-round outcome are favoured in iteration 2.
    adapted_costs = cost_matrix * np.array(costs_per_teacher)[:, np.newaxis] / 10
    return df, adapted_costs
def find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers, number_schools,
                                adapted_cost_matrix):
    """Second allocation pass: same constraints as the first pass, but the
    objective minimises the fairness-adjusted costs from iteration 1.

    Arguments:
        df_schools: DataFrame with a 'min_number_of_teachers' column (one row per school).
        distances: [teacher][school] matrix of distances.
        pref_big_school: [teacher][school] matrix, nonzero where the school-size preference is unsatisfied.
        pref_rural: [teacher][school] matrix, nonzero where the urban/rural preference is unsatisfied.
        number_teachers: number of teachers.
        number_schools: number of schools.
        adapted_cost_matrix: per-teacher rescaled costs from find_optimal_allocation.

    Returns:
        Assignment DataFrame for iteration 2; reported costs are the real,
        unadjusted costs.
    """
    # Real (unadjusted) costs still drive the per-teacher cap and the report.
    cost_matrix = create_cost_matrix(distances, pref_big_school, pref_rural)
    # Create the mip solver with the SCIP backend.
    solver = pywraplp.Solver.CreateSolver('SCIP')
    # x[t, s] is a 0-1 variable which will be 1 iff teacher t is assigned to school s.
    x = {}
    for t in range(number_teachers):
        for s in range(number_schools):
            x[t, s] = solver.IntVar(0, 1, '')
    # Constraint 1: each teacher is assigned to exactly one school.
    for t in range(number_teachers):
        solver.Add(solver.Sum([x[t, s] for s in range(number_schools)]) == 1)
    # Constraint 2: each school receives at least its minimum number of teachers.
    for s in range(number_schools):
        solver.Add(solver.Sum([x[t, s] for t in range(number_teachers)]) >= df_schools['min_number_of_teachers'][s])
    # Constraint 3: each school receives at most its minimum + 20 teachers.
    for s in range(number_schools):
        solver.Add(
            solver.Sum([x[t, s] for t in range(number_teachers)]) <= df_schools['min_number_of_teachers'][s] + 20)
    # Constraint 4: no teacher may incur a real cost above 100.
    for t in range(number_teachers):
        solver.Add(solver.Sum([cost_matrix[t][s] * x[t, s] for s in range(number_schools)]) <= 100)
    # Objective: minimize the *adapted* cost so teachers who fared badly in
    # iteration 1 are favoured this time.
    objective_terms = []
    for t in range(number_teachers):
        for s in range(number_schools):
            objective_terms.append(adapted_cost_matrix[t][s] * x[t, s])
    solver.Minimize(solver.Sum(objective_terms))
    status = solver.Solve()
    # Build the DataFrame once from collected rows: DataFrame.append was
    # deprecated in pandas 1.4 and removed in 2.0, and row-wise append is quadratic.
    columns = ['iteration', 'teacher', 'school', 'cost', 'dist',
               'pref_school_size_unsatisfied', 'pref_urban_rural_unsatisfied']
    rows = []
    if status == pywraplp.Solver.OPTIMAL or status == pywraplp.Solver.FEASIBLE:
        print(f'Total cost = {solver.Objective().Value()}\n')
        for t in range(number_teachers):
            for s in range(number_schools):
                if x[t, s].solution_value() > 0.5:
                    print(f'Teacher {t} assigned to school {s}. Cost={cost_matrix[t][s]}')
                    rows.append({'iteration': 2, 'teacher': t, 'school': s, 'cost': cost_matrix[t][s],
                                 'dist': distances[t][s],
                                 'pref_school_size_unsatisfied': pref_big_school[t][s],
                                 'pref_urban_rural_unsatisfied': pref_rural[t][s]})
    return pd.DataFrame(rows, columns=columns)
if __name__ == '__main__':
    # Problem size is fixed to the Waldorfschule dataset shipped in ../data.
    nb_of_teachers = 761
    nb_of_schools = 58
    # School data (minimum staffing per school).
    df_schools = pd.read_csv('../data/school_dataset.csv')
    # Precomputed distance and preference matrices.
    distances = pd.read_pickle('../data/geopy_distance_matrix_Waldorfschule.pkl')
    pref_big_school = pd.read_pickle(r'../data/preference_big_school_Waldorfschule.pkl')
    pref_rural = pd.read_pickle(r'../data/preference_rural_Waldorfschule.pkl')
    # Iteration 1: plain cost minimisation; also yields the fairness-adjusted costs.
    df, adapted_costs = find_optimal_allocation(df_schools, distances, pref_big_school, pref_rural,
                                                number_teachers=nb_of_teachers, number_schools=nb_of_schools)
    print(df)
    print(df.groupby(['school']).count()['teacher'])
    print(f'Average costs: {df["cost"].mean()}.')
    print(f'Teacher {df["cost"].argmin()} has minimum costs ({df["cost"].min()}).')
    print(f'Teacher {df["cost"].argmax()} has maximal costs ({df["cost"].max()}).')
    print(adapted_costs)
    # Iteration 2: rerun with the fairness-adjusted cost matrix.
    df2 = find_optimal_allocation_it2(df_schools, distances, pref_big_school, pref_rural, number_teachers=nb_of_teachers,
                                      number_schools=nb_of_schools, adapted_cost_matrix=adapted_costs)
    print(df2)
    print(df2.groupby(['school']).count()['teacher'])
    print(f'Average costs: {df2["cost"].mean()}.')
    print(f'Teacher {df2["cost"].argmin()} has minimum costs ({df2["cost"].min()}).')
    print(f'Teacher {df2["cost"].argmax()} has maximal costs ({df2["cost"].max()}).')
    # DataFrame.append was removed in pandas 2.0; concatenate instead.
    df_all = pd.concat([df, df2])
    df_all.to_csv('../data/results.csv', index=False)
| true | true |
f7f814e911f204789aaf98d602ae5abf2de91353 | 41 | py | Python | cf_xarray/__init__.py | Ouranosinc/cf-xarray | 713d60e366fd3ad31a1f2f2169f02662f3b7a6f4 | [
"Apache-2.0"
] | 1 | 2020-06-01T16:24:10.000Z | 2020-06-01T16:24:10.000Z | cf_xarray/__init__.py | jthielen/cf-xarray | d3b45ae578fc1510f276eb4d549a18b0b5c503ec | [
"Apache-2.0"
] | null | null | null | cf_xarray/__init__.py | jthielen/cf-xarray | d3b45ae578fc1510f276eb4d549a18b0b5c503ec | [
"Apache-2.0"
] | null | null | null | from .accessor import CFAccessor # noqa
| 20.5 | 40 | 0.780488 | from .accessor import CFAccessor
| true | true |
f7f814eac8b81f6a44fcb690cad4b38ce10cbc07 | 1,068 | py | Python | rrtstar/sampling.py | rdesarz/rrtstar | 0e2737fbb7bb7e45789d606e6c6c2b7ce5824f65 | [
"MIT"
] | null | null | null | rrtstar/sampling.py | rdesarz/rrtstar | 0e2737fbb7bb7e45789d606e6c6c2b7ce5824f65 | [
"MIT"
] | null | null | null | rrtstar/sampling.py | rdesarz/rrtstar | 0e2737fbb7bb7e45789d606e6c6c2b7ce5824f65 | [
"MIT"
] | null | null | null | import random
import numpy as np
from rrtstar.geometry import Zone2d, Point2d
def generate_new_sample_uniform(planification_zone: Zone2d) -> Point2d:
    """Draw one point uniformly at random from the planification zone."""
    sample_x = np.random.uniform(planification_zone.x_min, planification_zone.x_max)
    sample_y = np.random.uniform(planification_zone.y_min, planification_zone.y_max)
    return Point2d(sample_x, sample_y)
def generate_new_sample_biased(goal: Point2d) -> Point2d:
    """Draw one point from an isotropic Gaussian (variance 10) centred on the goal."""
    covariance = [[10, 0], [0, 10]]
    sample = np.random.multivariate_normal(goal.to_array(), covariance)
    return Point2d(sample[0], sample[1])
def generate_new_sample_biased_towards_goal(
    planification_zone: Zone2d, goal: Point2d, goal_sample_rate: int
) -> Point2d:
    """Uniform sample, except roughly `goal_sample_rate` percent of the time
    the goal point itself is returned, biasing tree growth towards the goal."""
    # There is a probability to generate a sample that is the goal.
    # Therefore, the tree is biased to grow towards the goal.
    if random.randint(0, 100) > goal_sample_rate:
        return Point2d(
            random.uniform(planification_zone.x_min, planification_zone.x_max),
            random.uniform(planification_zone.y_min, planification_zone.y_max),
        )
    else:
        return Point2d(goal.x, goal.y) | 35.6 | 80 | 0.719101 | import random
import numpy as np
from rrtstar.geometry import Zone2d, Point2d
def generate_new_sample_uniform(planification_zone: Zone2d) -> Point2d:
    """Draw one point uniformly at random from the planification zone."""
    x = np.random.uniform(planification_zone.x_min, planification_zone.x_max, 1)
    y = np.random.uniform(planification_zone.y_min, planification_zone.y_max, 1)
    return Point2d(x[0], y[0])
def generate_new_sample_biased(goal: Point2d) -> Point2d:
    """Draw one point from an isotropic Gaussian (variance 10) centred on the goal."""
    x, y = np.random.multivariate_normal(goal.to_array(), [[10, 0], [0, 10]]).T
    return Point2d(x, y)
def generate_new_sample_biased_towards_goal(
    planification_zone: Zone2d, goal: Point2d, goal_sample_rate: int
) -> Point2d:
    """Uniform sample, except roughly `goal_sample_rate` percent of the time
    the goal point itself is returned, biasing tree growth towards the goal."""
    if random.randint(0, 100) > goal_sample_rate:
        return Point2d(
            random.uniform(planification_zone.x_min, planification_zone.x_max),
            random.uniform(planification_zone.y_min, planification_zone.y_max),
        )
    else:
        return Point2d(goal.x, goal.y) | true | true |
f7f814ef63dded3155369a2ac9ae0d9fba906316 | 9,012 | py | Python | usaspending_api/transactions/agnostic_transaction_loader.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | 1 | 2020-06-15T19:59:52.000Z | 2020-06-15T19:59:52.000Z | usaspending_api/transactions/agnostic_transaction_loader.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | usaspending_api/transactions/agnostic_transaction_loader.py | g4brielvs/usaspending-api | bae7da2c204937ec1cdf75c052405b13145728d5 | [
"CC0-1.0"
] | null | null | null | import logging
import psycopg2
from datetime import datetime, timezone
from django.conf import settings
from django.core.management import call_command
from pathlib import Path
from typing import Tuple
from usaspending_api.broker.helpers.last_load_date import get_last_load_date, update_last_load_date
from usaspending_api.common.etl import ETLDBLinkTable, ETLTable, operations
from usaspending_api.common.helpers.date_helper import datetime_command_line_argument_type
from usaspending_api.common.helpers.sql_helpers import get_broker_dsn_string
from usaspending_api.common.helpers.timing_helpers import ScriptTimer as Timer
from usaspending_api.common.retrieve_file_from_uri import SCHEMA_HELP_TEXT
from usaspending_api.transactions.loader_functions import filepath_command_line_argument_type
from usaspending_api.transactions.loader_functions import read_file_for_database_ids
from usaspending_api.transactions.loader_functions import store_ids_in_file
logger = logging.getLogger("script")
class AgnosticTransactionLoader:
    """Shared loader logic for copying transaction records from the Broker
    database into the USAspending database.

    NOTE(review): appears designed to be mixed into a Django management
    command (it supplies ``add_arguments``/``handle``). Subclasses must
    provide the attributes referenced but not defined here: ``shared_pk``,
    ``broker_source_table_name``, ``destination_table_name``,
    ``delete_management_command``, ``last_load_record``, ``lookback_minutes``,
    ``working_file_prefix``, ``broker_full_select_sql``,
    ``broker_incremental_select_sql`` and ``extra_predicate`` -- confirm
    against the concrete subclasses.
    """
    begining_of_time = "1970-01-01"  # fallback delete-window start when no datetime was given ([sic] spelling)
    chunk_size = 25000  # batch size for streaming IDs and for upserts
    is_incremental = False  # True when --since-last-load was used
    successful_run = False  # set by run_script(); cleanup() keys off it
    upsert_records = 0  # running count of inserted/updated records
    def add_arguments(self, parser):
        """Register the mutually-exclusive record-selection flags plus --process-deletes."""
        mutually_exclusive_group = parser.add_mutually_exclusive_group(required=True)
        mutually_exclusive_group.add_argument(
            "--ids", nargs="+", help=f"Load/Reload transactions using this {self.shared_pk} list (space-separated)",
        )
        mutually_exclusive_group.add_argument(
            "--date",
            dest="datetime",
            type=datetime_command_line_argument_type(naive=True),  # Broker date/times are naive.
            help="Load/Reload records from the provided datetime to the script execution start time.",
        )
        mutually_exclusive_group.add_argument(
            "--since-last-load",
            dest="incremental_date",
            action="store_true",
            help="Equivalent to loading from date, but date is drawn from last update date recorded in DB.",
        )
        mutually_exclusive_group.add_argument(
            "--file",
            dest="file",
            type=filepath_command_line_argument_type(chunk_count=self.chunk_size),
            help=(
                f"Load/Reload transactions using {self.shared_pk} values stored at this file path"
                f" (one ID per line) {SCHEMA_HELP_TEXT}"
            ),
        )
        mutually_exclusive_group.add_argument(
            "--reload-all",
            action="store_true",
            help=(
                f"Script will load or reload all {self.broker_source_table_name} records from broker database,"
                " from all time. This does NOT clear the USASpending database first."
            ),
        )
        parser.add_argument(
            "--process-deletes",
            action="store_true",
            help=(
                "If not in local mode, process deletes before beginning the upsert operations."
                " This shouldn't be used with --file or --ids parameters"
            ),
        )
    def handle(self, *args, **options):
        """Management-command entry point; times the entire run."""
        with Timer(message="Script"):
            self.run_script(*args, **options)
    def run_script(self, *args, **options):
        """Resolve options, optionally run deletes, then load and clean up."""
        self.start_time = datetime.now(timezone.utc)
        self.options = options
        if self.options["incremental_date"]:
            self.is_incremental = True
            self.options["datetime"] = self.obtain_last_date()
        if self.options["process_deletes"]:
            delete_date = self.options["datetime"]
            if not delete_date:
                delete_date = self.begining_of_time
            with Timer(message="Processing deletes"):
                delete_job_status = call_command(self.delete_management_command, f"--date={delete_date}")
            if delete_job_status != 0:
                raise RuntimeError("Fatal error. Problem with the deletes")
        try:
            with Timer(message="Load Process"):
                self.process()
            self.successful_run = True
        # Errors are only logged here; cleanup() turns a failed run into SystemExit(1).
        except (Exception, SystemExit, KeyboardInterrupt):
            logger.exception("Fatal error")
        finally:
            self.cleanup()
    def obtain_last_date(self):
        """Return the last recorded load datetime, or abort when none is stored."""
        dt = get_last_load_date(self.last_load_record, self.lookback_minutes)
        if not dt:
            raise SystemExit("No datetime stored in the database, unable to use --since-last-load")
        return dt
    def process(self) -> None:
        """Compile the ID file, then stream the matching records across."""
        with Timer(message="Compiling IDs to process"):
            self.file_path, self.total_ids_to_process = self.compile_transactions_to_process()
            logger.info(f"{self.total_ids_to_process:,} IDs stored")
        with Timer(message="Transfering Data"):
            self.copy_broker_table_data(self.broker_source_table_name, self.destination_table_name, self.shared_pk)
    def cleanup(self) -> None:
        """Finalize the execution and cleanup for the next script run"""
        logger.info(f"Processed {self.upsert_records:,} transction records (insert/update)")
        if self.successful_run and (self.is_incremental or self.options["reload_all"]):
            logger.info("Updated last run time for next incremental load")
            update_last_load_date(self.last_load_record, self.start_time)
        if hasattr(self, "file_path") and self.file_path.exists():
            # If the script fails before the file is created, skip
            # If the file still exists, remove
            self.file_path.unlink()
        if self.successful_run:
            logger.info(f"Loading {self.destination_table_name} completed successfully")
        else:
            logger.info("Failed state on exit")
            raise SystemExit(1)
    def compile_transactions_to_process(self) -> Tuple[Path, int]:
        """Resolve the ID source (--file/--ids/broker query) and spool the IDs
        to a timestamped working file; returns (file path, ID count)."""
        ids = []
        if self.options["file"]:
            ids = self.options["file"]
            logger.info("using provided IDs in file")
        elif self.options["ids"]:
            ids = self.options["ids"]
            logger.info("using provided IDs")
        else:
            ids = self.generate_ids_from_broker()
        file_name = f"{self.working_file_prefix}_{self.start_time.strftime('%Y%m%d_%H%M%S_%f')}"
        return store_ids_in_file(ids, file_name, is_numeric=False)
    def generate_ids_from_broker(self):
        """Yield record IDs from the broker DB via a named (server-side) cursor,
        fetching chunk_size rows at a time to bound memory use."""
        sql = self.combine_sql()
        with psycopg2.connect(dsn=get_broker_dsn_string()) as connection:
            with connection.cursor("usaspending_data_transfer") as cursor:
                cursor.execute(sql.strip("\n"))
                while True:
                    id_list = [id[0] for id in cursor.fetchmany(size=self.chunk_size)]
                    if not id_list:
                        break
                    for broker_id in id_list:
                        yield broker_id
    def combine_sql(self):
        """Create SQL used to fetch transaction ids for records marked to transfer"""
        if self.options["reload_all"]:
            logger.info("FULL RELOAD")
            sql = self.broker_full_select_sql
            optional_predicate = ""
        elif self.options["datetime"]:
            logger.info(f"Using datetime '{self.options['datetime']}'")
            sql = self.broker_incremental_select_sql
            predicate = f"\"updated_at\" >= '{self.options['datetime']}'"
            # Template may already carry a WHERE clause; chain with AND in that case.
            if "where" in sql.lower():
                optional_predicate = f"and {predicate}"
            else:
                optional_predicate = f"where {predicate}"
        return sql.format(id=self.shared_pk, table=self.broker_source_table_name, optional_predicate=optional_predicate)
    def copy_broker_table_data(self, source_tablename, dest_tablename, primary_key):
        """Loop through the batches of IDs and load using the ETL tables"""
        destination = ETLTable(dest_tablename)
        source = ETLDBLinkTable(source_tablename, settings.DATA_BROKER_DBLINK_NAME, destination.data_types)
        transactions_remaining_count = self.total_ids_to_process
        for id_list in read_file_for_database_ids(str(self.file_path), self.chunk_size, is_numeric=False):
            with Timer(message=f"Upsert {len(id_list):,} records"):
                if len(id_list) != 0:
                    predicate = self.extra_predicate + [{"field": primary_key, "op": "IN", "values": tuple(id_list)}]
                    record_count = operations.upsert_records_with_predicate(source, destination, predicate, primary_key)
                else:
                    logger.warning("No records to load. Please check parameters and settings to confirm accuracy")
                    record_count = 0
                if transactions_remaining_count > len(id_list):
                    transactions_remaining_count -= len(id_list)
                else:
                    transactions_remaining_count = 0
                self.upsert_records += record_count
            logger.info(f"{self.upsert_records:,} successful upserts, {transactions_remaining_count:,} remaining.")
| 44.176471 | 120 | 0.649467 | import logging
import psycopg2
from datetime import datetime, timezone
from django.conf import settings
from django.core.management import call_command
from pathlib import Path
from typing import Tuple
from usaspending_api.broker.helpers.last_load_date import get_last_load_date, update_last_load_date
from usaspending_api.common.etl import ETLDBLinkTable, ETLTable, operations
from usaspending_api.common.helpers.date_helper import datetime_command_line_argument_type
from usaspending_api.common.helpers.sql_helpers import get_broker_dsn_string
from usaspending_api.common.helpers.timing_helpers import ScriptTimer as Timer
from usaspending_api.common.retrieve_file_from_uri import SCHEMA_HELP_TEXT
from usaspending_api.transactions.loader_functions import filepath_command_line_argument_type
from usaspending_api.transactions.loader_functions import read_file_for_database_ids
from usaspending_api.transactions.loader_functions import store_ids_in_file
logger = logging.getLogger("script")
class AgnosticTransactionLoader:
    """Shared loader logic for copying transaction records from the Broker
    database into the USAspending database.

    NOTE(review): appears designed to be mixed into a Django management
    command (it supplies ``add_arguments``/``handle``). Subclasses must
    provide the attributes referenced but not defined here: ``shared_pk``,
    ``broker_source_table_name``, ``destination_table_name``,
    ``delete_management_command``, ``last_load_record``, ``lookback_minutes``,
    ``working_file_prefix``, ``broker_full_select_sql``,
    ``broker_incremental_select_sql`` and ``extra_predicate`` -- confirm
    against the concrete subclasses.
    """
    begining_of_time = "1970-01-01"  # fallback delete-window start when no datetime was given ([sic] spelling)
    chunk_size = 25000  # batch size for streaming IDs and for upserts
    is_incremental = False  # True when --since-last-load was used
    successful_run = False  # set by run_script(); cleanup() keys off it
    upsert_records = 0  # running count of inserted/updated records
    def add_arguments(self, parser):
        """Register the mutually-exclusive record-selection flags plus --process-deletes."""
        mutually_exclusive_group = parser.add_mutually_exclusive_group(required=True)
        mutually_exclusive_group.add_argument(
            "--ids", nargs="+", help=f"Load/Reload transactions using this {self.shared_pk} list (space-separated)",
        )
        mutually_exclusive_group.add_argument(
            "--date",
            dest="datetime",
            type=datetime_command_line_argument_type(naive=True),  # Broker date/times are naive.
            help="Load/Reload records from the provided datetime to the script execution start time.",
        )
        mutually_exclusive_group.add_argument(
            "--since-last-load",
            dest="incremental_date",
            action="store_true",
            help="Equivalent to loading from date, but date is drawn from last update date recorded in DB.",
        )
        mutually_exclusive_group.add_argument(
            "--file",
            dest="file",
            type=filepath_command_line_argument_type(chunk_count=self.chunk_size),
            help=(
                f"Load/Reload transactions using {self.shared_pk} values stored at this file path"
                f" (one ID per line) {SCHEMA_HELP_TEXT}"
            ),
        )
        mutually_exclusive_group.add_argument(
            "--reload-all",
            action="store_true",
            help=(
                f"Script will load or reload all {self.broker_source_table_name} records from broker database,"
                " from all time. This does NOT clear the USASpending database first."
            ),
        )
        parser.add_argument(
            "--process-deletes",
            action="store_true",
            help=(
                "If not in local mode, process deletes before beginning the upsert operations."
                " This shouldn't be used with --file or --ids parameters"
            ),
        )
    def handle(self, *args, **options):
        """Management-command entry point; times the entire run."""
        with Timer(message="Script"):
            self.run_script(*args, **options)
    def run_script(self, *args, **options):
        """Resolve options, optionally run deletes, then load and clean up."""
        self.start_time = datetime.now(timezone.utc)
        self.options = options
        if self.options["incremental_date"]:
            self.is_incremental = True
            self.options["datetime"] = self.obtain_last_date()
        if self.options["process_deletes"]:
            delete_date = self.options["datetime"]
            if not delete_date:
                delete_date = self.begining_of_time
            with Timer(message="Processing deletes"):
                delete_job_status = call_command(self.delete_management_command, f"--date={delete_date}")
            if delete_job_status != 0:
                raise RuntimeError("Fatal error. Problem with the deletes")
        try:
            with Timer(message="Load Process"):
                self.process()
            self.successful_run = True
        # Errors are only logged here; cleanup() turns a failed run into SystemExit(1).
        except (Exception, SystemExit, KeyboardInterrupt):
            logger.exception("Fatal error")
        finally:
            self.cleanup()
    def obtain_last_date(self):
        """Return the last recorded load datetime, or abort when none is stored."""
        dt = get_last_load_date(self.last_load_record, self.lookback_minutes)
        if not dt:
            raise SystemExit("No datetime stored in the database, unable to use --since-last-load")
        return dt
    def process(self) -> None:
        """Compile the ID file, then stream the matching records across."""
        with Timer(message="Compiling IDs to process"):
            self.file_path, self.total_ids_to_process = self.compile_transactions_to_process()
            logger.info(f"{self.total_ids_to_process:,} IDs stored")
        with Timer(message="Transfering Data"):
            self.copy_broker_table_data(self.broker_source_table_name, self.destination_table_name, self.shared_pk)
    def cleanup(self) -> None:
        """Finalize the execution and cleanup for the next script run."""
        logger.info(f"Processed {self.upsert_records:,} transction records (insert/update)")
        if self.successful_run and (self.is_incremental or self.options["reload_all"]):
            logger.info("Updated last run time for next incremental load")
            update_last_load_date(self.last_load_record, self.start_time)
        if hasattr(self, "file_path") and self.file_path.exists():
            # If the script fails before the file is created, skip
            # If the file still exists, remove
            self.file_path.unlink()
        if self.successful_run:
            logger.info(f"Loading {self.destination_table_name} completed successfully")
        else:
            logger.info("Failed state on exit")
            raise SystemExit(1)
    def compile_transactions_to_process(self) -> Tuple[Path, int]:
        """Resolve the ID source (--file/--ids/broker query) and spool the IDs
        to a timestamped working file; returns (file path, ID count)."""
        ids = []
        if self.options["file"]:
            ids = self.options["file"]
            logger.info("using provided IDs in file")
        elif self.options["ids"]:
            ids = self.options["ids"]
            logger.info("using provided IDs")
        else:
            ids = self.generate_ids_from_broker()
        file_name = f"{self.working_file_prefix}_{self.start_time.strftime('%Y%m%d_%H%M%S_%f')}"
        return store_ids_in_file(ids, file_name, is_numeric=False)
    def generate_ids_from_broker(self):
        """Yield record IDs from the broker DB via a named (server-side) cursor,
        fetching chunk_size rows at a time to bound memory use."""
        sql = self.combine_sql()
        with psycopg2.connect(dsn=get_broker_dsn_string()) as connection:
            with connection.cursor("usaspending_data_transfer") as cursor:
                cursor.execute(sql.strip("\n"))
                while True:
                    id_list = [id[0] for id in cursor.fetchmany(size=self.chunk_size)]
                    if not id_list:
                        break
                    for broker_id in id_list:
                        yield broker_id
    def combine_sql(self):
        """Create the SQL used to fetch transaction ids for records marked to transfer."""
        if self.options["reload_all"]:
            logger.info("FULL RELOAD")
            sql = self.broker_full_select_sql
            optional_predicate = ""
        elif self.options["datetime"]:
            logger.info(f"Using datetime '{self.options['datetime']}'")
            sql = self.broker_incremental_select_sql
            predicate = f"\"updated_at\" >= '{self.options['datetime']}'"
            # Template may already carry a WHERE clause; chain with AND in that case.
            if "where" in sql.lower():
                optional_predicate = f"and {predicate}"
            else:
                optional_predicate = f"where {predicate}"
        return sql.format(id=self.shared_pk, table=self.broker_source_table_name, optional_predicate=optional_predicate)
    def copy_broker_table_data(self, source_tablename, dest_tablename, primary_key):
        """Loop through the batches of IDs and load using the ETL tables."""
        destination = ETLTable(dest_tablename)
        source = ETLDBLinkTable(source_tablename, settings.DATA_BROKER_DBLINK_NAME, destination.data_types)
        transactions_remaining_count = self.total_ids_to_process
        for id_list in read_file_for_database_ids(str(self.file_path), self.chunk_size, is_numeric=False):
            with Timer(message=f"Upsert {len(id_list):,} records"):
                if len(id_list) != 0:
                    predicate = self.extra_predicate + [{"field": primary_key, "op": "IN", "values": tuple(id_list)}]
                    record_count = operations.upsert_records_with_predicate(source, destination, predicate, primary_key)
                else:
                    logger.warning("No records to load. Please check parameters and settings to confirm accuracy")
                    record_count = 0
                if transactions_remaining_count > len(id_list):
                    transactions_remaining_count -= len(id_list)
                else:
                    transactions_remaining_count = 0
                self.upsert_records += record_count
            logger.info(f"{self.upsert_records:,} successful upserts, {transactions_remaining_count:,} remaining.")
| true | true |
f7f81506c3e92f8523861a133ba6f7a86ac1c5e1 | 8,233 | py | Python | avalon/maya/compat.py | kstrandli/avalon-core | 6e1fe862484983adf9e4c8b14cb229d56b9ed465 | [
"MIT"
] | 168 | 2017-06-23T15:50:43.000Z | 2022-02-27T10:48:45.000Z | avalon/maya/compat.py | kstrandli/avalon-core | 6e1fe862484983adf9e4c8b14cb229d56b9ed465 | [
"MIT"
] | 366 | 2017-06-22T08:38:45.000Z | 2021-06-19T07:29:06.000Z | avalon/maya/compat.py | kstrandli/avalon-core | 6e1fe862484983adf9e4c8b14cb229d56b9ed465 | [
"MIT"
] | 42 | 2017-06-23T15:27:26.000Z | 2021-09-29T17:28:18.000Z | """Compatibility
This module ensures that the compatibility between Maya, Avalon and Pyblish
is maintained.
"""
import maya.cmds as cmds
import os
import logging
import avalon.pipeline
log = logging.getLogger(__name__)
create = avalon.pipeline.create
def remove_googleapiclient():
    """Strip `googleapiclient` entries from the PYTHONPATH.

    Maya 2018 puts Maya2018/plug-ins/MASH/scripts/googleapiclient on the
    PYTHONPATH, so its bundled `http.py` shadows the module expected from
    six.py and crashes Avalon's publisher. Autodesk adds paths that contain
    modules rather than only packages; dropping those entries restores
    normal imports.
    """
    keyword = "googleapiclient"
    # Keep every PYTHONPATH entry that does not mention the offending package.
    cleaned_paths = []
    for path in os.environ["PYTHONPATH"].split(os.pathsep):
        if keyword not in path:
            cleaned_paths.append(path)
    os.environ["PYTHONPATH"] = os.pathsep.join(cleaned_paths)
def install():
    """Run all compatibility functions"""
    maya_version = cmds.about(version=True)
    # The googleapiclient PYTHONPATH conflict only affects Maya 2018.
    if maya_version == "2018":
        remove_googleapiclient()
def load(Loader,
         representation,
         name=None,
         namespace=None,
         data=None):
    """Load asset via database
    Deprecated; this functionality is replaced by `api.load()`
    Arguments:
        Loader (api.Loader): The loader to process in host Maya.
        representation (dict, io.ObjectId or str): Address to representation
        name (str, optional): Use pre-defined name
        namespace (str, optional): Use pre-defined namespace
        data (dict, optional): Additional settings dictionary
    """
    # Local imports; presumably kept function-scoped to defer heavy/Maya-bound
    # dependencies until load time -- TODO confirm.
    from avalon.vendor import six
    from avalon import io
    from avalon.maya import lib
    from avalon.maya.pipeline import containerise
    assert representation, "This is a bug"
    # Accept an id (str/ObjectId) and resolve it to the representation document.
    if isinstance(representation, (six.string_types, io.ObjectId)):
        representation = io.find_one({"_id": io.ObjectId(str(representation))})
    # Resolve the full parent chain for the representation.
    version, subset, asset, project = io.parenthood(representation)
    assert all([representation, version, subset, asset, project]), (
        "This is a bug"
    )
    context = {
        "project": project,
        "asset": asset,
        "subset": subset,
        "version": version,
        "representation": representation,
    }
    # Ensure data is a dictionary when no explicit data provided
    if data:
        assert isinstance(data, dict), "Data must be a dictionary"
    else:
        data = dict()
    name = name or subset["name"]
    # Maya namespaces may not start with a digit; prefix with "_" in that case.
    namespace = namespace or lib.unique_namespace(
        asset["name"] + "_",
        prefix="_" if asset["name"][0].isdigit() else "",
        suffix="_",
    )
    # TODO(roy): add compatibility check, see `tools.loader.lib`
    Loader.log.info(
        "Running '%s' on '%s'" % (Loader.__name__, asset["name"])
    )
    # Run the loader; an OSError aborts with an empty result (best-effort).
    try:
        loader = Loader(context)
        with lib.maintained_selection():
            loader.process(name, namespace, context, data)
    except OSError as e:
        log.info("WARNING: %s" % e)
        return list()
    # Only containerize if any nodes were loaded by the Loader
    nodes = loader[:]
    if not nodes:
        return
    return containerise(
        name=name,
        namespace=namespace,
        nodes=loader[:],
        context=context,
        loader=Loader.__name__)
def update(container, version=-1):
    """Update `container` to `version`

    Deprecated; this functionality is replaced by `api.update()`

    This function relies on a container being referenced. At the time of this
    writing, all assets - models, rigs, animations, shaders - are referenced
    and should pose no problem. But should there be an asset that isn't
    referenced then this function will need to see an update.

    Arguments:
        container (avalon-core:container-1.0): Container to update,
            from `host.ls()`.
        version (int, optional): Update the container to this version.
            If no version is passed, the latest is assumed.
    """
    from avalon import io
    from avalon import api

    node = container["objectName"]

    # Assume asset has been referenced
    reference_node = next((node for node in cmds.sets(node, query=True)
                           if cmds.nodeType(node) == "reference"), None)
    assert reference_node, ("Imported container not supported; "
                            "container must be referenced.")

    current_representation = io.find_one({
        "_id": io.ObjectId(container["representation"])
    })
    assert current_representation is not None, "This is a bug"

    version_, subset, asset, project = io.parenthood(current_representation)

    if version == -1:
        # No explicit version requested: take the latest by name
        new_version = io.find_one({
            "type": "version",
            "parent": subset["_id"]
        }, sort=[("name", -1)])
    else:
        new_version = io.find_one({
            "type": "version",
            "parent": subset["_id"],
            "name": version,
        })

    # BUGFIX: validate *before* subscripting. Previously this assert ran only
    # after `new_version["_id"]` below, so a missing version raised TypeError
    # instead of the intended assertion message.
    assert new_version is not None, "This is a bug"

    new_representation = io.find_one({
        "type": "representation",
        "parent": new_version["_id"],
        "name": current_representation["name"]
    })
    assert new_representation is not None, "This is a bug"

    template_publish = project["config"]["template"]["publish"]
    fname = template_publish.format(**{
        "root": api.registered_root(),
        "project": project["name"],
        "asset": asset["name"],
        "silo": asset["silo"],
        "subset": subset["name"],
        "version": new_version["name"],
        "representation": current_representation["name"],
    })

    # Map representation name (file extension) to Maya's file type flag
    file_type = {
        "ma": "mayaAscii",
        "mb": "mayaBinary",
        "abc": "Alembic"
    }.get(new_representation["name"])
    assert file_type, ("Unsupported representation: %s" % new_representation)

    assert os.path.exists(fname), "%s does not exist." % fname
    cmds.file(fname, loadReference=reference_node, type=file_type)

    # Update metadata
    cmds.setAttr(container["objectName"] + ".representation",
                 str(new_representation["_id"]),
                 type="string")
def remove(container):
    """Remove an existing `container` from Maya scene

    Deprecated; this functionality is replaced by `api.remove()`

    Arguments:
        container (avalon-core:container-1.0): Which container
            to remove from scene.
    """
    node = container["objectName"]
    # Assume asset has been referenced
    reference_node = next((node for node in cmds.sets(node, query=True)
                           if cmds.nodeType(node) == "reference"), None)
    assert reference_node, ("Imported container not supported; "
                            "container must be referenced.")
    log.info("Removing '%s' from Maya.." % container["name"])
    # Resolve namespace and file path from the reference before unloading it
    namespace = cmds.referenceQuery(reference_node, namespace=True)
    fname = cmds.referenceQuery(reference_node, filename=True)
    cmds.file(fname, removeReference=True)
    try:
        cmds.delete(node)
    except ValueError:
        # Already implicitly deleted by Maya upon removing reference
        pass
    try:
        # If container is not automatically cleaned up by Maya (issue #118)
        cmds.namespace(removeNamespace=namespace, deleteNamespaceContent=True)
    except RuntimeError:
        # Namespace was already gone; nothing left to clean up
        pass
class BackwardsCompatibleLoader(avalon.pipeline.Loader):
    """A backwards compatible loader.

    This triggers the old-style `process` through the old Maya's host `load`,
    `update` and `remove` methods and exposes it through the new-style Loader
    api.

    Note: This inherits from `avalon.pipeline.Loader` and *not* from
    `avalon.maya.pipeline.Loader`
    """
    def load(self,
             context,
             name=None,
             namespace=None,
             data=None):
        # Delegate to the module-level deprecated `load`, passing this class
        # as the Loader so its old-style `process` gets triggered.
        return load(Loader=self.__class__,
                    representation=context['representation'],
                    name=name,
                    namespace=namespace,
                    data=data)
    def remove(self, container):
        # Delegate to the module-level deprecated `remove`
        return remove(container)
    def update(self, container, representation):
        # The new-style API hands us a representation document; the old-style
        # `update` expects a version number, so extract it first.
        version = representation['context']['version']
        return update(container, version=version)
| 29.508961 | 79 | 0.631362 | import maya.cmds as cmds
import os
import logging
import avalon.pipeline
log = logging.getLogger(__name__)
create = avalon.pipeline.create
def remove_googleapiclient():
keyword = "googleapiclient"
python_paths = os.environ["PYTHONPATH"].split(os.pathsep)
paths = [path for path in python_paths if keyword not in path]
os.environ["PYTHONPATH"] = os.pathsep.join(paths)
def install():
if cmds.about(version=True) == "2018":
remove_googleapiclient()
def load(Loader,
representation,
name=None,
namespace=None,
data=None):
from avalon.vendor import six
from avalon import io
from avalon.maya import lib
from avalon.maya.pipeline import containerise
assert representation, "This is a bug"
if isinstance(representation, (six.string_types, io.ObjectId)):
representation = io.find_one({"_id": io.ObjectId(str(representation))})
version, subset, asset, project = io.parenthood(representation)
assert all([representation, version, subset, asset, project]), (
"This is a bug"
)
context = {
"project": project,
"asset": asset,
"subset": subset,
"version": version,
"representation": representation,
}
if data:
assert isinstance(data, dict), "Data must be a dictionary"
else:
data = dict()
name = name or subset["name"]
namespace = namespace or lib.unique_namespace(
asset["name"] + "_",
prefix="_" if asset["name"][0].isdigit() else "",
suffix="_",
)
Loader.log.info(
"Running '%s' on '%s'" % (Loader.__name__, asset["name"])
)
try:
loader = Loader(context)
with lib.maintained_selection():
loader.process(name, namespace, context, data)
except OSError as e:
log.info("WARNING: %s" % e)
return list()
nodes = loader[:]
if not nodes:
return
return containerise(
name=name,
namespace=namespace,
nodes=loader[:],
context=context,
loader=Loader.__name__)
def update(container, version=-1):
from avalon import io
from avalon import api
node = container["objectName"]
reference_node = next((node for node in cmds.sets(node, query=True)
if cmds.nodeType(node) == "reference"), None)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")
current_representation = io.find_one({
"_id": io.ObjectId(container["representation"])
})
assert current_representation is not None, "This is a bug"
version_, subset, asset, project = io.parenthood(current_representation)
if version == -1:
new_version = io.find_one({
"type": "version",
"parent": subset["_id"]
}, sort=[("name", -1)])
else:
new_version = io.find_one({
"type": "version",
"parent": subset["_id"],
"name": version,
})
new_representation = io.find_one({
"type": "representation",
"parent": new_version["_id"],
"name": current_representation["name"]
})
assert new_version is not None, "This is a bug"
template_publish = project["config"]["template"]["publish"]
fname = template_publish.format(**{
"root": api.registered_root(),
"project": project["name"],
"asset": asset["name"],
"silo": asset["silo"],
"subset": subset["name"],
"version": new_version["name"],
"representation": current_representation["name"],
})
file_type = {
"ma": "mayaAscii",
"mb": "mayaBinary",
"abc": "Alembic"
}.get(new_representation["name"])
assert file_type, ("Unsupported representation: %s" % new_representation)
assert os.path.exists(fname), "%s does not exist." % fname
cmds.file(fname, loadReference=reference_node, type=file_type)
cmds.setAttr(container["objectName"] + ".representation",
str(new_representation["_id"]),
type="string")
def remove(container):
node = container["objectName"]
reference_node = next((node for node in cmds.sets(node, query=True)
if cmds.nodeType(node) == "reference"), None)
assert reference_node, ("Imported container not supported; "
"container must be referenced.")
log.info("Removing '%s' from Maya.." % container["name"])
namespace = cmds.referenceQuery(reference_node, namespace=True)
fname = cmds.referenceQuery(reference_node, filename=True)
cmds.file(fname, removeReference=True)
try:
cmds.delete(node)
except ValueError:
pass
try:
cmds.namespace(removeNamespace=namespace, deleteNamespaceContent=True)
except RuntimeError:
pass
class BackwardsCompatibleLoader(avalon.pipeline.Loader):
def load(self,
context,
name=None,
namespace=None,
data=None):
return load(Loader=self.__class__,
representation=context['representation'],
name=name,
namespace=namespace,
data=data)
def remove(self, container):
return remove(container)
def update(self, container, representation):
version = representation['context']['version']
return update(container, version=version)
| true | true |
f7f815caa4c519f8000a5e2987255de17afbfd62 | 3,956 | py | Python | helper/webSearchCloseWords.py | Bobyuan1015/KDA | ce442922deb93b1bfe2ad7c418f1c63f5c40e000 | [
"MIT"
] | 1 | 2020-05-14T08:31:17.000Z | 2020-05-14T08:31:17.000Z | helper/webSearchCloseWords.py | Bobyuan1015/KDA | ce442922deb93b1bfe2ad7c418f1c63f5c40e000 | [
"MIT"
] | null | null | null | helper/webSearchCloseWords.py | Bobyuan1015/KDA | ce442922deb93b1bfe2ad7c418f1c63f5c40e000 | [
"MIT"
] | 1 | 2020-05-14T08:31:45.000Z | 2020-05-14T08:31:45.000Z | # -*- coding: utf-8 -*-
"""
File Name: url_content.py
Description : the main detail logic of auditing url sms
Author : yuanfang
date: 2019/12/13
"""
from lxml import html
import pandas as pd
import sys
import os
import pathlib
import re
import requests
from helper.cut import func_timer
project_path = str(pathlib.Path(os.path.abspath(os.curdir)))
sys.path.append(project_path)
print(sys.path)
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
xpaths=['/html/body/div[1]/div[2]/div[2]/p[11]//text()',
'/html/body/div[1]/div[2]/div[2]/p[6]//text()',
'/html/body/div[1]/div[2]/div[2]/p[8]//text()',
'/html/body/div[1]/div[2]/div[2]/p[13]//text()',
'/html/body/div[1]/div[2]/div[2]/p[2]//text()']
def web_search(text, closeWords=None):
    """Get the synonyms for a Chinese phrase from kmcha.com.

    :param text: a chinese word phrase. type: str
    :param closeWords: cached result; any value other than '0' is returned
        as-is without hitting the network
    :return: comma-joined close words, or '0' on failure or when `text`
        contains no Chinese characters
    """
    if len(get_chinese(text)) < 1:
        return '0'
    if closeWords != '0':
        # Already resolved previously; reuse the cached value
        return closeWords
    print('web_search ->', text)
    # NOTE: the original wrapped this in `while True`, but every branch
    # returned on the first iteration, so the loop (and an unused nested
    # `getci` helper) has been removed without changing behavior.
    try:
        page = requests.get('https://kmcha.com/similar/' + text, headers=headers, timeout=2)
        close_words = match_ci(page.text)
        return ','.join(close_words)
    except requests.exceptions.ConnectionError:
        print('ConnectionError -- please wait 3 seconds')
        return '0'
    except requests.exceptions.ChunkedEncodingError:
        print('ChunkedEncodingError -- please wait 3 seconds')
        return '0'
    except Exception as e:
        # Best-effort: any other failure also degrades to the '0' sentinel
        print('Unfortunitely -- An Unknow Error Happened, Please wait 3 seconds e:', e)
        return '0'
@func_timer
def web_search_close_keys(file):
    """Fill the `close_words` column of a keyword CSV via kmcha.com lookups.

    Reads `file`, resolves close words for each `finale_all_keys` entry
    (skipping rows already resolved), and writes the result to 'keys_.csv'.
    """
    df = pd.read_csv(file)
    df['close_words'] = '0'
    # BUGFIX: fillna returns a new frame; the original call discarded the
    # result, leaving NaNs (e.g. in finale_all_keys) in place.
    df = df.fillna('0')
    df['close_words'] = df.apply(lambda row: web_search(row['finale_all_keys'], row['close_words']), axis=1)
    df.to_csv('keys_.csv', index=False)
def get_chinese(content):
    """Pick Chinese characters only from a text.

    :param content: input text. type: str
    :return: the Chinese characters of `content`, concatenated. type: str
    """
    # BUGFIX: removed the leftover debug print that dumped the full input
    # to stdout on every call.
    return re.sub('[^\u4e00-\u9fff]+', '', content)
def remove_redundant(text):
    """Split `text` on the '的同义词' marker and deduplicate the pieces.

    :return: unique fragments, in arbitrary (set) order. type: list
    """
    pieces = text.split('的同义词')
    return list(set(pieces))
stops=['的']
def match_ci(text):
    """Extract candidate close words between the '的相似词' and '热门查询' markers.

    :param text: raw HTML/text of a kmcha.com similar-words page
    :return: list of Chinese words (possibly empty). Returns [] when the
        markers are absent.
    """
    start = '的相似词'
    end = '热门查询'
    close_words = []
    if start in text and end in text:
        start_index = text.find(start) + len(start)
        end_index = text.find(end)
        ci_sentences = text[start_index:end_index]
        # Collect deduplicated Chinese fragments from each whitespace-split chunk
        for s in ci_sentences.split(' '):
            close_words.extend(remove_redundant(get_chinese(s.strip())))
        return [ci for ci in close_words if len(ci) > 0 and ci not in stops]
    # BUGFIX: the original fell through returning None here, which crashed
    # `','.join(...)` in web_search (masked by its broad except).
    return []
# df = pd.read_csv('key.csv')
# print(type(df))
# print(df.columns)
# # df.drop(df[df.keys.isnull()].index,inplace=True)
# df['closed_words'] = df['keys'].apply(web_search)
# df.to_csv('done_keys.csv',index=False)
| 31.396825 | 133 | 0.598332 |
from lxml import html
import pandas as pd
import sys
import os
import pathlib
import re
import requests
from helper.cut import func_timer
project_path = str(pathlib.Path(os.path.abspath(os.curdir)))
sys.path.append(project_path)
print(sys.path)
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'}
xpaths=['/html/body/div[1]/div[2]/div[2]/p[11]//text()',
'/html/body/div[1]/div[2]/div[2]/p[6]//text()',
'/html/body/div[1]/div[2]/div[2]/p[8]//text()',
'/html/body/div[1]/div[2]/div[2]/p[13]//text()',
'/html/body/div[1]/div[2]/div[2]/p[2]//text()']
def web_search(text,closeWords=None):
if len(get_chinese(text)) <1:
return '0'
if closeWords != '0':
return closeWords
def getci(text):
tree = html.fromstring(text)
close_words = []
for xpath_ in xpaths:
text = tree.xpath(xpath_)
if len(text) > 0:
for ci in text:
close_words.extend(ci.split())
print('close:',close_words)
return list(set(close_words))
print('web_search ->', text)
while True:
try:
page = requests.get('https://kmcha.com/similar/' + text, headers=headers, timeout=2)
close_words = match_ci(page.text)
return ','.join(close_words)
except requests.exceptions.ConnectionError:
print('ConnectionError -- please wait 3 seconds')
return '0'
except requests.exceptions.ChunkedEncodingError:
print('ChunkedEncodingError -- please wait 3 seconds')
return '0'
except Exception as e:
print('Unfortunitely -- An Unknow Error Happened, Please wait 3 seconds e:', e)
return '0'
@func_timer
def web_search_close_keys(file):
df = pd.read_csv(file)
df['close_words'] = '0'
df.fillna('0')
df['close_words'] = df.apply(lambda row: web_search(row['finale_all_keys'], row['close_words']), axis=1)
df.to_csv('keys_.csv',index=False)
def get_chinese(content):
print('content:',content)
return re.sub('[^\u4e00-\u9fff]+', '', content)
def remove_redundant(text):
words = text.split('的同义词')
return list(set(words))
stops=['的']
def match_ci(text):
start='的相似词'
end='热门查询'
close_words=[]
if start in text and end in text:
start_index = text.find(start)+len(start)
end_index = text.find(end)
ci_sentences = text[start_index:end_index]
temp = [close_words.extend(remove_redundant(get_chinese(s.strip()))) for s in ci_sentences.split(' ')]
cis = [ci for ci in close_words if len(ci) > 0 and ci not in stops]
return cis
| true | true |
f7f8163cad5fa84180ece9307309b7c564c1b104 | 289 | py | Python | example/sj.py | tinashime/Python27 | b632918c7368a9bcfc5af8353e136247d954fb5e | [
"bzip2-1.0.6"
] | null | null | null | example/sj.py | tinashime/Python27 | b632918c7368a9bcfc5af8353e136247d954fb5e | [
"bzip2-1.0.6"
] | null | null | null | example/sj.py | tinashime/Python27 | b632918c7368a9bcfc5af8353e136247d954fb5e | [
"bzip2-1.0.6"
] | null | null | null | number=23
guess=int(raw_input('Enter an integer : '))  # Python 2 input; `number` (the secret) is defined above
# Compare the single guess against the secret and report the direction.
if guess==number:
    print 'Congratulations, you guessed it.'
    print "(but you do not win any prizes!)"
elif guess<number:
    print 'No, it is a little higher than that'
else:
    print 'No, it is a little lower than that'
print 'Done'  # runs regardless of the outcome
guess=int(raw_input('Enter an integer : '))
if guess==number:
print 'Congratulations, you guessed it.'
print "(but you do not win any prizes!)"
elif guess<number:
print 'No, it is a little higher than that'
else:
print 'No, it is a little lower than that'
print 'Done'
| false | true |
f7f817a1e11fd3feffb256fc63b5f3d1548683d2 | 5,007 | py | Python | pptx/shapes/graphfrm.py | iroin/python-pptx | 5d1012d8c0df104025d9565dafc411077039c66f | [
"MIT"
] | null | null | null | pptx/shapes/graphfrm.py | iroin/python-pptx | 5d1012d8c0df104025d9565dafc411077039c66f | [
"MIT"
] | null | null | null | pptx/shapes/graphfrm.py | iroin/python-pptx | 5d1012d8c0df104025d9565dafc411077039c66f | [
"MIT"
] | 1 | 2020-02-17T20:46:02.000Z | 2020-02-17T20:46:02.000Z | # encoding: utf-8
"""Graphic Frame shape and related objects.
A graphic frame is a common container for table, chart, smart art, and media
objects.
"""
from pptx.enum.shapes import MSO_SHAPE_TYPE
from pptx.shapes.base import BaseShape
from pptx.shared import ParentedElementProxy
from pptx.spec import (
GRAPHIC_DATA_URI_CHART,
GRAPHIC_DATA_URI_OLEOBJ,
GRAPHIC_DATA_URI_TABLE,
)
from pptx.table import Table
class GraphicFrame(BaseShape):
    """Container shape for table, chart, smart art, and media objects.

    Corresponds to a ``<p:graphicFrame>`` element in the shape tree.
    """

    @property
    def chart(self):
        """The |Chart| object held by this graphic frame.

        Raises |ValueError| when this graphic frame holds no chart.
        """
        if self.has_chart:
            return self.chart_part.chart
        raise ValueError("shape does not contain a chart")

    @property
    def chart_part(self):
        """The |ChartPart| object holding the chart of this graphic frame."""
        rId = self._element.chart_rId
        return self.part.related_parts[rId]

    @property
    def has_chart(self):
        """|True| when this graphic frame holds a chart object.

        When |True|, the chart is available via the ``.chart`` property.
        """
        return GRAPHIC_DATA_URI_CHART == self._element.graphicData_uri

    @property
    def has_table(self):
        """|True| when this graphic frame holds a table object.

        When |True|, the table is available via the ``.table`` property.
        """
        return GRAPHIC_DATA_URI_TABLE == self._element.graphicData_uri

    @property
    def ole_format(self):
        """Optional _OleFormat object for this graphic-frame shape.

        Raises `ValueError` when this graphic frame holds no OLE object.
        A shape holding an OLE object has `.shape_type` of either
        `EMBEDDED_OLE_OBJECT` or `LINKED_OLE_OBJECT`.
        """
        if self._element.has_oleobj:
            return _OleFormat(self._element.graphicData, self._parent)
        raise ValueError("not an OLE-object shape")

    @property
    def shadow(self):
        """Unconditionally raises |NotImplementedError|.

        Shadow access for graphic frames is content-specific (charts,
        tables, etc. differ) and has not been implemented yet.
        """
        raise NotImplementedError("shadow property on GraphicFrame not yet supported")

    @property
    def shape_type(self):
        """Optional `MSO_SHAPE_TYPE` member identifying this shape's type.

        One of ``CHART``, ``TABLE``, ``EMBEDDED_OLE_OBJECT``, or
        ``LINKED_OLE_OBJECT``; `None` when none of those apply, e.g. for
        SmartArt content.
        """
        uri = self._element.graphicData_uri
        if uri == GRAPHIC_DATA_URI_CHART:
            return MSO_SHAPE_TYPE.CHART
        if uri == GRAPHIC_DATA_URI_TABLE:
            return MSO_SHAPE_TYPE.TABLE
        if uri == GRAPHIC_DATA_URI_OLEOBJ:
            if self._element.is_embedded_ole_obj:
                return MSO_SHAPE_TYPE.EMBEDDED_OLE_OBJECT
            return MSO_SHAPE_TYPE.LINKED_OLE_OBJECT
        return None

    @property
    def table(self):
        """The |Table| object held by this graphic frame.

        Raises |ValueError| when this graphic frame holds no table.
        """
        if self.has_table:
            return Table(self._element.graphic.graphicData.tbl, self)
        raise ValueError("shape does not contain a table")
class _OleFormat(ParentedElementProxy):
    """Provides attributes on an embedded OLE object."""

    def __init__(self, graphicData, parent):
        super(_OleFormat, self).__init__(graphicData, parent)
        self._graphicData = graphicData

    @property
    def blob(self):
        """Optional bytes of the OLE object, suitable for saving as a file.

        None when the embedded object does not represent a "file".
        """
        blob_part = self.part.related_parts[self._graphicData.blob_rId]
        return blob_part.blob

    @property
    def prog_id(self):
        """str "progId" attribute of this embedded OLE object.

        A value like "Excel.Sheet.12" identifying the "file-type" of the
        embedded object — more precisely, the application ("server" in OLE
        parlance) used to open it.
        """
        return self._graphicData.progId

    @property
    def show_as_icon(self):
        """True when the OLE object displays as an icon rather than a preview."""
        return self._graphicData.showAsIcon
| 34.294521 | 88 | 0.666267 |
from pptx.enum.shapes import MSO_SHAPE_TYPE
from pptx.shapes.base import BaseShape
from pptx.shared import ParentedElementProxy
from pptx.spec import (
GRAPHIC_DATA_URI_CHART,
GRAPHIC_DATA_URI_OLEOBJ,
GRAPHIC_DATA_URI_TABLE,
)
from pptx.table import Table
class GraphicFrame(BaseShape):
@property
def chart(self):
if not self.has_chart:
raise ValueError("shape does not contain a chart")
return self.chart_part.chart
@property
def chart_part(self):
return self.part.related_parts[self._element.chart_rId]
@property
def has_chart(self):
return self._element.graphicData_uri == GRAPHIC_DATA_URI_CHART
@property
def has_table(self):
return self._element.graphicData_uri == GRAPHIC_DATA_URI_TABLE
@property
def ole_format(self):
if not self._element.has_oleobj:
raise ValueError("not an OLE-object shape")
return _OleFormat(self._element.graphicData, self._parent)
@property
def shadow(self):
raise NotImplementedError("shadow property on GraphicFrame not yet supported")
@property
def shape_type(self):
graphicData_uri = self._element.graphicData_uri
if graphicData_uri == GRAPHIC_DATA_URI_CHART:
return MSO_SHAPE_TYPE.CHART
elif graphicData_uri == GRAPHIC_DATA_URI_TABLE:
return MSO_SHAPE_TYPE.TABLE
elif graphicData_uri == GRAPHIC_DATA_URI_OLEOBJ:
return (
MSO_SHAPE_TYPE.EMBEDDED_OLE_OBJECT
if self._element.is_embedded_ole_obj
else MSO_SHAPE_TYPE.LINKED_OLE_OBJECT
)
else:
return None
@property
def table(self):
if not self.has_table:
raise ValueError("shape does not contain a table")
tbl = self._element.graphic.graphicData.tbl
return Table(tbl, self)
class _OleFormat(ParentedElementProxy):
def __init__(self, graphicData, parent):
super(_OleFormat, self).__init__(graphicData, parent)
self._graphicData = graphicData
@property
def blob(self):
return self.part.related_parts[self._graphicData.blob_rId].blob
@property
def prog_id(self):
return self._graphicData.progId
@property
def show_as_icon(self):
return self._graphicData.showAsIcon
| true | true |
f7f8182cbda9c79f34c6f56d8907bde981842cd9 | 937 | py | Python | world_tour_app/urls.py | mathildebadoual/world_tour_app | 7b02697fdced48fec3a9a59d70313a3efef9edfa | [
"MIT"
] | null | null | null | world_tour_app/urls.py | mathildebadoual/world_tour_app | 7b02697fdced48fec3a9a59d70313a3efef9edfa | [
"MIT"
] | null | null | null | world_tour_app/urls.py | mathildebadoual/world_tour_app | 7b02697fdced48fec3a9a59d70313a3efef9edfa | [
"MIT"
] | null | null | null | """hw URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/2.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
# Route /polls/ to the polls app's URLconf and /admin/ to the Django admin.
# static() appends static-file routes for development; it is a no-op when
# settings.DEBUG is False.
urlpatterns = [
    path('polls/', include('polls.urls')),
    path('admin/', admin.site.urls),
]+static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| 36.038462 | 77 | 0.723586 | from django.contrib import admin
from django.urls import path, include
from django.conf.urls.static import static
from django.conf import settings
urlpatterns = [
path('polls/', include('polls.urls')),
path('admin/', admin.site.urls),
]+static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| true | true |
f7f8183be75b9e905369489752679ce63c9e777c | 483 | py | Python | jbank/migrations/0053_wsediconnection_target_identifier.py | bachvtuan/django-jbank | 1b384936d93b802d92442167efca292d2aaa2f47 | [
"MIT"
] | null | null | null | jbank/migrations/0053_wsediconnection_target_identifier.py | bachvtuan/django-jbank | 1b384936d93b802d92442167efca292d2aaa2f47 | [
"MIT"
] | 2 | 2020-11-05T17:30:12.000Z | 2021-02-24T23:54:35.000Z | jbank/migrations/0053_wsediconnection_target_identifier.py | bachvtuan/django-jbank | 1b384936d93b802d92442167efca292d2aaa2f47 | [
"MIT"
] | 1 | 2021-12-16T09:27:04.000Z | 2021-12-16T09:27:04.000Z | # Generated by Django 2.2.3 on 2019-11-30 20:04
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0052_auto_20191130_1927"),
]
operations = [
migrations.AddField(
model_name="wsediconnection",
name="target_identifier",
field=models.CharField(default=1, max_length=32, verbose_name="target identifier"),
preserve_default=False,
),
]
| 24.15 | 95 | 0.6294 |
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("jbank", "0052_auto_20191130_1927"),
]
operations = [
migrations.AddField(
model_name="wsediconnection",
name="target_identifier",
field=models.CharField(default=1, max_length=32, verbose_name="target identifier"),
preserve_default=False,
),
]
| true | true |
f7f8188212715bcb74f1edf4502c82045327c4e8 | 4,726 | py | Python | stem_cell_hypothesis/en_albert_base/joint/pos_srl.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | 4 | 2021-09-17T15:23:31.000Z | 2022-02-28T10:18:04.000Z | stem_cell_hypothesis/en_albert_base/joint/pos_srl.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | null | null | null | stem_cell_hypothesis/en_albert_base/joint/pos_srl.py | emorynlp/stem-cell-hypothesis | 48a628093d93d653865fbac6409d179cddd99293 | [
"Apache-2.0"
] | null | null | null | # -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2021-01-06 16:12
from typing import List
from elit.common.dataset import SortingSamplerBuilder
from elit.common.transform import NormalizeToken
from elit.components.mtl.loss_balancer import MovingAverageBalancer
from elit.components.mtl.multi_task_learning import MultiTaskLearning
from elit.components.mtl.tasks.constituency import CRFConstituencyParsing
from elit.components.mtl.tasks.dep import BiaffineDependencyParsing
from elit.components.mtl.tasks.ner.biaffine_ner import BiaffineNamedEntityRecognition
from elit.components.mtl.tasks.pos import TransformerTagging
from elit.components.mtl.tasks.srl.rank_srl import SpanRankingSemanticRoleLabeling
from elit.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from elit.datasets.srl.ontonotes5.english import ONTONOTES5_POS_ENGLISH_TRAIN, ONTONOTES5_POS_ENGLISH_TEST, \
ONTONOTES5_POS_ENGLISH_DEV, ONTONOTES5_ENGLISH_TRAIN, ONTONOTES5_ENGLISH_TEST, ONTONOTES5_ENGLISH_DEV, \
ONTONOTES5_CON_ENGLISH_TRAIN, ONTONOTES5_CON_ENGLISH_DEV, ONTONOTES5_CON_ENGLISH_TEST, ONTONOTES5_DEP_ENGLISH_TEST, \
ONTONOTES5_DEP_ENGLISH_DEV, ONTONOTES5_DEP_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_DEV, \
ONTONOTES5_SRL_ENGLISH_TEST
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from elit.metrics.mtl import MetricDict
from elit.utils.log_util import cprint
from stem_cell_hypothesis import cdroot
def main():
    """Train and evaluate 3 MTL runs (POS + SRL) on OntoNotes 5 with ALBERT-base.

    Each run trains from scratch into its own save directory, then reloads
    the best checkpoint and evaluates on the test sets. Commented-out task
    entries (ner/dep/con) are intentional toggles for other task subsets.
    """
    cdroot()
    scores: List[MetricDict] = []
    # Three independent runs with identical settings; only the seed-implied
    # initialization and the save directory differ.
    for i in range(3):
        tasks = {
            'pos': TransformerTagging(
                ONTONOTES5_POS_ENGLISH_TRAIN,
                ONTONOTES5_POS_ENGLISH_DEV,
                ONTONOTES5_POS_ENGLISH_TEST,
                SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
                lr=1e-3,
            ),
            # 'ner': BiaffineNamedEntityRecognition(
            #     ONTONOTES5_ENGLISH_TRAIN,
            #     ONTONOTES5_ENGLISH_DEV,
            #     ONTONOTES5_ENGLISH_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            #     doc_level_offset=True,
            # ),
            'srl': SpanRankingSemanticRoleLabeling(
                ONTONOTES5_SRL_ENGLISH_TRAIN,
                ONTONOTES5_SRL_ENGLISH_DEV,
                ONTONOTES5_SRL_ENGLISH_TEST,
                SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
                lr=1e-3,
                doc_level_offset=True,
            ),
            # 'dep': BiaffineDependencyParsing(
            #     ONTONOTES5_DEP_ENGLISH_TRAIN,
            #     ONTONOTES5_DEP_ENGLISH_DEV,
            #     ONTONOTES5_DEP_ENGLISH_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            # ),
            # 'con': CRFConstituencyParsing(
            #     ONTONOTES5_CON_ENGLISH_TRAIN,
            #     ONTONOTES5_CON_ENGLISH_DEV,
            #     ONTONOTES5_CON_ENGLISH_TEST,
            #     SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
            #     lr=1e-3,
            # ),
        }
        mtl = MultiTaskLearning()
        save_dir = f'data/model/mtl/ontonotes_albert_base_pos_srl_en_{i}'
        cprint(f'Model will be saved in [cyan]{save_dir}[/cyan]')
        mtl.fit(
            ContextualWordEmbedding(
                'token',
                'albert-base-v2',
                average_subwords=True,
                max_sequence_length=512,
                word_dropout=.2,
            ),
            tasks,
            save_dir,
            30,
            lr=1e-3,
            encoder_lr=5e-5,
            grad_norm=1,
            gradient_accumulation=4,
            eval_trn=False,
            transform=NormalizeToken(PTB_TOKEN_MAPPING, 'token'),
            loss_balancer=MovingAverageBalancer(5, intrinsic_weighting=True),
            # prefetch=10,
            # cache='data/tmp'
        )
        cprint(f'Model saved in [cyan]{save_dir}[/cyan]')
        mtl.load(save_dir)
        # Enforce well-formed (tree, projective) dependency output at inference
        # time; a no-op in this configuration since 'dep' is disabled above.
        if 'dep' in mtl.tasks:
            mtl['dep'].config.tree = True
            mtl['dep'].config.proj = True
            mtl.save_config(save_dir)
        # Re-attach dataset paths to the freshly-loaded model before evaluation
        for k, v in mtl.tasks.items():
            v.trn = tasks[k].trn
            v.dev = tasks[k].dev
            v.tst = tasks[k].tst
        metric = mtl.evaluate(save_dir)[0]
        scores.append(metric)
    print(f'{"-".join(tasks.keys())} {len(scores)} runs scores:')
    for each in scores:
        cprint(each.cstr())
if __name__ == '__main__':
    # NOTE(review): torch appears unused here — presumably imported for its
    # side effects before training starts; verify before removing.
    import torch
    # torch.multiprocessing.set_start_method('spawn')  # See https://github.com/pytorch/pytorch/issues/40403
    main()
| 40.393162 | 121 | 0.63796 |
from typing import List
from elit.common.dataset import SortingSamplerBuilder
from elit.common.transform import NormalizeToken
from elit.components.mtl.loss_balancer import MovingAverageBalancer
from elit.components.mtl.multi_task_learning import MultiTaskLearning
from elit.components.mtl.tasks.constituency import CRFConstituencyParsing
from elit.components.mtl.tasks.dep import BiaffineDependencyParsing
from elit.components.mtl.tasks.ner.biaffine_ner import BiaffineNamedEntityRecognition
from elit.components.mtl.tasks.pos import TransformerTagging
from elit.components.mtl.tasks.srl.rank_srl import SpanRankingSemanticRoleLabeling
from elit.datasets.parsing.ptb import PTB_TOKEN_MAPPING
from elit.datasets.srl.ontonotes5.english import ONTONOTES5_POS_ENGLISH_TRAIN, ONTONOTES5_POS_ENGLISH_TEST, \
ONTONOTES5_POS_ENGLISH_DEV, ONTONOTES5_ENGLISH_TRAIN, ONTONOTES5_ENGLISH_TEST, ONTONOTES5_ENGLISH_DEV, \
ONTONOTES5_CON_ENGLISH_TRAIN, ONTONOTES5_CON_ENGLISH_DEV, ONTONOTES5_CON_ENGLISH_TEST, ONTONOTES5_DEP_ENGLISH_TEST, \
ONTONOTES5_DEP_ENGLISH_DEV, ONTONOTES5_DEP_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_TRAIN, ONTONOTES5_SRL_ENGLISH_DEV, \
ONTONOTES5_SRL_ENGLISH_TEST
from elit.layers.embeddings.contextual_word_embedding import ContextualWordEmbedding
from elit.metrics.mtl import MetricDict
from elit.utils.log_util import cprint
from stem_cell_hypothesis import cdroot
def main():
    """Train and evaluate a joint POS+SRL multi-task model on OntoNotes 5.

    Runs three independent train/evaluate cycles (one fresh model per
    iteration) on top of an ALBERT-base encoder, collects the test metrics
    of every run, and prints all collected scores at the end.
    """
    cdroot()
    scores: List[MetricDict] = []
    # Three independent runs to estimate variance across random initializations.
    for i in range(3):
        # Task heads sharing one encoder: POS tagging and span-ranking SRL.
        tasks = {
            'pos': TransformerTagging(
                ONTONOTES5_POS_ENGLISH_TRAIN,
                ONTONOTES5_POS_ENGLISH_DEV,
                ONTONOTES5_POS_ENGLISH_TEST,
                SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
                lr=1e-3,
            ),
            'srl': SpanRankingSemanticRoleLabeling(
                ONTONOTES5_SRL_ENGLISH_TRAIN,
                ONTONOTES5_SRL_ENGLISH_DEV,
                ONTONOTES5_SRL_ENGLISH_TEST,
                SortingSamplerBuilder(batch_size=64, batch_max_tokens=6400),
                lr=1e-3,
                doc_level_offset=True,
            ),
        }
        mtl = MultiTaskLearning()
        save_dir = f'data/model/mtl/ontonotes_albert_base_pos_srl_en_{i}'
        cprint(f'Model will be saved in [cyan]{save_dir}[/cyan]')
        mtl.fit(
            ContextualWordEmbedding(
                'token',
                'albert-base-v2',
                average_subwords=True,
                max_sequence_length=512,
                word_dropout=.2,
            ),
            tasks,
            save_dir,
            30,
            lr=1e-3,
            # Smaller learning rate for the pretrained encoder than the heads.
            encoder_lr=5e-5,
            grad_norm=1,
            gradient_accumulation=4,
            eval_trn=False,
            transform=NormalizeToken(PTB_TOKEN_MAPPING, 'token'),
            loss_balancer=MovingAverageBalancer(5, intrinsic_weighting=True),
        )
        cprint(f'Model saved in [cyan]{save_dir}[/cyan]')
        mtl.load(save_dir)
        # Dead branch in this configuration (only pos/srl tasks are defined);
        # presumably kept so the script can be reused with a 'dep' head.
        if 'dep' in mtl.tasks:
            mtl['dep'].config.tree = True
            mtl['dep'].config.proj = True
            mtl.save_config(save_dir)
        # NOTE(review): re-attaching the dataset splits after load() —
        # presumably load() drops them; confirm against MultiTaskLearning.load.
        for k, v in mtl.tasks.items():
            v.trn = tasks[k].trn
            v.dev = tasks[k].dev
            v.tst = tasks[k].tst
        metric = mtl.evaluate(save_dir)[0]
        scores.append(metric)
    print(f'{"-".join(tasks.keys())} {len(scores)} runs scores:')
    for each in scores:
        cprint(each.cstr())
if __name__ == '__main__':
import torch
| true | true |
f7f81915b09cd99f6bdee1fda40dc4ef276d8e35 | 2,032 | py | Python | tree/test_.py | technolingo/AlgoStructuresPy | 4ac95542dec48b61dab3a0f33c5bacbe5a3cb3f1 | [
"MIT"
] | null | null | null | tree/test_.py | technolingo/AlgoStructuresPy | 4ac95542dec48b61dab3a0f33c5bacbe5a3cb3f1 | [
"MIT"
] | null | null | null | tree/test_.py | technolingo/AlgoStructuresPy | 4ac95542dec48b61dab3a0f33c5bacbe5a3cb3f1 | [
"MIT"
] | null | null | null | import gc
import pytest
from .index import Tree, Node
class TestTree():
    """Pytest xunit-style tests for the ``Tree``/``Node`` classes in ``.index``."""

    def setup_method(self):
        # Fresh fixtures for every test.
        self.node = Node('a')
        self.tree = Tree()

    def teardown_method(self):
        '''tearDown doesn't seem to be able to delete lists created in
        each test, thus resulting in errors if each test is not run separately.
        Will come back to this when a better solution is found.
        '''
        self.node.children = []
        self.node = None
        del self.node
        gc.collect()

    def test_node_properties(self):
        assert self.node.data == 'a'
        assert self.node.children == []
        assert len(self.node.children) == 0

    def test_node_add_children(self):
        self.node.add('b')
        assert len(self.node.children) == 1
        assert self.node.children[0].data == 'b'
        assert len(self.node.children[0].children) == 0

    @pytest.mark.skip
    def test_node_remove_children(self):
        self.node.add('b')
        self.node.add('b')
        self.node.add('c')
        assert len(self.node.children) == 3
        # remove('b') is expected to drop every child whose data equals 'b'.
        self.node.remove('b')
        assert len(self.node.children) == 1
        assert self.node.children[0].data == 'c'

    @pytest.mark.skip
    def test_tree_properties(self):
        # BUGFIX: this class does not inherit unittest.TestCase, so
        # self.assertIsNone does not exist and would raise AttributeError
        # once this test is un-skipped; use a plain assert instead.
        assert self.tree.root is None

    @pytest.mark.skip
    def test_tree_traverse_breadth(self):
        letters = []
        self.tree.root = self.node
        self.tree.root.add('b')
        self.tree.root.add('c')
        self.tree.root.children[0].add('d')

        def f(node): letters.append(node.data)
        self.tree.traverse_breadth(f)
        assert letters == ['a', 'b', 'c', 'd']

    @pytest.mark.skip
    def test_tree_traverse_depth(self):
        letters = []
        self.tree.root = self.node
        self.tree.root.add('b')
        self.tree.root.add('d')
        self.tree.root.children[0].add('c')

        def f(node): letters.append(node.data)
        self.tree.traverse_depth(f)
        assert letters == ['a', 'b', 'c', 'd']
| 28.619718 | 80 | 0.584154 | import gc
import pytest
from .index import Tree, Node
class TestTree():
    """Pytest xunit-style tests for the ``Tree``/``Node`` classes in ``.index``."""

    def setup_method(self):
        # Fresh fixtures for every test.
        self.node = Node('a')
        self.tree = Tree()

    def teardown_method(self):
        # Clear and drop the node fixture, then force a GC cycle.
        self.node.children = []
        self.node = None
        del self.node
        gc.collect()

    def test_node_properties(self):
        assert self.node.data == 'a'
        assert self.node.children == []
        assert len(self.node.children) == 0

    def test_node_add_children(self):
        self.node.add('b')
        assert len(self.node.children) == 1
        assert self.node.children[0].data == 'b'
        assert len(self.node.children[0].children) == 0

    @pytest.mark.skip
    def test_node_remove_children(self):
        self.node.add('b')
        self.node.add('b')
        self.node.add('c')
        assert len(self.node.children) == 3
        # remove('b') is expected to drop every child whose data equals 'b'.
        self.node.remove('b')
        assert len(self.node.children) == 1
        assert self.node.children[0].data == 'c'

    @pytest.mark.skip
    def test_tree_properties(self):
        # BUGFIX: this class does not inherit unittest.TestCase, so
        # self.assertIsNone does not exist and would raise AttributeError
        # once this test is un-skipped; use a plain assert instead.
        assert self.tree.root is None

    @pytest.mark.skip
    def test_tree_traverse_breadth(self):
        letters = []
        self.tree.root = self.node
        self.tree.root.add('b')
        self.tree.root.add('c')
        self.tree.root.children[0].add('d')

        def f(node): letters.append(node.data)
        self.tree.traverse_breadth(f)
        assert letters == ['a', 'b', 'c', 'd']

    @pytest.mark.skip
    def test_tree_traverse_depth(self):
        letters = []
        self.tree.root = self.node
        self.tree.root.add('b')
        self.tree.root.add('d')
        self.tree.root.children[0].add('c')

        def f(node): letters.append(node.data)
        self.tree.traverse_depth(f)
        assert letters == ['a', 'b', 'c', 'd']
| true | true |
f7f8199dd726f68f1029a8124bd906d3e5bddbb9 | 19,478 | py | Python | src/main/python/systemds/context/systemds_context.py | shiruke/systemds | cdd7b9ca15c3f17ec15045e85b107e26a4d7e7a7 | [
"Apache-2.0"
] | null | null | null | src/main/python/systemds/context/systemds_context.py | shiruke/systemds | cdd7b9ca15c3f17ec15045e85b107e26a4d7e7a7 | [
"Apache-2.0"
] | null | null | null | src/main/python/systemds/context/systemds_context.py | shiruke/systemds | cdd7b9ca15c3f17ec15045e85b107e26a4d7e7a7 | [
"Apache-2.0"
] | 1 | 2021-02-24T22:50:06.000Z | 2021-02-24T22:50:06.000Z | # -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
__all__ = ["SystemDSContext"]
import copy
import json
import os
import socket
import threading
import time
from glob import glob
from queue import Empty, Queue
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep
from typing import Dict, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from py4j.java_gateway import GatewayParameters, JavaGateway
from py4j.protocol import Py4JNetworkError
from systemds.operator import Frame, Matrix, OperationNode, Scalar, Source
from systemds.script_building import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
from systemds.utils.helpers import get_module_dir
class SystemDSContext(object):
    """A context with a connection to a java instance with which SystemDS operations are executed.
    The java process is started and is running using a random tcp port for instruction parsing."""

    java_gateway: JavaGateway

    def __init__(self, port: int = -1):
        """Starts a new instance of SystemDSContext, in which the connection to a JVM systemds instance is handled
        Any new instance of this SystemDS Context, would start a separate new JVM.

        Standard out and standard error from the JVM is also handled in this class, filling up Queues,
        that can be read from to get the printed statements from the JVM.

        :param port: The port to connect to the JVM on; -1 selects a random free port.
        """
        command = self.__build_startup_command()
        process, port = self.__try_startup(command, port)

        # Drain stdout/stderr of the subprocess on daemon threads, since
        # readline on the pipes is blocking.
        self.__stdout = Queue()
        self.__stderr = Queue()
        self.__stdout_thread = Thread(target=self.__enqueue_output, args=(
            process.stdout, self.__stdout), daemon=True)
        self.__stderr_thread = Thread(target=self.__enqueue_output, args=(
            process.stderr, self.__stderr), daemon=True)
        self.__stdout_thread.start()
        self.__stderr_thread.start()

        # Py4j connect to the started process.
        gwp = GatewayParameters(port=port, eager_load=True)
        self.java_gateway = JavaGateway(
            gateway_parameters=gwp, java_process=process)

    def get_stdout(self, lines: int = -1):
        """Getter for the stdout of the java subprocess
        The output is taken from the stdout queue and returned in a new list.

        :param lines: The number of lines to try to read from the stdout queue.
            default -1 prints all current lines in the queue.
        """
        if lines == -1 or self.__stdout.qsize() < lines:
            return [self.__stdout.get() for x in range(self.__stdout.qsize())]
        else:
            return [self.__stdout.get() for x in range(lines)]

    def get_stderr(self, lines: int = -1):
        """Getter for the stderr of the java subprocess
        The output is taken from the stderr queue and returned in a new list.

        :param lines: The number of lines to try to read from the stderr queue.
            default -1 prints all current lines in the queue.
        """
        if lines == -1 or self.__stderr.qsize() < lines:
            return [self.__stderr.get() for x in range(self.__stderr.qsize())]
        else:
            return [self.__stderr.get() for x in range(lines)]

    def exception_and_close(self, e: Exception):
        """
        Method for printing exception, printing stdout and error, while also closing the context correctly.

        :param e: the exception thrown
        """
        message = "Exception Encountered! closing JVM\n"
        message += "standard out :\n" + "\n".join(self.get_stdout())
        # BUGFIX: previously appended stdout twice; append stderr here.
        message += "standard error :\n" + "\n".join(self.get_stderr())
        message += "Exception : " + str(e)
        self.close()
        raise RuntimeError(message)

    def __try_startup(self, command, port, rep=0):
        """ Try to perform startup of system.

        :param command: The command to execute for starting JMLC content
        :param port: The port to try to connect to to.
        :param rep: The number of repeated tries to startup the jvm.
        """
        if port == -1:
            assignedPort = self.__get_open_port()
        elif rep == 0:
            assignedPort = port
        else:
            # Retries after a failure pick a fresh port.
            assignedPort = self.__get_open_port()

        fullCommand = []
        fullCommand.extend(command)
        fullCommand.append(str(assignedPort))

        process = Popen(fullCommand, stdout=PIPE, stdin=PIPE, stderr=PIPE)
        try:
            self.__verify_startup(process)
            return process, assignedPort
        except Exception:
            # BUGFIX: self.close() cannot be used here — the output threads
            # and the gateway do not exist yet during __init__, so it raised
            # AttributeError and broke the retry loop. Kill the failed
            # subprocess directly instead.
            process.kill()
            if rep > 3:
                raise Exception(
                    "Failed to start SystemDS context with " + str(rep) + " repeated tries")
            else:
                rep += 1
                print("Failed to startup JVM process, retrying: " + str(rep))
                sleep(0.5)
                return self.__try_startup(command, port, rep)

    def __verify_startup(self, process):
        """Read the subprocess output until the py4j GatewayServer reports
        ready, raising if it fails to do so within a bounded number of lines."""
        first_stdout = process.stdout.readline()
        if(not b"GatewayServer Started" in first_stdout):
            stderr = process.stderr.readline().decode("utf-8")
            if(len(stderr) > 1):
                raise Exception(
                    "Exception in startup of GatewayServer: " + stderr)
            outputs = []
            outputs.append(first_stdout.decode("utf-8"))
            max_tries = 10
            for i in range(max_tries):
                next_line = process.stdout.readline()
                if(b"GatewayServer Started" in next_line):
                    print("WARNING: Stdout corrupted by prints: " + str(outputs))
                    print("Startup success")
                    break
                else:
                    outputs.append(next_line)

                if (i == max_tries-1):
                    raise Exception("Error in startup of systemDS gateway process: \n gateway StdOut: " + str(
                        outputs) + " \n gateway StdErr" + process.stderr.readline().decode("utf-8"))

    def __build_startup_command(self):
        """Assemble the java command line (classpath, log4j config, main class)
        used to launch the SystemDS py4j gateway."""
        command = ["java", "-cp"]
        root = os.environ.get("SYSTEMDS_ROOT")
        if root == None:
            # If there is no systemds install default to use the PIP packaged java files.
            root = os.path.join(get_module_dir(), "systemds-java")

        # nt means its Windows
        cp_separator = ";" if os.name == "nt" else ":"

        if os.environ.get("SYSTEMDS_ROOT") != None:
            lib_cp = os.path.join(root, "target", "lib", "*")
            systemds_cp = os.path.join(root, "target", "SystemDS.jar")
            classpath = cp_separator.join([lib_cp, systemds_cp])

            command.append(classpath)
            files = glob(os.path.join(root, "conf", "log4j*.properties"))
            if len(files) > 1:
                print(
                    "WARNING: Multiple logging files found selecting: " + files[0])
            if len(files) == 0:
                print("WARNING: No log4j file found at: "
                      + os.path.join(root, "conf")
                      + " therefore using default settings")
            else:
                command.append("-Dlog4j.configuration=file:" + files[0])
        else:
            lib_cp = os.path.join(root, "lib", "*")
            command.append(lib_cp)

        command.append("org.apache.sysds.api.PythonDMLScript")

        return command

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # no errors to handle to allow continuation
        return None

    def close(self):
        """Close the connection to the java process and do necessary cleanup."""
        if self.__stdout_thread.is_alive():
            self.__stdout_thread.join(0)
        # BUGFIX: previously re-checked the stdout thread here (copy-paste),
        # leaving the stderr thread unjoined when stdout had already finished.
        if self.__stderr_thread.is_alive():
            self.__stderr_thread.join(0)

        pid = self.java_gateway.java_process.pid
        if self.java_gateway.java_gateway_server is not None:
            try:
                self.java_gateway.shutdown(True)
            except Py4JNetworkError as e:
                if "Gateway is not connected" not in str(e):
                    self.java_gateway.java_process.kill()
        # Signal 14 (SIGALRM) terminates the JVM process if still alive.
        os.kill(pid, 14)

    def __enqueue_output(self, out, queue):
        """Method for handling the output from java.
        It is locating the string handling inside a different thread, since the 'out.readline' is a blocking command.
        """
        for line in iter(out.readline, b""):
            queue.put(line.decode("utf-8").strip())

    def __get_open_port(self):
        """Get a random available port.
        and hope that no other process steals it while we wait for the JVM to startup
        """
        # https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(("", 0))
        s.listen(1)
        port = s.getsockname()[1]
        s.close()
        return port

    def full(self, shape: Tuple[int, int], value: Union[float, int]) -> 'Matrix':
        """Generates a matrix completely filled with a value

        :param shape: shape (rows and cols) of the matrix TODO tensor
        :param value: the value to fill all cells with
        :return: the OperationNode representing this operation
        """
        unnamed_input_nodes = [value]
        named_input_nodes = {'rows': shape[0], 'cols': shape[1]}
        return Matrix(self, 'matrix', unnamed_input_nodes, named_input_nodes)

    def seq(self, start: Union[float, int], stop: Union[float, int] = None,
            step: Union[float, int] = 1) -> 'Matrix':
        """Create a single column vector with values from `start` to `stop` and an increment of `step`.
        If no stop is defined and only one parameter is given, then start will be 0 and the parameter will be
        interpreted as stop.

        :param start: the starting value
        :param stop: the maximum value
        :param step: the step size
        :return: the OperationNode representing this operation
        """
        if stop is None:
            stop = start
            start = 0
        unnamed_input_nodes = [start, stop, step]
        return Matrix(self, 'seq', unnamed_input_nodes)

    def rand(self, rows: int, cols: int,
             min: Union[float, int] = None, max: Union[float, int] = None, pdf: str = "uniform",
             sparsity: Union[float, int] = None, seed: Union[float, int] = None,
             lambd: Union[float, int] = 1) -> 'Matrix':
        """Generates a matrix filled with random values

        :param rows: number of rows
        :param cols: number of cols
        :param min: min value for cells
        :param max: max value for cells
        :param pdf: "uniform"/"normal"/"poison" distribution
        :param sparsity: fraction of non-zero cells
        :param seed: random seed
        :param lambd: lamda value for "poison" distribution
        :return: the OperationNode representing this operation
        """
        available_pdfs = ["uniform", "normal", "poisson"]
        if rows < 0:
            raise ValueError("In rand statement, can only assign rows a long (integer) value >= 0 "
                             "-- attempted to assign value: {r}".format(r=rows))
        if cols < 0:
            raise ValueError("In rand statement, can only assign cols a long (integer) value >= 0 "
                             "-- attempted to assign value: {c}".format(c=cols))
        if pdf not in available_pdfs:
            raise ValueError("The pdf passed is invalid! given: {g}, expected: {e}".format(
                g=pdf, e=available_pdfs))

        pdf = '\"' + pdf + '\"'
        named_input_nodes = {
            'rows': rows, 'cols': cols, 'pdf': pdf, 'lambda': lambd}
        if min is not None:
            named_input_nodes['min'] = min
        if max is not None:
            named_input_nodes['max'] = max
        if sparsity is not None:
            named_input_nodes['sparsity'] = sparsity
        if seed is not None:
            named_input_nodes['seed'] = seed

        return Matrix(self, 'rand', [], named_input_nodes=named_input_nodes)

    def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
        """ Read an file from disk. Supportted types include:
        CSV, Matrix Market(coordinate), Text(i,j,v), SystemDS Binay
        See: http://apache.github.io/systemds/site/dml-language-reference#readwrite-built-in-functions for more details

        :return: an Operation Node, containing the read data.
        """
        mtd_filepath = path + ".mtd"
        if os.path.exists(mtd_filepath):
            # Metadata file overrides any caller-supplied data_type.
            with open(mtd_filepath) as jspec_file:
                mtd = json.load(jspec_file)
                kwargs["data_type"] = mtd["data_type"]

        data_type = kwargs.get("data_type", None)
        file_format = kwargs.get("format", None)
        if data_type == "matrix":
            kwargs["data_type"] = f'"{data_type}"'
            return Matrix(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
        elif data_type == "frame":
            kwargs["data_type"] = f'"{data_type}"'
            if isinstance(file_format, str):
                kwargs["format"] = f'"{kwargs["format"]}"'
            return Frame(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
        elif data_type == "scalar":
            kwargs["data_type"] = f'"{data_type}"'
            output_type = OutputType.from_str(kwargs.get("value_type", None))
            kwargs["value_type"] = f'"{output_type.name}"'
            return Scalar(self, "read", [f'"{path}"'], named_input_nodes=kwargs, output_type=output_type)

        print("WARNING: Unknown type read please add a mtd file, or specify in arguments")
        return OperationNode(self, "read", [f'"{path}"'], named_input_nodes=kwargs)

    def scalar(self, v: Dict[str, VALID_INPUT_TYPES]) -> 'Scalar':
        """ Construct an scalar value, this can contain str, float, double, integers and booleans.

        :return: An `OperationNode` containing the scalar value.
        """
        if type(v) is str:
            if not ((v[0] == '"' and v[-1] == '"') or (v[0] == "'" and v[-1] == "'")):
                v = f'"{v}"'

        # output type assign simply assigns the given variable to the value
        # therefore the output type is assign.
        return Scalar(self, v, assign=True, output_type=OutputType.from_str(v))

    def from_numpy(self, mat: np.array,
                   *args: Sequence[VALID_INPUT_TYPES],
                   **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
        """Generate DAGNode representing matrix with data given by a numpy array, which will be sent to SystemDS
        on need.

        :param mat: the numpy array
        :param args: unnamed parameters
        :param kwargs: named parameters
        """
        unnamed_params = ['\'./tmp/{file_name}\'']

        if len(mat.shape) == 2:
            named_params = {'rows': mat.shape[0], 'cols': mat.shape[1]}
        elif len(mat.shape) == 1:
            named_params = {'rows': mat.shape[0], 'cols': 1}
        else:
            # TODO Support tensors.
            raise ValueError("Only two dimensional arrays supported")

        unnamed_params.extend(args)
        named_params.update(kwargs)
        return Matrix(self, 'read', unnamed_params, named_params, local_data=mat)

    def from_pandas(self, df: pd.DataFrame,
                    *args: Sequence[VALID_INPUT_TYPES], **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Frame:
        """Generate DAGNode representing frame with data given by a pandas dataframe, which will be sent to SystemDS
        on need.

        :param df: the pandas dataframe
        :param args: unnamed parameters
        :param kwargs: named parameters
        """
        unnamed_params = ["'./tmp/{file_name}'"]

        if len(df.shape) == 2:
            named_params = {'rows': df.shape[0], 'cols': df.shape[1]}
        elif len(df.shape) == 1:
            named_params = {'rows': df.shape[0], 'cols': 1}
        else:
            # TODO Support tensors.
            raise ValueError("Only two dimensional arrays supported")

        unnamed_params.extend(args)
        named_params["data_type"] = '"frame"'

        self._pd_dataframe = df

        named_params.update(kwargs)
        return Frame(self, "read", unnamed_params, named_params, local_data=df)

    def federated(self, addresses: Iterable[str],
                  ranges: Iterable[Tuple[Iterable[int], Iterable[int]]], *args,
                  **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
        """Create federated matrix object.

        :param addresses: addresses of the federated workers
        :param ranges: for each federated worker a pair of begin and end index of their held matrix
        :param args: unnamed params
        :param kwargs: named params
        :return: the OperationNode representing this operation
        """
        addresses_str = 'list(' + \
                        ','.join(map(lambda s: f'"{s}"', addresses)) + ')'
        ranges_str = 'list('
        for begin, end in ranges:
            ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
        # Drop the trailing comma before closing the list literal.
        ranges_str = ranges_str[:-1]
        ranges_str += ')'
        named_params = {'addresses': addresses_str, 'ranges': ranges_str}
        named_params.update(kwargs)
        return Matrix(self, 'federated', args, named_params)

    def source(self, path: str, name: str, print_imported_methods: bool = False):
        """Import methods from a given dml file.

        The importing is done thorugh the DML command source, and adds all defined methods from
        the script to the Source object returned in python. This gives the flexibility to call the methods
        directly on the object returned.

        In systemds a method called func_01 can then be imported using

        ```python
        res = self.sds.source("PATH_TO_FILE", "UNIQUE_NAME").func_01().compute(verbose = True)
        ```

        :param path: The absolute or relative path to the file to import
        :param name: The name to give the imported file in the script, this name must be unique
        :param print_imported_methods: boolean specifying if the imported methods should be printed.
        """
        return Source(self, path, name, print_imported_methods)
| 42.251627 | 124 | 0.607352 |
__all__ = ["SystemDSContext"]
import copy
import json
import os
import socket
import threading
import time
from glob import glob
from queue import Empty, Queue
from subprocess import PIPE, Popen
from threading import Thread
from time import sleep
from typing import Dict, Iterable, Sequence, Tuple, Union
import numpy as np
import pandas as pd
from py4j.java_gateway import GatewayParameters, JavaGateway
from py4j.protocol import Py4JNetworkError
from systemds.operator import Frame, Matrix, OperationNode, Scalar, Source
from systemds.script_building import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
from systemds.utils.helpers import get_module_dir
class SystemDSContext(object):
    """A context with a connection to a java instance with which SystemDS operations are executed.
    The java process is started and is running using a random tcp port for instruction parsing."""

    java_gateway: JavaGateway

    def __init__(self, port: int = -1):
        """Start a new JVM running the SystemDS py4j gateway and connect to it.

        :param port: The port to connect to the JVM on; -1 selects a random free port.
        """
        command = self.__build_startup_command()
        process, port = self.__try_startup(command, port)

        # Drain stdout/stderr of the subprocess on daemon threads, since
        # readline on the pipes is blocking.
        self.__stdout = Queue()
        self.__stderr = Queue()
        self.__stdout_thread = Thread(target=self.__enqueue_output, args=(
            process.stdout, self.__stdout), daemon=True)
        self.__stderr_thread = Thread(target=self.__enqueue_output, args=(
            process.stderr, self.__stderr), daemon=True)
        self.__stdout_thread.start()
        self.__stderr_thread.start()

        # Py4j connect to the started process.
        gwp = GatewayParameters(port=port, eager_load=True)
        self.java_gateway = JavaGateway(
            gateway_parameters=gwp, java_process=process)

    def get_stdout(self, lines: int = -1):
        """Return queued stdout lines of the java subprocess.

        :param lines: number of lines to read; default -1 returns all queued lines.
        """
        if lines == -1 or self.__stdout.qsize() < lines:
            return [self.__stdout.get() for x in range(self.__stdout.qsize())]
        else:
            return [self.__stdout.get() for x in range(lines)]

    def get_stderr(self, lines: int = -1):
        """Return queued stderr lines of the java subprocess.

        :param lines: number of lines to read; default -1 returns all queued lines.
        """
        if lines == -1 or self.__stderr.qsize() < lines:
            return [self.__stderr.get() for x in range(self.__stderr.qsize())]
        else:
            return [self.__stderr.get() for x in range(lines)]

    def exception_and_close(self, e: Exception):
        """Print the exception together with the captured JVM output,
        close the context and re-raise as a RuntimeError.

        :param e: the exception thrown
        """
        message = "Exception Encountered! closing JVM\n"
        message += "standard out :\n" + "\n".join(self.get_stdout())
        # BUGFIX: previously appended stdout twice; append stderr here.
        message += "standard error :\n" + "\n".join(self.get_stderr())
        message += "Exception : " + str(e)
        self.close()
        raise RuntimeError(message)

    def __try_startup(self, command, port, rep=0):
        """Try to start the gateway subprocess, retrying on a fresh port.

        :param command: the java command to execute
        :param port: the requested port (-1 for random)
        :param rep: number of attempts made so far
        """
        if port == -1:
            assignedPort = self.__get_open_port()
        elif rep == 0:
            assignedPort = port
        else:
            # Retries after a failure pick a fresh port.
            assignedPort = self.__get_open_port()

        fullCommand = []
        fullCommand.extend(command)
        fullCommand.append(str(assignedPort))

        process = Popen(fullCommand, stdout=PIPE, stdin=PIPE, stderr=PIPE)
        try:
            self.__verify_startup(process)
            return process, assignedPort
        except Exception:
            # BUGFIX: self.close() cannot be used here — the output threads
            # and the gateway do not exist yet during __init__, so it raised
            # AttributeError and broke the retry loop. Kill the failed
            # subprocess directly instead.
            process.kill()
            if rep > 3:
                raise Exception(
                    "Failed to start SystemDS context with " + str(rep) + " repeated tries")
            else:
                rep += 1
                print("Failed to startup JVM process, retrying: " + str(rep))
                sleep(0.5)
                return self.__try_startup(command, port, rep)

    def __verify_startup(self, process):
        """Read subprocess output until the py4j GatewayServer reports ready,
        raising if it fails to do so within a bounded number of lines."""
        first_stdout = process.stdout.readline()
        if(not b"GatewayServer Started" in first_stdout):
            stderr = process.stderr.readline().decode("utf-8")
            if(len(stderr) > 1):
                raise Exception(
                    "Exception in startup of GatewayServer: " + stderr)
            outputs = []
            outputs.append(first_stdout.decode("utf-8"))
            max_tries = 10
            for i in range(max_tries):
                next_line = process.stdout.readline()
                if(b"GatewayServer Started" in next_line):
                    print("WARNING: Stdout corrupted by prints: " + str(outputs))
                    print("Startup success")
                    break
                else:
                    outputs.append(next_line)

                if (i == max_tries-1):
                    raise Exception("Error in startup of systemDS gateway process: \n gateway StdOut: " + str(
                        outputs) + " \n gateway StdErr" + process.stderr.readline().decode("utf-8"))

    def __build_startup_command(self):
        """Assemble the java command line (classpath, log4j config, main class)
        used to launch the SystemDS py4j gateway."""
        command = ["java", "-cp"]
        root = os.environ.get("SYSTEMDS_ROOT")
        if root == None:
            # No SystemDS install found; fall back to the pip-packaged jars.
            root = os.path.join(get_module_dir(), "systemds-java")

        # os.name 'nt' means Windows (';' classpath separator).
        cp_separator = ";" if os.name == "nt" else ":"

        if os.environ.get("SYSTEMDS_ROOT") != None:
            lib_cp = os.path.join(root, "target", "lib", "*")
            systemds_cp = os.path.join(root, "target", "SystemDS.jar")
            classpath = cp_separator.join([lib_cp, systemds_cp])

            command.append(classpath)
            files = glob(os.path.join(root, "conf", "log4j*.properties"))
            if len(files) > 1:
                print(
                    "WARNING: Multiple logging files found selecting: " + files[0])
            if len(files) == 0:
                print("WARNING: No log4j file found at: "
                      + os.path.join(root, "conf")
                      + " therefore using default settings")
            else:
                command.append("-Dlog4j.configuration=file:" + files[0])
        else:
            lib_cp = os.path.join(root, "lib", "*")
            command.append(lib_cp)

        command.append("org.apache.sysds.api.PythonDMLScript")

        return command

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.close()
        # no errors to handle to allow continuation
        return None

    def close(self):
        """Close the connection to the java process and do necessary cleanup."""
        if self.__stdout_thread.is_alive():
            self.__stdout_thread.join(0)
        # BUGFIX: previously re-checked the stdout thread here (copy-paste),
        # leaving the stderr thread unjoined when stdout had already finished.
        if self.__stderr_thread.is_alive():
            self.__stderr_thread.join(0)

        pid = self.java_gateway.java_process.pid
        if self.java_gateway.java_gateway_server is not None:
            try:
                self.java_gateway.shutdown(True)
            except Py4JNetworkError as e:
                if "Gateway is not connected" not in str(e):
                    self.java_gateway.java_process.kill()
        # Signal 14 (SIGALRM) terminates the JVM process if still alive.
        os.kill(pid, 14)

    def __enqueue_output(self, out, queue):
        """Forward decoded lines from a subprocess pipe into a queue.
        Runs on its own thread because 'out.readline' blocks."""
        for line in iter(out.readline, b""):
            queue.put(line.decode("utf-8").strip())

    def __get_open_port(self):
        """Get a random available port (race possible before the JVM binds it)."""
        # https://stackoverflow.com/questions/2838244/get-open-tcp-port-in-python
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.bind(("", 0))
        s.listen(1)
        port = s.getsockname()[1]
        s.close()
        return port

    def full(self, shape: Tuple[int, int], value: Union[float, int]) -> 'Matrix':
        """Generates a matrix completely filled with a value

        :param shape: shape (rows and cols) of the matrix
        :param value: the value to fill all cells with
        :return: the OperationNode representing this operation
        """
        unnamed_input_nodes = [value]
        named_input_nodes = {'rows': shape[0], 'cols': shape[1]}
        return Matrix(self, 'matrix', unnamed_input_nodes, named_input_nodes)

    def seq(self, start: Union[float, int], stop: Union[float, int] = None,
            step: Union[float, int] = 1) -> 'Matrix':
        """Create a single column vector with values from `start` to `stop` with increment `step`.
        If only one parameter is given, start is 0 and the parameter is interpreted as stop.

        :param start: the starting value
        :param stop: the maximum value
        :param step: the step size
        :return: the OperationNode representing this operation
        """
        if stop is None:
            stop = start
            start = 0
        unnamed_input_nodes = [start, stop, step]
        return Matrix(self, 'seq', unnamed_input_nodes)

    def rand(self, rows: int, cols: int,
             min: Union[float, int] = None, max: Union[float, int] = None, pdf: str = "uniform",
             sparsity: Union[float, int] = None, seed: Union[float, int] = None,
             lambd: Union[float, int] = 1) -> 'Matrix':
        """Generates a matrix filled with random values

        :param rows: number of rows
        :param cols: number of cols
        :param min: min value for cells
        :param max: max value for cells
        :param pdf: "uniform"/"normal"/"poisson" distribution
        :param sparsity: fraction of non-zero cells
        :param seed: random seed
        :param lambd: lambda value for "poisson" distribution
        :return: the OperationNode representing this operation
        """
        available_pdfs = ["uniform", "normal", "poisson"]
        if rows < 0:
            raise ValueError("In rand statement, can only assign rows a long (integer) value >= 0 "
                             "-- attempted to assign value: {r}".format(r=rows))
        if cols < 0:
            raise ValueError("In rand statement, can only assign cols a long (integer) value >= 0 "
                             "-- attempted to assign value: {c}".format(c=cols))
        if pdf not in available_pdfs:
            raise ValueError("The pdf passed is invalid! given: {g}, expected: {e}".format(
                g=pdf, e=available_pdfs))

        pdf = '\"' + pdf + '\"'
        named_input_nodes = {
            'rows': rows, 'cols': cols, 'pdf': pdf, 'lambda': lambd}
        if min is not None:
            named_input_nodes['min'] = min
        if max is not None:
            named_input_nodes['max'] = max
        if sparsity is not None:
            named_input_nodes['sparsity'] = sparsity
        if seed is not None:
            named_input_nodes['seed'] = seed

        return Matrix(self, 'rand', [], named_input_nodes=named_input_nodes)

    def read(self, path: os.PathLike, **kwargs: Dict[str, VALID_INPUT_TYPES]) -> OperationNode:
        """Read a file from disk (CSV, Matrix Market, Text(i,j,v), SystemDS binary).

        :return: an Operation Node, containing the read data.
        """
        mtd_filepath = path + ".mtd"
        if os.path.exists(mtd_filepath):
            # Metadata file overrides any caller-supplied data_type.
            with open(mtd_filepath) as jspec_file:
                mtd = json.load(jspec_file)
                kwargs["data_type"] = mtd["data_type"]

        data_type = kwargs.get("data_type", None)
        file_format = kwargs.get("format", None)
        if data_type == "matrix":
            kwargs["data_type"] = f'"{data_type}"'
            return Matrix(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
        elif data_type == "frame":
            kwargs["data_type"] = f'"{data_type}"'
            if isinstance(file_format, str):
                kwargs["format"] = f'"{kwargs["format"]}"'
            return Frame(self, "read", [f'"{path}"'], named_input_nodes=kwargs)
        elif data_type == "scalar":
            kwargs["data_type"] = f'"{data_type}"'
            output_type = OutputType.from_str(kwargs.get("value_type", None))
            kwargs["value_type"] = f'"{output_type.name}"'
            return Scalar(self, "read", [f'"{path}"'], named_input_nodes=kwargs, output_type=output_type)

        print("WARNING: Unknown type read please add a mtd file, or specify in arguments")
        return OperationNode(self, "read", [f'"{path}"'], named_input_nodes=kwargs)

    def scalar(self, v: Dict[str, VALID_INPUT_TYPES]) -> 'Scalar':
        """Construct a scalar value (str, float, double, integer or boolean).

        :return: An `OperationNode` containing the scalar value.
        """
        if type(v) is str:
            if not ((v[0] == '"' and v[-1] == '"') or (v[0] == "'" and v[-1] == "'")):
                v = f'"{v}"'

        # Output type 'assign' simply assigns the given value to a variable.
        return Scalar(self, v, assign=True, output_type=OutputType.from_str(v))

    def from_numpy(self, mat: np.array,
                   *args: Sequence[VALID_INPUT_TYPES],
                   **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
        """Generate DAGNode representing a matrix with data given by a numpy array.

        :param mat: the numpy array
        :param args: unnamed parameters
        :param kwargs: named parameters
        """
        unnamed_params = ['\'./tmp/{file_name}\'']

        if len(mat.shape) == 2:
            named_params = {'rows': mat.shape[0], 'cols': mat.shape[1]}
        elif len(mat.shape) == 1:
            named_params = {'rows': mat.shape[0], 'cols': 1}
        else:
            # TODO Support tensors.
            raise ValueError("Only two dimensional arrays supported")

        unnamed_params.extend(args)
        named_params.update(kwargs)
        return Matrix(self, 'read', unnamed_params, named_params, local_data=mat)

    def from_pandas(self, df: pd.DataFrame,
                    *args: Sequence[VALID_INPUT_TYPES], **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Frame:
        """Generate DAGNode representing a frame with data given by a pandas dataframe.

        :param df: the pandas dataframe
        :param args: unnamed parameters
        :param kwargs: named parameters
        """
        unnamed_params = ["'./tmp/{file_name}'"]

        if len(df.shape) == 2:
            named_params = {'rows': df.shape[0], 'cols': df.shape[1]}
        elif len(df.shape) == 1:
            named_params = {'rows': df.shape[0], 'cols': 1}
        else:
            # TODO Support tensors.
            raise ValueError("Only two dimensional arrays supported")

        unnamed_params.extend(args)
        named_params["data_type"] = '"frame"'

        self._pd_dataframe = df

        named_params.update(kwargs)
        return Frame(self, "read", unnamed_params, named_params, local_data=df)

    def federated(self, addresses: Iterable[str],
                  ranges: Iterable[Tuple[Iterable[int], Iterable[int]]], *args,
                  **kwargs: Dict[str, VALID_INPUT_TYPES]) -> Matrix:
        """Create federated matrix object.

        :param addresses: addresses of the federated workers
        :param ranges: for each federated worker a pair of begin and end index of their held matrix
        :param args: unnamed params
        :param kwargs: named params
        :return: the OperationNode representing this operation
        """
        addresses_str = 'list(' + \
                        ','.join(map(lambda s: f'"{s}"', addresses)) + ')'
        ranges_str = 'list('
        for begin, end in ranges:
            ranges_str += f'list({",".join(map(str, begin))}), list({",".join(map(str, end))}),'
        # Drop the trailing comma before closing the list literal.
        ranges_str = ranges_str[:-1]
        ranges_str += ')'
        named_params = {'addresses': addresses_str, 'ranges': ranges_str}
        named_params.update(kwargs)
        return Matrix(self, 'federated', args, named_params)

    def source(self, path: str, name: str, print_imported_methods: bool = False):
        """Import methods from a given dml file via the DML `source` command.

        All methods defined in the script are added to the returned Source
        object and can be called directly on it.

        :param path: The absolute or relative path to the file to import
        :param name: The name to give the imported file in the script, this name must be unique
        :param print_imported_methods: boolean specifying if the imported methods should be printed.
        """
        return Source(self, path, name, print_imported_methods)
| true | true |
f7f81a7d72172441ce2851b5c74d2ec312c4106e | 230 | py | Python | code/python/echomesh/util/Call.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 30 | 2015-02-18T14:07:00.000Z | 2021-12-11T15:19:01.000Z | code/python/echomesh/util/Call.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 16 | 2015-01-01T23:17:24.000Z | 2015-04-18T23:49:27.000Z | code/python/echomesh/util/Call.py | rec/echomesh | be668971a687b141660fd2e5635d2fd598992a01 | [
"MIT"
] | 31 | 2015-03-11T20:04:07.000Z | 2020-11-02T13:56:59.000Z | from __future__ import absolute_import, division, print_function, unicode_literals
import six
def call(f):
    """Return ``f()`` if *f* is callable, otherwise return *f* unchanged.

    Uses the builtin ``callable`` (available on every Python version this
    codebase supports) instead of the redundant ``six.callable`` shim, which
    is just an alias for the builtin.
    """
    return f() if callable(f) else f
def call_recursive(f):
    """Repeatedly call *f* while the result is callable; return the first
    non-callable value.

    Uses the builtin ``callable`` instead of the redundant ``six.callable``
    shim (an alias for the builtin on all supported Python versions).
    """
    while callable(f):
        f = f()
    return f
| 19.166667 | 82 | 0.695652 | from __future__ import absolute_import, division, print_function, unicode_literals
import six
def call(f):
return f() if six.callable(f) else f
def call_recursive(f):
while six.callable(f):
f = f()
return f
| true | true |
f7f81b93166963ec5549a4f9cfa1d386990afbc9 | 4,309 | py | Python | ai4good/webapp/tests/dev_web_tests_v2.py | AIforGoodSimulator/model-server | 17662dc2a18b3e12e5c8ee51731e288088a51cf5 | [
"MIT"
] | 11 | 2020-06-12T11:46:26.000Z | 2022-02-09T23:10:34.000Z | ai4good/webapp/tests/dev_web_tests_v2.py | AIforGoodSimulator/model-server | 17662dc2a18b3e12e5c8ee51731e288088a51cf5 | [
"MIT"
] | 54 | 2020-07-17T10:13:33.000Z | 2021-03-08T16:38:29.000Z | ai4good/webapp/tests/dev_web_tests_v2.py | AIforGoodSimulator/model-server | 17662dc2a18b3e12e5c8ee51731e288088a51cf5 | [
"MIT"
] | 10 | 2020-07-03T16:12:50.000Z | 2021-02-06T15:51:16.000Z | from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import re
import argparse
import sys
# Get command line parameters to pass to Zalenium (remote Selenium grid).
parser = argparse.ArgumentParser()
parser.add_argument('--zaluser', default='none')
parser.add_argument('--zalpassword', default='none')
parser.add_argument('--zalhost', default='none')
args = parser.parse_args()
# Connect a remote Chrome session through the Zalenium hub.
driver = webdriver.Remote(
    command_executor='http://'+args.zaluser+':'+args.zalpassword+'@'+args.zalhost+'/wd/hub',
    desired_capabilities=DesiredCapabilities.CHROME)
home_url = "http://ai4good-dev.azurewebsites.net/sim/run_model"
# NOTE(review): home_url already ends in /sim/run_model, so these two URLs
# double the path (".../run_modelsim/run_model"); confirm whether home_url
# was meant to be the bare site root. Neither derived URL is used below.
run_model_url = home_url + "sim/run_model"
validate_model_url = home_url + "sim/validate_model"
driver.get(home_url)
# test code
driver.set_window_size(1920, 1029)
# Log in with the shared test account ("Updating..." is the transient title
# Dash shows while a page is still loading, so both titles are accepted).
assert driver.title in ("AI4Good COVID-19 Model Server authentication", "Updating...")
driver.find_element(By.ID, "login-email").click()
driver.find_element(By.ID, "login-email").send_keys("test@test.com")
driver.find_element(By.ID, "login-password").send_keys("test123")
driver.find_element(By.ID, "login-submit-button").click()
driver.execute_script("window.scrollTo(0,0)")
driver.find_element(By.ID, "landing-button").click()
driver.execute_script("window.scrollTo(0,0)")
assert driver.title in ("AI4Good COVID-19 Model Server", "Updating...")
# Page 1: basic camp information.
driver.find_element(By.ID, "name-camp").click()
driver.find_element(By.ID, "name-camp").send_keys("Moira")
driver.find_element(By.ID, "location").click()
driver.find_element(By.ID, "location").send_keys("Some region")
driver.find_element(By.ID, "total-area").click()
driver.find_element(By.ID, "total-area").send_keys("100000")
driver.find_element(By.ID, "page-1-button").click()
# Page 2: accommodation details for one accommodation type.
driver.find_element(By.CSS_SELECTOR, ".justify-content-center").click()
driver.find_element(By.ID, "accommodation-area-type1").click()
driver.find_element(By.ID, "accommodation-area-type1").send_keys("10000")
driver.find_element(By.ID, "accommodation-no-unit-type1").click()
driver.find_element(By.ID, "accommodation-no-unit-type1").send_keys("20")
driver.find_element(By.ID, "accommodation-no-person-type1").click()
driver.find_element(By.ID, "accommodation-no-person-type1").send_keys("5000")
driver.find_element(By.ID, "page-2-button").click()
driver.execute_script("window.scrollTo(0,0)")
# Page 3: medical capacity and camp-level policy toggles.
driver.find_element(By.ID, "available-ICU-beds").click()
driver.find_element(By.ID, "available-ICU-beds").send_keys("100")
driver.find_element(By.CSS_SELECTOR, "#remove-high-risk-off-site > .custom-radio:nth-child(3) > .custom-control-label").click()
driver.find_element(By.ID, "isolation-centre-capacity").click()
driver.find_element(By.ID, "isolation-centre-capacity").send_keys("300")
driver.find_element(By.CSS_SELECTOR, "#community-shielding > .custom-radio:nth-child(1) > .custom-control-label").click()
driver.find_element(By.CSS_SELECTOR, "#community-surveillance-program > .custom-radio:nth-child(2) > .custom-control-label").click()
driver.find_element(By.ID, "page-3-button").click()
driver.execute_script("window.scrollTo(0,0)")
# Page 4: interventions and activities, then submit the simulation.
driver.find_element(By.CSS_SELECTOR, "#radio-intervene-social > .custom-radio:nth-child(3) > .custom-control-label").click()
driver.find_element(By.CSS_SELECTOR, "#tabs-health-intervent > .nav-item:nth-child(2)").click()
driver.find_element(By.CSS_SELECTOR, "#slider-health-intervent > .rc-slider").click()
driver.find_element(By.ID, "activity-no-place-admin").click()
driver.find_element(By.ID, "activity-no-place-admin").send_keys("5")
driver.find_element(By.ID, "activity-no-person-admin").click()
driver.find_element(By.ID, "activity-no-person-admin").click()
driver.find_element(By.ID, "activity-no-person-admin").send_keys("100")
driver.find_element(By.ID, "activity-no-visit-admin").click()
driver.find_element(By.ID, "activity-no-visit-admin").send_keys("200")
driver.find_element(By.ID, "page-4-button").click()
driver.execute_script("window.scrollTo(0,0)")
# Submission succeeded if the waiting page is shown.
assert r"Please wait for the simulation to complete" in driver.page_source
driver.close()
| 53.197531 | 132 | 0.773265 | from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains
from selenium.webdriver.support import expected_conditions
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.keys import Keys
import re
import argparse
import sys
parser = argparse.ArgumentParser()
parser.add_argument('--zaluser', default='none')
parser.add_argument('--zalpassword', default='none')
parser.add_argument('--zalhost', default='none')
args = parser.parse_args()
driver = webdriver.Remote(
command_executor='http://'+args.zaluser+':'+args.zalpassword+'@'+args.zalhost+'/wd/hub',
desired_capabilities=DesiredCapabilities.CHROME)
home_url = "http://ai4good-dev.azurewebsites.net/sim/run_model"
run_model_url = home_url + "sim/run_model"
validate_model_url = home_url + "sim/validate_model"
driver.get(home_url)
driver.set_window_size(1920, 1029)
assert driver.title in ("AI4Good COVID-19 Model Server authentication", "Updating...")
driver.find_element(By.ID, "login-email").click()
driver.find_element(By.ID, "login-email").send_keys("test@test.com")
driver.find_element(By.ID, "login-password").send_keys("test123")
driver.find_element(By.ID, "login-submit-button").click()
driver.execute_script("window.scrollTo(0,0)")
driver.find_element(By.ID, "landing-button").click()
driver.execute_script("window.scrollTo(0,0)")
assert driver.title in ("AI4Good COVID-19 Model Server", "Updating...")
driver.find_element(By.ID, "name-camp").click()
driver.find_element(By.ID, "name-camp").send_keys("Moira")
driver.find_element(By.ID, "location").click()
driver.find_element(By.ID, "location").send_keys("Some region")
driver.find_element(By.ID, "total-area").click()
driver.find_element(By.ID, "total-area").send_keys("100000")
driver.find_element(By.ID, "page-1-button").click()
driver.find_element(By.CSS_SELECTOR, ".justify-content-center").click()
driver.find_element(By.ID, "accommodation-area-type1").click()
driver.find_element(By.ID, "accommodation-area-type1").send_keys("10000")
driver.find_element(By.ID, "accommodation-no-unit-type1").click()
driver.find_element(By.ID, "accommodation-no-unit-type1").send_keys("20")
driver.find_element(By.ID, "accommodation-no-person-type1").click()
driver.find_element(By.ID, "accommodation-no-person-type1").send_keys("5000")
driver.find_element(By.ID, "page-2-button").click()
driver.execute_script("window.scrollTo(0,0)")
driver.find_element(By.ID, "available-ICU-beds").click()
driver.find_element(By.ID, "available-ICU-beds").send_keys("100")
driver.find_element(By.CSS_SELECTOR, "#remove-high-risk-off-site > .custom-radio:nth-child(3) > .custom-control-label").click()
driver.find_element(By.ID, "isolation-centre-capacity").click()
driver.find_element(By.ID, "isolation-centre-capacity").send_keys("300")
driver.find_element(By.CSS_SELECTOR, "#community-shielding > .custom-radio:nth-child(1) > .custom-control-label").click()
driver.find_element(By.CSS_SELECTOR, "#community-surveillance-program > .custom-radio:nth-child(2) > .custom-control-label").click()
driver.find_element(By.ID, "page-3-button").click()
driver.execute_script("window.scrollTo(0,0)")
driver.find_element(By.CSS_SELECTOR, "#radio-intervene-social > .custom-radio:nth-child(3) > .custom-control-label").click()
driver.find_element(By.CSS_SELECTOR, "#tabs-health-intervent > .nav-item:nth-child(2)").click()
driver.find_element(By.CSS_SELECTOR, "#slider-health-intervent > .rc-slider").click()
driver.find_element(By.ID, "activity-no-place-admin").click()
driver.find_element(By.ID, "activity-no-place-admin").send_keys("5")
driver.find_element(By.ID, "activity-no-person-admin").click()
driver.find_element(By.ID, "activity-no-person-admin").click()
driver.find_element(By.ID, "activity-no-person-admin").send_keys("100")
driver.find_element(By.ID, "activity-no-visit-admin").click()
driver.find_element(By.ID, "activity-no-visit-admin").send_keys("200")
driver.find_element(By.ID, "page-4-button").click()
driver.execute_script("window.scrollTo(0,0)")
assert r"Please wait for the simulation to complete" in driver.page_source
driver.close()
| true | true |
f7f81c8164baa526a6efe106e0f1fea736cb09bb | 1,543 | py | Python | nginx-parser/app.py | fly304625/nginx-geo-metricsreporter | e6d66da02e9677f090d2c1b2a059e7509d638dc7 | [
"MIT"
] | 16 | 2020-08-02T07:59:14.000Z | 2022-03-19T18:12:35.000Z | nginx-parser/app.py | fly304625/nginx-geo-metricsreporter | e6d66da02e9677f090d2c1b2a059e7509d638dc7 | [
"MIT"
] | null | null | null | nginx-parser/app.py | fly304625/nginx-geo-metricsreporter | e6d66da02e9677f090d2c1b2a059e7509d638dc7 | [
"MIT"
] | 4 | 2021-06-01T10:42:48.000Z | 2022-03-19T18:11:53.000Z | import os
import sys
import re
import requests
import json
import time
import logging
# Debug-level logging on the root logger, streamed to stdout.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
# Endpoint that receives parsed log lines as JSON, the nginx access log to
# tail, and the polling period in seconds.
URL = "http://localhost:8080"
INPUT_LOG = "/var/log/nginx/access.log"
INTERVAL=5
# Combined-log-format matcher with named groups (ipaddress, dateandtime, url,
# statuscode, bytessent, refferer, useragent, host). Compiled case-insensitive
# so the [a-z]{3} month abbreviation also matches the usual "Jul" casing.
# NOTE(review): only GET/POST and HTTP/1.1 request lines match -- confirm that
# is intended before relying on full coverage of the log.
LOGREGFORMAT ='(?P<ipaddress>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - \[(?P<dateandtime>\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|\-)\d{4})\] ((\"(GET|POST) )(?P<url>.+)(http\/1\.1")) (?P<statuscode>\d{3}) (?P<bytessent>\d+) (["](?P<refferer>(\-)|(.+))["]) (["](?P<useragent>.+)["]) (["](?P<host>.+)["])'
LOGREGFORMAT = re.compile(LOGREGFORMAT, re.IGNORECASE)
def readFile():
    """Return the current contents of INPUT_LOG as a list of lines."""
    with open(INPUT_LOG, "r") as log_file:
        return log_file.readlines()
def send(json_data,URL):
    # POST one parsed log line to the collector as a JSON body.
    headers = {'content-type': 'application/json'}
    requests.post(URL, data=json.dumps(json_data),headers=headers)
# Poll the access log every INTERVAL seconds and POST each newly appended,
# parseable line to URL as JSON.
initial = readFile()
root.debug("parser started")
while True:
    current = readFile()
    if initial != current:
        # Hoist membership testing into a set: O(1) lookups instead of an
        # O(n) list scan per line.
        known_lines = set(initial)
        for line in current:
            if line in known_lines:
                continue
            match = re.search(LOGREGFORMAT, line)
            if match is None:
                # BUG FIX: previously a failed parse only logged inside the
                # except block but still executed send(datadict, URL) with the
                # PREVIOUS line's dict (or raised NameError on the very first
                # unparseable line). Skip the line instead.
                root.debug("Invalid log format (no regex match)")
                continue
            datadict = match.groupdict()
            send(datadict, URL)
            root.debug(datadict)
        initial = current
    time.sleep(int(INTERVAL))
| 31.489796 | 306 | 0.600778 | import os
import sys
import re
import requests
import json
import time
import logging
root = logging.getLogger()
root.setLevel(logging.DEBUG)
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(message)s')
handler.setFormatter(formatter)
root.addHandler(handler)
URL = "http://localhost:8080"
INPUT_LOG = "/var/log/nginx/access.log"
INTERVAL=5
LOGREGFORMAT ='(?P<ipaddress>\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}) - - \[(?P<dateandtime>\d{2}\/[a-z]{3}\/\d{4}:\d{2}:\d{2}:\d{2} (\+|\-)\d{4})\] ((\"(GET|POST) )(?P<url>.+)(http\/1\.1")) (?P<statuscode>\d{3}) (?P<bytessent>\d+) (["](?P<refferer>(\-)|(.+))["]) (["](?P<useragent>.+)["]) (["](?P<host>.+)["])'
LOGREGFORMAT = re.compile(LOGREGFORMAT, re.IGNORECASE)
def readFile():
with open(INPUT_LOG, "r") as f:
SMRF1 = f.readlines()
return SMRF1
def send(json_data,URL):
headers = {'content-type': 'application/json'}
requests.post(URL, data=json.dumps(json_data),headers=headers)
initial = readFile()
root.debug("parser started")
while True:
current = readFile()
if initial != current:
for line in current:
if line not in initial:
data = re.search(LOGREGFORMAT, line)
try:
datadict = data.groupdict()
except:
root.debug("Invalid log format (None type)")
send(datadict,URL)
root.debug(datadict)
initial = current
time.sleep(int(INTERVAL))
| true | true |
f7f81d8cd61fe619520c40f6ecbd1e6c384b3f00 | 1,846 | py | Python | python_legacy/others_src/pgu/examples/layout1.py | garred/only_fighters | 55c3cc06884f3226d75a800e1ee79afd2c23aa8b | [
"CC-BY-3.0",
"CC0-1.0"
] | null | null | null | python_legacy/others_src/pgu/examples/layout1.py | garred/only_fighters | 55c3cc06884f3226d75a800e1ee79afd2c23aa8b | [
"CC-BY-3.0",
"CC0-1.0"
] | null | null | null | python_legacy/others_src/pgu/examples/layout1.py | garred/only_fighters | 55c3cc06884f3226d75a800e1ee79afd2c23aa8b | [
"CC-BY-3.0",
"CC0-1.0"
] | null | null | null | """<title>an example of layout usage</title>"""
import pygame
from pygame.locals import *
# the following line is not needed if pgu is installed
import sys; sys.path.insert(0, "..")
from pgu import layout
pygame.font.init()
screen = pygame.display.set_mode((320,320),SWSURFACE)
bg = (255,255,255)
fg = (0,0,0)
screen.fill(bg)

# Minimal layout element: just carries .image/.rect (and optionally .align).
class Obj: pass

l = layout.Layout(pygame.Rect(0,0,320,320))

# One aligned image element.
e = Obj()
e.image = pygame.image.load("cuzco.png")
e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
e.align = 1
l.add(e) #aligned object

font = pygame.font.SysFont("default", 24)
w,h = font.size(" ")

def _add_words(text):
    """Render each word of *text* as an inline element followed by a space.

    Factors out the word-layout loop that was previously duplicated for each
    paragraph; behavior is unchanged.
    """
    for word in text.split(" "):
        word_el = Obj()
        word_el.image = font.render(word,1,fg)
        word_el.rect = pygame.Rect(0,0,word_el.image.get_width(),word_el.image.get_height())
        l.add(word_el) #inline object
        l.add((w,h)) #space

l.add(-1) #start of new block
_add_words("""Welcome to my little demo of the layout module. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah.""")
l.add((0,h)) #br

##The layout object will layout words, and document elements for you
##::
l.add(-1) #start of new block
_add_words("""The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah.""")
##

l.resize()
for widget in l.widgets:
    screen.blit(widget.image,(widget.rect.x,widget.rect.y))
pygame.display.flip()

# Simple event loop: wait for the window to be closed.
_quit = 0
while not _quit:
    for event in pygame.event.get():
        # BUG FIX: was `event.type is QUIT` -- identity comparison on an int
        # constant; works only because CPython interns small ints. Use ==.
        if event.type == QUIT: _quit = 1
    pygame.time.wait(10)
| 28.84375 | 271 | 0.66468 | import pygame
from pygame.locals import *
import sys; sys.path.insert(0, "..")
from pgu import layout
pygame.font.init()
screen = pygame.display.set_mode((320,320),SWSURFACE)
bg = (255,255,255)
fg = (0,0,0)
screen.fill(bg)
class Obj: pass
l = layout.Layout(pygame.Rect(0,0,320,320))
e = Obj()
e.image = pygame.image.load("cuzco.png")
e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
e.align = 1
l.add(e)
font = pygame.font.SysFont("default", 24)
w,h = font.size(" ")
l.add(-1)
for word in """Welcome to my little demo of the layout module. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah.""".split(" "):
e = Obj()
e.image = font.render(word,1,fg)
e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
l.add(e)
l.add((w,h))
l.add((0,h))
sure you will be very impressed by it. blah blah blah. The demo does not do a whole lot, but I'm sure you will be very impressed by it. blah blah blah.""".split(" "):
e = Obj()
e.image = font.render(word,1,fg)
e.rect = pygame.Rect(0,0,e.image.get_width(),e.image.get_height())
l.add(e)
l.add((w,h))
l.resize()
for e in l.widgets:
screen.blit(e.image,(e.rect.x,e.rect.y))
pygame.display.flip()
_quit = 0
while not _quit:
for e in pygame.event.get():
if e.type is QUIT: _quit = 1
pygame.time.wait(10)
| true | true |
f7f81f4af340c307828c508fe8aa2aab7be33787 | 1,684 | py | Python | zarc/models_2017-08-13-03:28:46.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 1 | 2019-06-03T16:20:35.000Z | 2019-06-03T16:20:35.000Z | zarc/models_2017-08-13-03:28:46.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 20 | 2020-01-28T22:02:29.000Z | 2022-03-29T22:28:34.000Z | zarc/models_2017-08-13-03:28:46.py | nyimbi/caseke | ce4a0fa44cd383bc23900e42f81656f089c8fdd9 | [
"MIT"
] | 1 | 2019-06-10T17:20:48.000Z | 2019-06-10T17:20:48.000Z | # coding: utf-8
# AUTOGENERATED BY gen_script.sh from kp4.py
# Copyright (C) Nyimbi Odero, Sun Aug 13 03:28:10 EAT 2017
from sqlalchemy import func
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn, UserExtensionMixin
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import ImageManager
from sqlalchemy_utils import aggregated, force_auto_coercion
from sqlalchemy.orm import relationship, query, defer, deferred
# IMPORT Postgresql Specific Types
from sqlalchemy.dialects.postgresql import (
ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE,
DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER,
INTERVAL, JSON, JSONB, MACADDR, NUMERIC, OID, REAL, SMALLINT, TEXT,
TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE,
DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR )
from sqlalchemy.dialects.postgresql import aggregate_order_by
from sqlalchemy import (Column, Integer, String, ForeignKey,
Sequence, Float, Text, BigInteger, Date,
DateTime, Time, Boolean, Index, CheckConstraint,
UniqueConstraint,ForeignKeyConstraint, Numeric, LargeBinary , Table)
from datetime import timedelta, datetime, date
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.sql import func
from .mixins import *
# Here is how to extend the User model
#class UserExtended(Model, UserExtensionMixin):
# contact_group_id = Column(Integer, ForeignKey('contact_group.id'), nullable=True)
# contact_group = relationship('ContactGroup')
# UTILITY CLASSES
import arrow, enum
import enum
# Initialize sqlalchemy_utils
#force_auto_coercion()
| 39.162791 | 99 | 0.787411 |
from sqlalchemy import func
from flask_appbuilder import Model
from flask_appbuilder.models.mixins import AuditMixin, FileColumn, ImageColumn, UserExtensionMixin
from flask_appbuilder.models.decorators import renders
from flask_appbuilder.filemanager import ImageManager
from sqlalchemy_utils import aggregated, force_auto_coercion
from sqlalchemy.orm import relationship, query, defer, deferred
from sqlalchemy.dialects.postgresql import (
ARRAY, BIGINT, BIT, BOOLEAN, BYTEA, CHAR, CIDR, DATE,
DOUBLE_PRECISION, ENUM, FLOAT, HSTORE, INET, INTEGER,
INTERVAL, JSON, JSONB, MACADDR, NUMERIC, OID, REAL, SMALLINT, TEXT,
TIME, TIMESTAMP, UUID, VARCHAR, INT4RANGE, INT8RANGE, NUMRANGE,
DATERANGE, TSRANGE, TSTZRANGE, TSVECTOR )
from sqlalchemy.dialects.postgresql import aggregate_order_by
from sqlalchemy import (Column, Integer, String, ForeignKey,
Sequence, Float, Text, BigInteger, Date,
DateTime, Time, Boolean, Index, CheckConstraint,
UniqueConstraint,ForeignKeyConstraint, Numeric, LargeBinary , Table)
from datetime import timedelta, datetime, date
from sqlalchemy.dialects.postgresql import *
from sqlalchemy.sql import func
from .mixins import *
import arrow, enum
import enum
| true | true |
f7f81f928ea9655dd41ec2147f98e3861799e8fb | 823 | py | Python | Python/miscellaneous/plt.py | N-Lambin/TFE-EEG | 0308e790a8f1045c85cbc1ec9e054e6136af58a3 | [
"MIT"
] | 1 | 2018-05-23T12:42:47.000Z | 2018-05-23T12:42:47.000Z | Python/miscellaneous/plt.py | N-Lambin/TFE-EEG | 0308e790a8f1045c85cbc1ec9e054e6136af58a3 | [
"MIT"
] | null | null | null | Python/miscellaneous/plt.py | N-Lambin/TFE-EEG | 0308e790a8f1045c85cbc1ec9e054e6136af58a3 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
import csv
import numpy as np
from scipy import signal
def _plot_periodogram(file_dir, file_name, color, fs=128, nfft=64):
    """Load one EEG channel CSV and draw its periodogram on the current axes.

    file_dir/file_name are joined under .\\csv\\csvCleanData\\ ; each CSV
    record holds a single integer sample (records are '\\r'-delimited,
    matching the original export format). fs is the sampling rate in Hz.
    Factors out the load+plot code that was previously duplicated verbatim
    for each trial.
    """
    samples = []
    with open(".\\csv\\csvCleanData\\" + file_dir + file_name, "r") as csvfile:
        csv_reader = csv.reader(csvfile, delimiter = '\r')
        for row in csv_reader:
            samples.append(int(row[0]))
    f, fd = signal.periodogram(np.array(samples), fs, nfft=nfft)
    plt.semilogy(f, fd, color)

# Overlay the left-wink trial (red) and the neutral trial (blue) for channel AF3.
_plot_periodogram('winkLeft\\winkLeft10Data\\', 'AF3.csv', 'r')
_plot_periodogram('neutral\\neutral10Data\\', 'AF3.csv', 'b')
plt.show()
plt.show() | 23.514286 | 73 | 0.623329 | import matplotlib.pyplot as plt
import csv
import numpy as np
from scipy import signal
fileDir = 'winkLeft\\winkLeft10Data\\'
fileName = 'AF3.csv'
fs = 128
x = []
with open(".\\csv\\csvCleanData\\" + fileDir + fileName, "r") as csvfile:
csv_reader = csv.reader(csvfile, delimiter = '\r')
for row in csv_reader:
x.append(int(row[0]))
x = np.array(x)
f, fd = signal.periodogram(x, fs, nfft=64)
plt.semilogy(f, fd, 'r')
fileDir = 'neutral\\neutral10Data\\'
fileName = 'AF3.csv'
fs = 128
x = []
with open(".\\csv\\csvCleanData\\" + fileDir + fileName, "r") as csvfile:
csv_reader = csv.reader(csvfile, delimiter = '\r')
for row in csv_reader:
x.append(int(row[0]))
x = np.array(x)
f, fd = signal.periodogram(x, fs, nfft=64)
plt.semilogy(f, fd, 'b')
plt.show() | true | true |
f7f82130439a3bcb0140725355c1c08def86ad18 | 4,403 | py | Python | leasing/management/commands/add_default_collection_letter_templates.py | hkotkanen/mvj | a22d40869ef1b13924da428f3026d248acef81a7 | [
"MIT"
] | null | null | null | leasing/management/commands/add_default_collection_letter_templates.py | hkotkanen/mvj | a22d40869ef1b13924da428f3026d248acef81a7 | [
"MIT"
] | null | null | null | leasing/management/commands/add_default_collection_letter_templates.py | hkotkanen/mvj | a22d40869ef1b13924da428f3026d248acef81a7 | [
"MIT"
] | null | null | null | import argparse
import os
import tempfile
from pathlib import Path
from shutil import copyfile
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from leasing.models import CollectionLetterTemplate
# Display name -> template file name for the default collection letter
# templates this command installs. The keys become CollectionLetterTemplate
# names; the filenames are looked up in the given source directory.
TEMPLATE_NAMES = {
    'Irtisanomis- ja oikeudenkäyntiuhalla, tilapäinen yritystontti': {
        'filename': 'irtisanomis_ja_oikeudenkayntiuhka_tilapainen_yritystontti_template.docx',
    },
    'Purku- ja oikeudenkäyntiuhalla, asuntotontti': {
        'filename': 'purku_ja_oikeudenkayntiuhka_asuntotontti_template.docx',
    },
    'Purku- ja oikeudenkäyntiuhalla, tilapäinen yritystontti': {
        'filename': 'purku_ja_oikeudenkayntiuhka_tilapainen_yritystontti_template.docx',
    },
    'Purku-uhalla, asuntotontti': {
        'filename': 'purku_uhka_asuntotontti_template.docx',
    },
    'Purku-uhalla, yritystontti': {
        'filename': 'purku_uhka_yritystontti_template.docx',
    },
    'Oikeudenkäyntiuhka': {
        'filename': 'oikeudenkayntiuhka_template.docx',
    },
}
"""
Irtisanomis- ja oikeudenkäyntiuhalla, tilapäinen yritystontti
Purku- ja oikeudenkäyntiuhalla, asuntotontti
Purku- ja oikeudenkäyntiuhalla, tilapäinen yritystontti
Purku-uhalla, asuntotontti
Purku-uhalla, yritystontti
Oikeudenkäyntiuhka
irtisanomis_ja_oikeudenkayntiuhka_tilapainen_yritystontti_template.docx
oikeudenkayntiuhka_template.doc
purku_ja_oikeudenkayntiuhka_asuntotontti_template.docx
purku_ja_oikeudenkayntiuhka_tilapainen_yritystontti_template.docx
purku_uhka_asuntotontti_template.docx
purku_uhka_yritystontti_template.docx
"""
class IsReadableDirectory(argparse.Action):
    """argparse action that accepts a value only if it names a readable directory."""

    def __call__(self, parser, namespace, values, option_string=None):
        # Guard clauses: reject non-directories first, then unreadable ones;
        # only a readable directory is stored on the namespace.
        if not os.path.isdir(values):
            raise argparse.ArgumentTypeError('Directory "{}" is not a directory.'.format(values))
        if not os.access(values, os.R_OK):
            raise argparse.ArgumentTypeError('Directory "{}" is not readable.'.format(values))
        setattr(namespace, self.dest, values)
class Command(BaseCommand):
    """Install the default collection letter templates listed in TEMPLATE_NAMES.

    Copies each template file from the given source directory into the
    CollectionLetterTemplate upload directory under MEDIA_ROOT, creating (or
    overwriting the file of) a CollectionLetterTemplate row per template.
    """
    help = 'Add default collection letter templates'

    def add_arguments(self, parser):
        parser.add_argument('source_directory', action=IsReadableDirectory, help='Directory holding the templates')

    def check_is_directory_writable(self, directory):
        """Return True if *directory* exists and a file can be created in it."""
        if not os.path.isdir(directory):
            self.stdout.write('Directory "{}" does not exist. Please create it.'.format(directory))
            return False
        try:
            # Creating (and immediately discarding) a temp file proves writability.
            fp = tempfile.TemporaryFile(dir=directory)
            fp.close()
            return True
        except PermissionError:
            self.stdout.write('Can not create file in directory "{}".'.format(directory))
            return False

    def handle(self, *args, **options):
        upload_root = Path(settings.MEDIA_ROOT) / CollectionLetterTemplate.file.field.upload_to
        if not self.check_is_directory_writable(upload_root):
            # BUG FIX: the original message had no "{}" placeholder, so the
            # directory name was silently dropped from the error.
            raise CommandError('Directory "{}" is not writable'.format(upload_root))

        source_path = Path(options['source_directory'])

        # Imported lazily and unregistered so the bulk create/update below
        # does not spam the audit log.
        from auditlog.registry import auditlog
        auditlog.unregister(CollectionLetterTemplate)

        for name, template in TEMPLATE_NAMES.items():
            self.stdout.write(name)
            source_filename = source_path / template['filename']
            if not source_filename.exists():
                self.stdout.write('  Template file "{}" does not exist in the source directory {}'.format(
                    template['filename'], source_path))
                continue
            try:
                clt = CollectionLetterTemplate.objects.get(name=name)
                self.stdout.write('  Template already exists. Overwriting.')
                destination_filename = clt.file.name
            except CollectionLetterTemplate.DoesNotExist:
                self.stdout.write('  Creating new template.')
                destination_filename = Path(CollectionLetterTemplate.file.field.upload_to) / template['filename']
                CollectionLetterTemplate.objects.create(name=name, file=str(destination_filename))
            # Renamed from the original's re-used `destination_path` variable,
            # which shadowed the upload root computed above.
            destination_file = Path(settings.MEDIA_ROOT) / destination_filename
            self.stdout.write('  Copying "{}" to "{}"'.format(source_filename, destination_file))
            copyfile(source_filename, destination_file)
| 39.3125 | 115 | 0.70452 | import argparse
import os
import tempfile
from pathlib import Path
from shutil import copyfile
from django.conf import settings
from django.core.management.base import BaseCommand, CommandError
from leasing.models import CollectionLetterTemplate
TEMPLATE_NAMES = {
'Irtisanomis- ja oikeudenkäyntiuhalla, tilapäinen yritystontti': {
'filename': 'irtisanomis_ja_oikeudenkayntiuhka_tilapainen_yritystontti_template.docx',
},
'Purku- ja oikeudenkäyntiuhalla, asuntotontti': {
'filename': 'purku_ja_oikeudenkayntiuhka_asuntotontti_template.docx',
},
'Purku- ja oikeudenkäyntiuhalla, tilapäinen yritystontti': {
'filename': 'purku_ja_oikeudenkayntiuhka_tilapainen_yritystontti_template.docx',
},
'Purku-uhalla, asuntotontti': {
'filename': 'purku_uhka_asuntotontti_template.docx',
},
'Purku-uhalla, yritystontti': {
'filename': 'purku_uhka_yritystontti_template.docx',
},
'Oikeudenkäyntiuhka': {
'filename': 'oikeudenkayntiuhka_template.docx',
},
}
class IsReadableDirectory(argparse.Action):
def __call__(self, parser, namespace, values, option_string=None):
if not os.path.isdir(values):
raise argparse.ArgumentTypeError('Directory "{}" is not a directory.'.format(values))
if os.access(values, os.R_OK):
setattr(namespace, self.dest, values)
else:
raise argparse.ArgumentTypeError('Directory "{}" is not readable.'.format(values))
class Command(BaseCommand):
help = 'Add default collection letter templates'
def add_arguments(self, parser):
parser.add_argument('source_directory', action=IsReadableDirectory, help='Directory holding the templates')
def check_is_directory_writable(self, directory):
if not os.path.isdir(directory):
self.stdout.write('Directory "{}" does not exist. Please create it.'.format(directory))
return False
try:
fp = tempfile.TemporaryFile(dir=directory)
fp.close()
return True
except PermissionError:
self.stdout.write('Can not create file in directory "{}".'.format(directory))
return False
def handle(self, *args, **options):
destination_path = Path(settings.MEDIA_ROOT) / CollectionLetterTemplate.file.field.upload_to
if not self.check_is_directory_writable(destination_path):
raise CommandError('Directory "" is not writable'.format(destination_path))
source_path = Path(options['source_directory'])
from auditlog.registry import auditlog
auditlog.unregister(CollectionLetterTemplate)
for name, template in TEMPLATE_NAMES.items():
self.stdout.write(name)
source_filename = source_path / template['filename']
if not source_filename.exists():
self.stdout.write(' Template file "{}" does not exist in the source directory {}'.format(
template['filename'], source_path))
continue
try:
clt = CollectionLetterTemplate.objects.get(name=name)
self.stdout.write(' Template already exists. Overwriting.')
destination_filename = clt.file.name
except CollectionLetterTemplate.DoesNotExist:
self.stdout.write(' Creating new template.')
destination_filename = Path(CollectionLetterTemplate.file.field.upload_to) / template['filename']
CollectionLetterTemplate.objects.create(name=name, file=str(destination_filename))
destination_path = Path(settings.MEDIA_ROOT) / destination_filename
self.stdout.write(' Copying "{}" to "{}"'.format(source_filename, destination_path))
copyfile(source_filename, destination_path)
| true | true |
f7f821f70f7d04958ca243c546dea9c2411c92e4 | 14,518 | py | Python | pace/encryption/acc_encrypt_test.py | LaudateCorpus1/PACE-python | eb61250886e51647bd1edb6d8f4fa7f83eb0bc81 | [
"BSD-2-Clause"
] | 7 | 2016-11-01T17:36:17.000Z | 2021-03-12T08:54:36.000Z | pace/encryption/acc_encrypt_test.py | LaudateCorpus1/PACE-python | eb61250886e51647bd1edb6d8f4fa7f83eb0bc81 | [
"BSD-2-Clause"
] | 1 | 2016-11-29T00:38:28.000Z | 2016-12-06T14:10:24.000Z | pace/encryption/acc_encrypt_test.py | LaudateCorpus1/PACE-python | eb61250886e51647bd1edb6d8f4fa7f83eb0bc81 | [
"BSD-2-Clause"
] | 6 | 2020-09-09T08:33:17.000Z | 2022-01-06T07:02:40.000Z | ## **************
## Copyright 2014 MIT Lincoln Laboratory
## Project: PACE
## Authors: ATLH
## Description: Unit tests for acc_encrypt
## Modifications:
## Date Name Modification
## ---- ---- ------------
## 23 Dec 2014 ATLH Original file
## **************
import os
import sys
this_dir = os.path.dirname(os.path.dirname(__file__))
base_dir = os.path.join(this_dir, '../..')
sys.path.append(base_dir)
import random
import logging
import unittest
from StringIO import StringIO
from pyaccumulo import Mutation, Cell, Range
from pace.encryption.encryption_exceptions import EncryptionException, DecryptionException
from pace.encryption.acc_encrypt import AccumuloEncrypt, ConfigurationException
from pace.encryption.encryption_pki import DummyEncryptionPKI
from pace.pki.accumulo_keystore import AccumuloKeyStore
from pace.encryption.AES_encrypt import Pycrypto_AES_CFB
from pace.common.fakeconn import FakeConnection
class AccumuloCryptTest(unittest.TestCase):
    def setUp(self):
        """Create the dummy encryption PKI shared by each test method."""
        # Keep the same PKI around, since it generates a new RSA key
        # for key wraps each time
        self.pki = DummyEncryptionPKI()
    def test_error_handling(self):
        '''
        Tests error handling of configuration sections: each malformed
        INI-style config must make AccumuloEncrypt raise
        ConfigurationException at construction time.
        '''
        # Unknown section name ('colSection' is not a valid cell section).
        # NOTE(review): the line after the colSection encryption value is
        # missing a '+\' so the two string literals concatenate with no
        # newline between them -- confirm whether that is intentional.
        not_valid_section = StringIO('[row]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        'encryption = Pycrypto_AES_CFB\n'+\
                        '[colSection]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        'encryption = Pycrypto_AES_CFB'
                        '[colFamily]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        'encryption = Pycrypto_AES_CFB')
        self.assertRaises(ConfigurationException, AccumuloEncrypt, not_valid_section, self.pki)
        # Section missing its 'encryption' option.
        no_encryption = StringIO('[row]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        '[colFamily]\n'+\
                        'key_id = Identity\n'+\
                        'encryption = Identity')
        self.assertRaises(ConfigurationException, AccumuloEncrypt, no_encryption, self.pki)
        # Section missing its 'key_id' option.
        no_key_id = StringIO('[row]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        'encryption = Pycrypto_AES_CFB\n'+\
                        '[colFamily]\n'+\
                        'encryption = Pycrypto_AES_CFB')
        self.assertRaises(ConfigurationException, AccumuloEncrypt, no_key_id, self.pki)
        # Encryption algorithm not supported for cell sections (Pycrypto_RSA).
        algorithm_not_supported = StringIO('[row]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        'encryption = Pycrypto_RSA\n'+\
                        '[colFamily]\n'+\
                        'key_id = Pycrypto_AES_CFB\n'+\
                        'encryption = Pycrypto_AES_CFB')
        self.assertRaises(ConfigurationException, AccumuloEncrypt, algorithm_not_supported, self.pki)
def test_encryptor_dict(self):
'''
Tests the format of the created encryptor_dict
'''
all_sections = StringIO('[row]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colVisibility]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[value]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB')
ac = AccumuloEncrypt(all_sections, self.pki)
encryptor_dict = ac.encrypt_dict
keys = ['row','colQualifier','colFamily','colVisibility',
'value']
for k in keys:
self.assertTrue(k in encryptor_dict, '%s is not in dictionary' % k)
encryptor = encryptor_dict[k]
self.assertEqual(encryptor.encryption, Pycrypto_AES_CFB )
self.assertEqual(encryptor.cell_sections, [k])
def test_with_accumulo_conn(self):
'''
Tests the interplay with a fake accumulo connection
'''
all_sections = '[row]\n'+\
'key_id = table1\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colQualifier]\n'+\
'key_id = table1\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colVisibility]\n'+\
'key_id = table1\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[value]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB'
#create mutation
mut = Mutation('row1')
mut.put(cf='cf1',cq='cq1', cv='cv1', ts = 12345, val = 'val1')
mut.put(cf='cf2',cq='cq2', cv='', ts = 67890, val = 'val2')
ae = AccumuloEncrypt(StringIO(all_sections), self.pki)
enc_muts = ae.encrypt(mut)
#write mutation along fake connection
conn = FakeConnection()
conn.create_table('enc_test')
conn.write('enc_test', enc_muts[0])
conn.write('enc_test', enc_muts[1])
#create ground truth
conn.create_table('ground')
conn.write('ground', mut)
#retrieve encrypted mutation
dec_cells = []
for c in conn.scan('enc_test'):
dec_cells.append(ae.decrypt(c))
gt_cells = []
for c in conn.scan('ground'):
gt_cells.append(c)
self.assertEqual(sorted(gt_cells), sorted(dec_cells))
def _run_search(self, config, row, cols, correct_cells):
'''
Tests the encrypting search functionality
'''
#create range & mutation to search for
mut1 = Mutation('arow')
mut1.put(cf='cf1',cq='cq1', cv='', ts = 1, val = 'val1')
mut1.put(cf='cf2',cq='cq2', cv='', ts = 2, val = 'val2')
mut1.put(cf='cf1',cq='cq1', cv='', ts = 3, val = 'val3')
mut1.put(cf='cf2',cq='cq3', cv='', ts = 4, val = 'val4')
mut1.put(cf='cf3',cq='cq4', cv='', ts = 5, val = 'val5')
mut2 = Mutation('brow')
mut2.put(cf='cf1',cq='cq1', cv='', ts = 6, val = 'val1')
mut2.put(cf='cf2',cq='cq2', cv='', ts = 7, val = 'val2')
ae = AccumuloEncrypt(StringIO(config), self.pki)
enc_muts1 = ae.encrypt(mut1)
enc_muts2 = ae.encrypt(mut2)
enc_row, enc_cols = ae.encrypt_search(row, cols)
#write mutation along fake connection
conn = FakeConnection()
conn.create_table('enc_test')
for mut in enc_muts1 + enc_muts2:
conn.write('enc_test', mut)
#retrieve encrypted mutation with search
dec_cells = []
for c in conn.scan('enc_test',
scanrange=Range(srow=enc_row, erow=enc_row,
sinclude=True, einclude=True),
cols=enc_cols):
dec_cells.append(ae.decrypt(c))
self.assertEqual(sorted(dec_cells), sorted(correct_cells))
def test_det_row_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
None,
[Cell('brow','cf1','cq1','',6,'val1'),
Cell('brow','cf2','cq2','',7,'val2')])
def test_unencrypted_search(self):
config = '[colFamily]\n'+\
'key_id = Pycrypto_AES_CBC\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_CBC\n'
self._run_search(config,
'brow',
None,
[Cell('brow','cf1','cq1','',6,'val1'),
Cell('brow','cf2','cq2','',7,'val2')])
def test_det_row_cf_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1']],
[Cell('brow','cf1','cq1','',6,'val1')])
self._run_search(config,
'arow',
[['cf1'],['cf3']],
[Cell('arow','cf1','cq1','',1,'val1'),
Cell('arow','cf1','cq1','',3,'val3'),
Cell('arow','cf3','cq4','',5,'val5')])
def test_det_cf_search(self):
config = '[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1']],
[Cell('brow','cf1','cq1','',6,'val1')])
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
self._run_search(config,
'arow',
[['cf1'],['cf3']],
[Cell('arow','cf1','cq1','',1,'val1'),
Cell('arow','cf1','cq1','',3,'val3'),
Cell('arow','cf3','cq4','',5,'val5')])
def test_det_cq_search(self):
config = '[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_row_cf_cq_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_cf_cq_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily,colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_cq_cf_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily,colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_cf_cq_switch_search(self):
config = '[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_non_det_row(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_CBC\n'+\
'encryption = Pycrypto_AES_CBC\n'
ae = AccumuloEncrypt(StringIO(config), self.pki)
self.assertRaises(EncryptionException, ae.encrypt_search, 'arow', [['cf1']])
def test_det_non_det_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CBC\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_CBC\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_CBC\n'
self._run_search(config,
'brow',
None,
[Cell('brow','cf1','cq1','',6,'val1'),
Cell('brow','cf2','cq2','',7,'val2')])
| 38.922252 | 104 | 0.464802 | l, Range
from pace.encryption.encryption_exceptions import EncryptionException, DecryptionException
from pace.encryption.acc_encrypt import AccumuloEncrypt, ConfigurationException
from pace.encryption.encryption_pki import DummyEncryptionPKI
from pace.pki.accumulo_keystore import AccumuloKeyStore
from pace.encryption.AES_encrypt import Pycrypto_AES_CFB
from pace.common.fakeconn import FakeConnection
class AccumuloCryptTest(unittest.TestCase):
def setUp(self):
self.pki = DummyEncryptionPKI()
def test_error_handling(self):
not_valid_section = StringIO('[row]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colSection]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB'
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB')
self.assertRaises(ConfigurationException, AccumuloEncrypt, not_valid_section, self.pki)
no_encryption = StringIO('[row]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'[colFamily]\n'+\
'key_id = Identity\n'+\
'encryption = Identity')
self.assertRaises(ConfigurationException, AccumuloEncrypt, no_encryption, self.pki)
no_key_id = StringIO('[row]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colFamily]\n'+\
'encryption = Pycrypto_AES_CFB')
self.assertRaises(ConfigurationException, AccumuloEncrypt, no_key_id, self.pki)
algorithm_not_supported = StringIO('[row]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_RSA\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB')
self.assertRaises(ConfigurationException, AccumuloEncrypt, algorithm_not_supported, self.pki)
def test_encryptor_dict(self):
all_sections = StringIO('[row]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colVisibility]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[value]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB')
ac = AccumuloEncrypt(all_sections, self.pki)
encryptor_dict = ac.encrypt_dict
keys = ['row','colQualifier','colFamily','colVisibility',
'value']
for k in keys:
self.assertTrue(k in encryptor_dict, '%s is not in dictionary' % k)
encryptor = encryptor_dict[k]
self.assertEqual(encryptor.encryption, Pycrypto_AES_CFB )
self.assertEqual(encryptor.cell_sections, [k])
def test_with_accumulo_conn(self):
all_sections = '[row]\n'+\
'key_id = table1\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colQualifier]\n'+\
'key_id = table1\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[colVisibility]\n'+\
'key_id = table1\n'+\
'encryption = Pycrypto_AES_CFB\n'+\
'[value]\n'+\
'key_id = Pycrypto_AES_CFB\n'+\
'encryption = Pycrypto_AES_CFB'
mut = Mutation('row1')
mut.put(cf='cf1',cq='cq1', cv='cv1', ts = 12345, val = 'val1')
mut.put(cf='cf2',cq='cq2', cv='', ts = 67890, val = 'val2')
ae = AccumuloEncrypt(StringIO(all_sections), self.pki)
enc_muts = ae.encrypt(mut)
conn = FakeConnection()
conn.create_table('enc_test')
conn.write('enc_test', enc_muts[0])
conn.write('enc_test', enc_muts[1])
conn.create_table('ground')
conn.write('ground', mut)
dec_cells = []
for c in conn.scan('enc_test'):
dec_cells.append(ae.decrypt(c))
gt_cells = []
for c in conn.scan('ground'):
gt_cells.append(c)
self.assertEqual(sorted(gt_cells), sorted(dec_cells))
def _run_search(self, config, row, cols, correct_cells):
mut1 = Mutation('arow')
mut1.put(cf='cf1',cq='cq1', cv='', ts = 1, val = 'val1')
mut1.put(cf='cf2',cq='cq2', cv='', ts = 2, val = 'val2')
mut1.put(cf='cf1',cq='cq1', cv='', ts = 3, val = 'val3')
mut1.put(cf='cf2',cq='cq3', cv='', ts = 4, val = 'val4')
mut1.put(cf='cf3',cq='cq4', cv='', ts = 5, val = 'val5')
mut2 = Mutation('brow')
mut2.put(cf='cf1',cq='cq1', cv='', ts = 6, val = 'val1')
mut2.put(cf='cf2',cq='cq2', cv='', ts = 7, val = 'val2')
ae = AccumuloEncrypt(StringIO(config), self.pki)
enc_muts1 = ae.encrypt(mut1)
enc_muts2 = ae.encrypt(mut2)
enc_row, enc_cols = ae.encrypt_search(row, cols)
conn = FakeConnection()
conn.create_table('enc_test')
for mut in enc_muts1 + enc_muts2:
conn.write('enc_test', mut)
dec_cells = []
for c in conn.scan('enc_test',
scanrange=Range(srow=enc_row, erow=enc_row,
sinclude=True, einclude=True),
cols=enc_cols):
dec_cells.append(ae.decrypt(c))
self.assertEqual(sorted(dec_cells), sorted(correct_cells))
def test_det_row_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
None,
[Cell('brow','cf1','cq1','',6,'val1'),
Cell('brow','cf2','cq2','',7,'val2')])
def test_unencrypted_search(self):
config = '[colFamily]\n'+\
'key_id = Pycrypto_AES_CBC\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_CBC\n'
self._run_search(config,
'brow',
None,
[Cell('brow','cf1','cq1','',6,'val1'),
Cell('brow','cf2','cq2','',7,'val2')])
def test_det_row_cf_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1']],
[Cell('brow','cf1','cq1','',6,'val1')])
self._run_search(config,
'arow',
[['cf1'],['cf3']],
[Cell('arow','cf1','cq1','',1,'val1'),
Cell('arow','cf1','cq1','',3,'val3'),
Cell('arow','cf3','cq4','',5,'val5')])
def test_det_cf_search(self):
config = '[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1']],
[Cell('brow','cf1','cq1','',6,'val1')])
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
self._run_search(config,
'arow',
[['cf1'],['cf3']],
[Cell('arow','cf1','cq1','',1,'val1'),
Cell('arow','cf1','cq1','',3,'val3'),
Cell('arow','cf3','cq4','',5,'val5')])
def test_det_cq_search(self):
config = '[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_row_cf_cq_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_cf_cq_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily,colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_cq_cf_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily,colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_det_cf_cq_switch_search(self):
config = '[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_SIV\n'
self._run_search(config,
'brow',
[['cf1','cq1']],
[Cell('brow','cf1','cq1','',6,'val1')])
def test_non_det_row(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_CBC\n'+\
'encryption = Pycrypto_AES_CBC\n'
ae = AccumuloEncrypt(StringIO(config), self.pki)
self.assertRaises(EncryptionException, ae.encrypt_search, 'arow', [['cf1']])
def test_det_non_det_search(self):
config = '[row]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'encryption = Pycrypto_AES_SIV\n'+\
'[colFamily]\n'+\
'key_id = Pycrypto_AES_CBC\n'+\
'cell_sections = colFamily\n'+\
'encryption = Pycrypto_AES_CBC\n'+\
'[colQualifier]\n'+\
'key_id = Pycrypto_AES_SIV\n'+\
'cell_sections = colQualifier\n'+\
'encryption = Pycrypto_AES_CBC\n'
self._run_search(config,
'brow',
None,
[Cell('brow','cf1','cq1','',6,'val1'),
Cell('brow','cf2','cq2','',7,'val2')])
| true | true |
f7f82203fd775e0f4ec80f88938f8325c3cc504e | 7,445 | py | Python | tests/unit/test_simulator.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | 1 | 2016-12-14T21:05:47.000Z | 2016-12-14T21:05:47.000Z | tests/unit/test_simulator.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | null | null | null | tests/unit/test_simulator.py | dkucsc/mavas | db3f7bd35944b7d0f510c62760eaa071bf53d7d8 | [
"Apache-2.0"
] | null | null | null | """
Tests for simulated data
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import datetime
import ga4gh.datamodel as datamodel
import ga4gh.datamodel.datasets as datasets
import ga4gh.datamodel.reads as reads
import ga4gh.datamodel.references as references
import ga4gh.datamodel.variants as variants
class TestSimulatedVariantSet(unittest.TestCase):
    """
    Test properties of the SimulatedVariantSet: construction, the
    variants it generates, and that generation is deterministic for a
    fixed random seed.
    """
    def setUp(self):
        self.randomSeed = 0
        self.numCalls = 2
        # ensure variantDensity is >= 1 so we get deterministic behavior:
        # every position in the queried interval then yields a variant
        self.variantDensity = 1
        self.simulatedVariantSet = self._getSimulatedVariantSet()
        self.referenceName = 'ref'
        self.startPosition = 100
        self.endPosition = 103
        self.callSetIds = ['unused']
        self.bases = ["A", "C", "G", "T"]

    def _getSimulatedVariantSet(self):
        # Build a fresh SimulatedVariantSet from the fixture parameters;
        # a new dataset/reference set is created each time.
        dataset = datasets.Dataset('dataset1')
        referenceSet = references.SimulatedReferenceSet("srs1")
        simulatedVariantSet = variants.SimulatedVariantSet(
            dataset, referenceSet, 'variantSet1', randomSeed=self.randomSeed,
            numCalls=self.numCalls, variantDensity=self.variantDensity)
        return simulatedVariantSet

    def _getSimulatedVariantsList(self, simulatedVariantSet=None):
        # Materialize getVariants() over the fixture interval as a list;
        # defaults to the variant set built in setUp.
        if simulatedVariantSet is None:
            simulatedVariantSet = self.simulatedVariantSet
        simulatedVariants = simulatedVariantSet.getVariants(
            self.referenceName, self.startPosition, self.endPosition,
            self.callSetIds)
        variantList = list(simulatedVariants)
        return variantList

    def testConstruction(self):
        # initializing SimulatedVariantSet should store values correctly
        self.assertEqual(
            self.randomSeed, self.simulatedVariantSet._randomSeed)
        self.assertEqual(
            self.numCalls, self.simulatedVariantSet._numCalls)
        self.assertEqual(
            self.variantDensity, self.simulatedVariantSet._variantDensity)
        self.assertEqual(
            self.simulatedVariantSet.getCreationTime(),
            self.simulatedVariantSet.getUpdatedTime())

    def testGetVariants(self):
        # calling getVariants should produce the expected results:
        # with variantDensity >= 1, one variant per base in the interval
        variantList = self._getSimulatedVariantsList()
        self.assertEqual(
            len(variantList), self.endPosition - self.startPosition)
        for offset, simulatedVariant in enumerate(variantList):
            start = self.startPosition + offset
            # The variant's compound id must round-trip and agree with
            # the owning variant set, reference name and start position.
            variantSetCompoundId = self.simulatedVariantSet.getCompoundId()
            variantCompoundId = datamodel.VariantCompoundId.parse(
                simulatedVariant.id)
            self.assertEqual(
                variantSetCompoundId.variant_set_id,
                self.simulatedVariantSet.getId())
            self.assertEqual(
                variantSetCompoundId.variant_set_id,
                variantCompoundId.variant_set_id)
            self.assertEqual(
                variantCompoundId.reference_name, self.referenceName)
            self.assertEqual(
                variantCompoundId.start, str(simulatedVariant.start))
            self.assertEqual(
                simulatedVariant.variant_set_id,
                self.simulatedVariantSet.getId())
            self.assertEqual(
                simulatedVariant.reference_name, self.referenceName)
            self.assertEqual(
                simulatedVariant.created, simulatedVariant.updated)
            # Each simulated variant is a SNP: one-base interval with
            # reference and alternate bases drawn from A/C/G/T.
            self.assertEqual(simulatedVariant.start, start)
            self.assertEqual(simulatedVariant.end, start + 1)
            self.assertIn(simulatedVariant.reference_bases, self.bases)
            self.assertIn(
                simulatedVariant.alternate_bases[0], self.bases)
            self.assertEqual(len(simulatedVariant.calls), self.numCalls)

    def testConsistency(self):
        # two SimulatedBackend objects given the same parameters
        # should produce identical variant lists
        variantListOne = self._getSimulatedVariantsList()
        simulatedVariantSetTwo = self._getSimulatedVariantSet()
        variantListTwo = self._getSimulatedVariantsList(
            simulatedVariantSetTwo)
        self._assertEqualVariantLists(variantListOne, variantListTwo)

    def testSelfConsistent(self):
        # the same SimulatedBackend should produce identical
        # variant lists given the same parameters
        variantListOne = self._getSimulatedVariantsList()
        variantListTwo = self._getSimulatedVariantsList()
        self.assertEqual(variantListOne, variantListTwo)

    def _assertEqualVariantLists(self, variantListOne, variantListTwo):
        # need to make time-dependent fields equal before the comparison,
        # otherwise we're introducing a race condition
        timeDependentFields = ['created', 'updated']
        for variantList in (variantListOne, variantListTwo):
            for variant in variantList:
                for field in timeDependentFields:
                    setattr(variant, field, 0)
        self.assertEqual(variantListOne, variantListTwo)
class TestSimulatedVariantAnnotationSet(unittest.TestCase):
    """
    Test properties of the SimulatedVariantAnnotationSet: it must
    reference its variant set, stamp ISO 8601 creation times, and link
    each annotation back to its variant.
    """
    def setUp(self):
        self.randomSeed = 1
        self.numCalls = 2
        # ensure variantDensity is >= 1 so we get deterministic behavior
        self.variantDensity = 1
        self.referenceName = 'ref'
        self.startPosition = 100
        self.endPosition = 120
        self.callSetIds = ['unused']
        self.bases = ["A", "C", "G", "T"]

    def testCreation(self):
        """
        Builds a simulated annotation set over a simulated variant set
        and checks the cross-references and timestamp format.
        """
        dataset = datasets.Dataset('dataset1')
        referenceSet = references.SimulatedReferenceSet("srs1")
        localId = "variantAnnotationSetId"
        simulatedVariantSet = variants.SimulatedVariantSet(
            dataset, referenceSet, 'variantSet1', randomSeed=self.randomSeed,
            numCalls=self.numCalls, variantDensity=self.variantDensity)
        simulatedVariantAnnotationSet = variants.SimulatedVariantAnnotationSet(
            simulatedVariantSet, localId, self.randomSeed)
        annotations = simulatedVariantAnnotationSet.getVariantAnnotations(
            self.referenceName, self.startPosition, self.endPosition)
        # assertEquals is a deprecated alias (removed in Python 3.12);
        # use assertEqual instead.
        self.assertEqual(
            simulatedVariantSet.toProtocolElement().id,
            simulatedVariantAnnotationSet.toProtocolElement().variant_set_id,
            "Variant Set ID should match the annotation's variant set ID")
        for variant, ann in annotations:
            # Round-tripping through strptime/strftime proves the stored
            # timestamp parses under the expected ISO 8601 format.
            self.assertEqual(datetime.datetime.strptime(
                ann.created, "%Y-%m-%dT%H:%M:%S.%fZ").strftime(
                "%Y-%m-%dT%H:%M:%S.%fZ"), ann.created,
                "Expect time format to be in ISO8601")
            self.assertEqual(variant.id, ann.variant_id)
class TestSimulatedReadGroupSet(unittest.TestCase):
    """
    Checks on the simulated ReadGroupSet data model.
    """
    def testCreation(self):
        """Every read group in a fresh simulated set yields alignments."""
        parentDataset = datasets.Dataset('dataset1')
        referenceSet = references.SimulatedReferenceSet("srs1")
        readGroupSet = reads.SimulatedReadGroupSet(
            parentDataset, "readGroupSetId", referenceSet)
        for group in readGroupSet.getReadGroups():
            # Consume the alignment generator and require it to be non-empty.
            numAlignments = sum(1 for _ in group.getReadAlignments())
            self.assertGreater(numAlignments, 0)
| 43.284884 | 79 | 0.678308 | from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import datetime
import ga4gh.datamodel as datamodel
import ga4gh.datamodel.datasets as datasets
import ga4gh.datamodel.reads as reads
import ga4gh.datamodel.references as references
import ga4gh.datamodel.variants as variants
class TestSimulatedVariantSet(unittest.TestCase):
def setUp(self):
self.randomSeed = 0
self.numCalls = 2
self.variantDensity = 1
self.simulatedVariantSet = self._getSimulatedVariantSet()
self.referenceName = 'ref'
self.startPosition = 100
self.endPosition = 103
self.callSetIds = ['unused']
self.bases = ["A", "C", "G", "T"]
def _getSimulatedVariantSet(self):
dataset = datasets.Dataset('dataset1')
referenceSet = references.SimulatedReferenceSet("srs1")
simulatedVariantSet = variants.SimulatedVariantSet(
dataset, referenceSet, 'variantSet1', randomSeed=self.randomSeed,
numCalls=self.numCalls, variantDensity=self.variantDensity)
return simulatedVariantSet
def _getSimulatedVariantsList(self, simulatedVariantSet=None):
if simulatedVariantSet is None:
simulatedVariantSet = self.simulatedVariantSet
simulatedVariants = simulatedVariantSet.getVariants(
self.referenceName, self.startPosition, self.endPosition,
self.callSetIds)
variantList = list(simulatedVariants)
return variantList
def testConstruction(self):
self.assertEqual(
self.randomSeed, self.simulatedVariantSet._randomSeed)
self.assertEqual(
self.numCalls, self.simulatedVariantSet._numCalls)
self.assertEqual(
self.variantDensity, self.simulatedVariantSet._variantDensity)
self.assertEqual(
self.simulatedVariantSet.getCreationTime(),
self.simulatedVariantSet.getUpdatedTime())
def testGetVariants(self):
variantList = self._getSimulatedVariantsList()
self.assertEqual(
len(variantList), self.endPosition - self.startPosition)
for offset, simulatedVariant in enumerate(variantList):
start = self.startPosition + offset
variantSetCompoundId = self.simulatedVariantSet.getCompoundId()
variantCompoundId = datamodel.VariantCompoundId.parse(
simulatedVariant.id)
self.assertEqual(
variantSetCompoundId.variant_set_id,
self.simulatedVariantSet.getId())
self.assertEqual(
variantSetCompoundId.variant_set_id,
variantCompoundId.variant_set_id)
self.assertEqual(
variantCompoundId.reference_name, self.referenceName)
self.assertEqual(
variantCompoundId.start, str(simulatedVariant.start))
self.assertEqual(
simulatedVariant.variant_set_id,
self.simulatedVariantSet.getId())
self.assertEqual(
simulatedVariant.reference_name, self.referenceName)
self.assertEqual(
simulatedVariant.created, simulatedVariant.updated)
self.assertEqual(simulatedVariant.start, start)
self.assertEqual(simulatedVariant.end, start + 1)
self.assertIn(simulatedVariant.reference_bases, self.bases)
self.assertIn(
simulatedVariant.alternate_bases[0], self.bases)
self.assertEqual(len(simulatedVariant.calls), self.numCalls)
def testConsistency(self):
variantListOne = self._getSimulatedVariantsList()
simulatedVariantSetTwo = self._getSimulatedVariantSet()
variantListTwo = self._getSimulatedVariantsList(
simulatedVariantSetTwo)
self._assertEqualVariantLists(variantListOne, variantListTwo)
def testSelfConsistent(self):
variantListOne = self._getSimulatedVariantsList()
variantListTwo = self._getSimulatedVariantsList()
self.assertEqual(variantListOne, variantListTwo)
def _assertEqualVariantLists(self, variantListOne, variantListTwo):
timeDependentFields = ['created', 'updated']
for variantList in (variantListOne, variantListTwo):
for variant in variantList:
for field in timeDependentFields:
setattr(variant, field, 0)
self.assertEqual(variantListOne, variantListTwo)
class TestSimulatedVariantAnnotationSet(unittest.TestCase):
def setUp(self):
self.randomSeed = 1
self.numCalls = 2
# ensure variantDensity is >= 1 so we get deterministic behavoir
self.variantDensity = 1
self.referenceName = 'ref'
self.startPosition = 100
self.endPosition = 120
self.callSetIds = ['unused']
self.bases = ["A", "C", "G", "T"]
def testCreation(self):
dataset = datasets.Dataset('dataset1')
referenceSet = references.SimulatedReferenceSet("srs1")
localId = "variantAnnotationSetId"
simulatedVariantSet = variants.SimulatedVariantSet(
dataset, referenceSet, 'variantSet1', randomSeed=self.randomSeed,
numCalls=self.numCalls, variantDensity=self.variantDensity)
simulatedVariantAnnotationSet = variants.SimulatedVariantAnnotationSet(
simulatedVariantSet, localId, self.randomSeed)
annotations = simulatedVariantAnnotationSet.getVariantAnnotations(
self.referenceName, self.startPosition, self.endPosition)
self.assertEquals(
simulatedVariantSet.toProtocolElement().id,
simulatedVariantAnnotationSet.toProtocolElement().variant_set_id,
"Variant Set ID should match the annotation's variant set ID")
for variant, ann in annotations:
self.assertEquals(datetime.datetime.strptime(
ann.created, "%Y-%m-%dT%H:%M:%S.%fZ").strftime(
"%Y-%m-%dT%H:%M:%S.%fZ"), ann.created,
"Expect time format to be in ISO8601")
self.assertEqual(variant.id, ann.variant_id)
class TestSimulatedReadGroupSet(unittest.TestCase):
def testCreation(self):
dataset = datasets.Dataset('dataset1')
localId = "readGroupSetId"
referenceSet = references.SimulatedReferenceSet("srs1")
simulatedReadGroupSet = reads.SimulatedReadGroupSet(
dataset, localId, referenceSet)
for readGroup in simulatedReadGroupSet.getReadGroups():
alignments = list(readGroup.getReadAlignments())
self.assertGreater(len(alignments), 0)
| true | true |
f7f8220aca745baa4e07e83b72ced29f641c64eb | 16,318 | py | Python | clients/client/python/ory_client/model/plugin_config.py | tobbbles/sdk | 017ca2fd46019bafd1853913b6c0f2b0fe687621 | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/model/plugin_config.py | tobbbles/sdk | 017ca2fd46019bafd1853913b6c0f2b0fe687621 | [
"Apache-2.0"
] | null | null | null | clients/client/python/ory_client/model/plugin_config.py | tobbbles/sdk | 017ca2fd46019bafd1853913b6c0f2b0fe687621 | [
"Apache-2.0"
] | null | null | null | """
Ory APIs
Documentation for all public and administrative Ory APIs. Administrative APIs can only be accessed with a valid Personal Access Token. Public APIs are mostly used in browsers. # noqa: E501
The version of the OpenAPI document: v0.0.1-alpha.19
Contact: support@ory.sh
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from ory_client.model_utils import ( # noqa: F401
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from ory_client.exceptions import ApiAttributeError
def lazy_import():
    """Import the model's dependencies on demand and publish them into this
    module's global namespace, sidestepping circular imports at load time."""
    from ory_client.model.plugin_config_args import PluginConfigArgs
    from ory_client.model.plugin_config_interface import PluginConfigInterface
    from ory_client.model.plugin_config_linux import PluginConfigLinux
    from ory_client.model.plugin_config_network import PluginConfigNetwork
    from ory_client.model.plugin_config_rootfs import PluginConfigRootfs
    from ory_client.model.plugin_config_user import PluginConfigUser
    from ory_client.model.plugin_env import PluginEnv
    from ory_client.model.plugin_mount import PluginMount
    # Expose each lazily imported class under its public name.
    globals().update({
        'PluginConfigArgs': PluginConfigArgs,
        'PluginConfigInterface': PluginConfigInterface,
        'PluginConfigLinux': PluginConfigLinux,
        'PluginConfigNetwork': PluginConfigNetwork,
        'PluginConfigRootfs': PluginConfigRootfs,
        'PluginConfigUser': PluginConfigUser,
        'PluginEnv': PluginEnv,
        'PluginMount': PluginMount,
    })
class PluginConfig(ModelNormal):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
Attributes:
allowed_values (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
with a capitalized key describing the allowed value and an allowed
value. These dicts store the allowed enum values.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
discriminator_value_class_map (dict): A dict to go from the discriminator
variable value to the discriminator class name.
validations (dict): The key is the tuple path to the attribute
and the for var_name this is (var_name,). The value is a dict
that stores validations for max_length, min_length, max_items,
min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,
inclusive_minimum, and regex.
additional_properties_type (tuple): A tuple of classes accepted
as additional properties values.
"""
allowed_values = {
}
validations = {
}
@cached_property
def additional_properties_type():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
"""
lazy_import()
return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501
_nullable = False
@cached_property
def openapi_types():
"""
This must be a method because a model may have properties that are
of type self, this must run after the class is loaded
Returns
openapi_types (dict): The key is attribute name
and the value is attribute type.
"""
lazy_import()
return {
'args': (PluginConfigArgs,), # noqa: E501
'description': (str,), # noqa: E501
'documentation': (str,), # noqa: E501
'entrypoint': ([str],), # noqa: E501
'env': ([PluginEnv],), # noqa: E501
'interface': (PluginConfigInterface,), # noqa: E501
'ipc_host': (bool,), # noqa: E501
'linux': (PluginConfigLinux,), # noqa: E501
'mounts': ([PluginMount],), # noqa: E501
'network': (PluginConfigNetwork,), # noqa: E501
'pid_host': (bool,), # noqa: E501
'propagated_mount': (str,), # noqa: E501
'work_dir': (str,), # noqa: E501
'docker_version': (str,), # noqa: E501
'user': (PluginConfigUser,), # noqa: E501
'rootfs': (PluginConfigRootfs,), # noqa: E501
}
    @cached_property
    def discriminator():
        """Return None: this is not a polymorphic (discriminated) schema."""
        return None
    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'args': 'Args', # noqa: E501
        'description': 'Description', # noqa: E501
        'documentation': 'Documentation', # noqa: E501
        'entrypoint': 'Entrypoint', # noqa: E501
        'env': 'Env', # noqa: E501
        'interface': 'Interface', # noqa: E501
        'ipc_host': 'IpcHost', # noqa: E501
        'linux': 'Linux', # noqa: E501
        'mounts': 'Mounts', # noqa: E501
        'network': 'Network', # noqa: E501
        'pid_host': 'PidHost', # noqa: E501
        'propagated_mount': 'PropagatedMount', # noqa: E501
        'work_dir': 'WorkDir', # noqa: E501
        'docker_version': 'DockerVersion', # noqa: E501
        'user': 'User', # noqa: E501
        'rootfs': 'rootfs', # noqa: E501
    }
    # Attributes that may only be set when deserializing server data.
    read_only_vars = {
    }
    # Empty: this is not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}
@classmethod
@convert_js_args_to_python_args
def _from_openapi_data(cls, args, description, documentation, entrypoint, env, interface, ipc_host, linux, mounts, network, pid_host, propagated_mount, work_dir, *args, **kwargs): # noqa: E501
"""PluginConfig - a model defined in OpenAPI
Args:
args (PluginConfigArgs):
description (str): description
documentation (str): documentation
entrypoint ([str]): entrypoint
env ([PluginEnv]): env
interface (PluginConfigInterface):
ipc_host (bool): ipc host
linux (PluginConfigLinux):
mounts ([PluginMount]): mounts
network (PluginConfigNetwork):
pid_host (bool): pid host
propagated_mount (str): propagated mount
work_dir (str): work dir
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
docker_version (str): Docker Version used to create the plugin. [optional] # noqa: E501
user (PluginConfigUser): [optional] # noqa: E501
rootfs (PluginConfigRootfs): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
self = super(OpenApiModel, cls).__new__(cls)
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.args = args
self.description = description
self.documentation = documentation
self.entrypoint = entrypoint
self.env = env
self.interface = interface
self.ipc_host = ipc_host
self.linux = linux
self.mounts = mounts
self.network = network
self.pid_host = pid_host
self.propagated_mount = propagated_mount
self.work_dir = work_dir
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
return self
required_properties = set([
'_data_store',
'_check_type',
'_spec_property_naming',
'_path_to_item',
'_configuration',
'_visited_composed_classes',
])
@convert_js_args_to_python_args
def __init__(self, args, description, documentation, entrypoint, env, interface, ipc_host, linux, mounts, network, pid_host, propagated_mount, work_dir, *args, **kwargs): # noqa: E501
"""PluginConfig - a model defined in OpenAPI
Args:
args (PluginConfigArgs):
description (str): description
documentation (str): documentation
entrypoint ([str]): entrypoint
env ([PluginEnv]): env
interface (PluginConfigInterface):
ipc_host (bool): ipc host
linux (PluginConfigLinux):
mounts ([PluginMount]): mounts
network (PluginConfigNetwork):
pid_host (bool): pid host
propagated_mount (str): propagated mount
work_dir (str): work dir
Keyword Args:
_check_type (bool): if True, values for parameters in openapi_types
will be type checked and a TypeError will be
raised if the wrong type is input.
Defaults to True
_path_to_item (tuple/list): This is a list of keys or values to
drill down to the model in received_data
when deserializing a response
_spec_property_naming (bool): True if the variable names in the input data
are serialized names, as specified in the OpenAPI document.
False if the variable names in the input data
are pythonic names, e.g. snake case (default)
_configuration (Configuration): the instance to use when
deserializing a file_type parameter.
If passed, type conversion is attempted
If omitted no type conversion is done.
_visited_composed_classes (tuple): This stores a tuple of
classes that we have traveled through so that
if we see that class again we will not use its
discriminator again.
When traveling through a discriminator, the
composed schema that is
is traveled through is added to this set.
For example if Animal has a discriminator
petType and we pass in "Dog", and the class Dog
allOf includes Animal, we move through Animal
once using the discriminator, and pick Dog.
Then in Dog, we will make an instance of the
Animal class but this time we won't travel
through its discriminator because we passed in
_visited_composed_classes = (Animal,)
docker_version (str): Docker Version used to create the plugin. [optional] # noqa: E501
user (PluginConfigUser): [optional] # noqa: E501
rootfs (PluginConfigRootfs): [optional] # noqa: E501
"""
_check_type = kwargs.pop('_check_type', True)
_spec_property_naming = kwargs.pop('_spec_property_naming', False)
_path_to_item = kwargs.pop('_path_to_item', ())
_configuration = kwargs.pop('_configuration', None)
_visited_composed_classes = kwargs.pop('_visited_composed_classes', ())
if args:
raise ApiTypeError(
"Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
args,
self.__class__.__name__,
),
path_to_item=_path_to_item,
valid_classes=(self.__class__,),
)
self._data_store = {}
self._check_type = _check_type
self._spec_property_naming = _spec_property_naming
self._path_to_item = _path_to_item
self._configuration = _configuration
self._visited_composed_classes = _visited_composed_classes + (self.__class__,)
self.args = args
self.description = description
self.documentation = documentation
self.entrypoint = entrypoint
self.env = env
self.interface = interface
self.ipc_host = ipc_host
self.linux = linux
self.mounts = mounts
self.network = network
self.pid_host = pid_host
self.propagated_mount = propagated_mount
self.work_dir = work_dir
for var_name, var_value in kwargs.items():
if var_name not in self.attribute_map and \
self._configuration is not None and \
self._configuration.discard_unknown_keys and \
self.additional_properties_type is None:
# discard variable.
continue
setattr(self, var_name, var_value)
if var_name in self.read_only_vars:
raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
f"class with read only attributes.")
| 44.463215 | 197 | 0.586101 |
import re
import sys
from ory_client.model_utils import (
ApiTypeError,
ModelComposed,
ModelNormal,
ModelSimple,
cached_property,
change_keys_js_to_python,
convert_js_args_to_python_args,
date,
datetime,
file_type,
none_type,
validate_get_composed_info,
)
from ..model_utils import OpenApiModel
from ory_client.exceptions import ApiAttributeError
def lazy_import():
    """Import the sibling model modules on first use and publish each class
    into this module's globals so generated code can reference them by name.
    """
    from ory_client.model.plugin_config_args import PluginConfigArgs
    from ory_client.model.plugin_config_interface import PluginConfigInterface
    from ory_client.model.plugin_config_linux import PluginConfigLinux
    from ory_client.model.plugin_config_network import PluginConfigNetwork
    from ory_client.model.plugin_config_rootfs import PluginConfigRootfs
    from ory_client.model.plugin_config_user import PluginConfigUser
    from ory_client.model.plugin_env import PluginEnv
    from ory_client.model.plugin_mount import PluginMount
    for model_cls in (PluginConfigArgs, PluginConfigInterface,
                      PluginConfigLinux, PluginConfigNetwork,
                      PluginConfigRootfs, PluginConfigUser,
                      PluginEnv, PluginMount):
        globals()[model_cls.__name__] = model_cls
class PluginConfig(ModelNormal):
    """The plugin configuration - an auto-generated OpenAPI model.

    Fix note: both constructors declared the varargs catch-all ``*args``
    next to the model property ``args``; a duplicate parameter name is a
    SyntaxError in Python and made this module unimportable.  The
    catch-all is now ``*_args`` (invisible to callers).
    """

    # No enum-constrained properties on this model, so no allowed values.
    allowed_values = {
    }
    # No max/min/length/regex validations on this model's properties.
    validations = {
    }

    @cached_property
    def additional_properties_type():
        """Types accepted for properties not declared in ``openapi_types``.

        A method (not a plain attribute) because a model may have properties
        of its own type; it must run after the class is loaded.
        """
        lazy_import()
        return (bool, date, datetime, dict, float, int, list, str, none_type,)

    # This schema itself may not be serialized as JSON null.
    _nullable = False

    @cached_property
    def openapi_types():
        """Map attribute name -> tuple of accepted types for that attribute."""
        lazy_import()
        return {
            'args': (PluginConfigArgs,),
            'description': (str,),
            'documentation': (str,),
            'entrypoint': ([str],),
            'env': ([PluginEnv],),
            'interface': (PluginConfigInterface,),
            'ipc_host': (bool,),
            'linux': (PluginConfigLinux,),
            'mounts': ([PluginMount],),
            'network': (PluginConfigNetwork,),
            'pid_host': (bool,),
            'propagated_mount': (str,),
            'work_dir': (str,),
            'docker_version': (str,),
            'user': (PluginConfigUser,),
            'rootfs': (PluginConfigRootfs,),
        }

    @cached_property
    def discriminator():
        # Not a polymorphic (discriminated) schema.
        return None

    # Python attribute name -> JSON key used on the wire.
    attribute_map = {
        'args': 'Args',
        'description': 'Description',
        'documentation': 'Documentation',
        'entrypoint': 'Entrypoint',
        'env': 'Env',
        'interface': 'Interface',
        'ipc_host': 'IpcHost',
        'linux': 'Linux',
        'mounts': 'Mounts',
        'network': 'Network',
        'pid_host': 'PidHost',
        'propagated_mount': 'PropagatedMount',
        'work_dir': 'WorkDir',
        'docker_version': 'DockerVersion',
        'user': 'User',
        'rootfs': 'rootfs',
    }

    # Attributes that may only be set when deserializing server data.
    read_only_vars = {
    }

    # Empty: this is not a composed (allOf/oneOf/anyOf) schema.
    _composed_schemas = {}

    @classmethod
    @convert_js_args_to_python_args
    def _from_openapi_data(cls, args, description, documentation, entrypoint, env, interface, ipc_host, linux, mounts, network, pid_host, propagated_mount, work_dir, *_args, **kwargs):
        """Build a PluginConfig instance from deserialized OpenAPI data.

        Positional args are the required model properties; see
        ``openapi_types`` for their types.  Recognized control keywords:
        _check_type, _spec_property_naming, _path_to_item, _configuration,
        _visited_composed_classes.  Remaining keywords become optional
        properties (docker_version, user, rootfs) or additional properties.
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        self = super(OpenApiModel, cls).__new__(cls)

        if _args:
            # Extra positional arguments are never valid for a model.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    _args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.args = args
        self.description = description
        self.documentation = documentation
        self.entrypoint = entrypoint
        self.env = env
        self.interface = interface
        self.ipc_host = ipc_host
        self.linux = linux
        self.mounts = mounts
        self.network = network
        self.pid_host = pid_host
        self.propagated_mount = propagated_mount
        self.work_dir = work_dir
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # Unknown key and the configuration says to discard it.
                continue
            setattr(self, var_name, var_value)
        return self

    # Internal bookkeeping attributes set by the constructors themselves.
    required_properties = set([
        '_data_store',
        '_check_type',
        '_spec_property_naming',
        '_path_to_item',
        '_configuration',
        '_visited_composed_classes',
    ])

    @convert_js_args_to_python_args
    def __init__(self, args, description, documentation, entrypoint, env, interface, ipc_host, linux, mounts, network, pid_host, propagated_mount, work_dir, *_args, **kwargs):
        """PluginConfig - a model defined in OpenAPI.

        Same contract as ``_from_openapi_data`` but additionally rejects
        assignment of read-only attributes (raises ApiAttributeError).
        """
        _check_type = kwargs.pop('_check_type', True)
        _spec_property_naming = kwargs.pop('_spec_property_naming', False)
        _path_to_item = kwargs.pop('_path_to_item', ())
        _configuration = kwargs.pop('_configuration', None)
        _visited_composed_classes = kwargs.pop('_visited_composed_classes', ())

        if _args:
            # Extra positional arguments are never valid for a model.
            raise ApiTypeError(
                "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % (
                    _args,
                    self.__class__.__name__,
                ),
                path_to_item=_path_to_item,
                valid_classes=(self.__class__,),
            )

        self._data_store = {}
        self._check_type = _check_type
        self._spec_property_naming = _spec_property_naming
        self._path_to_item = _path_to_item
        self._configuration = _configuration
        self._visited_composed_classes = _visited_composed_classes + (self.__class__,)

        self.args = args
        self.description = description
        self.documentation = documentation
        self.entrypoint = entrypoint
        self.env = env
        self.interface = interface
        self.ipc_host = ipc_host
        self.linux = linux
        self.mounts = mounts
        self.network = network
        self.pid_host = pid_host
        self.propagated_mount = propagated_mount
        self.work_dir = work_dir
        for var_name, var_value in kwargs.items():
            if var_name not in self.attribute_map and \
                    self._configuration is not None and \
                    self._configuration.discard_unknown_keys and \
                    self.additional_properties_type is None:
                # Unknown key and the configuration says to discard it.
                continue
            setattr(self, var_name, var_value)
            if var_name in self.read_only_vars:
                raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate "
                                        f"class with read only attributes.")
| true | true |
f7f823c3da1af9166c219ff99cae57d0bc609267 | 10,074 | py | Python | python/paddle/fluid/tests/unittests/test_dist_fleet_base.py | Jie-Fang/Paddle | 7a9bd0c5d8eff2f4fd6b58ee786c556a07aa23d6 | [
"Apache-2.0"
] | 1 | 2020-11-03T04:57:40.000Z | 2020-11-03T04:57:40.000Z | python/paddle/fluid/tests/unittests/test_dist_fleet_base.py | Jie-Fang/Paddle | 7a9bd0c5d8eff2f4fd6b58ee786c556a07aa23d6 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_dist_fleet_base.py | Jie-Fang/Paddle | 7a9bd0c5d8eff2f4fd6b58ee786c556a07aa23d6 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
"""
high level unit test for distribute fleet.
"""
import argparse
import os
import pickle
import subprocess
import sys
import time
import traceback
import math
import collections
import socket
from contextlib import closing
import six
import unittest
import numpy as np
import tempfile
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
RUN_STEP = 5
LEARNING_RATE = 0.01


class FleetDistRunnerBase(object):
    """Base harness for one process of a distributed-fleet test.

    run_pserver / run_trainer: init the fleet role, transpile the program
    with the distributed optimizer, then serve parameters or train.
    net / do_training: implemented by child classes (the actual model).

    Changes vs. the original: the role/strategy/optimizer setup that was
    duplicated between run_pserver and run_trainer is factored into
    private helpers, and the unused ``out`` variable was dropped.
    """

    def _build_role(self, args, role):
        # Shared UserDefinedRoleMaker construction for both process kinds.
        return role_maker.UserDefinedRoleMaker(
            current_id=args.current_id,
            role=role,
            worker_num=args.trainers,
            server_endpoints=args.endpoints.split(","))

    def _build_strategy(self, args):
        # Translate command-line switches into a transpiler config.
        strategy = DistributeTranspilerConfig()
        strategy.sync_mode = args.sync_mode
        strategy.geo_sgd_mode = args.geo_sgd_mode
        strategy.geo_sgd_need_push_nums = args.geo_sgd_need_push_nums
        return strategy

    def _minimize(self, args):
        # Build the model network and attach the distributed SGD optimizer.
        strategy = self._build_strategy(args)
        avg_cost = self.net()
        optimizer = fluid.optimizer.SGD(LEARNING_RATE)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

    def run_pserver(self, args):
        """Run this process as a parameter server until shut down.

        Raises:
            ValueError: if args.role is not 'pserver'.
        """
        if args.role.upper() != "PSERVER":
            raise ValueError("args role must be PSERVER")
        fleet.init(self._build_role(args, role_maker.Role.SERVER))
        self._minimize(args)
        fleet.init_server()
        fleet.run_server()

    def run_trainer(self, args):
        """Run this process as a trainer and execute the training loop.

        Raises:
            ValueError: if args.role is not 'trainer'.
        """
        if args.role.upper() != "TRAINER":
            raise ValueError("args role must be TRAINER")
        fleet.init(self._build_role(args, role_maker.Role.WORKER))
        self._minimize(args)
        self.do_training(fleet)

    def net(self, batch_size=4, lr=0.01):
        """Build the model network; must return the average cost variable."""
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def do_training(self, fleet):
        """Run the actual training loop against the given fleet."""
        raise NotImplementedError(
            "do_training should be implemented by child classes.")
class TestFleetBase(unittest.TestCase):
    """Multi-process harness that launches 2 pservers and 2 trainers.

    _start_pserver / _start_trainer: spawn the subprocesses for one side.
    _run_cluster: drive a full distributed run of a model script.

    Changes vs. the original: the mutable default argument
    ``need_envs={}`` was replaced with ``None``, duplicated subprocess
    launch/poll code was factored into helpers, and the dead
    commented-out log-dumping code was removed.
    """

    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def setUp(self):
        self._sync_mode = True
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        # Two distinct free ports for the two parameter servers.
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._geo_sgd = False
        self._geo_sgd_need_push_nums = 5
        self._setup_config()

    def _find_free_port(self):
        """Return a free TCP port that was not handed out before."""

        def __free_port():
            # Bind to port 0 and let the OS pick an available port.
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def _launch(self, cmd, err_log_path, required_envs):
        # Start one subprocess; its stderr goes to a dedicated log file.
        pipe = open(err_log_path, "wb+")
        proc = subprocess.Popen(
            cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=pipe,
            env=required_envs)
        return proc, pipe

    def _start_pserver(self, cmd, required_envs):
        tmp = tempfile.gettempdir()
        ps0_proc, ps0_pipe = self._launch(
            cmd.format(0), tmp + "/ps0_err.log", required_envs)
        ps1_proc, ps1_pipe = self._launch(
            cmd.format(1), tmp + "/ps1_err.log", required_envs)
        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _start_trainer(self, cmd, required_envs):
        tmp = tempfile.gettempdir()
        tr0_proc, tr0_pipe = self._launch(
            cmd.format(0), tmp + "/tr0_err.log", required_envs)
        tr1_proc, tr1_pipe = self._launch(
            cmd.format(1), tmp + "/tr1_err.log", required_envs)
        return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe

    @staticmethod
    def _wait_exit(proc):
        # Poll until the process terminates.
        while proc.poll() is None:
            time.sleep(0.1)

    def _run_cluster(self, model, envs):
        """Run 2 pservers and 2 trainers for `model`, wait for trainers."""
        env = {'CPU_NUM': '1'}
        python_path = self._python_interp
        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            python_path += " -m coverage run --branch -p"
        env.update(envs)
        tr_cmd = "{0} {1} --role trainer --endpoints {2} --current_id {{}} --trainers {3}".format(
            python_path, model, self._ps_endpoints, self._trainers)
        ps_cmd = "{0} {1} --role pserver --endpoints {2} --current_id {{}} --trainers {3}".format(
            python_path, model, self._ps_endpoints, self._trainers)
        if self._sync_mode:
            tr_cmd += " --sync_mode"
            ps_cmd += " --sync_mode"
        if self._geo_sgd:
            tr_cmd += " --geo_sgd_mode {0} --geo_sgd_need_push_nums {1}".format(
                self._geo_sgd, self._geo_sgd_need_push_nums)
            ps_cmd += " --geo_sgd_mode {0} --geo_sgd_need_push_nums {1}".format(
                self._geo_sgd, self._geo_sgd_need_push_nums)
        # Run dist train to compare with local results.
        ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env)
        tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env)
        # Wait until both trainer processes terminate, then reap output.
        self._wait_exit(tr0)
        self._wait_exit(tr1)
        tr0_out, tr0_err = tr0.communicate()
        tr1_out, tr1_err = tr1.communicate()
        # Close the per-process log handles, then stop the servers.
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()
        ps0.terminate()
        ps1.terminate()
        return 0, 0

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs=None):
        """Launch model_file as a 2x2 cluster with a controlled environment.

        need_envs defaults to None instead of a shared mutable dict.
        """
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "http_proxy": ""
        }
        required_envs.update(need_envs or {})
        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"
        self._run_cluster(model_file, required_envs)
def runtime_main(test_class):
parser = argparse.ArgumentParser(description='Run Fleet test.')
parser.add_argument(
'--role', type=str, required=True, choices=['pserver', 'trainer'])
parser.add_argument('--endpoints', type=str, required=False, default="")
parser.add_argument('--current_id', type=int, required=False, default=0)
parser.add_argument('--trainers', type=int, required=False, default=1)
parser.add_argument('--sync_mode', action='store_true')
parser.add_argument(
'--geo_sgd_mode', type=bool, required=False, default=False)
parser.add_argument(
'--geo_sgd_need_push_nums', type=int, required=False, default=2)
args = parser.parse_args()
model = test_class()
if args.role == "pserver":
model.run_pserver(args)
else:
model.run_trainer(args)
| 33.468439 | 98 | 0.609986 |
from __future__ import print_function
import argparse
import os
import pickle
import subprocess
import sys
import time
import traceback
import math
import collections
import socket
from contextlib import closing
import six
import unittest
import numpy as np
import tempfile
import paddle.fluid as fluid
import paddle.fluid.incubate.fleet.base.role_maker as role_maker
from paddle.fluid.incubate.fleet.parameter_server.distribute_transpiler import fleet
from paddle.fluid.transpiler.distribute_transpiler import DistributeTranspilerConfig
RUN_STEP = 5
LEARNING_RATE = 0.01


class FleetDistRunnerBase(object):
    """Base harness for one process of a distributed-fleet test.

    run_pserver / run_trainer: init the fleet role, transpile the program
    with the distributed optimizer, then serve parameters or train.
    net / do_training: implemented by child classes (the actual model).

    Changes vs. the original: the role/strategy/optimizer setup that was
    duplicated between run_pserver and run_trainer is factored into
    private helpers, and the unused ``out`` variable was dropped.
    """

    def _build_role(self, args, role):
        # Shared UserDefinedRoleMaker construction for both process kinds.
        return role_maker.UserDefinedRoleMaker(
            current_id=args.current_id,
            role=role,
            worker_num=args.trainers,
            server_endpoints=args.endpoints.split(","))

    def _build_strategy(self, args):
        # Translate command-line switches into a transpiler config.
        strategy = DistributeTranspilerConfig()
        strategy.sync_mode = args.sync_mode
        strategy.geo_sgd_mode = args.geo_sgd_mode
        strategy.geo_sgd_need_push_nums = args.geo_sgd_need_push_nums
        return strategy

    def _minimize(self, args):
        # Build the model network and attach the distributed SGD optimizer.
        strategy = self._build_strategy(args)
        avg_cost = self.net()
        optimizer = fluid.optimizer.SGD(LEARNING_RATE)
        optimizer = fleet.distributed_optimizer(optimizer, strategy)
        optimizer.minimize(avg_cost)

    def run_pserver(self, args):
        """Run this process as a parameter server until shut down.

        Raises:
            ValueError: if args.role is not 'pserver'.
        """
        if args.role.upper() != "PSERVER":
            raise ValueError("args role must be PSERVER")
        fleet.init(self._build_role(args, role_maker.Role.SERVER))
        self._minimize(args)
        fleet.init_server()
        fleet.run_server()

    def run_trainer(self, args):
        """Run this process as a trainer and execute the training loop.

        Raises:
            ValueError: if args.role is not 'trainer'.
        """
        if args.role.upper() != "TRAINER":
            raise ValueError("args role must be TRAINER")
        fleet.init(self._build_role(args, role_maker.Role.WORKER))
        self._minimize(args)
        self.do_training(fleet)

    def net(self, batch_size=4, lr=0.01):
        """Build the model network; must return the average cost variable."""
        raise NotImplementedError(
            "get_model should be implemented by child classes.")

    def do_training(self, fleet):
        """Run the actual training loop against the given fleet."""
        raise NotImplementedError(
            "do_training should be implemented by child classes.")
class TestFleetBase(unittest.TestCase):
    """Multi-process harness that launches 2 pservers and 2 trainers.

    _start_pserver / _start_trainer: spawn the subprocesses for one side.
    _run_cluster: drive a full distributed run of a model script.

    Changes vs. the original: the mutable default argument
    ``need_envs={}`` was replaced with ``None`` and duplicated subprocess
    launch/poll code was factored into helpers.
    """

    def _setup_config(self):
        raise NotImplementedError("tests should have _setup_config implemented")

    def setUp(self):
        self._sync_mode = True
        self._trainers = 2
        self._pservers = 2
        self._port_set = set()
        # Two distinct free ports for the two parameter servers.
        self._ps_endpoints = "127.0.0.1:%s,127.0.0.1:%s" % (
            self._find_free_port(), self._find_free_port())
        self._python_interp = sys.executable
        self._geo_sgd = False
        self._geo_sgd_need_push_nums = 5
        self._setup_config()

    def _find_free_port(self):
        """Return a free TCP port that was not handed out before."""

        def __free_port():
            # Bind to port 0 and let the OS pick an available port.
            with closing(socket.socket(socket.AF_INET,
                                       socket.SOCK_STREAM)) as s:
                s.bind(('', 0))
                return s.getsockname()[1]

        while True:
            port = __free_port()
            if port not in self._port_set:
                self._port_set.add(port)
                return port

    def _launch(self, cmd, err_log_path, required_envs):
        # Start one subprocess; its stderr goes to a dedicated log file.
        pipe = open(err_log_path, "wb+")
        proc = subprocess.Popen(
            cmd.strip().split(" "),
            stdout=subprocess.PIPE,
            stderr=pipe,
            env=required_envs)
        return proc, pipe

    def _start_pserver(self, cmd, required_envs):
        tmp = tempfile.gettempdir()
        ps0_proc, ps0_pipe = self._launch(
            cmd.format(0), tmp + "/ps0_err.log", required_envs)
        ps1_proc, ps1_pipe = self._launch(
            cmd.format(1), tmp + "/ps1_err.log", required_envs)
        return ps0_proc, ps1_proc, ps0_pipe, ps1_pipe

    def _start_trainer(self, cmd, required_envs):
        tmp = tempfile.gettempdir()
        tr0_proc, tr0_pipe = self._launch(
            cmd.format(0), tmp + "/tr0_err.log", required_envs)
        tr1_proc, tr1_pipe = self._launch(
            cmd.format(1), tmp + "/tr1_err.log", required_envs)
        return tr0_proc, tr1_proc, tr0_pipe, tr1_pipe

    @staticmethod
    def _wait_exit(proc):
        # Poll until the process terminates.
        while proc.poll() is None:
            time.sleep(0.1)

    def _run_cluster(self, model, envs):
        """Run 2 pservers and 2 trainers for `model`, wait for trainers."""
        env = {'CPU_NUM': '1'}
        python_path = self._python_interp
        if os.getenv('WITH_COVERAGE', 'OFF') == 'ON':
            envs['COVERAGE_FILE'] = os.getenv('COVERAGE_FILE', '')
            python_path += " -m coverage run --branch -p"
        env.update(envs)
        tr_cmd = "{0} {1} --role trainer --endpoints {2} --current_id {{}} --trainers {3}".format(
            python_path, model, self._ps_endpoints, self._trainers)
        ps_cmd = "{0} {1} --role pserver --endpoints {2} --current_id {{}} --trainers {3}".format(
            python_path, model, self._ps_endpoints, self._trainers)
        if self._sync_mode:
            tr_cmd += " --sync_mode"
            ps_cmd += " --sync_mode"
        if self._geo_sgd:
            tr_cmd += " --geo_sgd_mode {0} --geo_sgd_need_push_nums {1}".format(
                self._geo_sgd, self._geo_sgd_need_push_nums)
            ps_cmd += " --geo_sgd_mode {0} --geo_sgd_need_push_nums {1}".format(
                self._geo_sgd, self._geo_sgd_need_push_nums)
        # Run dist train to compare with local results.
        ps0, ps1, ps0_pipe, ps1_pipe = self._start_pserver(ps_cmd, env)
        tr0, tr1, tr0_pipe, tr1_pipe = self._start_trainer(tr_cmd, env)
        # Wait until both trainer processes terminate, then reap output.
        self._wait_exit(tr0)
        self._wait_exit(tr1)
        tr0_out, tr0_err = tr0.communicate()
        tr1_out, tr1_err = tr1.communicate()
        # Close the per-process log handles, then stop the servers.
        tr0_pipe.close()
        tr1_pipe.close()
        ps0_pipe.close()
        ps1_pipe.close()
        ps0.terminate()
        ps1.terminate()
        return 0, 0

    def check_with_place(self,
                         model_file,
                         delta=1e-3,
                         check_error_log=False,
                         need_envs=None):
        """Launch model_file as a 2x2 cluster with a controlled environment.

        need_envs defaults to None instead of a shared mutable dict.
        """
        required_envs = {
            "PATH": os.getenv("PATH", ""),
            "PYTHONPATH": os.getenv("PYTHONPATH", ""),
            "LD_LIBRARY_PATH": os.getenv("LD_LIBRARY_PATH", ""),
            "FLAGS_rpc_deadline": "5000",  # 5sec to fail fast
            "http_proxy": ""
        }
        required_envs.update(need_envs or {})
        if check_error_log:
            required_envs["GLOG_v"] = "3"
            required_envs["GLOG_logtostderr"] = "1"
        self._run_cluster(model_file, required_envs)
def runtime_main(test_class):
parser = argparse.ArgumentParser(description='Run Fleet test.')
parser.add_argument(
'--role', type=str, required=True, choices=['pserver', 'trainer'])
parser.add_argument('--endpoints', type=str, required=False, default="")
parser.add_argument('--current_id', type=int, required=False, default=0)
parser.add_argument('--trainers', type=int, required=False, default=1)
parser.add_argument('--sync_mode', action='store_true')
parser.add_argument(
'--geo_sgd_mode', type=bool, required=False, default=False)
parser.add_argument(
'--geo_sgd_need_push_nums', type=int, required=False, default=2)
args = parser.parse_args()
model = test_class()
if args.role == "pserver":
model.run_pserver(args)
else:
model.run_trainer(args)
| true | true |
f7f824352e09942c4e85df299fe28abd9fed10a4 | 18,424 | py | Python | pandaf/002/data.py | cpausmit/Kraken | 54a5b69d274f928a5e53475b9c281815fadfc139 | [
"MIT"
] | null | null | null | pandaf/002/data.py | cpausmit/Kraken | 54a5b69d274f928a5e53475b9c281815fadfc139 | [
"MIT"
] | null | null | null | pandaf/002/data.py | cpausmit/Kraken | 54a5b69d274f928a5e53475b9c281815fadfc139 | [
"MIT"
] | 2 | 2017-03-22T17:33:38.000Z | 2017-09-29T02:38:24.000Z | from FWCore.ParameterSet.VarParsing import VarParsing
options =VarParsing('analysis')
options.register('config', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Single-switch config. Values: 03Feb2017, 23Sep2016, Spring16, Summer16')
options.register('globaltag', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Global tag')
options.register('connect', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Globaltag connect')
options.register('lumilist', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Good lumi list JSON')
options.register('isData', default = False, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.bool, info = 'True if running on Data, False if running on MC')
options.register('useTrigger', default = True, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.bool, info = 'Fill trigger information')
options.register('printLevel', default = 0, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.int, info = 'Debug level of the ntuplizer')
options.register('skipEvents', default = 0, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.int, info = 'Skip first events')
options._tags.pop('numEvent%d')
options._tagOrder.remove('numEvent%d')
options.parseArguments()
options.config = '23Sep2016'
## options.inputFiles = ['XX-LFN-XX']
## options.outputFile = 'kraken-output-file-tmp_000.root'
# Era-dependent reco flags; '03Feb2017' flips the first two below.
jetMETReco = True
muEGFixed = False
egmSmearingType = 'Moriond2017_JEC'

# name -> (isData, globaltag) for every supported single-switch config.
_KNOWN_CONFIGS = {
    '03Feb2017': (True, '80X_dataRun2_2016SeptRepro_v7'),
    '23Sep2016': (True, '80X_dataRun2_2016SeptRepro_v7'),
    'Spring16': (False, '80X_mcRun2_asymptotic_2016_v3'),
    'Summer16': (False, '80X_mcRun2_asymptotic_2016_TrancheIV_v8'),
}

if options.config in _KNOWN_CONFIGS:
    options.isData, options.globaltag = _KNOWN_CONFIGS[options.config]
    if options.config == '03Feb2017':
        jetMETReco = False
        muEGFixed = True
elif options.config:
    # Any non-empty value not listed above is a user error; empty is allowed.
    raise RuntimeError('Unknown config ' + options.config)
import FWCore.ParameterSet.Config as cms
process = cms.Process('NTUPLES')
process.schedule = cms.Schedule()
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 100
for cat in ['PandaProducer', 'JetPtMismatchAtLowPt', 'JetPtMismatch', 'NullTransverseMomentum', 'MissingJetConstituent']:
process.MessageLogger.categories.append(cat)
setattr(process.MessageLogger.cerr, cat, cms.untracked.PSet(limit = cms.untracked.int32(10)))
############
## SOURCE ##
############
### INPUT FILES
process.source = cms.Source('PoolSource',
skipEvents = cms.untracked.uint32(options.skipEvents),
fileNames = cms.untracked.vstring(options.inputFiles)
)
### NUMBER OF EVENTS
process.maxEvents = cms.untracked.PSet(
input = cms.untracked.int32(options.maxEvents)
)
### LUMI MASK
if options.lumilist != '':
import FWCore.PythonUtilities.LumiList as LumiList
process.source.lumisToProcess = LumiList.LumiList(filename = options.lumilist).getVLuminosityBlockRange()
##############
## SERVICES ##
##############
process.load('Configuration.Geometry.GeometryIdeal_cff')
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
if options.globaltag == '':
if options.isData:
process.GlobalTag.globaltag = '80X_dataRun2_2016SeptRepro_v7'
else:
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v8'
else:
process.GlobalTag.globaltag = options.globaltag
process.RandomNumberGeneratorService.panda = cms.PSet(
initialSeed = cms.untracked.uint32(1234567),
engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.slimmedElectrons = cms.PSet(
initialSeed = cms.untracked.uint32(89101112),
engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.slimmedPhotons = cms.PSet(
initialSeed = cms.untracked.uint32(13141516),
engineName = cms.untracked.string('TRandom3')
)
#############################
## RECO SEQUENCE AND SKIMS ##
#############################
import PandaProd.Producer.utils.egmidconf as egmidconf
### EGAMMA CORRECTIONS
from EgammaAnalysis.ElectronTools.regressionApplication_cff import slimmedElectrons as correctedElectrons
from EgammaAnalysis.ElectronTools.regressionApplication_cff import slimmedPhotons as correctedPhotons
from EgammaAnalysis.ElectronTools.regressionWeights_cfi import regressionWeights
regressionWeights(process)
process.correctedElectrons = correctedElectrons
process.correctedPhotons = correctedPhotons
process.selectedElectrons = cms.EDFilter('PATElectronSelector',
src = cms.InputTag('correctedElectrons'),
cut = cms.string('pt > 5 && abs(eta) < 2.5')
)
from PandaProd.Producer.utils.calibratedEgamma_cfi import calibratedPatElectrons, calibratedPatPhotons
process.slimmedElectrons = calibratedPatElectrons.clone(
electrons = 'selectedElectrons',
isMC = (not options.isData),
correctionFile = egmidconf.electronSmearingData[egmSmearingType]
)
process.slimmedPhotons = calibratedPatPhotons.clone(
photons = 'correctedPhotons',
isMC = (not options.isData),
correctionFile = egmidconf.photonSmearingData[egmSmearingType]
)
egmCorrectionSequence = cms.Sequence(
process.correctedElectrons +
process.correctedPhotons +
process.selectedElectrons +
process.slimmedElectrons +
process.slimmedPhotons
)
### PUPPI
# 80X does not contain the latest & greatest PuppiPhoton; need to rerun for all config
from PhysicsTools.PatAlgos.slimming.puppiForMET_cff import makePuppiesFromMiniAOD
# Creates process.puppiMETSequence which includes 'puppi' and 'puppiForMET' (= EDProducer('PuppiPhoton'))
# *UGLY* also runs switchOnVIDPhotonIdProducer and sets up photon id Spring16_V2p2 internally
# which loads photonIDValueMapProducer and egmPhotonIDs
makePuppiesFromMiniAOD(process, createScheduledSequence = True)
# Just renaming
puppiSequence = process.puppiMETSequence
### PUPPI JET
from PandaProd.Producer.utils.makeJets_cff import makeJets
puppiJetSequence = makeJets(process, options.isData, 'AK4PFPuppi', 'puppi', 'Puppi')
### PUPPI MET
from PhysicsTools.PatUtils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
# Creates process.fullPatMetSequencePuppi
# With metType = 'Puppi', slimmedJetsPuppi is automatically selected as the jet source for type 1
runMetCorAndUncFromMiniAOD(
process,
isData = options.isData,
metType = 'Puppi',
pfCandColl = 'puppiForMET',
recoMetFromPFCs = True,
jetFlavor = 'AK4PFPuppi',
postfix = 'Puppi'
)
# There is a bug in a function used by runMetCorAndUncFromMiniAOD (PhysicsTools.PatAlgos.tools.removeIfInSequence)
# The following module is supposed to be removed from the sequence but is not
# The bug appears when we don't call the no-postfix version of runMetCor.. first
process.fullPatMetSequencePuppi.remove(process.selectedPatJetsForMetT1T2CorrPuppi)
### EGAMMA ID
from PhysicsTools.SelectorUtils.tools.vid_id_tools import setupAllVIDIdsInModule, setupVIDElectronSelection, switchOnVIDElectronIdProducer, DataFormat
# Loads egmGsfElectronIDs
switchOnVIDElectronIdProducer(process, DataFormat.MiniAOD)
setupAllVIDIdsInModule(process, 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Summer16_80X_V1_cff', setupVIDElectronSelection)
setupAllVIDIdsInModule(process, 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronHLTPreselecition_Summer16_V1_cff', setupVIDElectronSelection)
# original has @skipCurrentProcess
process.photonIDValueMapProducer.srcMiniAOD = 'slimmedPhotons'
process.load('PandaProd.Auxiliary.WorstIsolationProducer_cfi')
egmIdSequence = cms.Sequence(
process.photonIDValueMapProducer +
process.egmPhotonIDs +
process.egmGsfElectronIDs +
process.worstIsolationProducer
)
### QG TAGGING
process.load('RecoJets.JetProducers.QGTagger_cfi')
process.QGTagger.srcJets = 'slimmedJets'
### FAT JETS
from PandaProd.Producer.utils.makeFatJets_cff import initFatJets, makeFatJets
# pfCHS set up here
fatJetInitSequence = initFatJets(process, options.isData, ['AK8', 'CA15'])
ak8CHSSequence = makeFatJets(
process,
isData = options.isData,
label = 'AK8PFchs',
candidates = 'pfCHS'
)
ak8PuppiSequence = makeFatJets(
process,
isData = options.isData,
label = 'AK8PFPuppi',
candidates = 'puppi'
)
ca15CHSSequence = makeFatJets(
process,
isData = options.isData,
label = 'CA15PFchs',
candidates = 'pfCHS'
)
ca15PuppiSequence = makeFatJets(
process,
isData = options.isData,
label = 'CA15PFPuppi',
candidates = 'puppi'
)
from PandaProd.Producer.utils.setupBTag import initBTag, setupDoubleBTag
initBTag(process, '', 'packedPFCandidates', 'offlineSlimmedPrimaryVertices')
ak8CHSDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsAK8PFchs', 'AK8PFchs', '', 'ak8')
ak8PuppiDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsAK8PFPuppi', 'AK8PFPuppi', '', 'ak8')
ca15CHSDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsCA15PFchs', 'CA15PFchs', '', 'ca15')
ca15PuppiDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsCA15PFPuppi', 'CA15PFPuppi', '', 'ca15')
fatJetSequence = cms.Sequence(
fatJetInitSequence +
ak8CHSSequence +
ak8PuppiSequence +
ca15CHSSequence +
ca15PuppiSequence +
ak8CHSDoubleBTagSequence +
ak8PuppiDoubleBTagSequence +
ca15CHSDoubleBTagSequence +
ca15PuppiDoubleBTagSequence
)
### GEN JET FLAVORS
if not options.isData:
process.load('PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi')
from PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi import ak4JetFlavourInfos
process.selectedHadronsAndPartons.particles = 'prunedGenParticles'
process.ak4GenJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = 'slimmedGenJets'
)
process.ak8GenJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = 'genJetsNoNuAK8',
rParam = 0.8
)
process.ca15GenJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = 'genJetsNoNuCA15',
jetAlgorithm = 'CambridgeAachen',
rParam = 1.5
)
genJetFlavorSequence = cms.Sequence(
process.selectedHadronsAndPartons +
process.ak4GenJetFlavourInfos +
process.ak8GenJetFlavourInfos +
process.ca15GenJetFlavourInfos
)
else:
genJetFlavorSequence = cms.Sequence()
### MONOX FILTER
process.load('PandaProd.Filters.MonoXFilter_cfi')
process.MonoXFilter.taggingMode = True
### RECO PATH
process.reco = cms.Path(
egmCorrectionSequence +
egmIdSequence +
puppiSequence +
puppiJetSequence +
process.fullPatMetSequencePuppi +
process.MonoXFilter +
process.QGTagger +
fatJetSequence +
genJetFlavorSequence
)
if muEGFixed:
### RE-EG-CORRECT PUPPI MET
# THIS FUNCTION IS BUGGY
# from PhysicsTools.PatUtils.tools.eGammaCorrection import eGammaCorrection
from PandaProd.Producer.utils.eGammaCorrection import eGammaCorrection
metCollections = [
'patPFMetRaw',
'patPFMetT1',
'patPFMetT0pcT1',
'patPFMetT1Smear',
'patPFMetT1Txy',
'patPFMetTxy'
]
variations = ['Up', 'Down']
for var in variations:
metCollections.extend([
'patPFMetT1JetEn' + var,
'patPFMetT1JetRes' + var,
'patPFMetT1SmearJetRes' + var,
'patPFMetT1ElectronEn' + var,
'patPFMetT1PhotonEn' + var,
'patPFMetT1MuonEn' + var,
'patPFMetT1TauEn' + var,
'patPFMetT1UnclusteredEn' + var,
])
# Extracts correction from the differences between pre- and post-GSFix e/g collections
# and inserts them into various corrected MET objects
puppiMETEGCorrSequence = eGammaCorrection(
process,
electronCollection = 'slimmedElectronsBeforeGSFix',
photonCollection = 'slimmedPhotonsBeforeGSFix',
corElectronCollection = 'slimmedElectrons',
corPhotonCollection = 'slimmedPhotons',
metCollections = metCollections,
pfCandMatching = False,
pfCandidateCollection = 'packedPFCandidates',
postfix = 'Puppi'
)
process.slimmedMETsPuppi.rawVariation = 'patPFMetRawPuppi'
# insert right after pat puppi met production
process.fullPatMetSequencePuppi.insert(process.fullPatMetSequencePuppi.index(process.patMetModuleSequencePuppi) + 1, puppiMETEGCorrSequence)
else:
### PF CLEANING (BAD MUON REMOVAL)
# Replace all references made so far to packedPFCandidates with cleanMuonsPFCandidates
from PhysicsTools.PatAlgos.tools.helpers import MassSearchReplaceAnyInputTagVisitor
replacePFCandidates = MassSearchReplaceAnyInputTagVisitor('packedPFCandidates', 'cleanMuonsPFCandidates', verbose = False)
for everywhere in [process.producers, process.filters, process.analyzers, process.psets, process.vpsets]:
for name, obj in everywhere.iteritems():
replacePFCandidates.doIt(obj, name)
from PhysicsTools.PatUtils.tools.muonRecoMitigation import muonRecoMitigation
# Adds badGlobalMuonTaggerMAOD, cloneGlobalMuonTaggerMAOD, badMuons, and cleanMuonsPFCandidates
muonRecoMitigation(
process,
pfCandCollection = 'packedPFCandidates',
runOnMiniAOD = True
)
# And of course this is against the convention (MET filters are true if event is *good*) but that's what the REMINIAOD developers chose.
process.Flag_badMuons = cms.Path(process.badGlobalMuonTaggerMAOD)
process.Flag_duplicateMuons = cms.Path(process.cloneGlobalMuonTaggerMAOD)
process.schedule += [process.Flag_badMuons, process.Flag_duplicateMuons]
pfCleaningSequence = cms.Sequence(
process.badMuons +
process.cleanMuonsPFCandidates
)
process.reco.insert(0, pfCleaningSequence)
if jetMETReco:
### JET RE-CORRECTION
from PhysicsTools.PatAlgos.producersLayer1.jetUpdater_cff import updatedPatJetCorrFactors, updatedPatJets
jecLevels= ['L1FastJet', 'L2Relative', 'L3Absolute']
if options.isData:
jecLevels.append('L2L3Residual')
process.updatedPatJetCorrFactors = updatedPatJetCorrFactors.clone(
src = cms.InputTag('slimmedJets', '', cms.InputTag.skipCurrentProcess()),
levels = cms.vstring(*jecLevels),
)
process.slimmedJets = updatedPatJets.clone(
jetSource = cms.InputTag('slimmedJets', '', cms.InputTag.skipCurrentProcess()),
addJetCorrFactors = cms.bool(True),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag('updatedPatJetCorrFactors')),
addBTagInfo = cms.bool(False),
addDiscriminators = cms.bool(False)
)
jetRecorrectionSequence = cms.Sequence(
process.updatedPatJetCorrFactors +
process.slimmedJets
)
process.reco.insert(process.reco.index(process.QGTagger), jetRecorrectionSequence)
### MET
# Collections naming aligned with 03Feb2017 reminiaod
# Creates process.fullPatMetSequenceUncorrected which includes slimmedMETsUncorrected
runMetCorAndUncFromMiniAOD(
process,
isData = options.isData,
postfix = 'Uncorrected'
)
# See note on puppi met
process.fullPatMetSequenceUncorrected.remove(process.selectedPatJetsForMetT1T2CorrUncorrected)
# Creates process.fullPatMetSequenceMuEGClean which includes slimmedMETsMuEGClean
# Postfix MuEGClean is just for convenience - there is no EG cleaning actually applied
runMetCorAndUncFromMiniAOD(
process,
isData = options.isData,
pfCandColl = 'cleanMuonsPFCandidates',
recoMetFromPFCs = True,
postfix = 'MuEGClean'
)
# See note on puppi met
process.fullPatMetSequenceMuEGClean.remove(process.selectedPatJetsForMetT1T2CorrMuEGClean)
process.reco += process.fullPatMetSequenceUncorrected
process.reco += process.fullPatMetSequenceMuEGClean
# Repeated calls to runMetCorAnd.. overwrites the MET source of patCaloMet
process.patCaloMet.metSource = 'metrawCaloPuppi'
#############
## NTULPES ##
#############
process.load('PandaProd.Producer.panda_cfi')
process.panda.isRealData = options.isData
process.panda.useTrigger = options.useTrigger
#process.panda.SelectEvents = ['reco'] # no skim
if options.isData:
process.panda.fillers.partons.enabled = False
process.panda.fillers.genParticles.enabled = False
process.panda.fillers.ak4GenJets.enabled = False
process.panda.fillers.ak8GenJets.enabled = False
process.panda.fillers.ca15GenJets.enabled = False
if not options.useTrigger:
process.panda.fillers.hlt.enabled = False
process.panda.fillers.pfMet.met = 'slimmedMETsMuEGClean'
process.panda.fillers.metNoFix = process.panda.fillers.puppiMet.clone(
met = 'slimmedMETsUncorrected'
)
if muEGFixed:
process.panda.fillers.electrons.gsUnfixedElectrons = cms.untracked.string('slimmedElectronsBeforeGSFix')
process.panda.fillers.photons.gsUnfixedPhotons = cms.untracked.string('slimmedPhotonsBeforeGSFix')
process.panda.fillers.metMuOnlyFix = process.panda.fillers.puppiMet.clone(
met = 'slimmedMETs'
)
process.panda.fillers.metFilters.dupECALClusters = cms.untracked.string('particleFlowEGammaGSFixed:dupECALClusters')
process.panda.fillers.metFilters.unfixedECALHits = cms.untracked.string('ecalMultiAndGSGlobalRecHitEB:hitsNotReplaced')
process.panda.outputFile = options.outputFile
process.panda.printLevel = options.printLevel
process.ntuples = cms.EndPath(process.panda)
process.schedule += [process.reco, process.ntuples]
if options.connect:
if options.connect == 'mit':
options.connect = 'frontier://(proxyurl=http://squid.cmsaf.mit.edu:3128)(proxyurl=http://squid1.cmsaf.mit.edu:3128)(proxyurl=http://squid2.cmsaf.mit.edu:3128)(serverurl=http://cmsfrontier.cern.ch:8000/FrontierProd)/CMS_CONDITIONS'
process.GlobalTag.connect = options.connect
for toGet in process.GlobalTag.toGet:
toGet.connect = options.connect
| 37.676892 | 238 | 0.753419 | from FWCore.ParameterSet.VarParsing import VarParsing
options =VarParsing('analysis')
options.register('config', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Single-switch config. Values: 03Feb2017, 23Sep2016, Spring16, Summer16')
options.register('globaltag', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Global tag')
options.register('connect', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Globaltag connect')
options.register('lumilist', default = '', mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.string, info = 'Good lumi list JSON')
options.register('isData', default = False, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.bool, info = 'True if running on Data, False if running on MC')
options.register('useTrigger', default = True, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.bool, info = 'Fill trigger information')
options.register('printLevel', default = 0, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.int, info = 'Debug level of the ntuplizer')
options.register('skipEvents', default = 0, mult = VarParsing.multiplicity.singleton, mytype = VarParsing.varType.int, info = 'Skip first events')
options._tags.pop('numEvent%d')
options._tagOrder.remove('numEvent%d')
options.parseArguments()
options.config = '23Sep2016'
== '03Feb2017':
jetMETReco = False
muEGFixed = True
options.isData = True
options.globaltag = '80X_dataRun2_2016SeptRepro_v7'
elif options.config == '23Sep2016':
options.isData = True
options.globaltag = '80X_dataRun2_2016SeptRepro_v7'
elif options.config == 'Spring16':
options.isData = False
options.globaltag = '80X_mcRun2_asymptotic_2016_v3'
elif options.config == 'Summer16':
options.isData = False
options.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v8'
elif options.config:
raise RuntimeError('Unknown config ' + options.config)
import FWCore.ParameterSet.Config as cms
process = cms.Process('NTUPLES')
process.schedule = cms.Schedule()
process.load('FWCore.MessageService.MessageLogger_cfi')
process.MessageLogger.cerr.FwkReport.reportEvery = 100
for cat in ['PandaProducer', 'JetPtMismatchAtLowPt', 'JetPtMismatch', 'NullTransverseMomentum', 'MissingJetConstituent']:
process.MessageLogger.categories.append(cat)
setattr(process.MessageLogger.cerr, cat, cms.untracked.PSet(limit = cms.untracked.int32(10)))
input = cms.untracked.int32(options.maxEvents)
)
'':
import FWCore.PythonUtilities.LumiList as LumiList
process.source.lumisToProcess = LumiList.LumiList(filename = options.lumilist).getVLuminosityBlockRange()
load('Configuration.StandardSequences.FrontierConditions_GlobalTag_condDBv2_cff')
if options.globaltag == '':
if options.isData:
process.GlobalTag.globaltag = '80X_dataRun2_2016SeptRepro_v7'
else:
process.GlobalTag.globaltag = '80X_mcRun2_asymptotic_2016_TrancheIV_v8'
else:
process.GlobalTag.globaltag = options.globaltag
process.RandomNumberGeneratorService.panda = cms.PSet(
initialSeed = cms.untracked.uint32(1234567),
engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.slimmedElectrons = cms.PSet(
initialSeed = cms.untracked.uint32(89101112),
engineName = cms.untracked.string('TRandom3')
)
process.RandomNumberGeneratorService.slimmedPhotons = cms.PSet(
initialSeed = cms.untracked.uint32(13141516),
engineName = cms.untracked.string('TRandom3')
)
egmidconf.electronSmearingData[egmSmearingType]
)
process.slimmedPhotons = calibratedPatPhotons.clone(
photons = 'correctedPhotons',
isMC = (not options.isData),
correctionFile = egmidconf.photonSmearingData[egmSmearingType]
)
egmCorrectionSequence = cms.Sequence(
process.correctedElectrons +
process.correctedPhotons +
process.selectedElectrons +
process.slimmedElectrons +
process.slimmedPhotons
)
Tools.PatAlgos.slimming.puppiForMET_cff import makePuppiesFromMiniAOD
makePuppiesFromMiniAOD(process, createScheduledSequence = True)
puppiSequence = process.puppiMETSequence
er.utils.makeJets_cff import makeJets
puppiJetSequence = makeJets(process, options.isData, 'AK4PFPuppi', 'puppi', 'Puppi')
Utils.tools.runMETCorrectionsAndUncertainties import runMetCorAndUncFromMiniAOD
runMetCorAndUncFromMiniAOD(
process,
isData = options.isData,
metType = 'Puppi',
pfCandColl = 'puppiForMET',
recoMetFromPFCs = True,
jetFlavor = 'AK4PFPuppi',
postfix = 'Puppi'
)
process.fullPatMetSequencePuppi.remove(process.selectedPatJetsForMetT1T2CorrPuppi)
### EGAMMA ID
from PhysicsTools.SelectorUtils.tools.vid_id_tools import setupAllVIDIdsInModule, setupVIDElectronSelection, switchOnVIDElectronIdProducer, DataFormat
# Loads egmGsfElectronIDs
switchOnVIDElectronIdProducer(process, DataFormat.MiniAOD)
setupAllVIDIdsInModule(process, 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronID_Summer16_80X_V1_cff', setupVIDElectronSelection)
setupAllVIDIdsInModule(process, 'RecoEgamma.ElectronIdentification.Identification.cutBasedElectronHLTPreselecition_Summer16_V1_cff', setupVIDElectronSelection)
# original has @skipCurrentProcess
process.photonIDValueMapProducer.srcMiniAOD = 'slimmedPhotons'
process.load('PandaProd.Auxiliary.WorstIsolationProducer_cfi')
egmIdSequence = cms.Sequence(
process.photonIDValueMapProducer +
process.egmPhotonIDs +
process.egmGsfElectronIDs +
process.worstIsolationProducer
)
### QG TAGGING
process.load('RecoJets.JetProducers.QGTagger_cfi')
process.QGTagger.srcJets = 'slimmedJets'
### FAT JETS
from PandaProd.Producer.utils.makeFatJets_cff import initFatJets, makeFatJets
# pfCHS set up here
fatJetInitSequence = initFatJets(process, options.isData, ['AK8', 'CA15'])
ak8CHSSequence = makeFatJets(
process,
isData = options.isData,
label = 'AK8PFchs',
candidates = 'pfCHS'
)
ak8PuppiSequence = makeFatJets(
process,
isData = options.isData,
label = 'AK8PFPuppi',
candidates = 'puppi'
)
ca15CHSSequence = makeFatJets(
process,
isData = options.isData,
label = 'CA15PFchs',
candidates = 'pfCHS'
)
ca15PuppiSequence = makeFatJets(
process,
isData = options.isData,
label = 'CA15PFPuppi',
candidates = 'puppi'
)
from PandaProd.Producer.utils.setupBTag import initBTag, setupDoubleBTag
initBTag(process, '', 'packedPFCandidates', 'offlineSlimmedPrimaryVertices')
ak8CHSDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsAK8PFchs', 'AK8PFchs', '', 'ak8')
ak8PuppiDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsAK8PFPuppi', 'AK8PFPuppi', '', 'ak8')
ca15CHSDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsCA15PFchs', 'CA15PFchs', '', 'ca15')
ca15PuppiDoubleBTagSequence = setupDoubleBTag(process, 'packedPatJetsCA15PFPuppi', 'CA15PFPuppi', '', 'ca15')
fatJetSequence = cms.Sequence(
fatJetInitSequence +
ak8CHSSequence +
ak8PuppiSequence +
ca15CHSSequence +
ca15PuppiSequence +
ak8CHSDoubleBTagSequence +
ak8PuppiDoubleBTagSequence +
ca15CHSDoubleBTagSequence +
ca15PuppiDoubleBTagSequence
)
### GEN JET FLAVORS
if not options.isData:
process.load('PhysicsTools.JetMCAlgos.HadronAndPartonSelector_cfi')
from PhysicsTools.JetMCAlgos.AK4PFJetsMCFlavourInfos_cfi import ak4JetFlavourInfos
process.selectedHadronsAndPartons.particles = 'prunedGenParticles'
process.ak4GenJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = 'slimmedGenJets'
)
process.ak8GenJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = 'genJetsNoNuAK8',
rParam = 0.8
)
process.ca15GenJetFlavourInfos = ak4JetFlavourInfos.clone(
jets = 'genJetsNoNuCA15',
jetAlgorithm = 'CambridgeAachen',
rParam = 1.5
)
genJetFlavorSequence = cms.Sequence(
process.selectedHadronsAndPartons +
process.ak4GenJetFlavourInfos +
process.ak8GenJetFlavourInfos +
process.ca15GenJetFlavourInfos
)
else:
genJetFlavorSequence = cms.Sequence()
### MONOX FILTER
process.load('PandaProd.Filters.MonoXFilter_cfi')
process.MonoXFilter.taggingMode = True
### RECO PATH
process.reco = cms.Path(
egmCorrectionSequence +
egmIdSequence +
puppiSequence +
puppiJetSequence +
process.fullPatMetSequencePuppi +
process.MonoXFilter +
process.QGTagger +
fatJetSequence +
genJetFlavorSequence
)
if muEGFixed:
### RE-EG-CORRECT PUPPI MET
# THIS FUNCTION IS BUGGY
# from PhysicsTools.PatUtils.tools.eGammaCorrection import eGammaCorrection
from PandaProd.Producer.utils.eGammaCorrection import eGammaCorrection
metCollections = [
'patPFMetRaw',
'patPFMetT1',
'patPFMetT0pcT1',
'patPFMetT1Smear',
'patPFMetT1Txy',
'patPFMetTxy'
]
variations = ['Up', 'Down']
for var in variations:
metCollections.extend([
'patPFMetT1JetEn' + var,
'patPFMetT1JetRes' + var,
'patPFMetT1SmearJetRes' + var,
'patPFMetT1ElectronEn' + var,
'patPFMetT1PhotonEn' + var,
'patPFMetT1MuonEn' + var,
'patPFMetT1TauEn' + var,
'patPFMetT1UnclusteredEn' + var,
])
# Extracts correction from the differences between pre- and post-GSFix e/g collections
# and inserts them into various corrected MET objects
puppiMETEGCorrSequence = eGammaCorrection(
process,
electronCollection = 'slimmedElectronsBeforeGSFix',
photonCollection = 'slimmedPhotonsBeforeGSFix',
corElectronCollection = 'slimmedElectrons',
corPhotonCollection = 'slimmedPhotons',
metCollections = metCollections,
pfCandMatching = False,
pfCandidateCollection = 'packedPFCandidates',
postfix = 'Puppi'
)
process.slimmedMETsPuppi.rawVariation = 'patPFMetRawPuppi'
# insert right after pat puppi met production
process.fullPatMetSequencePuppi.insert(process.fullPatMetSequencePuppi.index(process.patMetModuleSequencePuppi) + 1, puppiMETEGCorrSequence)
else:
### PF CLEANING (BAD MUON REMOVAL)
# Replace all references made so far to packedPFCandidates with cleanMuonsPFCandidates
from PhysicsTools.PatAlgos.tools.helpers import MassSearchReplaceAnyInputTagVisitor
replacePFCandidates = MassSearchReplaceAnyInputTagVisitor('packedPFCandidates', 'cleanMuonsPFCandidates', verbose = False)
for everywhere in [process.producers, process.filters, process.analyzers, process.psets, process.vpsets]:
for name, obj in everywhere.iteritems():
replacePFCandidates.doIt(obj, name)
from PhysicsTools.PatUtils.tools.muonRecoMitigation import muonRecoMitigation
# Adds badGlobalMuonTaggerMAOD, cloneGlobalMuonTaggerMAOD, badMuons, and cleanMuonsPFCandidates
muonRecoMitigation(
process,
pfCandCollection = 'packedPFCandidates',
runOnMiniAOD = True
)
# And of course this is against the convention (MET filters are true if event is *good*) but that's what the REMINIAOD developers chose.
process.Flag_badMuons = cms.Path(process.badGlobalMuonTaggerMAOD)
process.Flag_duplicateMuons = cms.Path(process.cloneGlobalMuonTaggerMAOD)
process.schedule += [process.Flag_badMuons, process.Flag_duplicateMuons]
pfCleaningSequence = cms.Sequence(
process.badMuons +
process.cleanMuonsPFCandidates
)
process.reco.insert(0, pfCleaningSequence)
if jetMETReco:
ersLayer1.jetUpdater_cff import updatedPatJetCorrFactors, updatedPatJets
jecLevels= ['L1FastJet', 'L2Relative', 'L3Absolute']
if options.isData:
jecLevels.append('L2L3Residual')
process.updatedPatJetCorrFactors = updatedPatJetCorrFactors.clone(
src = cms.InputTag('slimmedJets', '', cms.InputTag.skipCurrentProcess()),
levels = cms.vstring(*jecLevels),
)
process.slimmedJets = updatedPatJets.clone(
jetSource = cms.InputTag('slimmedJets', '', cms.InputTag.skipCurrentProcess()),
addJetCorrFactors = cms.bool(True),
jetCorrFactorsSource = cms.VInputTag(cms.InputTag('updatedPatJetCorrFactors')),
addBTagInfo = cms.bool(False),
addDiscriminators = cms.bool(False)
)
jetRecorrectionSequence = cms.Sequence(
process.updatedPatJetCorrFactors +
process.slimmedJets
)
process.reco.insert(process.reco.index(process.QGTagger), jetRecorrectionSequence)
runMetCorAndUncFromMiniAOD(
process,
isData = options.isData,
postfix = 'Uncorrected'
)
process.fullPatMetSequenceUncorrected.remove(process.selectedPatJetsForMetT1T2CorrUncorrected)
runMetCorAndUncFromMiniAOD(
process,
isData = options.isData,
pfCandColl = 'cleanMuonsPFCandidates',
recoMetFromPFCs = True,
postfix = 'MuEGClean'
)
process.fullPatMetSequenceMuEGClean.remove(process.selectedPatJetsForMetT1T2CorrMuEGClean)
process.reco += process.fullPatMetSequenceUncorrected
process.reco += process.fullPatMetSequenceMuEGClean
process.patCaloMet.metSource = 'metrawCaloPuppi'
rs.partons.enabled = False
process.panda.fillers.genParticles.enabled = False
process.panda.fillers.ak4GenJets.enabled = False
process.panda.fillers.ak8GenJets.enabled = False
process.panda.fillers.ca15GenJets.enabled = False
if not options.useTrigger:
process.panda.fillers.hlt.enabled = False
process.panda.fillers.pfMet.met = 'slimmedMETsMuEGClean'
process.panda.fillers.metNoFix = process.panda.fillers.puppiMet.clone(
met = 'slimmedMETsUncorrected'
)
if muEGFixed:
process.panda.fillers.electrons.gsUnfixedElectrons = cms.untracked.string('slimmedElectronsBeforeGSFix')
process.panda.fillers.photons.gsUnfixedPhotons = cms.untracked.string('slimmedPhotonsBeforeGSFix')
process.panda.fillers.metMuOnlyFix = process.panda.fillers.puppiMet.clone(
met = 'slimmedMETs'
)
process.panda.fillers.metFilters.dupECALClusters = cms.untracked.string('particleFlowEGammaGSFixed:dupECALClusters')
process.panda.fillers.metFilters.unfixedECALHits = cms.untracked.string('ecalMultiAndGSGlobalRecHitEB:hitsNotReplaced')
process.panda.outputFile = options.outputFile
process.panda.printLevel = options.printLevel
process.ntuples = cms.EndPath(process.panda)
process.schedule += [process.reco, process.ntuples]
if options.connect:
if options.connect == 'mit':
options.connect = 'frontier://(proxyurl=http://squid.cmsaf.mit.edu:3128)(proxyurl=http://squid1.cmsaf.mit.edu:3128)(proxyurl=http://squid2.cmsaf.mit.edu:3128)(serverurl=http://cmsfrontier.cern.ch:8000/FrontierProd)/CMS_CONDITIONS'
process.GlobalTag.connect = options.connect
for toGet in process.GlobalTag.toGet:
toGet.connect = options.connect
| true | true |
f7f8246f648f6f813aff23fe3c9f8c23832e677e | 248 | py | Python | checker.py | yhzmiao/Data-Aquisition | 6c14e6b5684d3d91a96e4f16ed22c920d0fa1424 | [
"MIT"
] | null | null | null | checker.py | yhzmiao/Data-Aquisition | 6c14e6b5684d3d91a96e4f16ed22c920d0fa1424 | [
"MIT"
] | null | null | null | checker.py | yhzmiao/Data-Aquisition | 6c14e6b5684d3d91a96e4f16ed22c920d0fa1424 | [
"MIT"
] | null | null | null | fin = open("date.txt", 'r')
dates = fin.readlines()
for i in range(0, len(dates) - 1):
print 'Now for ' + dates[i][:-1] + '!'
filename = dates[i][2:-1] + '.csv'
fp = open(filename, "r")
filetext = fp.readlines()
print filetext[len(filetext)]
| 24.8 | 39 | 0.596774 | fin = open("date.txt", 'r')
dates = fin.readlines()
for i in range(0, len(dates) - 1):
print 'Now for ' + dates[i][:-1] + '!'
filename = dates[i][2:-1] + '.csv'
fp = open(filename, "r")
filetext = fp.readlines()
print filetext[len(filetext)]
| false | true |
f7f8253f19638d2aa58e8aab67e0259916465f64 | 6,549 | py | Python | emerge/tests/metrics/test_tfidf.py | vianding/test_emerge_win | 51a72acc8e96172f8edd3fb0b664d7bbc145bff3 | [
"MIT"
] | null | null | null | emerge/tests/metrics/test_tfidf.py | vianding/test_emerge_win | 51a72acc8e96172f8edd3fb0b664d7bbc145bff3 | [
"MIT"
] | null | null | null | emerge/tests/metrics/test_tfidf.py | vianding/test_emerge_win | 51a72acc8e96172f8edd3fb0b664d7bbc145bff3 | [
"MIT"
] | null | null | null | """
All unit tests that are related to the tfidf metric.
"""
# Authors: Grzegorz Lato <grzegorz.lato@gmail.com>
# License: MIT
import unittest
from typing import Dict
import logging
import coloredlogs
from emerge.languages.cparser import CParser
from emerge.languages.cppparser import CPPParser
from emerge.languages.groovyparser import GroovyParser
from emerge.languages.javaparser import JavaParser
from emerge.languages.javascriptparser import JavaScriptParser
from emerge.languages.typescriptparser import TypeScriptParser
from emerge.languages.kotlinparser import KotlinParser
from emerge.languages.objcparser import ObjCParser
from emerge.languages.rubyparser import RubyParser
from emerge.languages.swiftparser import SwiftParser
from emerge.languages.pyparser import PythonParser
from emerge.languages.abstractparser import AbstractParser
from emerge.analysis import Analysis
from emerge.analyzer import Analyzer
from emerge.metrics.tfidf.tfidf import TFIDFMetric
from emerge.results import FileResult
from tests.testdata.c import C_TEST_FILES
from tests.testdata.cpp import CPP_TEST_FILES
from tests.testdata.groovy import GROOVY_TEST_FILES
from tests.testdata.java import JAVA_TEST_FILES
from tests.testdata.javascript import JAVASCRIPT_TEST_FILES
from tests.testdata.typescript import TYPESCRIPT_TEST_FILES
from tests.testdata.kotlin import KOTLIN_TEST_FILES
from tests.testdata.objc import OBJC_TEST_FILES
from tests.testdata.ruby import RUBY_TEST_FILES
from tests.testdata.swift import SWIFT_TEST_FILES
from tests.testdata.py import PYTHON_TEST_FILES
LOGGER = logging.getLogger('TESTS')
coloredlogs.install(level='INFO', logger=LOGGER, fmt='\n%(asctime)s %(name)s %(levelname)s %(message)s')
class TFIDFTestCase(unittest.TestCase):
    """Integration-style unit tests for the TF-IDF metric.

    Feeds in-memory test sources through every language parser shipped with
    emerge, then checks that the TF-IDF metric can be calculated for the
    resulting file results and entity results.
    """
    def setUp(self):
        # Test source snippets per language, keyed by parser name so each
        # dictionary can be dispatched to its matching parser below.
        self.test_data: Dict[str, Dict[str, str]] = {
            CParser.parser_name(): C_TEST_FILES,
            CPPParser.parser_name(): CPP_TEST_FILES,
            GroovyParser.parser_name(): GROOVY_TEST_FILES,
            JavaParser.parser_name(): JAVA_TEST_FILES,
            JavaScriptParser.parser_name(): JAVASCRIPT_TEST_FILES,
            TypeScriptParser.parser_name(): TYPESCRIPT_TEST_FILES,
            KotlinParser.parser_name(): KOTLIN_TEST_FILES,
            ObjCParser.parser_name(): OBJC_TEST_FILES,
            RubyParser.parser_name(): RUBY_TEST_FILES,
            SwiftParser.parser_name(): SWIFT_TEST_FILES,
            PythonParser.parser_name(): PYTHON_TEST_FILES
        }
        # One fresh parser instance per language, keyed identically to test_data.
        self.parsers: Dict[str, AbstractParser] = {
            CParser.parser_name(): CParser(),
            CPPParser.parser_name(): CPPParser(),
            GroovyParser.parser_name(): GroovyParser(),
            JavaParser.parser_name(): JavaParser(),
            JavaScriptParser.parser_name(): JavaScriptParser(),
            TypeScriptParser.parser_name(): TypeScriptParser(),
            KotlinParser.parser_name(): KotlinParser(),
            ObjCParser.parser_name(): ObjCParser(),
            RubyParser.parser_name(): RubyParser(),
            SwiftParser.parser_name(): SwiftParser(),
            PythonParser.parser_name(): PythonParser()
        }
        # Minimal Analysis/Analyzer fixture: no real directory scan happens,
        # file contents are injected directly inside the tests.
        self.analysis = Analysis()
        self.analyzer = Analyzer(None, self.parsers)
        self.analysis.analysis_name = "test"
        self.analysis.source_directory = "/source"
        self.tfidf_metric = TFIDFMetric(self.analysis)
    def tearDown(self):
        pass
    def test_tfidf_for_file_results(self):
        """Generate file results for all parsers and check if metrics could be calculated."""
        results: Dict[str, FileResult] = {}
        for parser_name, test_data_dict in self.test_data.items():
            for file_name, file_content in test_data_dict.items():
                self.parsers[parser_name].generate_file_result_from_analysis(
                    self.analysis, file_name=file_name, full_file_path="/source/tests/" + file_name, file_content=file_content)
            self.assertTrue(bool(self.parsers[parser_name].results))
            results.update(self.parsers[parser_name].results)
            self.analysis.collect_results_from_parser(self.parsers[parser_name])
        self.assertTrue(bool(results))
        self.assertTrue(bool(self.analysis.file_results))
        # Only file results were generated, so no entity results may exist yet.
        self.assertFalse(bool(self.analysis.entity_results))
        # No metric has run yet: every result must start without metric values.
        for _, result in results.items():
            self.assertFalse(bool(result.metrics))
        self.assertTrue(bool(self.tfidf_metric.metric_name))
        # Register the TF-IDF metric so the analyzer picks it up.
        self.analysis.metrics_for_file_results.update({
            self.tfidf_metric.metric_name: self.tfidf_metric
        })
        self.assertTrue(bool(self.analysis.contains_code_metrics))
        self.assertFalse(bool(self.analysis.contains_graph_metrics))
        self.assertFalse(self.analysis.local_metric_results)
        self.assertFalse(self.analysis.overall_metric_results)
        # NOTE(review): intentionally exercises a private Analyzer method.
        self.analyzer._calculate_code_metric_results(self.analysis)
        self.assertTrue(self.analysis.local_metric_results)
    def test_tfidf_for_entity_results(self):
        """Generate entity results for all parsers and check if metrics could be calculated."""
        results: Dict[str, FileResult] = {}
        for parser_name, test_data_dict in self.test_data.items():
            for file_name, file_content in test_data_dict.items():
                self.parsers[parser_name].generate_file_result_from_analysis(self.analysis, file_name=file_name, full_file_path="/tests/" + file_name, file_content=file_content)
            self.assertTrue(bool(self.parsers[parser_name].results))
            results.update(self.parsers[parser_name].results)
            self.analysis.collect_results_from_parser(self.parsers[parser_name])
        self.assertFalse(self.analysis.entity_results)
        # Not every parser supports entity extraction; skip those that raise.
        for _, parser in self.parsers.items():
            try:
                parser.generate_entity_results_from_analysis(self.analysis)
                self.analysis.collect_results_from_parser(parser)
            except NotImplementedError:
                continue
        self.assertTrue(self.analysis.entity_results)
        self.assertTrue(bool(self.tfidf_metric.metric_name))
        self.analysis.metrics_for_file_results.update({
            self.tfidf_metric.metric_name: self.tfidf_metric
        })
        self.assertFalse(self.analysis.local_metric_results)
        self.assertFalse(self.analysis.overall_metric_results)
        self.analyzer._calculate_code_metric_results(self.analysis)
        self.assertTrue(self.analysis.local_metric_results)
| 42.251613 | 177 | 0.722095 |
import unittest
from typing import Dict
import logging
import coloredlogs
from emerge.languages.cparser import CParser
from emerge.languages.cppparser import CPPParser
from emerge.languages.groovyparser import GroovyParser
from emerge.languages.javaparser import JavaParser
from emerge.languages.javascriptparser import JavaScriptParser
from emerge.languages.typescriptparser import TypeScriptParser
from emerge.languages.kotlinparser import KotlinParser
from emerge.languages.objcparser import ObjCParser
from emerge.languages.rubyparser import RubyParser
from emerge.languages.swiftparser import SwiftParser
from emerge.languages.pyparser import PythonParser
from emerge.languages.abstractparser import AbstractParser
from emerge.analysis import Analysis
from emerge.analyzer import Analyzer
from emerge.metrics.tfidf.tfidf import TFIDFMetric
from emerge.results import FileResult
from tests.testdata.c import C_TEST_FILES
from tests.testdata.cpp import CPP_TEST_FILES
from tests.testdata.groovy import GROOVY_TEST_FILES
from tests.testdata.java import JAVA_TEST_FILES
from tests.testdata.javascript import JAVASCRIPT_TEST_FILES
from tests.testdata.typescript import TYPESCRIPT_TEST_FILES
from tests.testdata.kotlin import KOTLIN_TEST_FILES
from tests.testdata.objc import OBJC_TEST_FILES
from tests.testdata.ruby import RUBY_TEST_FILES
from tests.testdata.swift import SWIFT_TEST_FILES
from tests.testdata.py import PYTHON_TEST_FILES
# Module-level test logger; coloredlogs installs a colored console handler on it.
LOGGER = logging.getLogger('TESTS')
coloredlogs.install(level='INFO', logger=LOGGER, fmt='\n%(asctime)s %(name)s %(levelname)s %(message)s')
class TFIDFTestCase(unittest.TestCase):
    """Integration-style unit tests for the TF-IDF metric across all parsers."""
    def setUp(self):
        # Test source snippets per language, keyed by parser name.
        self.test_data: Dict[str, Dict[str, str]] = {
            CParser.parser_name(): C_TEST_FILES,
            CPPParser.parser_name(): CPP_TEST_FILES,
            GroovyParser.parser_name(): GROOVY_TEST_FILES,
            JavaParser.parser_name(): JAVA_TEST_FILES,
            JavaScriptParser.parser_name(): JAVASCRIPT_TEST_FILES,
            TypeScriptParser.parser_name(): TYPESCRIPT_TEST_FILES,
            KotlinParser.parser_name(): KOTLIN_TEST_FILES,
            ObjCParser.parser_name(): OBJC_TEST_FILES,
            RubyParser.parser_name(): RUBY_TEST_FILES,
            SwiftParser.parser_name(): SWIFT_TEST_FILES,
            PythonParser.parser_name(): PYTHON_TEST_FILES
        }
        # One fresh parser instance per language, keyed identically to test_data.
        self.parsers: Dict[str, AbstractParser] = {
            CParser.parser_name(): CParser(),
            CPPParser.parser_name(): CPPParser(),
            GroovyParser.parser_name(): GroovyParser(),
            JavaParser.parser_name(): JavaParser(),
            JavaScriptParser.parser_name(): JavaScriptParser(),
            TypeScriptParser.parser_name(): TypeScriptParser(),
            KotlinParser.parser_name(): KotlinParser(),
            ObjCParser.parser_name(): ObjCParser(),
            RubyParser.parser_name(): RubyParser(),
            SwiftParser.parser_name(): SwiftParser(),
            PythonParser.parser_name(): PythonParser()
        }
        # Minimal Analysis/Analyzer fixture; file contents are injected directly.
        self.analysis = Analysis()
        self.analyzer = Analyzer(None, self.parsers)
        self.analysis.analysis_name = "test"
        self.analysis.source_directory = "/source"
        self.tfidf_metric = TFIDFMetric(self.analysis)
    def tearDown(self):
        pass
    def test_tfidf_for_file_results(self):
        """Generate file results for all parsers and check that TF-IDF can be calculated."""
        results: Dict[str, FileResult] = {}
        for parser_name, test_data_dict in self.test_data.items():
            for file_name, file_content in test_data_dict.items():
                self.parsers[parser_name].generate_file_result_from_analysis(
                    self.analysis, file_name=file_name, full_file_path="/source/tests/" + file_name, file_content=file_content)
            self.assertTrue(bool(self.parsers[parser_name].results))
            results.update(self.parsers[parser_name].results)
            self.analysis.collect_results_from_parser(self.parsers[parser_name])
        self.assertTrue(bool(results))
        self.assertTrue(bool(self.analysis.file_results))
        # Only file results were generated, so no entity results may exist yet.
        self.assertFalse(bool(self.analysis.entity_results))
        # No metric has run yet: every result must start without metric values.
        for _, result in results.items():
            self.assertFalse(bool(result.metrics))
        self.assertTrue(bool(self.tfidf_metric.metric_name))
        self.analysis.metrics_for_file_results.update({
            self.tfidf_metric.metric_name: self.tfidf_metric
        })
        self.assertTrue(bool(self.analysis.contains_code_metrics))
        self.assertFalse(bool(self.analysis.contains_graph_metrics))
        self.assertFalse(self.analysis.local_metric_results)
        self.assertFalse(self.analysis.overall_metric_results)
        # NOTE(review): intentionally exercises a private Analyzer method.
        self.analyzer._calculate_code_metric_results(self.analysis)
        self.assertTrue(self.analysis.local_metric_results)
    def test_tfidf_for_entity_results(self):
        """Generate entity results for all parsers and check that TF-IDF can be calculated."""
        results: Dict[str, FileResult] = {}
        for parser_name, test_data_dict in self.test_data.items():
            for file_name, file_content in test_data_dict.items():
                self.parsers[parser_name].generate_file_result_from_analysis(self.analysis, file_name=file_name, full_file_path="/tests/" + file_name, file_content=file_content)
            self.assertTrue(bool(self.parsers[parser_name].results))
            results.update(self.parsers[parser_name].results)
            self.analysis.collect_results_from_parser(self.parsers[parser_name])
        self.assertFalse(self.analysis.entity_results)
        # Not every parser supports entity extraction; skip those that raise.
        for _, parser in self.parsers.items():
            try:
                parser.generate_entity_results_from_analysis(self.analysis)
                self.analysis.collect_results_from_parser(parser)
            except NotImplementedError:
                continue
        self.assertTrue(self.analysis.entity_results)
        self.assertTrue(bool(self.tfidf_metric.metric_name))
        self.analysis.metrics_for_file_results.update({
            self.tfidf_metric.metric_name: self.tfidf_metric
        })
        self.assertFalse(self.analysis.local_metric_results)
        self.assertFalse(self.analysis.overall_metric_results)
        self.analyzer._calculate_code_metric_results(self.analysis)
        self.assertTrue(self.analysis.local_metric_results)
| true | true |
f7f825ef30c12850299746baea53e43841af515f | 5,925 | py | Python | src/models/convlstm/convlstm.py | TUM-LMF/MTLCC-pytorch | 894a470be2fb4b9e2e0b9e20e8684131ffdb5577 | [
"MIT"
] | 39 | 2018-08-27T11:33:28.000Z | 2021-12-13T11:17:31.000Z | src/models/convlstm/convlstm.py | TUM-LMF/MTLCC-pytorch | 894a470be2fb4b9e2e0b9e20e8684131ffdb5577 | [
"MIT"
] | 2 | 2019-02-16T11:40:54.000Z | 2020-04-23T08:01:53.000Z | src/models/convlstm/convlstm.py | TUM-LMF/MTLCC-pytorch | 894a470be2fb4b9e2e0b9e20e8684131ffdb5577 | [
"MIT"
] | 16 | 2018-08-29T02:03:31.000Z | 2022-03-12T09:41:06.000Z | import torch.nn as nn
from torch.autograd import Variable
import torch
class ConvLSTMCell(nn.Module):
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
        """
        Initialize ConvLSTM cell.
        Parameters
        ----------
        input_size: (int, int)
            Height and width of input tensor as (height, width).
        input_dim: int
            Number of channels of input tensor.
        hidden_dim: int
            Number of channels of hidden state.
        kernel_size: (int, int)
            Size of the convolutional kernel.
        bias: bool
            Whether or not to add the bias.
        """
        super(ConvLSTMCell, self).__init__()
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # "Same" padding so the hidden state keeps the input's spatial size.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias
        # A single convolution produces all four gate pre-activations at once
        # (hence 4 * hidden_dim output channels).
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)
    def forward(self, input_tensor, cur_state):
        """Run one ConvLSTM step and return the pair (h_next, c_next)."""
        h_cur, c_cur = cur_state
        combined = torch.cat([input_tensor, h_cur], dim=1)  # concatenate along channel axis
        combined_conv = self.conv(combined)
        # Split the gate pre-activations: input, forget, output, candidate.
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        g = torch.tanh(cc_g)
        c_next = f * c_cur + i * g
        h_next = o * torch.tanh(c_next)
        return h_next, c_next
    def init_hidden(self, batch_size):
        """Return zero-initialized (h, c) on the same device as the cell's weights.

        Fix: the original wrapped the tensors in the deprecated
        ``torch.autograd.Variable`` and unconditionally called ``.cuda()``,
        which crashes on CPU-only machines.  Allocating on the conv weight's
        device keeps behavior identical for CUDA models while also working
        on CPU.
        """
        device = self.conv.weight.device
        return (torch.zeros(batch_size, self.hidden_dim, self.height, self.width, device=device),
                torch.zeros(batch_size, self.hidden_dim, self.height, self.width, device=device))
class ConvLSTM(nn.Module):
    """Multi-layer convolutional LSTM over 5-D tensors of shape (b, t, c, h, w)."""
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(ConvLSTM, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Make sure that both `kernel_size` and `hidden_dim` are lists having len == num_layers
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # Layer 0 consumes the raw input; deeper layers consume the
            # previous layer's hidden state.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]
            cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
                                          input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))
        self.cell_list = nn.ModuleList(cell_list)
    def forward(self, input_tensor, hidden_state=None):
        """
        Parameters
        ----------
        input_tensor: todo
            5-D Tensor either of shape (t, b, c, h, w) or (b, t, c, h, w)
        hidden_state: todo
            None. todo implement stateful
        Returns
        -------
        last_state_list, layer_output
        """
        if not self.batch_first:
            # (t, b, c, h, w) -> (b, t, c, h, w)
            # BUG FIX: permute() returns a new tensor; the original code
            # discarded the result, so time-major input was never converted.
            input_tensor = input_tensor.permute(1, 0, 2, 3, 4)
        # Implement stateful ConvLSTM
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
        layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            # Unroll the cell over the time dimension.
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                output_inner.append(h)
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output
            layer_output_list.append(layer_output)
            last_state_list.append([h, c])
        if not self.return_all_layers:
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        return layer_output_list, last_state_list
    def _init_hidden(self, batch_size):
        """Collect zero-initialized (h, c) pairs from every layer's cell."""
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size))
        return init_states
    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        """Raise ValueError unless kernel_size is a tuple or a list of tuples."""
        if not (isinstance(kernel_size, tuple) or
                    (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')
    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        """Broadcast a scalar per-layer parameter to a list of length num_layers."""
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
| 34.852941 | 112 | 0.579578 | import torch.nn as nn
from torch.autograd import Variable
import torch
class ConvLSTMCell(nn.Module):
    """Single convolutional LSTM cell operating on (batch, channels, H, W) maps."""
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, bias):
        """Build the cell.

        input_size: (height, width) of the feature maps.
        input_dim: number of input channels.
        hidden_dim: number of hidden-state channels.
        kernel_size: (kh, kw) of the convolutional kernel.
        bias: whether the convolution uses a bias term.
        """
        super(ConvLSTMCell, self).__init__()
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        # "Same" padding so the hidden state keeps the input's spatial size.
        self.padding = kernel_size[0] // 2, kernel_size[1] // 2
        self.bias = bias
        # One convolution produces all four gate pre-activations (4 * hidden_dim channels).
        self.conv = nn.Conv2d(in_channels=self.input_dim + self.hidden_dim,
                              out_channels=4 * self.hidden_dim,
                              kernel_size=self.kernel_size,
                              padding=self.padding,
                              bias=self.bias)
    def forward(self, input_tensor, cur_state):
        """Run one ConvLSTM step; returns (h_next, c_next)."""
        h_cur, c_cur = cur_state
        # Concatenate input and previous hidden state along the channel axis.
        combined = torch.cat([input_tensor, h_cur], dim=1)
        combined_conv = self.conv(combined)
        # Split gate pre-activations: input, forget, output, candidate.
        cc_i, cc_f, cc_o, cc_g = torch.split(combined_conv, self.hidden_dim, dim=1)
        i = torch.sigmoid(cc_i)
        f = torch.sigmoid(cc_f)
        o = torch.sigmoid(cc_o)
        g = torch.tanh(cc_g)
        c_next = f * c_cur + i * g
        h_next = o * torch.tanh(c_next)
        return h_next, c_next
    def init_hidden(self, batch_size):
        # NOTE(review): Variable is deprecated (a no-op since torch 0.4) and the
        # unconditional .cuda() makes this crash on CPU-only machines — consider
        # allocating on the conv weight's device instead.
        return (Variable(torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).cuda(),
                Variable(torch.zeros(batch_size, self.hidden_dim, self.height, self.width)).cuda())
class ConvLSTM(nn.Module):
    """Multi-layer convolutional LSTM over 5-D tensors of shape (b, t, c, h, w)."""
    def __init__(self, input_size, input_dim, hidden_dim, kernel_size, num_layers,
                 batch_first=False, bias=True, return_all_layers=False):
        super(ConvLSTM, self).__init__()
        self._check_kernel_size_consistency(kernel_size)
        # Broadcast scalar kernel_size/hidden_dim to per-layer lists.
        kernel_size = self._extend_for_multilayer(kernel_size, num_layers)
        hidden_dim = self._extend_for_multilayer(hidden_dim, num_layers)
        if not len(kernel_size) == len(hidden_dim) == num_layers:
            raise ValueError('Inconsistent list length.')
        self.height, self.width = input_size
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.kernel_size = kernel_size
        self.num_layers = num_layers
        self.batch_first = batch_first
        self.bias = bias
        self.return_all_layers = return_all_layers
        cell_list = []
        for i in range(0, self.num_layers):
            # Layer 0 consumes the raw input; deeper layers consume the
            # previous layer's hidden state.
            cur_input_dim = self.input_dim if i == 0 else self.hidden_dim[i-1]
            cell_list.append(ConvLSTMCell(input_size=(self.height, self.width),
                                          input_dim=cur_input_dim,
                                          hidden_dim=self.hidden_dim[i],
                                          kernel_size=self.kernel_size[i],
                                          bias=self.bias))
        self.cell_list = nn.ModuleList(cell_list)
    def forward(self, input_tensor, hidden_state=None):
        """Run the stacked ConvLSTM; returns (layer_output_list, last_state_list)."""
        if not self.batch_first:
            # BUG(review): permute() returns a new tensor and the result is
            # discarded here, so time-major (t, b, c, h, w) input is never
            # actually converted to batch-major.
            input_tensor.permute(1, 0, 2, 3, 4)
        # Stateful operation (caller-supplied hidden state) is not implemented.
        if hidden_state is not None:
            raise NotImplementedError()
        else:
            hidden_state = self._init_hidden(batch_size=input_tensor.size(0))
        layer_output_list = []
        last_state_list = []
        seq_len = input_tensor.size(1)
        cur_layer_input = input_tensor
        for layer_idx in range(self.num_layers):
            h, c = hidden_state[layer_idx]
            output_inner = []
            # Unroll the cell over the time dimension.
            for t in range(seq_len):
                h, c = self.cell_list[layer_idx](input_tensor=cur_layer_input[:, t, :, :, :],
                                                 cur_state=[h, c])
                output_inner.append(h)
            layer_output = torch.stack(output_inner, dim=1)
            cur_layer_input = layer_output
            layer_output_list.append(layer_output)
            last_state_list.append([h, c])
        if not self.return_all_layers:
            # Keep only the deepest layer's outputs/states.
            layer_output_list = layer_output_list[-1:]
            last_state_list = last_state_list[-1:]
        return layer_output_list, last_state_list
    def _init_hidden(self, batch_size):
        """Collect zero-initialized (h, c) pairs from every layer's cell."""
        init_states = []
        for i in range(self.num_layers):
            init_states.append(self.cell_list[i].init_hidden(batch_size))
        return init_states
    @staticmethod
    def _check_kernel_size_consistency(kernel_size):
        """Raise ValueError unless kernel_size is a tuple or a list of tuples."""
        if not (isinstance(kernel_size, tuple) or
                    (isinstance(kernel_size, list) and all([isinstance(elem, tuple) for elem in kernel_size]))):
            raise ValueError('`kernel_size` must be tuple or list of tuples')
    @staticmethod
    def _extend_for_multilayer(param, num_layers):
        """Broadcast a scalar per-layer parameter to a list of length num_layers."""
        if not isinstance(param, list):
            param = [param] * num_layers
        return param
| true | true |
f7f827b8ebed2e23607097109bc6373d882a304a | 2,956 | py | Python | tests/tick_test.py | chrjxj/ibclient | c1a4c82e1f0d7f65eb649869ecd1082d50117eb7 | [
"BSD-2-Clause"
] | 1 | 2021-12-15T09:28:16.000Z | 2021-12-15T09:28:16.000Z | tests/tick_test.py | chrjxj/ibclient | c1a4c82e1f0d7f65eb649869ecd1082d50117eb7 | [
"BSD-2-Clause"
] | null | null | null | tests/tick_test.py | chrjxj/ibclient | c1a4c82e1f0d7f65eb649869ecd1082d50117eb7 | [
"BSD-2-Clause"
] | 3 | 2020-05-19T19:28:55.000Z | 2021-05-18T12:06:08.000Z | # -*- coding:utf-8 -*-
'''
Created on 11/08/2016
@author: Jin Xu
'''
import sys
from os import path
import unittest
import datetime
import time
from pprint import pprint
import random
from ibclient import (IBClient,
MarketOrder, LimitOrder,
new_stock_contract, new_futures_contract)
sys.path.insert(0, path.abspath(path.join(path.dirname(__file__), '..')))
from tests.config import load_config
class Test(unittest.TestCase):
    """Live integration tests for IBClient tick data.

    Requires a reachable IB gateway/TWS and a local 'test-acc.yaml' config;
    each test opens a real connection and exchanges real market-data requests.
    """
    def setUp(self):
        # Load connection and contract parameters from the local test config.
        self.config = load_config('test-acc.yaml')
        pprint(self.config)
        _stock_config = self.config['stock']
        self.stock = new_stock_contract(_stock_config['symbol'],
                                        exchange = _stock_config['exchange'],
                                        currency=_stock_config['currency'])
        self.future = new_futures_contract(self.config['future']['symbol'],
                                           self.config['future']['exchange'],
                                           False,
                                           expiry=str(self.config['future']['expiry']),
                                           tradingClass=self.config['future']['tradingClass'])
        self.end = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')
        # Random client id avoids collisions with other connected IB clients.
        self.con = IBClient(port=self.config['port'],
                            client_id=random.randint(1, 10000))
        self.con.connect()
    def tearDown(self):
        self.con.disconnect()
    def test001_get_tick_snapshot(self):
        """Poll a few tick snapshots; on RuntimeError close the connection."""
        print('test_get_tick_snapshot...')
        for i in range(5):
            try:
                tick_data = self.con.get_tick_snapshot(self.stock)
                time.sleep(3)
            except RuntimeError:
                self.con.close()
            else:
                print(tick_data)
        time.sleep(0.5)
    def test002_get_tick(self):
        """Start a streaming tick subscription, sample it, then cancel it."""
        print('def test_get_tick...')
        try:
            id2, tick_data = self.con.request_tick_data(self.stock)
        except RuntimeError:
            print('test_get_tick:RuntimeError')
        else:
            # Sample the shared tick buffer a few times while data streams in.
            print(self.con.ipc_msg_dict[id2][1].tick_data)
            time.sleep(2)
            print(self.con.ipc_msg_dict[id2][1].tick_data)
            time.sleep(2)
            print(self.con.ipc_msg_dict[id2][1].tick_data)
            time.sleep(2)
            self.con.cancel_tick_request(id2)
            self.con.close()
        time.sleep(0.5)
    def test003_get_market_depth(self):
        """Subscribe to market depth, print updates for a while, then cancel."""
        print('test003_get_market_depth...')
        try:
            id2, _data = self.con.request_market_depth(self.stock)
        except RuntimeError:
            print('test_get_tick:RuntimeError')
        else:
            for _ in range(10):
                print(_data)
                time.sleep(1)
            self.con.cancel_market_depth(id2)
            self.con.close()
        time.sleep(0.5)
if __name__ == "__main__":
    unittest.main()
| 30.163265 | 94 | 0.546685 |
import sys
from os import path
import unittest
import datetime
import time
from pprint import pprint
import random
from ibclient import (IBClient,
MarketOrder, LimitOrder,
new_stock_contract, new_futures_contract)
sys.path.insert(0, path.abspath(path.join(path.dirname(__file__), '..')))
from tests.config import load_config
class Test(unittest.TestCase):
    """Live integration tests for IBClient tick data (needs IB gateway + test-acc.yaml)."""
    def setUp(self):
        # Load connection and contract parameters from the local test config.
        self.config = load_config('test-acc.yaml')
        pprint(self.config)
        _stock_config = self.config['stock']
        self.stock = new_stock_contract(_stock_config['symbol'],
                                        exchange = _stock_config['exchange'],
                                        currency=_stock_config['currency'])
        self.future = new_futures_contract(self.config['future']['symbol'],
                                           self.config['future']['exchange'],
                                           False,
                                           expiry=str(self.config['future']['expiry']),
                                           tradingClass=self.config['future']['tradingClass'])
        self.end = datetime.datetime.now().strftime('%Y%m%d %H:%M:%S')
        # Random client id avoids collisions with other connected IB clients.
        self.con = IBClient(port=self.config['port'],
                            client_id=random.randint(1, 10000))
        self.con.connect()
    def tearDown(self):
        self.con.disconnect()
    def test001_get_tick_snapshot(self):
        """Poll a few tick snapshots; on RuntimeError close the connection."""
        print('test_get_tick_snapshot...')
        for i in range(5):
            try:
                tick_data = self.con.get_tick_snapshot(self.stock)
                time.sleep(3)
            except RuntimeError:
                self.con.close()
            else:
                print(tick_data)
        time.sleep(0.5)
    def test002_get_tick(self):
        """Start a streaming tick subscription, sample it, then cancel it."""
        print('def test_get_tick...')
        try:
            id2, tick_data = self.con.request_tick_data(self.stock)
        except RuntimeError:
            print('test_get_tick:RuntimeError')
        else:
            # Sample the shared tick buffer a few times while data streams in.
            print(self.con.ipc_msg_dict[id2][1].tick_data)
            time.sleep(2)
            print(self.con.ipc_msg_dict[id2][1].tick_data)
            time.sleep(2)
            print(self.con.ipc_msg_dict[id2][1].tick_data)
            time.sleep(2)
            self.con.cancel_tick_request(id2)
            self.con.close()
        time.sleep(0.5)
    def test003_get_market_depth(self):
        """Subscribe to market depth, print updates for a while, then cancel."""
        print('test003_get_market_depth...')
        try:
            id2, _data = self.con.request_market_depth(self.stock)
        except RuntimeError:
            print('test_get_tick:RuntimeError')
        else:
            for _ in range(10):
                print(_data)
                time.sleep(1)
            self.con.cancel_market_depth(id2)
            self.con.close()
        time.sleep(0.5)
if __name__ == "__main__":
    unittest.main()
| true | true |
f7f828489e8c96204219fb682bfbd97fe5e69a61 | 693 | py | Python | thing/migrations/0021_auto_20170824_1953.py | skyride/evething-2 | e0778a539b7f8a56667b2508293ca7e9f515283f | [
"BSD-2-Clause"
] | 21 | 2017-05-24T00:06:07.000Z | 2019-08-06T04:31:18.000Z | thing/migrations/0021_auto_20170824_1953.py | skyride/evething-2 | e0778a539b7f8a56667b2508293ca7e9f515283f | [
"BSD-2-Clause"
] | 11 | 2017-05-23T23:58:57.000Z | 2018-05-27T03:21:30.000Z | thing/migrations/0021_auto_20170824_1953.py | skyride/evething-2 | e0778a539b7f8a56667b2508293ca7e9f515283f | [
"BSD-2-Clause"
] | 10 | 2017-06-08T18:23:51.000Z | 2021-09-05T06:03:59.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds two nullable jump-tracking timestamps
    # to the CharacterDetails model.
    dependencies = [
        ('thing', '0020_auto_20170707_0010'),
    ]
    operations = [
        migrations.AddField(
            model_name='characterdetails',
            name='jump_fatigue_expire_date',
            field=models.DateTimeField(default=None, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='characterdetails',
            name='last_jump_date',
            field=models.DateTimeField(default=None, null=True),
            preserve_default=True,
        ),
    ]
| 25.666667 | 64 | 0.61039 |
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated migration: adds two nullable jump-tracking timestamps
    # to the CharacterDetails model.
    dependencies = [
        ('thing', '0020_auto_20170707_0010'),
    ]
    operations = [
        migrations.AddField(
            model_name='characterdetails',
            name='jump_fatigue_expire_date',
            field=models.DateTimeField(default=None, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='characterdetails',
            name='last_jump_date',
            field=models.DateTimeField(default=None, null=True),
            preserve_default=True,
        ),
    ]
| true | true |
f7f828687dcec133fb81e88915fcaee542ded6a4 | 1,028 | py | Python | project/app/urls.py | dbinetti/brookwood | e3c89e4ab16f07519258f1ced5f20e64b482e47e | [
"BSD-3-Clause"
] | null | null | null | project/app/urls.py | dbinetti/brookwood | e3c89e4ab16f07519258f1ced5f20e64b482e47e | [
"BSD-3-Clause"
] | null | null | null | project/app/urls.py | dbinetti/brookwood | e3c89e4ab16f07519258f1ced5f20e64b482e47e | [
"BSD-3-Clause"
] | null | null | null | # Django
from django.urls import path
from django.views.generic import TemplateView
# Local
from . import views
# URL routing table: landing page, static footer pages, Auth0-style
# authentication endpoints, and account management views.
urlpatterns = [
    # Root
    path('', views.index, name='index',),
    # Footer
    path('about/', TemplateView.as_view(template_name='app/pages/about.html'), name='about',),
    path('faq/', TemplateView.as_view(template_name='app/pages/faq.html'), name='faq',),
    path('privacy/', TemplateView.as_view(template_name='app/pages/privacy.html'), name='privacy',),
    path('terms/', TemplateView.as_view(template_name='app/pages/terms.html'), name='terms',),
    path('support/', TemplateView.as_view(template_name='app/pages/support.html'), name='support',),
    # Authentication
    path('join', views.join, name='join'),
    path('callback', views.callback, name='callback'),
    path('login', views.login, name='login'),
    path('logout', views.logout, name='logout'),
    # Account
    path('account', views.account, name='account',),
    # Delete
    path('delete', views.delete, name='delete',),
]
| 32.125 | 100 | 0.669261 |
from django.urls import path
from django.views.generic import TemplateView
from . import views
# URL routing table: landing page, static footer pages, authentication
# endpoints, and account management views.
urlpatterns = [
    # Root
    path('', views.index, name='index',),
    # Static footer pages
    path('about/', TemplateView.as_view(template_name='app/pages/about.html'), name='about',),
    path('faq/', TemplateView.as_view(template_name='app/pages/faq.html'), name='faq',),
    path('privacy/', TemplateView.as_view(template_name='app/pages/privacy.html'), name='privacy',),
    path('terms/', TemplateView.as_view(template_name='app/pages/terms.html'), name='terms',),
    path('support/', TemplateView.as_view(template_name='app/pages/support.html'), name='support',),
    # Authentication
    path('join', views.join, name='join'),
    path('callback', views.callback, name='callback'),
    path('login', views.login, name='login'),
    path('logout', views.logout, name='logout'),
    # Account management
    path('account', views.account, name='account',),
    path('delete', views.delete, name='delete',),
]
| true | true |
f7f828ea2ca425ce42e041e122131fc649176981 | 2,260 | py | Python | projexbackend/consumers.py | juliolugo96/projex-api | 4bf12ce3ab228e43e7aa1b3d2d8972c1e4733a89 | [
"MIT"
] | 1 | 2019-05-31T04:40:09.000Z | 2019-05-31T04:40:09.000Z | projexbackend/consumers.py | juliolugo96/projex-api | 4bf12ce3ab228e43e7aa1b3d2d8972c1e4733a89 | [
"MIT"
] | 4 | 2020-06-05T20:41:34.000Z | 2021-09-08T00:58:10.000Z | projexbackend/consumers.py | juliolugo96/projex-api | 4bf12ce3ab228e43e7aa1b3d2d8972c1e4733a89 | [
"MIT"
] | 3 | 2019-05-31T04:40:04.000Z | 2020-02-08T21:54:23.000Z | from channels.generic.websocket import JsonWebsocketConsumer
from rest_framework import serializers
from api.serializers import *
from asgiref.sync import async_to_sync
import json
class NotificationConsumer(JsonWebsocketConsumer):
    """Per-user notification channel.

    On connect, the socket joins a channel-layer group derived from the
    authenticated user; events sent to that group with ``'type': 'notify'``
    are forwarded to the websocket as JSON.
    """
    def connect(self):
        # We're always going to accept the connection, though we may
        # close it later based on other factors.
        user = self.scope.get('user')
        group_name = user.get_group_name
        # BUG FIX: async_to_sync must wrap the *callable*; the original code
        # passed it the (unawaited) coroutine returned by group_add, so the
        # socket was never actually subscribed to the group.
        async_to_sync(self.channel_layer.group_add)(
            group_name,
            self.channel_name,
        )
        # Remember the group so disconnect can leave it cleanly.
        self.group_name = group_name
        self.accept()
    def notify(self, event):
        """
        This handles calls elsewhere in this codebase that look
        like:
            channel_layer.group_send(group_name, {
                'type': 'notify',  # This routes it to this handler.
                'content': json_message,
            })
        Don't try to directly use send_json or anything; this
        decoupling will help you as things grow.
        """
        self.send_json(event["payload"])
    def websocket_receive(self, content, **kwargs):
        """
        This handles data sent over the wire from the client.
        We need to validate that the received data is of the correct
        form. You can do this with a simple DRF serializer.
        We then need to use that validated data to confirm that the
        requesting user (available in self.scope["user"] because of
        the use of channels.auth.AuthMiddlewareStack in routing) is
        allowed to subscribe to the requested object.
        """
        # TODO: validate `content` with a serializer and subscribe the socket
        # to the requested group (see class docstring); currently a no-op.
        pass
    def websocket_disconnect(self, message):
        # BUG FIX: the original called `super(self)`, which raises TypeError.
        # Leave the notification group (if we joined one) and delegate to the
        # base-class handler, which performs normal consumer shutdown.
        if getattr(self, 'group_name', None):
            async_to_sync(self.channel_layer.group_discard)(
                self.group_name,
                self.channel_name,
            )
        super(NotificationConsumer, self).websocket_disconnect(message)
| 33.235294 | 68 | 0.633186 | from channels.generic.websocket import JsonWebsocketConsumer
from rest_framework import serializers
from api.serializers import *
from asgiref.sync import async_to_sync
import json
class NotificationConsumer(JsonWebsocketConsumer):
    """Per-user notification channel.

    On connect, the socket joins a channel-layer group derived from the
    authenticated user; events sent to that group with ``'type': 'notify'``
    are forwarded to the websocket as JSON.
    """
    def connect(self):
        user = self.scope.get('user')
        group_name = user.get_group_name
        # BUG FIX: async_to_sync must wrap the *callable*; the original code
        # passed it the (unawaited) coroutine returned by group_add, so the
        # socket was never actually subscribed to the group.
        async_to_sync(self.channel_layer.group_add)(
            group_name,
            self.channel_name,
        )
        # Remember the group so disconnect can leave it cleanly.
        self.group_name = group_name
        self.accept()
    def notify(self, event):
        """Forward a channel-layer event with 'type': 'notify' to the client."""
        self.send_json(event["payload"])
    def websocket_receive(self, content, **kwargs):
        """Handle data sent by the client.

        BUG FIX: the body consisted only of comments, which is a syntax
        error. Intended behavior (per the commented-out draft): validate
        `content` with a serializer and subscribe the socket to the
        requested group. Currently a no-op.
        """
        pass
    def websocket_disconnect(self, message):
        # BUG FIX: the original called `super(self)`, which raises TypeError.
        # Leave the notification group (if we joined one) and delegate to the
        # base-class handler, which performs normal consumer shutdown.
        if getattr(self, 'group_name', None):
            async_to_sync(self.channel_layer.group_discard)(
                self.group_name,
                self.channel_name,
            )
        super(NotificationConsumer, self).websocket_disconnect(message)
| true | true |
f7f829fc9c7881d0f66d7797fc0c258b2afa16ef | 5,291 | py | Python | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/almotion_collisionDetection.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/almotion_collisionDetection.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | null | null | null | naoqi-sdk-2.5.5.5-linux64/doc/_downloads/almotion_collisionDetection.py | applejenny66/docker_pepper | 2469cc4db6585161a31ac44c8fcf2605d71318b1 | [
"MIT"
] | 1 | 2020-10-06T07:44:12.000Z | 2020-10-06T07:44:12.000Z | #! /usr/bin/env python
# -*- encoding: UTF-8 -*-
''' Example :Collision detection - Arm Collision Detection '''
import qi
import argparse
import sys
import almath
import time
def moveArm(motion_service, target, has_hands, chain_name):
''' Function to make NAO bump on his Torso or Head with his arm '''
# Set the fraction of max speed for the arm movement.
pMaxSpeedFraction = 0.5
# Define the final position.
if target == "Torso":
shoulderPitchAngle = 50
elif target == "Head":
shoulderPitchAngle = -50
else:
print "ERROR: target is unknown"
print "Must be Torso or Head"
print "---------------------"
exit(1)
ShoulderRollAngle = 6
ElbowYawAngle = 0
ElbowRollAngle = -150
if chain_name == "LArm":
targetAngles = [shoulderPitchAngle, +ShoulderRollAngle,
+ElbowYawAngle, +ElbowRollAngle]
elif chain_name == "RArm":
targetAngles = [shoulderPitchAngle, -ShoulderRollAngle,
-ElbowYawAngle, -ElbowRollAngle]
else:
print "ERROR: chainName is unknown"
print "Must be LArm or RArm"
print "---------------------"
exit(1)
# Set the target angles according to the robot version.
if has_hands:
targetAngles += [0.0, 0.0]
# Convert to radians.
targetAngles = [x * almath.TO_RAD for x in targetAngles]
# Move the arm to the final position.
motion_service.angleInterpolationWithSpeed(
chain_name, targetAngles, pMaxSpeedFraction)
def main(session, chain_name):
"""
Collision detection : arm collision detection
"""
# Get the services ALMotion, ALRobotModel & ALRobotPosture.
motion_service = session.service("ALMotion")
posture_service = session.service("ALRobotPosture")
model_service = session.service("ALRobotModel")
if model_service.getRobotType() != "Nao" or not model_service.hasArms():
print "This test is not available for your Robot"
print "---------------------"
exit(1)
# Wake up robot
motion_service.wakeUp()
# Send robot to Stand Init
posture_service.goToPosture("StandInit", 0.5)
has_hands = model_service.hasHands()
###############################
# Arm motion bumping on torso #
###############################
# Disable collision detection on chainName.
is_enable = False
success = motion_service.setCollisionProtectionEnabled(chain_name, is_enable)
if (not success):
print("Failed to disable collision protection")
time.sleep(1.0)
# Make NAO's arm move so that it bumps its torso.
target = "Torso"
moveArm(motion_service, target, has_hands, chain_name)
time.sleep(1.0)
# Go back to pose init.
posture_service.goToPosture("StandInit", 1.0)
# Enable collision detection on chainName.
is_enable = True
success = motion_service.setCollisionProtectionEnabled(chain_name, is_enable)
if (not success):
print("Failed to enable collision protection")
time.sleep(1.0)
# Make NAO's arm move and see that it does not bump on the torso.
target = "Torso"
moveArm(motion_service, target, has_hands, chain_name)
##############################
# Arm motion bumping on head #
##############################
time.sleep(1.0)
# Go back to pose init.
posture_service.goToPosture("StandInit", 1.0)
# Disable collision detection on chainName.
is_enable = False
success = motion_service.setCollisionProtectionEnabled(chain_name, is_enable)
if (not success):
print("Failed to disable collision protection")
time.sleep(1.0)
# Make NAO's arm move so that it bumps its head.
target = "Head"
moveArm(motion_service, target, has_hands, chain_name)
time.sleep(1.0)
# Go back to pose init.
posture_service.goToPosture("StandInit", 1.0)
# Enable collision detection on chainName.
is_enable = True
success = motion_service.setCollisionProtectionEnabled(chain_name, is_enable)
if (not success):
print("Failed to enable collision protection")
# Make NAO's arm move and see that it does not bump on the head.
target = "Head"
moveArm(motion_service, target, has_hands, chain_name)
time.sleep(1.0)
# Go back to pose init.
posture_service.goToPosture("StandInit", 1.0)
# Go to rest position
motion_service.rest()
if __name__ == "__main__":
    # Command-line front end: parse connection options, open a qi session
    # to the robot, then run the collision-detection demo.
    parser = argparse.ArgumentParser()
    parser.add_argument("--ip", type=str, default="127.0.0.1",
                        help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
    parser.add_argument("--port", type=int, default=9559,
                        help="Naoqi port number")
    parser.add_argument("--chain", type=str, default="LArm",
                        choices=["LArm", "RArm"], help="Chain name")
    args = parser.parse_args()

    session = qi.Session()
    endpoint = "tcp://" + args.ip + ":" + str(args.port)
    try:
        session.connect(endpoint)
    except RuntimeError:
        print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
               "Please check your script arguments. Run with -h option for help.")
        sys.exit(1)
    main(session, args.chain)
| 31.873494 | 98 | 0.629371 |
''' Example :Collision detection - Arm Collision Detection '''
import qi
import argparse
import sys
import almath
import time
def moveArm(motion_service, target, has_hands, chain_name):
''' Function to make NAO bump on his Torso or Head with his arm '''
pMaxSpeedFraction = 0.5
if target == "Torso":
shoulderPitchAngle = 50
elif target == "Head":
shoulderPitchAngle = -50
else:
print "ERROR: target is unknown"
print "Must be Torso or Head"
print "---------------------"
exit(1)
ShoulderRollAngle = 6
ElbowYawAngle = 0
ElbowRollAngle = -150
if chain_name == "LArm":
targetAngles = [shoulderPitchAngle, +ShoulderRollAngle,
+ElbowYawAngle, +ElbowRollAngle]
elif chain_name == "RArm":
targetAngles = [shoulderPitchAngle, -ShoulderRollAngle,
-ElbowYawAngle, -ElbowRollAngle]
else:
print "ERROR: chainName is unknown"
print "Must be LArm or RArm"
print "---------------------"
exit(1)
if has_hands:
targetAngles += [0.0, 0.0]
targetAngles = [x * almath.TO_RAD for x in targetAngles]
motion_service.angleInterpolationWithSpeed(
chain_name, targetAngles, pMaxSpeedFraction)
def main(session, chain_name):
"""
Collision detection : arm collision detection
"""
motion_service = session.service("ALMotion")
posture_service = session.service("ALRobotPosture")
model_service = session.service("ALRobotModel")
if model_service.getRobotType() != "Nao" or not model_service.hasArms():
print "This test is not available for your Robot"
print "---------------------"
exit(1)
motion_service.wakeUp()
posture_service.goToPosture("StandInit", 0.5)
has_hands = model_service.hasHands()
arser = argparse.ArgumentParser()
parser.add_argument("--ip", type=str, default="127.0.0.1",
help="Robot IP address. On robot or Local Naoqi: use '127.0.0.1'.")
parser.add_argument("--port", type=int, default=9559,
help="Naoqi port number")
parser.add_argument("--chain", type=str, default="LArm",
choices=["LArm", "RArm"], help="Chain name")
args = parser.parse_args()
session = qi.Session()
try:
session.connect("tcp://" + args.ip + ":" + str(args.port))
except RuntimeError:
print ("Can't connect to Naoqi at ip \"" + args.ip + "\" on port " + str(args.port) +".\n"
"Please check your script arguments. Run with -h option for help.")
sys.exit(1)
main(session, args.chain)
| false | true |
f7f82a970a907273a0de9e90dd224f5e53592565 | 3,340 | py | Python | search_engine/search_engine_with_rank.py | huakeda1/Basic-algorithm-and-framework-study-for-AI | 8776dc500772a6c1f28be9c4a426ed9eca2ec775 | [
"MIT"
] | 2 | 2020-11-24T02:58:24.000Z | 2021-08-18T06:50:28.000Z | search_engine/search_engine_with_rank.py | huakeda1/Basic-algorithm-and-framework-study-for-AI | 8776dc500772a6c1f28be9c4a426ed9eca2ec775 | [
"MIT"
] | null | null | null | search_engine/search_engine_with_rank.py | huakeda1/Basic-algorithm-and-framework-study-for-AI | 8776dc500772a6c1f28be9c4a426ed9eca2ec775 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from functools import reduce
import numpy as np
import os
import re
from scipy.spatial.distance import cosine
# In[2]:
csv_file = 'dataset/news.csv'


def _cut(text):
    # Whitespace-join the jieba tokens so TfidfVectorizer can split on spaces.
    return ' '.join(jieba.cut(text))


if os.path.exists(csv_file):
    # Load a bounded slice of the corpus, fill missing articles with the
    # empty string, pre-segment every article, and persist the result.
    news = pd.read_csv(csv_file, encoding='gb18030', nrows=20000)
    news['content'] = news['content'].fillna('')
    news['cut_words'] = news['content'].apply(_cut)
    news['cut_words'].to_csv('dataset/news_content.csv')
    print('news csv has been successfully processed')
# In[3]:
def reduce_and(vectors):
    """Fold the ``&`` (intersection / bitwise-and) operator over *vectors*,
    left to right, returning the accumulated result."""
    def _and(left, right):
        return left & right
    return reduce(_and, vectors)
# In[4]:
class RetrievalEngine:
    """TF-IDF retrieval engine: index a corpus and rank documents for a query.

    *corpus* is a list of documents whose tokens are separated by spaces
    (pre-segmented, e.g. with jieba).
    """

    def __init__(self, corpus):
        # token_pattern r"(?u)\b\w+\b" accepts single-character tokens,
        # which the sklearn default (two+ chars) would drop -- essential
        # for Chinese text. With smooth_idf,
        # idf = 1 + log((1 + total_docs) / (1 + docs_containing_word)).
        self.vectorizer = TfidfVectorizer(token_pattern=r"(?u)\b\w+\b", max_df=1.0,
                                          stop_words=[], vocabulary=None,
                                          use_idf=True, smooth_idf=True)
        self.vectorizer.fit(corpus)
        self.corpus = corpus
        # d2w: document x term tf-idf matrix; w2d: its transpose (term x doc).
        self.d2w = self.vectorizer.transform(corpus).toarray()
        self.w2d = self.d2w.transpose()

    def get_words_id(self, words):
        """Return vocabulary ids for the known words; unknown words are skipped."""
        return [self.vectorizer.vocabulary_[w] for w in words
                if w in self.vectorizer.vocabulary_]

    def get_w2d_vectors(self, words):
        """Return the term-to-document tf-idf rows for the given words."""
        return self.w2d[self.get_words_id(words)]

    def get_combined_common_indices(self, words):
        """Indices of documents that contain *all* of the given words."""
        try:
            return reduce_and([set(np.where(v)[0]) for v in self.get_w2d_vectors(words)])
        except Exception:
            # No known word at all -> reduce over an empty list raises;
            # treat that as "no candidate documents".
            return []

    def get_sorted_indices(self, words):
        """Candidate document indices ranked from most to least similar.

        Bug fixes versus the previous revision:
        * the query vector is now built from the whole query -- the old code
          passed the token list straight to ``transform`` and kept row 0,
          i.e. it only ever used the first word;
        * ``scipy.spatial.distance.cosine`` is a *distance*, so candidates
          are sorted ascending (the old ``reverse=True`` ranked the worst
          matches first).
        """
        indices = self.get_combined_common_indices(words)
        query_vector = self.vectorizer.transform([' '.join(words)]).toarray()[0]
        return sorted(indices, key=lambda idx: cosine(query_vector, self.d2w[idx]))

    def get_requested_text(self, words):
        """Return the matching documents, best match first."""
        return [self.corpus[idx] for idx in self.get_sorted_indices(words)]
# In[5]:
# Smoke test of the engine on a three-document toy corpus. Each document
# is jieba-segmented and whitespace-joined, matching the input format
# RetrievalEngine expects.
corpus=[" ".join(list(jieba.cut("我爱吃香蕉")))," ".join(list(jieba.cut("你爱吃苹果")))," ".join(list(jieba.cut("苹果没有香蕉吃得好")))]
retrieval_engine=RetrievalEngine(corpus)
# Inspect the term-by-document matrix and the learned vocabulary.
print(retrieval_engine.w2d)
print(retrieval_engine.vectorizer.vocabulary_)
# Query tokens, then walk through each stage of the retrieval pipeline:
# ids -> tf-idf rows -> candidate doc indices -> ranked indices -> texts.
words=list(jieba.cut("喜欢水果"))
print(retrieval_engine.get_words_id(words))
print(retrieval_engine.get_w2d_vectors(words))
print(retrieval_engine.get_combined_common_indices(words))
print(retrieval_engine.get_sorted_indices(words))
print(retrieval_engine.get_requested_text(words))
| 33.069307 | 139 | 0.706287 |
import pandas as pd
import jieba
from sklearn.feature_extraction.text import TfidfVectorizer
from functools import reduce
import numpy as np
import os
import re
from scipy.spatial.distance import cosine
csv_file='dataset/news.csv'
if os.path.exists(csv_file):
news=pd.read_csv(csv_file,encoding='gb18030',nrows=20000)
news['content']=news['content'].fillna('')
news['cut_words']=news['content'].apply(lambda x:' '.join(list(jieba.cut(x))))
news['cut_words'].to_csv('dataset/news_content.csv')
print('news csv has been successfully processed')
def reduce_and(vectors):
return reduce(lambda a,b:a&b,vectors)
class RetrievalEngine:
def __init__(self,corpus):
self.vectorizer=TfidfVectorizer(token_pattern=r"(?u)\b\w+\b",max_df=1.0,stop_words=[],vocabulary=None,use_idf=True,smooth_idf=True)
self.vectorizer.fit(corpus)
self.corpus=corpus
self.d2w=self.vectorizer.transform(corpus).toarray()
self.w2d=self.d2w.transpose()
def get_words_id(self,words):
ids=[self.vectorizer.vocabulary_[w] for w in words if w in self.vectorizer.vocabulary_]
return ids
def get_w2d_vectors(self,words):
vectors=self.w2d[self.get_words_id(words)]
return vectors
def get_combined_common_indices(self,words):
try:
indices=reduce_and([set(np.where(v)[0]) for v in self.get_w2d_vectors(words)])
return indices
except Exception as e:
return []
def get_sorted_indices(self,words):
indices=self.get_combined_common_indices(words)
query_vector=self.vectorizer.transform(words).toarray()[0]
sorted_indices=sorted(indices,key=lambda indice:cosine(query_vector,self.d2w[indice]),reverse=True)
return sorted_indices
def get_requested_text(self,words):
sorted_indices=self.get_sorted_indices(words)
output=[self.corpus[indice] for indice in sorted_indices]
return output
corpus=[" ".join(list(jieba.cut("我爱吃香蕉")))," ".join(list(jieba.cut("你爱吃苹果")))," ".join(list(jieba.cut("苹果没有香蕉吃得好")))]
retrieval_engine=RetrievalEngine(corpus)
print(retrieval_engine.w2d)
print(retrieval_engine.vectorizer.vocabulary_)
words=list(jieba.cut("喜欢水果"))
print(retrieval_engine.get_words_id(words))
print(retrieval_engine.get_w2d_vectors(words))
print(retrieval_engine.get_combined_common_indices(words))
print(retrieval_engine.get_sorted_indices(words))
print(retrieval_engine.get_requested_text(words))
| true | true |
f7f82b8d25d7a01f34414de2c1291de2b1051d39 | 1,135 | py | Python | song_dl.py | dodobardo/SongDW | 834733a131f4a7584184c96402d8fea6ac76cb81 | [
"MIT"
] | null | null | null | song_dl.py | dodobardo/SongDW | 834733a131f4a7584184c96402d8fea6ac76cb81 | [
"MIT"
] | null | null | null | song_dl.py | dodobardo/SongDW | 834733a131f4a7584184c96402d8fea6ac76cb81 | [
"MIT"
] | null | null | null | import youtube_dl
import sys
from youtubesearchpython import VideosSearch
# youtube_dl configuration: download the best available audio stream and
# have FFmpeg transcode it into a 320 kbps MP3.
ydl_opts = {
    'format': 'bestaudio/best',
    # NOTE(review): hard-coded absolute output path tied to one user's
    # machine -- confirm/parameterize before reusing elsewhere.
    'outtmpl': '/Users/edoardo/Desktop/songDW/songs/%(title)s.%(ext)s',
    'postprocessors': [{
        'key': 'FFmpegExtractAudio',
        'preferredcodec': 'mp3',
        'preferredquality': '320',
    }],
}
def search_songs(titles):
    """Resolve each song title to the URL of its top YouTube search hit.

    :param titles: iterable of song title strings.
    :return: list of video URLs (at most one per title).

    Bug fix: a title with no search results previously raised IndexError
    on ``[0]``; such titles are now skipped with a warning instead.
    """
    urls = []
    for title in titles:
        videos_search = VideosSearch(title, limit=1)
        hits = videos_search.result().get('result')
        if not hits:
            print(f"No YouTube result for {title!r}, skipping")
            continue
        urls.append(hits[0].get('link'))
    return urls
def download_songs(urls):
    """Download and transcode every URL in *urls* using the module-level
    ``ydl_opts`` configuration."""
    downloader = youtube_dl.YoutubeDL(ydl_opts)
    with downloader as ydl:
        ydl.download(urls)
def load_songs_from_file():
    """Read song titles from ``./songs.txt``, one title per line.

    :return: list of non-empty, stripped lines; ``[]`` on read failure.

    Fixes versus the previous revision:
    * a blank line no longer truncates the list -- the old loop used the
      first empty line as an end-of-file sentinel, silently dropping any
      songs after it;
    * the file handle is always closed (``with`` statement; it previously
      leaked on exception);
    * an empty list -- not ``None`` -- is returned on error, so callers
      can still iterate the result.
    """
    try:
        with open('./songs.txt', 'r') as f:
            return [line.strip() for line in f if line.strip()]
    except Exception as e:
        print(f"Error {e}")
        return []
if __name__ == "__main__":
    # Pipeline: read titles from disk, resolve them to URLs, download.
    titles = load_songs_from_file()
    urls = search_songs(titles)
    download_songs(urls)
| 24.673913 | 71 | 0.580617 | import youtube_dl
import sys
from youtubesearchpython import VideosSearch
ydl_opts = {
'format': 'bestaudio/best',
'outtmpl': '/Users/edoardo/Desktop/songDW/songs/%(title)s.%(ext)s',
'postprocessors': [{
'key': 'FFmpegExtractAudio',
'preferredcodec': 'mp3',
'preferredquality': '320',
}],
}
def search_songs(titles):
urls = []
for title in titles:
videosSearch = VideosSearch(title, limit = 1)
results = videosSearch.result()
url = results.get('result')[0].get('link')
urls.append(url)
return urls
def download_songs(urls):
with youtube_dl.YoutubeDL(ydl_opts) as ydl:
ydl.download(urls)
def load_songs_from_file():
songs = []
try:
f = open('./songs.txt', 'r')
while f:
song = f.readline()
song = song.rstrip()
if song == "":
break
songs.append(song)
f.close()
return songs
except Exception as e:
print(f"Error {e}")
if __name__ == "__main__":
songs = load_songs_from_file()
download_songs(search_songs(songs))
| true | true |
f7f82bee14747cf9f27b484dadc7d6bfb56f2df1 | 6,547 | py | Python | lib/galaxy/tools/toolbox/integrated_panel.py | uio-bmi/galaxy-graph-peak-caller | 0e0e8e9bd6d461a4e25b49cea2e6753043f747e0 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/toolbox/integrated_panel.py | uio-bmi/galaxy-graph-peak-caller | 0e0e8e9bd6d461a4e25b49cea2e6753043f747e0 | [
"CC-BY-3.0"
] | null | null | null | lib/galaxy/tools/toolbox/integrated_panel.py | uio-bmi/galaxy-graph-peak-caller | 0e0e8e9bd6d461a4e25b49cea2e6753043f747e0 | [
"CC-BY-3.0"
] | null | null | null | import os
import shutil
import string
import tempfile
import time
import traceback
from xml.sax.saxutils import escape
from .panel import (
panel_item_types,
ToolPanelElements
)
INTEGRATED_TOOL_PANEL_DESCRIPTION = """
This is Galaxy's integrated tool panel and should be modified directly only for
reordering tools inside a section. Each time Galaxy starts up, this file is
synchronized with the various tool config files: tools, sections and labels
added to one of these files, will be added also here in the appropriate place,
while elements removed from the tool config files will be correspondingly
deleted from this file.
To modify locally managed tools (e.g. from tool_conf.xml) modify that file
directly and restart Galaxy. Whenever possible Tool Shed managed tools (e.g.
from shed_tool_conf.xml) should be managed from within the Galaxy interface or
via its API - but if changes are necessary (such as to hide a tool or re-assign
its section) modify that file and restart Galaxy.
"""
class ManagesIntegratedToolPanelMixin:
    """Mixin that keeps the on-disk ``integrated_tool_panel.xml`` in sync
    with the in-memory tool panel layout."""

    def _init_integrated_tool_panel(self, config):
        """Initialize panel state from *config*; if the on-disk file already
        has content, load its ordering keys into memory."""
        self.update_integrated_tool_panel = config.update_integrated_tool_panel
        self._integrated_tool_panel_config = config.integrated_tool_panel_config
        self._integrated_tool_panel_tracking_directory = getattr(config, "integrated_tool_panel_tracking_directory", None)
        # In-memory dictionary that defines the layout of the tool_panel.xml file on disk.
        self._integrated_tool_panel = ToolPanelElements()
        self._integrated_tool_panel_config_has_contents = os.path.exists(self._integrated_tool_panel_config) and os.stat(self._integrated_tool_panel_config).st_size > 0
        if self._integrated_tool_panel_config_has_contents:
            self._load_integrated_tool_panel_keys()

    def _save_integrated_tool_panel(self):
        if self.update_integrated_tool_panel:
            # Write the current in-memory integrated_tool_panel to the integrated_tool_panel.xml file.
            # This will cover cases where the Galaxy administrator manually edited one or more of the tool panel
            # config files, adding or removing locally developed tools or workflows. The value of integrated_tool_panel
            # will be False when things like functional tests are the caller.
            self._write_integrated_tool_panel_config_file()

    def _write_integrated_tool_panel_config_file(self):
        """
        Write the current in-memory version of the integrated_tool_panel.xml file to disk. Since Galaxy administrators
        use this file to manage the tool panel, we'll not use xml_to_string() since it doesn't write XML quite right.
        """
        tracking_directory = self._integrated_tool_panel_tracking_directory
        if not tracking_directory:
            fd, filename = tempfile.mkstemp()
            # Bug fix: mkstemp() returns an open OS-level descriptor that was
            # previously leaked; close it -- the file is reopened by name below.
            os.close(fd)
        else:
            if not os.path.exists(tracking_directory):
                os.makedirs(tracking_directory)
            name = "integrated_tool_panel_%.10f.xml" % time.time()
            filename = os.path.join(tracking_directory, name)
        template = string.Template("""<?xml version="1.0"?>
<toolbox>
    <!--
    $INTEGRATED_TOOL_PANEL_DESCRIPTION
    -->
$INTEGRATED_TOOL_PANEL
</toolbox>
""")
        integrated_tool_panel = []
        for key, item_type, item in self._integrated_tool_panel.panel_items_iter():
            if item:
                if item_type == panel_item_types.TOOL:
                    integrated_tool_panel.append('    <tool id="%s" />\n' % item.id)
                elif item_type == panel_item_types.WORKFLOW:
                    integrated_tool_panel.append('    <workflow id="%s" />\n' % item.id)
                elif item_type == panel_item_types.LABEL:
                    label_id = item.id or ''
                    label_text = item.text or ''
                    label_version = item.version or ''
                    integrated_tool_panel.append('    <label id="%s" text="%s" version="%s" />\n' % (label_id, label_text, label_version))
                elif item_type == panel_item_types.SECTION:
                    section_id = item.id or ''
                    section_name = item.name or ''
                    section_version = item.version or ''
                    # Section attributes are escaped because names are free text.
                    integrated_tool_panel.append('    <section id="%s" name="%s" version="%s">\n' % (escape(section_id), escape(section_name), section_version))
                    for section_key, section_item_type, section_item in item.panel_items_iter():
                        if section_item_type == panel_item_types.TOOL:
                            if section_item:
                                integrated_tool_panel.append('        <tool id="%s" />\n' % section_item.id)
                        elif section_item_type == panel_item_types.WORKFLOW:
                            if section_item:
                                integrated_tool_panel.append('        <workflow id="%s" />\n' % section_item.id)
                        elif section_item_type == panel_item_types.LABEL:
                            if section_item:
                                label_id = section_item.id or ''
                                label_text = section_item.text or ''
                                label_version = section_item.version or ''
                                integrated_tool_panel.append('        <label id="%s" text="%s" version="%s" />\n' % (label_id, label_text, label_version))
                    integrated_tool_panel.append('    </section>\n')
        tool_panel_description = '\n    '.join([l for l in INTEGRATED_TOOL_PANEL_DESCRIPTION.split("\n") if l])
        tp_string = template.substitute(INTEGRATED_TOOL_PANEL_DESCRIPTION=tool_panel_description,
                                        INTEGRATED_TOOL_PANEL='\n'.join(integrated_tool_panel))
        with open(filename, "w") as integrated_tool_panel_file:
            integrated_tool_panel_file.write(tp_string)
        destination = os.path.abspath(self._integrated_tool_panel_config)
        if tracking_directory:
            # Bug fix: write the stack trace through a context manager so the
            # handle is closed deterministically (previously relied on GC).
            with open(filename + ".stack", "w") as stack_file:
                stack_file.write(''.join(traceback.format_stack()))
            shutil.copy(filename, filename + ".copy")
            filename = filename + ".copy"
        shutil.move(filename, destination)
        try:
            os.chmod(destination, 0o644)
        except OSError:
            # That can happen if multiple threads are simultaneously moving/chmod'ing this file
            # Should be harmless, though this race condition should be avoided.
            pass
| 55.483051 | 168 | 0.651749 | import os
import shutil
import string
import tempfile
import time
import traceback
from xml.sax.saxutils import escape
from .panel import (
panel_item_types,
ToolPanelElements
)
INTEGRATED_TOOL_PANEL_DESCRIPTION = """
This is Galaxy's integrated tool panel and should be modified directly only for
reordering tools inside a section. Each time Galaxy starts up, this file is
synchronized with the various tool config files: tools, sections and labels
added to one of these files, will be added also here in the appropriate place,
while elements removed from the tool config files will be correspondingly
deleted from this file.
To modify locally managed tools (e.g. from tool_conf.xml) modify that file
directly and restart Galaxy. Whenever possible Tool Shed managed tools (e.g.
from shed_tool_conf.xml) should be managed from within the Galaxy interface or
via its API - but if changes are necessary (such as to hide a tool or re-assign
its section) modify that file and restart Galaxy.
"""
class ManagesIntegratedToolPanelMixin:
def _init_integrated_tool_panel(self, config):
self.update_integrated_tool_panel = config.update_integrated_tool_panel
self._integrated_tool_panel_config = config.integrated_tool_panel_config
self._integrated_tool_panel_tracking_directory = getattr(config, "integrated_tool_panel_tracking_directory", None)
# In-memory dictionary that defines the layout of the tool_panel.xml file on disk.
self._integrated_tool_panel = ToolPanelElements()
self._integrated_tool_panel_config_has_contents = os.path.exists(self._integrated_tool_panel_config) and os.stat(self._integrated_tool_panel_config).st_size > 0
if self._integrated_tool_panel_config_has_contents:
self._load_integrated_tool_panel_keys()
def _save_integrated_tool_panel(self):
if self.update_integrated_tool_panel:
# Write the current in-memory integrated_tool_panel to the integrated_tool_panel.xml file.
# This will cover cases where the Galaxy administrator manually edited one or more of the tool panel
# config files, adding or removing locally developed tools or workflows. The value of integrated_tool_panel
# will be False when things like functional tests are the caller.
self._write_integrated_tool_panel_config_file()
def _write_integrated_tool_panel_config_file(self):
tracking_directory = self._integrated_tool_panel_tracking_directory
if not tracking_directory:
fd, filename = tempfile.mkstemp()
else:
if not os.path.exists(tracking_directory):
os.makedirs(tracking_directory)
name = "integrated_tool_panel_%.10f.xml" % time.time()
filename = os.path.join(tracking_directory, name)
template = string.Template("""<?xml version="1.0"?>
<toolbox>
<!--
$INTEGRATED_TOOL_PANEL_DESCRIPTION
-->
$INTEGRATED_TOOL_PANEL
</toolbox>
""")
integrated_tool_panel = []
for key, item_type, item in self._integrated_tool_panel.panel_items_iter():
if item:
if item_type == panel_item_types.TOOL:
integrated_tool_panel.append(' <tool id="%s" />\n' % item.id)
elif item_type == panel_item_types.WORKFLOW:
integrated_tool_panel.append(' <workflow id="%s" />\n' % item.id)
elif item_type == panel_item_types.LABEL:
label_id = item.id or ''
label_text = item.text or ''
label_version = item.version or ''
integrated_tool_panel.append(' <label id="%s" text="%s" version="%s" />\n' % (label_id, label_text, label_version))
elif item_type == panel_item_types.SECTION:
section_id = item.id or ''
section_name = item.name or ''
section_version = item.version or ''
integrated_tool_panel.append(' <section id="%s" name="%s" version="%s">\n' % (escape(section_id), escape(section_name), section_version))
for section_key, section_item_type, section_item in item.panel_items_iter():
if section_item_type == panel_item_types.TOOL:
if section_item:
integrated_tool_panel.append(' <tool id="%s" />\n' % section_item.id)
elif section_item_type == panel_item_types.WORKFLOW:
if section_item:
integrated_tool_panel.append(' <workflow id="%s" />\n' % section_item.id)
elif section_item_type == panel_item_types.LABEL:
if section_item:
label_id = section_item.id or ''
label_text = section_item.text or ''
label_version = section_item.version or ''
integrated_tool_panel.append(' <label id="%s" text="%s" version="%s" />\n' % (label_id, label_text, label_version))
integrated_tool_panel.append(' </section>\n')
tool_panel_description = '\n '.join([l for l in INTEGRATED_TOOL_PANEL_DESCRIPTION.split("\n") if l])
tp_string = template.substitute(INTEGRATED_TOOL_PANEL_DESCRIPTION=tool_panel_description,
INTEGRATED_TOOL_PANEL='\n'.join(integrated_tool_panel))
with open(filename, "w") as integrated_tool_panel_file:
integrated_tool_panel_file.write(tp_string)
destination = os.path.abspath(self._integrated_tool_panel_config)
if tracking_directory:
open(filename + ".stack", "w").write(''.join(traceback.format_stack()))
shutil.copy(filename, filename + ".copy")
filename = filename + ".copy"
shutil.move(filename, destination)
try:
os.chmod(destination, 0o644)
except OSError:
# That can happen if multiple threads are simultaneously moving/chmod'ing this file
pass
| true | true |
f7f82c281a354ba5cc662d74fc1042b9031d964c | 79 | py | Python | exercises/exercise104.py | djangojeng-e/TIL | bdbe1dfb6ebc48b89067fddda195227cca64b8dc | [
"MIT"
] | null | null | null | exercises/exercise104.py | djangojeng-e/TIL | bdbe1dfb6ebc48b89067fddda195227cca64b8dc | [
"MIT"
] | null | null | null | exercises/exercise104.py | djangojeng-e/TIL | bdbe1dfb6ebc48b89067fddda195227cca64b8dc | [
"MIT"
] | null | null | null | li = [12, 24, 35, 24, 88, 120, 155]
li = [x for x in li if x != 24]
print(li) | 15.8 | 35 | 0.518987 | li = [12, 24, 35, 24, 88, 120, 155]
li = [x for x in li if x != 24]
print(li) | true | true |
f7f82d964b4bebf25084727d0e956e3ef67ba3bd | 4,438 | py | Python | pip/commands/search.py | slacy/pip | 3b2b2ce49e1e894ef24247a3af2ea5993ccdc085 | [
"MIT"
] | null | null | null | pip/commands/search.py | slacy/pip | 3b2b2ce49e1e894ef24247a3af2ea5993ccdc085 | [
"MIT"
] | null | null | null | pip/commands/search.py | slacy/pip | 3b2b2ce49e1e894ef24247a3af2ea5993ccdc085 | [
"MIT"
] | 1 | 2020-01-09T23:05:18.000Z | 2020-01-09T23:05:18.000Z | import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from distutils.version import StrictVersion, LooseVersion
class SearchCommandError(CommandError):
    """Raised for errors specific to the ``pip search`` command."""
    pass
class SearchCommand(Command):
    """Implements ``pip search``: query a PyPI-compatible XML-RPC index and
    print the matching packages."""
    name = 'search'
    usage = '%prog QUERY'
    summary = 'Search PyPI'

    def __init__(self):
        super(SearchCommand, self).__init__()
        self.parser.add_option(
            '--index',
            dest='index',
            metavar='URL',
            default='http://pypi.python.org/pypi',
            help='Base URL of Python Package Index (default %default)')

    def run(self, options, args):
        if not args:
            raise CommandError('Missing required argument (search query).')
        # Query the index, fold the per-version hits into per-package
        # records, then render them (wrapped when stdout is a terminal).
        raw_hits = self.search(args, options.index)
        packages = transform_hits(raw_hits)
        width = get_terminal_size()[0] if sys.stdout.isatty() else None
        print_results(packages, terminal_width=width)

    def search(self, query, index_url):
        """Run an OR'd name/summary search against the index's XML-RPC API."""
        pypi = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
        return pypi.search({'name': query, 'summary': query}, 'or')
def transform_hits(hits):
    """
    The list from pypi is really a list of versions. We want a list of
    packages with the list of versions stored inline. This converts the
    list from pypi into one we can use.
    """
    packages = {}
    for hit in hits:
        name = hit['name']
        entry = packages.get(name)
        if entry is None:
            # First time we see this package: start its record.
            packages[name] = {'name': name,
                              'summary': hit['summary'],
                              'versions': [hit['version']],
                              'score': hit['_pypi_ordering']}
            continue
        entry['versions'].append(hit['version'])
        # The displayed summary/score should belong to the highest version.
        if hit['version'] == highest_version(entry['versions']):
            entry['summary'] = hit['summary']
            entry['score'] = hit['_pypi_ordering']
    # Names are unique now; emit a list ordered by descending score.
    return sorted(packages.values(), key=lambda pkg: pkg['score'], reverse=True)
def print_results(hits, name_column_width=25, terminal_width=None):
    """Pretty-print package hits, annotating locally installed packages.

    :param hits: package dicts as produced by ``transform_hits``.
    :param name_column_width: fixed width of the left-hand name column.
    :param terminal_width: when given, wrap summaries to fit this width.
    """
    installed_packages = [p.project_name for p in pkg_resources.working_set]
    for hit in hits:
        name = hit['name']
        summary = hit['summary'] or ''
        if terminal_width is not None:
            # wrap and indent summary to fit terminal
            summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
            summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
        line = '%s - %s' % (name.ljust(name_column_width), summary)
        try:
            logger.notify(line)
            if name in installed_packages:
                dist = pkg_resources.get_distribution(name)
                # Indent the INSTALLED/LATEST lines under the package name;
                # the finally block restores the logger's shared indent level.
                logger.indent += 2
                try:
                    latest = highest_version(hit['versions'])
                    if dist.version == latest:
                        logger.notify('INSTALLED: %s (latest)' % dist.version)
                    else:
                        logger.notify('INSTALLED: %s' % dist.version)
                        logger.notify('LATEST: %s' % latest)
                finally:
                    logger.indent -= 2
        except UnicodeEncodeError:
            # Terminals with a narrow encoding can fail on some package
            # names; skip those entries rather than aborting the listing.
            pass
def compare_versions(version1, version2):
    """Three-way compare two version strings, returning -1, 0 or 1.

    Tries the strictest scheme first: StrictVersion, then LooseVersion,
    then a comparison of the stringified LooseVersion component lists.
    """
    try:
        return cmp(StrictVersion(version1), StrictVersion(version2))
    # in case of abnormal version number, fall back to LooseVersion
    except ValueError:
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # certain LooseVersion comparisons raise due to unorderable types
        # (mixed int/str components); fall back to string comparison
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
    """Return the maximum of *versions* according to ``compare_versions``.

    Rewritten with a conditional expression: the old ``cond and v1 or v2``
    idiom silently picks ``v2`` whenever ``v1`` is falsy (e.g. an empty
    string) regardless of the comparison result.
    """
    return reduce(lambda v1, v2: v1 if compare_versions(v1, v2) == 1 else v2,
                  versions)
SearchCommand()
| 34.671875 | 102 | 0.620099 | import sys
import textwrap
import pkg_resources
import pip.download
from pip.basecommand import Command
from pip.util import get_terminal_size
from pip.log import logger
from pip.backwardcompat import xmlrpclib, reduce, cmp
from pip.exceptions import CommandError
from distutils.version import StrictVersion, LooseVersion
class SearchCommandError(CommandError):
pass
class SearchCommand(Command):
name = 'search'
usage = '%prog QUERY'
summary = 'Search PyPI'
def __init__(self):
super(SearchCommand, self).__init__()
self.parser.add_option(
'--index',
dest='index',
metavar='URL',
default='http://pypi.python.org/pypi',
help='Base URL of Python Package Index (default %default)')
def run(self, options, args):
if not args:
raise CommandError('Missing required argument (search query).')
query = args
index_url = options.index
pypi_hits = self.search(query, index_url)
hits = transform_hits(pypi_hits)
terminal_width = None
if sys.stdout.isatty():
terminal_width = get_terminal_size()[0]
print_results(hits, terminal_width=terminal_width)
def search(self, query, index_url):
pypi = xmlrpclib.ServerProxy(index_url, pip.download.xmlrpclib_transport)
hits = pypi.search({'name': query, 'summary': query}, 'or')
return hits
def transform_hits(hits):
packages = {}
for hit in hits:
name = hit['name']
summary = hit['summary']
version = hit['version']
score = hit['_pypi_ordering']
if name not in packages.keys():
packages[name] = {'name': name, 'summary': summary, 'versions': [version], 'score': score}
else:
packages[name]['versions'].append(version)
if version == highest_version(packages[name]['versions']):
packages[name]['summary'] = summary
packages[name]['score'] = score
package_list = sorted(packages.values(), key=lambda x: x['score'], reverse=True)
return package_list
def print_results(hits, name_column_width=25, terminal_width=None):
installed_packages = [p.project_name for p in pkg_resources.working_set]
for hit in hits:
name = hit['name']
summary = hit['summary'] or ''
if terminal_width is not None:
summary = textwrap.wrap(summary, terminal_width - name_column_width - 5)
summary = ('\n' + ' ' * (name_column_width + 3)).join(summary)
line = '%s - %s' % (name.ljust(name_column_width), summary)
try:
logger.notify(line)
if name in installed_packages:
dist = pkg_resources.get_distribution(name)
logger.indent += 2
try:
latest = highest_version(hit['versions'])
if dist.version == latest:
logger.notify('INSTALLED: %s (latest)' % dist.version)
else:
logger.notify('INSTALLED: %s' % dist.version)
logger.notify('LATEST: %s' % latest)
finally:
logger.indent -= 2
except UnicodeEncodeError:
pass
def compare_versions(version1, version2):
    # Three-way compare (-1/0/1) of two version strings.
    # NOTE(review): relies on the Python 2 builtin `cmp` and on distutils'
    # StrictVersion/LooseVersion; not Python 3 compatible as written.
    try:
        # Prefer strict (PEP 386 style) comparison when both strings parse.
        return cmp(StrictVersion(version1), StrictVersion(version2))
    except ValueError:
        # Non-strict version numbers fall back to LooseVersion below.
        pass
    try:
        return cmp(LooseVersion(version1), LooseVersion(version2))
    except TypeError:
        # LooseVersion components can mix ints and strings; when that
        # comparison fails, compare everything as strings instead.
        return cmp([str(v) for v in LooseVersion(version1).version],
                   [str(v) for v in LooseVersion(version2).version])
def highest_version(versions):
    """Return the highest of *versions* according to compare_versions.

    The original used ``reduce`` with the fragile ``cond and a or b``
    idiom, which silently yields the wrong operand whenever the winning
    value is falsy; an explicit conditional avoids that trap.  *versions*
    must be non-empty (as it always is for grouped hits).
    """
    best = versions[0]
    for candidate in versions[1:]:
        # Keep `best` only when it strictly compares higher.
        if compare_versions(best, candidate) != 1:
            best = candidate
    return best
SearchCommand()
| true | true |
f7f82ec2ce2f14ec84d71cc500ba7080bfd780f3 | 679 | py | Python | plugins/dbnd-qubole/src/dbnd_qubole/errors.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 224 | 2020-01-02T10:46:37.000Z | 2022-03-02T13:54:08.000Z | plugins/dbnd-qubole/src/dbnd_qubole/errors.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 16 | 2020-03-11T09:37:58.000Z | 2022-01-26T10:22:08.000Z | plugins/dbnd-qubole/src/dbnd_qubole/errors.py | ipattarapong/dbnd | 7bd65621c46c73e078eb628f994127ad4c7dbd1a | [
"Apache-2.0"
] | 24 | 2020-03-24T13:53:50.000Z | 2022-03-22T11:55:18.000Z | from dbnd._core.errors import DatabandRuntimeError
def failed_to_submit_qubole_job(nested_exception):
    """Build the runtime error reported when a Qubole submit request fails.

    Fixes a copy-paste slip: the help message referred to Databricks even
    though this is the Qubole plugin.
    """
    return DatabandRuntimeError(
        "Qubole submit request failed with code %s." % nested_exception.status_code,
        show_exc_info=False,
        nested_exceptions=nested_exception,
        # was: "Check your databricks connection id, ..." — wrong service name
        help_msg="Check your Qubole connection id, cluster url and access token",
    )
def failed_to_run_qubole_job(status_code, log_url, spark_log):
    """Build the runtime error reported when a Qubole run ends with a failure code."""
    error_message = "Qubole run failed with code %s." % status_code
    guidance = "Check spark log for more info: %s." % log_url
    return DatabandRuntimeError(
        error_message,
        show_exc_info=False,
        nested_exceptions=spark_log,
        help_msg=guidance,
    )
| 33.95 | 85 | 0.723122 | from dbnd._core.errors import DatabandRuntimeError
def failed_to_submit_qubole_job(nested_exception):
    """Build the runtime error reported when a Qubole submit request fails.

    Fixes a copy-paste slip: the help message referred to Databricks even
    though this is the Qubole plugin.
    """
    return DatabandRuntimeError(
        "Qubole submit request failed with code %s." % nested_exception.status_code,
        show_exc_info=False,
        nested_exceptions=nested_exception,
        # was: "Check your databricks connection id, ..." — wrong service name
        help_msg="Check your Qubole connection id, cluster url and access token",
    )
def failed_to_run_qubole_job(status_code, log_url, spark_log):
    """Build the runtime error reported when a Qubole run ends with a failure code."""
    message = "Qubole run failed with code %s." % status_code
    return DatabandRuntimeError(
        message,
        show_exc_info=False,
        nested_exceptions=spark_log,
        help_msg="Check spark log for more info: %s." % log_url,
    )
| true | true |
f7f82fc995df5ffe0bb2b37c628bd8926c03879c | 54,752 | py | Python | numpy/f2py/rules.py | serge-sans-paille/numpy | 596795bf697b6be29e21c23d7680e2d476c23436 | [
"BSD-3-Clause"
] | 4 | 2020-01-28T08:48:27.000Z | 2022-02-09T18:45:34.000Z | numpy/f2py/rules.py | serge-sans-paille/numpy | 596795bf697b6be29e21c23d7680e2d476c23436 | [
"BSD-3-Clause"
] | null | null | null | numpy/f2py/rules.py | serge-sans-paille/numpy | 596795bf697b6be29e21c23d7680e2d476c23436 | [
"BSD-3-Clause"
] | 1 | 2015-10-08T10:27:03.000Z | 2015-10-08T10:27:03.000Z | #!/usr/bin/env python
"""
Rules for building C/API module with f2py2e.
Here is a skeleton of a new wrapper function (13Dec2001):
wrapper_function(args)
declarations
get_python_arguments, say, `a' and `b'
get_a_from_python
if (successful) {
get_b_from_python
if (successful) {
callfortran
if (succesful) {
put_a_to_python
if (succesful) {
put_b_to_python
if (succesful) {
buildvalue = ...
}
}
}
}
cleanup_b
}
cleanup_a
return buildvalue
Copyright 1999,2000 Pearu Peterson all rights reserved,
Pearu Peterson <pearu@ioc.ee>
Permission to use, modify, and distribute this software is given under the
terms of the NumPy License.
NO WARRANTY IS EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK.
$Date: 2005/08/30 08:58:42 $
Pearu Peterson
"""
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import pprint
import sys
import time
import copy
from .auxfuncs import *
from . import capi_maps
from .capi_maps import *
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
errmess = sys.stderr.write
outmess = sys.stdout.write
show = pprint.pprint
# Global f2py build options (filled in by buildmodules) and the separator
# string used when joining accumulated snippets for each C/API section.
options = {}
sepdict = {}
# Historical note: 'need_cfuncs' once used ',' as its separator.
for k in ['decl', 'frompyobj', 'cleanupfrompyobj',
          'topyarr', 'method',
          'pyobjfrom', 'closepyobjfrom',
          'freemem', 'userincludes',
          'includes0', 'includes',
          'typedefs', 'typedefs_generated',
          'cppmacros', 'cfuncs', 'callbacks',
          'latexdoc', 'restdoc',
          'routine_defs', 'externroutines',
          'initf2pywraphooks',
          'commonhooks', 'initcommonhooks',
          'f90modhooks', 'initf90modhooks']:
    sepdict[k] = '\n'
#################### Rules for C/API module #################
# Template for the whole generated <modulename>module.c file.  The
# '#...#' markers are substituted later by applyrules(); 'modulebody' is a
# single C template assembled at import time from string fragments and
# gentitle() section banners.
module_rules={
'modulebody':"""\
/* File: #modulename#module.c
* This file is auto-generated with f2py (version:#f2py_version#).
* f2py is a Fortran to Python Interface Generator (FPIG), Second Edition,
* written by Pearu Peterson <pearu@cens.ioc.ee>.
* See http://cens.ioc.ee/projects/f2py2e/
* Generation date: """+time.asctime(time.localtime(time.time()))+"""
* $R"""+"""evision:$
* $D"""+"""ate:$
* Do not edit this file directly unless you know what you are doing!!!
*/
#ifdef __cplusplus
extern \"C\" {
#endif
"""+gentitle("See f2py2e/cfuncs.py: includes")+"""
#includes#
#includes0#
"""+gentitle("See f2py2e/rules.py: mod_rules['modulebody']")+"""
static PyObject *#modulename#_error;
static PyObject *#modulename#_module;
"""+gentitle("See f2py2e/cfuncs.py: typedefs")+"""
#typedefs#
"""+gentitle("See f2py2e/cfuncs.py: typedefs_generated")+"""
#typedefs_generated#
"""+gentitle("See f2py2e/cfuncs.py: cppmacros")+"""
#cppmacros#
"""+gentitle("See f2py2e/cfuncs.py: cfuncs")+"""
#cfuncs#
"""+gentitle("See f2py2e/cfuncs.py: userincludes")+"""
#userincludes#
"""+gentitle("See f2py2e/capi_rules.py: usercode")+"""
#usercode#
/* See f2py2e/rules.py */
#externroutines#
"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#
"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#
"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#
"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#
"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""
"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#
"""+gentitle("See f2py2e/rules.py")+"""
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
#endif
#if PY_VERSION_HEX >= 0x03000000
#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
#else
#define RETVAL
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
\tPyObject *m,*d, *s;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
\ts = PyString_FromString(
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn RETVAL;
}
#ifdef __cplusplus
}
#endif
""",
# Section separators and the LaTeX/reST documentation skeletons for the
# module-level docs emitted next to the generated C.
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
'latexdoc':['\\section{Module \\texttt{#texmodulename#}}\n',
'#modnote#\n',
'#latexdoc#'],
'restdoc':['Module #modulename#\n'+'='*80,
'\n#restdoc#']
}
# Default/terminator values merged into the module dictionary so that every
# '#...#' placeholder used by module_rules has at least an "eof" marker even
# when no routine contributed content for it.
defmod_rules=[
{'body':'/*eof body*/',
'method':'/*eof method*/',
'externroutines':'/*eof externroutines*/',
'routine_defs':'/*eof routine_defs*/',
'initf90modhooks':'/*eof initf90modhooks*/',
'initf2pywraphooks':'/*eof initf2pywraphooks*/',
'initcommonhooks':'/*eof initcommonhooks*/',
'latexdoc':'',
'restdoc':'',
# 'modnote' expands to the user-supplied note only when one exists.
'modnote':{hasnote:'#note#',l_not(hasnote):''},
}
]
# Per-routine skeleton: 'body' is the C template for one wrapper function
# f2py_rout_<module>_<name>; the remaining keys feed the routine's entry in
# f2py_routine_defs, its docstring, and its LaTeX/reST documentation.
routine_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
# Routine table entry, extern declaration, docstring fragments and
# documentation skeletons for this wrapper.
'routine_defs':'#routine_def#',
'initf2pywraphooks':'#initf2pywraphook#',
'externroutines':'#declfortranroutine#',
'doc':'#docreturn##name#(#docsignature#)',
'docshort':'#docreturn##name#(#docsignatureshort#)',
'docs':'"\t#docreturn##name#(#docsignature#)\\n"\n',
'need':['arrayobject.h','CFUNCSMESS','MINMAX'],
'cppmacros':{debugcapi:'#define DEBUGCFUNCS'},
'latexdoc':['\\subsection{Wrapper function \\texttt{#texname#}}\n',
"""
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc':['Wrapped function ``#name#``\n'+'-'*80,
]
}
################## Rules for C/API function ##############
# Ordered list of rule dictionaries applied to each wrapped routine.  Each
# dict's '_check' predicate (from auxfuncs) selects whether it applies; a
# dict-valued entry maps predicates to alternative snippets; the first dict
# initializes every placeholder with its default/empty value.
rout_rules=[
{ # Init
'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n',
'routdebugleave':'\n','routdebugfailure':'\n',
'setjmpbuf':' || ',
'docstrreq':'\n','docstropt':'\n','docstrout':'\n',
'docstrcbs':'\n','docstrsigns':'\\n"\n"',
'latexdocstrsigns':'\n',
'latexdocstrreq':'\n','latexdocstropt':'\n',
'latexdocstrout':'\n','latexdocstrcbs':'\n',
},
'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'',
'docsign':'','docsignopt':'','decl':'/*decl*/',
'freemem':'/*freemem*/',
'docsignshort':'','docsignoptshort':'',
'docstrsigns':'','latexdocstrsigns':'',
'docstrreq':'\\nParameters\\n----------',
'docstropt':'\\nOther Parameters\\n----------------',
'docstrout':'\\nReturns\\n-------',
'docstrcbs':'\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
'latexdocstrcbs':'\\noindent Call-back functions:',
'args_capi':'','keys_capi':'','functype':'',
'frompyobj':'/*frompyobj*/',
'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed
'pyobjfrom':'/*pyobjfrom*/',
'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed
'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/',
'routdebugenter':'/*routdebugenter*/',
'routdebugfailure':'/*routdebugfailure*/',
'callfortranroutine':'/*callfortranroutine*/',
'argformat':'','keyformat':'','need_cfuncs':'',
'docreturn':'','return':'','returnformat':'','rformat':'',
'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'',
'initf2pywraphook':'',
'routnote':{hasnote:'--- #note#',l_not(hasnote):''},
},{
'apiname':'f2py_rout_#modulename#_#name#',
'pyname':'#modulename#.#name#',
'decl':'',
'_check':l_not(ismoduleroutine)
},{
'apiname':'f2py_rout_#modulename#_#f90modulename#_#name#',
'pyname':'#modulename#.#f90modulename#.#name#',
'decl':'',
'_check':ismoduleroutine
},{ # Subroutine
'functype':'void',
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern void #fortranname#(#callprotoargument#);',
ismoduleroutine:'',
isdummyroutine:''
},
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isdummyroutine):'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'need':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'F_FUNC'},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `#fortranname#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals:"""\t\t}"""}
],
'_check':l_and(issubroutine,l_not(issubroutine_wrap)),
},{ # Wrapped function
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':isfunction_wrap,
},{ # Wrapped subroutine
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern void #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':issubroutine_wrap,
},{ # Function
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
'docstrout':'#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi,l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check':l_and(isfunction,l_not(isfunction_wrap))
},{ # Scalar function
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine:''
},
'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine':[
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'},
{l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need':[{l_not(isdummyroutine):'F_FUNC'},
{iscomplexfunction:'pyobj_from_#ctype#1'},
{islong_longfunction:'long_long'},
{islong_doublefunction:'long_double'}],
'returnformat':{l_not(isintent_hide):'#rformat#'},
'return':{iscomplexfunction:',#name#_return_value_capi',
l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'},
'_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap))
},{ # String function # in use for --no-wrap
'declfortranroutine':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)):
# '\t{\"#name#\",-1,{{-1}},0,(char *)F_FUNC(#fortranname#,#FORTRANNAME#),(void *)#apiname#,doc_#apiname#},',
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c):
# '\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(void *)#apiname#,doc_#apiname#},'
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl':['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t\t}'},
{debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat':'#rformat#',
'return':',#name#_return_value',
'freemem':'\tSTRINGFREE(#name#_return_value);',
'need':['F_FUNC','#ctype#','STRINGFREE'],
'_check':l_and(isstringfunction,l_not(isfunction_wrap)) # ???obsolete
},
{ # Debugging
'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check':debugcapi
}
]
################ Rules for arguments ##################
# Maps a type predicate (from auxfuncs) to the name of the C typedef that the
# generated code must pull in (via the 'need' mechanism) when a variable of
# that kind appears.
typedef_need_dict = {islong_long:'long_long',
islong_double:'long_double',
islong_complex:'complex_long_double',
isunsigned_char:'unsigned_char',
isunsigned_short:'unsigned_short',
isunsigned:'unsigned',
isunsigned_long_long:'unsigned_long_long',
isunsigned_chararray:'unsigned_char',
isunsigned_shortarray:'unsigned_short',
isunsigned_long_longarray:'unsigned_long_long',
issigned_long_longarray:'long_long',
}
# Rules for auxiliary variables (local helper variables of a wrapped routine
# that are not passed in from Python).  Same selection mechanism as
# rout_rules: each dict applies when its '_check' predicate holds.
aux_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing auxiliary variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'need':typedef_need_dict,
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'need':{hasinitvalue:'math.h'},
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'_check':l_and(isscalar,l_not(iscomplex)),
},
{
'return':',#varname#',
'docstrout':'#pydocsignout#',
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':l_and(isscalar,l_not(iscomplex),isintent_out),
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':iscomplex
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray
},
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
arg_rules=[
{
'separatorsfor':sepdict
},
{ # Common
'frompyobj':['\t/* Processing variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'_depend':'',
'need':typedef_need_dict,
},
# Doc signatures
{
'docstropt':{l_and(isoptional,isintent_nothide):'#pydocsign#'},
'docstrreq':{l_and(isrequired,isintent_nothide):'#pydocsign#'},
'docstrout':{isintent_out:'#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote,isintent_hide):'--- #note#',
l_and(hasnote,isintent_nothide):'--- See above.'}]},
'depend':''
},
# Required/Optional arguments
{
'kwlist':'"#varname#",',
'docsign':'#varname#,',
'_check':l_and(isintent_nothide,l_not(isoptional))
},
{
'kwlistopt':'"#varname#",',
'docsignopt':'#varname#=#showinit#,',
'docsignoptshort':'#varname#,',
'_check':l_and(isintent_nothide,isoptional)
},
# Docstring/BuildValue
{
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':isintent_out
},
# Externals (call-back functions)
{ # Common
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl':['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'xaformat':{isintent_nothide:'O!'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
'need':['#cbname#','setjmp.h'],
'_check':isexternal
},
{
'frompyobj':[{l_not(isintent_callback):"""\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""},{isintent_callback:"""\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
## {l_not(isintent_callback):"""\
## if (#varname#_capi==Py_None) {
## printf(\"hoi\\n\");
## }
## """},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi:["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need':['SWAP','create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
# Scalars (not complex)
{ # Common
'decl':'\t#ctype# #varname# = 0;',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'return':{isintent_out:',#varname#'},
'_check':l_and(isscalar,l_not(iscomplex))
},{
'need':{hasinitvalue:'math.h'},
'_check':l_and(isscalar,l_not(iscomplex)),
#'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:"""\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide)
},{
'frompyobj':[
# hasinitvalue...
# if pyobj is None:
# varname = init
# else
# from_pyobj(varname)
#
# isoptional and noinitvalue...
# if pyobj is not None:
# from_pyobj(varname)
# else:
# varname is uninitialized
#
# ...
# from_pyobj(varname)
#
{hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend':''},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
'_depend':''},
{l_not(islogical):'''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical:'''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname#*/',
'need':{l_not(islogical):'#ctype#_from_pyobj'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide),
'_depend':''
# },{ # Hidden
# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
},{ # Hidden
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'need':typedef_need_dict,
'_check':l_and(isscalar,l_not(iscomplex),isintent_hide),
'_depend':''
},{ # Common
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'_check':l_and(isscalar,l_not(iscomplex)),
'_depend':''
},
# Complex scalars
{ # Common
'decl':'\t#ctype# #varname#;',
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'return':{isintent_out:',#varname#_capi'},
'_check':iscomplex
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'pyobjfrom':{isintent_inout:"""\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check':l_and(iscomplex,isintent_nothide)
},{
'frompyobj':[{hasinitvalue:'\tif (#varname#_capi==Py_None) {#varname#.r = #init.r#, #varname#.i = #init.i#;} else'},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)'},
# '\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\\n");'
'\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");'
'\n\tif (f2py_success) {'],
'cleanupfrompyobj':'\t} /*if (f2py_success) of #varname# frompyobj*/',
'need':['#ctype#_from_pyobj'],
'_check':l_and(iscomplex,isintent_nothide),
'_depend':''
},{ # Hidden
'decl':{isintent_out:'\tPyObject *#varname#_capi = Py_None;'},
'_check':l_and(iscomplex,isintent_hide)
},{
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':l_and(iscomplex,isintent_hide),
'_depend':''
},{ # Common
'pyobjfrom':{isintent_out:'\t#varname#_capi = pyobj_from_#ctype#1(#varname#);'},
'need':['pyobj_from_#ctype#1'],
'_check':iscomplex
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#.r,#varname#.i);'},
'_check':iscomplex,
'_depend':''
},
# String
{ # Common
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
'\tPyObject *#varname#_capi = Py_None;'],
'callfortran':'#varname#,',
'callfortranappend':'slen(#varname#),',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
# 'freemem':'\tSTRINGFREE(#varname#);',
'return':{isintent_out:',#varname#'},
'need':['len..'],#'STRINGFREE'],
'_check':isstring
},{ # Common
'frompyobj':"""\
\tslen(#varname#) = #length#;
\tf2py_success = #ctype#_from_pyobj(&#varname#,&slen(#varname#),#init#,#varname#_capi,\"#ctype#_from_pyobj failed in converting #nth# `#varname#\' of #pyname# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj':"""\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need':['#ctype#_from_pyobj','len..','STRINGFREE'],
'_check':isstring,
'_depend':''
},{ # Not hidden
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:'''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isstring,isintent_nothide)
},{ # Hidden
'_check':l_and(isstring,isintent_hide)
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check':isstring,
'_depend':''
},
# Array
{ # Common
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out:',capi_#varname#_tmp'},
'need':'len..',
'_check':isarray
},{ # intent(overwrite) array
'decl':'\tint capi_overwrite_#varname# = 1;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=1,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 1',
'_check':l_and(isarray,isintent_overwrite),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_overwrite),
'_depend':'',
},
{ # intent(copy) array
'decl':'\tint capi_overwrite_#varname# = 0;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=0,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 0',
'_check':l_and(isarray,isintent_copy),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_copy),
'_depend':'',
},{
'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray,
'_depend':''
},{ # Not hidden
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
# 'pyobjfrom':{isintent_inout:"""\
# /* Partly because of the following hack, intent(inout) is depreciated,
# Use intent(in,out) instead.
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
# */
# """},
# 'need':{isintent_inout:'copy_ND_array'},
'_check':l_and(isarray,isintent_nothide)
},{
'frompyobj':['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
""",
{hasinitvalue:[
{isintent_nothide:'\tif (#varname#_capi == Py_None) {'},
{isintent_hide:'\t{'},
{iscomplexarray:'\t\t#ctype# capi_c;'},
"""\
\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj':[ # note that this list will be reversed
'\t} /*if (capi_#varname#_tmp == NULL) ... else of #varname#*/',
{l_not(l_or(isintent_out,isintent_hide)):"""\
\tif((PyObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue:'\t} /*if (f2py_success) of #varname# init*/'},
],
'_check':isarray,
'_depend':''
},
# { # Hidden
# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_#varname#_tmp);'},
# '_check':l_and(isarray,isintent_hide)
# },
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
################# Rules for checking ###############
# check_rules: translate each `check(...)` expression attached to an
# argument in the signature file into C code that validates the argument
# at call time.  Applied per-check by buildapi() via applyrules(); the
# template #check# is the raw C expression from the signature.
check_rules=[
    {
        # Common to every check: in debug-capi builds, announce the check
        # expression before it is evaluated; 'len..' pulls in the length
        # helper macros that check expressions commonly use.
        'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
        'need':'len..'
    },{
        # Non-complex scalars: CHECKSCALAR opens a brace that
        # cleanupfrompyobj closes (cleanup list is emitted in reverse).
        'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
        'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
        'need':'CHECKSCALAR',
        '_check':l_and(isscalar,l_not(iscomplex)),
        '_break':''
    },{
        # Strings.
        'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
        'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
        'need':'CHECKSTRING',
        '_check':isstring,
        '_break':''
    },{
        # Arrays.
        'need':'CHECKARRAY',
        'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
        'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
        '_check':isarray,
        '_break':''
    },{
        # Fallback for any other variable kind: no '_check' (always
        # matches) and no '_break' (last rule in the chain).
        'need':'CHECKGENERIC',
        'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
        'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/',
    }
]
########## Applying the rules. No need to modify what follows #############
#################### Build C/API module #######################
def buildmodule(m, um):
    """
    Generate the C/API extension-module sources for one wrapped module.

    Parameters
    ----------
    m : dict
        f2py block dictionary describing the module; must provide 'name',
        'body' (interface blocks) and 'interfaced' (routine names), and
        optionally 'use' and 'entry' information.
    um : list of dict
        Block dictionaries of modules pulled in via Fortran ``use``.

    Returns
    -------
    ret : dict
        Paths of the files written: 'csrc' (C/API module source), plus
        'fsrc' (Fortran wrapper source) and 'ltx' (LaTeX documentation)
        when those outputs are produced.
    """
    global f2py_version, options
    outmess('\tBuilding module "%s"...\n'%(m['name']))
    ret = {}
    mod_rules = defmod_rules[:]
    vrd = modsign2map(m)
    rd = dictappend({'f2py_version': f2py_version}, vrd)
    funcwrappers = []   # Fortran 77 wrapper sources
    funcwrappers2 = []  # Fortran 90 wrapper sources (module routines)
    for n in m['interfaced']:
        # Locate the body of interfaced routine `n` inside the interface
        # blocks of the module body.
        nb = None
        for bi in m['body']:
            if not bi['block'] == 'interface':
                errmess('buildmodule: Expected interface block. Skipping.\n')
                continue
            for b in bi['body']:
                if b['name'] == n:
                    nb = b
                    break
        if not nb:
            errmess('buildmodule: Could not found the body of interfaced routine "%s". Skipping.\n'%(n))
            continue
        nb_list = [nb]
        if 'entry' in nb:
            # Each ENTRY statement becomes an additional wrapper derived
            # from the parent routine but with its own name and arguments.
            for k, a in nb['entry'].items():
                nb1 = copy.deepcopy(nb)
                del nb1['entry']
                nb1['name'] = k
                nb1['args'] = a
                nb_list.append(nb1)
        for nb in nb_list:
            api, wrap = buildapi(nb)
            if wrap:
                if ismoduleroutine(nb):
                    funcwrappers2.append(wrap)
                else:
                    funcwrappers.append(wrap)
            ar = applyrules(api, vrd)
            rd = dictappend(rd, ar)
    # Construct COMMON block support
    cr, wrap = common_rules.buildhooks(m)
    if wrap:
        funcwrappers.append(wrap)
    ar = applyrules(cr, vrd)
    rd = dictappend(rd, ar)
    # Construct F90 module support
    mr, wrap = f90mod_rules.buildhooks(m)
    if wrap:
        funcwrappers2.append(wrap)
    ar = applyrules(mr, vrd)
    rd = dictappend(rd, ar)
    for u in um:
        ar = use_rules.buildusevars(u, m['use'][u['name']])
        rd = dictappend(rd, ar)
    # Resolve every recorded "need" key into the corresponding C snippet
    # (include, typedef, macro, helper function, callback or hook).
    needs = cfuncs.get_needs()
    code = {}
    for n in needs:
        code[n] = []
        for k in needs[n]:
            c = ''
            if k in cfuncs.includes0:
                c = cfuncs.includes0[k]
            elif k in cfuncs.includes:
                c = cfuncs.includes[k]
            elif k in cfuncs.userincludes:
                c = cfuncs.userincludes[k]
            elif k in cfuncs.typedefs:
                c = cfuncs.typedefs[k]
            elif k in cfuncs.typedefs_generated:
                c = cfuncs.typedefs_generated[k]
            elif k in cfuncs.cppmacros:
                c = cfuncs.cppmacros[k]
            elif k in cfuncs.cfuncs:
                c = cfuncs.cfuncs[k]
            elif k in cfuncs.callbacks:
                c = cfuncs.callbacks[k]
            elif k in cfuncs.f90modhooks:
                c = cfuncs.f90modhooks[k]
            elif k in cfuncs.commonhooks:
                c = cfuncs.commonhooks[k]
            else:
                errmess('buildmodule: unknown need %s.\n'%(repr(k)))
                continue
            code[n].append(c)
    mod_rules.append(code)
    for r in mod_rules:
        if ('_check' in r and r['_check'](m)) or ('_check' not in r):
            ar = applyrules(r, vrd, m)
            rd = dictappend(rd, ar)
    ar = applyrules(module_rules, rd)

    # Write the C/API module source.  Context managers guarantee the
    # handles are closed even when a write fails (the original code
    # leaked the handle on error).
    fn = os.path.join(options['buildpath'], vrd['coutput'])
    ret['csrc'] = fn
    with open(fn, 'w') as f:
        f.write(ar['modulebody'].replace('\t', 2*' '))
    outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'], fn))

    if options['dorestdoc']:
        fn = os.path.join(options['buildpath'], vrd['modulename']+'module.rest')
        with open(fn, 'w') as f:
            f.write('.. -*- rest -*-\n')
            f.write('\n'.join(ar['restdoc']))
        outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'], vrd['modulename']))
    if options['dolatexdoc']:
        fn = os.path.join(options['buildpath'], vrd['modulename']+'module.tex')
        ret['ltx'] = fn
        with open(fn, 'w') as f:
            f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
            if 'shortlatex' not in options:
                f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
            f.write('\n'.join(ar['latexdoc']))
            if 'shortlatex' not in options:
                f.write('\\end{document}')
        outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'], vrd['modulename']))
    if funcwrappers:
        wn = os.path.join(options['buildpath'], vrd['f2py_wrapper_output'])
        ret['fsrc'] = wn
        with open(wn, 'w') as f:
            f.write('C -*- fortran -*-\n')
            f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
            f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
            lines = []
            # Fortran 77 fixed form: break long statement lines at column
            # 66 and continue with '&' in column 6 ('\n     &').
            for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'):
                if l and l[0] == ' ':
                    while len(l) >= 66:
                        lines.append(l[:66]+'\n     &')
                        l = l[66:]
                    lines.append(l+'\n')
                else:
                    lines.append(l+'\n')
            # Drop continuation markers left dangling before empty lines.
            lines = ''.join(lines).replace('\n     &\n', '\n')
            f.write(lines)
        outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
    if funcwrappers2:
        wn = os.path.join(options['buildpath'], '%s-f2pywrappers2.f90'%(vrd['modulename']))
        # NOTE(review): this overwrites ret['fsrc'] when both F77 and F90
        # wrappers exist — preserved as-is; confirm callers only expect one.
        ret['fsrc'] = wn
        with open(wn, 'w') as f:
            f.write('! -*- f90 -*-\n')
            f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
            f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
            lines = []
            # Fortran 90 free form: split over-long lines with a trailing
            # '&' and a '     &' continuation prefix on the next line.
            for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'):
                if len(l) > 72 and l[0] == ' ':
                    lines.append(l[:72]+'&\n     &')
                    l = l[72:]
                    while len(l) > 66:
                        lines.append(l[:66]+'&\n     &')
                        l = l[66:]
                    lines.append(l+'\n')
                else:
                    lines.append(l+'\n')
            lines = ''.join(lines).replace('\n     &\n', '\n')
            f.write(lines)
        outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
    return ret
################## Build C/API function #############
# Ordinal suffix for an argument's position, indexed by its last decimal
# digit (looked up as stnd[nth % 10]): 1 -> 'st', 2 -> 'nd', 3 -> 'rd',
# everything else, including 0, -> 'th'.
stnd = {digit: ('st' if digit == 1 else
                'nd' if digit == 2 else
                'rd' if digit == 3 else 'th')
        for digit in (1, 2, 3, 4, 5, 6, 7, 8, 9, 0)}
def buildapi(rout):
    """
    Build the C/API wrapper fragments for a single routine.

    Converts a wrapped function into a subroutine form when necessary
    (func2subr.assubr), then applies rout_rules to the routine and
    arg_rules/aux_rules (plus check_rules) to every argument, collecting
    the generated C code pieces into one rule dictionary.

    Returns the tuple (ar, wrap) where ar is the dictionary produced by
    applying routine_rules (C source fragments keyed by template slot)
    and wrap is the Fortran wrapper source, or '' when none is needed.
    """
    # assubr may replace the routine with a subroutine version and return
    # the Fortran wrapper source implementing the original function.
    rout,wrap = func2subr.assubr(rout)
    args,depargs=getargs2(rout)
    capi_maps.depargs=depargs
    var=rout['vars']
    # NOTE(review): auxvars appears unused in this function — confirm
    # before removing.
    auxvars = [a for a in var.keys() if isintent_aux(var[a])]
    if ismoduleroutine(rout):
        outmess('\t\t\tConstructing wrapper function "%s.%s"...\n'%(rout['modulename'],rout['name']))
    else:
        outmess('\t\tConstructing wrapper function "%s"...\n'%(rout['name']))
    # Routine
    vrd=routsign2map(rout)
    rd=dictappend({},vrd)
    for r in rout_rules:
        # A rule applies when it has no '_check' predicate or the
        # predicate accepts this routine.
        if ('_check' in r and r['_check'](rout)) or ('_check' not in r):
            ar=applyrules(r,vrd,rout)
            rd=dictappend(rd,ar)
    # Args
    # nth counts required positional arguments, nthk counts keyword
    # (optional) arguments; both feed the "#nth#" template used in
    # error messages ("1st argument", "2nd keyword", ...).
    nth,nthk=0,0
    savevrd={}
    # First pass: arguments with no '_depend' rules.
    for a in args:
        vrd=sign2map(a,var[a])
        if isintent_aux(var[a]):
            _rules = aux_rules
        else:
            _rules = arg_rules
        if not isintent_hide(var[a]):
            if not isoptional(var[a]):
                nth=nth+1
                vrd['nth']=repr(nth)+stnd[nth%10]+' argument'
            else:
                nthk=nthk+1
                vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword'
        else: vrd['nth']='hidden'
        savevrd[a]=vrd
        for r in _rules:
            if '_depend' in r:
                continue
            if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
                ar=applyrules(r,vrd,var[a])
                rd=dictappend(rd,ar)
                if '_break' in r:
                    break
    # Second pass: '_depend' rules, run in dependency order so that code
    # for an argument can refer to arguments it depends on.
    for a in depargs:
        if isintent_aux(var[a]):
            _rules = aux_rules
        else:
            _rules = arg_rules
        vrd=savevrd[a]
        for r in _rules:
            if '_depend' not in r:
                continue
            if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
                ar=applyrules(r,vrd,var[a])
                rd=dictappend(rd,ar)
                if '_break' in r:
                    break
        if 'check' in var[a]:
            # Emit validation code for every check() expression attached
            # to this argument in the signature file.
            for c in var[a]['check']:
                vrd['check']=c
                ar=applyrules(check_rules,vrd,var[a])
                rd=dictappend(rd,ar)
    # Cleanup/close code was accumulated in build order; it must run in
    # reverse (innermost braces closed first).
    if isinstance(rd['cleanupfrompyobj'], list):
        rd['cleanupfrompyobj'].reverse()
    if isinstance(rd['closepyobjfrom'], list):
        rd['closepyobjfrom'].reverse()
    # Assemble the documentation signatures (full and short forms).
    rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
                                          {'docsign':rd['docsign'],
                                           'docsignopt':rd['docsignopt'],
                                           'docsignxa':rd['docsignxa']}))
    optargs=stripcomma(replace('#docsignopt##docsignxa#',
                               {'docsignxa':rd['docsignxashort'],
                                'docsignopt':rd['docsignoptshort']}
                               ))
    if optargs=='':
        rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
    else:
        rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
                                        {'docsign':rd['docsign'],
                                         'docsignopt':optargs,
                                         })
    # Escape underscores and space out commas for the LaTeX output.
    rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_')
    rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ')
    # Build the Fortran call argument list; the Compaq-Fortran variant
    # inserts an extra 0 before the appended string-length arguments.
    cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
    if len(rd['callfortranappend'])>1:
        rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
    else:
        rd['callcompaqfortran']=cfs
    rd['callfortran']=cfs
    if isinstance(rd['docreturn'], list):
        rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
    # Concatenate the per-section docstrings and their LaTeX versions.
    rd['docstrsigns']=[]
    rd['latexdocstrsigns']=[]
    for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
        if k in rd and isinstance(rd[k], list):
            rd['docstrsigns']=rd['docstrsigns']+rd[k]
        k='latex'+k
        if k in rd and isinstance(rd[k], list):
            rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
                                    ['\\begin{description}']+rd[k][1:]+\
                                    ['\\end{description}']
    # Workaround for Python 2.6, 2.6.1 bug: http://bugs.python.org/issue4720
    if rd['keyformat'] or rd['xaformat']:
        argformat = rd['argformat']
        if isinstance(argformat, list):
            argformat.append('|')
        else:
            assert isinstance(argformat, str),repr((argformat, type(argformat)))
            rd['argformat'] += '|'
    ar=applyrules(routine_rules,rd)
    if ismoduleroutine(rout):
        outmess('\t\t\t %s\n'%(ar['docshort']))
    else:
        outmess('\t\t %s\n'%(ar['docshort']))
    return ar,wrap
#################### EOF rules.py #######################
| 37.786059 | 204 | 0.590006 |
from __future__ import division, absolute_import, print_function
__version__ = "$Revision: 1.129 $"[10:-1]
from . import __version__
f2py_version = __version__.version
import pprint
import sys
import time
import copy
from .auxfuncs import *
from . import capi_maps
from .capi_maps import *
from . import cfuncs
from . import common_rules
from . import use_rules
from . import f90mod_rules
from . import func2subr
errmess = sys.stderr.write
outmess = sys.stdout.write
show = pprint.pprint
options={}
sepdict={}
for k in ['decl',
'frompyobj',
'cleanupfrompyobj',
'topyarr','method',
'pyobjfrom','closepyobjfrom',
'freemem',
'userincludes',
'includes0','includes','typedefs','typedefs_generated',
'cppmacros','cfuncs','callbacks',
'latexdoc',
'restdoc',
'routine_defs','externroutines',
'initf2pywraphooks',
'commonhooks','initcommonhooks',
'f90modhooks','initf90modhooks']:
sepdict[k]='\n'
es.py: usercode")+"""
#usercode#
/* See f2py2e/rules.py */
#externroutines#
"""+gentitle("See f2py2e/capi_rules.py: usercode1")+"""
#usercode1#
"""+gentitle("See f2py2e/cb_rules.py: buildcallback")+"""
#callbacks#
"""+gentitle("See f2py2e/rules.py: buildapi")+"""
#body#
"""+gentitle("See f2py2e/f90mod_rules.py: buildhooks")+"""
#f90modhooks#
"""+gentitle("See f2py2e/rules.py: module_rules['modulebody']")+"""
"""+gentitle("See f2py2e/common_rules.py: buildhooks")+"""
#commonhooks#
"""+gentitle("See f2py2e/rules.py")+"""
static FortranDataDef f2py_routine_defs[] = {
#routine_defs#
\t{NULL}
};
static PyMethodDef f2py_module_methods[] = {
#pymethoddef#
\t{NULL,NULL}
};
#if PY_VERSION_HEX >= 0x03000000
static struct PyModuleDef moduledef = {
\tPyModuleDef_HEAD_INIT,
\t"#modulename#",
\tNULL,
\t-1,
\tf2py_module_methods,
\tNULL,
\tNULL,
\tNULL,
\tNULL
};
#endif
#if PY_VERSION_HEX >= 0x03000000
#define RETVAL m
PyMODINIT_FUNC PyInit_#modulename#(void) {
#else
#define RETVAL
PyMODINIT_FUNC init#modulename#(void) {
#endif
\tint i;
\tPyObject *m,*d, *s;
#if PY_VERSION_HEX >= 0x03000000
\tm = #modulename#_module = PyModule_Create(&moduledef);
#else
\tm = #modulename#_module = Py_InitModule(\"#modulename#\", f2py_module_methods);
#endif
\tPy_TYPE(&PyFortran_Type) = &PyType_Type;
\timport_array();
\tif (PyErr_Occurred())
\t\t{PyErr_SetString(PyExc_ImportError, \"can't initialize module #modulename# (failed to import numpy)\"); return RETVAL;}
\td = PyModule_GetDict(m);
\ts = PyString_FromString(\"$R"""+"""evision: $\");
\tPyDict_SetItemString(d, \"__version__\", s);
#if PY_VERSION_HEX >= 0x03000000
\ts = PyUnicode_FromString(
#else
\ts = PyString_FromString(
#endif
\t\t\"This module '#modulename#' is auto-generated with f2py (version:#f2py_version#).\\nFunctions:\\n\"\n#docs#\".\");
\tPyDict_SetItemString(d, \"__doc__\", s);
\t#modulename#_error = PyErr_NewException (\"#modulename#.error\", NULL, NULL);
\tPy_DECREF(s);
\tfor(i=0;f2py_routine_defs[i].name!=NULL;i++)
\t\tPyDict_SetItemString(d, f2py_routine_defs[i].name,PyFortranObject_NewAsAttr(&f2py_routine_defs[i]));
#initf2pywraphooks#
#initf90modhooks#
#initcommonhooks#
#interface_usercode#
#ifdef F2PY_REPORT_ATEXIT
\tif (! PyErr_Occurred())
\t\ton_exit(f2py_report_on_exit,(void*)\"#modulename#\");
#endif
\treturn RETVAL;
}
#ifdef __cplusplus
}
#endif
""",
'separatorsfor':{'latexdoc':'\n\n',
'restdoc':'\n\n'},
'latexdoc':['\\section{Module \\texttt{ ' ' 'restdoc':['Module '\n }
defmod_rules=[
{'body':'/*eof body*/',
'method':'/*eof method*/',
'externroutines':'/*eof externroutines*/',
'routine_defs':'/*eof routine_defs*/',
'initf90modhooks':'/*eof initf90modhooks*/',
'initf2pywraphooks':'/*eof initf2pywraphooks*/',
'initcommonhooks':'/*eof initcommonhooks*/',
'latexdoc':'',
'restdoc':'',
'modnote':{hasnote:'_rules={
'separatorsfor':sepdict,
'body':"""
#begintitle#
static char doc_#apiname#[] = \"\\\n#docreturn##name#(#docsignatureshort#)\\n\\nWrapper for ``#name#``.\\\n\\n#docstrsigns#\";
/* #declfortranroutine# */
static PyObject *#apiname#(const PyObject *capi_self,
PyObject *capi_args,
PyObject *capi_keywds,
#functype# (*f2py_func)(#callprotoargument#)) {
\tPyObject * volatile capi_buildvalue = NULL;
\tvolatile int f2py_success = 1;
#decl#
\tstatic char *capi_kwlist[] = {#kwlist##kwlistopt##kwlistxa#NULL};
#usercode#
#routdebugenter#
#ifdef F2PY_REPORT_ATEXIT
f2py_start_clock();
#endif
\tif (!PyArg_ParseTupleAndKeywords(capi_args,capi_keywds,\\
\t\t\"#argformat##keyformat##xaformat#:#pyname#\",\\
\t\tcapi_kwlist#args_capi##keys_capi##keys_xa#))\n\t\treturn NULL;
#frompyobj#
/*end of frompyobj*/
#ifdef F2PY_REPORT_ATEXIT
f2py_start_call_clock();
#endif
#callfortranroutine#
if (PyErr_Occurred())
f2py_success = 0;
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_call_clock();
#endif
/*end of callfortranroutine*/
\t\tif (f2py_success) {
#pyobjfrom#
/*end of pyobjfrom*/
\t\tCFUNCSMESS(\"Building return value.\\n\");
\t\tcapi_buildvalue = Py_BuildValue(\"#returnformat#\"#return#);
/*closepyobjfrom*/
#closepyobjfrom#
\t\t} /*if (f2py_success) after callfortranroutine*/
/*cleanupfrompyobj*/
#cleanupfrompyobj#
\tif (capi_buildvalue == NULL) {
#routdebugfailure#
\t} else {
#routdebugleave#
\t}
\tCFUNCSMESS(\"Freeing memory.\\n\");
#freemem#
#ifdef F2PY_REPORT_ATEXIT
f2py_stop_clock();
#endif
\treturn capi_buildvalue;
}
#endtitle#
""",
'routine_defs':' 'initf2pywraphooks':' 'externroutines':' 'doc':'tion \\texttt{ """
\\noindent{{}\\verb@#docreturn##name#@{}}\\texttt{(#latexdocsignatureshort#)}
#routnote#
#latexdocstrsigns#
"""],
'restdoc':['Wrapped function `` ]
}
################## Rules for C/API function ##############
rout_rules=[
{ # Init
'separatorsfor': {'callfortranroutine':'\n','routdebugenter':'\n','decl':'\n',
'routdebugleave':'\n','routdebugfailure':'\n',
'setjmpbuf':' || ',
'docstrreq':'\n','docstropt':'\n','docstrout':'\n',
'docstrcbs':'\n','docstrsigns':'\\n"\n"',
'latexdocstrsigns':'\n',
'latexdocstrreq':'\n','latexdocstropt':'\n',
'latexdocstrout':'\n','latexdocstrcbs':'\n',
},
'kwlist':'','kwlistopt':'','callfortran':'','callfortranappend':'',
'docsign':'','docsignopt':'','decl':'/*decl*/',
'freemem':'/*freemem*/',
'docsignshort':'','docsignoptshort':'',
'docstrsigns':'','latexdocstrsigns':'',
'docstrreq':'\\nParameters\\n----------',
'docstropt':'\\nOther Parameters\\n----------------',
'docstrout':'\\nReturns\\n-------',
'docstrcbs':'\\nNotes\\n-----\\nCall-back functions::\\n',
'latexdocstrreq':'\\noindent Required arguments:',
'latexdocstropt':'\\noindent Optional arguments:',
'latexdocstrout':'\\noindent Return objects:',
'latexdocstrcbs':'\\noindent Call-back functions:',
'args_capi':'','keys_capi':'','functype':'',
'frompyobj':'/*frompyobj*/',
'cleanupfrompyobj':['/*end of cleanupfrompyobj*/'], #this list will be reversed
'pyobjfrom':'/*pyobjfrom*/',
'closepyobjfrom':['/*end of closepyobjfrom*/'], #this list will be reversed
'topyarr':'/*topyarr*/','routdebugleave':'/*routdebugleave*/',
'routdebugenter':'/*routdebugenter*/',
'routdebugfailure':'/*routdebugfailure*/',
'callfortranroutine':'/*callfortranroutine*/',
'argformat':'','keyformat':'','need_cfuncs':'',
'docreturn':'','return':'','returnformat':'','rformat':'',
'kwlistxa':'','keys_xa':'','xaformat':'','docsignxa':'','docsignxashort':'',
'initf2pywraphook':'',
'routnote':{hasnote:'--- ame':'f2py_rout_ '_check':l_not(ismoduleroutine)
},{
'apiname':'f2py_rout_l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern void ine)):'\t{\"#name#\",-1,{{-1}},0,(char *)tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\t\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t\t\t\t#callstatement#;
\t\t\t\t/*(*f2py_func)(#callfortran#);*/'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t\t\t\t(*f2py_func)(#callfortran#);'},
{isthreadsafe:'\t\t\tPy_END_ALLOW_THREADS'},
{hasexternals:"""\t\t}"""}
],
'_check':l_and(issubroutine,l_not(issubroutine_wrap)),
},{
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void #F_WRAPPEDFUNC#(#name_lower#,#NAME#)(#callprotoargument#);',
isdummyroutine:'',
},
'routine_def':{l_not(l_or(ismoduleroutine,isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_WRAPPEDFUNC#(#name_lower#,#NAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'initf2pywraphook':{l_not(l_or(ismoduleroutine,isdummyroutine)):'''
{
extern #ctype# #F_FUNC#(#name_lower#,#NAME#)(void);
PyObject* o = PyDict_GetItemString(d,"#name#");
PyObject_SetAttrString(o,"_cpointer", F2PyCapsule_FromVoidPtr((void*)#F_FUNC#(#name_lower#,#NAME#),NULL));
#if PY_VERSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(hascallstatement:'\t}
],
'_check':isfunction_wrap,
},{ # Wrapped subroutine
'functype':'void',
'declfortranroutine':{l_not(l_or(ismoduleroutine,isdummyroutine)):'extern void RSION_HEX >= 0x03000000
PyObject_SetAttrString(o,"__name__", PyUnicode_FromString("#name#"));
#else
PyObject_SetAttrString(o,"__name__", PyString_FromString("#name#"));
#endif
}
'''},
'need':{l_not(l_or(ismoduleroutine,isdummyroutine)):['F_WRAPPEDFUNC','F_FUNC']},
'callfortranroutine':[
{debugcapi:["""\tfprintf(stderr,\"debug-capi:Fortran subroutine `f2pywrap#name_lower#(#callfortran#)\'\\n\");"""]},
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t(*f2py_func)(#callfortran#);'},
{hascallstatement:'\t#callstatement#;\n\t/*(*f2py_func)(#callfortran#);*/'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'}
],
'_check':issubroutine_wrap,
},{
'functype':'#ctype#',
'docreturn':{l_not(isintent_hide):'#rname#,'},
'docstrout':'#pydocsignout#',
'latexdocstrout':['\\item[]{{}\\verb@#pydocsignout#@{}}',
{hasresultnote:'--- #resultnote#'}],
'callfortranroutine':[{l_and(debugcapi,isstringfunction):"""\
#ifdef USESCOMPAQFORTRAN
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callcompaqfortran#)\\n\");
#else
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
#endif
"""},
{l_and(debugcapi,l_not(isstringfunction)):"""\
\tfprintf(stderr,\"debug-capi:Fortran function #ctype# #fortranname#(#callfortran#)\\n\");
"""}
],
'_check':l_and(isfunction,l_not(isfunction_wrap))
},{
'declfortranroutine':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'extern #ctype# #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'extern #ctype# #fortranname#(#callprotoargument#);',
isdummyroutine:''
},
'routine_def':{l_and(l_not(l_or(ismoduleroutine,isintent_c)),l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c,l_not(isdummyroutine)):'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},',
isdummyroutine:'\t{\"#name#\",-1,{{-1}},0,NULL,(f2py_init_func)#apiname#,doc_#apiname#},',
},
'decl':[{iscomplexfunction_warn:'\t#ctype# #name#_return_value={0,0};',
l_not(iscomplexfunction):'\t#ctype# #name#_return_value=0;'},
{iscomplexfunction:'\tPyObject *#name#_return_value_capi = Py_None;'}
],
'callfortranroutine':[
{hasexternals:"""\
\tif (#setjmpbuf#) {
\t\tf2py_success = 0;
\t} else {"""},
{isthreadsafe:'\tPy_BEGIN_ALLOW_THREADS'},
{hascallstatement:'''\t#callstatement#;
/*\t#name#_return_value = (*f2py_func)(#callfortran#);*/
'''},
{l_not(l_or(hascallstatement,isdummyroutine)):'\t#name#_return_value = (*f2py_func)(#callfortran#);'},
{isthreadsafe:'\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t}'},
{l_and(debugcapi,iscomplexfunction):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value.r,#name#_return_value.i);'},
{l_and(debugcapi,l_not(iscomplexfunction)):'\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value);'}],
'pyobjfrom':{iscomplexfunction:'\t#name#_return_value_capi = pyobj_from_#ctype#1(#name#_return_value);'},
'need':[{l_not(isdummyroutine):'F_FUNC'},
{iscomplexfunction:'pyobj_from_#ctype#1'},
{islong_longfunction:'long_long'},
{islong_doublefunction:'long_double'}],
'returnformat':{l_not(isintent_hide):'#rformat#'},
'return':{iscomplexfunction:',#name#_return_value_capi',
l_not(l_or(iscomplexfunction,isintent_hide)):',#name#_return_value'},
'_check':l_and(isfunction,l_not(isstringfunction),l_not(isfunction_wrap))
},{ ne':'extern void #F_FUNC#(#fortranname#,#FORTRANNAME#)(#callprotoargument#);',
'routine_def':{l_not(l_or(ismoduleroutine,isintent_c)):
'\t{\"#name#\",-1,{{-1}},0,(char *)#F_FUNC#(#fortranname#,#FORTRANNAME#),(f2py_init_func)#apiname#,doc_#apiname#},',
l_and(l_not(ismoduleroutine),isintent_c):
'\t{\"#name#\",-1,{{-1}},0,(char *)#fortranname#,(f2py_init_func)#apiname#,doc_#apiname#},'
},
'decl':['\t#ctype# #name#_return_value = NULL;',
'\tint #name#_return_value_len = 0;'],
'callfortran':'#name#_return_value,#name#_return_value_len,',
'callfortranroutine':['\t#name#_return_value_len = #rlength#;',
'\tif ((#name#_return_value = (string)malloc(sizeof(char)*(#name#_return_value_len+1))) == NULL) {',
'\t\tPyErr_SetString(PyExc_MemoryError, \"out of memory\");',
'\t\tf2py_success = 0;',
'\t} else {',
"\t\t(#name#_return_value)[#name#_return_value_len] = '\\0';",
'\t}',
'\tif (f2py_success) {',
{hasexternals:"""\
\t\tif (#setjmpbuf#) {
\t\t\tf2py_success = 0;
\t\t} else {"""},
{isthreadsafe:'\t\tPy_BEGIN_ALLOW_THREADS'},
"""\
#ifdef USESCOMPAQFORTRAN
\t\t(*f2py_func)(#callcompaqfortran#);
#else
\t\t(*f2py_func)(#callfortran#);
#endif
""",
{isthreadsafe:'\t\tPy_END_ALLOW_THREADS'},
{hasexternals:'\t\t}'},
{debugcapi:'\t\tfprintf(stderr,"#routdebugshowvalue#\\n",#name#_return_value_len,#name#_return_value);'},
'\t} /* if (f2py_success) after (string)malloc */',
],
'returnformat':'#rformat#',
'return':',#name#_return_value',
'freemem':'\tSTRINGFREE(#name#_return_value);',
'need':['F_FUNC','#ctype#','STRINGFREE'],
'_check':l_and(isstringfunction,l_not(isfunction_wrap))
},
{
'routdebugenter':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#(#docsignature#)\\n");',
'routdebugleave':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: successful.\\n");',
'routdebugfailure':'\tfprintf(stderr,"debug-capi:Python C/API function #modulename#.#name#: failure.\\n");',
'_check':debugcapi
}
]
End of cleaning variable #varname# */',
'need':typedef_need_dict,
},
{
'decl':'\t#ctype# #varname# = 0;',
'need':{hasinitvalue:'math.h'},
'frompyobj':{hasinitvalue:'\t#varname# = #init#;'},
'_check':l_and(isscalar,l_not(iscomplex)),
},
{
'return':',#varname#',
'docstrout':'#pydocsignout#',
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':l_and(isscalar,l_not(iscomplex),isintent_out),
},
{
'decl':'\t#ctype# #varname#;',
'frompyobj': {hasinitvalue:'\t#varname#.r = #init.r#, #varname#.i = #init.i#;'},
'_check':iscomplex
},
{
'decl':['\t#ctype# #varname# = NULL;',
'\tint slen(#varname#);',
],
'need':['len..'],
'_check':isstring
},
{
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
],
'need':['len..',{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray
},
{
'_check':l_and(isarray,l_not(iscomplexarray))
},{
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
{'need':'#ctype#',
'_check':isint1array,
'_depend':''
},
{'need':'#ctype#',
'_check':isunsigned_chararray,
'_depend':''
},
{'need':'#ctype#',
'_check':isunsigned_shortarray,
'_depend':''
},
{'need':'#ctype#',
'_check':isunsigned_long_longarray,
'_depend':''
},
{'need':'#ctype#',
'_check':iscomplexarray,
'_depend':''
},
{
'callfortranappend':{isarrayofstrings:'flen(#varname#),'},
'need':'string',
'_check':isstringarray
}
]
arg_rules=[
{
'separatorsfor':sepdict
},
{
'frompyobj':['\t/* Processing variable #varname# */',
{debugcapi:'\tfprintf(stderr,"#vardebuginfo#\\n");'},],
'cleanupfrompyobj':'\t/* End of cleaning variable #varname# */',
'_depend':'',
'need':typedef_need_dict,
},
{
'docstropt':{l_and(isoptional,isintent_nothide):'#pydocsign#'},
'docstrreq':{l_and(isrequired,isintent_nothide):'#pydocsign#'},
'docstrout':{isintent_out:'#pydocsignout#'},
'latexdocstropt':{l_and(isoptional,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrreq':{l_and(isrequired,isintent_nothide):['\\item[]{{}\\verb@#pydocsign#@{}}',
{hasnote:'--- #note#'}]},
'latexdocstrout':{isintent_out:['\\item[]{{}\\verb@#pydocsignout#@{}}',
{l_and(hasnote,isintent_hide):'--- #note#',
l_and(hasnote,isintent_nothide):'--- See above.'}]},
'depend':''
},
{
'kwlist':'"#varname#",',
'docsign':'#varname#,',
'_check':l_and(isintent_nothide,l_not(isoptional))
},
{
'kwlistopt':'"#varname#",',
'docsignopt':'#varname#=#showinit#,',
'docsignoptshort':'#varname#,',
'_check':l_and(isintent_nothide,isoptional)
},
{
'docreturn':'#outvarname#,',
'returnformat':'#varrformat#',
'_check':isintent_out
},
{
'docsignxa':{isintent_nothide:'#varname#_extra_args=(),'},
'docsignxashort':{isintent_nothide:'#varname#_extra_args,'},
'docstropt':{isintent_nothide:'#varname#_extra_args : input tuple, optional\\n Default: ()'},
'docstrcbs':'#cbdocstr#',
'latexdocstrcbs':'\\item[] #cblatexdocstr#',
'latexdocstropt':{isintent_nothide:'\\item[]{{}\\verb@#varname#_extra_args := () input tuple@{}} --- Extra arguments for call-back function {{}\\verb@#varname#@{}}.'},
'decl':['\tPyObject *#varname#_capi = Py_None;',
'\tPyTupleObject *#varname#_xa_capi = NULL;',
'\tPyTupleObject *#varname#_args_capi = NULL;',
'\tint #varname#_nofargs_capi = 0;',
{l_not(isintent_callback):'\t#cbname#_typedef #varname#_cptr;'}
],
'kwlistxa':{isintent_nothide:'"#varname#_extra_args",'},
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'xaformat':{isintent_nothide:'O!'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'keys_xa':',&PyTuple_Type,&#varname#_xa_capi',
'setjmpbuf':'(setjmp(#cbname#_jmpbuf))',
'callfortran':{l_not(isintent_callback):'#varname#_cptr,'},
'need':['#cbname#','setjmp.h'],
'_check':isexternal
},
{
'frompyobj':[{l_not(isintent_callback):"""\
if(F2PyCapsule_Check(#varname#_capi)) {
#varname#_cptr = F2PyCapsule_AsVoidPtr(#varname#_capi);
} else {
#varname#_cptr = #cbname#;
}
"""},{isintent_callback:"""\
if (#varname#_capi==Py_None) {
#varname#_capi = PyObject_GetAttrString(#modulename#_module,\"#varname#\");
if (#varname#_capi) {
if (#varname#_xa_capi==NULL) {
if (PyObject_HasAttrString(#modulename#_module,\"#varname#_extra_args\")) {
PyObject* capi_tmp = PyObject_GetAttrString(#modulename#_module,\"#varname#_extra_args\");
if (capi_tmp)
#varname#_xa_capi = (PyTupleObject *)PySequence_Tuple(capi_tmp);
else
#varname#_xa_capi = (PyTupleObject *)Py_BuildValue(\"()\");
if (#varname#_xa_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Failed to convert #modulename#.#varname#_extra_args to tuple.\\n\");
return NULL;
}
}
}
}
if (#varname#_capi==NULL) {
PyErr_SetString(#modulename#_error,\"Callback #varname# not defined (as an argument or module #modulename# attribute).\\n\");
return NULL;
}
}
"""},
## printf(\"hoi\\n\");
## }
## """},
"""\
\t#varname#_nofargs_capi = #cbname#_nofargs;
\tif (create_cb_arglist(#varname#_capi,#varname#_xa_capi,#maxnofargs#,#nofoptargs#,&#cbname#_nofargs,&#varname#_args_capi,\"failed in processing argument list for call-back #varname#.\")) {
\t\tjmp_buf #varname#_jmpbuf;""",
{debugcapi:["""\
\t\tfprintf(stderr,\"debug-capi:Assuming %d arguments; at most #maxnofargs#(-#nofoptargs#) is expected.\\n\",#cbname#_nofargs);
\t\tCFUNCSMESSPY(\"for #varname#=\",#cbname#_capi);""",
{l_not(isintent_callback):"""\t\tfprintf(stderr,\"#vardebugshowvalue# (call-back in C).\\n\",#cbname#);"""}]},
"""\
\t\tCFUNCSMESS(\"Saving jmpbuf for `#varname#`.\\n\");
\t\tSWAP(#varname#_capi,#cbname#_capi,PyObject);
\t\tSWAP(#varname#_args_capi,#cbname#_args_capi,PyTupleObject);
\t\tmemcpy(&#varname#_jmpbuf,&#cbname#_jmpbuf,sizeof(jmp_buf));""",
],
'cleanupfrompyobj':
"""\
\t\tCFUNCSMESS(\"Restoring jmpbuf for `#varname#`.\\n\");
\t\t#cbname#_capi = #varname#_capi;
\t\tPy_DECREF(#cbname#_args_capi);
\t\t#cbname#_args_capi = #varname#_args_capi;
\t\t#cbname#_nofargs = #varname#_nofargs_capi;
\t\tmemcpy(&#cbname#_jmpbuf,&#varname#_jmpbuf,sizeof(jmp_buf));
\t}""",
'need':['SWAP','create_cb_arglist'],
'_check':isexternal,
'_depend':''
},
{
'decl':'\t#ctype# #varname# = 0;',
'pyobjfrom':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",#varname#);'},
'callfortran':{isintent_c:'#varname#,',l_not(isintent_c):'&#varname#,'},
'return':{isintent_out:',#varname#'},
'_check':l_and(isscalar,l_not(iscomplex))
},{
'need':{hasinitvalue:'math.h'},
'_check':l_and(isscalar,l_not(iscomplex)),
},{
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:"""\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isscalar,l_not(iscomplex),isintent_nothide)
},{
'frompyobj':[
{hasinitvalue:'\tif (#varname#_capi == Py_None) #varname# = #init#; else',
'_depend':''},
{l_and(isoptional,l_not(hasinitvalue)):'\tif (#varname#_capi != Py_None)',
'_depend':''},
{l_not(islogical):'''\
\t\tf2py_success = #ctype#_from_pyobj(&#varname#,#varname#_capi,"#pyname#() #nth# (#varname#) can\'t be converted to #ctype#");
\tif (f2py_success) {'''},
{islogical:'''\
\t\t#varname# = (#ctype#)PyObject_IsTrue(#varname#_capi);
\t\tf2py_success = 1;
\tif (f2py_success) {'''},
],
'cleanupfrompyobj':'\t} /*if (f2py_success) of 'need':{l_not(islogical):'_and(isscalar,l_not(iscomplex),isintent_nothide),
'_depend':''
# },{ # Hidden
# '_check':l_and(isscalar,l_not(iscomplex),isintent_hide)
},{ # Hidden
'frompyobj':{hasinitvalue:'\t,
'_check':l_and(isscalar,l_not(iscomplex),isintent_hide),
'_depend':''
},{ # Common
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",_check':l_and(isscalar,l_not(iscomplex)),
'_depend':''
},
# Complex scalars
{ # Common
'decl':'\t:'gshowvalue#\\n",
},{ # Not hidden
'decl':'\tPyObject *srequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&s_capi':{isoptional:',&d':{isintent_inout:'try_pyarr_from_ 'pyobjfrom':{isintent_inout:"""\
\t\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,&#varname#);
\t\tif (f2py_success) {"""},
'closepyobjfrom':{isintent_inout:"\t\t} /*if (f2py_success) of #varname# pyobjfrom*/"},
'_check':l_and(iscomplex,isintent_nothide)
},{
'frompyobj':[{hasinitvalue:'\tif (e# to C #ctype#\");
\tif (f2py_success) {""",
'cleanupfrompyobj':"""\
\t\tSTRINGFREE(#varname#);
\t} /*if (f2py_success) of #varname#*/""",
'need':['#ctype#_from_pyobj','len..','STRINGFREE'],
'_check':isstring,
'_depend':''
},{
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
'pyobjfrom':{isintent_inout:'''\
\tf2py_success = try_pyarr_from_#ctype#(#varname#_capi,#varname#);
\tif (f2py_success) {'''},
'closepyobjfrom':{isintent_inout:'\t} /*if (f2py_success) of #varname# pyobjfrom*/'},
'need':{isintent_inout:'try_pyarr_from_#ctype#'},
'_check':l_and(isstring,isintent_nothide)
},{
'_check':l_and(isstring,isintent_hide)
},{
'frompyobj':{debugcapi:'\tfprintf(stderr,"#vardebugshowvalue#\\n",slen(#varname#),#varname#);'},
'_check':isstring,
'_depend':''
},
{
'decl':['\t#ctype# *#varname# = NULL;',
'\tnpy_intp #varname#_Dims[#rank#] = {#rank*[-1]#};',
'\tconst int #varname#_Rank = #rank#;',
'\tPyArrayObject *capi_#varname#_tmp = NULL;',
'\tint capi_#varname#_intent = 0;',
],
'callfortran':'#varname#,',
'return':{isintent_out:',capi_#varname#_tmp'},
'need':'len..',
'_check':isarray
},{
'decl':'\tint capi_overwrite_#varname# = 1;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=1,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 1',
'_check':l_and(isarray,isintent_overwrite),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_overwrite),
'_depend':'',
},
{
'decl':'\tint capi_overwrite_#varname# = 0;',
'kwlistxa':'"overwrite_#varname#",',
'xaformat':'i',
'keys_xa':',&capi_overwrite_#varname#',
'docsignxa':'overwrite_#varname#=0,',
'docsignxashort':'overwrite_#varname#,',
'docstropt':'overwrite_#varname# : input int, optional\\n Default: 0',
'_check':l_and(isarray,isintent_copy),
},{
'frompyobj':'\tcapi_#varname#_intent |= (capi_overwrite_#varname#?0:F2PY_INTENT_COPY);',
'_check':l_and(isarray,isintent_copy),
'_depend':'',
},{
'need':[{hasinitvalue:'forcomb'},{hasinitvalue:'CFUNCSMESS'}],
'_check':isarray,
'_depend':''
},{
'decl':'\tPyObject *#varname#_capi = Py_None;',
'argformat':{isrequired:'O'},
'keyformat':{isoptional:'O'},
'args_capi':{isrequired:',&#varname#_capi'},
'keys_capi':{isoptional:',&#varname#_capi'},
# /* Partly because of the following hack, intent(inout) is depreciated,
# Use intent(in,out) instead.
# \tif ((#varname#_capi != Py_None) && PyArray_Check(#varname#_capi) \\
# \t\t&& (#varname#_capi != (PyObject *)capi_#varname#_tmp)) {
# \t\tif (((PyArrayObject *)#varname#_capi)->nd != capi_#varname#_tmp->nd) {
# \t\t\tif (#varname#_capi != capi_#varname#_tmp->base)
# \t\t\t\tcopy_ND_array((PyArrayObject *)capi_#varname#_tmp->base,(PyArrayObject *)#varname#_capi);
# \t\t} else
# \t\t\tcopy_ND_array(capi_#varname#_tmp,(PyArrayObject *)#varname#_capi);
# \t}
# */
# """},
'_check':l_and(isarray,isintent_nothide)
},{
'frompyobj':['\t#setdims#;',
'\tcapi_#varname#_intent |= #intent#;',
{isintent_hide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,Py_None);'},
{isintent_nothide:'\tcapi_#varname#_tmp = array_from_pyobj(#atype#,#varname#_Dims,#varname#_Rank,capi_#varname#_intent,#varname#_capi);'},
"""\
\tif (capi_#varname#_tmp == NULL) {
\t\tif (!PyErr_Occurred())
\t\t\tPyErr_SetString(#modulename#_error,\"failed in converting #nth# `#varname#\' of #pyname# to C/Fortran array\" );
\t} else {
\t\t#varname# = (#ctype# *)(capi_#varname#_tmp->data);
""",
{hasinitvalue:[
{isintent_nothide:'\tif ({'},
{iscomplexarray:'\t\t\tint *_i,capi_i=0;
\t\tCFUNCSMESS(\"#name#: Initializing #varname#=#init#\\n\");
\t\tif (initforcomb(capi_#varname#_tmp->dimensions,capi_#varname#_tmp->nd,1)) {
\t\t\twhile ((_i = nextforcomb()))
\t\t\t\t#varname#[capi_i++] = #init#; /* fortran way */
\t\t} else {
\t\t\tif (!PyErr_Occurred())
\t\t\t\tPyErr_SetString(#modulename#_error,\"Initialization of #nth# #varname# failed (initforcomb).\");
\t\t\tf2py_success = 0;
\t\t}
\t}
\tif (f2py_success) {"""]},
],
'cleanupfrompyobj':[ # note that this list will be reversed
'\t} /*if (capi_yObject *)capi_#varname#_tmp!=#varname#_capi) {
\t\tPy_XDECREF(capi_#varname#_tmp); }"""},
{l_and(isintent_hide,l_not(isintent_out)):"""\t\tPy_XDECREF(capi_#varname#_tmp);"""},
{hasinitvalue:'\t} /*if (f2py_success) of '_check':isarray,
'_depend':''
},
# { # Hidden
# 'freemem':{l_not(isintent_out):'\tPy_XDECREF(capi_eck':l_and(isarray,isintent_hide)
# },
# Scalararray
{ # Common
'_check':l_and(isarray,l_not(iscomplexarray))
},{ # Not hidden
'_check':l_and(isarray,l_not(iscomplexarray),isintent_nothide)
},
# Integer*1 array
{'need':' '_check':isint1array,
'_depend':''
},
# Integer*-1 array
{'need':' '_check':isunsigned_chararray,
'_depend':''
},
# Integer*-2 array
{'need':' '_check':isunsigned_shortarray,
'_depend':''
},
# Integer*-8 array
{'need':' '_check':isunsigned_long_longarray,
'_depend':''
},
# Complexarray
{'need':' '_check':iscomplexarray,
'_depend':''
},
# Stringarray
{
'callfortranappend':{isarrayofstrings:'flen('need':'string',
'_check':isstringarray
}
]
################# Rules for checking ###############
check_rules=[
{
'frompyobj':{debugcapi:'\tfprintf(stderr,\"debug-capi:Checking `#check#\'\\n\");'},
'need':'len..'
},{
'frompyobj':'\tCHECKSCALAR(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSCALAR(#check#)*/',
'need':'CHECKSCALAR',
'_check':l_and(isscalar,l_not(iscomplex)),
'_break':''
},{
'frompyobj':'\tCHECKSTRING(#check#,\"#check#\",\"#nth# #varname#\",\"#varshowvalue#\",#varname#) {',
'cleanupfrompyobj':'\t} /*CHECKSTRING(#check#)*/',
'need':'CHECKSTRING',
'_check':isstring,
'_break':''
},{
'need':'CHECKARRAY',
'frompyobj':'\tCHECKARRAY(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKARRAY(#check#)*/',
'_check':isarray,
'_break':''
},{
'need':'CHECKGENERIC',
'frompyobj':'\tCHECKGENERIC(#check#,\"#check#\",\"#nth# #varname#\") {',
'cleanupfrompyobj':'\t} /*CHECKGENERIC(#check#)*/',
}
]
userincludes:
c=cfuncs.userincludes[k]
elif k in cfuncs.typedefs:
c=cfuncs.typedefs[k]
elif k in cfuncs.typedefs_generated:
c=cfuncs.typedefs_generated[k]
elif k in cfuncs.cppmacros:
c=cfuncs.cppmacros[k]
elif k in cfuncs.cfuncs:
c=cfuncs.cfuncs[k]
elif k in cfuncs.callbacks:
c=cfuncs.callbacks[k]
elif k in cfuncs.f90modhooks:
c=cfuncs.f90modhooks[k]
elif k in cfuncs.commonhooks:
c=cfuncs.commonhooks[k]
else:
errmess('buildmodule: unknown need %s.\n'%(repr(k)));continue
code[n].append(c)
mod_rules.append(code)
for r in mod_rules:
if ('_check' in r and r['_check'](m)) or ('_check' not in r):
ar=applyrules(r,vrd,m)
rd=dictappend(rd,ar)
ar=applyrules(module_rules,rd)
fn = os.path.join(options['buildpath'],vrd['coutput'])
ret['csrc'] = fn
f=open(fn,'w')
f.write(ar['modulebody'].replace('\t',2*' '))
f.close()
outmess('\tWrote C/API module "%s" to file "%s"\n'%(m['name'],fn))
if options['dorestdoc']:
fn = os.path.join(options['buildpath'],vrd['modulename']+'module.rest')
f=open(fn,'w')
f.write('.. -*- rest -*-\n')
f.write('\n'.join(ar['restdoc']))
f.close()
outmess('\tReST Documentation is saved to file "%s/%smodule.rest"\n'%(options['buildpath'],vrd['modulename']))
if options['dolatexdoc']:
fn = os.path.join(options['buildpath'],vrd['modulename']+'module.tex')
ret['ltx'] = fn
f=open(fn,'w')
f.write('%% This file is auto-generated with f2py (version:%s)\n'%(f2py_version))
if 'shortlatex' not in options:
f.write('\\documentclass{article}\n\\usepackage{a4wide}\n\\begin{document}\n\\tableofcontents\n\n')
f.write('\n'.join(ar['latexdoc']))
if 'shortlatex' not in options:
f.write('\\end{document}')
f.close()
outmess('\tDocumentation is saved to file "%s/%smodule.tex"\n'%(options['buildpath'],vrd['modulename']))
if funcwrappers:
wn = os.path.join(options['buildpath'],vrd['f2py_wrapper_output'])
ret['fsrc'] = wn
f=open(wn,'w')
f.write('C -*- fortran -*-\n')
f.write('C This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('C It contains Fortran 77 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers)+'\n').split('\n'):
if l and l[0]==' ':
while len(l)>=66:
lines.append(l[:66]+'\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n','\n')
f.write(lines)
f.close()
outmess('\tFortran 77 wrappers are saved to "%s"\n'%(wn))
if funcwrappers2:
wn = os.path.join(options['buildpath'],'%s-f2pywrappers2.f90'%(vrd['modulename']))
ret['fsrc'] = wn
f=open(wn,'w')
f.write('! -*- f90 -*-\n')
f.write('! This file is autogenerated with f2py (version:%s)\n'%(f2py_version))
f.write('! It contains Fortran 90 wrappers to fortran functions.\n')
lines = []
for l in ('\n\n'.join(funcwrappers2)+'\n').split('\n'):
if len(l)>72 and l[0]==' ':
lines.append(l[:72]+'&\n &')
l = l[72:]
while len(l)>66:
lines.append(l[:66]+'&\n &')
l = l[66:]
lines.append(l+'\n')
else: lines.append(l+'\n')
lines = ''.join(lines).replace('\n &\n','\n')
f.write(lines)
f.close()
outmess('\tFortran 90 wrappers are saved to "%s"\n'%(wn))
return ret
_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
if not isintent_hide(var[a]):
if not isoptional(var[a]):
nth=nth+1
vrd['nth']=repr(nth)+stnd[nth%10]+' argument'
else:
nthk=nthk+1
vrd['nth']=repr(nthk)+stnd[nthk%10]+' keyword'
else: vrd['nth']='hidden'
savevrd[a]=vrd
for r in _rules:
if '_depend' in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
for a in depargs:
if isintent_aux(var[a]):
_rules = aux_rules
else:
_rules = arg_rules
vrd=savevrd[a]
for r in _rules:
if '_depend' not in r:
continue
if ('_check' in r and r['_check'](var[a])) or ('_check' not in r):
ar=applyrules(r,vrd,var[a])
rd=dictappend(rd,ar)
if '_break' in r:
break
if 'check' in var[a]:
for c in var[a]['check']:
vrd['check']=c
ar=applyrules(check_rules,vrd,var[a])
rd=dictappend(rd,ar)
if isinstance(rd['cleanupfrompyobj'], list):
rd['cleanupfrompyobj'].reverse()
if isinstance(rd['closepyobjfrom'], list):
rd['closepyobjfrom'].reverse()
rd['docsignature']=stripcomma(replace('#docsign##docsignopt##docsignxa#',
{'docsign':rd['docsign'],
'docsignopt':rd['docsignopt'],
'docsignxa':rd['docsignxa']}))
optargs=stripcomma(replace('#docsignopt##docsignxa#',
{'docsignxa':rd['docsignxashort'],
'docsignopt':rd['docsignoptshort']}
))
if optargs=='':
rd['docsignatureshort']=stripcomma(replace('#docsign#',{'docsign':rd['docsign']}))
else:
rd['docsignatureshort']=replace('#docsign#[#docsignopt#]',
{'docsign':rd['docsign'],
'docsignopt':optargs,
})
rd['latexdocsignatureshort']=rd['docsignatureshort'].replace('_','\\_')
rd['latexdocsignatureshort']=rd['latexdocsignatureshort'].replace(',',', ')
cfs=stripcomma(replace('#callfortran##callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
if len(rd['callfortranappend'])>1:
rd['callcompaqfortran']=stripcomma(replace('#callfortran# 0,#callfortranappend#',{'callfortran':rd['callfortran'],'callfortranappend':rd['callfortranappend']}))
else:
rd['callcompaqfortran']=cfs
rd['callfortran']=cfs
if isinstance(rd['docreturn'], list):
rd['docreturn']=stripcomma(replace('#docreturn#',{'docreturn':rd['docreturn']}))+' = '
rd['docstrsigns']=[]
rd['latexdocstrsigns']=[]
for k in ['docstrreq','docstropt','docstrout','docstrcbs']:
if k in rd and isinstance(rd[k], list):
rd['docstrsigns']=rd['docstrsigns']+rd[k]
k='latex'+k
if k in rd and isinstance(rd[k], list):
rd['latexdocstrsigns']=rd['latexdocstrsigns']+rd[k][0:1]+\
['\\begin{description}']+rd[k][1:]+\
['\\end{description}']
if rd['keyformat'] or rd['xaformat']:
argformat = rd['argformat']
if isinstance(argformat, list):
argformat.append('|')
else:
assert isinstance(argformat, str),repr((argformat, type(argformat)))
rd['argformat'] += '|'
ar=applyrules(routine_rules,rd)
if ismoduleroutine(rout):
outmess('\t\t\t %s\n'%(ar['docshort']))
else:
outmess('\t\t %s\n'%(ar['docshort']))
return ar,wrap
| true | true |
f7f830813343306606469414ca4c304bea049852 | 584 | py | Python | redly/redweb/migrations/0003_chat.py | redlytcc/redlysite | ae1f2476bf58e7bbf3b91baaf7bf1d16777ca939 | [
"Apache-2.0"
] | null | null | null | redly/redweb/migrations/0003_chat.py | redlytcc/redlysite | ae1f2476bf58e7bbf3b91baaf7bf1d16777ca939 | [
"Apache-2.0"
] | 6 | 2020-02-11T23:30:28.000Z | 2022-03-11T23:34:45.000Z | redly/redweb/migrations/0003_chat.py | redlytcc/redlysite | ae1f2476bf58e7bbf3b91baaf7bf1d16777ca939 | [
"Apache-2.0"
] | null | null | null | # Generated by Django 2.1.1 on 2018-11-03 13:12
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('redweb', '0002_delete_chat'),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=40)),
('text', models.TextField(max_length=6000)),
],
),
]
| 24.333333 | 114 | 0.566781 |
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
('redweb', '0002_delete_chat'),
]
operations = [
migrations.CreateModel(
name='Chat',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('nome', models.CharField(max_length=40)),
('text', models.TextField(max_length=6000)),
],
),
]
| true | true |
f7f830f97cb51afdd87d98bd361c6bea5f467b17 | 4,275 | py | Python | csrank/dataset_reader/objectranking/util.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | null | null | null | csrank/dataset_reader/objectranking/util.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | null | null | null | csrank/dataset_reader/objectranking/util.py | hytsang/cs-ranking | 241626a6a100a27b96990b4f199087a6dc50dcc0 | [
"Apache-2.0"
] | 1 | 2018-10-30T08:57:14.000Z | 2018-10-30T08:57:14.000Z | import logging
from itertools import combinations
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from csrank.util import ranking_ordering_conversion
# Public names exported via ``from ... import *``; everything else is internal.
# Fixed: 'complete_linear_regression_dataset' was listed twice.
__all__ = ['generate_complete_pairwise_dataset', 'complete_linear_regression_dataset',
           'weighted_cosine_similarity', 'get_key_for_indices']
def generate_pairwise_instances(features):
pairs = np.array(list(combinations(features, 2)))
n_pairs = len(pairs)
neg_indices = np.arange(0, n_pairs, 2)
a, b = np.copy(pairs[neg_indices, 0]), np.copy(pairs[neg_indices, 1])
pairs[neg_indices, 1] = a
pairs[neg_indices, 0] = b
X1 = pairs[:, 0]
X2 = pairs[:, 1]
Y_double = np.ones([n_pairs, 1]) * np.array([1, 0])
Y_single = np.repeat(1, n_pairs)
Y_double[neg_indices] = [0, 1]
Y_single[neg_indices] = 0
return X1, X2, Y_double, Y_single
def generate_complete_pairwise_dataset(X, rankings):
    """Expand ranked instances into a complete pairwise data set.

    Each instance's objects are re-ordered from best to worst before the
    pairwise expansion; if that conversion fails with a ``ValueError``
    the objects are used in their original order as a fallback.

    Returns ``(X_train, X1, X2, Y_double, Y_single)`` where
    ``X_train = X1 - X2`` is the feature-difference representation.
    """
    try:
        n_instances, n_objects, n_features = X.shape
        rankings = rankings.astype(int)
        rankings -= np.min(rankings)
        orderings = ranking_ordering_conversion(rankings)
        X_sorted = [X[i, orderings[i], :] for i in range(n_instances)]
    except ValueError:
        # TODO: convert the rankings to orderings and sort X accordingly
        logger = logging.getLogger("generate_complete_pairwise_dataset")
        logger.error("Value Error: {}, {} ".format(X[0], rankings[0]))
        X_sorted = X
    per_instance = [generate_pairwise_instances(inst) for inst in X_sorted]
    if per_instance:
        x1_parts, x2_parts, double_parts, single_parts = zip(*per_instance)
        X1 = np.concatenate(x1_parts)
        X2 = np.concatenate(x2_parts)
        Y_double = np.concatenate(double_parts)
        Y_single = np.concatenate(single_parts)
    else:
        X1, X2 = np.array([]), np.array([])
        Y_double, Y_single = np.array([]), np.array([])
    X_train = X1 - X2
    return X_train, X1, X2, Y_double, Y_single
def complete_linear_regression_dataset(X, rankings):
    """Flatten ranked instances into a plain regression data set.

    Every object becomes one regression sample whose target is its rank
    normalised by the maximum rank within the same instance.
    """
    samples = [obj for instance in X for obj in instance]
    targets = [t for rank in rankings for t in rank / np.max(rank, axis=0)]
    return np.array(samples), np.array(targets)
def get_key_for_indices(idx1, idx2):
    """Return an order-independent string key for an index pair."""
    low, high = sorted((idx1, idx2))
    return str((low, high))
def weighted_cosine_similarity(weights, x, y):
    """Cosine similarity of ``x`` and ``y`` under a per-feature weighting."""
    norm_x = np.sqrt(np.sum(weights * x * x))
    norm_y = np.sqrt(np.sum(weights * y * y))
    return np.sum(weights * x * y) / (norm_x * norm_y)
def similarity_function_for_multilabel_instances(X_labels, Y_labels, X, Y):
    """Cosine similarity of the feature vectors plus the macro-F1
    agreement of the two label vectors."""
    label_agreement = f1_score(X_labels, Y_labels, average='macro')
    cosine = np.dot(X, Y) / (np.linalg.norm(X) * np.linalg.norm(Y))
    return cosine + label_agreement
def initialize_similarity_matrix(mypath):
    """Load a ``col_major_index -> similarity`` lookup from a CSV file."""
    frame = pd.read_csv(mypath)
    similarities = frame.set_index('col_major_index')['similarity']
    return similarities.to_dict()
def sub_sampling(name, Xt, Yt, n_objects=5):
    """Deterministically sub-sample fixed-size rankings from larger instances.

    The ``Xt.shape[1]`` objects of every instance are viewed as
    ``bucket_size`` consecutive buckets; each round draws one object per
    bucket and the drawn ranks are re-normalised to ``0..n_objects-1``.
    The draw is repeated ``bucket_size`` times with fixed seeds, so the
    result is reproducible across calls.

    NOTE(review): assumes ``Xt.shape[1] >= bucket_size * n_objects`` so the
    per-bucket offsets below stay in range -- TODO confirm with callers.
    """
    logger = logging.getLogger(name=name)
    bucket_size = int(Xt.shape[1] / n_objects)
    # logger.info("#########################################################################")
    # logger.info("X instances {} objects {} bucket_size {}".format(Xt.shape[0], Xt.shape[1], bucket_size))
    X_train = []
    Y_train = []
    for i in range(bucket_size):
        X = np.copy(Xt)
        Y = np.copy(Yt)
        # Fixed per-round seed keeps every sampling round deterministic.
        rs = np.random.RandomState(42 + i)
        # One random in-bucket position per (instance, bucket).
        idx = rs.randint(bucket_size, size=(len(X), n_objects))
        # TODO: subsampling multiple rankings
        # Shift each sampled position into its own bucket of columns.
        idx += np.arange(start=0, stop=X.shape[1], step=bucket_size)[:n_objects]
        X = X[np.arange(X.shape[0])[:, None], idx]
        Y = Y[np.arange(X.shape[0])[:, None], idx]
        # Re-rank the sampled objects densely as 0..n_objects-1.
        tmp_sort = Y.argsort(axis=-1)
        Y = np.empty_like(Y)
        Y[np.arange(len(X))[:, None], tmp_sort] = np.arange(n_objects)
        if len(X_train) == 0:
            X_train = X
            Y_train = Y
        else:
            Y_train = np.concatenate([Y_train, Y], axis=0)
            X_train = np.concatenate([X_train, X], axis=0)
    logger.info("Sampled instances {} objects {}".format(X_train.shape[0], X_train.shape[1]))
    return X_train, Y_train
| 33.928571 | 107 | 0.634152 | import logging
from itertools import combinations
import numpy as np
import pandas as pd
from sklearn.metrics import f1_score
from csrank.util import ranking_ordering_conversion
# Public names exported via ``from ... import *``; everything else is internal.
# Fixed: 'complete_linear_regression_dataset' was listed twice.
__all__ = ['generate_complete_pairwise_dataset', 'complete_linear_regression_dataset',
           'weighted_cosine_similarity', 'get_key_for_indices']
def generate_pairwise_instances(features):
    """Expand one ordered object list into pairwise preference samples.

    ``features`` is expected to be ordered from most to least preferred.
    Every 2-combination of rows becomes one training pair, and every
    even-indexed pair is flipped so both orderings occur in the output.

    Returns ``(X1, X2, Y_double, Y_single)`` where ``Y_double`` holds
    one-hot labels (``[1, 0]`` when ``X1`` is preferred, ``[0, 1]``
    otherwise) and ``Y_single`` the matching binary label.
    """
    pair_array = np.array(list(combinations(features, 2)))
    total = len(pair_array)
    flipped = np.arange(0, total, 2)
    # Reverse the object order of every even-indexed pair.
    pair_array[flipped] = pair_array[flipped][:, ::-1]
    X1 = pair_array[:, 0]
    X2 = pair_array[:, 1]
    Y_double = np.tile(np.array([1.0, 0.0]), (total, 1))
    Y_single = np.ones(total, dtype=int)
    Y_double[flipped] = [0, 1]
    Y_single[flipped] = 0
    return X1, X2, Y_double, Y_single
def generate_complete_pairwise_dataset(X, rankings):
try:
n_instances, n_objects, n_features = X.shape
rankings = rankings.astype(int)
rankings -= np.min(rankings)
orderings = ranking_ordering_conversion(rankings)
X_sorted = [X[i, orderings[i], :] for i in range(n_instances)]
except ValueError:
logger = logging.getLogger("generate_complete_pairwise_dataset")
logger.error("Value Error: {}, {} ".format(X[0], rankings[0]))
X_sorted = X
Y_double = []
X1 = []
X2 = []
Y_single = []
for features in X_sorted:
x1, x2, y1, y2 = generate_pairwise_instances(features)
X1.extend(x1)
X2.extend(x2)
Y_double.extend(y1)
Y_single.extend(y2)
X1 = np.array(X1)
X2 = np.array(X2)
Y_double = np.array(Y_double)
Y_single = np.array(Y_single)
X_train = X1 - X2
return X_train, X1, X2, Y_double, Y_single
def complete_linear_regression_dataset(X, rankings):
X1 = []
Y_single = []
for features, rank in zip(X, rankings):
X1.extend(features)
norm_ranks = rank / np.max(rank, axis=0)
Y_single.extend(norm_ranks)
X1 = np.array(X1)
Y_single = np.array(Y_single)
return X1, Y_single
def get_key_for_indices(idx1, idx2):
return str(tuple(sorted([idx1, idx2])))
def weighted_cosine_similarity(weights, x, y):
denominator = np.sqrt(np.sum(weights * x * x)) * np.sqrt(
np.sum(weights * y * y))
sim = np.sum(weights * x * y) / denominator
return sim
def similarity_function_for_multilabel_instances(X_labels, Y_labels, X, Y):
similarity = f1_score(X_labels, Y_labels, average='macro')
similarity = np.dot(X, Y) / (np.linalg.norm(X) * np.linalg.norm(Y)) + similarity
return similarity
def initialize_similarity_matrix(mypath):
dataFrame = pd.read_csv(mypath)
similarity_dictionary = dataFrame.set_index('col_major_index')['similarity'].to_dict()
return similarity_dictionary
def sub_sampling(name, Xt, Yt, n_objects=5):
logger = logging.getLogger(name=name)
bucket_size = int(Xt.shape[1] / n_objects)
X_train = []
Y_train = []
for i in range(bucket_size):
X = np.copy(Xt)
Y = np.copy(Yt)
rs = np.random.RandomState(42 + i)
idx = rs.randint(bucket_size, size=(len(X), n_objects))
idx += np.arange(start=0, stop=X.shape[1], step=bucket_size)[:n_objects]
X = X[np.arange(X.shape[0])[:, None], idx]
Y = Y[np.arange(X.shape[0])[:, None], idx]
tmp_sort = Y.argsort(axis=-1)
Y = np.empty_like(Y)
Y[np.arange(len(X))[:, None], tmp_sort] = np.arange(n_objects)
if len(X_train) == 0:
X_train = X
Y_train = Y
else:
Y_train = np.concatenate([Y_train, Y], axis=0)
X_train = np.concatenate([X_train, X], axis=0)
logger.info("Sampled instances {} objects {}".format(X_train.shape[0], X_train.shape[1]))
return X_train, Y_train
| true | true |
f7f8315b6d1dad24c372b0500d589b4ba6d0ecf7 | 336 | py | Python | src/routing/setup.py | rubengomex/python-rest-api | 4143b5c86658409fd8dc6eb9f2bc71f8de18815f | [
"MIT"
] | null | null | null | src/routing/setup.py | rubengomex/python-rest-api | 4143b5c86658409fd8dc6eb9f2bc71f8de18815f | [
"MIT"
] | null | null | null | src/routing/setup.py | rubengomex/python-rest-api | 4143b5c86658409fd8dc6eb9f2bc71f8de18815f | [
"MIT"
] | null | null | null | from flask_restful import Resource, Api, request
from ..api.users.controller import User, UserList
from ..auth.setup import configure_auth
def init_routes(app):
api = Api(app)
# auth routes /auth
configure_auth(app)
# users routes /users
api.add_resource(UserList, '/users/')
api.add_resource(User, '/users/<string:id>/')
| 25.846154 | 49 | 0.735119 | from flask_restful import Resource, Api, request
from ..api.users.controller import User, UserList
from ..auth.setup import configure_auth
def init_routes(app):
api = Api(app)
configure_auth(app)
api.add_resource(UserList, '/users/')
api.add_resource(User, '/users/<string:id>/')
| true | true |
f7f8337eb29ffe91086c2bd6edbeb60c8e16ba10 | 7,729 | py | Python | spectrum/django/spectrum.py | SpectrumApp/spectrum-python | 773516af1f89547b4a7afe555615247d7e44a393 | [
"BSD-3-Clause"
] | 6 | 2015-11-06T14:46:12.000Z | 2020-01-25T10:28:42.000Z | spectrum/django/spectrum.py | SpectrumApp/spectrum-python | 773516af1f89547b4a7afe555615247d7e44a393 | [
"BSD-3-Clause"
] | 7 | 2015-11-04T23:40:56.000Z | 2018-12-07T11:31:43.000Z | spectrum/django/spectrum.py | SpectrumApp/spectrum-python | 773516af1f89547b4a7afe555615247d7e44a393 | [
"BSD-3-Clause"
] | 2 | 2015-11-04T16:56:38.000Z | 2016-04-17T23:10:26.000Z | FIRE_HOSE = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console', 'root']
},
'filters': {
'request_id': {
'()': 'spectrum.filters.RequestIdFilter'
}
},
'formatters': {
'verbose': {
'format': '[%(name)s][%(levelname)s] %(message)s'
}
},
'loggers': {
'django': {
'handlers': ['django'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['django.request'],
'level': 'DEBUG',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends'],
'level': 'DEBUG',
'propagate': False,
},
'celery': {
'handlers': ['celery'],
'level': 'DEBUG',
'propagate': False,
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['request_id']
},
'root': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': '',
'filters': ['request_id']
},
'django': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'django',
'filters': ['request_id']
},
'django.request': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'django.request',
'filters': ['request_id']
},
'celery': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'celery',
'filters': ['request_id']
},
'django.db.backends': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'django.db.backends',
},
},
}
FIRE_HOSE_UDP = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console', 'root']
},
'filters': {
'request_id': {
'()': 'spectrum.filters.RequestIdFilter'
}
},
'formatters': {
'verbose': {
'format': '[%(name)s][%(levelname)s] %(message)s'
}
},
'loggers': {
'django': {
'handlers': ['django'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['django.request'],
'level': 'DEBUG',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends'],
'level': 'DEBUG',
'propagate': False,
},
'celery': {
'handlers': ['celery'],
'level': 'DEBUG',
'propagate': False,
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['request_id']
},
'root': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': '',
},
'django': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'django',
},
'django.request': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'django.request',
},
'celery': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'celery',
},
'django.db.backends': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'django.db.backends',
},
},
}
FIRE_HOSE_WS = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console', 'root']
},
'filters': {
'request_id': {
'()': 'spectrum.filters.RequestIdFilter'
}
},
'formatters': {
'verbose': {
'format': '[%(name)s][%(levelname)s] %(message)s'
}
},
'loggers': {
'django': {
'handlers': ['django'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['django.request'],
'level': 'DEBUG',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends'],
'level': 'DEBUG',
'propagate': False,
},
'celery': {
'handlers': ['celery'],
'level': 'DEBUG',
'propagate': False,
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['request_id']
},
'root': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': '',
},
'django': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'django',
},
'django.request': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'django.request',
},
'celery': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'celery',
},
'django.db.backends': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'django.db.backends',
},
},
}
def fire_hose(base_config=None, log_db=True, levels=None, handler_kwargs=None):
"""
A convenience method to get and modify predefined logging configurations.
Arguments
~~~~~~~~~
* ``base_config``: Defaults to `FIRE_HOSE`, which uses the REST HTTP stream on ``http://127.0.0.1:9000/``
* ``log_db``: shortcut for toggling the level of ``django.db.backends`` logging. Defaults to ``True``
* ``levels``: if provided, a 2-tuples iterable of logger names and their level.
* ``handler_kwargs``: if provided, kwargs to pass to the handles. Use this to override default settings such as ip / port Spectrum is running on.
Examples
~~~~~~~~
::
from spectrum.django import fire_hose, FIRE_HOSE_UDP
LOGGING = fire_hose()
LOGGING = fire_hose(log_db=False)
LOGGING = fire_hose(levels=(
('my.overly.verbose.module', 'WARNING'),
('some.other.module', 'CRITICAL'),
)
LOGGING = fire_hose(FIRE_HOSE_UDP, handler_kwargs={'url': '127.0.0.1:12345'})
"""
if base_config is None:
base_config = FIRE_HOSE
if levels is None:
levels = tuple()
if handler_kwargs is None:
handler_kwargs = {}
if log_db is False:
base_config['loggers']['django.db.backends']['level'] = 'WARNING'
for silenced, level in levels:
if silenced not in base_config['loggers']:
base_config['loggers'][silenced] = {}
base_config['loggers'][silenced]['level'] = level
for handler, handler_config in base_config['handlers'].items():
handler_config.update(handler_kwargs)
return base_config
| 27.310954 | 149 | 0.464743 | FIRE_HOSE = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console', 'root']
},
'filters': {
'request_id': {
'()': 'spectrum.filters.RequestIdFilter'
}
},
'formatters': {
'verbose': {
'format': '[%(name)s][%(levelname)s] %(message)s'
}
},
'loggers': {
'django': {
'handlers': ['django'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['django.request'],
'level': 'DEBUG',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends'],
'level': 'DEBUG',
'propagate': False,
},
'celery': {
'handlers': ['celery'],
'level': 'DEBUG',
'propagate': False,
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['request_id']
},
'root': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': '',
'filters': ['request_id']
},
'django': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'django',
'filters': ['request_id']
},
'django.request': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'django.request',
'filters': ['request_id']
},
'celery': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'celery',
'filters': ['request_id']
},
'django.db.backends': {
'level': 'DEBUG',
'class': 'spectrum.handlers.RestSpectrum',
'sublevel': 'django.db.backends',
},
},
}
FIRE_HOSE_UDP = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console', 'root']
},
'filters': {
'request_id': {
'()': 'spectrum.filters.RequestIdFilter'
}
},
'formatters': {
'verbose': {
'format': '[%(name)s][%(levelname)s] %(message)s'
}
},
'loggers': {
'django': {
'handlers': ['django'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['django.request'],
'level': 'DEBUG',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends'],
'level': 'DEBUG',
'propagate': False,
},
'celery': {
'handlers': ['celery'],
'level': 'DEBUG',
'propagate': False,
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['request_id']
},
'root': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': '',
},
'django': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'django',
},
'django.request': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'django.request',
},
'celery': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'celery',
},
'django.db.backends': {
'level': 'DEBUG',
'class': 'spectrum.handlers.UDPSpectrum',
'sublevel': 'django.db.backends',
},
},
}
FIRE_HOSE_WS = {
'version': 1,
'disable_existing_loggers': False,
'root': {
'level': 'DEBUG',
'handlers': ['console', 'root']
},
'filters': {
'request_id': {
'()': 'spectrum.filters.RequestIdFilter'
}
},
'formatters': {
'verbose': {
'format': '[%(name)s][%(levelname)s] %(message)s'
}
},
'loggers': {
'django': {
'handlers': ['django'],
'level': 'DEBUG',
'propagate': False,
},
'django.request': {
'handlers': ['django.request'],
'level': 'DEBUG',
'propagate': False,
},
'django.db.backends': {
'handlers': ['django.db.backends'],
'level': 'DEBUG',
'propagate': False,
},
'celery': {
'handlers': ['celery'],
'level': 'DEBUG',
'propagate': False,
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
'filters': ['request_id']
},
'root': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': '',
},
'django': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'django',
},
'django.request': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'django.request',
},
'celery': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'celery',
},
'django.db.backends': {
'level': 'DEBUG',
'class': 'spectrum.handlers.WebsocketSpectrum',
'sublevel': 'django.db.backends',
},
},
}
def fire_hose(base_config=None, log_db=True, levels=None, handler_kwargs=None):
if base_config is None:
base_config = FIRE_HOSE
if levels is None:
levels = tuple()
if handler_kwargs is None:
handler_kwargs = {}
if log_db is False:
base_config['loggers']['django.db.backends']['level'] = 'WARNING'
for silenced, level in levels:
if silenced not in base_config['loggers']:
base_config['loggers'][silenced] = {}
base_config['loggers'][silenced]['level'] = level
for handler, handler_config in base_config['handlers'].items():
handler_config.update(handler_kwargs)
return base_config
| true | true |
f7f833bb823dfdbe69dd4f6aca9ad56df615e799 | 946 | py | Python | test/test_label_install.py | moengage/neomodel | 8d039f47c9578402dd974a216117d5c54377b78f | [
"MIT"
] | null | null | null | test/test_label_install.py | moengage/neomodel | 8d039f47c9578402dd974a216117d5c54377b78f | [
"MIT"
] | null | null | null | test/test_label_install.py | moengage/neomodel | 8d039f47c9578402dd974a216117d5c54377b78f | [
"MIT"
] | null | null | null | from neomodel import config, StructuredNode, StringProperty, install_all_labels, install_labels
from neomodel.core import get_database_from_cls
db = get_database_from_cls(None)
config.AUTO_INSTALL_LABELS = False
class NoConstraintsSetup(StructuredNode):
name = StringProperty(unique_index=True)
class TestAbstractNode(StructuredNode):
__abstract_node__ = True
name = StringProperty(unique_index=True)
config.AUTO_INSTALL_LABELS = True
def test_labels_were_not_installed():
bob = NoConstraintsSetup(name='bob').save()
bob2 = NoConstraintsSetup(name='bob').save()
assert bob.id != bob2.id
for n in NoConstraintsSetup.nodes.all():
n.delete()
def test_install_all():
install_labels(TestAbstractNode)
# run install all labels
install_all_labels()
assert True
# remove constraint for above test
db.cypher_query("DROP CONSTRAINT on (n:NoConstraintsSetup) ASSERT n.name IS UNIQUE")
| 26.277778 | 95 | 0.761099 | from neomodel import config, StructuredNode, StringProperty, install_all_labels, install_labels
from neomodel.core import get_database_from_cls
db = get_database_from_cls(None)
config.AUTO_INSTALL_LABELS = False
class NoConstraintsSetup(StructuredNode):
name = StringProperty(unique_index=True)
class TestAbstractNode(StructuredNode):
__abstract_node__ = True
name = StringProperty(unique_index=True)
config.AUTO_INSTALL_LABELS = True
def test_labels_were_not_installed():
bob = NoConstraintsSetup(name='bob').save()
bob2 = NoConstraintsSetup(name='bob').save()
assert bob.id != bob2.id
for n in NoConstraintsSetup.nodes.all():
n.delete()
def test_install_all():
install_labels(TestAbstractNode)
install_all_labels()
assert True
db.cypher_query("DROP CONSTRAINT on (n:NoConstraintsSetup) ASSERT n.name IS UNIQUE")
| true | true |
f7f8343284b90779924b86ee9400c35d89381292 | 104,329 | py | Python | contrib/.venv/lib/python3.8/site-packages/bitcoinlib/transactions.py | jtatman/katersltc-client | 7ea88bfa32e9bdce9bf8d434a8909a07b65412f7 | [
"MIT"
] | null | null | null | contrib/.venv/lib/python3.8/site-packages/bitcoinlib/transactions.py | jtatman/katersltc-client | 7ea88bfa32e9bdce9bf8d434a8909a07b65412f7 | [
"MIT"
] | null | null | null | contrib/.venv/lib/python3.8/site-packages/bitcoinlib/transactions.py | jtatman/katersltc-client | 7ea88bfa32e9bdce9bf8d434a8909a07b65412f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# BitcoinLib - Python Cryptocurrency Library
# TRANSACTION class to create, verify and sign Transactions
# © 2017 - 2021 March - 1200 Web Development <http://1200wd.com/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from datetime import datetime
import json
import pickle
import random
from bitcoinlib.encoding import *
from bitcoinlib.config.opcodes import *
from bitcoinlib.keys import HDKey, Key, deserialize_address, Address, sign, verify, Signature
from bitcoinlib.networks import Network
from bitcoinlib.values import Value, value_to_satoshi
_logger = logging.getLogger(__name__)
class TransactionError(Exception):
    """
    Exception raised for errors in the Transaction module.

    The message is logged as an error as soon as the exception is created.
    """

    def __init__(self, msg=''):
        # Keep the message on the instance and log it immediately
        self.msg = msg
        _logger.error(msg)

    def __str__(self):
        return self.msg
def transaction_deserialize(rawtx, network=DEFAULT_NETWORK, check_size=True):
    """
    Deserialize a raw transaction

    Returns a Transaction object with a list of input and output objects, locktime and version.
    Will raise an error if a wrong number of inputs is found or if no outputs are found.

    :param rawtx: Raw transaction as hexadecimal string or bytes
    :type rawtx: str, bytes
    :param network: Network code, i.e. 'bitcoin', 'testnet', 'litecoin', etc. Leave empty for default network
    :type network: str, Network
    :param check_size: Check if no bytes are left when parsing is finished. Disable when parsing a list of transactions, such as the transactions in a raw block. Default is True
    :type check_size: bool

    :return Transaction:
    """
    rawtx = to_bytes(rawtx)
    # First 4 bytes are the transaction version, stored little-endian in the raw transaction
    version = rawtx[0:4][::-1]
    coinbase = False
    flag = None
    witness_type = 'legacy'
    cursor = 4
    # A zero byte after the version is the segwit marker; the next byte is the segwit flag (BIP 141)
    if rawtx[4:5] == b'\0':
        flag = rawtx[5:6]
        if flag == b'\1':
            witness_type = 'segwit'
        cursor += 2
    # Number of inputs as variable-length integer
    n_inputs, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
    cursor += size
    inputs = []
    if not isinstance(network, Network):
        network = Network(network)
    for n in range(0, n_inputs):
        # Previous transaction hash, stored little-endian so reverse to get the txid
        inp_hash = rawtx[cursor:cursor + 32][::-1]
        if not len(inp_hash):
            raise TransactionError("Input transaction hash not found. Probably malformed raw transaction")
        # A coinbase input references an all-zero previous transaction hash
        if inp_hash == 32 * b'\0':
            coinbase = True
        output_n = rawtx[cursor + 32:cursor + 36][::-1]
        cursor += 36
        unlocking_script_size, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
        cursor += size
        unlocking_script = rawtx[cursor:cursor + unlocking_script_size]
        # In a segwit transaction an empty unlocking script marks a native segwit input;
        # its signature data follows later in the witness section
        inp_type = 'legacy'
        if witness_type == 'segwit' and not unlocking_script_size:
            inp_type = 'segwit'
        cursor += unlocking_script_size
        sequence_number = rawtx[cursor:cursor + 4]
        cursor += 4
        inputs.append(Input(prev_txid=inp_hash, output_n=output_n, unlocking_script=unlocking_script,
                            witness_type=inp_type, sequence=sequence_number, index_n=n, network=network))
    if len(inputs) != n_inputs:
        raise TransactionError("Error parsing inputs. Number of tx specified %d but %d found" % (n_inputs, len(inputs)))

    outputs = []
    n_outputs, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
    cursor += size
    output_total = 0
    for n in range(0, n_outputs):
        # Output value in satoshi: 8 bytes, little-endian
        value = int.from_bytes(rawtx[cursor:cursor + 8][::-1], 'big')
        cursor += 8
        lock_script_size, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
        cursor += size
        lock_script = rawtx[cursor:cursor + lock_script_size]
        cursor += lock_script_size
        outputs.append(Output(value=value, lock_script=lock_script, network=network, output_n=n))
        output_total += value
    if not outputs:
        raise TransactionError("Error no outputs found in this transaction")

    if witness_type == 'segwit':
        # Witness section: per input, a counted list of witness items (BIP 141)
        for n in range(0, len(inputs)):
            n_items, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
            cursor += size
            witnesses = []
            for m in range(0, n_items):
                # An empty witness item is represented as a single zero byte
                witness = b'\0'
                item_size, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
                if item_size:
                    witness = rawtx[cursor + size:cursor + item_size + size]
                cursor += item_size + size
                witnesses.append(witness)
            if witnesses and not coinbase:
                # Classify witness data: DER signatures, public keys or a multisig redeemscript,
                # then rebuild the Input with the extracted signature/key information
                script_type = inputs[n].script_type
                witness_script_type = 'sig_pubkey'
                signatures = []
                keys = []
                sigs_required = 1
                public_hash = b''
                for witness in witnesses:
                    if witness == b'\0':
                        continue
                    if 70 <= len(witness) <= 74 and witness[0:1] == b'\x30':  # witness is DER encoded signature
                        signatures.append(witness)
                    elif len(witness) == 33 and len(signatures) == 1:  # key from sig_pk
                        keys.append(witness)
                    else:
                        rsds = script_deserialize(witness, script_types=['multisig'])
                        if not rsds['script_type'] == 'multisig':
                            # FIXME: Parse unknown scripts
                            _logger.warning("Could not parse witnesses in transaction. Multisig redeemscript expected")
                            witness_script_type = 'unknown'
                            script_type = 'unknown'
                        else:
                            # FIXME: Do not mixup naming signatures and keys
                            keys = rsds['signatures']
                            sigs_required = rsds['number_of_sigs_m']
                            witness_script_type = 'p2sh'
                            script_type = 'p2sh_multisig'

                # A non-empty unlocking script wrapping a p2wpkh/p2wsh program indicates
                # a nested (p2sh-segwit) input
                inp_witness_type = inputs[n].witness_type
                usd = script_deserialize(inputs[n].unlocking_script, locking_script=True)
                if usd['script_type'] == "p2wpkh" and witness_script_type == 'sig_pubkey':
                    inp_witness_type = 'p2sh-segwit'
                    script_type = 'p2sh_p2wpkh'
                elif usd['script_type'] == "p2wsh" and witness_script_type == 'p2sh':
                    inp_witness_type = 'p2sh-segwit'
                    script_type = 'p2sh_p2wsh'

                inputs[n] = Input(prev_txid=inputs[n].prev_txid, output_n=inputs[n].output_n, keys=keys,
                                  unlocking_script_unsigned=inputs[n].unlocking_script_unsigned,
                                  unlocking_script=inputs[n].unlocking_script, sigs_required=sigs_required,
                                  signatures=signatures, witness_type=inp_witness_type, script_type=script_type,
                                  sequence=inputs[n].sequence, index_n=inputs[n].index_n, public_hash=public_hash,
                                  network=inputs[n].network, witnesses=witnesses)

    # Only the 4 locktime bytes should remain; a mismatch means a malformed transaction
    # (skipped when parsing transactions embedded in a raw block)
    if len(rawtx[cursor:]) != 4 and check_size:
        raise TransactionError("Error when deserializing raw transaction, bytes left for locktime must be 4 not %d" %
                               len(rawtx[cursor:]))
    locktime = int.from_bytes(rawtx[cursor:cursor + 4][::-1], 'big')

    return Transaction(inputs, outputs, locktime, version, network, size=cursor + 4, output_total=output_total,
                       coinbase=coinbase, flag=flag, witness_type=witness_type, rawtx=rawtx)
def script_deserialize(script, script_types=None, locking_script=None, size_bytes_check=True):
    """
    Deserialize a script: determine type, number of signatures and script data.

    :param script: Raw script
    :type script: str, bytes
    :param script_types: Limit script type determination to this list. Leave to default None to search in all script types.
    :type script_types: list
    :param locking_script: Only deserialize locking scripts. Specify False to only deserialize for unlocking scripts. Default is None for both
    :type locking_script: bool
    :param size_bytes_check: Check if script or signature starts with size bytes and remove size bytes before parsing. Default is True
    :type size_bytes_check: bool

    :return dict: Parse result with keys: script_type, keys, signatures, hashes, redeemscript,
        number_of_sigs_n, number_of_sigs_m, locktime_cltv, locktime_csv and result
    """

    def _parse_data(scr, max_items=None, redeemscript_expected=False, item_length=0):
        # Extract a list of length-prefixed data items (signatures, keys, hashes) from the
        # start of the script. Returns (items, total number of bytes consumed).
        items = []
        total_length = 0
        # A DER encoded signature (0x30 prefix, 70-74 bytes) is returned as a single item
        if 70 <= len(scr) <= 74 and scr[:1] == b'\x30':
            return [scr], len(scr)
        while len(scr) and (max_items is None or max_items > len(items)):
            itemlen, size = varbyteint_to_int(scr[0:9])
            if item_length and itemlen != item_length:
                break
            # TODO: Rethink and rewrite this:
            # Without an explicit item_length only plausible data sizes are accepted:
            # 20 (hash160), 33/65 (public key), 70-73 (DER signature)
            if not item_length and itemlen not in [20, 33, 65, 70, 71, 72, 73]:
                break
            # When a redeemscript follows, leave at least 20 bytes of script unconsumed
            if redeemscript_expected and len(scr[itemlen + 1:]) < 20:
                break
            items.append(scr[1:itemlen + 1])
            total_length += itemlen + size
            scr = scr[itemlen + 1:]
        return items, total_length

    def _get_empty_data():
        # Fresh default parse result; 'result' holds a human readable status message
        return {'script_type': '', 'keys': [], 'signatures': [], 'hashes': [], 'redeemscript': b'',
                'number_of_sigs_n': 1, 'number_of_sigs_m': 1, 'locktime_cltv': None, 'locktime_csv': None, 'result': ''}

    def _parse_script(script):
        # Try to match the script against each candidate script-type template.
        # Returns (data, remaining_script); remaining_script is b'' when nothing matched.
        found = False
        cur = 0
        data = _get_empty_data()
        for script_type in script_types:
            cur = 0
            # Script-type templates are lists of expected tokens (opcodes, 'signature',
            # 'public_key', 'hash-NN', ...) defined in the opcodes config
            try:
                ost = SCRIPT_TYPES_UNLOCKING[script_type]
            except KeyError:
                ost = SCRIPT_TYPES_LOCKING[script_type]
            data = _get_empty_data()
            data['script_type'] = script_type
            found = True
            for ch in ost:
                if cur >= len(script):
                    found = False
                    break
                cur_char = script[cur]
                if ch[:4] == 'hash':
                    # 'hash' or 'hash-NN' token: expect a data push, optionally of fixed length NN
                    hash_length = 0
                    if len(ch) > 5:
                        hash_length = int(ch.split("-")[1])
                    s, total_length = _parse_data(script[cur:], 1, item_length=hash_length)
                    if not s:
                        found = False
                        break
                    data['hashes'] += s
                    cur += total_length
                elif ch == 'signature':
                    signature_length = 0
                    s, total_length = _parse_data(script[cur:], 1, item_length=signature_length)
                    if not s:
                        found = False
                        break
                    data['signatures'] += s
                    cur += total_length
                elif ch == 'public_key':
                    pk_size, size = varbyteint_to_int(script[cur:cur + 9])
                    key = script[cur + size:cur + size + pk_size]
                    if not key:
                        found = False
                        break
                    data['keys'].append(key)
                    cur += size + pk_size
                elif ch == 'OP_RETURN':
                    # OP_RETURN must be the first opcode; the rest of the script is opaque data
                    if cur_char == opcodes['OP_RETURN'] and cur == 0:
                        data.update({'op_return': script[cur + 1:]})
                        cur = len(script)
                        found = True
                        break
                    else:
                        found = False
                        break
                elif ch == 'multisig':  # one or more signatures
                    redeemscript_expected = False
                    if 'redeemscript' in ost:
                        redeemscript_expected = True
                    s, total_length = _parse_data(script[cur:], redeemscript_expected=redeemscript_expected)
                    if not s:
                        found = False
                        break
                    data['signatures'] += s
                    cur += total_length
                elif ch == 'redeemscript':
                    # Skip OP_PUSHDATA1/2/4 size prefix bytes if present
                    size_byte = 0
                    if script[cur:cur + 1] == b'\x4c':
                        size_byte = 1
                    elif script[cur:cur + 1] == b'\x4d':
                        size_byte = 2
                    elif script[cur:cur + 1] == b'\x4e':
                        size_byte = 3
                    data['redeemscript'] = script[cur + 1 + size_byte:]
                    # Recursively parse the redeemscript as a locking script to extract keys
                    # and the m-of-n signature requirements
                    data2 = script_deserialize(data['redeemscript'], locking_script=True)
                    if 'signatures' not in data2 or not data2['signatures']:
                        found = False
                        break
                    data['keys'] = data2['signatures']
                    data['number_of_sigs_m'] = data2['number_of_sigs_m']
                    data['number_of_sigs_n'] = data2['number_of_sigs_n']
                    cur = len(script)
                elif ch == 'push_size':
                    # Remaining script must exactly match the announced push size
                    push_size, size = varbyteint_to_int(script[cur:cur + 9])
                    found = bool(len(script[cur:]) - size == push_size)
                    if not found:
                        break
                elif ch == 'op_m':
                    # OP_1..OP_16 encoding the required number of signatures (m)
                    if cur_char in OP_N_CODES:
                        data['number_of_sigs_m'] = cur_char - opcodes['OP_1'] + 1
                    else:
                        found = False
                        break
                    cur += 1
                elif ch == 'op_n':
                    # OP_1..OP_16 encoding the total number of keys (n); must be >= m
                    if cur_char in OP_N_CODES:
                        data['number_of_sigs_n'] = cur_char - opcodes['OP_1'] + 1
                    else:
                        found = False
                        break
                    if data['number_of_sigs_m'] > data['number_of_sigs_n']:
                        raise TransactionError("Number of signatures to sign (%s) is higher then actual "
                                               "amount of signatures (%s)" %
                                               (data['number_of_sigs_m'], data['number_of_sigs_n']))
                    if len(data['signatures']) > int(data['number_of_sigs_n']):
                        raise TransactionError("%d signatures found, but %s sigs expected" %
                                               (len(data['signatures']), data['number_of_sigs_n']))
                    cur += 1
                elif ch == 'SIGHASH_ALL':
                    pass
                    # TODO: Fix signature parsing: SIGHASHALL not part of signature...
                    # if cur_char != SIGHASH_ALL:
                    #     found = False
                    #     break
                elif ch == 'locktime_cltv':
                    if len(script) < 4:
                        found = False
                        break
                    data['locktime_cltv'] = int.from_bytes(script[cur:cur + 4], 'little')
                    cur += 4
                elif ch == 'locktime_csv':
                    if len(script) < 4:
                        found = False
                        break
                    data['locktime_csv'] = int.from_bytes(script[cur:cur + 4], 'little')
                    cur += 4
                else:
                    # Any other token is a literal opcode name that must match the current byte
                    try:
                        if cur_char == opcodes[ch]:
                            cur += 1
                        else:
                            found = False
                            data = _get_empty_data()
                            break
                    # NOTE(review): opcodes is a dict, so an unknown opcode name raises KeyError,
                    # which this IndexError handler would not catch — confirm intended
                    except IndexError:
                        raise TransactionError("Opcode %s not found [type %s]" % (ch, script_type))

            if found and not len(script[cur:]):  # Found is True and no remaining script to parse
                break
        if found and not len(script[cur:]):
            return data, script[cur:]
        data = _get_empty_data()
        data['result'] = 'Script not recognised'
        return data, ''

    data = _get_empty_data()
    script = to_bytes(script)
    if not script:
        data.update({'result': 'Empty script'})
        return data

    # Check if script starts with size byte: if so, retry parsing without it
    if size_bytes_check:
        script_size, size = varbyteint_to_int(script[0:9])
        if len(script[1:]) == script_size:
            data = script_deserialize(script[1:], script_types, locking_script, size_bytes_check=False)
            if 'result' in data and data['result'][:22] not in \
                    ['Script not recognised', 'Empty script', 'Could not parse script']:
                return data

    # Determine which script-type templates to try
    if script_types is None:
        if locking_script is None:
            script_types = dict(SCRIPT_TYPES_UNLOCKING, **SCRIPT_TYPES_LOCKING)
        elif locking_script:
            script_types = SCRIPT_TYPES_LOCKING
        else:
            script_types = SCRIPT_TYPES_UNLOCKING
    elif not isinstance(script_types, list):
        script_types = [script_types]

    # Parse repeatedly: locktime scripts (CLTV/CSV) may prefix the actual script,
    # so collect locktimes and continue with the remainder
    locktime_cltv = 0
    locktime_csv = 0
    while len(script):
        begin_script = script
        data, script = _parse_script(script)
        if begin_script == script:
            # No progress made, stop to avoid an infinite loop
            break
        if script and data['script_type'] == 'locktime_cltv':
            locktime_cltv = data['locktime_cltv']
        if script and data['script_type'] == 'locktime_csv':
            locktime_csv = data['locktime_csv']
    if data and data['result'] != 'Script not recognised':
        data['locktime_cltv'] = locktime_cltv
        data['locktime_csv'] = locktime_csv
        return data

    wrn_msg = "Could not parse script, unrecognized script"
    data = _get_empty_data()
    data['result'] = wrn_msg
    return data
def script_to_string(script, name_data=False):
    """
    Convert script to human readable string format with OP-codes, signatures, keys, etc

    >>> script = '76a914c7402ab295a0eb8897ff5b8fbd5276c2d9d2340b88ac'
    >>> script_to_string(script)
    'OP_DUP OP_HASH160 hash-20 OP_EQUALVERIFY OP_CHECKSIG'

    :param script: A locking or unlocking script
    :type script: bytes, str
    :param name_data: Replace signatures and keys strings with name
    :type name_data: bool

    :return str:
    """
    parsed = script_deserialize(script)
    if not parsed or parsed['script_type'] == 'empty':
        return ""

    signatures = parsed['signatures']
    if name_data:
        # Replace actual data with 'signature-N' or 'key-N' placeholders
        label = 'key' if signatures and len(signatures[0]) in (33, 65) else 'signature'
        sigs = ' '.join('%s-%d' % (label, n) for n in range(1, len(signatures) + 1))
    else:
        sigs = ' '.join(sig.hex() for sig in signatures)

    script_type = parsed['script_type']
    if script_type in SCRIPT_TYPES_LOCKING:
        template = SCRIPT_TYPES_LOCKING[script_type]
    else:
        template = SCRIPT_TYPES_UNLOCKING[script_type]

    # Substitute template tokens with the parsed script data
    tokens = []
    for token in template:
        if token in ('signature', 'multisig', 'return_data'):
            tokens.append(sigs)
        elif token == 'redeemscript' and parsed.get('redeemscript'):
            # Render the embedded redeemscript recursively
            tokens.append(script_to_string(parsed['redeemscript'], name_data=name_data))
        elif token == 'op_m':
            tokens.append(opcodenames[80 + int(parsed['number_of_sigs_m'])])
        elif token == 'op_n':
            tokens.append(opcodenames[80 + int(parsed['number_of_sigs_n'])])
        else:
            tokens.append(token)
    return ' '.join(tokens)
def _serialize_multisig_redeemscript(public_key_list, n_required=None):
    # Serialize an m-of-n multisig redeemscript from a list of public keys (bytes or hex string).
    # Layout: OP_m <pubkey1> ... <pubkeyn> OP_n OP_CHECKMULTISIG, where OP_x is encoded as
    # opcodes['OP_1'] + x - 1. If n_required is None, all supplied keys must sign (m == n).
    for key in public_key_list:
        if not isinstance(key, (str, bytes)):
            # Bug fix: original raised a message with a bare '%s' placeholder and no
            # formatting arguments; include the offending item in the message.
            raise TransactionError("Item %s in public_key_list is not of type string or bytes" % str(key))
    if n_required is None:
        n_required = len(public_key_list)
    script = int_to_varbyteint(opcodes['OP_1'] + n_required - 1)
    for key in public_key_list:
        script += varstr(key)
    script += int_to_varbyteint(opcodes['OP_1'] + len(public_key_list) - 1)
    script += b'\xae'  # OP_CHECKMULTISIG
    return script
def serialize_multisig_redeemscript(key_list, n_required=None, compressed=True):
    """
    Create a multisig redeemscript used in a p2sh.

    Contains the number of signatures, followed by the list of public keys and the OP-code for the number of signatures required.

    :param key_list: List of public keys
    :type key_list: Key, list
    :param n_required: Number of required signatures
    :type n_required: int
    :param compressed: Use compressed public keys?
    :type compressed: bool

    :return bytes: A multisig redeemscript
    """
    if not key_list:
        return b''
    if not isinstance(key_list, list):
        raise TransactionError("Argument public_key_list must be of type list")
    if len(key_list) > 15:
        raise TransactionError("Redeemscripts with more then 15 keys are non-standard and could result in "
                               "locked up funds")
    # Normalize every item to public-key bytes before serializing
    public_key_list = []
    for item in key_list:
        if isinstance(item, Key):
            pub_bytes = item.public_byte if compressed else item.public_uncompressed_byte
        elif len(item) == 65 and item[0:1] == b'\x04' or len(item) == 33 and item[0:1] in [b'\x02', b'\x03']:
            # Already raw public key bytes (uncompressed 0x04 / compressed 0x02-0x03 prefix)
            pub_bytes = item
        elif len(item) == 132 and item[0:2] == '04' or len(item) == 66 and item[0:2] in ['02', '03']:
            # Hex-string public key
            pub_bytes = bytes.fromhex(item)
        else:
            # Anything else: let the Key class parse it
            key_obj = Key(item)
            pub_bytes = key_obj.public_byte if compressed else key_obj.public_uncompressed_byte
        public_key_list.append(pub_bytes)
    return _serialize_multisig_redeemscript(public_key_list, n_required)
def _p2sh_multisig_unlocking_script(sigs, redeemscript, hash_type=None, as_list=False):
    # Build the unlocking (scriptSig) part for a p2sh multisig input:
    # OP_0 <sig1> ... <sigm> <redeemscript>. The leading OP_0 (b'\x00') compensates for the
    # extra stack item popped by OP_CHECKMULTISIG.
    # When as_list is True a list of raw items is returned (used as a segwit witness stack)
    # instead of a single serialized bytes script.
    usu = b'\x00'
    if as_list:
        usu = [usu]
    if not isinstance(sigs, list):
        sigs = [sigs]
    for sig in sigs:
        s = sig
        if hash_type:
            # Append the 1-byte sighash type after each DER-encoded signature
            s += hash_type.to_bytes(1, 'big')
        if as_list:
            usu.append(s)
        else:
            usu += varstr(s)
    rs_size = b''
    size_byte = b''
    if not as_list:
        # Serialized form: push the redeemscript as data. For scripts of 76 bytes or more,
        # prefix with OP_PUSHDATA1/2/4 (0x4c/0x4d/0x4e) followed by the raw length bytes.
        rs_size = int_to_varbyteint(len(redeemscript))
        if len(rs_size) > 1:
            # Drop the varint marker byte; only the plain length bytes follow OP_PUSHDATA
            rs_size = rs_size[1:]
        if len(redeemscript) >= 76:
            if len(rs_size) == 1:
                size_byte = b'\x4c'
            elif len(rs_size) == 2:
                size_byte = b'\x4d'
            else:
                size_byte = b'\x4e'
    redeemscript_str = size_byte + rs_size + redeemscript
    if as_list:
        usu.append(redeemscript_str)
    else:
        usu += redeemscript_str
    return usu
def script_add_locktime_cltv(locktime_cltv, script):
    # Prefix a script with an absolute timelock:
    # <locktime, 4 bytes little-endian> OP_CHECKLOCKTIMEVERIFY OP_DROP <script>.
    # If the script already carries this prefix it is returned unchanged.
    cltv_ops = opcode('OP_CHECKLOCKTIMEVERIFY') + opcode('OP_DROP')
    if script and len(script) > 6 and script[4:6] == cltv_ops:
        return script
    return locktime_cltv.to_bytes(4, 'little') + cltv_ops + script
def script_add_locktime_csv(locktime_csv, script):
    # Prefix a script with a relative timelock:
    # <locktime, 4 bytes little-endian> OP_CHECKSEQUENCEVERIFY OP_DROP <script>.
    # If the script already carries this prefix it is returned unchanged.
    csv_ops = opcode('OP_CHECKSEQUENCEVERIFY') + opcode('OP_DROP')
    if script and len(script) > 6 and script[4:6] == csv_ops:
        return script
    return locktime_csv.to_bytes(4, 'little') + csv_ops + script
def get_unlocking_script_type(locking_script_type, witness_type='legacy', multisig=False):
    """
    Specify locking script type and get corresponding script type for unlocking script

    >>> get_unlocking_script_type('p2wsh')
    'p2sh_multisig'

    :param locking_script_type: Locking script type. I.e.: p2pkh, p2sh, p2wpkh, p2wsh
    :type locking_script_type: str
    :param witness_type: Type of witness: legacy or segwit. Default is legacy
    :type witness_type: str
    :param multisig: Is multisig script or not? Default is False
    :type multisig: bool

    :return str: Unlocking script type such as sig_pubkey or p2sh_multisig
    """
    # Guard-clause style: return as soon as the type is determined
    if locking_script_type in ('p2pkh', 'p2wpkh'):
        return 'sig_pubkey'
    if locking_script_type == 'p2wsh' or (witness_type == 'legacy' and multisig):
        return 'p2sh_multisig'
    if locking_script_type == 'p2sh':
        return 'p2sh_multisig' if multisig else 'sig_pubkey'
    if locking_script_type == 'p2pk':
        return 'signature'
    raise TransactionError("Unknown locking script type %s" % locking_script_type)
def transaction_update_spents(txs, address):
    """
    Update spent information for list of transactions for a specific address. This method assumes the list of
    transaction complete and up-to-date.

    This methods loops through all the transaction and update all transaction outputs for given address, checks
    if the output is spent and add the spending transaction ID and index number to the outputs.

    The same list of transactions with updates outputs will be returned

    :param txs: Complete list of transactions for given address
    :type txs: list of Transaction
    :param address: Address string
    :type address: str

    :return list of Transaction:
    """
    # First pass: collect all inputs spending from this address,
    # keyed by (previous txid hex, previous output index) -> spending Transaction
    spend_list = {}
    for t in txs:
        for inp in t.inputs:
            if inp.address == address:
                spend_list.update({(inp.prev_txid.hex(), inp.output_n_int): t})
    address_inputs = list(spend_list.keys())
    # Second pass: mark each output of this address as spent/unspent and, when spent,
    # record the spending transaction id and the input index in that transaction
    for t in txs:
        for to in t.outputs:
            if to.address != address:
                continue
            spent = True if (t.txid, to.output_n) in address_inputs else False
            txs[txs.index(t)].outputs[to.output_n].spent = spent
            if spent:
                spending_tx = spend_list[(t.txid, to.output_n)]
                # Find the index_n of the specific input that spends this output
                spending_index_n = \
                    [inp for inp in txs[txs.index(spending_tx)].inputs
                     if inp.prev_txid.hex() == t.txid and inp.output_n_int == to.output_n][0].index_n
                txs[txs.index(t)].outputs[to.output_n].spending_txid = spending_tx.txid
                txs[txs.index(t)].outputs[to.output_n].spending_index_n = spending_index_n
    return txs
class Input(object):
    """
    Transaction Input class, used by Transaction class

    An Input contains a reference to an UTXO or Unspent Transaction Output (prev_txid + output_n).

    To spent the UTXO an unlocking script can be included to prove ownership.

    Inputs are verified by the Transaction class.
    """

    def __init__(self, prev_txid, output_n, keys=None, signatures=None, public_hash=b'', unlocking_script=b'',
                 unlocking_script_unsigned=None, script_type=None, address='',
                 sequence=0xffffffff, compressed=None, sigs_required=None, sort=False, index_n=0,
                 value=0, double_spend=False, locktime_cltv=None, locktime_csv=None, key_path='', witness_type=None,
                 witnesses=None, encoding=None, network=DEFAULT_NETWORK):
        """
        Create a new transaction input

        :param prev_txid: Transaction hash of the UTXO (previous output) which will be spent.
        :type prev_txid: bytes, str
        :param output_n: Output number in previous transaction.
        :type output_n: bytes, int
        :param keys: A list of Key objects or public / private key string in various formats. If no list is provided but a bytes or string variable, a list with one item will be created. Optional
        :type keys: list (bytes, str, Key)
        :param signatures: Specify optional signatures
        :type signatures: list (bytes, str, Signature)
        :param public_hash: Public key hash or script hash. Specify if key is not available
        :type public_hash: bytes
        :param unlocking_script: Unlocking script (scriptSig) to prove ownership. Optional
        :type unlocking_script: bytes, hexstring
        :param unlocking_script_unsigned: Unlocking script for signing transaction
        :type unlocking_script_unsigned: bytes, hexstring
        :param script_type: Type of unlocking script used, i.e. p2pkh or p2sh_multisig. Default is p2pkh
        :type script_type: str
        :param address: Address string or object for input
        :type address: str, Address
        :param sequence: Sequence part of input, you normally do not have to touch this
        :type sequence: bytes, int
        :param compressed: Use compressed or uncompressed public keys. Default is compressed
        :type compressed: bool
        :param sigs_required: Number of signatures required for a p2sh_multisig unlocking script
        :type sigs_required: int
        :param sort: Sort public keys according to BIP0045 standard. Default is False to avoid unexpected change of key order.
        :type sort: boolean
        :param index_n: Index of input in transaction. Used by Transaction class.
        :type index_n: int
        :param value: Value of input in smallest denominator integers (Satoshi's) or as Value object or string
        :type value: int, Value, str
        :param double_spend: Is this input also spend in another transaction
        :type double_spend: bool
        :param locktime_cltv: Check Lock Time Verify value. Script level absolute time lock for this input
        :type locktime_cltv: int
        :param locktime_csv: Check Sequence Verify value
        :type locktime_csv: int
        :param key_path: Key path of input key as BIP32 string or list
        :type key_path: str, list
        :param witness_type: Specify witness/signature position: 'segwit' or 'legacy'. Determine from script, address or encoding if not specified.
        :type witness_type: str
        :param witnesses: List of witnesses for inputs, used for segwit transactions for instance.
        :type witnesses: list of bytes
        :param encoding: Address encoding used. For example bech32/base32 or base58. Leave empty for default
        :type encoding: str
        :param network: Network, leave empty for default
        :type network: str, Network
        """
        self.prev_txid = to_bytes(prev_txid)
        self.output_n = output_n
        if isinstance(output_n, int):
            # Keep both the int and the 4-byte big-endian representation of the output index
            self.output_n_int = output_n
            self.output_n = output_n.to_bytes(4, 'big')
        else:
            self.output_n_int = int.from_bytes(output_n, 'big')
            self.output_n = output_n
        self.unlocking_script = b'' if unlocking_script is None else to_bytes(unlocking_script)
        self.unlocking_script_unsigned = b'' if unlocking_script_unsigned is None \
            else to_bytes(unlocking_script_unsigned)
        if isinstance(sequence, numbers.Number):
            self.sequence = sequence
        else:
            # Raw serialized sequence bytes are little-endian
            self.sequence = int.from_bytes(sequence, 'little')
        self.compressed = compressed
        self.network = network
        if not isinstance(network, Network):
            self.network = Network(network)
        self.index_n = index_n
        self.value = value_to_satoshi(value, network=network)
        if not keys:
            keys = []
        self.keys = []
        if not isinstance(keys, list):
            keys = [keys]
        self.public_hash = public_hash
        if not signatures:
            signatures = []
        if not isinstance(signatures, list):
            signatures = [signatures]
        self.sort = sort
        if isinstance(address, Address):
            # Copy address attributes from a provided Address object
            self.address = address.address
            self.encoding = address.encoding
            self.network = address.network
            self.script_type = address.script_type
        else:
            self.address = address
        self.signatures = []
        self.redeemscript = b''
        # NOTE(review): this overwrites the script_type copied from an Address object just
        # above when the script_type argument is None -- confirm this is intended
        self.script_type = script_type
        if self.prev_txid == b'\0' * 32:
            # An input spending the all-zero previous txid is a coinbase input
            self.script_type = 'coinbase'
        if not sigs_required:
            if self.script_type == 'p2sh_multisig':
                # raise TransactionError("Please specify number of signatures required (sigs_required) parameter")
                pass
            else:
                sigs_required = 1
        self.double_spend = double_spend
        self.locktime_cltv = locktime_cltv
        self.locktime_csv = locktime_csv
        self.witness_type = witness_type
        if encoding is None:
            # Default to base58; segwit inputs use bech32 addresses
            self.encoding = 'base58'
            if self.witness_type == 'segwit':
                self.encoding = 'bech32'
        else:
            self.encoding = encoding
        self.valid = None
        self.key_path = key_path
        self.witnesses = witnesses if witnesses else []
        self.script_code = b''
        # If unlocking script is specified extract keys, signatures, type from script
        if self.unlocking_script and self.script_type != 'coinbase' and not (signatures and keys):
            us_dict = script_deserialize(self.unlocking_script)
            if not us_dict:  # or us_dict['script_type'] in ['unknown', 'empty']
                raise TransactionError("Could not parse unlocking script (%s)" % self.unlocking_script.hex())
            if us_dict['script_type'] not in ['', 'unknown', 'empty']:
                # NOTE(review): sigs_required is first set to number_of_sigs_n here, then
                # overwritten below with number_of_sigs_m via the local variable -- verify
                self.sigs_required = us_dict['number_of_sigs_n']
                self.redeemscript = us_dict['redeemscript']
                if us_dict['signatures'] not in signatures:
                    signatures += us_dict['signatures']
                if not keys:
                    keys = us_dict['keys']
                sigs_required = us_dict['number_of_sigs_m']
                if not signatures and not self.public_hash:
                    self.public_hash = us_dict['hashes'][0]
                # Determine locking script type for unlocking script type
                if not self.script_type:
                    self.script_type = us_dict['script_type']
                    if us_dict['script_type'] == 'p2wsh':
                        self.script_type = 'p2sh_p2wsh'
                    elif us_dict['script_type'] == 'p2wpkh':
                        self.script_type = 'p2sh_p2wpkh'
        elif unlocking_script_unsigned and not signatures:
            # Derive public hash, witness and script type from the unsigned locking script
            ls_dict = script_deserialize(unlocking_script_unsigned, locking_script=True)
            if ls_dict['hashes']:
                self.public_hash = ls_dict['hashes'][0]
            if ls_dict['script_type'] in ['p2wpkh', 'p2wsh']:
                self.witness_type = 'segwit'
            self.script_type = get_unlocking_script_type(ls_dict['script_type'])
        self.sigs_required = sigs_required
        # Infer witness type when not supplied: presence of witnesses implies segwit
        if self.script_type is None and self.witness_type is None and self.witnesses:
            self.witness_type = 'segwit'
        if self.witness_type is None or self.witness_type == 'legacy':
            # if self.script_type in ['p2wpkh', 'p2wsh', 'p2sh_p2wpkh', 'p2sh_p2wsh']:
            if self.script_type in ['p2wpkh', 'p2wsh']:
                self.witness_type = 'segwit'
            elif self.script_type in ['p2sh_p2wpkh', 'p2sh_p2wsh']:
                self.witness_type = 'p2sh-segwit'
            else:
                self.witness_type = 'legacy'
        elif self.witness_type == 'segwit' and self.script_type == 'sig_pubkey' and encoding is None:
            self.encoding = 'bech32'
        if not self.script_type:
            self.script_type = 'sig_pubkey'
        # Normalize all key arguments to Key objects, deduplicating as we go
        for key in keys:
            if not isinstance(key, Key):
                kobj = Key(key, network=network)
            else:
                kobj = key
            if kobj not in self.keys:
                # if self.compressed is not None and kobj.compressed != self.compressed:
                #     _logger.warning("Key compressed is %s but Input class compressed argument is %s " %
                #                     (kobj.compressed, self.compressed))
                self.compressed = kobj.compressed
                self.keys.append(kobj)
        if self.compressed is None:
            self.compressed = True
        if self.sort:
            # BIP-45 style lexicographic sort on the serialized public key
            self.keys.sort(key=lambda k: k.public_byte)
        self.hash_type = SIGHASH_ALL
        # Normalize all signature arguments to Signature objects, deduplicating by DER encoding
        for sig in signatures:
            if not isinstance(sig, Signature):
                try:
                    sig = Signature.from_str(sig)
                except Exception as e:
                    _logger.error("Could not parse signature %s in Input. Error: %s" % (to_hexstring(sig), e))
                    continue
            if sig.as_der_encoded() not in [x.as_der_encoded() for x in self.signatures]:
                self.signatures.append(sig)
                if sig.hash_type:
                    self.hash_type = sig.hash_type
        self.update_scripts(hash_type=self.hash_type)

    def set_locktime_relative_blocks(self, blocks):
        """
        Set nSequence relative locktime for this transaction input. The transaction will only be valid if the specified number of blocks has been mined since the previous UTXO is confirmed.
        Maximum number of blocks is 65535 as defined in BIP-0068, which is around 455 days.

        When setting an relative timelock, the transaction version must be at least 2. The transaction will be updated so existing signatures for this input will be removed.

        :param blocks: The blocks value is the number of blocks since the previous transaction output has been confirmed.
        :type blocks: int

        :return None:
        """
        if blocks == 0 or blocks == 0xffffffff:
            # 0xffffffff disables the relative locktime entirely
            self.sequence = 0xffffffff
            return
        if blocks > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of nSequence timelock blocks exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        self.sequence = blocks
        # Existing signatures are invalidated by changing the sequence
        self.signatures = []

    def set_locktime_relative_time(self, seconds):
        """
        Set nSequence relative locktime for this transaction input. The transaction will only be valid if the specified amount of seconds have been passed since the previous UTXO is confirmed.

        Number of seconds will be rounded to the nearest 512 seconds. Any value below 512 will be interpreted as 512 seconds.

        Maximum number of seconds is 33553920 (512 * 65535), which equals 384 days. See BIP-0068 definition.

        When setting an relative timelock, the transaction version must be at least 2. The transaction will be updated so existing signatures for this input will be removed.

        :param seconds: Number of seconds since the related previous transaction output has been confirmed.
        :type seconds: int

        :return:
        """
        if seconds == 0 or seconds == 0xffffffff:
            self.sequence = 0xffffffff
            return
        if seconds < 512:
            # Time-based locktime has 512-second granularity; minimum is one unit
            seconds = 512
        if (seconds // 512) > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of relative nSeqence timelock seconds exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        # The type flag marks this sequence value as time-based rather than block-based
        self.sequence = seconds // 512 + SEQUENCE_LOCKTIME_TYPE_FLAG
        # Existing signatures are invalidated by changing the sequence
        self.signatures = []

    def update_scripts(self, hash_type=SIGHASH_ALL):
        """
        Method to update Input scripts.

        Creates or updates unlocking script, witness script for segwit inputs, multisig redeemscripts and
        locktime scripts. This method is called when initializing a Input class or when signing an input.

        :param hash_type: Specific hash type, default is SIGHASH_ALL
        :type hash_type: int

        :return bool: Always returns True when method is completed
        """
        addr_data = b''
        unlock_script = b''
        if self.script_type in ['sig_pubkey', 'p2sh_p2wpkh']:
            if not self.keys and not self.public_hash:
                if self.unlocking_script_unsigned:
                    # Try to recover the public key hash from the unsigned locking script
                    script_dict = script_deserialize(self.unlocking_script_unsigned)
                    if script_dict['script_type'] == 'p2pkh':
                        self.public_hash = script_dict['hashes'][0]
                    else:
                        return
                else:
                    return
            if not self.public_hash:
                self.public_hash = self.keys[0].hash160
            # Standard p2pkh script code: OP_DUP OP_HASH160 <hash160> OP_EQUALVERIFY OP_CHECKSIG
            self.script_code = b'\x76\xa9\x14' + self.public_hash + b'\x88\xac'
            self.unlocking_script_unsigned = self.script_code
            addr_data = self.public_hash
            if self.signatures and self.keys:
                # NOTE(review): due to conditional-expression precedence the first witness item
                # becomes b'' (dropping the signature) when hash_type is falsy -- confirm intended
                self.witnesses = [self.signatures[0].as_der_encoded() +
                                  hash_type.to_bytes(1, 'big') if hash_type else b'', self.keys[0].public_byte]
                unlock_script = b''.join([bytes(varstr(w)) for w in self.witnesses])
            elif self.witnesses and not self.signatures and not self.keys and \
                    self.script_type in ['sig_pubkey', 'p2sh_p2wpkh']:
                # Reverse direction: recover signature and key from a supplied witness stack
                self.signatures = [self.witnesses[0]]
                self.keys = [Key(self.witnesses[1], network=self.network)]
            if self.witness_type == 'p2sh-segwit':
                # Nested segwit: scriptSig only pushes the witness program
                self.unlocking_script = varstr(b'\0' + varstr(self.public_hash))
            elif self.witness_type == 'segwit':
                self.unlocking_script = b''
            elif unlock_script != b'':
                self.unlocking_script = unlock_script
        elif self.script_type in ['p2sh_multisig', 'p2sh_p2wsh']:
            # if not self.keys and not self.public_hash:
            #     raise TransactionError("Please provide keys to append multisig transaction input")
            if not self.redeemscript and self.keys:
                self.redeemscript = serialize_multisig_redeemscript(self.keys, n_required=self.sigs_required,
                                                                    compressed=self.compressed)
            if self.redeemscript:
                if self.witness_type == 'segwit' or self.witness_type == 'p2sh-segwit':
                    # Segwit script hash is a single SHA256; legacy p2sh uses HASH160
                    self.public_hash = hashlib.sha256(self.redeemscript).digest()
                else:
                    self.public_hash = hash160(self.redeemscript)
            addr_data = self.public_hash
            self.unlocking_script_unsigned = self.redeemscript
            if self.redeemscript and self.keys:
                # First redeemscript byte encodes OP_m (0x51 + m - 1); recover required sig count
                n_tag = self.redeemscript[0:1]
                if not isinstance(n_tag, int):
                    n_tag = int.from_bytes(n_tag, 'big')
                self.sigs_required = n_tag - 80
                signatures = [s.as_der_encoded() for s in self.signatures[:self.sigs_required]]
                if b'' in signatures:
                    raise TransactionError("Empty signature found in signature list when signing. "
                                           "Is DER encoded version of signature defined?")
                if len(signatures):
                    # Segwit variants need the unlocking items as a witness stack (list)
                    us_as_list = False
                    if self.witness_type in ['segwit', 'p2sh-segwit']:
                        us_as_list = True
                    unlock_script = _p2sh_multisig_unlocking_script(signatures, self.redeemscript, hash_type,
                                                                    as_list=us_as_list)
                if self.witness_type == 'segwit':
                    # Build script code as <key1> OP_CHECKSIGVERIFY ... <keyn> OP_CHECKSIG
                    script_code = b''
                    for k in self.keys:
                        script_code += varstr(k.public_byte) + b'\xad\xab'
                    if len(script_code) > 3:
                        script_code = script_code[:-2] + b'\xac'
                    self.script_code = script_code
                    if signatures:
                        self.witnesses = unlock_script
                elif self.witness_type == 'p2sh-segwit':
                    self.unlocking_script = varstr(b'\0' + varstr(self.public_hash))
                    self.script_code = self.unlocking_script
                    if signatures:
                        self.witnesses = unlock_script
                elif unlock_script != b'':
                    self.unlocking_script = unlock_script
        elif self.script_type == 'signature':
            # Bare p2pk: script code is <pubkey> OP_CHECKSIG
            if self.keys:
                self.script_code = varstr(self.keys[0].public_byte) + b'\xac'
                self.unlocking_script_unsigned = self.script_code
                addr_data = self.keys[0].public_byte
            if self.signatures:
                self.unlocking_script = varstr(self.signatures[0].as_der_encoded() + hash_type.to_bytes(1, 'big'))
        elif self.script_type not in ['coinbase', 'unknown']:
            raise TransactionError("Unknown unlocking script type %s for input %d" % (self.script_type, self.index_n))
        if addr_data and not self.address:
            self.address = Address(hashed_data=addr_data, encoding=self.encoding, network=self.network,
                                   script_type=self.script_type, witness_type=self.witness_type).address
        # Wrap both scripts with locktime prefixes when an input-level locktime is set
        if self.locktime_cltv:
            self.unlocking_script_unsigned = script_add_locktime_cltv(self.locktime_cltv,
                                                                      self.unlocking_script_unsigned)
            self.unlocking_script = script_add_locktime_cltv(self.locktime_cltv, self.unlocking_script)
        elif self.locktime_csv:
            self.unlocking_script_unsigned = script_add_locktime_csv(self.locktime_csv, self.unlocking_script_unsigned)
            self.unlocking_script = script_add_locktime_csv(self.locktime_csv, self.unlocking_script)
        return True

    def as_dict(self):
        """
        Get transaction input information in json format

        :return dict: Json with output_n, prev_txid, output_n, type, address, public_key, public_hash, unlocking_script and sequence
        """
        pks = []
        for k in self.keys:
            pks.append(k.public_hex)
        if len(self.keys) == 1:
            # Single-key inputs report the key as a string instead of a one-item list
            pks = pks[0]
        return {
            'index_n': self.index_n,
            'prev_txid': self.prev_txid.hex(),
            'output_n': self.output_n_int,
            'script_type': self.script_type,
            'address': self.address,
            'value': self.value,
            'public_keys': pks,
            'compressed': self.compressed,
            'encoding': self.encoding,
            'double_spend': self.double_spend,
            'script': self.unlocking_script.hex(),
            'redeemscript': self.redeemscript.hex(),
            'sequence': self.sequence,
            'signatures': [s.hex() for s in self.signatures],
            'sigs_required': self.sigs_required,
            'locktime_cltv': self.locktime_cltv,
            'locktime_csv': self.locktime_csv,
            'public_hash': self.public_hash.hex(),
            'script_code': self.script_code.hex(),
            'unlocking_script': self.unlocking_script.hex(),
            'unlocking_script_unsigned': self.unlocking_script_unsigned.hex(),
            'witness_type': self.witness_type,
            'witness': b''.join(self.witnesses).hex(),
            'sort': self.sort,
            'valid': self.valid,
        }

    def __repr__(self):
        return "<Input(prev_txid='%s', output_n=%d, address='%s', index_n=%s, type='%s')>" % \
               (self.prev_txid.hex(), self.output_n_int, self.address, self.index_n, self.script_type)
class Output(object):
    """
    Transaction Output class, normally part of Transaction class.

    Contains the amount and destination of a transaction.
    """

    def __init__(self, value, address='', public_hash=b'', public_key=b'', lock_script=b'', spent=False,
                 output_n=0, script_type=None, encoding=None, spending_txid='', spending_index_n=None,
                 network=DEFAULT_NETWORK):
        """
        Create a new transaction output

        An transaction outputs locks the specified amount to a public key. Anyone with the private key can unlock
        this output.

        The transaction output class contains an amount and the destination which can be provided either as address,
        public key, public key hash or a locking script. Only one needs to be provided as the they all can be derived
        from each other, but you can provide as much attributes as you know to improve speed.

        :param value: Amount of output in smallest denominator integers (Satoshi's) or as Value object or string
        :type value: int, Value, str
        :param address: Destination address of output. Leave empty to derive from other attributes you provide. An instance of an Address or HDKey class is allowed as argument.
        :type address: str, Address, HDKey
        :param public_hash: Hash of public key or script
        :type public_hash: bytes, str
        :param public_key: Destination public key
        :type public_key: bytes, str
        :param lock_script: Locking script of output. If not provided a default unlocking script will be provided with a public key hash.
        :type lock_script: bytes, str
        :param spent: Is output already spent? Default is False
        :type spent: bool
        :param output_n: Output index number, default is 0. Index number has to be unique per transaction and 0 for first output, 1 for second, etc
        :type output_n: int
        :param script_type: Script type of output (p2pkh, p2sh, segwit p2wpkh, etc). Extracted from lock_script if provided.
        :type script_type: str
        :param encoding: Address encoding used. For example bech32/base32 or base58. Leave empty to derive from address or default base58 encoding
        :type encoding: str
        :param spending_txid: Transaction hash of input spending this transaction output
        :type spending_txid: str
        :param spending_index_n: Index number of input spending this transaction output
        :type spending_index_n: int
        :param network: Network, leave empty for default
        :type network: str, Network
        """
        if not (address or public_hash or public_key or lock_script):
            raise TransactionError("Please specify address, lock_script, public key or public key hash when "
                                   "creating output")
        self.network = network
        if not isinstance(network, Network):
            self.network = Network(network)
        self.value = value_to_satoshi(value, network=network)
        self.lock_script = b'' if lock_script is None else to_bytes(lock_script)
        self.public_hash = to_bytes(public_hash)
        if isinstance(address, Address):
            self.address = address.address
            self.address_obj = address
        elif isinstance(address, HDKey):
            # Derive address, public key, script type and hash directly from the HD key
            self.address = address.address()
            self.address_obj = address.address_obj
            public_key = address.public_byte
            if not script_type:
                script_type = script_type_default(address.witness_type, address.multisig, True)
            self.public_hash = address.hash160
        else:
            self.address = address
            self.address_obj = None
        self.public_key = to_bytes(public_key)
        self.compressed = True
        self.k = None
        self.versionbyte = self.network.prefix_address
        self.script_type = script_type
        self.encoding = encoding
        if not self.address and self.encoding is None:
            self.encoding = 'base58'
        self.spent = spent
        self.output_n = output_n
        if self.address_obj:
            # Prefer attributes from the Address object when one is available
            self.script_type = self.address_obj.script_type if script_type is None else script_type
            self.public_hash = self.address_obj.hash_bytes
            self.network = self.address_obj.network
            self.encoding = self.address_obj.encoding
        if self.public_key and not self.public_hash:
            # Derive the public key hash from the public key
            k = Key(self.public_key, is_private=False, network=network)
            self.public_hash = k.hash160
        elif self.address and (not self.public_hash or not self.script_type or not self.encoding):
            # Fill in missing attributes by deserializing the address string
            address_dict = deserialize_address(self.address, self.encoding, self.network.name)
            if address_dict['script_type'] and not script_type:
                self.script_type = address_dict['script_type']
            if not self.script_type:
                raise TransactionError("Could not determine script type of address %s" % self.address)
            self.encoding = address_dict['encoding']
            network_guesses = address_dict['networks']
            if address_dict['network'] and self.network.name != address_dict['network']:
                raise TransactionError("Address %s is from %s network and transaction from %s network" %
                                       (self.address, address_dict['network'], self.network.name))
            elif self.network.name not in network_guesses:
                raise TransactionError("Network for output address %s is different from transaction network. %s not "
                                       "in %s" % (self.address, self.network.name, network_guesses))
            self.public_hash = address_dict['public_key_hash_bytes']
        if not self.encoding:
            self.encoding = 'base58'
            if self.script_type in ['p2wpkh', 'p2wsh']:
                self.encoding = 'bech32'
        if self.lock_script and not self.public_hash:
            # Extract script type, hash and key from a provided locking script
            ss = script_deserialize(self.lock_script, locking_script=True)
            self.script_type = ss['script_type']
            if self.script_type in ['p2wpkh', 'p2wsh']:
                self.encoding = 'bech32'
            if ss['hashes']:
                self.public_hash = ss['hashes'][0]
            if ss['keys']:
                self.public_key = ss['keys'][0]
                k = Key(self.public_key, is_private=False, network=network)
                self.public_hash = k.hash160
        if self.script_type is None:
            self.script_type = 'p2pkh'
            if self.encoding == 'bech32':
                self.script_type = 'p2wpkh'
        if self.public_hash and not self.address:
            self.address_obj = Address(hashed_data=self.public_hash, script_type=self.script_type,
                                       encoding=self.encoding, network=self.network)
            self.address = self.address_obj.address
            self.versionbyte = self.address_obj.prefix
        if self.lock_script == b'':
            # Generate the default locking script for this script type from hash or key
            if self.script_type == 'p2pkh':
                self.lock_script = b'\x76\xa9\x14' + self.public_hash + b'\x88\xac'
            elif self.script_type == 'p2sh':
                self.lock_script = b'\xa9\x14' + self.public_hash + b'\x87'
            elif self.script_type == 'p2wpkh':
                self.lock_script = b'\x00\x14' + self.public_hash
            elif self.script_type == 'p2wsh':
                self.lock_script = b'\x00\x20' + self.public_hash
            elif self.script_type == 'p2pk':
                if not self.public_key:
                    raise TransactionError("Public key is needed to create P2PK script for output %d" % output_n)
                self.lock_script = varstr(self.public_key) + b'\xac'
            else:
                raise TransactionError("Unknown output script type %s, please provide locking script" %
                                       self.script_type)
        self.spending_txid = spending_txid
        self.spending_index_n = spending_index_n
        # if self.script_type != 'nulldata' and value < self.network.dust_amount:
        #     raise TransactionError("Output to %s must be more then dust amount %d" %
        #                            (self.address, self.network.dust_amount))

    def as_dict(self):
        """
        Get transaction output information in json format

        :return dict: Json with amount, locking script, public key, public key hash and address
        """
        return {
            'value': self.value,
            'script': self.lock_script.hex(),
            'script_type': self.script_type,
            'public_key': self.public_key.hex(),
            'public_hash': self.public_hash.hex(),
            'address': self.address,
            'output_n': self.output_n,
            'spent': self.spent,
            'spending_txid': self.spending_txid,
            'spending_index_n': self.spending_index_n,
        }

    def __repr__(self):
        return "<Output(value=%d, address=%s, type=%s)>" % (self.value, self.address, self.script_type)
class Transaction(object):
"""
Transaction Class
Contains 1 or more Input class object with UTXO's to spent and 1 or more Output class objects with destinations.
Besides the transaction class contains a locktime and version.
Inputs and outputs can be included when creating the transaction, or can be add later with add_input and
add_output respectively.
A verify method is available to check if the transaction Inputs have valid unlocking scripts.
Each input in the transaction can be signed with the sign method provided a valid private key.
"""
@staticmethod
def import_raw(rawtx, network=DEFAULT_NETWORK, check_size=True):
"""
Import a raw transaction and create a Transaction object
Uses the transaction_deserialize method to parse the raw transaction and then calls the init method of
this transaction class to create the transaction object
:param rawtx: Raw transaction string
:type rawtx: bytes, str
:param network: Network, leave empty for default
:type network: str, Network
:param check_size: Check if not bytes are left when parsing is finished. Disable when parsing list of transactions, such as the transactions in a raw block. Default is True
:type check_size: bool
:return Transaction:
"""
return transaction_deserialize(rawtx, network=network, check_size=check_size)
@staticmethod
def load(txid=None, filename=None):
"""
Load transaction object from file which has been stored with the :func:`save` method.
Specify transaction ID or filename.
:param txid: Transaction ID. Transaction object will be read from .bitcoinlib datadir
:type txid: str
:param filename: Name of transaction object file
:type filename: str
:return Transaction:
"""
if not filename and not txid:
raise TransactionError("Please supply filename or txid")
elif not filename and txid:
p = Path(BCL_DATA_DIR, '%s.tx' % txid)
else:
p = Path(filename)
if not p.parent or str(p.parent) == '.':
p = Path(BCL_DATA_DIR, filename)
f = p.open('rb')
t = pickle.load(f)
f.close()
return t
    def __init__(self, inputs=None, outputs=None, locktime=0, version=None,
                 network=DEFAULT_NETWORK, fee=None, fee_per_kb=None, size=None, txid='', txhash='', date=None,
                 confirmations=None, block_height=None, block_hash=None, input_total=0, output_total=0, rawtx=b'',
                 status='new', coinbase=False, verified=False, witness_type='legacy', flag=None):
        """
        Create a new transaction class with provided inputs and outputs.

        You can also create an empty transaction and add input and outputs later.

        To verify and sign transactions all inputs and outputs need to be included in transaction. Any modification
        after signing makes the transaction invalid.

        :param inputs: Array of Input objects. Leave empty to add later
        :type inputs: list (Input)
        :param outputs: Array of Output object. Leave empty to add later
        :type outputs: list (Output)
        :param locktime: Transaction level locktime. Locks the transaction until a specified block (value from 1 to 5 million) or until a certain time (Timestamp in seconds after 1-jan-1970). Default value is 0 for transactions without locktime
        :type locktime: int
        :param version: Version rules. Defaults to 1 in bytes
        :type version: bytes, int
        :param network: Network, leave empty for default network
        :type network: str, Network
        :param fee: Fee in smallest denominator (ie Satoshi) for complete transaction
        :type fee: int
        :param fee_per_kb: Fee in smallest denominator per kilobyte. Specify when exact transaction size is not known.
        :type fee_per_kb: int
        :param size: Transaction size in bytes
        :type size: int
        :param txid: The transaction id (same for legacy/segwit) based on [nVersion][txins][txouts][nLockTime as hexadecimal string
        :type txid: str
        :param txhash: The transaction hash (differs from txid for witness transactions), based on [nVersion][marker][flag][txins][txouts][witness][nLockTime] in Segwit (as hexadecimal string). Unused at the moment
        :type txhash: str
        :param date: Confirmation date of transaction
        :type date: datetime
        :param confirmations: Number of confirmations
        :type confirmations: int
        :param block_height: Block number which includes transaction
        :type block_height: int
        :param block_hash: Hash of block for this transaction
        :type block_hash: str
        :param input_total: Total value of inputs
        :type input_total: int
        :param output_total: Total value of outputs
        :type output_total: int
        :param rawtx: Bytes representation of complete transaction
        :type rawtx: bytes
        :param status: Transaction status, for example: 'new', 'unconfirmed', 'confirmed'
        :type status: str
        :param coinbase: Coinbase transaction or not?
        :type coinbase: bool
        :param verified: Is transaction successfully verified? Updated when verified() method is called
        :type verified: bool
        :param witness_type: Specify witness/signature position: 'segwit' or 'legacy'. Determine from script, address or encoding if not specified.
        :type witness_type: str
        :param flag: Transaction flag to indicate version, for example for SegWit
        :type flag: bytes, str
        """
        self.coinbase = coinbase
        self.inputs = []
        if inputs is not None:
            for inp in inputs:
                self.inputs.append(inp)
            # Derive input total from the individual input values when not supplied
            if not input_total:
                input_total = sum([i.value for i in inputs])
        # Input index numbers (index_n) must be unique; renumber sequentially when duplicates are found
        id_list = [i.index_n for i in self.inputs]
        if list(set(id_list)) != id_list:
            _logger.info("Identical transaction indexes (tid) found in inputs, please specify unique index. "
                         "Indexes will be automatically recreated")
            index_n = 0
            for inp in self.inputs:
                inp.index_n = index_n
                index_n += 1
        if outputs is None:
            self.outputs = []
        else:
            self.outputs = outputs
            if not output_total:
                output_total = sum([o.value for o in outputs])
        # Implicit fee is whatever value is not claimed by the outputs
        if fee is None and output_total and input_total:
            fee = input_total - output_total
            if fee < 0 or fee == 0 and not self.coinbase:
                raise TransactionError("Transaction inputs total value must be greater then total value of "
                                       "transaction outputs")
        if not version:
            version = b'\x00\x00\x00\x01'
        # Keep both a 4-byte big-endian representation and an int of the version
        if isinstance(version, int):
            self.version = version.to_bytes(4, 'big')
            self.version_int = version
        else:
            self.version = version
            self.version_int = int.from_bytes(version, 'big')
        self.locktime = locktime
        self.network = network
        if not isinstance(network, Network):
            self.network = Network(network)
        self.flag = flag
        self.fee = fee
        self.fee_per_kb = fee_per_kb
        self.size = size
        # vsize starts equal to size; calc_weight_units() refines it for segwit
        self.vsize = size
        self.txid = txid
        self.txhash = txhash
        self.date = date
        self.confirmations = confirmations
        self.block_height = block_height
        self.block_hash = block_hash
        self.input_total = input_total
        self.output_total = output_total
        self.rawtx = rawtx
        self.status = status
        self.verified = verified
        self.witness_type = witness_type
        self.change = 0
        self.calc_weight_units()
        if self.witness_type not in ['legacy', 'segwit']:
            raise TransactionError("Please specify a valid witness type: legacy or segwit")
        # Derive txid from the double-SHA256 of the legacy serialization (reversed, hex) when not given
        if not self.txid:
            self.txid = self.signature_hash()[::-1].hex()
def __repr__(self):
return "<Transaction(id=%s, inputs=%d, outputs=%d, status=%s, network=%s)>" % \
(self.txid, len(self.inputs), len(self.outputs), self.status, self.network.name)
    def __str__(self):
        """Return the transaction ID as the string representation of this transaction."""
        return self.txid
def __add__(self, other):
"""
Merge this transaction with another transaction keeping the original transaction intact.
:return Transaction:
"""
t = deepcopy(self)
t.merge_transaction(other)
return t
def __eq__(self, other):
"""
Compare two transaction, must have same transaction ID
:param other: Other transaction object
:type other: Transaction
:return bool:
"""
if not isinstance(other, Transaction):
raise TransactionError("Can only compare with other Transaction object")
return self.txid == other.txid
def as_dict(self):
"""
Return Json dictionary with transaction information: Inputs, outputs, version and locktime
:return dict:
"""
inputs = []
outputs = []
for i in self.inputs:
inputs.append(i.as_dict())
for o in self.outputs:
outputs.append(o.as_dict())
return {
'txid': self.txid,
'date': self.date,
'network': self.network.name,
'witness_type': self.witness_type,
'coinbase': self.coinbase,
'flag': None if not self.flag else ord(self.flag),
'txhash': self.txhash,
'confirmations': self.confirmations,
'block_height': self.block_height,
'block_hash': self.block_hash,
'fee': self.fee,
'fee_per_kb': self.fee_per_kb,
'inputs': inputs,
'outputs': outputs,
'input_total': self.input_total,
'output_total': self.output_total,
'version': self.version_int,
'locktime': self.locktime,
'raw': self.raw_hex(),
'size': self.size,
'vsize': self.vsize,
'verified': self.verified,
'status': self.status
}
def as_json(self):
"""
Get current key as json formatted string
:return str:
"""
adict = self.as_dict()
return json.dumps(adict, indent=4)
    def info(self):
        """
        Prints transaction information to standard output
        """
        print("Transaction %s" % self.txid)
        print("Date: %s" % self.date)
        print("Network: %s" % self.network.name)
        # Locktime below 500000000 is a block height, otherwise a unix timestamp (consensus convention)
        if self.locktime and self.locktime != 0xffffffff:
            if self.locktime < 500000000:
                print("Locktime: Until block %d" % self.locktime)
            else:
                print("Locktime: Until %s UTC" % datetime.utcfromtimestamp(self.locktime))
        print("Version: %d" % self.version_int)
        print("Witness type: %s" % self.witness_type)
        print("Status: %s" % self.status)
        print("Verified: %s" % self.verified)
        print("Inputs")
        replace_by_fee = False
        for ti in self.inputs:
            print("-", ti.address, Value.from_satoshi(ti.value, network=self.network).str(1), ti.prev_txid.hex(),
                  ti.output_n_int)
            validstr = "not validated"
            if ti.valid:
                validstr = "valid"
            elif ti.valid is False:
                validstr = "invalid"
            print(" %s %s; sigs: %d (%d-of-%d) %s" %
                  (ti.witness_type, ti.script_type, len(ti.signatures), ti.sigs_required or 0, len(ti.keys), validstr))
            # Any input with sequence at or below the RBF threshold signals replace-by-fee (BIP125)
            if ti.sequence <= SEQUENCE_REPLACE_BY_FEE:
                replace_by_fee = True
            # Relative timelock per BIP68: type flag selects time (512s units) vs block based lock
            if ti.sequence <= SEQUENCE_LOCKTIME_DISABLE_FLAG:
                if ti.sequence & SEQUENCE_LOCKTIME_TYPE_FLAG:
                    print(" Relative timelock for %d seconds" % (512 * (ti.sequence - SEQUENCE_LOCKTIME_TYPE_FLAG)))
                else:
                    print(" Relative timelock for %d blocks" % ti.sequence)
            if ti.locktime_cltv:
                if ti.locktime_cltv & SEQUENCE_LOCKTIME_TYPE_FLAG:
                    print(" Check Locktime Verify (CLTV) for %d seconds" %
                          (512 * (ti.locktime_cltv - SEQUENCE_LOCKTIME_TYPE_FLAG)))
                else:
                    print(" Check Locktime Verify (CLTV) for %d blocks" % ti.locktime_cltv)
            if ti.locktime_csv:
                if ti.locktime_csv & SEQUENCE_LOCKTIME_TYPE_FLAG:
                    print(" Check Sequence Verify Timelock (CSV) for %d seconds" %
                          (512 * (ti.locktime_csv - SEQUENCE_LOCKTIME_TYPE_FLAG)))
                else:
                    print(" Check Sequence Verify Timelock (CSV) for %d blocks" % ti.locktime_csv)
        print("Outputs")
        for to in self.outputs:
            if to.script_type == 'nulldata':
                # OP_RETURN output: show the embedded data instead of an address
                print("- NULLDATA ", to.lock_script[2:])
            else:
                # spent flag: 'S' = spent, 'U' = unspent, '' = unknown (None)
                spent_str = ''
                if to.spent:
                    spent_str = 'S'
                elif to.spent is False:
                    spent_str = 'U'
                print("-", to.address, Value.from_satoshi(to.value, network=self.network).str(1), to.script_type,
                      spent_str)
        if replace_by_fee:
            print("Replace by fee: Enabled")
        print("Size: %s" % self.size)
        print("Vsize: %s" % self.vsize)
        print("Fee: %s" % self.fee)
        print("Confirmations: %s" % self.confirmations)
        print("Block: %s" % self.block_height)
    def set_locktime_relative_blocks(self, blocks, input_index_n=0):
        """
        Set nSequence relative locktime for this transaction. The transaction will only be valid if the specified number of blocks has been mined since the previous UTXO is confirmed.

        Maximum number of blocks is 65535 as defined in BIP-0068, which is around 455 days.

        When setting an relative timelock, the transaction version must be at least 2. The transaction will be updated
        so existing signatures for this input will be removed.

        :param blocks: The blocks value is the number of blocks since the previous transaction output has been confirmed.
        :type blocks: int
        :param input_index_n: Index number of input for nSequence locktime
        :type input_index_n: int

        :return None:
        """
        # 0 or 0xffffffff disables the relative locktime; reset the sequence and re-sign
        if blocks == 0 or blocks == 0xffffffff:
            self.inputs[input_index_n].sequence = 0xffffffff
            self.sign(index_n=input_index_n, replace_signatures=True)
            return
        if blocks > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of nSequence timelock blocks exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        # Block-based relative lock: the sequence field holds the block count directly (BIP68)
        self.inputs[input_index_n].sequence = blocks
        # BIP68 relative locktime semantics only apply from transaction version 2
        self.version_int = 2
        self.sign_and_update(index_n=input_index_n)
    def set_locktime_relative_time(self, seconds, input_index_n=0):
        """
        Set nSequence relative locktime for this transaction. The transaction will only be valid if the specified amount of seconds have been passed since the previous UTXO is confirmed.

        Number of seconds will be rounded down to a multiple of 512 seconds. Any value below 512 will be interpreted as 512 seconds.

        Maximum number of seconds is 33553920 (512 * 65535), which equals 384 days. See BIP-0068 definition.

        When setting an relative timelock, the transaction version must be at least 2. The transaction will be updated
        so existing signatures for this input will be removed.

        :param seconds: Number of seconds since the related previous transaction output has been confirmed.
        :type seconds: int
        :param input_index_n: Index number of input for nSequence locktime
        :type input_index_n: int

        :return:
        """
        # 0 or 0xffffffff disables the relative locktime; reset the sequence and re-sign
        if seconds == 0 or seconds == 0xffffffff:
            self.inputs[input_index_n].sequence = 0xffffffff
            self.sign(index_n=input_index_n, replace_signatures=True)
            return
        elif seconds < 512:
            # Minimum granularity of a time-based relative lock is one 512-second unit
            seconds = 512
        elif (seconds // 512) > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of relative nSeqence timelock seconds exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        # Time-based lock: store seconds in 512s units and set the BIP68 type flag
        self.inputs[input_index_n].sequence = seconds // 512 + SEQUENCE_LOCKTIME_TYPE_FLAG
        # BIP68 relative locktime semantics only apply from transaction version 2
        self.version_int = 2
        self.sign_and_update(index_n=input_index_n)
def set_locktime_blocks(self, blocks):
"""
Set nLocktime, a transaction level absolute lock time in blocks using the transaction sequence field.
So for example if you set this value to 600000 the transaction will only be valid after block 600000.
:param blocks: Transaction is valid after supplied block number. Value must be between 0 and 500000000. Zero means no locktime.
:type blocks: int
:return:
"""
if blocks == 0 or blocks == 0xffffffff:
self.locktime = 0xffffffff
self.sign(replace_signatures=True)
self.verify()
return
elif blocks > 500000000:
raise TransactionError("Number of locktime blocks must be below %d" % 500000000)
self.locktime = blocks
if blocks != 0 and blocks != 0xffffffff:
for i in self.inputs:
if i.sequence == 0xffffffff:
i.sequence = 0xfffffffd
self.sign_and_update()
    def set_locktime_time(self, timestamp):
        """
        Set nLocktime, a transaction level absolute lock time in timestamp using the transaction sequence field.

        :param timestamp: Transaction is valid after the given timestamp. Value must be between 500000000 and 0xfffffffe
        :type timestamp: int

        :return:
        """
        # 0 or 0xffffffff disables the locktime; reset, re-sign and verify
        if timestamp == 0 or timestamp == 0xffffffff:
            self.locktime = 0xffffffff
            self.sign(replace_signatures=True)
            self.verify()
            return
        # Values at or below 500000000 would be interpreted as block heights, not timestamps
        if timestamp <= 500000000:
            raise TransactionError("Timestamp must have a value higher then %d" % 500000000)
        if timestamp > 0xfffffffe:
            raise TransactionError("Timestamp must have a value lower then %d" % 0xfffffffe)
        self.locktime = timestamp
        # Input sequence value must be below 0xffffffff
        for i in self.inputs:
            if i.sequence == 0xffffffff:
                i.sequence = 0xfffffffd
        self.sign_and_update()
def signature_hash(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None, as_hex=False):
"""
Double SHA256 Hash of Transaction signature
:param sign_id: Index of input to sign
:type sign_id: int
:param hash_type: Specific hash type, default is SIGHASH_ALL
:type hash_type: int
:param witness_type: Legacy or Segwit witness type? Leave empty to use Transaction witness type
:type witness_type: str
:param as_hex: Return value as hexadecimal string. Default is False
:type as_hex: bool
:return bytes: Transaction signature hash
"""
return double_sha256(self.signature(sign_id, hash_type, witness_type), as_hex=as_hex)
def signature(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None):
"""
Serializes transaction and calculates signature for Legacy or Segwit transactions
:param sign_id: Index of input to sign
:type sign_id: int
:param hash_type: Specific hash type, default is SIGHASH_ALL
:type hash_type: int
:param witness_type: Legacy or Segwit witness type? Leave empty to use Transaction witness type
:type witness_type: str
:return bytes: Transaction signature
"""
if witness_type is None:
witness_type = self.witness_type
if witness_type == 'legacy' or sign_id is None:
return self.raw(sign_id, hash_type, 'legacy')
elif witness_type in ['segwit', 'p2sh-segwit']:
return self.signature_segwit(sign_id, hash_type)
else:
raise TransactionError("Witness_type %s not supported" % self.witness_type)
def signature_segwit(self, sign_id, hash_type=SIGHASH_ALL):
"""
Serialize transaction signature for segregated witness transaction
:param sign_id: Index of input to sign
:type sign_id: int
:param hash_type: Specific hash type, default is SIGHASH_ALL
:type hash_type: int
:return bytes: Segwit transaction signature
"""
assert (self.witness_type == 'segwit')
prevouts_serialized = b''
sequence_serialized = b''
outputs_serialized = b''
hash_prevouts = b'\0' * 32
hash_sequence = b'\0' * 32
hash_outputs = b'\0' * 32
for i in self.inputs:
prevouts_serialized += i.prev_txid[::-1] + i.output_n[::-1]
sequence_serialized += i.sequence.to_bytes(4, 'little')
if not hash_type & SIGHASH_ANYONECANPAY:
hash_prevouts = double_sha256(prevouts_serialized)
if (hash_type & 0x1f) != SIGHASH_SINGLE and (hash_type & 0x1f) != SIGHASH_NONE:
hash_sequence = double_sha256(sequence_serialized)
if (hash_type & 0x1f) != SIGHASH_SINGLE and (hash_type & 0x1f) != SIGHASH_NONE:
for o in self.outputs:
outputs_serialized += int(o.value).to_bytes(8, 'little')
outputs_serialized += varstr(o.lock_script)
hash_outputs = double_sha256(outputs_serialized)
elif (hash_type & 0x1f) != SIGHASH_SINGLE and sign_id < len(self.outputs):
outputs_serialized += int(self.outputs[sign_id].value).to_bytes(8, 'little')
outputs_serialized += varstr(self.outputs[sign_id].lock_script)
hash_outputs = double_sha256(outputs_serialized)
if not self.inputs[sign_id].value:
raise TransactionError("Need value of input %d to create transaction signature, value can not be 0" %
sign_id)
script_code = self.inputs[sign_id].redeemscript
if not script_code:
script_code = self.inputs[sign_id].script_code
if (not script_code or script_code == b'\0') and self.inputs[sign_id].script_type != 'unknown':
raise TransactionError("Script code missing")
ser_tx = \
self.version[::-1] + hash_prevouts + hash_sequence + self.inputs[sign_id].prev_txid[::-1] + \
self.inputs[sign_id].output_n[::-1] + \
varstr(script_code) + int(self.inputs[sign_id].value).to_bytes(8, 'little') + \
self.inputs[sign_id].sequence.to_bytes(4, 'little') + \
hash_outputs + self.locktime.to_bytes(4, 'little') + hash_type.to_bytes(4, 'little')
return ser_tx
    def raw(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None):
        """
        Serialize raw transaction

        Return transaction with signed inputs if signatures are available

        :param sign_id: Create raw transaction which can be signed by transaction with this input ID
        :type sign_id: int, None
        :param hash_type: Specific hash type, default is SIGHASH_ALL
        :type hash_type: int
        :param witness_type: Serialize transaction with other witness type then default. Use to create legacy raw transaction for segwit transaction to create transaction signature ID's
        :type witness_type: str

        :return bytes:
        """
        if witness_type is None:
            witness_type = self.witness_type
        # Version is stored big-endian internally, serialized little-endian
        r = self.version[::-1]
        if sign_id is None and witness_type == 'segwit':
            r += b'\x00'  # marker (BIP 141)
            r += b'\x01'  # flag (BIP 141)
        r += int_to_varbyteint(len(self.inputs))
        r_witness = b''
        for i in self.inputs:
            # Outpoint: previous txid and output index, both little-endian
            r += i.prev_txid[::-1] + i.output_n[::-1]
            # Collect witness data per input; inputs without witnesses contribute an empty (0x00) item count
            if i.witnesses and i.witness_type != 'legacy':
                r_witness += int_to_varbyteint(len(i.witnesses)) + b''.join([bytes(varstr(w)) for w in i.witnesses])
            else:
                r_witness += b'\0'
            if sign_id is None:
                r += varstr(i.unlocking_script)
            elif sign_id == i.index_n:
                # Signing preimage: only the signed input carries its (unsigned) script, others are empty
                r += varstr(i.unlocking_script_unsigned)
            else:
                r += b'\0'
            r += i.sequence.to_bytes(4, 'little')
        r += int_to_varbyteint(len(self.outputs))
        for o in self.outputs:
            if o.value < 0:
                raise TransactionError("Output value < 0 not allowed")
            r += int(o.value).to_bytes(8, 'little')
            r += varstr(o.lock_script)
        if sign_id is None and witness_type == 'segwit':
            r += r_witness
        r += self.locktime.to_bytes(4, 'little')
        if sign_id is not None:
            # Legacy signing preimage ends with the 4-byte sighash type
            r += hash_type.to_bytes(4, 'little')
        else:
            # Cache the serialized size, but only when all inputs carry an unlocking script
            if not self.size and b'' not in [i.unlocking_script for i in self.inputs]:
                self.size = len(r)
        return r
def raw_hex(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None):
"""
Wrapper for raw() method. Return current raw transaction hex
:param sign_id: Create raw transaction which can be signed by transaction with this input ID
:type sign_id: int
:param hash_type: Specific hash type, default is SIGHASH_ALL
:type hash_type: int
:param witness_type: Serialize transaction with other witness type then default. Use to create legacy raw transaction for segwit transaction to create transaction signature ID's
:type witness_type: str
:return hexstring:
"""
return self.raw(sign_id, hash_type=hash_type, witness_type=witness_type).hex()
def witness_data(self):
witness_data = b''
for i in self.inputs:
witness_data += int_to_varbyteint(len(i.witnesses)) + b''.join([bytes(varstr(w)) for w in i.witnesses])
return witness_data
    def verify(self):
        """
        Verify all inputs of a transaction, check if signatures match public key.

        Does not check if UTXO is valid or has already been spent

        :return bool: True if enough signatures provided and if all signatures are valid
        """
        self.verified = False
        for i in self.inputs:
            if i.script_type == 'coinbase':
                # Coinbase inputs have no signatures to check
                # NOTE(review): `break` stops checking any remaining inputs — presumably a coinbase
                # transaction has exactly one input; verify against callers
                i.valid = True
                break
            if not i.signatures:
                _logger.info("No signatures found for transaction input %d" % i.index_n)
                return False
            if len(i.signatures) < i.sigs_required:
                _logger.info("Not enough signatures provided. Found %d signatures but %d needed" %
                             (len(i.signatures), i.sigs_required))
                return False
            # Sighash for this input, using the input's own witness type
            transaction_hash = self.signature_hash(i.index_n, witness_type=i.witness_type)
            sig_id = 0
            key_n = 0
            # Walk the keys in order; a signature is accepted when it verifies against the next key.
            # Multisig signatures must therefore appear in key order.
            for key in i.keys:
                if sig_id > i.sigs_required - 1:
                    break
                if sig_id >= len(i.signatures):
                    _logger.info("No valid signatures found")
                    return False
                if not transaction_hash:
                    _logger.info("Need at least 1 key to create segwit transaction signature")
                    return False
                key_n += 1
                if verify(transaction_hash, i.signatures[sig_id], key):
                    sig_id += 1
                    i.valid = True
                else:
                    i.valid = False
            if sig_id < i.sigs_required:
                _logger.info("Not enough valid signatures provided for input %d. Found %d signatures but %d needed" %
                             (i.index_n, sig_id, i.sigs_required))
                return False
        self.verified = True
        return True
    def sign(self, keys=None, index_n=None, multisig_key_n=None, hash_type=SIGHASH_ALL, fail_on_unknown_key=True,
             replace_signatures=False):
        """
        Sign the transaction input with provided private key

        :param keys: A private key or list of private keys
        :type keys: HDKey, Key, bytes, list
        :param index_n: Index of transaction input. Leave empty to sign all inputs
        :type index_n: int
        :param multisig_key_n: Index number of key for multisig input for segwit transactions. Leave empty if not known. If not specified all possibilities will be checked
        :type multisig_key_n: int
        :param hash_type: Specific hash type, default is SIGHASH_ALL
        :type hash_type: int
        :param fail_on_unknown_key: Method fails if public key from signature is not found in public key list
        :type fail_on_unknown_key: bool
        :param replace_signatures: Replace signature with new one if already signed.
        :type replace_signatures: bool

        :return None:
        """
        # Determine which inputs to sign: one specific input or all of them
        if index_n is None:
            tids = range(len(self.inputs))
        else:
            tids = [index_n]
        if keys is None:
            keys = []
        elif not isinstance(keys, list):
            keys = [keys]
        for tid in tids:
            n_signs = 0
            # Normalize provided keys to Key/HDKey objects, then add the input's own private keys
            tid_keys = [k if isinstance(k, (HDKey, Key)) else Key(k, compressed=self.inputs[tid].compressed)
                        for k in keys]
            for k in self.inputs[tid].keys:
                if k.is_private and k not in tid_keys:
                    tid_keys.append(k)
            # If input does not contain any keys, try using provided keys
            if not self.inputs[tid].keys:
                self.inputs[tid].keys = tid_keys
                self.inputs[tid].update_scripts(hash_type=hash_type)
            if self.inputs[tid].script_type == 'coinbase':
                raise TransactionError("Can not sign coinbase transactions")
            pub_key_list = [k.public_byte for k in self.inputs[tid].keys]
            n_total_sigs = len(self.inputs[tid].keys)
            # sig_domain holds one slot per public key; '' marks an empty slot.
            # Signatures are placed at the position of their matching key to keep multisig key order.
            sig_domain = [''] * n_total_sigs

            txid = self.signature_hash(tid, witness_type=self.inputs[tid].witness_type)
            for key in tid_keys:
                # Check if signature signs known key and is not already in list
                if key.public_byte not in pub_key_list:
                    if fail_on_unknown_key:
                        raise TransactionError("This key does not sign any known key: %s" % key.public_hex)
                    else:
                        _logger.info("This key does not sign any known key: %s" % key.public_hex)
                        continue
                if not replace_signatures and key in [x.public_key for x in self.inputs[tid].signatures]:
                    _logger.info("Key %s already signed" % key.public_hex)
                    break
                if not key.private_byte:
                    raise TransactionError("Please provide a valid private key to sign the transaction")
                sig = sign(txid, key)
                newsig_pos = pub_key_list.index(key.public_byte)
                sig_domain[newsig_pos] = sig
                n_signs += 1
            if not n_signs:
                break
            # Add already known signatures on correct position
            n_sigs_to_insert = len(self.inputs[tid].signatures)
            for sig in self.inputs[tid].signatures:
                if not sig.public_key:
                    break
                newsig_pos = pub_key_list.index(sig.public_key.public_byte)
                if sig_domain[newsig_pos] == '':
                    sig_domain[newsig_pos] = sig
                    n_sigs_to_insert -= 1
            # Signatures whose public key is unknown are dropped into the first free slots
            if n_sigs_to_insert:
                for sig in self.inputs[tid].signatures:
                    free_positions = [i for i, s in enumerate(sig_domain) if s == '']
                    for pos in free_positions:
                        sig_domain[pos] = sig
                        n_sigs_to_insert -= 1
                        break
            if n_sigs_to_insert:
                _logger.info("Some signatures are replaced with the signatures of the provided keys")
            # Compact the domain: keep only filled slots, preserving key order
            self.inputs[tid].signatures = [s for s in sig_domain if s != '']
            self.inputs[tid].update_scripts(hash_type)
    def sign_and_update(self, index_n=None):
        """
        Update transaction ID and resign. Use if some properties of the transaction changed

        :param index_n: Index of transaction input. Leave empty to sign all inputs
        :type index_n: int

        :return:
        """
        # Re-derive the 4-byte version from version_int in case the version changed (e.g. locktime setters)
        self.version = self.version_int.to_bytes(4, 'big')
        self.sign(index_n=index_n, replace_signatures=True)
        # Recompute txid, size and weight from the freshly signed serialization
        self.txid = self.signature_hash()[::-1].hex()
        self.size = len(self.raw())
        self.calc_weight_units()
        self.update_totals()
        if self.fee:
            self.fee_per_kb = int((self.fee / float(self.size)) * 1024)
    def add_input(self, prev_txid, output_n, keys=None, signatures=None, public_hash=b'', unlocking_script=b'',
                  unlocking_script_unsigned=None, script_type=None, address='',
                  sequence=0xffffffff, compressed=True, sigs_required=None, sort=False, index_n=None,
                  value=None, double_spend=False, locktime_cltv=None, locktime_csv=None,
                  key_path='', witness_type=None, witnesses=None, encoding=None):
        """
        Add input to this transaction

        Wrapper for append method of Input class.

        :param prev_txid: Transaction hash of the UTXO (previous output) which will be spent.
        :type prev_txid: bytes, hexstring
        :param output_n: Output number in previous transaction.
        :type output_n: bytes, int
        :param keys: Public keys can be provided to construct an Unlocking script. Optional
        :type keys: bytes, str
        :param signatures: Add signatures to input if already known
        :type signatures: bytes, str
        :param public_hash: Specify public hash from key or redeemscript if key is not available
        :type public_hash: bytes
        :param unlocking_script: Unlocking script (scriptSig) to prove ownership. Optional
        :type unlocking_script: bytes, hexstring
        :param unlocking_script_unsigned: Unlocking script (scriptSig) without signatures, used when creating the signing preimage
        :type unlocking_script_unsigned: bytes, str
        :param script_type: Type of unlocking script used, i.e. p2pkh or p2sh_multisig. Default is p2pkh
        :type script_type: str
        :param address: Specify address of input if known, default is to derive from key or scripts
        :type address: str, Address
        :param sequence: Sequence part of input, used for timelocked transactions
        :type sequence: int, bytes
        :param compressed: Use compressed or uncompressed public keys. Default is compressed
        :type compressed: bool
        :param sigs_required: Number of signatures required for a p2sh_multisig unlocking script
        :type sigs_required: int
        :param sort: Sort public keys according to BIP0045 standard. Default is False to avoid unexpected change of key order.
        :type sort: boolean
        :param index_n: Index number of position in transaction, leave empty to add input to end of inputs list
        :type index_n: int
        :param value: Value of input
        :type value: int
        :param double_spend: True if double spend is detected, depends on which service provider is selected
        :type double_spend: bool
        :param locktime_cltv: Check Lock Time Verify value. Script level absolute time lock for this input
        :type locktime_cltv: int
        :param locktime_csv: Check Sequency Verify value.
        :type locktime_csv: int
        :param key_path: Key path of input key as BIP32 string or list
        :type key_path: str, list
        :param witness_type: Specify witness/signature position: 'segwit' or 'legacy'. Determine from script, address or encoding if not specified.
        :type witness_type: str
        :param witnesses: List of witnesses for inputs, used for segwit transactions for instance.
        :type witnesses: list of bytes
        :param encoding: Address encoding used. For example bech32/base32 or base58. Leave empty to derive from script or script type
        :type encoding: str

        :return int: Transaction index number (index_n)
        """
        if index_n is None:
            index_n = len(self.inputs)
        sequence_int = sequence
        if isinstance(sequence, bytes):
            sequence_int = int.from_bytes(sequence, 'little')
        # A sequence value in the BIP68 relative-locktime range requires transaction version 2
        if self.version == b'\x00\x00\x00\x01' and 0 < sequence_int < SEQUENCE_LOCKTIME_DISABLE_FLAG:
            self.version = b'\x00\x00\x00\x02'
            self.version_int = 2
        self.inputs.append(
            Input(prev_txid=prev_txid, output_n=output_n, keys=keys, signatures=signatures, public_hash=public_hash,
                  unlocking_script=unlocking_script, unlocking_script_unsigned=unlocking_script_unsigned,
                  script_type=script_type, address=address, sequence=sequence, compressed=compressed,
                  sigs_required=sigs_required, sort=sort, index_n=index_n, value=value, double_spend=double_spend,
                  locktime_cltv=locktime_cltv, locktime_csv=locktime_csv, key_path=key_path, witness_type=witness_type,
                  witnesses=witnesses, encoding=encoding, network=self.network.name))
        return index_n
    def add_output(self, value, address='', public_hash=b'', public_key=b'', lock_script=b'', spent=False,
                   output_n=None, encoding=None, spending_txid=None, spending_index_n=None):
        """
        Add an output to this transaction

        Wrapper for the append method of the Output class.

        :param value: Value of output in smallest denominator of currency, for example satoshi's for bitcoins
        :type value: int
        :param address: Destination address of output. Leave empty to derive from other attributes you provide.
        :type address: str, Address
        :param public_hash: Hash of public key or script
        :type public_hash: bytes, str
        :param public_key: Destination public key
        :type public_key: bytes, str
        :param lock_script: Locking script of output. If not provided a default unlocking script will be provided with a public key hash.
        :type lock_script: bytes, str
        :param spent: Has output been spent in new transaction?
        :type spent: bool, None
        :param output_n: Index number of output in transaction
        :type output_n: int
        :param encoding: Address encoding used. For example bech32/base32 or base58. Leave empty to derive from script or script type
        :type encoding: str
        :param spending_txid: Transaction hash of input spending this transaction output
        :type spending_txid: str
        :param spending_index_n: Index number of input spending this transaction output
        :type spending_index_n: int

        :return int: Transaction output number (output_n)
        """
        lock_script = to_bytes(lock_script)
        if output_n is None:
            output_n = len(self.outputs)
        # Reject fractional amounts: value must represent a whole number of the smallest unit
        if not float(value).is_integer():
            raise TransactionError("Output must be of type integer and contain no decimals")
        # 0x6a is OP_RETURN; such data outputs must carry no value
        if lock_script.startswith(b'\x6a'):
            if value != 0:
                raise TransactionError("Output value for OP_RETURN script must be 0")
        self.outputs.append(Output(value=int(value), address=address, public_hash=public_hash,
                                   public_key=public_key, lock_script=lock_script, spent=spent, output_n=output_n,
                                   encoding=encoding, spending_txid=spending_txid, spending_index_n=spending_index_n,
                                   network=self.network.name))
        return output_n
def merge_transaction(self, transaction):
"""
Merge this transaction with provided Transaction object.
Add all inputs and outputs of a transaction to this Transaction object. Because the transaction signature
changes with this operation, the transaction inputs need to be signed again.
Can be used to implement CoinJoin. Where two or more unrelated Transactions are merged into 1 transaction
to safe fees and increase privacy.
:param transaction: The transaction to be merged
:type transaction: Transaction
"""
self.inputs += transaction.inputs
self.outputs += transaction.outputs
self.shuffle()
self.update_totals()
self.sign_and_update()
    def estimate_size(self, number_of_change_outputs=0):
        """
        Get estimated vsize in for current transaction based on transaction type and number of inputs and outputs.
        For old-style legacy transaction the vsize is the length of the transaction. In segwit transaction the
        witness data has less weight. The formula used is: math.ceil(((est_size-witness_size) * 3 + est_size) / 4)
        :param number_of_change_outputs: Number of change outputs, default is 0
        :type number_of_change_outputs: int
        :return int: Estimated transaction size
        """
        # if self.input_total and self.output_total + self.fee == self.input_total:
        #     add_change_output = False
        est_size = 10  # base size: presumably version + locktime + in/out counters -- TODO confirm
        witness_size = 2
        if self.witness_type != 'legacy':
            est_size += 2  # segwit marker and flag bytes
        for inp in self.inputs:
            est_size += 40  # outpoint (36) + sequence (4); script length/data added below
            scr_size = 0
            if inp.witness_type != 'legacy':
                est_size += 1
            if inp.unlocking_script and len(inp.signatures) >= inp.sigs_required:
                # Input is fully signed: use the actual unlocking script size
                scr_size += len(varstr(inp.unlocking_script))
                if inp.witness_type == 'p2sh-segwit':
                    scr_size += sum([1 + len(w) for w in inp.witnesses])
            else:
                # Not (fully) signed yet: estimate from script type.
                # The constants below are typical sizes per script type -- assumption, not derivable here
                if inp.script_type == 'sig_pubkey':
                    scr_size += 107
                    if not inp.compressed:
                        scr_size += 33  # uncompressed public keys are 32 bytes larger (65 vs 33)
                    if inp.witness_type == 'p2sh-segwit':
                        scr_size += 24
                # elif inp.script_type in ['p2sh_multisig', 'p2sh_p2wpkh', 'p2sh_p2wsh']:
                elif inp.script_type == 'p2sh_multisig':
                    scr_size += 9 + (len(inp.keys) * 34) + (inp.sigs_required * 72)
                    if inp.witness_type == 'p2sh-segwit':
                        scr_size += 17 * inp.sigs_required
                elif inp.script_type == 'signature':
                    scr_size += 9 + 72
                else:
                    raise TransactionError("Unknown input script type %s cannot estimate transaction size" %
                                           inp.script_type)
            est_size += scr_size
            witness_size += scr_size
        if not self.inputs:
            est_size += 147  # If nothing is known assume 1 p2sh/p2pkh input
        for outp in self.outputs:
            est_size += 8  # output value field
            if outp.lock_script:
                est_size += len(varstr(outp.lock_script))
            else:
                raise TransactionError("Need locking script for output %d to estimate size" % outp.output_n)
        if number_of_change_outputs:
            # Estimate size of not-yet-created change outputs from the first input's type
            is_multisig = True if self.inputs and self.inputs[0].script_type == 'p2sh_multisig' else False
            co_size = 8
            if not self.inputs or self.inputs[0].witness_type == 'legacy':
                co_size += 24 if is_multisig else 26
            elif self.inputs[0].witness_type == 'p2sh-segwit':
                co_size += 24
            else:
                co_size += 33 if is_multisig else 23
            est_size += (number_of_change_outputs * co_size)
        self.size = est_size
        self.vsize = est_size
        if self.witness_type == 'legacy':
            return est_size
        else:
            # Segwit discount: witness bytes weigh 1/4; -1.5 is a rounding correction -- TODO confirm
            self.vsize = math.ceil((((est_size - witness_size) * 3 + est_size) / 4) - 1.5)
            return self.vsize
def calc_weight_units(self):
if not self.size:
return None
wu = self.size * 4
if self.witness_type == 'segwit':
wu = wu - 6 # for segwit marker and flag
wu = wu - len(self.witness_data()) * 3
self.vsize = math.ceil(wu / 4)
return wu
    @property
    def weight_units(self):
        """Weight units of this transaction; recalculated on every access via calc_weight_units()."""
        return self.calc_weight_units()
    def calculate_fee(self):
        """
        Get fee for this transaction in smallest denominator (i.e. Satoshi) based on its size and the
        transaction.fee_per_kb value
        :return int: Estimated transaction fee
        """
        if not self.fee_per_kb:
            raise TransactionError("Cannot calculate transaction fees: transaction.fee_per_kb is not set")
        # NOTE(review): divisor is 1024 although 'per kb' fee rates are usually per 1000 (v)bytes -- confirm intended unit
        fee = int(self.estimate_size() / 1024.0 * self.fee_per_kb)
        # FIXME: fee is in kb, network.fee_min in sat/kb
        # Clamp the estimated fee to the network's minimum/maximum fee
        if fee < self.network.fee_min:
            fee = self.network.fee_min
        elif fee > self.network.fee_max:
            fee = self.network.fee_max
        return fee
def update_totals(self):
"""
Update input_total, output_total and fee according to inputs and outputs of this transaction
:return int:
"""
self.input_total = sum([i.value for i in self.inputs if i.value])
self.output_total = sum([o.value for o in self.outputs if o.value])
# self.fee = 0
if self.input_total:
self.fee = self.input_total - self.output_total
def save(self, filename=None):
"""
Store transaction object as file so it can be imported in bitcoinlib later with the :func:`load` method.
:param filename: Location and name of file, leave empty to store transaction in bitcoinlib data directory: .bitcoinlib/<transaction_id.tx)
:type filename: str
:return:
"""
if not filename:
p = Path(BCL_DATA_DIR, '%s.tx' % self.txid)
else:
p = Path(filename)
if not p.parent or str(p.parent) == '.':
p = Path(BCL_DATA_DIR, filename)
f = p.open('wb')
pickle.dump(self, f)
f.close()
def shuffle_inputs(self):
"""
Shuffle transaction inputs in random order.
:return:
"""
random.shuffle(self.inputs)
for idx, o in enumerate(self.inputs):
o.index_n = idx
def shuffle_outputs(self):
"""
Shuffle transaction outputs in random order.
:return:
"""
random.shuffle(self.outputs)
for idx, o in enumerate(self.outputs):
o.output_n = idx
def shuffle(self):
"""
Shuffle transaction inputs and outputs in random order.
:return:
"""
self.shuffle_inputs()
self.shuffle_outputs()
| 45.578419 | 244 | 0.60107 |
from datetime import datetime
import json
import pickle
import random
from bitcoinlib.encoding import *
from bitcoinlib.config.opcodes import *
from bitcoinlib.keys import HDKey, Key, deserialize_address, Address, sign, verify, Signature
from bitcoinlib.networks import Network
from bitcoinlib.values import Value, value_to_satoshi
_logger = logging.getLogger(__name__)
class TransactionError(Exception):
    """Handle exceptions raised by the transaction module."""
    def __init__(self, msg=''):
        # Log every raised transaction error so failures show up in the module log
        self.msg = msg
        _logger.error(msg)
    def __str__(self):
        return self.msg
def transaction_deserialize(rawtx, network=DEFAULT_NETWORK, check_size=True):
    """Deserialize a raw transaction into a Transaction object.

    Parses version, optional segwit marker/flag, inputs, outputs, witness
    data and locktime from the raw transaction bytes.

    :param rawtx: Raw transaction as bytes or hexadecimal string
    :param network: Network name or Network object the transaction belongs to
    :param check_size: Verify that exactly 4 bytes (the locktime) remain after parsing, default True
    :return Transaction: Deserialized transaction object
    :raises TransactionError: For malformed raw transactions
    """
    rawtx = to_bytes(rawtx)
    version = rawtx[0:4][::-1]
    coinbase = False
    flag = None
    witness_type = 'legacy'
    cursor = 4
    # A zero byte after the version field is the segwit marker; the next byte is the flag
    if rawtx[4:5] == b'\0':
        flag = rawtx[5:6]
        if flag == b'\1':
            witness_type = 'segwit'
        cursor += 2
    n_inputs, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
    cursor += size
    inputs = []
    if not isinstance(network, Network):
        network = Network(network)
    for n in range(0, n_inputs):
        # Previous transaction hashes are serialized little-endian; reverse to internal byte order
        inp_hash = rawtx[cursor:cursor + 32][::-1]
        if not len(inp_hash):
            raise TransactionError("Input transaction hash not found. Probably malformed raw transaction")
        # An all-zero previous transaction hash marks a coinbase input
        if inp_hash == 32 * b'\0':
            coinbase = True
        output_n = rawtx[cursor + 32:cursor + 36][::-1]
        cursor += 36
        unlocking_script_size, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
        cursor += size
        unlocking_script = rawtx[cursor:cursor + unlocking_script_size]
        inp_type = 'legacy'
        # An empty unlocking script in a segwit transaction means the input data is in the witness
        if witness_type == 'segwit' and not unlocking_script_size:
            inp_type = 'segwit'
        cursor += unlocking_script_size
        sequence_number = rawtx[cursor:cursor + 4]
        cursor += 4
        inputs.append(Input(prev_txid=inp_hash, output_n=output_n, unlocking_script=unlocking_script,
                            witness_type=inp_type, sequence=sequence_number, index_n=n, network=network))
    if len(inputs) != n_inputs:
        raise TransactionError("Error parsing inputs. Number of tx specified %d but %d found" % (n_inputs, len(inputs)))
    outputs = []
    n_outputs, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
    cursor += size
    output_total = 0
    for n in range(0, n_outputs):
        # Output value is an 8-byte little-endian integer
        value = int.from_bytes(rawtx[cursor:cursor + 8][::-1], 'big')
        cursor += 8
        lock_script_size, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
        cursor += size
        lock_script = rawtx[cursor:cursor + lock_script_size]
        cursor += lock_script_size
        outputs.append(Output(value=value, lock_script=lock_script, network=network, output_n=n))
        output_total += value
    if not outputs:
        raise TransactionError("Error no outputs found in this transaction")
    if witness_type == 'segwit':
        # Parse the witness stack: one list of witness items per input
        for n in range(0, len(inputs)):
            n_items, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
            cursor += size
            witnesses = []
            for m in range(0, n_items):
                witness = b'\0'
                item_size, size = varbyteint_to_int(rawtx[cursor:cursor + 9])
                if item_size:
                    witness = rawtx[cursor + size:cursor + item_size + size]
                cursor += item_size + size
                witnesses.append(witness)
            if witnesses and not coinbase:
                # Classify witness items as signatures, public keys or a multisig redeemscript
                script_type = inputs[n].script_type
                witness_script_type = 'sig_pubkey'
                signatures = []
                keys = []
                sigs_required = 1
                public_hash = b''
                for witness in witnesses:
                    if witness == b'\0':
                        continue
                    # DER encoded signatures are 70-74 bytes and start with 0x30
                    if 70 <= len(witness) <= 74 and witness[0:1] == b'\x30':
                        signatures.append(witness)
                    elif len(witness) == 33 and len(signatures) == 1:
                        keys.append(witness)
                    else:
                        rsds = script_deserialize(witness, script_types=['multisig'])
                        if not rsds['script_type'] == 'multisig':
                            _logger.warning("Could not parse witnesses in transaction. Multisig redeemscript expected")
                            witness_script_type = 'unknown'
                            script_type = 'unknown'
                        else:
                            keys = rsds['signatures']
                            sigs_required = rsds['number_of_sigs_m']
                            witness_script_type = 'p2sh'
                            script_type = 'p2sh_multisig'
                inp_witness_type = inputs[n].witness_type
                usd = script_deserialize(inputs[n].unlocking_script, locking_script=True)
                # Nested segwit (p2sh-segwit) inputs have a p2wpkh/p2wsh script in the unlocking script
                if usd['script_type'] == "p2wpkh" and witness_script_type == 'sig_pubkey':
                    inp_witness_type = 'p2sh-segwit'
                    script_type = 'p2sh_p2wpkh'
                elif usd['script_type'] == "p2wsh" and witness_script_type == 'p2sh':
                    inp_witness_type = 'p2sh-segwit'
                    script_type = 'p2sh_p2wsh'
                # Rebuild the input now that witness information is known
                inputs[n] = Input(prev_txid=inputs[n].prev_txid, output_n=inputs[n].output_n, keys=keys,
                                  unlocking_script_unsigned=inputs[n].unlocking_script_unsigned,
                                  unlocking_script=inputs[n].unlocking_script, sigs_required=sigs_required,
                                  signatures=signatures, witness_type=inp_witness_type, script_type=script_type,
                                  sequence=inputs[n].sequence, index_n=inputs[n].index_n, public_hash=public_hash,
                                  network=inputs[n].network, witnesses=witnesses)
    if len(rawtx[cursor:]) != 4 and check_size:
        raise TransactionError("Error when deserializing raw transaction, bytes left for locktime must be 4 not %d" %
                               len(rawtx[cursor:]))
    locktime = int.from_bytes(rawtx[cursor:cursor + 4][::-1], 'big')
    return Transaction(inputs, outputs, locktime, version, network, size=cursor + 4, output_total=output_total,
                       coinbase=coinbase, flag=flag, witness_type=witness_type, rawtx=rawtx)
def script_deserialize(script, script_types=None, locking_script=None, size_bytes_check=True):
    """Deserialize a transaction script and determine its type and contents.

    Matches the script against the known locking/unlocking script templates
    and extracts keys, signatures, hashes, redeemscript and locktimes.

    :param script: Script as bytes or hexadecimal string
    :param script_types: Limit parsing to this list of script types, default is all known types
    :param locking_script: True to check locking script types only, False for unlocking only, None for both
    :param size_bytes_check: Check if the script starts with a varint size byte and strip it, default True
    :return dict: Parsed script data: script_type, keys, signatures, hashes, redeemscript,
        number_of_sigs_m/n, locktime_cltv, locktime_csv and a 'result' message
    """
    def _parse_data(scr, max_items=None, redeemscript_expected=False, item_length=0):
        # Extract consecutive data pushes (signatures, keys or hashes) from the start of scr
        items = []
        total_length = 0
        # A bare DER signature (70-74 bytes, starts with 0x30) is returned as a single item
        if 70 <= len(scr) <= 74 and scr[:1] == b'\x30':
            return [scr], len(scr)
        while len(scr) and (max_items is None or max_items > len(items)):
            itemlen, size = varbyteint_to_int(scr[0:9])
            if item_length and itemlen != item_length:
                break
            # Only typical hash/key/signature lengths are accepted when no explicit length is given
            if not item_length and itemlen not in [20, 33, 65, 70, 71, 72, 73]:
                break
            if redeemscript_expected and len(scr[itemlen + 1:]) < 20:
                break
            items.append(scr[1:itemlen + 1])
            total_length += itemlen + size
            scr = scr[itemlen + 1:]
        return items, total_length
    def _get_empty_data():
        # Fresh result dictionary with default values
        return {'script_type': '', 'keys': [], 'signatures': [], 'hashes': [], 'redeemscript': b'',
                'number_of_sigs_n': 1, 'number_of_sigs_m': 1, 'locktime_cltv': None, 'locktime_csv': None, 'result': ''}
    def _parse_script(script):
        # Try each candidate template in script_types until one matches the full script
        found = False
        cur = 0
        data = _get_empty_data()
        for script_type in script_types:
            cur = 0
            try:
                ost = SCRIPT_TYPES_UNLOCKING[script_type]
            except KeyError:
                ost = SCRIPT_TYPES_LOCKING[script_type]
            data = _get_empty_data()
            data['script_type'] = script_type
            found = True
            # Walk the template tokens and consume matching script bytes
            for ch in ost:
                if cur >= len(script):
                    found = False
                    break
                cur_char = script[cur]
                if ch[:4] == 'hash':
                    hash_length = 0
                    if len(ch) > 5:
                        hash_length = int(ch.split("-")[1])
                    s, total_length = _parse_data(script[cur:], 1, item_length=hash_length)
                    if not s:
                        found = False
                        break
                    data['hashes'] += s
                    cur += total_length
                elif ch == 'signature':
                    signature_length = 0
                    s, total_length = _parse_data(script[cur:], 1, item_length=signature_length)
                    if not s:
                        found = False
                        break
                    data['signatures'] += s
                    cur += total_length
                elif ch == 'public_key':
                    pk_size, size = varbyteint_to_int(script[cur:cur + 9])
                    key = script[cur + size:cur + size + pk_size]
                    if not key:
                        found = False
                        break
                    data['keys'].append(key)
                    cur += size + pk_size
                elif ch == 'OP_RETURN':
                    if cur_char == opcodes['OP_RETURN'] and cur == 0:
                        data.update({'op_return': script[cur + 1:]})
                        cur = len(script)
                        found = True
                        break
                    else:
                        found = False
                        break
                elif ch == 'multisig':
                    redeemscript_expected = False
                    if 'redeemscript' in ost:
                        redeemscript_expected = True
                    s, total_length = _parse_data(script[cur:], redeemscript_expected=redeemscript_expected)
                    if not s:
                        found = False
                        break
                    data['signatures'] += s
                    cur += total_length
                elif ch == 'redeemscript':
                    # 0x4c/0x4d/0x4e are OP_PUSHDATA1/2/4 size prefixes for large redeemscripts
                    size_byte = 0
                    if script[cur:cur + 1] == b'\x4c':
                        size_byte = 1
                    elif script[cur:cur + 1] == b'\x4d':
                        size_byte = 2
                    elif script[cur:cur + 1] == b'\x4e':
                        size_byte = 3
                    data['redeemscript'] = script[cur + 1 + size_byte:]
                    data2 = script_deserialize(data['redeemscript'], locking_script=True)
                    if 'signatures' not in data2 or not data2['signatures']:
                        found = False
                        break
                    data['keys'] = data2['signatures']
                    data['number_of_sigs_m'] = data2['number_of_sigs_m']
                    data['number_of_sigs_n'] = data2['number_of_sigs_n']
                    cur = len(script)
                elif ch == 'push_size':
                    push_size, size = varbyteint_to_int(script[cur:cur + 9])
                    found = bool(len(script[cur:]) - size == push_size)
                    if not found:
                        break
                elif ch == 'op_m':
                    # OP_1..OP_16 encode the number of required signatures
                    if cur_char in OP_N_CODES:
                        data['number_of_sigs_m'] = cur_char - opcodes['OP_1'] + 1
                    else:
                        found = False
                        break
                    cur += 1
                elif ch == 'op_n':
                    if cur_char in OP_N_CODES:
                        data['number_of_sigs_n'] = cur_char - opcodes['OP_1'] + 1
                    else:
                        found = False
                        break
                    if data['number_of_sigs_m'] > data['number_of_sigs_n']:
                        raise TransactionError("Number of signatures to sign (%s) is higher then actual "
                                               "amount of signatures (%s)" %
                                               (data['number_of_sigs_m'], data['number_of_sigs_n']))
                    if len(data['signatures']) > int(data['number_of_sigs_n']):
                        raise TransactionError("%d signatures found, but %s sigs expected" %
                                               (len(data['signatures']), data['number_of_sigs_n']))
                    cur += 1
                elif ch == 'SIGHASH_ALL':
                    pass
                elif ch == 'locktime_cltv':
                    if len(script) < 4:
                        found = False
                        break
                    data['locktime_cltv'] = int.from_bytes(script[cur:cur + 4], 'little')
                    cur += 4
                elif ch == 'locktime_csv':
                    if len(script) < 4:
                        found = False
                        break
                    data['locktime_csv'] = int.from_bytes(script[cur:cur + 4], 'little')
                    cur += 4
                else:
                    # Any other token must match a literal opcode at this position
                    try:
                        if cur_char == opcodes[ch]:
                            cur += 1
                        else:
                            found = False
                            data = _get_empty_data()
                            break
                    except IndexError:
                        raise TransactionError("Opcode %s not found [type %s]" % (ch, script_type))
            if found and not len(script[cur:]):
                break
        if found and not len(script[cur:]):
            return data, script[cur:]
        data = _get_empty_data()
        data['result'] = 'Script not recognised'
        return data, ''
    data = _get_empty_data()
    script = to_bytes(script)
    if not script:
        data.update({'result': 'Empty script'})
        return data
    if size_bytes_check:
        # The script may be prefixed with a varint length byte; strip it and retry once
        script_size, size = varbyteint_to_int(script[0:9])
        if len(script[1:]) == script_size:
            data = script_deserialize(script[1:], script_types, locking_script, size_bytes_check=False)
            if 'result' in data and data['result'][:22] not in \
                    ['Script not recognised', 'Empty script', 'Could not parse script']:
                return data
    if script_types is None:
        if locking_script is None:
            script_types = dict(SCRIPT_TYPES_UNLOCKING, **SCRIPT_TYPES_LOCKING)
        elif locking_script:
            script_types = SCRIPT_TYPES_LOCKING
        else:
            script_types = SCRIPT_TYPES_UNLOCKING
    elif not isinstance(script_types, list):
        script_types = [script_types]
    locktime_cltv = 0
    locktime_csv = 0
    # Parse repeatedly: locktime prefixes can precede the actual script
    while len(script):
        begin_script = script
        data, script = _parse_script(script)
        if begin_script == script:
            break
        if script and data['script_type'] == 'locktime_cltv':
            locktime_cltv = data['locktime_cltv']
        if script and data['script_type'] == 'locktime_csv':
            locktime_csv = data['locktime_csv']
    if data and data['result'] != 'Script not recognised':
        data['locktime_cltv'] = locktime_cltv
        data['locktime_csv'] = locktime_csv
        return data
    wrn_msg = "Could not parse script, unrecognized script"
    data = _get_empty_data()
    data['result'] = wrn_msg
    return data
def script_to_string(script, name_data=False):
    """Return a human readable string representation of a transaction script.

    :param script: Script as bytes or hexadecimal string
    :param name_data: Show placeholder names ('signature-1', 'key-1', ...) instead of hexadecimal data
    :return str: Space separated script string; empty string for empty or unparsable scripts
    """
    parsed = script_deserialize(script)
    if not parsed or parsed['script_type'] == 'empty':
        return ""
    signatures = parsed['signatures']
    if name_data:
        # Public keys are 33 (compressed) or 65 bytes; anything else is treated as a signature
        label = 'key' if signatures and len(signatures[0]) in [33, 65] else 'signature'
        sigs = ' '.join('%s-%d' % (label, seq) for seq in range(1, len(signatures) + 1))
    else:
        sigs = ' '.join(sig.hex() for sig in signatures)
    script_type = parsed['script_type']
    if script_type in SCRIPT_TYPES_LOCKING:
        template = SCRIPT_TYPES_LOCKING[script_type]
    else:
        template = SCRIPT_TYPES_UNLOCKING[script_type]
    parts = [sigs if item in ['signature', 'multisig', 'return_data'] else item for item in template]
    if 'redeemscript' in parsed and parsed['redeemscript']:
        # Recursively render the embedded redeemscript
        redeem_str = script_to_string(parsed['redeemscript'], name_data=name_data)
        parts = [redeem_str if item == 'redeemscript' else item for item in parts]
    # Replace op_m/op_n placeholders with the actual OP_1..OP_16 opcode names
    for tag, count_key in (('op_m', 'number_of_sigs_m'), ('op_n', 'number_of_sigs_n')):
        parts = [opcodenames[80 + int(parsed[count_key])] if item == tag else item for item in parts]
    return ' '.join(parts)
def _serialize_multisig_redeemscript(public_key_list, n_required=None):
    """Serialize a multisig redeemscript: OP_m <pubkey1> ... <pubkeyN> OP_n OP_CHECKMULTISIG.

    :param public_key_list: List of public keys as bytes or string
    :param n_required: Number of required signatures (m); defaults to the number of keys
    :return bytes: Serialized redeemscript
    :raises TransactionError: When a list item is not of type string or bytes
    """
    for key in public_key_list:
        if not isinstance(key, (str, bytes)):
            # Bugfix: the original message contained a bare '%s' that was never formatted with the item
            raise TransactionError("Item %s in public_key_list is not of type string or bytes" % str(key))
    if n_required is None:
        n_required = len(public_key_list)

    # OP_m: number of required signatures (OP_1 .. OP_16)
    script = int_to_varbyteint(opcodes['OP_1'] + n_required - 1)
    for key in public_key_list:
        script += varstr(key)
    # OP_n: total number of keys, followed by OP_CHECKMULTISIG (0xae)
    script += int_to_varbyteint(opcodes['OP_1'] + len(public_key_list) - 1)
    script += b'\xae'

    return script
def serialize_multisig_redeemscript(key_list, n_required=None, compressed=True):
    """Serialize a multisig redeemscript from a list of keys.

    Keys may be supplied as Key objects, raw public key bytes or hexadecimal
    strings; anything else is handed to the Key class for parsing.

    :param key_list: List of public keys
    :param n_required: Number of required signatures; defaults to the number of keys
    :param compressed: Use compressed public keys for Key objects, default True
    :return bytes: Serialized redeemscript, empty bytes for an empty key list
    """
    if not key_list:
        return b''
    if not isinstance(key_list, list):
        raise TransactionError("Argument public_key_list must be of type list")
    if len(key_list) > 15:
        raise TransactionError("Redeemscripts with more then 15 keys are non-standard and could result in "
                               "locked up funds")
    public_key_list = []
    for key in key_list:
        if isinstance(key, Key):
            public_key_list.append(key.public_byte if compressed else key.public_uncompressed_byte)
        elif (len(key) == 65 and key[0:1] == b'\x04') or (len(key) == 33 and key[0:1] in [b'\x02', b'\x03']):
            # Already raw public key bytes
            public_key_list.append(key)
        elif (len(key) == 132 and key[0:2] == '04') or (len(key) == 66 and key[0:2] in ['02', '03']):
            # Hexadecimal public key string.
            # NOTE(review): 132 looks off -- uncompressed hex keys are 130 chars; such keys
            # currently fall through to the Key() parsing branch below. Confirm intended.
            public_key_list.append(bytes.fromhex(key))
        else:
            parsed_key = Key(key)
            public_key_list.append(parsed_key.public_byte if compressed else parsed_key.public_uncompressed_byte)
    return _serialize_multisig_redeemscript(public_key_list, n_required)
def _p2sh_multisig_unlocking_script(sigs, redeemscript, hash_type=None, as_list=False):
    """Build a p2sh multisig unlocking script: zero byte, signatures, redeemscript.

    The leading zero byte is presumably the dummy element consumed by
    OP_CHECKMULTISIG -- TODO confirm.

    :param sigs: Signature or list of signatures as bytes
    :param redeemscript: Serialized redeemscript as bytes
    :param hash_type: Hash type byte appended to every signature when provided
    :param as_list: Return a list of witness items instead of a serialized bytes script
    :return bytes, list: Unlocking script
    """
    usu = b'\x00'
    if as_list:
        usu = [usu]
    if not isinstance(sigs, list):
        sigs = [sigs]
    for sig in sigs:
        s = sig
        if hash_type:
            s += hash_type.to_bytes(1, 'big')
        if as_list:
            usu.append(s)
        else:
            usu += varstr(s)
    rs_size = b''
    size_byte = b''
    if not as_list:
        rs_size = int_to_varbyteint(len(redeemscript))
        if len(rs_size) > 1:
            rs_size = rs_size[1:]
        # Redeemscripts of 76 bytes or more need an OP_PUSHDATA1/2/4 (0x4c/0x4d/0x4e) prefix
        if len(redeemscript) >= 76:
            if len(rs_size) == 1:
                size_byte = b'\x4c'
            elif len(rs_size) == 2:
                size_byte = b'\x4d'
            else:
                size_byte = b'\x4e'
    redeemscript_str = size_byte + rs_size + redeemscript
    if as_list:
        usu.append(redeemscript_str)
    else:
        usu += redeemscript_str
    return usu
def script_add_locktime_cltv(locktime_cltv, script):
    """Prefix a script with an absolute (CLTV) locktime.

    The locktime is encoded as a 4-byte little-endian value followed by
    OP_CHECKLOCKTIMEVERIFY OP_DROP. A script that already carries such a
    prefix is returned unchanged.

    :param locktime_cltv: Absolute locktime as integer
    :param script: Script as bytes
    :return bytes: Script with locktime prefix
    """
    lock_ops = opcode('OP_CHECKLOCKTIMEVERIFY') + opcode('OP_DROP')
    already_prefixed = script and len(script) > 6 and script[4:6] == lock_ops
    if already_prefixed:
        return script
    return locktime_cltv.to_bytes(4, 'little') + lock_ops + script
def script_add_locktime_csv(locktime_csv, script):
    """Prefix a script with a relative (CSV) locktime.

    The locktime is encoded as a 4-byte little-endian value followed by
    OP_CHECKSEQUENCEVERIFY OP_DROP. A script that already carries such a
    prefix is returned unchanged.

    :param locktime_csv: Relative locktime as integer
    :param script: Script as bytes
    :return bytes: Script with locktime prefix
    """
    lock_ops = opcode('OP_CHECKSEQUENCEVERIFY') + opcode('OP_DROP')
    already_prefixed = script and len(script) > 6 and script[4:6] == lock_ops
    if already_prefixed:
        return script
    return locktime_csv.to_bytes(4, 'little') + lock_ops + script
def get_unlocking_script_type(locking_script_type, witness_type='legacy', multisig=False):
    """Derive the unlocking script type needed to spend an output with the given locking script type.

    :param locking_script_type: Locking script type: 'p2pkh', 'p2wpkh', 'p2wsh', 'p2sh' or 'p2pk'
    :param witness_type: Witness type of the input, 'legacy' or 'segwit', default 'legacy'
    :param multisig: Is this a multisig script? Default False
    :return str: Unlocking script type: 'sig_pubkey', 'p2sh_multisig' or 'signature'
    :raises TransactionError: For unknown locking script types
    """
    if locking_script_type in ('p2pkh', 'p2wpkh'):
        return 'sig_pubkey'
    if locking_script_type == 'p2wsh' or (witness_type == 'legacy' and multisig):
        return 'p2sh_multisig'
    if locking_script_type == 'p2sh':
        return 'p2sh_multisig' if multisig else 'sig_pubkey'
    if locking_script_type == 'p2pk':
        return 'signature'
    raise TransactionError("Unknown locking script type %s" % locking_script_type)
def transaction_update_spents(txs, address):
    """Update spent information of all outputs belonging to the given address.

    Scans the transactions in *txs* and, for every output to *address* that
    is spent by another transaction in the list, sets the output's spent,
    spending_txid and spending_index_n attributes.

    :param txs: List of Transaction objects
    :param address: Address string to check
    :return list: The same list of Transaction objects, updated in place
    """
    # Map (previous txid, output index) -> transaction spending that outpoint
    spent_by = {}
    for tx in txs:
        for inp in tx.inputs:
            if inp.address == address:
                spent_by[(inp.prev_txid.hex(), inp.output_n_int)] = tx
    for tx in txs:
        for outp in tx.outputs:
            if outp.address != address:
                continue
            outpoint = (tx.txid, outp.output_n)
            is_spent = outpoint in spent_by
            txs[txs.index(tx)].outputs[outp.output_n].spent = is_spent
            if is_spent:
                spending_tx = spent_by[outpoint]
                spending_inputs = [inp for inp in txs[txs.index(spending_tx)].inputs
                                   if inp.prev_txid.hex() == tx.txid and inp.output_n_int == outp.output_n]
                txs[txs.index(tx)].outputs[outp.output_n].spending_txid = spending_tx.txid
                txs[txs.index(tx)].outputs[outp.output_n].spending_index_n = spending_inputs[0].index_n
    return txs
class Input(object):
    """Transaction input: a reference to an output of a previous transaction plus its unlocking data.

    Collects the previous transaction id and output index together with keys,
    signatures, scripts and witnesses, and derives missing information
    (addresses, script types, public hashes) where possible.
    """
    def __init__(self, prev_txid, output_n, keys=None, signatures=None, public_hash=b'', unlocking_script=b'',
                 unlocking_script_unsigned=None, script_type=None, address='',
                 sequence=0xffffffff, compressed=None, sigs_required=None, sort=False, index_n=0,
                 value=0, double_spend=False, locktime_cltv=None, locktime_csv=None, key_path='', witness_type=None,
                 witnesses=None, encoding=None, network=DEFAULT_NETWORK):
        """Create a new transaction input.

        Most parameters are optional. Keys, signatures, script type, witness
        type, encoding and address are derived from the supplied unlocking
        script or witnesses when not given explicitly.
        """
        self.prev_txid = to_bytes(prev_txid)
        self.output_n = output_n
        # Keep both the integer and the 4-byte big-endian representation of the output index
        if isinstance(output_n, int):
            self.output_n_int = output_n
            self.output_n = output_n.to_bytes(4, 'big')
        else:
            self.output_n_int = int.from_bytes(output_n, 'big')
            self.output_n = output_n
        self.unlocking_script = b'' if unlocking_script is None else to_bytes(unlocking_script)
        self.unlocking_script_unsigned = b'' if unlocking_script_unsigned is None \
            else to_bytes(unlocking_script_unsigned)
        if isinstance(sequence, numbers.Number):
            self.sequence = sequence
        else:
            self.sequence = int.from_bytes(sequence, 'little')
        self.compressed = compressed
        self.network = network
        if not isinstance(network, Network):
            self.network = Network(network)
        self.index_n = index_n
        self.value = value_to_satoshi(value, network=network)
        if not keys:
            keys = []
        self.keys = []
        if not isinstance(keys, list):
            keys = [keys]
        self.public_hash = public_hash
        if not signatures:
            signatures = []
        if not isinstance(signatures, list):
            signatures = [signatures]
        self.sort = sort
        # An Address object carries encoding, network and script type information
        if isinstance(address, Address):
            self.address = address.address
            self.encoding = address.encoding
            self.network = address.network
            self.script_type = address.script_type
        else:
            self.address = address
        self.signatures = []
        self.redeemscript = b''
        self.script_type = script_type
        # An all-zero previous transaction hash marks a coinbase input
        if self.prev_txid == b'\0' * 32:
            self.script_type = 'coinbase'
        if not sigs_required:
            if self.script_type == 'p2sh_multisig':
                pass
            else:
                sigs_required = 1
        self.double_spend = double_spend
        self.locktime_cltv = locktime_cltv
        self.locktime_csv = locktime_csv
        self.witness_type = witness_type
        if encoding is None:
            self.encoding = 'base58'
            if self.witness_type == 'segwit':
                self.encoding = 'bech32'
        else:
            self.encoding = encoding
        self.valid = None
        self.key_path = key_path
        self.witnesses = witnesses if witnesses else []
        self.script_code = b''
        # Derive keys, signatures and script type from a provided (un)locking script
        if self.unlocking_script and self.script_type != 'coinbase' and not (signatures and keys):
            us_dict = script_deserialize(self.unlocking_script)
            if not us_dict:
                raise TransactionError("Could not parse unlocking script (%s)" % self.unlocking_script.hex())
            if us_dict['script_type'] not in ['', 'unknown', 'empty']:
                self.sigs_required = us_dict['number_of_sigs_n']
                self.redeemscript = us_dict['redeemscript']
                if us_dict['signatures'] not in signatures:
                    signatures += us_dict['signatures']
                if not keys:
                    keys = us_dict['keys']
                sigs_required = us_dict['number_of_sigs_m']
                if not signatures and not self.public_hash:
                    self.public_hash = us_dict['hashes'][0]
                if not self.script_type:
                    self.script_type = us_dict['script_type']
                # A p2wpkh/p2wsh script inside an unlocking script means nested segwit
                if us_dict['script_type'] == 'p2wsh':
                    self.script_type = 'p2sh_p2wsh'
                elif us_dict['script_type'] == 'p2wpkh':
                    self.script_type = 'p2sh_p2wpkh'
        elif unlocking_script_unsigned and not signatures:
            ls_dict = script_deserialize(unlocking_script_unsigned, locking_script=True)
            if ls_dict['hashes']:
                self.public_hash = ls_dict['hashes'][0]
            if ls_dict['script_type'] in ['p2wpkh', 'p2wsh']:
                self.witness_type = 'segwit'
            self.script_type = get_unlocking_script_type(ls_dict['script_type'])
        self.sigs_required = sigs_required
        # Derive witness type and encoding from the script type when not given
        if self.script_type is None and self.witness_type is None and self.witnesses:
            self.witness_type = 'segwit'
        if self.witness_type is None or self.witness_type == 'legacy':
            if self.script_type in ['p2wpkh', 'p2wsh']:
                self.witness_type = 'segwit'
            elif self.script_type in ['p2sh_p2wpkh', 'p2sh_p2wsh']:
                self.witness_type = 'p2sh-segwit'
            else:
                self.witness_type = 'legacy'
        elif self.witness_type == 'segwit' and self.script_type == 'sig_pubkey' and encoding is None:
            self.encoding = 'bech32'
        if not self.script_type:
            self.script_type = 'sig_pubkey'
        # Normalize all keys to Key objects, skipping duplicates
        for key in keys:
            if not isinstance(key, Key):
                kobj = Key(key, network=network)
            else:
                kobj = key
            if kobj not in self.keys:
                self.compressed = kobj.compressed
                self.keys.append(kobj)
        if self.compressed is None:
            self.compressed = True
        if self.sort:
            self.keys.sort(key=lambda k: k.public_byte)
        self.hash_type = SIGHASH_ALL
        # Normalize all signatures to Signature objects, skipping duplicates and unparsable items
        for sig in signatures:
            if not isinstance(sig, Signature):
                try:
                    sig = Signature.from_str(sig)
                except Exception as e:
                    _logger.error("Could not parse signature %s in Input. Error: %s" % (to_hexstring(sig), e))
                    continue
            if sig.as_der_encoded() not in [x.as_der_encoded() for x in self.signatures]:
                self.signatures.append(sig)
                if sig.hash_type:
                    self.hash_type = sig.hash_type
        self.update_scripts(hash_type=self.hash_type)
    def set_locktime_relative_blocks(self, blocks):
        """Set a relative locktime in number of blocks via the nSequence field.

        A value of 0 or 0xffffffff disables the relative locktime. Existing
        signatures are cleared because the input needs to be signed again.
        """
        if blocks == 0 or blocks == 0xffffffff:
            self.sequence = 0xffffffff
            return
        if blocks > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of nSequence timelock blocks exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        self.sequence = blocks
        self.signatures = []
    def set_locktime_relative_time(self, seconds):
        """Set a relative locktime in seconds via the nSequence field.

        The time is stored in units of 512 seconds; values below 512 are
        rounded up to one unit. A value of 0 or 0xffffffff disables the
        relative locktime. Existing signatures are cleared because the input
        needs to be signed again.
        """
        if seconds == 0 or seconds == 0xffffffff:
            self.sequence = 0xffffffff
            return
        if seconds < 512:
            seconds = 512
        if (seconds // 512) > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of relative nSeqence timelock seconds exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        self.sequence = seconds // 512 + SEQUENCE_LOCKTIME_TYPE_FLAG
        self.signatures = []
    def update_scripts(self, hash_type=SIGHASH_ALL):
        """Rebuild the (un)locking scripts, script code, witnesses and address of this input.

        Called after keys or signatures change. Behaviour depends on the
        input's script type and witness type.
        """
        addr_data = b''
        unlock_script = b''
        if self.script_type in ['sig_pubkey', 'p2sh_p2wpkh']:
            if not self.keys and not self.public_hash:
                if self.unlocking_script_unsigned:
                    script_dict = script_deserialize(self.unlocking_script_unsigned)
                    if script_dict['script_type'] == 'p2pkh':
                        self.public_hash = script_dict['hashes'][0]
                    else:
                        return
                else:
                    return
            if not self.public_hash:
                self.public_hash = self.keys[0].hash160
            # Standard p2pkh script: OP_DUP OP_HASH160 <hash> OP_EQUALVERIFY OP_CHECKSIG
            self.script_code = b'\x76\xa9\x14' + self.public_hash + b'\x88\xac'
            self.unlocking_script_unsigned = self.script_code
            addr_data = self.public_hash
            if self.signatures and self.keys:
                self.witnesses = [self.signatures[0].as_der_encoded() +
                                  hash_type.to_bytes(1, 'big') if hash_type else b'', self.keys[0].public_byte]
                unlock_script = b''.join([bytes(varstr(w)) for w in self.witnesses])
            elif self.witnesses and not self.signatures and not self.keys and \
                    self.script_type in ['sig_pubkey', 'p2sh_p2wpkh']:
                # Recover signature and key from existing witness data
                self.signatures = [self.witnesses[0]]
                self.keys = [Key(self.witnesses[1], network=self.network)]
            if self.witness_type == 'p2sh-segwit':
                self.unlocking_script = varstr(b'\0' + varstr(self.public_hash))
            elif self.witness_type == 'segwit':
                self.unlocking_script = b''
            elif unlock_script != b'':
                self.unlocking_script = unlock_script
        elif self.script_type in ['p2sh_multisig', 'p2sh_p2wsh']:
            if not self.redeemscript and self.keys:
                self.redeemscript = serialize_multisig_redeemscript(self.keys, n_required=self.sigs_required,
                                                                    compressed=self.compressed)
            if self.redeemscript:
                if self.witness_type == 'segwit' or self.witness_type == 'p2sh-segwit':
                    self.public_hash = hashlib.sha256(self.redeemscript).digest()
                else:
                    self.public_hash = hash160(self.redeemscript)
            addr_data = self.public_hash
            self.unlocking_script_unsigned = self.redeemscript
            if self.redeemscript and self.keys:
                # First byte of the redeemscript is OP_m (0x51 + m - 1), so m = byte - 80
                n_tag = self.redeemscript[0:1]
                if not isinstance(n_tag, int):
                    n_tag = int.from_bytes(n_tag, 'big')
                self.sigs_required = n_tag - 80
            signatures = [s.as_der_encoded() for s in self.signatures[:self.sigs_required]]
            if b'' in signatures:
                raise TransactionError("Empty signature found in signature list when signing. "
                                       "Is DER encoded version of signature defined?")
            if len(signatures):
                us_as_list = False
                if self.witness_type in ['segwit', 'p2sh-segwit']:
                    us_as_list = True
                unlock_script = _p2sh_multisig_unlocking_script(signatures, self.redeemscript, hash_type,
                                                                as_list=us_as_list)
            if self.witness_type == 'segwit':
                script_code = b''
                for k in self.keys:
                    script_code += varstr(k.public_byte) + b'\xad\xab'
                if len(script_code) > 3:
                    script_code = script_code[:-2] + b'\xac'
                self.script_code = script_code
                if signatures:
                    self.witnesses = unlock_script
            elif self.witness_type == 'p2sh-segwit':
                self.unlocking_script = varstr(b'\0' + varstr(self.public_hash))
                self.script_code = self.unlocking_script
                if signatures:
                    self.witnesses = unlock_script
            elif unlock_script != b'':
                self.unlocking_script = unlock_script
        elif self.script_type == 'signature':
            if self.keys:
                # p2pk script: <pubkey> OP_CHECKSIG
                self.script_code = varstr(self.keys[0].public_byte) + b'\xac'
                self.unlocking_script_unsigned = self.script_code
                addr_data = self.keys[0].public_byte
            if self.signatures:
                self.unlocking_script = varstr(self.signatures[0].as_der_encoded() + hash_type.to_bytes(1, 'big'))
        elif self.script_type not in ['coinbase', 'unknown']:
            raise TransactionError("Unknown unlocking script type %s for input %d" % (self.script_type, self.index_n))
        if addr_data and not self.address:
            self.address = Address(hashed_data=addr_data, encoding=self.encoding, network=self.network,
                                   script_type=self.script_type, witness_type=self.witness_type).address
        # Prefix scripts with a locktime when one is set on this input
        if self.locktime_cltv:
            self.unlocking_script_unsigned = script_add_locktime_cltv(self.locktime_cltv,
                                                                      self.unlocking_script_unsigned)
            self.unlocking_script = script_add_locktime_cltv(self.locktime_cltv, self.unlocking_script)
        elif self.locktime_csv:
            self.unlocking_script_unsigned = script_add_locktime_csv(self.locktime_csv, self.unlocking_script_unsigned)
            self.unlocking_script = script_add_locktime_csv(self.locktime_csv, self.unlocking_script)
        return True
    def as_dict(self):
        """Return this input's information as a dictionary with serializable values."""
        pks = []
        for k in self.keys:
            pks.append(k.public_hex)
        # A single key is returned as a plain string instead of a list
        if len(self.keys) == 1:
            pks = pks[0]
        return {
            'index_n': self.index_n,
            'prev_txid': self.prev_txid.hex(),
            'output_n': self.output_n_int,
            'script_type': self.script_type,
            'address': self.address,
            'value': self.value,
            'public_keys': pks,
            'compressed': self.compressed,
            'encoding': self.encoding,
            'double_spend': self.double_spend,
            'script': self.unlocking_script.hex(),
            'redeemscript': self.redeemscript.hex(),
            'sequence': self.sequence,
            'signatures': [s.hex() for s in self.signatures],
            'sigs_required': self.sigs_required,
            'locktime_cltv': self.locktime_cltv,
            'locktime_csv': self.locktime_csv,
            'public_hash': self.public_hash.hex(),
            'script_code': self.script_code.hex(),
            'unlocking_script': self.unlocking_script.hex(),
            'unlocking_script_unsigned': self.unlocking_script_unsigned.hex(),
            'witness_type': self.witness_type,
            'witness': b''.join(self.witnesses).hex(),
            'sort': self.sort,
            'valid': self.valid,
        }
    def __repr__(self):
        return "<Input(prev_txid='%s', output_n=%d, address='%s', index_n=%s, type='%s')>" % \
               (self.prev_txid.hex(), self.output_n_int, self.address, self.index_n, self.script_type)
class Output(object):
    """A transaction output: a value locked to an address / public key (hash) by a locking script."""

    def __init__(self, value, address='', public_hash=b'', public_key=b'', lock_script=b'', spent=False,
                 output_n=0, script_type=None, encoding=None, spending_txid='', spending_index_n=None,
                 network=DEFAULT_NETWORK):
        """Create a new transaction output.

        At least one of address, public_hash, public_key or lock_script must be
        provided; the remaining fields (script type, encoding, public hash,
        address and locking script) are derived from whatever was given.

        :param value: output value; converted to satoshi via value_to_satoshi
        :param address: address string, Address object or HDKey object
        :param public_hash: public key hash or script hash as bytes/hex
        :param public_key: public key as bytes/hex
        :param lock_script: raw locking script as bytes/hex
        :param spent: True if this output is known to be spent
        :param output_n: output index within the transaction
        :param script_type: locking script type, derived if not given
        :param encoding: address encoding ('base58' or 'bech32'), derived if not given
        :param spending_txid: txid of the transaction spending this output, if any
        :param spending_index_n: input index within the spending transaction
        :param network: network name or Network object
        :raises TransactionError: when no script source is given, when the address
            network conflicts with the transaction network, or when the script
            type cannot be determined
        """
        # Need at least one way to derive the locking script.
        if not (address or public_hash or public_key or lock_script):
            raise TransactionError("Please specify address, lock_script, public key or public key hash when "
                                   "creating output")
        self.network = network
        if not isinstance(network, Network):
            self.network = Network(network)
        self.value = value_to_satoshi(value, network=network)
        self.lock_script = b'' if lock_script is None else to_bytes(lock_script)
        self.public_hash = to_bytes(public_hash)
        # Accept Address and HDKey objects as well as plain address strings.
        if isinstance(address, Address):
            self.address = address.address
            self.address_obj = address
        elif isinstance(address, HDKey):
            self.address = address.address()
            self.address_obj = address.address_obj
            public_key = address.public_byte
            if not script_type:
                script_type = script_type_default(address.witness_type, address.multisig, True)
            self.public_hash = address.hash160
        else:
            self.address = address
            self.address_obj = None
        self.public_key = to_bytes(public_key)
        self.compressed = True
        self.k = None
        self.versionbyte = self.network.prefix_address
        self.script_type = script_type
        self.encoding = encoding
        # Without an address to derive from, fall back to base58 encoding.
        if not self.address and self.encoding is None:
            self.encoding = 'base58'
        self.spent = spent
        self.output_n = output_n
        # Prefer metadata from a supplied Address object over loose arguments.
        if self.address_obj:
            self.script_type = self.address_obj.script_type if script_type is None else script_type
            self.public_hash = self.address_obj.hash_bytes
            self.network = self.address_obj.network
            self.encoding = self.address_obj.encoding
        # Derive the public key hash from the public key if needed.
        if self.public_key and not self.public_hash:
            k = Key(self.public_key, is_private=False, network=network)
            self.public_hash = k.hash160
        elif self.address and (not self.public_hash or not self.script_type or not self.encoding):
            # Parse the address string to recover hash, script type and encoding,
            # and verify it belongs to this transaction's network.
            address_dict = deserialize_address(self.address, self.encoding, self.network.name)
            if address_dict['script_type'] and not script_type:
                self.script_type = address_dict['script_type']
            if not self.script_type:
                raise TransactionError("Could not determine script type of address %s" % self.address)
            self.encoding = address_dict['encoding']
            network_guesses = address_dict['networks']
            if address_dict['network'] and self.network.name != address_dict['network']:
                raise TransactionError("Address %s is from %s network and transaction from %s network" %
                                       (self.address, address_dict['network'], self.network.name))
            elif self.network.name not in network_guesses:
                raise TransactionError("Network for output address %s is different from transaction network. %s not "
                                       "in %s" % (self.address, self.network.name, network_guesses))
            self.public_hash = address_dict['public_key_hash_bytes']
        if not self.encoding:
            self.encoding = 'base58'
            if self.script_type in ['p2wpkh', 'p2wsh']:
                self.encoding = 'bech32'
        # If only a locking script was given, deserialize it to recover type,
        # hashes and keys.
        if self.lock_script and not self.public_hash:
            ss = script_deserialize(self.lock_script, locking_script=True)
            self.script_type = ss['script_type']
            if self.script_type in ['p2wpkh', 'p2wsh']:
                self.encoding = 'bech32'
            if ss['hashes']:
                self.public_hash = ss['hashes'][0]
            if ss['keys']:
                self.public_key = ss['keys'][0]
                k = Key(self.public_key, is_private=False, network=network)
                self.public_hash = k.hash160
        # Defaults when the type is still unknown: p2pkh for base58, p2wpkh for bech32.
        if self.script_type is None:
            self.script_type = 'p2pkh'
            if self.encoding == 'bech32':
                self.script_type = 'p2wpkh'
        # Build the address (and version byte) from the public hash if missing.
        if self.public_hash and not self.address:
            self.address_obj = Address(hashed_data=self.public_hash, script_type=self.script_type,
                                       encoding=self.encoding, network=self.network)
            self.address = self.address_obj.address
            self.versionbyte = self.address_obj.prefix
        # Construct a standard locking script from the hash / key when none was given.
        if self.lock_script == b'':
            if self.script_type == 'p2pkh':
                self.lock_script = b'\x76\xa9\x14' + self.public_hash + b'\x88\xac'
            elif self.script_type == 'p2sh':
                self.lock_script = b'\xa9\x14' + self.public_hash + b'\x87'
            elif self.script_type == 'p2wpkh':
                self.lock_script = b'\x00\x14' + self.public_hash
            elif self.script_type == 'p2wsh':
                self.lock_script = b'\x00\x20' + self.public_hash
            elif self.script_type == 'p2pk':
                if not self.public_key:
                    raise TransactionError("Public key is needed to create P2PK script for output %d" % output_n)
                self.lock_script = varstr(self.public_key) + b'\xac'
            else:
                raise TransactionError("Unknown output script type %s, please provide locking script" %
                                       self.script_type)
        self.spending_txid = spending_txid
        self.spending_index_n = spending_index_n
def as_dict(self):
return {
'value': self.value,
'script': self.lock_script.hex(),
'script_type': self.script_type,
'public_key': self.public_key.hex(),
'public_hash': self.public_hash.hex(),
'address': self.address,
'output_n': self.output_n,
'spent': self.spent,
'spending_txid': self.spending_txid,
'spending_index_n': self.spending_index_n,
}
def __repr__(self):
return "<Output(value=%d, address=%s, type=%s)>" % (self.value, self.address, self.script_type)
class Transaction(object):
    """A bitcoin(-like) transaction: a list of Inputs spending previous outputs
    and a list of Outputs locking value to new destinations, plus serialization,
    signing and verification logic."""

    @staticmethod
    def import_raw(rawtx, network=DEFAULT_NETWORK, check_size=True):
        """Deserialize a raw transaction (bytes or hex) into a Transaction object.

        :param rawtx: raw transaction as bytes or hexadecimal string
        :param network: network name or Network object the transaction belongs to
        :param check_size: verify that the full raw string is consumed
        :return Transaction:
        """
        return transaction_deserialize(rawtx, network=network, check_size=check_size)
@staticmethod
def load(txid=None, filename=None):
if not filename and not txid:
raise TransactionError("Please supply filename or txid")
elif not filename and txid:
p = Path(BCL_DATA_DIR, '%s.tx' % txid)
else:
p = Path(filename)
if not p.parent or str(p.parent) == '.':
p = Path(BCL_DATA_DIR, filename)
f = p.open('rb')
t = pickle.load(f)
f.close()
return t
    def __init__(self, inputs=None, outputs=None, locktime=0, version=None,
                 network=DEFAULT_NETWORK, fee=None, fee_per_kb=None, size=None, txid='', txhash='', date=None,
                 confirmations=None, block_height=None, block_hash=None, input_total=0, output_total=0, rawtx=b'',
                 status='new', coinbase=False, verified=False, witness_type='legacy', flag=None):
        """Create a new Transaction.

        :param inputs: list of Input objects; input_total and unique index_n
            values are derived if missing
        :param outputs: list of Output objects; output_total is derived if missing
        :param locktime: nLockTime as block height (<500000000) or timestamp
        :param version: transaction version as int or 4 bytes; defaults to 1
        :param network: network name or Network object
        :param fee: transaction fee in satoshi; derived from totals when omitted
        :param fee_per_kb: fee per kilobyte, used for fee estimation
        :param size: known transaction size in bytes
        :param txid: transaction id; calculated from the serialization if empty
        :param txhash: segwit transaction hash (wtxid)
        :param date: confirmation date/time
        :param confirmations: number of network confirmations
        :param block_height: height of the confirming block
        :param block_hash: hash of the confirming block
        :param input_total: total value of all inputs in satoshi
        :param output_total: total value of all outputs in satoshi
        :param rawtx: raw serialized transaction bytes
        :param status: transaction status string, 'new' by default
        :param coinbase: True for a coinbase (block reward) transaction
        :param verified: True when signatures have been verified
        :param witness_type: 'legacy' or 'segwit'
        :param flag: segwit marker/flag byte, if any
        :raises TransactionError: when outputs exceed inputs (non-coinbase) or
            an unknown witness type is given
        """
        self.coinbase = coinbase
        self.inputs = []
        if inputs is not None:
            for inp in inputs:
                self.inputs.append(inp)
            if not input_total:
                input_total = sum([i.value for i in inputs])
        # Re-number inputs when duplicate index_n values are detected.
        id_list = [i.index_n for i in self.inputs]
        if list(set(id_list)) != id_list:
            _logger.info("Identical transaction indexes (tid) found in inputs, please specify unique index. "
                         "Indexes will be automatically recreated")
            index_n = 0
            for inp in self.inputs:
                inp.index_n = index_n
                index_n += 1
        if outputs is None:
            self.outputs = []
        else:
            self.outputs = outputs
            if not output_total:
                output_total = sum([o.value for o in outputs])
        # Derive the fee from the totals; only a coinbase may have zero fee.
        if fee is None and output_total and input_total:
            fee = input_total - output_total
            if fee < 0 or fee == 0 and not self.coinbase:
                raise TransactionError("Transaction inputs total value must be greater then total value of "
                                       "transaction outputs")
        if not version:
            version = b'\x00\x00\x00\x01'
        # Version is stored internally as 4 big-endian bytes plus an int;
        # serialization reverses the bytes (see raw()).
        if isinstance(version, int):
            self.version = version.to_bytes(4, 'big')
            self.version_int = version
        else:
            self.version = version
            self.version_int = int.from_bytes(version, 'big')
        self.locktime = locktime
        self.network = network
        if not isinstance(network, Network):
            self.network = Network(network)
        self.flag = flag
        self.fee = fee
        self.fee_per_kb = fee_per_kb
        self.size = size
        self.vsize = size
        self.txid = txid
        self.txhash = txhash
        self.date = date
        self.confirmations = confirmations
        self.block_height = block_height
        self.block_hash = block_hash
        self.input_total = input_total
        self.output_total = output_total
        self.rawtx = rawtx
        self.status = status
        self.verified = verified
        self.witness_type = witness_type
        self.change = 0
        # Updates self.vsize when a size is known.
        self.calc_weight_units()
        if self.witness_type not in ['legacy', 'segwit']:
            raise TransactionError("Please specify a valid witness type: legacy or segwit")
        if not self.txid:
            self.txid = self.signature_hash()[::-1].hex()
def __repr__(self):
return "<Transaction(id=%s, inputs=%d, outputs=%d, status=%s, network=%s)>" % \
(self.txid, len(self.inputs), len(self.outputs), self.status, self.network.name)
    def __str__(self):
        """Return the transaction id as string representation."""
        return self.txid
    def __add__(self, other):
        """Combine two transactions with '+': returns a deep copy of this
        transaction with the other transaction's inputs and outputs merged in
        (see merge_transaction); neither operand is modified."""
        t = deepcopy(self)
        t.merge_transaction(other)
        return t
    def __eq__(self, other):
        """Two transactions are equal when their txids match.

        NOTE(review): raising on a non-Transaction operand deviates from the
        Python convention of returning NotImplemented; kept because callers may
        rely on catching TransactionError here.
        """
        if not isinstance(other, Transaction):
            raise TransactionError("Can only compare with other Transaction object")
        return self.txid == other.txid
def as_dict(self):
inputs = []
outputs = []
for i in self.inputs:
inputs.append(i.as_dict())
for o in self.outputs:
outputs.append(o.as_dict())
return {
'txid': self.txid,
'date': self.date,
'network': self.network.name,
'witness_type': self.witness_type,
'coinbase': self.coinbase,
'flag': None if not self.flag else ord(self.flag),
'txhash': self.txhash,
'confirmations': self.confirmations,
'block_height': self.block_height,
'block_hash': self.block_hash,
'fee': self.fee,
'fee_per_kb': self.fee_per_kb,
'inputs': inputs,
'outputs': outputs,
'input_total': self.input_total,
'output_total': self.output_total,
'version': self.version_int,
'locktime': self.locktime,
'raw': self.raw_hex(),
'size': self.size,
'vsize': self.vsize,
'verified': self.verified,
'status': self.status
}
def as_json(self):
adict = self.as_dict()
return json.dumps(adict, indent=4)
    def info(self):
        """Print a human-readable overview of the transaction to stdout:
        metadata, locktime, every input (with script, signature and timelock
        details) and every output, followed by size and fee totals."""
        print("Transaction %s" % self.txid)
        print("Date: %s" % self.date)
        print("Network: %s" % self.network.name)
        # Absolute locktime: values below 500000000 are block heights,
        # otherwise unix timestamps.
        if self.locktime and self.locktime != 0xffffffff:
            if self.locktime < 500000000:
                print("Locktime: Until block %d" % self.locktime)
            else:
                print("Locktime: Until %s UTC" % datetime.utcfromtimestamp(self.locktime))
        print("Version: %d" % self.version_int)
        print("Witness type: %s" % self.witness_type)
        print("Status: %s" % self.status)
        print("Verified: %s" % self.verified)
        print("Inputs")
        replace_by_fee = False
        for ti in self.inputs:
            print("-", ti.address, Value.from_satoshi(ti.value, network=self.network).str(1), ti.prev_txid.hex(),
                  ti.output_n_int)
            validstr = "not validated"
            if ti.valid:
                validstr = "valid"
            elif ti.valid is False:
                validstr = "invalid"
            print("  %s %s; sigs: %d (%d-of-%d) %s" %
                  (ti.witness_type, ti.script_type, len(ti.signatures), ti.sigs_required or 0, len(ti.keys), validstr))
            # A sequence below SEQUENCE_REPLACE_BY_FEE signals opt-in RBF (BIP125).
            if ti.sequence <= SEQUENCE_REPLACE_BY_FEE:
                replace_by_fee = True
            # Relative timelock (BIP68): type flag selects 512s units vs blocks.
            if ti.sequence <= SEQUENCE_LOCKTIME_DISABLE_FLAG:
                if ti.sequence & SEQUENCE_LOCKTIME_TYPE_FLAG:
                    print("  Relative timelock for %d seconds" % (512 * (ti.sequence - SEQUENCE_LOCKTIME_TYPE_FLAG)))
                else:
                    print("  Relative timelock for %d blocks" % ti.sequence)
            if ti.locktime_cltv:
                if ti.locktime_cltv & SEQUENCE_LOCKTIME_TYPE_FLAG:
                    print("  Check Locktime Verify (CLTV) for %d seconds" %
                          (512 * (ti.locktime_cltv - SEQUENCE_LOCKTIME_TYPE_FLAG)))
                else:
                    print("  Check Locktime Verify (CLTV) for %d blocks" % ti.locktime_cltv)
            if ti.locktime_csv:
                if ti.locktime_csv & SEQUENCE_LOCKTIME_TYPE_FLAG:
                    print("  Check Sequence Verify Timelock (CSV) for %d seconds" %
                          (512 * (ti.locktime_csv - SEQUENCE_LOCKTIME_TYPE_FLAG)))
                else:
                    print("  Check Sequence Verify Timelock (CSV) for %d blocks" % ti.locktime_csv)
        print("Outputs")
        for to in self.outputs:
            if to.script_type == 'nulldata':
                # OP_RETURN output: show the embedded data instead of an address.
                print("- NULLDATA ", to.lock_script[2:])
            else:
                spent_str = ''
                if to.spent:
                    spent_str = 'S'
                elif to.spent is False:
                    spent_str = 'U'
                print("-", to.address, Value.from_satoshi(to.value, network=self.network).str(1), to.script_type,
                      spent_str)
        if replace_by_fee:
            print("Replace by fee: Enabled")
        print("Size: %s" % self.size)
        print("Vsize: %s" % self.vsize)
        print("Fee: %s" % self.fee)
        print("Confirmations: %s" % self.confirmations)
        print("Block: %s" % self.block_height)
    def set_locktime_relative_blocks(self, blocks, input_index_n=0):
        """Set a relative timelock (BIP68) in blocks on one input and re-sign.

        A value of 0 or 0xffffffff disables the relative timelock.

        :param blocks: number of blocks the input must wait relative to its
            previous output's confirmation
        :param input_index_n: index of the input to set the sequence on
        :raises TransactionError: when blocks exceeds SEQUENCE_LOCKTIME_MASK
        """
        if blocks == 0 or blocks == 0xffffffff:
            self.inputs[input_index_n].sequence = 0xffffffff
            self.sign(index_n=input_index_n, replace_signatures=True)
            return
        if blocks > SEQUENCE_LOCKTIME_MASK:
            raise TransactionError("Number of nSequence timelock blocks exceeds %d" % SEQUENCE_LOCKTIME_MASK)
        self.inputs[input_index_n].sequence = blocks
        # Relative locktimes require transaction version >= 2 (BIP68).
        self.version_int = 2
        self.sign_and_update(index_n=input_index_n)
def set_locktime_relative_time(self, seconds, input_index_n=0):
if seconds == 0 or seconds == 0xffffffff:
self.inputs[input_index_n].sequence = 0xffffffff
self.sign(index_n=input_index_n, replace_signatures=True)
return
elif seconds < 512:
seconds = 512
elif (seconds // 512) > SEQUENCE_LOCKTIME_MASK:
raise TransactionError("Number of relative nSeqence timelock seconds exceeds %d" % SEQUENCE_LOCKTIME_MASK)
self.inputs[input_index_n].sequence = seconds // 512 + SEQUENCE_LOCKTIME_TYPE_FLAG
self.version_int = 2
self.sign_and_update(index_n=input_index_n)
    def set_locktime_blocks(self, blocks):
        """Set an absolute locktime in blocks (nLockTime) and re-sign.

        A value of 0 or 0xffffffff disables the locktime. Input sequences of
        0xffffffff are lowered to 0xfffffffd, because nLockTime is ignored when
        all inputs are final.

        :param blocks: block height before which the transaction is invalid
        :raises TransactionError: when blocks is 500000000 or more (that range
            is interpreted as a timestamp, see set_locktime_time)
        """
        if blocks == 0 or blocks == 0xffffffff:
            self.locktime = 0xffffffff
            self.sign(replace_signatures=True)
            self.verify()
            return
        elif blocks > 500000000:
            raise TransactionError("Number of locktime blocks must be below %d" % 500000000)
        self.locktime = blocks
        if blocks != 0 and blocks != 0xffffffff:
            for i in self.inputs:
                if i.sequence == 0xffffffff:
                    i.sequence = 0xfffffffd
        self.sign_and_update()
    def set_locktime_time(self, timestamp):
        """Set an absolute locktime as unix timestamp (nLockTime) and re-sign.

        A value of 0 or 0xffffffff disables the locktime. Input sequences of
        0xffffffff are lowered to 0xfffffffd so the locktime takes effect.

        :param timestamp: unix timestamp before which the transaction is invalid
        :raises TransactionError: when the timestamp is 500000000 or lower
            (block-height range, see set_locktime_blocks) or above 0xfffffffe
        """
        if timestamp == 0 or timestamp == 0xffffffff:
            self.locktime = 0xffffffff
            self.sign(replace_signatures=True)
            self.verify()
            return
        if timestamp <= 500000000:
            raise TransactionError("Timestamp must have a value higher then %d" % 500000000)
        if timestamp > 0xfffffffe:
            raise TransactionError("Timestamp must have a value lower then %d" % 0xfffffffe)
        self.locktime = timestamp
        for i in self.inputs:
            if i.sequence == 0xffffffff:
                i.sequence = 0xfffffffd
        self.sign_and_update()
    def signature_hash(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None, as_hex=False):
        """Double-SHA256 of the serialized transaction data to sign (see signature()).

        :param sign_id: index of the input to sign, or None for the whole transaction
        :param hash_type: signature hash type, SIGHASH_ALL by default
        :param witness_type: override the transaction-level witness type
        :param as_hex: return the hash as hexadecimal string instead of bytes
        :return bytes or str:
        """
        return double_sha256(self.signature(sign_id, hash_type, witness_type), as_hex=as_hex)
def signature(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None):
if witness_type is None:
witness_type = self.witness_type
if witness_type == 'legacy' or sign_id is None:
return self.raw(sign_id, hash_type, 'legacy')
elif witness_type in ['segwit', 'p2sh-segwit']:
return self.signature_segwit(sign_id, hash_type)
else:
raise TransactionError("Witness_type %s not supported" % self.witness_type)
def signature_segwit(self, sign_id, hash_type=SIGHASH_ALL):
assert (self.witness_type == 'segwit')
prevouts_serialized = b''
sequence_serialized = b''
outputs_serialized = b''
hash_prevouts = b'\0' * 32
hash_sequence = b'\0' * 32
hash_outputs = b'\0' * 32
for i in self.inputs:
prevouts_serialized += i.prev_txid[::-1] + i.output_n[::-1]
sequence_serialized += i.sequence.to_bytes(4, 'little')
if not hash_type & SIGHASH_ANYONECANPAY:
hash_prevouts = double_sha256(prevouts_serialized)
if (hash_type & 0x1f) != SIGHASH_SINGLE and (hash_type & 0x1f) != SIGHASH_NONE:
hash_sequence = double_sha256(sequence_serialized)
if (hash_type & 0x1f) != SIGHASH_SINGLE and (hash_type & 0x1f) != SIGHASH_NONE:
for o in self.outputs:
outputs_serialized += int(o.value).to_bytes(8, 'little')
outputs_serialized += varstr(o.lock_script)
hash_outputs = double_sha256(outputs_serialized)
elif (hash_type & 0x1f) != SIGHASH_SINGLE and sign_id < len(self.outputs):
outputs_serialized += int(self.outputs[sign_id].value).to_bytes(8, 'little')
outputs_serialized += varstr(self.outputs[sign_id].lock_script)
hash_outputs = double_sha256(outputs_serialized)
if not self.inputs[sign_id].value:
raise TransactionError("Need value of input %d to create transaction signature, value can not be 0" %
sign_id)
script_code = self.inputs[sign_id].redeemscript
if not script_code:
script_code = self.inputs[sign_id].script_code
if (not script_code or script_code == b'\0') and self.inputs[sign_id].script_type != 'unknown':
raise TransactionError("Script code missing")
ser_tx = \
self.version[::-1] + hash_prevouts + hash_sequence + self.inputs[sign_id].prev_txid[::-1] + \
self.inputs[sign_id].output_n[::-1] + \
varstr(script_code) + int(self.inputs[sign_id].value).to_bytes(8, 'little') + \
self.inputs[sign_id].sequence.to_bytes(4, 'little') + \
hash_outputs + self.locktime.to_bytes(4, 'little') + hash_type.to_bytes(4, 'little')
return ser_tx
    def raw(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None):
        """Serialize the transaction to raw bytes.

        With sign_id=None the complete transaction is serialized (including the
        segwit marker/flag and witness data for segwit transactions). With a
        sign_id, the legacy signing serialization is produced: the unsigned
        unlocking script for the input being signed, empty scripts for the
        others, and the hash type appended at the end.

        :param sign_id: input index to serialize for signing, or None
        :param hash_type: signature hash type appended when sign_id is set
        :param witness_type: override the transaction-level witness type
        :return bytes:
        :raises TransactionError: when an output has a negative value
        """
        if witness_type is None:
            witness_type = self.witness_type
        # Version is stored big-endian internally, serialized little-endian.
        r = self.version[::-1]
        if sign_id is None and witness_type == 'segwit':
            # Segwit marker (0x00) and flag (0x01) bytes (BIP144).
            r += b'\x00'
            r += b'\x01'
        r += int_to_varbyteint(len(self.inputs))
        r_witness = b''
        for i in self.inputs:
            r += i.prev_txid[::-1] + i.output_n[::-1]
            # Collect witness data per input; an empty witness is a single 0x00.
            if i.witnesses and i.witness_type != 'legacy':
                r_witness += int_to_varbyteint(len(i.witnesses)) + b''.join([bytes(varstr(w)) for w in i.witnesses])
            else:
                r_witness += b'\0'
            if sign_id is None:
                r += varstr(i.unlocking_script)
            elif sign_id == i.index_n:
                r += varstr(i.unlocking_script_unsigned)
            else:
                r += b'\0'
            r += i.sequence.to_bytes(4, 'little')
        r += int_to_varbyteint(len(self.outputs))
        for o in self.outputs:
            if o.value < 0:
                raise TransactionError("Output value < 0 not allowed")
            r += int(o.value).to_bytes(8, 'little')
            r += varstr(o.lock_script)
        if sign_id is None and witness_type == 'segwit':
            r += r_witness
        r += self.locktime.to_bytes(4, 'little')
        if sign_id is not None:
            r += hash_type.to_bytes(4, 'little')
        else:
            # Cache the size only for a fully signed transaction.
            if not self.size and b'' not in [i.unlocking_script for i in self.inputs]:
                self.size = len(r)
        return r
    def raw_hex(self, sign_id=None, hash_type=SIGHASH_ALL, witness_type=None):
        """Hexadecimal string version of raw(); see raw() for the parameters."""
        return self.raw(sign_id, hash_type=hash_type, witness_type=witness_type).hex()
def witness_data(self):
witness_data = b''
for i in self.inputs:
witness_data += int_to_varbyteint(len(i.witnesses)) + b''.join([bytes(varstr(w)) for w in i.witnesses])
return witness_data
    def verify(self):
        """Verify the signatures of all inputs against their signature hashes.

        Sets self.verified and each input's 'valid' flag. Returns False as soon
        as any input lacks signatures or enough valid ones.

        :return bool: True when all inputs verify
        """
        self.verified = False
        for i in self.inputs:
            if i.script_type == 'coinbase':
                # Coinbase inputs have no signatures to verify.
                # NOTE(review): 'break' stops checking any remaining inputs and
                # falls through to verified=True — presumably coinbase
                # transactions have a single input; confirm.
                i.valid = True
                break
            if not i.signatures:
                _logger.info("No signatures found for transaction input %d" % i.index_n)
                return False
            if len(i.signatures) < i.sigs_required:
                _logger.info("Not enough signatures provided. Found %d signatures but %d needed" %
                             (len(i.signatures), i.sigs_required))
                return False
            transaction_hash = self.signature_hash(i.index_n, witness_type=i.witness_type)
            # Walk the keys in order, matching signatures greedily: a signature
            # that verifies against the current key advances to the next one.
            sig_id = 0
            key_n = 0
            for key in i.keys:
                if sig_id > i.sigs_required - 1:
                    break
                if sig_id >= len(i.signatures):
                    _logger.info("No valid signatures found")
                    return False
                if not transaction_hash:
                    _logger.info("Need at least 1 key to create segwit transaction signature")
                    return False
                key_n += 1
                if verify(transaction_hash, i.signatures[sig_id], key):
                    sig_id += 1
                    i.valid = True
                else:
                    i.valid = False
            if sig_id < i.sigs_required:
                _logger.info("Not enough valid signatures provided for input %d. Found %d signatures but %d needed" %
                             (i.index_n, sig_id, i.sigs_required))
                return False
        self.verified = True
        return True
    def sign(self, keys=None, index_n=None, multisig_key_n=None, hash_type=SIGHASH_ALL, fail_on_unknown_key=True,
             replace_signatures=False):
        """Sign transaction inputs with the given private keys.

        Signatures are placed in the position matching their public key in the
        input's key list, so multisig signature order is preserved. Existing
        signatures are kept unless replace_signatures is True.

        :param keys: private key or list of keys (HDKey, Key or importable key data)
        :param index_n: index of the single input to sign, or None for all inputs
        :param multisig_key_n: unused here; kept for interface compatibility
        :param hash_type: signature hash type, SIGHASH_ALL by default
        :param fail_on_unknown_key: raise when a key does not match any input key
        :param replace_signatures: replace already present signatures
        :raises TransactionError: for coinbase inputs, unknown keys
            (when fail_on_unknown_key) or keys without a private part
        """
        if index_n is None:
            tids = range(len(self.inputs))
        else:
            tids = [index_n]
        if keys is None:
            keys = []
        elif not isinstance(keys, list):
            keys = [keys]
        for tid in tids:
            n_signs = 0
            # Normalize provided keys to Key/HDKey objects and add the input's
            # own private keys.
            tid_keys = [k if isinstance(k, (HDKey, Key)) else Key(k, compressed=self.inputs[tid].compressed)
                        for k in keys]
            for k in self.inputs[tid].keys:
                if k.is_private and k not in tid_keys:
                    tid_keys.append(k)
            if not self.inputs[tid].keys:
                self.inputs[tid].keys = tid_keys
                self.inputs[tid].update_scripts(hash_type=hash_type)
            if self.inputs[tid].script_type == 'coinbase':
                raise TransactionError("Can not sign coinbase transactions")
            pub_key_list = [k.public_byte for k in self.inputs[tid].keys]
            n_total_sigs = len(self.inputs[tid].keys)
            # sig_domain holds one slot per public key; '' marks an empty slot.
            sig_domain = [''] * n_total_sigs
            txid = self.signature_hash(tid, witness_type=self.inputs[tid].witness_type)
            for key in tid_keys:
                if key.public_byte not in pub_key_list:
                    if fail_on_unknown_key:
                        raise TransactionError("This key does not sign any known key: %s" % key.public_hex)
                    else:
                        _logger.info("This key does not sign any known key: %s" % key.public_hex)
                        continue
                if not replace_signatures and key in [x.public_key for x in self.inputs[tid].signatures]:
                    _logger.info("Key %s already signed" % key.public_hex)
                    break
                if not key.private_byte:
                    raise TransactionError("Please provide a valid private key to sign the transaction")
                sig = sign(txid, key)
                newsig_pos = pub_key_list.index(key.public_byte)
                sig_domain[newsig_pos] = sig
                n_signs += 1
            # NOTE(review): 'break' aborts signing of the remaining inputs as
            # soon as one input gets no new signatures — confirm this is
            # intentional rather than 'continue'.
            if not n_signs:
                break
            # Re-insert pre-existing signatures into their key positions.
            n_sigs_to_insert = len(self.inputs[tid].signatures)
            for sig in self.inputs[tid].signatures:
                if not sig.public_key:
                    break
                newsig_pos = pub_key_list.index(sig.public_key.public_byte)
                if sig_domain[newsig_pos] == '':
                    sig_domain[newsig_pos] = sig
                    n_sigs_to_insert -= 1
            # Signatures without a known public key fill the first free slots.
            if n_sigs_to_insert:
                for sig in self.inputs[tid].signatures:
                    free_positions = [i for i, s in enumerate(sig_domain) if s == '']
                    for pos in free_positions:
                        sig_domain[pos] = sig
                        n_sigs_to_insert -= 1
                        break
            if n_sigs_to_insert:
                _logger.info("Some signatures are replaced with the signatures of the provided keys")
            self.inputs[tid].signatures = [s for s in sig_domain if s != '']
            self.inputs[tid].update_scripts(hash_type)
    def sign_and_update(self, index_n=None):
        """Re-sign the transaction and refresh derived fields: version bytes,
        txid, size, weight units, totals and fee per kilobyte.

        :param index_n: index of the single input to re-sign, or None for all
        """
        self.version = self.version_int.to_bytes(4, 'big')
        self.sign(index_n=index_n, replace_signatures=True)
        self.txid = self.signature_hash()[::-1].hex()
        self.size = len(self.raw())
        self.calc_weight_units()
        self.update_totals()
        if self.fee:
            self.fee_per_kb = int((self.fee / float(self.size)) * 1024)
    def add_input(self, prev_txid, output_n, keys=None, signatures=None, public_hash=b'', unlocking_script=b'',
                  unlocking_script_unsigned=None, script_type=None, address='',
                  sequence=0xffffffff, compressed=True, sigs_required=None, sort=False, index_n=None,
                  value=None, double_spend=False, locktime_cltv=None, locktime_csv=None,
                  key_path='', witness_type=None, witnesses=None, encoding=None):
        """Create a new Input referencing a previous output and append it.

        All parameters are forwarded to the Input constructor; index_n defaults
        to the next free position. A non-final sequence on a version-1
        transaction bumps the version to 2 (required for BIP68 relative
        timelocks).

        :return int: index_n of the added input
        """
        if index_n is None:
            index_n = len(self.inputs)
        sequence_int = sequence
        if isinstance(sequence, bytes):
            sequence_int = int.from_bytes(sequence, 'little')
        # Relative timelocks (enabled sequence values) need version >= 2.
        if self.version == b'\x00\x00\x00\x01' and 0 < sequence_int < SEQUENCE_LOCKTIME_DISABLE_FLAG:
            self.version = b'\x00\x00\x00\x02'
            self.version_int = 2
        self.inputs.append(
            Input(prev_txid=prev_txid, output_n=output_n, keys=keys, signatures=signatures, public_hash=public_hash,
                  unlocking_script=unlocking_script, unlocking_script_unsigned=unlocking_script_unsigned,
                  script_type=script_type, address=address, sequence=sequence, compressed=compressed,
                  sigs_required=sigs_required, sort=sort, index_n=index_n, value=value, double_spend=double_spend,
                  locktime_cltv=locktime_cltv, locktime_csv=locktime_csv, key_path=key_path, witness_type=witness_type,
                  witnesses=witnesses, encoding=encoding, network=self.network.name))
        return index_n
def add_output(self, value, address='', public_hash=b'', public_key=b'', lock_script=b'', spent=False,
output_n=None, encoding=None, spending_txid=None, spending_index_n=None):
lock_script = to_bytes(lock_script)
if output_n is None:
output_n = len(self.outputs)
if not float(value).is_integer():
raise TransactionError("Output must be of type integer and contain no decimals")
if lock_script.startswith(b'\x6a'):
if value != 0:
raise TransactionError("Output value for OP_RETURN script must be 0")
self.outputs.append(Output(value=int(value), address=address, public_hash=public_hash,
public_key=public_key, lock_script=lock_script, spent=spent, output_n=output_n,
encoding=encoding, spending_txid=spending_txid, spending_index_n=spending_index_n,
network=self.network.name))
return output_n
    def merge_transaction(self, transaction):
        """Merge another transaction's inputs and outputs into this one, then
        shuffle their order, recalculate the totals and re-sign.

        :param transaction: Transaction object to merge in
        """
        self.inputs += transaction.inputs
        self.outputs += transaction.outputs
        self.shuffle()
        self.update_totals()
        self.sign_and_update()
    def estimate_size(self, number_of_change_outputs=0):
        """Estimate the (virtual) size in bytes of this transaction once fully
        signed, using per-script-type size estimates for unsigned inputs.

        Updates self.size and self.vsize. For segwit transactions the virtual
        size (witness bytes discounted 4x) is returned, otherwise the raw size.

        :param number_of_change_outputs: extra change outputs to include in
            the estimate
        :return int: estimated (virtual) transaction size in bytes
        :raises TransactionError: for an unknown input script type or an
            output without locking script
        """
        # 10 bytes: version (4) + locktime (4) + input/output counts (~2).
        est_size = 10
        witness_size = 2
        if self.witness_type != 'legacy':
            # Segwit marker and flag bytes.
            est_size += 2
        for inp in self.inputs:
            # Outpoint (36) + sequence (4) per input.
            est_size += 40
            scr_size = 0
            if inp.witness_type != 'legacy':
                est_size += 1
            if inp.unlocking_script and len(inp.signatures) >= inp.sigs_required:
                # Fully signed input: use the real script size.
                scr_size += len(varstr(inp.unlocking_script))
                if inp.witness_type == 'p2sh-segwit':
                    scr_size += sum([1 + len(w) for w in inp.witnesses])
            else:
                # Unsigned input: estimate from script type.
                # NOTE(review): 107 / 33 / 9+34k+72m / 24 / 17 are empirical
                # per-type byte estimates — confirm against the script layouts.
                if inp.script_type == 'sig_pubkey':
                    scr_size += 107
                    if not inp.compressed:
                        scr_size += 33
                    if inp.witness_type == 'p2sh-segwit':
                        scr_size += 24
                elif inp.script_type == 'p2sh_multisig':
                    scr_size += 9 + (len(inp.keys) * 34) + (inp.sigs_required * 72)
                    if inp.witness_type == 'p2sh-segwit':
                        scr_size += 17 * inp.sigs_required
                elif inp.script_type == 'signature':
                    scr_size += 9 + 72
                else:
                    raise TransactionError("Unknown input script type %s cannot estimate transaction size" %
                                           inp.script_type)
            est_size += scr_size
            witness_size += scr_size
        if not self.inputs:
            # No inputs yet: assume one typical input of 147 bytes.
            est_size += 147
        for outp in self.outputs:
            est_size += 8
            if outp.lock_script:
                est_size += len(varstr(outp.lock_script))
            else:
                raise TransactionError("Need locking script for output %d to estimate size" % outp.output_n)
        if number_of_change_outputs:
            # Change output size depends on the (assumed) script type of the
            # first input.
            is_multisig = True if self.inputs and self.inputs[0].script_type == 'p2sh_multisig' else False
            co_size = 8
            if not self.inputs or self.inputs[0].witness_type == 'legacy':
                co_size += 24 if is_multisig else 26
            elif self.inputs[0].witness_type == 'p2sh-segwit':
                co_size += 24
            else:
                co_size += 33 if is_multisig else 23
            est_size += (number_of_change_outputs * co_size)
        self.size = est_size
        self.vsize = est_size
        if self.witness_type == 'legacy':
            return est_size
        else:
            # Virtual size: witness bytes weigh 1/4 (BIP141 weight formula).
            self.vsize = math.ceil((((est_size - witness_size) * 3 + est_size) / 4) - 1.5)
            return self.vsize
def calc_weight_units(self):
if not self.size:
return None
wu = self.size * 4
if self.witness_type == 'segwit':
wu = wu - 6
wu = wu - len(self.witness_data()) * 3
self.vsize = math.ceil(wu / 4)
return wu
    @property
    def weight_units(self):
        """Transaction weight units (see calc_weight_units); None when size is unknown."""
        return self.calc_weight_units()
def calculate_fee(self):
if not self.fee_per_kb:
raise TransactionError("Cannot calculate transaction fees: transaction.fee_per_kb is not set")
fee = int(self.estimate_size() / 1024.0 * self.fee_per_kb)
if fee < self.network.fee_min:
fee = self.network.fee_min
elif fee > self.network.fee_max:
fee = self.network.fee_max
return fee
def update_totals(self):
self.input_total = sum([i.value for i in self.inputs if i.value])
self.output_total = sum([o.value for o in self.outputs if o.value])
if self.input_total:
self.fee = self.input_total - self.output_total
def save(self, filename=None):
if not filename:
p = Path(BCL_DATA_DIR, '%s.tx' % self.txid)
else:
p = Path(filename)
if not p.parent or str(p.parent) == '.':
p = Path(BCL_DATA_DIR, filename)
f = p.open('wb')
pickle.dump(self, f)
f.close()
def shuffle_inputs(self):
random.shuffle(self.inputs)
for idx, o in enumerate(self.inputs):
o.index_n = idx
def shuffle_outputs(self):
random.shuffle(self.outputs)
for idx, o in enumerate(self.outputs):
o.output_n = idx
def shuffle(self):
self.shuffle_inputs()
self.shuffle_outputs()
| true | true |
f7f8352ab0a6fa4f33a391619562915ba18b491b | 23 | py | Python | recipe_scrapers/__version__.py | oncleben31/recipe-scrapers | 3e2f80776f8e4267817bd7002fc299d79a80e112 | [
"MIT"
] | null | null | null | recipe_scrapers/__version__.py | oncleben31/recipe-scrapers | 3e2f80776f8e4267817bd7002fc299d79a80e112 | [
"MIT"
] | null | null | null | recipe_scrapers/__version__.py | oncleben31/recipe-scrapers | 3e2f80776f8e4267817bd7002fc299d79a80e112 | [
"MIT"
] | null | null | null | __version__ = "13.4.0"
| 11.5 | 22 | 0.652174 | __version__ = "13.4.0"
| true | true |
f7f8357aea533891c5ce13fb840b931c50006e7e | 435 | py | Python | 07.Redirect/main.py | sarincr/Introduction-to-Flask-Web-framework | 69a703b3533b6b12f07712a5473b31176c25c9c7 | [
"MIT"
] | null | null | null | 07.Redirect/main.py | sarincr/Introduction-to-Flask-Web-framework | 69a703b3533b6b12f07712a5473b31176c25c9c7 | [
"MIT"
] | null | null | null | 07.Redirect/main.py | sarincr/Introduction-to-Flask-Web-framework | 69a703b3533b6b12f07712a5473b31176c25c9c7 | [
"MIT"
] | null | null | null | from flask import Flask
test = Flask(__name__)
@test.route("/")
def start():
return "Hello Start Page"
@test.route("/home")
def home_page():
return "Welcome Home page"
@test.route("/blog")
def blog():
return "Welcome Author"
@test.route('/<name>')
def base():
if name =='home':
return redirect(url_for('home_page'))
else:
return redirect(url_for('blog'))
if __name__ == '__main__':
test.run()
| 14.5 | 43 | 0.627586 | from flask import Flask
test = Flask(__name__)
@test.route("/")
def start():
return "Hello Start Page"
@test.route("/home")
def home_page():
return "Welcome Home page"
@test.route("/blog")
def blog():
return "Welcome Author"
@test.route('/<name>')
def base():
if name =='home':
return redirect(url_for('home_page'))
else:
return redirect(url_for('blog'))
if __name__ == '__main__':
test.run()
| true | true |
f7f835b089350f9adf5c8a561460d8ef48e5daeb | 4,658 | py | Python | examples/comment_by_file.py | linneudm/Instagram-API | 7949cee0ddfe037841e4226a3038a4751b7a7854 | [
"MIT"
] | null | null | null | examples/comment_by_file.py | linneudm/Instagram-API | 7949cee0ddfe037841e4226a3038a4751b7a7854 | [
"MIT"
] | null | null | null | examples/comment_by_file.py | linneudm/Instagram-API | 7949cee0ddfe037841e4226a3038a4751b7a7854 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Use text editor to edit the script and type in valid Instagram username/password
from InstagramAPI import InstagramAPI
from random import randint
import time
import getpass
import os
def getIdByUsername(API, targetname):
    """Resolve an Instagram username to its numeric user id ('pk').

    Performs a username search through the API and reads the id from the
    last JSON response.
    """
    API.searchUsername(targetname)
    user_record = API.LastJson['user']
    return user_record['pk']
def getTotalFollowers(api, user_id):
    """Collect the complete follower list of a user.

    Pages through api.getUserFollowers, following the 'next_max_id' cursor in
    each response until no cursor is returned. Equivalent to calling
    api.getTotalFollowers from InstagramAPI.
    """
    collected = []
    cursor = True  # sentinel for the first request, which has no cursor yet
    while cursor:
        if cursor is True:
            cursor = ''
        api.getUserFollowers(user_id, maxid=cursor)
        collected.extend(api.LastJson.get('users', []))
        cursor = api.LastJson.get('next_max_id', '')
    return collected
def getTotalFollowings(api, user_id):
    """Collect the complete list of accounts a user follows.

    Pages through api.getUserFollowings, following the 'next_max_id' cursor in
    each response until no cursor is returned. Equivalent to calling
    api.getTotalFollowings from InstagramAPI.
    """
    collected = []
    cursor = True  # sentinel for the first request, which has no cursor yet
    while cursor:
        if cursor is True:
            cursor = ''
        api.getUserFollowings(user_id, maxid=cursor)
        collected.extend(api.LastJson.get('users', []))
        cursor = api.LastJson.get('next_max_id', '')
    return collected
if __name__ == "__main__":
    # Interactive flow (prompts are in Portuguese): log in, pick a source
    # account to take followers from, pick a giveaway photo on a target
    # account, then comment on that photo tagging batches of followers.
    print("Informe o nome de usuario:")
    username = input()
    passw = getpass.getpass("Informe a senha:")
    api = InstagramAPI(username, passw)
    api.login()
    # user_id = '1461295173'
    #print("Informe o limite de comentarios:")
    #maxcomment = int(input())
    maxcomment = 20  # hard-coded cap on the number of comments posted
    print("Informe o nome do instagram que deseja pegar os seguidores:")
    user_f = str(input())
    user_id = getIdByUsername(api, user_f)
    print("Informe o nome do instagram que deseja comentar a foto:")
    ig_comment = str(input())
    ig_id = getIdByUsername(api, ig_comment)
    result = api.getUserFeed(ig_id)
    # NOTE(review): when getUserFeed fails, 'feed' is never assigned and the
    # loop below raises NameError — consider exiting here instead.
    if(result):
        feed = api.LastJson
    else:
        print("nao deu certo.")
    print("Selecione a foto do sorteio com base na legenda.")
    # Walk the feed showing captions until the user confirms the giveaway post.
    select = 0
    for f in (feed['items']):
        if select != 1:
            try:
                caption = f['caption']['text']
            except:  # bare except: posts without a caption raise KeyError/TypeError
                caption = "Sem legenda."
            print('Legenda:', caption)
            print("É esta a foto do sorteio? 1 - Sim, 2 - Não.")
            select = int(input())
            if(select == 1):
                print("OK! Foto encontrada...")
                # NOTE(review): 'media_id' stays unbound if no photo is
                # confirmed; 'res' is never used afterwards.
                media_id = str(f['caption']['media_id'])
                res = api.getMediaComments(media_id)
        else:
            break
    print("Quantos usuarios por comentário?")
    usr_qtd = int(input())
    comments = 0
    #maxcomment = 20
    partial = 0   # comments posted since the last long pause
    count = 0     # usernames accumulated for the next comment
    text = ""     # "@user1 @user2 ..." mention string being built
    print("Agora, vamos comentar com os segudires de {}.".format(user_f))
    users_commented = []
    # Persist already-mentioned usernames in a file so reruns skip them.
    print("Nome do arquivo para verificação: ")
    namef = input()
    if not os.path.exists(namef):
        open(namef, 'w')
    with open(namef, 'r') as f:
        for line in f:
            tmp = line.replace('\n', '')
            users_commented.append(tmp)
    # Only tag non-verified followers.
    followers2 = getTotalFollowers(api, user_id)
    followers = []
    for fol in followers2:
        if(fol['is_verified'] == False):
            followers.append(fol['username'])
    for fol in followers:
        if(count < usr_qtd):
            # Still filling the current batch of usernames.
            if(fol not in users_commented):
                text += "@" + fol + " "
                count+=1
                with open(namef, 'a') as f:
                    f.write(fol+'\n')
        else:
            # Batch full: post a comment, with rate-limit pauses.
            if(comments < maxcomment):
                if(partial == 10):
                    partial = 0
                    print("Precisamos parar por 15 minutos")
                    time.sleep(60*15)
                interval = randint(0, 30)
                time.sleep(60+interval)
                print("Comentando: {}. Aguarde {} segundos...".format(text, 60+interval))
                print("Comentarios ate agora: {}".format(comments))
                ok = api.comment(media_id, "Eu quero ganhar " + text)
                if(ok):
                    with open('qtd.txt', 'a') as f:
                        f.write(str(comments)+'\n')
                else:
                    break
                count = 0
                partial+=1
                comments+=1
                text = ""
            else:
                break
    #print(result)
| 31.472973 | 90 | 0.548089 |
from InstagramAPI import InstagramAPI
from random import randint
import time
import getpass
import os
def getIdByUsername(API, targetname):
    """Resolve an Instagram username to its numeric user id ('pk')."""
    API.searchUsername(targetname)
    user_info = API.LastJson['user']
    return user_info['pk']
def getTotalFollowers(api, user_id):
    """Collect every follower of *user_id*, walking the pagination cursor."""
    collected = []
    cursor = ''  # empty cursor fetches the first page
    while True:
        api.getUserFollowers(user_id, maxid=cursor)
        page = api.LastJson
        collected.extend(page.get('users', []))
        cursor = page.get('next_max_id', '')
        if not cursor:  # no further pages
            break
    return collected
def getTotalFollowings(api, user_id):
    """Collect every account *user_id* follows, walking the pagination cursor."""
    collected = []
    cursor = ''  # empty cursor fetches the first page
    while True:
        api.getUserFollowings(user_id, maxid=cursor)
        page = api.LastJson
        collected.extend(page.get('users', []))
        cursor = page.get('next_max_id', '')
        if not cursor:  # no further pages
            break
    return collected
if __name__ == "__main__":
print("Informe o nome de usuario:")
username = input()
passw = getpass.getpass("Informe a senha:")
api = InstagramAPI(username, passw)
api.login()
maxcomment = 20
print("Informe o nome do instagram que deseja pegar os seguidores:")
user_f = str(input())
user_id = getIdByUsername(api, user_f)
print("Informe o nome do instagram que deseja comentar a foto:")
ig_comment = str(input())
ig_id = getIdByUsername(api, ig_comment)
result = api.getUserFeed(ig_id)
if(result):
feed = api.LastJson
else:
print("nao deu certo.")
print("Selecione a foto do sorteio com base na legenda.")
select = 0
for f in (feed['items']):
if select != 1:
try:
caption = f['caption']['text']
except:
caption = "Sem legenda."
print('Legenda:', caption)
print("É esta a foto do sorteio? 1 - Sim, 2 - Não.")
select = int(input())
if(select == 1):
print("OK! Foto encontrada...")
media_id = str(f['caption']['media_id'])
res = api.getMediaComments(media_id)
else:
break
print("Quantos usuarios por comentário?")
usr_qtd = int(input())
comments = 0
partial = 0
count = 0
text = ""
print("Agora, vamos comentar com os segudires de {}.".format(user_f))
users_commented = []
print("Nome do arquivo para verificação: ")
namef = input()
if not os.path.exists(namef):
open(namef, 'w')
with open(namef, 'r') as f:
for line in f:
tmp = line.replace('\n', '')
users_commented.append(tmp)
followers2 = getTotalFollowers(api, user_id)
followers = []
for fol in followers2:
if(fol['is_verified'] == False):
followers.append(fol['username'])
for fol in followers:
if(count < usr_qtd):
if(fol not in users_commented):
text += "@" + fol + " "
count+=1
with open(namef, 'a') as f:
f.write(fol+'\n')
else:
if(comments < maxcomment):
if(partial == 10):
partial = 0
print("Precisamos parar por 15 minutos")
time.sleep(60*15)
interval = randint(0, 30)
time.sleep(60+interval)
print("Comentando: {}. Aguarde {} segundos...".format(text, 60+interval))
print("Comentarios ate agora: {}".format(comments))
ok = api.comment(media_id, "Eu quero ganhar " + text)
if(ok):
with open('qtd.txt', 'a') as f:
f.write(str(comments)+'\n')
else:
break
count = 0
partial+=1
comments+=1
text = ""
else:
break
| true | true |
f7f836e4775957861360b6b9862eff5ec23845c6 | 906 | py | Python | fobi_custom/plugins/form_elements/fields/intercept/transportation/fobi_form_elements.py | jeancochrane/just-spaces | f7e8f710c3ab2db5f8a87a533547c5176a2d1e83 | [
"MIT"
] | null | null | null | fobi_custom/plugins/form_elements/fields/intercept/transportation/fobi_form_elements.py | jeancochrane/just-spaces | f7e8f710c3ab2db5f8a87a533547c5176a2d1e83 | [
"MIT"
] | null | null | null | fobi_custom/plugins/form_elements/fields/intercept/transportation/fobi_form_elements.py | jeancochrane/just-spaces | f7e8f710c3ab2db5f8a87a533547c5176a2d1e83 | [
"MIT"
] | null | null | null | from django import forms
from fobi.base import FormFieldPlugin, form_element_plugin_registry
from ..forms import TRANSPORTATION_CHOICES
from .forms import TransportationForm
class TransportationPlugin(FormFieldPlugin):
    """Fobi form-field plugin asking how the respondent traveled here."""

    uid = "transportation"
    name = "How did you travel here?"
    form = TransportationForm
    group = "Intercept"  # plugin group shown in the fobi UI

    def get_form_field_instances(self, request=None, form_entry=None,
                                 form_element_entries=None, **kwargs):
        """Return the single select-style ChoiceField this plugin renders."""
        select_widget = forms.widgets.Select(attrs={})
        field_kwargs = {
            'required': self.data.required,
            'label': self.data.label,
            'widget': select_widget,
            'choices': TRANSPORTATION_CHOICES,
        }
        return [(self.data.name, forms.ChoiceField, field_kwargs)]
form_element_plugin_registry.register(TransportationPlugin)
| 29.225806 | 70 | 0.682119 | from django import forms
from fobi.base import FormFieldPlugin, form_element_plugin_registry
from ..forms import TRANSPORTATION_CHOICES
from .forms import TransportationForm
class TransportationPlugin(FormFieldPlugin):
uid = "transportation"
name = "How did you travel here?"
form = TransportationForm
group = "Intercept"
def get_form_field_instances(self, request=None, form_entry=None,
form_element_entries=None, **kwargs):
field_kwargs = {
'required': self.data.required,
'label': self.data.label,
'widget': forms.widgets.Select(attrs={}),
'choices': TRANSPORTATION_CHOICES,
}
return [(self.data.name, forms.ChoiceField, field_kwargs)]
form_element_plugin_registry.register(TransportationPlugin)
| true | true |
f7f83903c83772674143d3573e348cac3917f922 | 6,473 | py | Python | assignments/2019/assignment2/cs231n/classifiers/cnn.py | SudoHead/cs231n.github.io | 652285518ff5ed8c02503bac6cb24aaea0d6ff75 | [
"MIT"
] | 2 | 2021-03-03T02:49:00.000Z | 2021-09-17T06:53:13.000Z | assignments/2019/assignment2/cs231n/classifiers/cnn.py | SudoHead/cs231n.github.io | 652285518ff5ed8c02503bac6cb24aaea0d6ff75 | [
"MIT"
] | 32 | 2020-09-17T19:43:53.000Z | 2022-03-12T00:55:26.000Z | assignment2/cs231n/classifiers/cnn.py | AI-Huang/cs231n | 7887bd52e01dec87a33db7b5ac122dc702a2c3df | [
"MIT"
] | 1 | 2020-09-24T19:57:47.000Z | 2020-09-24T19:57:47.000Z | from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class ThreeLayerConvNet(object):
    """
    A three-layer convolutional network with the following architecture:

    conv - relu - 2x2 max pool - affine - relu - affine - softmax

    The network operates on minibatches of data that have shape (N, C, H, W)
    consisting of N images, each with height H and width W and with C input
    channels.
    """

    def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
                 hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
                 dtype=np.float32):
        """
        Initialize a new network.

        Inputs:
        - input_dim: Tuple (C, H, W) giving size of input data
        - num_filters: Number of filters to use in the convolutional layer
        - filter_size: Width/height of filters to use in the convolutional layer
        - hidden_dim: Number of units to use in the fully-connected hidden layer
        - num_classes: Number of scores to produce from the final affine layer.
        - weight_scale: Scalar giving standard deviation for random initialization
          of weights.
        - reg: Scalar giving L2 regularization strength
        - dtype: numpy datatype to use for computation.
        """
        self.params = {}
        self.reg = reg
        self.dtype = dtype

        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # The conv layer preserves the spatial size (see loss()), and the 2x2
        # max pool halves it, so the flattened conv output feeding the hidden
        # affine layer has num_filters * (H // 2) * (W // 2) features.
        C, H, W = input_dim
        self.params['W1'] = weight_scale * np.random.randn(
            num_filters, C, filter_size, filter_size)
        self.params['b1'] = np.zeros(num_filters)
        self.params['W2'] = weight_scale * np.random.randn(
            num_filters * (H // 2) * (W // 2), hidden_dim)
        self.params['b2'] = np.zeros(hidden_dim)
        self.params['W3'] = weight_scale * np.random.randn(hidden_dim, num_classes)
        self.params['b3'] = np.zeros(num_classes)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        for k, v in self.params.items():
            self.params[k] = v.astype(dtype)

    def loss(self, X, y=None):
        """
        Evaluate loss and gradient for the three-layer convolutional network.

        Input / output: Same API as TwoLayerNet in fc_net.py.
        """
        W1, b1 = self.params['W1'], self.params['b1']
        W2, b2 = self.params['W2'], self.params['b2']
        W3, b3 = self.params['W3'], self.params['b3']

        # pass conv_param to the forward pass for the convolutional layer
        # Padding and stride chosen to preserve the input spatial size
        filter_size = W1.shape[2]
        conv_param = {'stride': 1, 'pad': (filter_size - 1) // 2}

        # pass pool_param to the forward pass for the max-pooling layer
        pool_param = {'pool_height': 2, 'pool_width': 2, 'stride': 2}

        scores = None
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Forward pass: conv - relu - 2x2 max pool -> affine - relu -> affine.
        out1, cache1 = conv_relu_pool_forward(X, W1, b1, conv_param, pool_param)
        out2, cache2 = affine_relu_forward(out1, W2, b2)
        scores, cache3 = affine_forward(out2, W3, b3)
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        if y is None:
            return scores

        loss, grads = 0, {}
        # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****
        # Softmax data loss plus 0.5 * reg * ||W||^2 for each weight matrix
        # (the 0.5 factor keeps the gradient simply reg * W).
        loss, dscores = softmax_loss(scores, y)
        loss += 0.5 * self.reg * (
            np.sum(W1 * W1) + np.sum(W2 * W2) + np.sum(W3 * W3))

        # Backward pass mirrors the forward pass in reverse order.
        dout2, grads['W3'], grads['b3'] = affine_backward(dscores, cache3)
        dout1, grads['W2'], grads['b2'] = affine_relu_backward(dout2, cache2)
        _, grads['W1'], grads['b1'] = conv_relu_pool_backward(dout1, cache1)

        # Add the L2 regularization contribution to each weight gradient.
        grads['W1'] += self.reg * W1
        grads['W2'] += self.reg * W2
        grads['W3'] += self.reg * W3
        # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****

        return loss, grads
| 50.178295 | 111 | 0.467326 | from builtins import object
import numpy as np
from cs231n.layers import *
from cs231n.fast_layers import *
from cs231n.layer_utils import *
class ThreeLayerConvNet(object):
def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,
hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,
dtype=np.float32):
self.params = {}
self.reg = reg
self.dtype = dtype
| true | true |
f7f8398d48622f2c288a4b0b875b93a5b54c1208 | 683 | py | Python | app/core/migrations/0002_tag.py | oussema-azzebi/recipe-app-api | 1aeedd7f79caa055d6b57f08363032abff28df91 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | oussema-azzebi/recipe-app-api | 1aeedd7f79caa055d6b57f08363032abff28df91 | [
"MIT"
] | null | null | null | app/core/migrations/0002_tag.py | oussema-azzebi/recipe-app-api | 1aeedd7f79caa055d6b57f08363032abff28df91 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.15 on 2021-08-31 13:48
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| 28.458333 | 118 | 0.616398 |
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('core', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(max_length=255)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| true | true |
f7f83bf7facb087cc7224f0ce420b2972d7b800a | 577 | py | Python | examples/sample-signals/generator/generate_events.py | denist-huma/prometheus_flask_exporter | 2cb218d2001c1edb678bd8169c5ca4190b2d2f96 | [
"MIT"
] | null | null | null | examples/sample-signals/generator/generate_events.py | denist-huma/prometheus_flask_exporter | 2cb218d2001c1edb678bd8169c5ca4190b2d2f96 | [
"MIT"
] | null | null | null | examples/sample-signals/generator/generate_events.py | denist-huma/prometheus_flask_exporter | 2cb218d2001c1edb678bd8169c5ca4190b2d2f96 | [
"MIT"
] | null | null | null | import time
import random
import threading
import requests
endpoints = ("one", "two", "three", "four", "error")
HOST = "http://app:5000/"
def run():
    """Worker loop: request a random endpoint roughly once per second, forever."""
    while True:
        endpoint = random.choice(endpoints)
        try:
            requests.get(HOST + endpoint, timeout=1)
        except requests.RequestException:
            # The app may not be up yet; report and keep trying.
            print("cannot connect", HOST)
        time.sleep(1)
if __name__ == "__main__":
    # Spawn four daemon worker threads that generate traffic concurrently.
    for _ in range(4):
        thread = threading.Thread(target=run)
        thread.daemon = True
        thread.start()
    # Keep the main thread alive; the daemon workers exit with it.
    while True:
        time.sleep(1)
| 20.607143 | 52 | 0.584055 | import time
import random
import threading
import requests
endpoints = ("one", "two", "three", "four", "error")
HOST = "http://app:5000/"
def run():
while True:
try:
target = random.choice(endpoints)
requests.get(HOST + target, timeout=1)
except requests.RequestException:
print("cannot connect", HOST)
time.sleep(1)
if __name__ == "__main__":
for _ in range(4):
thread = threading.Thread(target=run)
thread.daemon = True
thread.start()
while True:
time.sleep(1)
| true | true |
f7f83c52f10f355c8f1ff71421b774fe9c4400de | 2,516 | py | Python | test/IECore/BINParticleWriterTest.py | gcodebackups/cortex-vfx | 72fa6c6eb3327fce4faf01361c8fcc2e1e892672 | [
"BSD-3-Clause"
] | 5 | 2016-07-26T06:09:28.000Z | 2022-03-07T03:58:51.000Z | test/IECore/BINParticleWriterTest.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | null | null | null | test/IECore/BINParticleWriterTest.py | turbosun/cortex | 4bdc01a692652cd562f3bfa85f3dae99d07c0b15 | [
"BSD-3-Clause"
] | 3 | 2015-03-25T18:45:24.000Z | 2020-02-15T15:37:18.000Z | ##########################################################################
#
# Copyright (c) 2009, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# * Neither the name of Image Engine Design nor the names of any
# other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import unittest
import sys
import IECore
class BINParticleWriterTest(unittest.TestCase):
    """Exercises IECore's BIN particle writer."""

    def testBasics(self):
        """Round-trip: read a PDC point cloud and write it out as BIN."""
        reader = IECore.Reader.create("test/IECore/data/pdcFiles/particleMesh.pdc")
        points = reader.read()
        writer = IECore.Writer.create(points, "test/particleMesh.bin")
        writer.write()

    def testEmpty(self):
        """Writing an empty PointsPrimitive must succeed."""
        empty = IECore.PointsPrimitive(IECore.V3fVectorData())
        writer = IECore.Writer.create(empty, "test/particleMesh.bin")
        writer["positionPrimVar"] = "P"
        writer["velocityPrimVar"] = ""
        writer["massPrimVar"] = ""
        writer["particleIdPrimVar"] = ""
        writer.write()

    def tearDown(self):
        """Remove the output file produced by the tests, if present."""
        if os.path.isfile("test/particleMesh.bin"):
            os.remove("test/particleMesh.bin")
if __name__ == "__main__":
unittest.main()
| 36.463768 | 76 | 0.68124 | true | true | |
f7f83d067d0364d32e7f9b053f64c29ad2ce128c | 73 | py | Python | app/modules/backend/modules/file/__init__.py | joostsijm/ssg | c2da0d7274aa76dc2d565e20d1b4c3f0ef5241a1 | [
"Apache-2.0"
] | null | null | null | app/modules/backend/modules/file/__init__.py | joostsijm/ssg | c2da0d7274aa76dc2d565e20d1b4c3f0ef5241a1 | [
"Apache-2.0"
] | null | null | null | app/modules/backend/modules/file/__init__.py | joostsijm/ssg | c2da0d7274aa76dc2d565e20d1b4c3f0ef5241a1 | [
"Apache-2.0"
] | null | null | null |
"""
Server static pages
"""
from .app import BLUEPRINT as Backend_File
| 10.428571 | 42 | 0.726027 |
from .app import BLUEPRINT as Backend_File
| true | true |
f7f83da1c20e77875ca5a940174bcd7ffde60d10 | 4,661 | py | Python | tests/system/fixtures/__init__.py | tgockel/marathon | 90619da70d79a2ecf19368a21c6a695ea981bd74 | [
"Apache-2.0"
] | 3,556 | 2015-01-01T00:18:02.000Z | 2022-03-30T01:58:26.000Z | tests/system/fixtures/__init__.py | tgockel/marathon | 90619da70d79a2ecf19368a21c6a695ea981bd74 | [
"Apache-2.0"
] | 5,128 | 2015-01-01T12:46:00.000Z | 2021-08-25T14:56:05.000Z | tests/system/fixtures/__init__.py | tgockel/marathon | 90619da70d79a2ecf19368a21c6a695ea981bd74 | [
"Apache-2.0"
] | 988 | 2015-01-04T18:40:44.000Z | 2022-02-23T21:04:47.000Z | import aiohttp
import common
import json
import os.path
import pytest
import logging
from datetime import timedelta
from shakedown.clients import dcos_url_path
from shakedown.clients.authentication import dcos_acs_token
from shakedown.clients.rpcclient import get_ssl_context
from shakedown.dcos.agent import get_agents, get_private_agents
from shakedown.dcos.command import run_command_on_agent
from shakedown.dcos.cluster import ee_version
from shakedown.dcos.file import copy_file_from_agent
from shakedown.dcos.marathon import marathon_on_marathon
from shakedown.dcos.security import add_user, set_user_permission, remove_user, remove_user_permission
from shakedown.dcos.service import wait_for_service_endpoint
from asyncsseclient import SSEClient
logger = logging.getLogger(__name__)
def fixtures_dir():
    """Return the absolute path of the directory containing this module."""
    here = os.path.abspath(__file__)
    return os.path.dirname(here)
@pytest.fixture(scope="function")
def wait_for_marathon_and_cleanup():
    """Per-test fixture: wait for root Marathon to answer /ping, run the test,
    then wait again (the test may have restarted Marathon) and wipe all apps."""
    wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds(), path="ping")
    yield
    wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds(), path="ping")
    common.clean_up_marathon()
@pytest.fixture(scope="function")
def wait_for_marathon_user_and_cleanup():
    """Per-test fixture for Marathon-on-Marathon ('marathon-user'): wait for it
    to answer /ping, run the test against the MoM client, then wait again and
    clean up via that client."""
    wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds(), path="ping")
    # Route requests to the Marathon-on-Marathon instance for the test body.
    with marathon_on_marathon() as client:
        yield
        wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds(), path="ping")
        common.clean_up_marathon(client)
@pytest.fixture(scope="function")
def parent_group(request):
    """ Fixture which yields a temporary marathon parent group that can be used to place apps/pods within the
        test function. The parent group is removed after the test. The group name is equal to the test function
        name with underscores replaced by dashes.
    """
    # e.g. test_foo_bar -> '/test-foo-bar'
    group = '/{}'.format(request.function.__name__).replace('_', '-')
    yield group
    common.clean_up_marathon(parent_group=group)
@pytest.fixture
async def sse_events():
    """Yield an async generator of Marathon server-sent events, each decoded
    from its JSON data payload."""
    url = dcos_url_path('service/marathon/v2/events')
    # Authenticate with the cluster ACS token and ask for an SSE stream.
    headers = {'Authorization': 'token={}'.format(dcos_acs_token()),
               'Accept': 'text/event-stream'}
    ssl_context = get_ssl_context()
    # Only verify certificates when the cluster provides an SSL context.
    verify_ssl = ssl_context is not None
    async with aiohttp.ClientSession(headers=headers) as session:
        async with session.get(url, verify_ssl=verify_ssl, ssl_context=ssl_context) as response:
            async def internal_generator():
                # Wrap the open response body and decode each event as JSON.
                client = SSEClient(response.content)
                async for event in client.events():
                    yield json.loads(event.data)
            yield internal_generator()
@pytest.fixture(scope="function")
def user_billy():
    """Create user 'billy' with full Marathon permissions for the test, then
    revoke the permissions and delete the user afterwards."""
    logger.info("entering user_billy fixture")
    add_user('billy', 'billy')
    # Grant access both through adminrouter and to Marathon's service tree.
    set_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
    set_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
    yield
    remove_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
    remove_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
    remove_user('billy')
    logger.info("exiting user_billy fixture")
@pytest.fixture(scope="function")
def docker_ipv6_network_fixture():
    """Create a Docker IPv6 bridge network on every agent for the test, and
    remove it from every agent afterwards."""
    agents = get_agents()
    network_cmd = "sudo docker network create --driver=bridge --ipv6 --subnet=fd01::/64 mesos-docker-ipv6-test"
    for agent in agents:
        run_command_on_agent(agent, network_cmd)
    yield
    for agent in agents:
        run_command_on_agent(agent, "sudo docker network rm mesos-docker-ipv6-test")
@pytest.fixture(autouse=True, scope='session')
def install_enterprise_cli():
    """Install the enterprise CLI on a DC/OS EE cluster before all tests start.
    """
    # ee_version() is None on open DC/OS, where there is no enterprise CLI.
    if ee_version() is not None:
        common.install_enterprise_cli_package()
@pytest.fixture(autouse=True, scope='session')
def archive_sandboxes():
    """After the whole session, tarball each private agent's Mesos sandbox and
    download the archives for post-mortem debugging."""
    # Nothing to setup
    yield
    logger.info('>>> Archiving Mesos sandboxes')
    # We tarball the sandboxes from all the agents first and download them afterwards
    for agent in get_private_agents():
        file_name = 'sandbox_{}.tar.gz'.format(agent.replace(".", "_"))
        cmd = 'sudo tar --exclude=provisioner -zcf {} /var/lib/mesos/slave'.format(file_name)
        status, output = run_command_on_agent(agent, cmd)  # NOQA
        if status:
            copy_file_from_agent(agent, file_name)
        else:
            logger.warning('Failed to tarball the sandbox from the agent={}, output={}'.format(agent, output))
| 38.204918 | 111 | 0.732675 | import aiohttp
import common
import json
import os.path
import pytest
import logging
from datetime import timedelta
from shakedown.clients import dcos_url_path
from shakedown.clients.authentication import dcos_acs_token
from shakedown.clients.rpcclient import get_ssl_context
from shakedown.dcos.agent import get_agents, get_private_agents
from shakedown.dcos.command import run_command_on_agent
from shakedown.dcos.cluster import ee_version
from shakedown.dcos.file import copy_file_from_agent
from shakedown.dcos.marathon import marathon_on_marathon
from shakedown.dcos.security import add_user, set_user_permission, remove_user, remove_user_permission
from shakedown.dcos.service import wait_for_service_endpoint
from asyncsseclient import SSEClient
logger = logging.getLogger(__name__)
def fixtures_dir():
return os.path.dirname(os.path.abspath(__file__))
@pytest.fixture(scope="function")
def wait_for_marathon_and_cleanup():
wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds(), path="ping")
yield
wait_for_service_endpoint('marathon', timedelta(minutes=5).total_seconds(), path="ping")
common.clean_up_marathon()
@pytest.fixture(scope="function")
def wait_for_marathon_user_and_cleanup():
wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds(), path="ping")
with marathon_on_marathon() as client:
yield
wait_for_service_endpoint('marathon-user', timedelta(minutes=5).total_seconds(), path="ping")
common.clean_up_marathon(client)
@pytest.fixture(scope="function")
def parent_group(request):
group = '/{}'.format(request.function.__name__).replace('_', '-')
yield group
common.clean_up_marathon(parent_group=group)
@pytest.fixture
async def sse_events():
url = dcos_url_path('service/marathon/v2/events')
headers = {'Authorization': 'token={}'.format(dcos_acs_token()),
'Accept': 'text/event-stream'}
ssl_context = get_ssl_context()
verify_ssl = ssl_context is not None
async with aiohttp.ClientSession(headers=headers) as session:
async with session.get(url, verify_ssl=verify_ssl, ssl_context=ssl_context) as response:
async def internal_generator():
client = SSEClient(response.content)
async for event in client.events():
yield json.loads(event.data)
yield internal_generator()
@pytest.fixture(scope="function")
def user_billy():
logger.info("entering user_billy fixture")
add_user('billy', 'billy')
set_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
set_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
yield
remove_user_permission(rid='dcos:adminrouter:service:marathon', uid='billy', action='full')
remove_user_permission(rid='dcos:service:marathon:marathon:services:/', uid='billy', action='full')
remove_user('billy')
logger.info("exiting user_billy fixture")
@pytest.fixture(scope="function")
def docker_ipv6_network_fixture():
agents = get_agents()
network_cmd = "sudo docker network create --driver=bridge --ipv6 --subnet=fd01::/64 mesos-docker-ipv6-test"
for agent in agents:
run_command_on_agent(agent, network_cmd)
yield
for agent in agents:
run_command_on_agent(agent, "sudo docker network rm mesos-docker-ipv6-test")
@pytest.fixture(autouse=True, scope='session')
def install_enterprise_cli():
if ee_version() is not None:
common.install_enterprise_cli_package()
@pytest.fixture(autouse=True, scope='session')
def archive_sandboxes():
yield
logger.info('>>> Archiving Mesos sandboxes')
for agent in get_private_agents():
file_name = 'sandbox_{}.tar.gz'.format(agent.replace(".", "_"))
cmd = 'sudo tar --exclude=provisioner -zcf {} /var/lib/mesos/slave'.format(file_name)
status, output = run_command_on_agent(agent, cmd)
if status:
copy_file_from_agent(agent, file_name)
else:
logger.warning('Failed to tarball the sandbox from the agent={}, output={}'.format(agent, output))
| true | true |
f7f83de67031c546f5c3dbfca02aeca0dc2ac473 | 280 | py | Python | backend/urls.py | FranBisquerra/django-vue | fe58bbbeef766bc99f34c8e3d30fd0910a34a741 | [
"MIT"
] | null | null | null | backend/urls.py | FranBisquerra/django-vue | fe58bbbeef766bc99f34c8e3d30fd0910a34a741 | [
"MIT"
] | null | null | null | backend/urls.py | FranBisquerra/django-vue | fe58bbbeef766bc99f34c8e3d30fd0910a34a741 | [
"MIT"
] | null | null | null | from django.contrib import admin
from django.conf.urls import url, include
from django.urls import path
from .api.views import index_view
urlpatterns = [
path('', index_view, name='index'),
url('', include('backend.api.urls')),
path('admin/', admin.site.urls),
]
| 18.666667 | 41 | 0.692857 | from django.contrib import admin
from django.conf.urls import url, include
from django.urls import path
from .api.views import index_view
urlpatterns = [
path('', index_view, name='index'),
url('', include('backend.api.urls')),
path('admin/', admin.site.urls),
]
| true | true |
f7f83fb5760e68fce1f6ad015c1890655cc2a2d3 | 6,936 | py | Python | test/expression/test_logical.py | LordDarkula/eva | 93433bc88f361c277690c9e31f1b8de657f25823 | [
"Apache-2.0"
] | null | null | null | test/expression/test_logical.py | LordDarkula/eva | 93433bc88f361c277690c9e31f1b8de657f25823 | [
"Apache-2.0"
] | null | null | null | test/expression/test_logical.py | LordDarkula/eva | 93433bc88f361c277690c9e31f1b8de657f25823 | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# Copyright 2018-2020 EVA
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import pandas as pd
from mock import Mock
from eva.expression.abstract_expression import ExpressionType
from eva.expression.comparison_expression import ComparisonExpression
from eva.expression.logical_expression import LogicalExpression
from eva.expression.constant_value_expression import ConstantValueExpression
from eva.expression.tuple_value_expression import TupleValueExpression
from eva.models.storage.batch import Batch
class LogicalExpressionsTest(unittest.TestCase):
    def __init__(self, *args, **kwargs):
        # No extra state; forwards to unittest.TestCase unchanged.
        super().__init__(*args, **kwargs)
def test_logical_and(self):
const_exp1 = ConstantValueExpression(1)
const_exp2 = ConstantValueExpression(1)
comparison_expression_left = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
const_exp1,
const_exp2
)
const_exp1 = ConstantValueExpression(2)
const_exp2 = ConstantValueExpression(1)
comparison_expression_right = ComparisonExpression(
ExpressionType.COMPARE_GREATER,
const_exp1,
const_exp2
)
logical_expr = LogicalExpression(
ExpressionType.LOGICAL_AND,
comparison_expression_left,
comparison_expression_right
)
self.assertEqual(
[True], logical_expr.evaluate(None).frames[0].tolist())
def test_logical_or(self):
const_exp1 = ConstantValueExpression(1)
const_exp2 = ConstantValueExpression(1)
comparison_expression_left = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
const_exp1,
const_exp2
)
const_exp1 = ConstantValueExpression(1)
const_exp2 = ConstantValueExpression(2)
comparison_expression_right = ComparisonExpression(
ExpressionType.COMPARE_GREATER,
const_exp1,
const_exp2
)
logical_expr = LogicalExpression(
ExpressionType.LOGICAL_OR,
comparison_expression_left,
comparison_expression_right
)
self.assertEqual(
[True],
logical_expr.evaluate(None).frames[0].tolist()
)
def test_logical_not(self):
const_exp1 = ConstantValueExpression(0)
const_exp2 = ConstantValueExpression(1)
comparison_expression_right = ComparisonExpression(
ExpressionType.COMPARE_GREATER,
const_exp1,
const_exp2
)
logical_expr = LogicalExpression(
ExpressionType.LOGICAL_NOT,
None,
comparison_expression_right
)
self.assertEqual(
[True],
logical_expr.evaluate(None).frames[0].tolist()
)
    def test_short_circuiting_and_complete(self):
        # tests whether right-hand side is bypassed completely with and
        tup_val_exp_l = TupleValueExpression(col_name=0)
        tup_val_exp_r = TupleValueExpression(col_name=1)
        comp_exp_l = ComparisonExpression(
            ExpressionType.COMPARE_EQUAL,
            tup_val_exp_l,
            tup_val_exp_r
        )
        # Right side is a Mock so we can assert it was never evaluated.
        comp_exp_r = Mock(spec=ComparisonExpression)
        logical_exp = LogicalExpression(
            ExpressionType.LOGICAL_AND,
            comp_exp_l,
            comp_exp_r
        )
        # Columns 0 and 1 differ in every row, so the left side is all-False.
        tuples = Batch(pd.DataFrame(
            {0: [1, 2, 3], 1: [4, 5, 6]}))
        self.assertEqual(
            [False, False, False],
            logical_exp.evaluate(tuples).frames[0].tolist()
        )
        # AND with an all-False left side must skip the right side entirely.
        comp_exp_r.evaluate.assert_not_called()
    def test_short_circuiting_or_complete(self):
        """OR must skip the right operand when the left side is all True."""
        # tests whether right-hand side is bypassed completely with or
        tup_val_exp_l = TupleValueExpression(col_name=0)
        tup_val_exp_r = TupleValueExpression(col_name=1)
        comp_exp_l = ComparisonExpression(
            ExpressionType.COMPARE_EQUAL,
            tup_val_exp_l,
            tup_val_exp_r
        )
        # The right operand is a mock so we can assert it is never run.
        comp_exp_r = Mock(spec=ComparisonExpression)
        logical_exp = LogicalExpression(
            ExpressionType.LOGICAL_OR,
            comp_exp_l,
            comp_exp_r
        )
        # Columns 0 and 1 are identical, so the left side is True for
        # every row and the OR result is already decided.
        tuples = Batch(pd.DataFrame(
            {0: [1, 2, 3], 1: [1, 2, 3]}))
        self.assertEqual(
            [True, True, True],
            logical_exp.evaluate(tuples).frames[0].tolist()
        )
        comp_exp_r.evaluate.assert_not_called()
    def test_short_circuiting_and_partial(self):
        """AND must evaluate the right operand only for left-True rows."""
        # tests whether right-hand side is partially executed with and
        tup_val_exp_l = TupleValueExpression(col_name=0)
        tup_val_exp_r = TupleValueExpression(col_name=1)
        comp_exp_l = ComparisonExpression(
            ExpressionType.COMPARE_EQUAL,
            tup_val_exp_l,
            tup_val_exp_r
        )
        # Mocked right side reports [True, False] for the two rows it sees.
        comp_exp_r = Mock(spec=ComparisonExpression)
        comp_exp_r.evaluate = Mock(return_value=Mock(frames=[[True], [False]]))
        logical_exp = LogicalExpression(
            ExpressionType.LOGICAL_AND,
            comp_exp_l,
            comp_exp_r
        )
        # Only rows 0 and 1 satisfy the left side (1==1, 2==2); rows 2-3
        # are False and must never reach the right operand.
        tuples = Batch(pd.DataFrame(
            {0: [1, 2, 3, 4], 1: [1, 2, 5, 6]}))
        self.assertEqual(
            [True, False, False, False],
            logical_exp.evaluate(tuples).frames[0].tolist()
        )
        # The surviving row indices are forwarded via the mask argument.
        comp_exp_r.evaluate.assert_called_once_with(tuples, mask=[0, 1])
    def test_short_circuiting_or_partial(self):
        """OR must evaluate the right operand only for left-False rows."""
        # tests whether right-hand side is partially executed with or
        tup_val_exp_l = TupleValueExpression(col_name=0)
        tup_val_exp_r = TupleValueExpression(col_name=1)
        comp_exp_l = ComparisonExpression(
            ExpressionType.COMPARE_EQUAL,
            tup_val_exp_l,
            tup_val_exp_r
        )
        # Mocked right side reports [True, False] for the two rows it sees.
        comp_exp_r = Mock(spec=ComparisonExpression)
        comp_exp_r.evaluate = Mock(return_value=Mock(frames=[[True], [False]]))
        logical_exp = LogicalExpression(
            ExpressionType.LOGICAL_OR,
            comp_exp_l,
            comp_exp_r
        )
        # Rows 0 and 1 fail the left side (1!=5, 2!=6) and need the right
        # operand; rows 2-3 are already True and must be skipped.
        tuples = Batch(pd.DataFrame(
            {0: [1, 2, 3, 4], 1: [5, 6, 3, 4]}))
        self.assertEqual(
            [True, False, True, True],
            logical_exp.evaluate(tuples).frames[0].tolist()
        )
        # The undecided row indices are forwarded via the mask argument.
        comp_exp_r.evaluate.assert_called_once_with(tuples, mask=[0, 1])
| 33.669903 | 79 | 0.63985 |
import unittest
import pandas as pd
from mock import Mock
from eva.expression.abstract_expression import ExpressionType
from eva.expression.comparison_expression import ComparisonExpression
from eva.expression.logical_expression import LogicalExpression
from eva.expression.constant_value_expression import ConstantValueExpression
from eva.expression.tuple_value_expression import TupleValueExpression
from eva.models.storage.batch import Batch
class LogicalExpressionsTest(unittest.TestCase):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def test_logical_and(self):
const_exp1 = ConstantValueExpression(1)
const_exp2 = ConstantValueExpression(1)
comparison_expression_left = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
const_exp1,
const_exp2
)
const_exp1 = ConstantValueExpression(2)
const_exp2 = ConstantValueExpression(1)
comparison_expression_right = ComparisonExpression(
ExpressionType.COMPARE_GREATER,
const_exp1,
const_exp2
)
logical_expr = LogicalExpression(
ExpressionType.LOGICAL_AND,
comparison_expression_left,
comparison_expression_right
)
self.assertEqual(
[True], logical_expr.evaluate(None).frames[0].tolist())
def test_logical_or(self):
const_exp1 = ConstantValueExpression(1)
const_exp2 = ConstantValueExpression(1)
comparison_expression_left = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
const_exp1,
const_exp2
)
const_exp1 = ConstantValueExpression(1)
const_exp2 = ConstantValueExpression(2)
comparison_expression_right = ComparisonExpression(
ExpressionType.COMPARE_GREATER,
const_exp1,
const_exp2
)
logical_expr = LogicalExpression(
ExpressionType.LOGICAL_OR,
comparison_expression_left,
comparison_expression_right
)
self.assertEqual(
[True],
logical_expr.evaluate(None).frames[0].tolist()
)
def test_logical_not(self):
const_exp1 = ConstantValueExpression(0)
const_exp2 = ConstantValueExpression(1)
comparison_expression_right = ComparisonExpression(
ExpressionType.COMPARE_GREATER,
const_exp1,
const_exp2
)
logical_expr = LogicalExpression(
ExpressionType.LOGICAL_NOT,
None,
comparison_expression_right
)
self.assertEqual(
[True],
logical_expr.evaluate(None).frames[0].tolist()
)
def test_short_circuiting_and_complete(self):
tup_val_exp_l = TupleValueExpression(col_name=0)
tup_val_exp_r = TupleValueExpression(col_name=1)
comp_exp_l = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
tup_val_exp_l,
tup_val_exp_r
)
comp_exp_r = Mock(spec=ComparisonExpression)
logical_exp = LogicalExpression(
ExpressionType.LOGICAL_AND,
comp_exp_l,
comp_exp_r
)
tuples = Batch(pd.DataFrame(
{0: [1, 2, 3], 1: [4, 5, 6]}))
self.assertEqual(
[False, False, False],
logical_exp.evaluate(tuples).frames[0].tolist()
)
comp_exp_r.evaluate.assert_not_called()
def test_short_circuiting_or_complete(self):
tup_val_exp_l = TupleValueExpression(col_name=0)
tup_val_exp_r = TupleValueExpression(col_name=1)
comp_exp_l = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
tup_val_exp_l,
tup_val_exp_r
)
comp_exp_r = Mock(spec=ComparisonExpression)
logical_exp = LogicalExpression(
ExpressionType.LOGICAL_OR,
comp_exp_l,
comp_exp_r
)
tuples = Batch(pd.DataFrame(
{0: [1, 2, 3], 1: [1, 2, 3]}))
self.assertEqual(
[True, True, True],
logical_exp.evaluate(tuples).frames[0].tolist()
)
comp_exp_r.evaluate.assert_not_called()
def test_short_circuiting_and_partial(self):
tup_val_exp_l = TupleValueExpression(col_name=0)
tup_val_exp_r = TupleValueExpression(col_name=1)
comp_exp_l = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
tup_val_exp_l,
tup_val_exp_r
)
comp_exp_r = Mock(spec=ComparisonExpression)
comp_exp_r.evaluate = Mock(return_value=Mock(frames=[[True], [False]]))
logical_exp = LogicalExpression(
ExpressionType.LOGICAL_AND,
comp_exp_l,
comp_exp_r
)
tuples = Batch(pd.DataFrame(
{0: [1, 2, 3, 4], 1: [1, 2, 5, 6]}))
self.assertEqual(
[True, False, False, False],
logical_exp.evaluate(tuples).frames[0].tolist()
)
comp_exp_r.evaluate.assert_called_once_with(tuples, mask=[0, 1])
def test_short_circuiting_or_partial(self):
tup_val_exp_l = TupleValueExpression(col_name=0)
tup_val_exp_r = TupleValueExpression(col_name=1)
comp_exp_l = ComparisonExpression(
ExpressionType.COMPARE_EQUAL,
tup_val_exp_l,
tup_val_exp_r
)
comp_exp_r = Mock(spec=ComparisonExpression)
comp_exp_r.evaluate = Mock(return_value=Mock(frames=[[True], [False]]))
logical_exp = LogicalExpression(
ExpressionType.LOGICAL_OR,
comp_exp_l,
comp_exp_r
)
tuples = Batch(pd.DataFrame(
{0: [1, 2, 3, 4], 1: [5, 6, 3, 4]}))
self.assertEqual(
[True, False, True, True],
logical_exp.evaluate(tuples).frames[0].tolist()
)
comp_exp_r.evaluate.assert_called_once_with(tuples, mask=[0, 1])
| true | true |
f7f84019a158f52e8dd458a60900bac31893f555 | 4,981 | py | Python | imcsdk/mometa/ip/IpBlocking.py | ecoen66/imcsdk | b10eaa926a5ee57cea7182ae0adc8dd1c818b0ab | [
"Apache-2.0"
] | 31 | 2016-06-14T07:23:59.000Z | 2021-09-12T17:17:26.000Z | imcsdk/mometa/ip/IpBlocking.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 109 | 2016-05-25T03:56:56.000Z | 2021-10-18T02:58:12.000Z | imcsdk/mometa/ip/IpBlocking.py | sthagen/imcsdk | 1831eaecb5960ca03a8624b1579521749762b932 | [
"Apache-2.0"
] | 67 | 2016-05-17T05:53:56.000Z | 2022-03-24T15:52:53.000Z | """This module contains the general information for IpBlocking ManagedObject."""
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class IpBlockingConsts:
    """Constants namespace for IpBlocking; currently defines no members."""
    pass
class IpBlocking(ManagedObject):
    """ManagedObject for the 'ipBlocking' class (rn "ip-block")."""
    # Shared (empty) constants namespace for this class.
    consts = IpBlockingConsts()
    naming_props = set([])
    # Class-level metadata, keyed by platform flavour ("classic" / "modular").
    mo_meta = {
        "classic": MoMeta("IpBlocking", "ipBlocking", "ip-block", VersionMeta.Version151f, "InputOutput", 0xff, [], ["admin", "read-only", "user"], ['mgmtIf'], [], ["Get", "Set"]),
        "modular": MoMeta("IpBlocking", "ipBlocking", "ip-block", VersionMeta.Version2013e, "InputOutput", 0xff, [], ["admin", "read-only", "user"], ['mgmtIf'], [], ["Get", "Set"])
    }
    # Per-property metadata by platform.  Platform differences: "classic"
    # additionally accepts enable values "false"/"true" and limits
    # fail_window to 60-120, while "modular" allows 60-280.
    prop_meta = {
        "classic": {
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "enable": MoPropertyMeta("enable", "enable", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
            "fail_count": MoPropertyMeta("fail_count", "failCount", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["3-10"]),
            "fail_window": MoPropertyMeta("fail_window", "failWindow", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["60-120"]),
            "penalty_time": MoPropertyMeta("penalty_time", "penaltyTime", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], ["300-900"]),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "description": MoPropertyMeta("description", "description", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        },
        "modular": {
            "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
            "enable": MoPropertyMeta("enable", "enable", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["No", "Yes", "no", "yes"], []),
            "fail_count": MoPropertyMeta("fail_count", "failCount", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["3-10"]),
            "fail_window": MoPropertyMeta("fail_window", "failWindow", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["60-280"]),
            "penalty_time": MoPropertyMeta("penalty_time", "penaltyTime", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], ["300-900"]),
            "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
            "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
            "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
            "description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        },
    }
    # XML property name -> Python attribute name, per platform (identical
    # mappings for both flavours of this class).
    prop_map = {
        "classic": {
            "dn": "dn",
            "enable": "enable",
            "failCount": "fail_count",
            "failWindow": "fail_window",
            "penaltyTime": "penalty_time",
            "rn": "rn",
            "status": "status",
            "childAction": "child_action",
            "description": "description",
        },
        "modular": {
            "dn": "dn",
            "enable": "enable",
            "failCount": "fail_count",
            "failWindow": "fail_window",
            "penaltyTime": "penalty_time",
            "rn": "rn",
            "status": "status",
            "childAction": "child_action",
            "description": "description",
        },
    }
    def __init__(self, parent_mo_or_dn, **kwargs):
        self._dirty_mask = 0
        # Properties start unset; values come from kwargs or the server.
        self.enable = None
        self.fail_count = None
        self.fail_window = None
        self.penalty_time = None
        self.status = None
        self.child_action = None
        self.description = None
        ManagedObject.__init__(self, "IpBlocking", parent_mo_or_dn, **kwargs)
| 54.141304 | 199 | 0.609115 |
from ...imcmo import ManagedObject
from ...imccoremeta import MoPropertyMeta, MoMeta
from ...imcmeta import VersionMeta
class IpBlockingConsts:
pass
class IpBlocking(ManagedObject):
consts = IpBlockingConsts()
naming_props = set([])
mo_meta = {
"classic": MoMeta("IpBlocking", "ipBlocking", "ip-block", VersionMeta.Version151f, "InputOutput", 0xff, [], ["admin", "read-only", "user"], ['mgmtIf'], [], ["Get", "Set"]),
"modular": MoMeta("IpBlocking", "ipBlocking", "ip-block", VersionMeta.Version2013e, "InputOutput", 0xff, [], ["admin", "read-only", "user"], ['mgmtIf'], [], ["Get", "Set"])
}
prop_meta = {
"classic": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"enable": MoPropertyMeta("enable", "enable", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["No", "Yes", "false", "no", "true", "yes"], []),
"fail_count": MoPropertyMeta("fail_count", "failCount", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["3-10"]),
"fail_window": MoPropertyMeta("fail_window", "failWindow", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["60-120"]),
"penalty_time": MoPropertyMeta("penalty_time", "penaltyTime", "uint", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], ["300-900"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version151f, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version151f, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version151f, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
"modular": {
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x2, 0, 255, None, [], []),
"enable": MoPropertyMeta("enable", "enable", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x4, None, None, None, ["No", "Yes", "no", "yes"], []),
"fail_count": MoPropertyMeta("fail_count", "failCount", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x8, None, None, None, [], ["3-10"]),
"fail_window": MoPropertyMeta("fail_window", "failWindow", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x10, None, None, None, [], ["60-280"]),
"penalty_time": MoPropertyMeta("penalty_time", "penaltyTime", "uint", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], ["300-900"]),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x40, 0, 255, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_WRITE, 0x80, None, None, None, ["", "created", "deleted", "modified", "removed"], []),
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version2013e, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"description": MoPropertyMeta("description", "description", "string", VersionMeta.Version2013e, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
},
}
prop_map = {
"classic": {
"dn": "dn",
"enable": "enable",
"failCount": "fail_count",
"failWindow": "fail_window",
"penaltyTime": "penalty_time",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"description": "description",
},
"modular": {
"dn": "dn",
"enable": "enable",
"failCount": "fail_count",
"failWindow": "fail_window",
"penaltyTime": "penalty_time",
"rn": "rn",
"status": "status",
"childAction": "child_action",
"description": "description",
},
}
def __init__(self, parent_mo_or_dn, **kwargs):
self._dirty_mask = 0
self.enable = None
self.fail_count = None
self.fail_window = None
self.penalty_time = None
self.status = None
self.child_action = None
self.description = None
ManagedObject.__init__(self, "IpBlocking", parent_mo_or_dn, **kwargs)
| true | true |
f7f840cfae2866dfd800ad567aecc9dd4d8e3e3b | 1,014 | py | Python | test.py | bosichong/17python.com | 378754e1288b444ab3657093aa18b7e3f03b5145 | [
"Apache-2.0"
] | 9 | 2017-09-02T05:54:06.000Z | 2019-04-11T02:34:41.000Z | test.py | bosichong/17python.com | 378754e1288b444ab3657093aa18b7e3f03b5145 | [
"Apache-2.0"
] | null | null | null | test.py | bosichong/17python.com | 378754e1288b444ab3657093aa18b7e3f03b5145 | [
"Apache-2.0"
] | 6 | 2017-10-25T02:47:45.000Z | 2019-12-21T06:35:01.000Z |
'''
环境配置
IDE vscode pycharm
代码仓库 github gitee
python 官方文档中文
https://docs.python.org/zh-cn/3.8/
程序的基本编写方法 IPO
I:Input 输入,程序的输入,数据机构。
P:Process 处理 程序的主要逻辑,算法。
O:Output 输出,程序的输出。
print([n*n for n in range(1,9)]) 列表推导
(n*n for n in range(9)) 生成器表达式
函数的复用
'''
# def getadd():
# return
# def add():
# pass
# add()
# def fact(n):
# if n==1:
# return 1
# return n * fact(n - 1)
# print(fact(2))
# 斐波那契数
# Print the Fibonacci numbers below 20 (1 1 2 3 5 8 13).
a, b = 0, 1
while True:
    if b >= 20:
        break
    print(b)
    a, b = b, a + b
import threading
import time
def pp(key):
    # Print *key* once per second, forever (no termination condition).
    while True:
        print(key)
        time.sleep(1)
# NOTE(review): both threads are created non-daemon (the Thread default),
# and pp never returns, so this script keeps printing both strings until
# the process is killed externally.
t1 = threading.Thread(target=pp,args=("haha",))
t1.start()
t2 = threading.Thread(target=pp,args=("lailai",))
t2.start()
# a,b = map(int,input("请输入两个值','号分隔:").split(','))
# print(a+b)
# class Person():
# def __init__(self,name,age):
# self.name = name
# self.age = age
# def say(self):
# print("我叫{},我已经{}".format(self.name,self.age))
# p = Person("张三",18)
# p.say()
| 11.522727 | 56 | 0.559172 |
a, b = 0, 1
while b < 20:
print(b)
a, b = b, a+b
import threading
import time
def pp(key):
while True:
print(key)
time.sleep(1)
t1 = threading.Thread(target=pp,args=("haha",))
t1.start()
t2 = threading.Thread(target=pp,args=("lailai",))
t2.start()
| true | true |
f7f84149287f46989ac5f40c3d40329158fba3cb | 3,757 | py | Python | mongosync/config_file.py | caosiyang/py-mongo-sync | 980a37c6f2b012025d43878a315e2869af397dd5 | [
"MIT"
] | 98 | 2016-05-26T07:45:08.000Z | 2022-03-15T03:52:22.000Z | mongosync/config_file.py | caosiyang/py-mongo-sync | 980a37c6f2b012025d43878a315e2869af397dd5 | [
"MIT"
] | 31 | 2016-08-27T06:46:20.000Z | 2021-07-27T11:52:58.000Z | mongosync/config_file.py | caosiyang/py-mongo-sync | 980a37c6f2b012025d43878a315e2869af397dd5 | [
"MIT"
] | 51 | 2016-05-05T05:47:41.000Z | 2021-09-09T10:53:11.000Z | import toml
from bson.timestamp import Timestamp
from mongosync.config import Config, MongoConfig, EsConfig
from mongosync.mongo_utils import gen_namespace
class ConfigFile(object):
    """Builds a Config object from a TOML configuration file."""

    @staticmethod
    def load(filepath):
        """Load the TOML config at *filepath* and return a populated Config.

        Raises Exception on an unknown 'dst.type' or on malformed or
        duplicate entries under the [sync] section.
        """
        conf = Config()
        tml = toml.load(filepath)

        # The source is always a MongoDB endpoint.
        conf.src_conf = MongoConfig(tml['src']['hosts'],
                                    tml['src'].get('authdb', 'admin'),
                                    tml['src'].get('username', ''),
                                    tml['src'].get('password', ''))

        # BUG FIX: the original wrote "if type not in tml['dst']", testing
        # the *builtin* ``type`` for dict membership.  That operand is
        # always True, so every destination was treated as MongoDB and the
        # 'es' branch was unreachable.  The intended key is the string.
        if 'type' not in tml['dst'] or tml['dst']['type'] == 'mongo':
            conf.dst_conf = MongoConfig(tml['dst']['hosts'],
                                        tml['dst'].get('authdb', 'admin'),
                                        tml['dst'].get('username', ''),
                                        tml['dst'].get('password', ''))
        elif tml['dst']['type'] == 'es':
            conf.dst_conf = EsConfig(tml['dst']['hosts'])
        else:
            raise Exception('invalid dst.type')

        if 'sync' in tml and 'dbs' in tml['sync']:
            for dbentry in tml['sync']['dbs']:
                if 'db' not in dbentry:
                    raise Exception("'db' is missing in sync.dbs")
                if not dbentry['db']:
                    raise Exception("'db' is empty in sync.dbs")
                dbname = dbentry['db'].strip()
                rename_db = dbentry['rename_db'].strip() if 'rename_db' in dbentry else ""
                # update db map
                if dbname and rename_db:
                    if dbname in conf.dbmap:
                        raise Exception('duplicate dbname in sync.dbs: %s' % dbname)
                    conf.dbmap[dbname] = rename_db
                if 'colls' in dbentry and dbentry['colls']:
                    for collentry in dbentry['colls']:
                        if isinstance(collentry, (str, unicode)):
                            # A bare string entry selects a whole collection.
                            collname = collentry.strip()
                            ns = gen_namespace(dbname, collname)
                            conf.data_filter.add_include_coll(ns)
                        elif isinstance(collentry, dict):
                            if 'coll' not in collentry:
                                raise Exception("'coll' is missing in sync.dbs.colls")
                            if not collentry['coll']:
                                raise Exception("'coll' is empty in sync.dbs.colls")
                            collname = collentry['coll'].strip()
                            fields = frozenset([f.strip() for f in collentry['fields']] if 'fields' in collentry else [])
                            # update coll filter
                            ns = gen_namespace(dbname, collname)
                            conf.data_filter.add_include_coll(ns)
                            # update fields
                            if fields:
                                if ns in conf.fieldmap:
                                    raise Exception("duplicate collname in sync.dbs.colls: %s" % ns)
                                conf.fieldmap[ns] = fields
                        else:
                            raise Exception('invalid entry in sync.dbs.colls: %s' % collentry)
                else:
                    # No explicit collections: include every collection of the db.
                    conf.data_filter.add_include_coll(gen_namespace(dbname, '*'))

        if 'sync' in tml and 'start_optime' in tml['sync']:
            conf.start_optime = Timestamp(tml['sync']['start_optime'], 0)

        if 'log' in tml and 'filepath' in tml['log']:
            conf.logfilepath = tml['log']['filepath']

        return conf
| 46.382716 | 121 | 0.46633 | import toml
from bson.timestamp import Timestamp
from mongosync.config import Config, MongoConfig, EsConfig
from mongosync.mongo_utils import gen_namespace
class ConfigFile(object):
@staticmethod
def load(filepath):
conf = Config()
tml = toml.load(filepath)
conf.src_conf = MongoConfig(tml['src']['hosts'],
tml['src'].get('authdb', 'admin'),
tml['src'].get('username', ''),
tml['src'].get('password', ''))
if type not in tml['dst'] or tml['dst']['type'] == 'mongo':
conf.dst_conf = MongoConfig(tml['dst']['hosts'],
tml['dst'].get('authdb', 'admin'),
tml['dst'].get('username', ''),
tml['dst'].get('password', ''))
elif tml['dst']['type'] == 'es':
conf.dst_conf = EsConfig(tml['dst']['hosts'])
else:
raise Exception('invalid dst.type')
if 'sync' in tml and 'dbs' in tml['sync']:
for dbentry in tml['sync']['dbs']:
if 'db' not in dbentry:
raise Exception("'db' is missing in sync.dbs")
if not dbentry['db']:
raise Exception("'db' is empty in sync.dbs")
dbname = dbentry['db'].strip()
rename_db = dbentry['rename_db'].strip() if 'rename_db' in dbentry else ""
if dbname and rename_db:
if dbname in conf.dbmap:
raise Exception('duplicate dbname in sync.dbs: %s' % dbname)
conf.dbmap[dbname] = rename_db
if 'colls' in dbentry and dbentry['colls']:
for collentry in dbentry['colls']:
if isinstance(collentry, str) or isinstance(collentry, unicode):
collname = collentry.strip()
ns = gen_namespace(dbname, collname)
conf.data_filter.add_include_coll(ns)
elif isinstance(collentry, dict):
if 'coll' not in collentry:
raise Exception("'coll' is missing in sync.dbs.colls")
if not collentry['coll']:
raise Exception("'coll' is empty in sync.dbs.colls")
collname = collentry['coll'].strip()
fields = frozenset([f.strip() for f in collentry['fields']] if 'fields' in collentry else [])
ns = gen_namespace(dbname, collname)
conf.data_filter.add_include_coll(ns)
if fields:
if ns in conf.fieldmap:
raise Exception("duplicate collname in sync.dbs.colls: %s" % ns)
conf.fieldmap[ns] = fields
else:
raise Exception('invalid entry in sync.dbs.colls: %s' % collentry)
else:
conf.data_filter.add_include_coll(gen_namespace(dbname, '*'))
if 'sync' in tml and 'start_optime' in tml['sync']:
conf.start_optime = Timestamp(tml['sync']['start_optime'], 0)
if 'log' in tml and 'filepath' in tml['log']:
conf.logfilepath = tml['log']['filepath']
return conf
| true | true |
f7f841c63e49029ce223634f7e52cf56ef76b22b | 627 | py | Python | prog_praxis/harvey.py | genos/online_problems | 324597e8b64d74ad96dbece551a8220a1b61e615 | [
"MIT"
] | 1 | 2020-07-17T13:15:21.000Z | 2020-07-17T13:15:21.000Z | prog_praxis/harvey.py | genos/online_problems | 324597e8b64d74ad96dbece551a8220a1b61e615 | [
"MIT"
] | null | null | null | prog_praxis/harvey.py | genos/online_problems | 324597e8b64d74ad96dbece551a8220a1b61e615 | [
"MIT"
] | null | null | null | from itertools import combinations, ifilter, imap, izip
def sums(n):
    """Return [1+..+n, 1**2+..+n**2, 1**3+..+n**3] via closed-form identities."""
    triangular = n * (n + 1) / 2
    square_sum = n * (n + 1) * (2 * n + 1) / 6
    return [triangular, square_sum, triangular ** 2]
def prop(sub):
    # True when sub's power sums of orders 1-3 are exactly half of the
    # totals for 1..16, i.e. sub and its complement split sum, sum of
    # squares and sum of cubes evenly.
    # NOTE(review): Python 2 only -- relies on itertools.izip.
    return all(0 == a - b for (a, b) in
               izip(sums(16), map(lambda x: 2 * x,
                                  [sum(sub), sum(x**2 for x in sub),
                                   sum(x**3 for x in sub)])))
if __name__ == "__main__":
    # NOTE(review): pprint is imported but never used.
    from pprint import pprint
    s = set(range(1, 17))
    # Print the first 8-element subset of 1..16 that splits the power
    # sums evenly (per prop), paired with its complement.
    # NOTE(review): Python 2 syntax (print statement, imap/ifilter).
    print next(imap(lambda sub: [sub, tuple(s.difference(sub))],
                    ifilter(prop, combinations(range(1, 17), 8))))
| 33 | 69 | 0.460925 | from itertools import combinations, ifilter, imap, izip
def sums(n):
return [n * (n + 1) / 2,
n * (n + 1) * (2 * n + 1) / 6,
(n * (n + 1) / 2) ** 2]
def prop(sub):
return all(0 == a - b for (a, b) in
izip(sums(16), map(lambda x: 2 * x,
[sum(sub), sum(x**2 for x in sub),
sum(x**3 for x in sub)])))
if __name__ == "__main__":
from pprint import pprint
s = set(range(1, 17))
print next(imap(lambda sub: [sub, tuple(s.difference(sub))],
ifilter(prop, combinations(range(1, 17), 8))))
| false | true |
f7f8430f045f30bfee6b9a7b0eb642ec66a7a4fe | 10,526 | py | Python | tests/test_integration.py | kotnik/nikola | d08ed98b1b9bf7cbdabf06a18b51a3acdb745625 | [
"MIT"
] | null | null | null | tests/test_integration.py | kotnik/nikola | d08ed98b1b9bf7cbdabf06a18b51a3acdb745625 | [
"MIT"
] | null | null | null | tests/test_integration.py | kotnik/nikola | d08ed98b1b9bf7cbdabf06a18b51a3acdb745625 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import codecs
from contextlib import contextmanager
import locale
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import lxml.html
from nose.plugins.skip import SkipTest
from context import nikola
from nikola import main
@contextmanager
def cd(path):
    """Temporarily change the working directory to *path*.

    BUG FIX: the original did not wrap the yield in try/finally, so an
    exception raised inside the with-block left the process in *path*
    instead of restoring the previous working directory.
    """
    old_dir = os.getcwd()
    os.chdir(path)
    try:
        yield
    finally:
        os.chdir(old_dir)
class EmptyBuildTest(unittest.TestCase):
    """Basic integration testcase.

    Builds an empty Nikola site in a temporary directory.  Subclasses
    override fill_site()/patch_site() to customise the site before the
    build, and may set ``dataname`` to copy fixture files in.
    """
    # Optional name of a directory under tests/data whose tree is copied
    # into the target site by fill_site().
    dataname = None
    def setUp(self):
        """Setup a demo site."""
        self.tmpdir = tempfile.mkdtemp()
        self.target_dir = os.path.join(self.tmpdir, "target")
        self.init_command = nikola.plugins.command_init.CommandInit()
        # Order matters: create the content, tweak it, then build.
        self.fill_site()
        self.patch_site()
        self.build()
    def fill_site(self):
        """Add any needed initial content."""
        self.init_command.create_empty_site(self.target_dir)
        self.init_command.create_configuration(self.target_dir)
        if self.dataname:
            src = os.path.join(os.path.dirname(__file__), 'data',
                               self.dataname)
            # Mirror the fixture tree into the target site, file by file.
            for root, dirs, files in os.walk(src):
                for src_name in files:
                    rel_dir = os.path.relpath(root, src)
                    dst_file = os.path.join(self.target_dir, rel_dir, src_name)
                    src_file = os.path.join(root, src_name)
                    shutil.copy2(src_file, dst_file)
    def patch_site(self):
        """Make any modifications you need to the site."""
    def build(self):
        """Build the site."""
        with cd(self.target_dir):
            main.main(["build"])
    def tearDown(self):
        """Remove the demo site."""
        shutil.rmtree(self.tmpdir)
        # Fixes Issue #438: drop the cached conf module so the next
        # test re-imports its own conf.py instead of reusing this one's.
        try:
            del sys.modules['conf']
        except KeyError:
            pass
    def test_build(self):
        """Ensure the build did something."""
        index_path = os.path.join(
            self.target_dir, "output", "archive.html")
        self.assertTrue(os.path.isfile(index_path))
class DemoBuildTest(EmptyBuildTest):
    """Test that a default build of --demo works."""

    def fill_site(self):
        """Fill the site with demo content."""
        self.init_command.copy_sample_site(self.target_dir)
        self.init_command.create_configuration(self.target_dir)
        # File for Issue #374 (empty post text)
        with codecs.open(os.path.join(self.target_dir, 'posts', 'empty.txt'), "wb+", "utf8") as outf:
            outf.write(
                ".. title: foobar\n"
                ".. slug: foobar\n"
                ".. date: 2013/03/06 19:08:15\n"
            )

    def test_index_in_sitemap(self):
        """The site root must be listed in the generated sitemap."""
        sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
        # BUG FIX: read via a context manager -- the original called
        # codecs.open(...).read() and leaked the open file handle.
        with codecs.open(sitemap_path, "r", "utf8") as inf:
            sitemap_data = inf.read()
        self.assertTrue('<loc>http://nikola.ralsina.com.ar/</loc>' in sitemap_data)
class FuturePostTest(DemoBuildTest):
    """Test a site with future posts."""

    def fill_site(self):
        """Create one past-dated ('foo') and one future-dated ('bar') post."""
        import datetime
        from nikola.utils import current_time
        self.init_command.copy_sample_site(self.target_dir)
        self.init_command.create_configuration(self.target_dir)
        with codecs.open(os.path.join(self.target_dir, 'posts', 'empty1.txt'), "wb+", "utf8") as outf:
            outf.write(
                ".. title: foo\n"
                ".. slug: foo\n"
                ".. date: %s\n" % (current_time() + datetime.timedelta(-1)).strftime('%Y/%m/%d %T')
            )
        with codecs.open(os.path.join(self.target_dir, 'posts', 'empty2.txt'), "wb+", "utf8") as outf:
            outf.write(
                ".. title: bar\n"
                ".. slug: bar\n"
                ".. date: %s\n" % (current_time() + datetime.timedelta(1)).strftime('%Y/%m/%d %T')
            )

    def test_future_post(self):
        """ Ensure that the future post is not present in the index and sitemap."""
        index_path = os.path.join(self.target_dir, "output", "index.html")
        sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
        foo_path = os.path.join(self.target_dir, "output", "posts", "foo.html")
        bar_path = os.path.join(self.target_dir, "output", "posts", "bar.html")
        # Both posts are rendered to disk...
        self.assertTrue(os.path.isfile(index_path))
        self.assertTrue(os.path.isfile(foo_path))
        self.assertTrue(os.path.isfile(bar_path))
        # BUG FIX: read via context managers -- the original called
        # codecs.open(...).read() twice and leaked both file handles.
        with codecs.open(index_path, "r", "utf8") as inf:
            index_data = inf.read()
        with codecs.open(sitemap_path, "r", "utf8") as inf:
            sitemap_data = inf.read()
        # ...but only the past-dated post is linked from index/sitemap.
        self.assertTrue('foo.html' in index_data)
        self.assertFalse('bar.html' in index_data)
        self.assertTrue('foo.html' in sitemap_data)
        self.assertFalse('bar.html' in sitemap_data)
class TranslatedBuildTest(EmptyBuildTest):
    """Test a site with translated content."""
    # Fixture tree copied in by EmptyBuildTest.fill_site().
    dataname = "translated_titles"

    def __init__(self, *a, **kw):
        super(TranslatedBuildTest, self).__init__(*a, **kw)
        try:
            locale.setlocale(locale.LC_ALL, ("es", "utf8"))
        except locale.Error:
            # BUG FIX: narrowed from a bare "except:", which also swallowed
            # KeyboardInterrupt/SystemExit.  setlocale raises locale.Error
            # when the Spanish locale is unavailable; skip the test then.
            raise SkipTest

    def test_translated_titles(self):
        """Check that translated title is picked up."""
        en_file = os.path.join(self.target_dir, "output", "stories", "1.html")
        es_file = os.path.join(self.target_dir, "output", "es", "stories", "1.html")
        # Files should be created
        self.assertTrue(os.path.isfile(en_file))
        self.assertTrue(os.path.isfile(es_file))
        # And now let's check the titles
        with codecs.open(en_file, 'r', 'utf8') as inf:
            doc = lxml.html.parse(inf)
            self.assertEqual(doc.find('//title').text, 'Foo | Demo Site')
        with codecs.open(es_file, 'r', 'utf8') as inf:
            doc = lxml.html.parse(inf)
            self.assertEqual(doc.find('//title').text, 'Bar | Demo Site')
class RelativeLinkTest(DemoBuildTest):
    """Check that SITE_URL with a path doesn't break links."""

    def patch_site(self):
        """Rewrite conf.py so SITE_URL carries a /foo/bar/ path component."""
        conf_path = os.path.join(self.target_dir, "conf.py")
        with codecs.open(conf_path, "rb", "utf-8") as inf:
            data = inf.read()
        data = data.replace('SITE_URL = "http://nikola.ralsina.com.ar"',
                            'SITE_URL = "http://nikola.ralsina.com.ar/foo/bar/"')
        with codecs.open(conf_path, "wb+", "utf8") as outf:
            outf.write(data)

    def test_relative_links(self):
        """Check that the links in output/index.html are correct"""
        test_path = os.path.join(self.target_dir, "output", "index.html")
        flag = False
        with open(test_path, "rb") as inf:
            data = inf.read()
        for _, _, url, _ in lxml.html.iterlinks(data):
            # Just need to be sure this one is ok
            if url.endswith("css"):
                self.assertFalse(url.startswith(".."))
                flag = True
        # But I also need to be sure it is there!
        self.assertTrue(flag)

    def test_index_in_sitemap(self):
        """Test that the correct path is in sitemap, and not the wrong one."""
        sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
        # BUG FIX: read via a context manager -- the original called
        # codecs.open(...).read() and leaked the open file handle.
        with codecs.open(sitemap_path, "r", "utf8") as inf:
            sitemap_data = inf.read()
        self.assertFalse('<loc>http://nikola.ralsina.com.ar/</loc>' in sitemap_data)
        self.assertTrue('<loc>http://nikola.ralsina.com.ar/foo/bar/</loc>' in sitemap_data)
class TestCheck(DemoBuildTest):
    """The demo site build must pass both modes of 'nikola check'."""

    def test_check_links(self):
        # The link checker exits with status 0 on a clean build.
        with cd(self.target_dir):
            retcode = subprocess.call("nikola check -l", shell=True)
        self.assertEqual(retcode, 0)

    def test_check_files(self):
        # The file checker exits with status 0 on a clean build.
        with cd(self.target_dir):
            retcode = subprocess.call("nikola check -f", shell=True)
        self.assertEqual(retcode, 0)
class TestCheckFailure(DemoBuildTest):
    """Broken output must make 'nikola check' exit with status 1."""

    def test_check_links_fail(self):
        with cd(self.target_dir):
            # Removing a generated page breaks internal links.
            os.unlink(os.path.join("output", "archive.html"))
            retcode = subprocess.call("nikola check -l", shell=True)
            self.assertEqual(retcode, 1)

    def test_check_files_fail(self):
        with cd(self.target_dir):
            # Planting a stray file makes the file check complain.
            with codecs.open(os.path.join("output", "foobar"), "wb+", "utf8") as outf:
                outf.write("foo")
            retcode = subprocess.call("nikola check -f", shell=True)
            self.assertEqual(retcode, 1)
class RelativeLinkTest2(DemoBuildTest):
    """Check that dropping stories to the root doesn't break links."""

    def patch_site(self):
        """Move stories to the site root and the index under /blog."""
        conf_path = os.path.join(self.target_dir, "conf.py")
        with codecs.open(conf_path, "rb", "utf-8") as inf:
            data = inf.read()
        data = data.replace('("stories/*.txt", "stories", "story.tmpl", False),',
                            '("stories/*.txt", "", "story.tmpl", False),')
        data = data.replace('("stories/*.rst", "stories", "story.tmpl", False),',
                            '("stories/*.rst", "", "story.tmpl", False),')
        data = data.replace('# INDEX_PATH = ""',
                            'INDEX_PATH = "blog"')
        with codecs.open(conf_path, "wb+", "utf8") as outf:
            outf.write(data)
            outf.flush()

    def test_relative_links(self):
        """Check that the links in a story are correct"""
        test_path = os.path.join(self.target_dir, "output", "about-nikola.html")
        flag = False
        with open(test_path, "rb") as inf:
            data = inf.read()
        for _, _, url, _ in lxml.html.iterlinks(data):
            # Just need to be sure this one is ok
            if url.endswith("css"):
                self.assertFalse(url.startswith(".."))
                flag = True
        # But I also need to be sure it is there!
        self.assertTrue(flag)

    def test_index_in_sitemap(self):
        """Test that the correct path is in sitemap, and not the wrong one."""
        sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
        # BUG FIX: read via a context manager -- the original called
        # codecs.open(...).read() and leaked the open file handle.
        with codecs.open(sitemap_path, "r", "utf8") as inf:
            sitemap_data = inf.read()
        self.assertFalse('<loc>http://nikola.ralsina.com.ar/</loc>' in sitemap_data)
        self.assertTrue('<loc>http://nikola.ralsina.com.ar/blog/</loc>' in sitemap_data)
| 38.416058 | 102 | 0.590348 |
from __future__ import unicode_literals, print_function
import codecs
from contextlib import contextmanager
import locale
import os
import shutil
import subprocess
import sys
import tempfile
import unittest
import lxml.html
from nose.plugins.skip import SkipTest
from context import nikola
from nikola import main
@contextmanager
def cd(path):
old_dir = os.getcwd()
os.chdir(path)
yield
os.chdir(old_dir)
class EmptyBuildTest(unittest.TestCase):
dataname = None
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.target_dir = os.path.join(self.tmpdir, "target")
self.init_command = nikola.plugins.command_init.CommandInit()
self.fill_site()
self.patch_site()
self.build()
def fill_site(self):
self.init_command.create_empty_site(self.target_dir)
self.init_command.create_configuration(self.target_dir)
if self.dataname:
src = os.path.join(os.path.dirname(__file__), 'data',
self.dataname)
for root, dirs, files in os.walk(src):
for src_name in files:
rel_dir = os.path.relpath(root, src)
dst_file = os.path.join(self.target_dir, rel_dir, src_name)
src_file = os.path.join(root, src_name)
shutil.copy2(src_file, dst_file)
def patch_site(self):
def build(self):
with cd(self.target_dir):
main.main(["build"])
def tearDown(self):
shutil.rmtree(self.tmpdir)
try:
del sys.modules['conf']
except KeyError:
pass
def test_build(self):
index_path = os.path.join(
self.target_dir, "output", "archive.html")
self.assertTrue(os.path.isfile(index_path))
class DemoBuildTest(EmptyBuildTest):
def fill_site(self):
self.init_command.copy_sample_site(self.target_dir)
self.init_command.create_configuration(self.target_dir)
pen(os.path.join(self.target_dir, 'posts', 'empty.txt'), "wb+", "utf8") as outf:
outf.write(
".. title: foobar\n"
".. slug: foobar\n"
".. date: 2013/03/06 19:08:15\n"
)
def test_index_in_sitemap(self):
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertTrue('<loc>http://nikola.ralsina.com.ar/</loc>' in sitemap_data)
class FuturePostTest(DemoBuildTest):
def fill_site(self):
import datetime
from nikola.utils import current_time
self.init_command.copy_sample_site(self.target_dir)
self.init_command.create_configuration(self.target_dir)
with codecs.open(os.path.join(self.target_dir, 'posts', 'empty1.txt'), "wb+", "utf8") as outf:
outf.write(
".. title: foo\n"
".. slug: foo\n"
".. date: %s\n" % (current_time() + datetime.timedelta(-1)).strftime('%Y/%m/%d %T')
)
with codecs.open(os.path.join(self.target_dir, 'posts', 'empty2.txt'), "wb+", "utf8") as outf:
outf.write(
".. title: bar\n"
".. slug: bar\n"
".. date: %s\n" % (current_time() + datetime.timedelta(1)).strftime('%Y/%m/%d %T')
)
def test_future_post(self):
index_path = os.path.join(self.target_dir, "output", "index.html")
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
foo_path = os.path.join(self.target_dir, "output", "posts", "foo.html")
bar_path = os.path.join(self.target_dir, "output", "posts", "bar.html")
self.assertTrue(os.path.isfile(index_path))
self.assertTrue(os.path.isfile(foo_path))
self.assertTrue(os.path.isfile(bar_path))
index_data = codecs.open(index_path, "r", "utf8").read()
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertTrue('foo.html' in index_data)
self.assertFalse('bar.html' in index_data)
self.assertTrue('foo.html' in sitemap_data)
self.assertFalse('bar.html' in sitemap_data)
class TranslatedBuildTest(EmptyBuildTest):
dataname = "translated_titles"
def __init__(self, *a, **kw):
super(TranslatedBuildTest, self).__init__(*a, **kw)
try:
locale.setlocale(locale.LC_ALL, ("es", "utf8"))
except:
raise SkipTest
def test_translated_titles(self):
en_file = os.path.join(self.target_dir, "output", "stories", "1.html")
es_file = os.path.join(self.target_dir, "output", "es", "stories", "1.html")
self.assertTrue(os.path.isfile(en_file))
self.assertTrue(os.path.isfile(es_file))
with codecs.open(en_file, 'r', 'utf8') as inf:
doc = lxml.html.parse(inf)
self.assertEqual(doc.find('//title').text, 'Foo | Demo Site')
with codecs.open(es_file, 'r', 'utf8') as inf:
doc = lxml.html.parse(inf)
self.assertEqual(doc.find('//title').text, 'Bar | Demo Site')
class RelativeLinkTest(DemoBuildTest):
def patch_site(self):
conf_path = os.path.join(self.target_dir, "conf.py")
with codecs.open(conf_path, "rb", "utf-8") as inf:
data = inf.read()
data = data.replace('SITE_URL = "http://nikola.ralsina.com.ar"',
'SITE_URL = "http://nikola.ralsina.com.ar/foo/bar/"')
with codecs.open(conf_path, "wb+", "utf8") as outf:
outf.write(data)
def test_relative_links(self):
test_path = os.path.join(self.target_dir, "output", "index.html")
flag = False
with open(test_path, "rb") as inf:
data = inf.read()
for _, _, url, _ in lxml.html.iterlinks(data):
# Just need to be sure this one is ok
if url.endswith("css"):
self.assertFalse(url.startswith(".."))
flag = True
# But I also need to be sure it is there!
self.assertTrue(flag)
def test_index_in_sitemap(self):
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertFalse('<loc>http://nikola.ralsina.com.ar/</loc>' in sitemap_data)
self.assertTrue('<loc>http://nikola.ralsina.com.ar/foo/bar/</loc>' in sitemap_data)
class TestCheck(DemoBuildTest):
def test_check_links(self):
with cd(self.target_dir):
p = subprocess.call("nikola check -l", shell=True)
self.assertEqual(p, 0)
def test_check_files(self):
with cd(self.target_dir):
p = subprocess.call("nikola check -f", shell=True)
self.assertEqual(p, 0)
class TestCheckFailure(DemoBuildTest):
def test_check_links_fail(self):
with cd(self.target_dir):
os.unlink(os.path.join("output", "archive.html"))
p = subprocess.call("nikola check -l", shell=True)
self.assertEqual(p, 1)
def test_check_files_fail(self):
with cd(self.target_dir):
with codecs.open(os.path.join("output", "foobar"), "wb+", "utf8") as outf:
outf.write("foo")
p = subprocess.call("nikola check -f", shell=True)
self.assertEqual(p, 1)
class RelativeLinkTest2(DemoBuildTest):
def patch_site(self):
conf_path = os.path.join(self.target_dir, "conf.py")
with codecs.open(conf_path, "rb", "utf-8") as inf:
data = inf.read()
data = data.replace('("stories/*.txt", "stories", "story.tmpl", False),',
'("stories/*.txt", "", "story.tmpl", False),')
data = data.replace('("stories/*.rst", "stories", "story.tmpl", False),',
'("stories/*.rst", "", "story.tmpl", False),')
data = data.replace('
'INDEX_PATH = "blog"')
with codecs.open(conf_path, "wb+", "utf8") as outf:
outf.write(data)
outf.flush()
def test_relative_links(self):
test_path = os.path.join(self.target_dir, "output", "about-nikola.html")
flag = False
with open(test_path, "rb") as inf:
data = inf.read()
for _, _, url, _ in lxml.html.iterlinks(data):
# Just need to be sure this one is ok
if url.endswith("css"):
self.assertFalse(url.startswith(".."))
flag = True
# But I also need to be sure it is there!
self.assertTrue(flag)
def test_index_in_sitemap(self):
sitemap_path = os.path.join(self.target_dir, "output", "sitemap.xml")
sitemap_data = codecs.open(sitemap_path, "r", "utf8").read()
self.assertFalse('<loc>http://nikola.ralsina.com.ar/</loc>' in sitemap_data)
self.assertTrue('<loc>http://nikola.ralsina.com.ar/blog/</loc>' in sitemap_data)
| true | true |
f7f8435f06cde9475c6292a87525217f83a015a4 | 1,288 | py | Python | 2018/Q1.py | s-cork/BIO | 7f3b3e1e7b47da5ea5f96569836946c4d28277fe | [
"MIT"
] | null | null | null | 2018/Q1.py | s-cork/BIO | 7f3b3e1e7b47da5ea5f96569836946c4d28277fe | [
"MIT"
] | null | null | null | 2018/Q1.py | s-cork/BIO | 7f3b3e1e7b47da5ea5f96569836946c4d28277fe | [
"MIT"
] | null | null | null | from math import ceil
def round_up(num):
    """Round ``num`` up to 2 decimal places, ignoring digits beyond 8 dp.

    The value is first truncated to 8 decimal places (discarding floating
    point noise), then rounded up to the next hundredth:

        43.000657543332 -> 4300065754.3332 -> 4300065754
        -> 4300.065754 -> ceil -> 4301 -> 43.01

    (The previous docstring's trace ended in ``44``, which does not match
    what the code computes; the correct result is ``43.01``.)
    """
    # Shift by 8 dp and truncate, then shift back so 6 fractional digits
    # remain before taking the ceiling at the hundredths position.
    num = int(num * 10**8) / 10**6
    num = ceil(num) / 100
    return num
def repay_program(interest, repay):
    """Yield each repayment on a debt that starts at 100.

    Every iteration first applies ``interest`` (a multiplier, e.g. 1.05
    for 5%), then pays one of:

    * the whole remaining debt, when it is below the percentage repayment
      or below the 50 minimum (final payment);
    * the flat minimum of 50, when the percentage repayment is under 50;
    * ``round_up(debt * repay)`` otherwise.

    The generator stops once the debt reaches zero.
    """
    debt = 100
    while debt > 0:
        debt = round_up(debt * interest)
        if debt < debt * repay:
            # Remaining debt is below the regular repayment: clear it.
            yield debt
            debt = 0
        elif debt < 50:
            # Below the 50 minimum: the final payment clears the debt.
            yield debt
            debt = 0
        elif debt * repay < 50:
            # Percentage repayment under the floor: pay the minimum 50.
            yield 50
            debt -= 50
        else:
            # Hoisted so the rounded repayment is computed only once
            # (the original called round_up(debt * repay) twice).
            payment = round_up(debt * repay)
            yield payment
            debt -= payment
# Interactive driver: each input line holds two integer percentages,
# "<interest> <repayment>" (e.g. "5 25"), separated by a single space.
while True:
    # Convert both percentages to fractions; "5 25" -> 0.05, 0.25.
    interest, repay = map(lambda x: int(x)/100, input('').split(' '))
    # Turn the interest fraction into a multiplier (0.05 -> 1.05).
    interest +=1
    # Print the rounded total repaid on the initial debt of 100.
    print(round_up(sum(repay_program(interest, repay))))
| 25.76 | 73 | 0.592391 | from math import ceil
def round_up(num):
num = int(num*10**8)/10**6
num = ceil(num)/100
return num
def repay_program(interest, repay):
debt = 100
while debt > 0:
debt = round_up(debt * interest)
if debt < debt * repay:
yield debt
debt = 0
elif debt < 50:
yield debt
debt = 0
elif debt * repay < 50:
yield 50
debt -= 50
else:
yield round_up(debt * repay)
debt = debt - round_up(debt * repay)
while True:
interest, repay = map(lambda x: int(x)/100, input('').split(' '))
interest +=1
print(round_up(sum(repay_program(interest, repay))))
| true | true |
f7f843a5599aa5aacb8040c092d4f1aa27edf5f9 | 10,829 | py | Python | lib/python3.8/site-packages/ansible_collections/community/general/plugins/lookup/manifold.py | cjsteel/python3-venv-ansible-2.10.5 | c95395c4cae844dc66fddde9b4343966f4b2ecd5 | [
"Apache-1.1"
] | 5 | 2020-12-16T21:42:09.000Z | 2022-03-28T16:04:32.000Z | .ansible/collections/ansible_collections/community/general/plugins/lookup/manifold.py | chronicc/proving-ground | 3e392122a05fb8383a3700954baebb0df330e9e3 | [
"MIT"
] | null | null | null | .ansible/collections/ansible_collections/community/general/plugins/lookup/manifold.py | chronicc/proving-ground | 3e392122a05fb8383a3700954baebb0df330e9e3 | [
"MIT"
] | null | null | null | # (c) 2018, Arigato Machine Inc.
# (c) 2018, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
lookup: manifold
short_description: get credentials from Manifold.co
description:
- Retrieves resources' credentials from Manifold.co
options:
_terms:
description:
- Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
matched resources will be returned.
type: list
elements: string
required: False
api_token:
description:
- manifold API token
type: string
required: True
env:
- name: MANIFOLD_API_TOKEN
project:
description:
- The project label you want to get the resource for.
type: string
required: False
team:
description:
- The team label you want to get the resource for.
type: string
required: False
'''
EXAMPLES = '''
- name: all available resources
ansible.builtin.debug:
msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
- name: all available resources for a specific project in specific team
ansible.builtin.debug:
msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
- name: two specific resources
ansible.builtin.debug:
msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
'''
RETURN = '''
_raw:
description:
- dictionary of credentials ready to be consumed as environment variables. If multiple resources define
the same environment variable(s), the last one returned by the Manifold API will take precedence.
type: dict
'''
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils import six
from ansible.utils.display import Display
from traceback import format_exception
import json
import sys
import os
display = Display()
class ApiError(Exception):
    """Raised when a Manifold API request fails or returns unusable data."""
class ManifoldApiClient(object):
    """Thin client for the Manifold.co REST APIs (identity / marketplace)."""

    base_url = 'https://api.{api}.manifold.co/v1/{endpoint}'
    http_agent = 'python-manifold-ansible-1.0.0'

    def __init__(self, token):
        """
        :param token: Manifold API bearer token sent with every request
        :type token: str
        """
        self._token = token

    @staticmethod
    def _with_query(endpoint, query_params):
        """
        Append urlencoded query parameters to an endpoint path.

        Extracted so get_resources/get_projects/get_credentials share one
        implementation instead of repeating the same string building.

        :param endpoint: bare endpoint path, e.g. ``'resources'``
        :type endpoint: str
        :param query_params: query arguments; empty mapping means no query
        :type query_params: dict
        :return: endpoint, with a ``?``-query appended when params exist
        :rtype: str
        """
        if query_params:
            return endpoint + '?' + urlencode(query_params)
        return endpoint

    def request(self, api, endpoint, *args, **kwargs):
        """
        Send a request to API backend and pre-process a response.

        :param api: API to send a request to
        :type api: str
        :param endpoint: API endpoint to fetch data from
        :type endpoint: str
        :param args: other args for open_url
        :param kwargs: other kwargs for open_url
        :return: server response. JSON response is automatically deserialized.
        :rtype: dict | list | str
        """
        headers = {
            'Authorization': "Bearer {0}".format(self._token),
            'Accept': "*/*"  # Otherwise server doesn't set content-type header
        }
        arg_headers = kwargs.pop('headers', None)
        if arg_headers:
            headers.update(arg_headers)

        url = self.base_url.format(api=api, endpoint=endpoint)
        try:
            display.vvvv('manifold lookup connecting to {0}'.format(url))
            response = open_url(url, *args, headers=headers, http_agent=self.http_agent, **kwargs)
            data = response.read()
            if response.headers.get('content-type') == 'application/json':
                data = json.loads(data)
            return data
        except ValueError:
            # Only json.loads can raise ValueError here, so `data` is
            # always bound when this handler runs.
            raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url))
        except HTTPError as e:
            # Must precede URLError: HTTPError is a URLError subclass.
            raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format(
                err=str(e), url=url, response=e.read()))
        except URLError as e:
            raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e)))
        except SSLValidationError as e:
            raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e)))
        except ConnectionError as e:
            raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e)))

    def get_resources(self, team_id=None, project_id=None, label=None):
        """
        Get resources list

        :param team_id: ID of the Team to filter resources by
        :type team_id: str
        :param project_id: ID of the project to filter resources by
        :type project_id: str
        :param label: filter resources by a label, returns a list with one or zero elements
        :type label: str
        :return: list of resources
        :rtype: list
        """
        query_params = {}
        if team_id:
            query_params['team_id'] = team_id
        if project_id:
            query_params['project_id'] = project_id
        if label:
            query_params['label'] = label
        return self.request('marketplace', self._with_query('resources', query_params))

    def get_teams(self, label=None):
        """
        Get teams list

        :param label: filter teams by a label, returns a list with one or zero elements
        :type label: str
        :return: list of teams
        :rtype: list
        """
        data = self.request('identity', 'teams')
        # Label filtering is not supported by API, however this function provides uniform interface
        if label:
            data = list(filter(lambda x: x['body']['label'] == label, data))
        return data

    def get_projects(self, label=None):
        """
        Get projects list

        :param label: filter projects by a label, returns a list with one or zero elements
        :type label: str
        :return: list of projects
        :rtype: list
        """
        query_params = {}
        if label:
            query_params['label'] = label
        return self.request('marketplace', self._with_query('projects', query_params))

    def get_credentials(self, resource_id):
        """
        Get resource credentials

        :param resource_id: ID of the resource to filter credentials by
        :type resource_id: str
        :return: list of credential objects for the resource
        """
        return self.request('marketplace', self._with_query('credentials', {'resource_id': resource_id}))
class LookupModule(LookupBase):
    """Lookup plugin that resolves Manifold resource credentials."""

    def run(self, terms, variables=None, api_token=None, project=None, team=None):
        """
        Fetch credentials for the requested Manifold resources.

        :param terms: a list of resources lookups to run.
        :param variables: ansible variables active at the time of the lookup
        :param api_token: API token
        :param project: optional project label
        :param team: optional team label
        :return: a dictionary of resources credentials
        """
        token = api_token or os.getenv('MANIFOLD_API_TOKEN')
        if not token:
            raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var')

        try:
            client = ManifoldApiClient(token)

            # Resolve the optional team/project labels to their IDs.
            team_id = None
            if team:
                matching_teams = client.get_teams(team)
                if not matching_teams:
                    raise AnsibleError("Team '{0}' does not exist".format(team))
                team_id = matching_teams[0]['id']

            project_id = None
            if project:
                matching_projects = client.get_projects(project)
                if not matching_projects:
                    raise AnsibleError("Project '{0}' does not exist".format(project))
                project_id = matching_projects[0]['id']

            # A single label can be filtered server-side; otherwise fetch
            # everything and filter locally.
            if len(terms) == 1:
                resources = client.get_resources(team_id=team_id, project_id=project_id, label=terms[0])
            else:
                resources = client.get_resources(team_id=team_id, project_id=project_id)
                if terms:
                    resources = [r for r in resources if r['body']['label'] in terms]

            if terms and len(resources) < len(terms):
                fetched = [r['body']['label'] for r in resources]
                missing = [name for name in terms if name not in fetched]
                raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(missing)))

            credentials = {}
            source_of = {}  # env var name -> label of the resource it came from
            for resource in resources:
                creds = client.get_credentials(resource['id'])
                if not (len(creds) and creds[0]['body']['values']):
                    continue
                label = resource['body']['label']
                for cred_key, cred_val in six.iteritems(creds[0]['body']['values']):
                    if cred_key in credentials:
                        display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
                                        "with label '{new_label}'".format(cred_key=cred_key,
                                                                          old_label=source_of[cred_key],
                                                                          new_label=label))
                    credentials[cred_key] = cred_val
                    source_of[cred_key] = label
            return [credentials]
        except ApiError as e:
            raise AnsibleError('API Error: {0}'.format(str(e)))
        except AnsibleError as e:
            raise e
        except Exception:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
| 38.81362 | 119 | 0.593499 |
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
author:
- Kyrylo Galanov (!UNKNOWN) <galanoff@gmail.com>
lookup: manifold
short_description: get credentials from Manifold.co
description:
- Retrieves resources' credentials from Manifold.co
options:
_terms:
description:
- Optional list of resource labels to lookup on Manifold.co. If no resources are specified, all
matched resources will be returned.
type: list
elements: string
required: False
api_token:
description:
- manifold API token
type: string
required: True
env:
- name: MANIFOLD_API_TOKEN
project:
description:
- The project label you want to get the resource for.
type: string
required: False
team:
description:
- The team label you want to get the resource for.
type: string
required: False
'''
EXAMPLES = '''
- name: all available resources
ansible.builtin.debug:
msg: "{{ lookup('community.general.manifold', api_token='SecretToken') }}"
- name: all available resources for a specific project in specific team
ansible.builtin.debug:
msg: "{{ lookup('community.general.manifold', api_token='SecretToken', project='poject-1', team='team-2') }}"
- name: two specific resources
ansible.builtin.debug:
msg: "{{ lookup('community.general.manifold', 'resource-1', 'resource-2') }}"
'''
RETURN = '''
_raw:
description:
- dictionary of credentials ready to be consumed as environment variables. If multiple resources define
the same environment variable(s), the last one returned by the Manifold API will take precedence.
type: dict
'''
from ansible.errors import AnsibleError
from ansible.plugins.lookup import LookupBase
from ansible.module_utils.urls import open_url, ConnectionError, SSLValidationError
from ansible.module_utils.six.moves.urllib.error import HTTPError, URLError
from ansible.module_utils.six.moves.urllib.parse import urlencode
from ansible.module_utils import six
from ansible.utils.display import Display
from traceback import format_exception
import json
import sys
import os
display = Display()
class ApiError(Exception):
pass
class ManifoldApiClient(object):
base_url = 'https://api.{api}.manifold.co/v1/{endpoint}'
http_agent = 'python-manifold-ansible-1.0.0'
def __init__(self, token):
self._token = token
def request(self, api, endpoint, *args, **kwargs):
default_headers = {
'Authorization': "Bearer {0}".format(self._token),
'Accept': "*/*" # Otherwise server doesn't set content-type header
}
url = self.base_url.format(api=api, endpoint=endpoint)
headers = default_headers
arg_headers = kwargs.pop('headers', None)
if arg_headers:
headers.update(arg_headers)
try:
display.vvvv('manifold lookup connecting to {0}'.format(url))
response = open_url(url, headers=headers, http_agent=self.http_agent, *args, **kwargs)
data = response.read()
if response.headers.get('content-type') == 'application/json':
data = json.loads(data)
return data
except ValueError:
raise ApiError('JSON response can\'t be parsed while requesting {url}:\n{json}'.format(json=data, url=url))
except HTTPError as e:
raise ApiError('Server returned: {err} while requesting {url}:\n{response}'.format(
err=str(e), url=url, response=e.read()))
except URLError as e:
raise ApiError('Failed lookup url for {url} : {err}'.format(url=url, err=str(e)))
except SSLValidationError as e:
raise ApiError('Error validating the server\'s certificate for {url}: {err}'.format(url=url, err=str(e)))
except ConnectionError as e:
raise ApiError('Error connecting to {url}: {err}'.format(url=url, err=str(e)))
def get_resources(self, team_id=None, project_id=None, label=None):
api = 'marketplace'
endpoint = 'resources'
query_params = {}
if team_id:
query_params['team_id'] = team_id
if project_id:
query_params['project_id'] = project_id
if label:
query_params['label'] = label
if query_params:
endpoint += '?' + urlencode(query_params)
return self.request(api, endpoint)
def get_teams(self, label=None):
api = 'identity'
endpoint = 'teams'
data = self.request(api, endpoint)
if label:
data = list(filter(lambda x: x['body']['label'] == label, data))
return data
def get_projects(self, label=None):
api = 'marketplace'
endpoint = 'projects'
query_params = {}
if label:
query_params['label'] = label
if query_params:
endpoint += '?' + urlencode(query_params)
return self.request(api, endpoint)
def get_credentials(self, resource_id):
api = 'marketplace'
endpoint = 'credentials?' + urlencode({'resource_id': resource_id})
return self.request(api, endpoint)
class LookupModule(LookupBase):
def run(self, terms, variables=None, api_token=None, project=None, team=None):
if not api_token:
api_token = os.getenv('MANIFOLD_API_TOKEN')
if not api_token:
raise AnsibleError('API token is required. Please set api_token parameter or MANIFOLD_API_TOKEN env var')
try:
labels = terms
client = ManifoldApiClient(api_token)
if team:
team_data = client.get_teams(team)
if len(team_data) == 0:
raise AnsibleError("Team '{0}' does not exist".format(team))
team_id = team_data[0]['id']
else:
team_id = None
if project:
project_data = client.get_projects(project)
if len(project_data) == 0:
raise AnsibleError("Project '{0}' does not exist".format(project))
project_id = project_data[0]['id']
else:
project_id = None
if len(labels) == 1:
resources_data = client.get_resources(team_id=team_id, project_id=project_id, label=labels[0])
else:
resources_data = client.get_resources(team_id=team_id, project_id=project_id)
if labels:
resources_data = list(filter(lambda x: x['body']['label'] in labels, resources_data))
if labels and len(resources_data) < len(labels):
fetched_labels = [r['body']['label'] for r in resources_data]
not_found_labels = [label for label in labels if label not in fetched_labels]
raise AnsibleError("Resource(s) {0} do not exist".format(', '.join(not_found_labels)))
credentials = {}
cred_map = {}
for resource in resources_data:
resource_credentials = client.get_credentials(resource['id'])
if len(resource_credentials) and resource_credentials[0]['body']['values']:
for cred_key, cred_val in six.iteritems(resource_credentials[0]['body']['values']):
label = resource['body']['label']
if cred_key in credentials:
display.warning("'{cred_key}' with label '{old_label}' was replaced by resource data "
"with label '{new_label}'".format(cred_key=cred_key,
old_label=cred_map[cred_key],
new_label=label))
credentials[cred_key] = cred_val
cred_map[cred_key] = label
ret = [credentials]
return ret
except ApiError as e:
raise AnsibleError('API Error: {0}'.format(str(e)))
except AnsibleError as e:
raise e
except Exception:
exc_type, exc_value, exc_traceback = sys.exc_info()
raise AnsibleError(format_exception(exc_type, exc_value, exc_traceback))
| true | true |
f7f8440a7194c0f23a86001844e4201ef7e04b76 | 973 | py | Python | tests/api/conftest.py | oarepo/oarepo-micro-api | 313d928c5c588635b300cf51272c7cae4aff1645 | [
"MIT"
] | null | null | null | tests/api/conftest.py | oarepo/oarepo-micro-api | 313d928c5c588635b300cf51272c7cae4aff1645 | [
"MIT"
] | 9 | 2020-06-02T15:04:45.000Z | 2021-04-08T11:47:51.000Z | tests/api/conftest.py | oarepo/oarepo-micro-api | 313d928c5c588635b300cf51272c7cae4aff1645 | [
"MIT"
] | 2 | 2020-05-13T07:42:40.000Z | 2020-05-18T15:25:37.000Z | # -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CESNET.
#
# OARepo Micro API is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Pytest fixtures and plugins for the API application."""
from __future__ import absolute_import, print_function
import pytest
import oarepo_micro_api # import for coverage
from invenio_app.factory import create_api
from webtest import TestApp
from oarepo_micro_api.wsgi import application
@pytest.fixture(scope='module')
def app_config(app_config):
    """Override the default Invenio app config for these tests."""
    app_config.update(
        SERVER_NAME='localhost',
        PREFERRED_URL_SCHEME='http',
        FLASK_ENV='development',
    )
    return app_config
@pytest.fixture(scope='module')
def wsgi(app):
    """Return a WebTest wrapper around the WSGI application.

    The ``app`` argument is requested only so the Invenio app fixture is
    initialized first; it is not used directly (the original rebinding
    ``app = TestApp(application)`` shadowed it, hiding that fact).
    """
    return TestApp(application)
@pytest.fixture(scope='module')
def create_app():
    """Return the Invenio API app factory consumed by pytest-invenio."""
    return create_api
| 24.325 | 77 | 0.725591 |
from __future__ import absolute_import, print_function
import pytest
import oarepo_micro_api
from invenio_app.factory import create_api
from webtest import TestApp
from oarepo_micro_api.wsgi import application
@pytest.fixture(scope='module')
def app_config(app_config):
app_config['SERVER_NAME'] = 'localhost'
app_config['PREFERRED_URL_SCHEME'] = 'http'
app_config['FLASK_ENV'] = 'development'
return app_config
@pytest.fixture(scope='module')
def wsgi(app):
app = TestApp(application)
return app
@pytest.fixture(scope='module')
def create_app():
return create_api
| true | true |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.