# Copyright (c) 2019-2022, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import cv2
import numpy as np
import math
import os
import random
from nvidia.dali.pipeline import Pipeline
import nvidia.dali.ops as ops
import nvidia.dali.types as types
import nvidia.dali as dali
from test_utils import compare_pipelines
from sequences_test_utils import ArgData, ArgDesc, get_video_input_cases, ParamsProvider, sequence_suite_helper, ArgCb
test_data_root = os.environ['DALI_EXTRA_PATH']
caffe_db_folder = os.path.join(test_data_root, 'db', 'lmdb')
def get_output_size(angle, input_size, parity_correction=True):
cosa = abs(math.cos(angle))
sina = abs(math.sin(angle))
(h, w) = input_size[0:2]
eps = 1e-2
out_w = int(math.ceil(w*cosa + h*sina - eps))
out_h = int(math.ceil(h*cosa + w*sina - eps))
if not parity_correction:
return (out_h, out_w)
if sina <= cosa:
if out_w % 2 != w % 2:
out_w += 1
if out_h % 2 != h % 2:
out_h += 1
else:
if out_w % 2 != h % 2:
out_w += 1
if out_h % 2 != w % 2:
out_h += 1
return (out_h, out_w)
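# Worked example (a hedged sanity check, not executed by the tests): rotating a
# 480x640 HW image by 30 degrees gives out_w = ceil(640*cos(30) + 480*sin(30) - eps)
# = 795 and out_h = ceil(480*cos(30) + 640*sin(30) - eps) = 736; since sin <= cos,
# the parity correction bumps out_w to 796 to match w's even parity -> (736, 796).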
def get_3d_lin_rotation(angle, axis):
# mirrors transform.h:rotation3D
if not angle:
        return np.eye(3, dtype=np.float32)
axis_norm = np.linalg.norm(axis)
axis = [dim / axis_norm for dim in axis]
u, v, w = axis
cosa = math.cos(angle)
sina = math.sin(angle)
return np.array([
[u*u + (v*v+w*w)*cosa, u*v*(1-cosa) - w*sina, u*w*(1-cosa) + v*sina],
[u*v*(1-cosa) + w*sina, v*v + (u*u+w*w)*cosa, v*w*(1-cosa) - u*sina],
[u*w*(1-cosa) - v*sina, v*w*(1-cosa) + u*sina, w*w + (u*u+v*v)*cosa],
], dtype=np.float32)
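# The matrix above is the Rodrigues rotation formula written out elementwise:
# R = cos(a)*I + sin(a)*[u]_x + (1 - cos(a))*u*u^T for the unit axis u = (u, v, w);
# e.g. the diagonal entry u*u + (v*v + w*w)*cosa equals cosa + u*u*(1 - cosa)
# because u*u + v*v + w*w == 1 after normalization.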
def get_3d_output_size(angle, axis, input_size, parity_correction=False):
transform = np.abs(get_3d_lin_rotation(angle, axis))
eps = 1e-2
in_size = np.array(input_size[2::-1], dtype=np.int32)
out_size = np.int32(np.ceil(np.matmul(transform, in_size) - eps))
if parity_correction:
dominant_axis = np.argmax(transform, axis=1)
out_size += (out_size % 2) ^ (in_size[dominant_axis] % 2)
return out_size[::-1]
def get_transform(angle, input_size, output_size):
cosa = math.cos(angle)
sina = math.sin(angle)
(out_h, out_w) = output_size[0:2]
(in_h, in_w) = input_size[0:2]
t1 = np.array([
[1, 0, -out_w*0.5],
[0, 1, -out_h*0.5],
[0, 0, 1]])
r = np.array([
[cosa, -sina, 0],
[sina, cosa, 0],
[0, 0, 1]])
t2 = np.array([
[1, 0, in_w*0.5],
[0, 1, in_h*0.5],
[0, 0, 1]])
return (np.matmul(t2, np.matmul(r, t1)))[0:2,0:3]
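# The composition t2 @ r @ t1 maps *output* coordinates to *input* coordinates
# (translate the output center to the origin, rotate, translate to the input
# center), i.e. it is an inverse mapping -- which is why CVRotate below warps
# with cv2.WARP_INVERSE_MAP.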
def ToCVMatrix(matrix):
offset = np.matmul(matrix, np.array([[0.5], [0.5], [1]]))
result = matrix.copy()
result[0][2] = offset[0] - 0.5
result[1][2] = offset[1] - 0.5
return result
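# ToCVMatrix converts between sampling conventions: DALI places pixel centers at
# half-integer coordinates while cv2.warpAffine uses integer pixel centers, so
# the adjusted matrix satisfies M_cv(x) = M(x + 0.5) - 0.5.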
def CVRotate(output_type, input_type, fixed_size):
def warp_fn(img, angle):
in_size = img.shape[0:2]
angle = math.radians(angle)
out_size = fixed_size if fixed_size is not None else get_output_size(angle, in_size)
matrix = get_transform(angle, in_size, out_size)
matrix = ToCVMatrix(matrix)
if output_type == dali.types.FLOAT or input_type == dali.types.FLOAT:
img = np.float32(img)
out_size_wh = (out_size[1], out_size[0])
out = cv2.warpAffine(img, matrix, out_size_wh, borderMode = cv2.BORDER_CONSTANT, borderValue = [42,42,42],
flags = (cv2.INTER_LINEAR|cv2.WARP_INVERSE_MAP))
if output_type == dali.types.UINT8 and input_type == dali.types.FLOAT:
out = np.uint8(np.clip(out, 0, 255))
return out
return warp_fn
class RotatePipeline(Pipeline):
def __init__(self, device, batch_size, output_type, input_type, fixed_size=None, num_threads=3, device_id=0, num_gpus=1):
super(RotatePipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)
self.name = device
self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
if input_type != dali.types.UINT8:
self.cast = ops.Cast(device = device, dtype = input_type)
else:
self.cast = None
self.uniform = ops.random.Uniform(range = (-180.0, 180.0), seed = 42)
self.rotate = ops.Rotate(device = device, size=fixed_size, fill_value = 42, dtype = output_type)
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
if self.rotate.device == "gpu":
images = images.gpu()
if self.cast:
images = self.cast(images)
outputs = self.rotate(images, angle = self.uniform())
return outputs
class CVPipeline(Pipeline):
def __init__(self, batch_size, output_type, input_type, fixed_size, num_threads=3, device_id=0, num_gpus=1):
super(CVPipeline, self).__init__(batch_size, num_threads, device_id, seed=7865, exec_async=False, exec_pipelined=False)
self.name = "cv"
self.input = ops.readers.Caffe(path = caffe_db_folder, shard_id = device_id, num_shards = num_gpus)
self.decode = ops.decoders.Image(device = "cpu", output_type = types.RGB)
self.rotate = ops.PythonFunction(function=CVRotate(output_type, input_type, fixed_size),
output_layouts="HWC")
self.uniform = ops.random.Uniform(range = (-180.0, 180.0), seed = 42)
self.iter = 0
def define_graph(self):
self.jpegs, self.labels = self.input(name = "Reader")
images = self.decode(self.jpegs)
angles = self.uniform()
outputs = self.rotate(images, angles)
return outputs
def compare(pipe1, pipe2, eps):
pipe1.build()
pipe2.build()
epoch_size = pipe1.epoch_size("Reader")
batch_size = pipe1.max_batch_size
niter = 1 if batch_size >= epoch_size else 2
compare_pipelines(pipe1, pipe2, batch_size, niter, eps)
io_types = [
(dali.types.UINT8, dali.types.UINT8),
(dali.types.UINT8, dali.types.FLOAT),
(dali.types.FLOAT, dali.types.UINT8),
(dali.types.FLOAT, dali.types.FLOAT)
]
def create_pipeline(backend, *args):
if backend == "cv":
return CVPipeline(*args)
else:
return RotatePipeline(backend, *args)
def run_cases(backend1, backend2, epsilon):
for batch_size in [1, 4, 19]:
for output_size in [None, (160,240)]:
for (itype, otype) in io_types:
def run_case(backend1, backend2, *args):
pipe1 = create_pipeline(backend1, *args)
pipe2 = create_pipeline(backend2, *args)
compare(pipe1, pipe2, epsilon)
yield run_case, backend1, backend2, batch_size, otype, itype, output_size
def test_gpu_vs_cv():
for test in run_cases("gpu", "cv", 8):
yield test
def test_cpu_vs_cv():
for test in run_cases("cpu", "cv", 8):
yield test
def test_gpu_vs_cpu():
for test in run_cases("gpu", "cpu", 1):
yield test
def infer_sequence_size(input_shapes, angles, axes=None):
assert(len(input_shapes) == len(angles))
assert(axes is None or len(axes) == len(angles))
if axes is None:
no_correction_shapes = [
np.array(get_output_size(math.radians(angle), shape, False), dtype=np.int32)
for shape, angle in zip(input_shapes, angles)]
corrected_shapes = [
np.array(get_output_size(math.radians(angle), shape, True), dtype=np.int32)
for shape, angle in zip(input_shapes, angles)]
else:
no_correction_shapes = [
np.array(get_3d_output_size(math.radians(angle), axis, shape, False), dtype=np.int32)
for shape, angle, axis in zip(input_shapes, angles, axes)]
corrected_shapes = [
np.array(get_3d_output_size(math.radians(angle), axis, shape, True), dtype=np.int32)
for shape, angle, axis in zip(input_shapes, angles, axes)]
max_shape = np.max(no_correction_shapes, axis=0)
parity = np.sum(np.array(corrected_shapes, dtype=np.int32) % 2, axis=0)
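    # Align the parity of each coalesced extent with the majority parity of the
    # per-sample corrected shapes: 2 * parity[i] > len(input_shapes) holds exactly
    # when more than half of the corrected shapes are odd along axis i.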
for i in range(len(max_shape)):
if max_shape[i] % 2 != (2 * parity[i] > len(input_shapes)):
max_shape[i] += 1
return max_shape
def sequence_batch_output_size(unfolded_extents, input_batch, angle_batch, axis_batch=None):
def iter_by_groups():
assert(sum(unfolded_extents) == len(input_batch))
assert(len(input_batch) == len(angle_batch))
assert(axis_batch is None or len(axis_batch) == len(angle_batch))
offset = 0
for group in unfolded_extents:
yield input_batch[offset:offset + group], angle_batch[offset:offset + group],\
None if axis_batch is None else axis_batch[offset:offset + group]
offset += group
sequence_output_shape = [
infer_sequence_size([frame.shape for frame in input_frames], angles, axes)
for input_frames, angles, axes in iter_by_groups()]
return [
output_shape for output_shape, num_frames in zip(sequence_output_shape, unfolded_extents)
for _ in range(num_frames)]
class RotatePerFrameParamsProvider(ParamsProvider):
"""
    Provides a per-frame angle argument input to the video rotate operator test.
    The expanded baseline pipeline must additionally be provided with a ``size``
    argument to account for the coalescing of the inferred frame sizes.
"""
def __init__(self, input_params):
super().__init__(input_params)
def expand_params(self):
assert(self.num_expand == 1)
expanded_params = super().expand_params()
params_dict = {param_data.desc.name: param_data for param_data in expanded_params}
expanded_angles = params_dict.get('angle')
expanded_axis = params_dict.get('axis')
assert expanded_angles is not None and 'size' not in self.fixed_params and 'size' not in params_dict
sequence_extents = [
[sample.shape[0] for sample in input_batch]
for input_batch in self.input_data]
output_size_params = (sequence_extents, self.unfolded_input, expanded_angles.data)
if expanded_axis is not None:
output_size_params += (expanded_axis.data,)
output_sizes = [
sequence_batch_output_size(*args)
for args in zip(*output_size_params)]
expanded_params.append(ArgData(ArgDesc("size", False, "cpu"), output_sizes))
return expanded_params
def __repr__(self):
return "{}({})".format(repr(self.__class__), repr(self.input_params))
def test_video():
def small_angle(sample_desc):
return np.array(sample_desc.rng.uniform(-44., 44.), dtype=np.float32)
def random_angle(sample_desc):
return np.array(sample_desc.rng.uniform(-180., 180.), dtype=np.float32)
def random_output(sample_desc):
        return np.array([sample_desc.rng.randint(300, 400), sample_desc.rng.randint(300, 400)])
video_test_cases = [
(dali.fn.rotate, {'angle': 45.}, []),
(dali.fn.rotate, {}, [ArgCb("angle", small_angle, False)]),
(dali.fn.rotate, {}, [ArgCb("angle", random_angle, False)]),
(dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", small_angle, True)])),
(dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", random_angle, True)])),
(dali.fn.rotate, {}, [ArgCb("angle", small_angle, True), ArgCb("size", random_output, False)]),
]
rng = random.Random(42)
video_cases = get_video_input_cases("FHWC", rng, larger_shape=(512, 287))
input_cases = [("FHWC", input_data) for input_data in video_cases]
yield from sequence_suite_helper(rng, "F", input_cases, video_test_cases)
def test_3d_sequence():
rng = random.Random(42)
num_batches = 4
max_batch_size = 8
max_frames_num = 32
input_layout = "FDHWC"
np_rng = np.random.default_rng(42)
def get_random_sample():
num_frames = rng.randint(1, max_frames_num)
d, h, w = tuple(rng.randint(10, 50) for _ in range(3))
return np.int32(np_rng.uniform(0, 255, (num_frames, d, h, w, 3)))
def get_random_batch():
return [get_random_sample() for _ in range(rng.randint(1, max_batch_size))]
input_cases = [(input_layout, [get_random_batch() for _ in range(num_batches)])]
def random_angle(sample_desc):
return np.array(sample_desc.rng.uniform(-180., 180.), dtype=np.float32)
def random_axis(sample_desc):
return np.array([sample_desc.rng.uniform(-1, 1) for _ in range(3)], dtype=np.float32)
test_cases = [
(dali.fn.rotate, {'angle': 45., 'axis': np.array([1, 0, 0], dtype=np.float32)}, []),
(dali.fn.rotate, {'size': (50, 30, 20)}, [ArgCb("angle", random_angle, True), ArgCb("axis", random_axis, True)]),
(dali.fn.rotate, {}, RotatePerFrameParamsProvider([ArgCb("angle", random_angle, True), ArgCb("axis", random_axis, True)])),
]
yield from sequence_suite_helper(rng, "F", input_cases, test_cases)
#%%
import os
print(os.getcwd())
from Blocks import ReLU, SequentialNN, Dense, Hinge, SGD
from dataset_utils import load_mnist
import numpy as np
from convolution_layer import ConvLayer
from maxpool_layer import MaxPool2x2
from flatten_layer import FlattenLayer
import sys
def iterate_minibatches(x, y, batch_size=16, verbose=True):
assert len(x) == len(y)
indices = np.arange(len(x))
np.random.shuffle(indices)
for i, start_idx in enumerate(range(0, len(x) - batch_size + 1, batch_size)):
if verbose:
print('\rBatch: {}/{}'.format(i + 1, len(x) // batch_size), end='')
sys.stdout.flush()
excerpt = indices[start_idx:start_idx + batch_size]
yield x[excerpt], y[excerpt]
def get_cnn():
nn = SequentialNN()
nn.add(ConvLayer(1, 2, filter_size=3)) # The output is of size N_obj 2 28 28
nn.add(ReLU()) # The output is of size N_obj 2 28 28
nn.add(MaxPool2x2()) # The output is of size N_obj 2 14 14
nn.add(ConvLayer(2, 4, filter_size=3)) # The output is of size N_obj 4 14 14
nn.add(ReLU()) # The output is of size N_obj 4 14 14
nn.add(MaxPool2x2()) # The output is of size N_obj 4 7 7
nn.add(FlattenLayer()) # The output is of size N_obj 196
nn.add(Dense(4 * 7 * 7, 32))
nn.add(ReLU())
nn.add(Dense(32, 1))
return nn
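# Shape check (assuming the 'same' padding the per-layer comments imply):
# 1x28x28 -> 2x28x28 -> 2x14x14 -> 4x14x14 -> 4x7x7 -> flatten 196 -> 32 -> 1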
nn = get_cnn()
loss = Hinge()
optimizer = SGD(nn)
train = list(load_mnist(dataset='training', path='.'))
train_images = np.array([im[1] for im in train])
train_targets = np.array([im[0] for im in train])
# We will train a 0 vs. 1 classifier
x_train = train_images[train_targets < 2][:1000]
y_train = train_targets[train_targets < 2][:1000]
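# Hinge loss expects targets in {-1, +1}, so map the {0, 1} labels accordingly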
y_train = y_train * 2 - 1
y_train = y_train.reshape((-1, 1))
x_train = x_train.astype('float32') / 255.0
x_train = x_train.reshape((-1, 1, 28, 28))
# It will train for about 5 minutes
num_epochs = 3
batch_size = 32
# We will store the results here
history = {'loss': [], 'accuracy': []}
# `num_epochs` is the number of full passes over the training set
for epoch in range(num_epochs):
print('Epoch {}/{}'.format(epoch + 1, num_epochs))
    # Iterate over the mini-batches one by one
for x_batch, y_batch in iterate_minibatches(x_train, y_train, batch_size):
# Predict the target value
y_pred = nn.forward(x_batch)
# Compute the gradient of the loss
loss_grad = loss.backward(y_pred, y_batch)
        # Perform the backward pass
nn.backward(x_batch, loss_grad)
# Update the params
optimizer.update_params()
# Save loss and accuracy values
history['loss'].append(loss.forward(y_pred, y_batch))
prediction_is_correct = (y_pred > 0) == (y_batch > 0)
history['accuracy'].append(np.mean(prediction_is_correct))
print()
#%%
import matplotlib.pyplot as plt
# Let's plot the results to get a better insight
plt.figure(figsize=(8, 5))
ax_1 = plt.subplot()
ax_1.plot(history['loss'], c='g', lw=2, label='train loss')
ax_1.set_ylabel('loss', fontsize=16)
ax_1.set_xlabel('#batches', fontsize=16)
ax_2 = plt.twinx(ax_1)
ax_2.plot(history['accuracy'], lw=3, label='train accuracy')
ax_2.set_ylabel('accuracy', fontsize=16)
# Normal-exponential using out-of-band probes
# normex: negative control probes
# noob: ‘out-of-band’ Infinium I probes
# Lib
import logging
import numpy as np
import pandas as pd
from statsmodels import robust
from scipy.stats import norm, lognorm
# App
from ..models import ControlType, ArrayType
from ..models.sketchy_probes import qualityMask450, qualityMaskEPIC, qualityMaskEPICPLUS, qualityMaskmouse
__all__ = ['preprocess_noob']
LOGGER = logging.getLogger(__name__)
def preprocess_noob(container, offset=15, pval_probes_df=None, quality_mask_df=None, nonlinear_dye_correction=True, debug=False, unit_test_oob=False): # v1.4.5+
""" NOOB pythonized copy of https://github.com/zwdzwd/sesame/blob/master/R/background_correction.R
- The function takes a SigSet and returns a modified SigSet with the background subtracted.
- Background is modelled in a normal distribution and true signal in an exponential distribution.
- The Norm-Exp deconvolution is parameterized using Out-Of-Band (oob) probes.
- includes snps, but not control probes yet
- output should replace the container instead of returning debug dataframes
- II RED and II GREEN both have data, but manifest doesn't have a way to track this, so function tracks it.
- keep IlmnID as index for meth/unmeth snps, and convert fg_green
if nonlinear_dye_correction=True, this uses a sesame method in place of minfi method, in a later step.
if unit_test_oob==True, returns the intermediate data instead of updating the SigSet/SampleDataContainer.
"""
if debug:
print(f"DEBUG NOOB {debug} nonlinear_dye_correction={nonlinear_dye_correction}, pval_probes_df={pval_probes_df.shape if isinstance(pval_probes_df,pd.DataFrame) else 'None'}, quality_mask_df={quality_mask_df.shape if isinstance(quality_mask_df,pd.DataFrame) else 'None'}")
# stack- need one long list of values, regardless of Meth/Uneth
ibG = pd.concat([
container.ibG.reset_index().rename(columns={'Meth': 'mean_value'}).assign(used='M'),
container.ibG.reset_index().rename(columns={'Unmeth': 'mean_value'}).assign(used='U')
])
ibG = ibG[ ~ibG['mean_value'].isna() ].drop(columns=['Meth','Unmeth'])
ibR = pd.concat([
container.ibR.reset_index().rename(columns={'Meth': 'mean_value'}).assign(used='M'), #.drop(columns=['Meth','Unmeth']),
container.ibR.reset_index().rename(columns={'Unmeth': 'mean_value'}).assign(used='U') #.drop(columns=['Meth','Unmeth'])
])
ibR = ibR[ ~ibR['mean_value'].isna() ].drop(columns=['Meth','Unmeth'])
# out-of-band is Green-Unmeth and Red-Meth
# exclude failing probes
pval = pval_probes_df.loc[ pval_probes_df['poobah_pval'] > container.poobah_sig ].index if isinstance(pval_probes_df, pd.DataFrame) else []
qmask = quality_mask_df.loc[ quality_mask_df['quality_mask'] == 0 ].index if isinstance(quality_mask_df, pd.DataFrame) else []
# the ignored errors here should only be from probes that are both pval failures and qmask failures.
Rmeth = list(container.oobR['Meth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
Runmeth = list(container.oobR['Unmeth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
oobR = pd.DataFrame( Rmeth + Runmeth, columns=['mean_value'])
Gmeth = list(container.oobG['Meth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
Gunmeth = list(container.oobG['Unmeth'].drop(index=pval, errors='ignore').drop(index=qmask, errors='ignore'))
oobG = pd.DataFrame( Gmeth + Gunmeth, columns=['mean_value'])
# minfi test
# ref fg_green = 442614 | vs ibG 442672 = 396374 + 46240
# ref fg_red = 528410 | vs ibR 528482 = 439279 + 89131
# ref oob_green = 178374
# ref oob_red = 92578
#oobR = pd.DataFrame( data={'mean_value': container.oobR['Meth']})
#oobG = pd.DataFrame( data={'mean_value': container.oobG['Unmeth']})
#print(f" oobR {oobR.shape} oobG {oobG.shape}")
#import pdb;pdb.set_trace()
debug_warnings = ""
if oobR['mean_value'].isna().sum() > 0:
debug_warnings += f" NOOB: oobG had {oobG['mean_value'].isna().sum()} NaNs"
oobR = oobR.dropna()
if oobG['mean_value'].isna().sum() > 0:
debug_warnings += f" NOOB: oobG had {oobG['mean_value'].isna().sum()} NaNs"
oobG = oobG.dropna()
if ibG['mean_value'].isna().sum() > 0 or ibR['mean_value'].isna().sum() > 0:
raise ValueError("ibG or ibR is missing probe intensities. need to filter them out.")
if debug:
print(f"ibG {len(ibG)} ibR {len(ibR)} oobG {len(oobG)} oobR {len(oobR)} | {debug_warnings}")
# set minimum intensity to 1
ibG_affected = len(ibG.loc[ ibG['mean_value'] < 1 ].index)
ibR_affected = len(ibR.loc[ ibR['mean_value'] < 1 ].index)
ibG.loc[ ibG['mean_value'] < 1, 'mean_value'] = 1
ibR.loc[ ibR['mean_value'] < 1, 'mean_value'] = 1
oobG_affected = len(oobG[ oobG['mean_value'] < 1])
oobR_affected = len(oobR[ oobR['mean_value'] < 1])
oobG.loc[ oobG.mean_value < 1, 'mean_value'] = 1
oobR.loc[ oobR.mean_value < 1, 'mean_value'] = 1
if debug:
        if ibR_affected > 0 or ibG_affected > 0:
print(f"ib: Set {ibR_affected} red and {ibG_affected} green to 1.0 ({len(ibR[ ibR['mean_value'] == 1 ].index)}, {len(ibG[ ibG['mean_value'] == 1 ].index)})")
if oobG_affected > 0 or oobR_affected > 0:
print(f"oob: Set {oobR_affected} red and {oobG_affected} green to 1.0 ({len(oobR[ oobR['mean_value'] == 1 ].index)}, {len(oobG[ oobG['mean_value'] == 1 ].index)})")
# do background correction in each channel; returns "normalized in-band signal"
ibG_nl, params_green = normexp_bg_corrected(ibG, oobG, offset, sample_name=container.sample.name)
ibR_nl, params_red = normexp_bg_corrected(ibR, oobR, offset, sample_name=container.sample.name)
noob_green = ibG_nl.round({'bg_corrected':0})
noob_red = ibR_nl.round({'bg_corrected':0})
if unit_test_oob:
return {
'oobR': oobR,
'oobG': oobG,
'noob_green': noob_green,
'noob_red': noob_red,
}
# by default, this last step is omitted for sesame
if nonlinear_dye_correction == True:
# update() expects noob_red/green to have IlmnIDs in index, and contain bg_corrected for ALL probes.
container.update_probe_means(noob_green, noob_red)
elif nonlinear_dye_correction == False:
# this "linear" method may be anologous to the ratio quantile normalization described in Nature: https://www.nature.com/articles/s41598-020-72664-6
normexp_bg_correct_control(container.ctrl_green, params_green)
normexp_bg_correct_control(container.ctrl_red, params_red)
mask_green = container.ctrl_green['Control_Type'].isin(ControlType.normalization_green())
mask_red = container.ctrl_red['Control_Type'].isin(ControlType.normalization_red())
avg_green = container.ctrl_green[mask_green]['bg_corrected'].mean()
avg_red = container.ctrl_red[mask_red]['bg_corrected'].mean()
rg_ratios = avg_red / avg_green
red_factor = 1 / rg_ratios
container.update_probe_means(noob_green, noob_red, red_factor)
container._SigSet__minfi_noob = True
elif nonlinear_dye_correction is None:
if debug:
LOGGER.info("skipping linear/nonlinear dye-bias correction step")
# skips the minfi-linear step and won't trigger the sesame nonlinear dye bias step downstream, if you REALLY want it uncorrected. Mostly for debugging / benchmarking.
container.update_probe_means(noob_green, noob_red)
class BackgroundCorrectionParams():
""" used in apply_bg_correction """
__slots__ = (
'bg_mean',
'bg_mad',
'mean_signal',
'offset',
)
def __init__(self, bg_mean, bg_mad, mean_signal, offset):
        # note: default offset was 15. In v1.3.3 (Jan 2020) I kept 15, after finding this made results match sesame's NOOB output exactly when the dye step is omitted.
# offset is specified in the preprocess_noob() function.
self.bg_mean = bg_mean
self.bg_mad = bg_mad
self.mean_signal = mean_signal
self.offset = offset
def normexp_bg_corrected(fg_probes, ctrl_probes, offset, sample_name=None):
""" analogous to sesame's backgroundCorrectionNoobCh1 """
fg_means = fg_probes['mean_value']
if fg_means.min() == fg_means.max():
LOGGER.error(f"{sample_name}: min and max intensity are same. Sample probably bad.")
params = BackgroundCorrectionParams(bg_mean=1.0, bg_mad=1.0, mean_signal=1.0, offset=15)
fg_probes['bg_corrected'] = 1.0
return fg_probes, params
fg_mean, _fg_mad = huber(fg_means)
bg_mean, bg_mad = huber(ctrl_probes['mean_value'])
mean_signal = np.maximum(fg_mean - bg_mean, 10) # "alpha" in sesame function
params = BackgroundCorrectionParams(bg_mean, bg_mad, mean_signal, offset)
corrected_signals = apply_bg_correction(fg_means, params)
fg_probes['bg_corrected'] = corrected_signals
fg_probes['bg_corrected'] = fg_probes['bg_corrected'].round(1)
return fg_probes, params
def normexp_bg_correct_control(control_probes, params):
"""Function for getting xcs controls for preprocessNoob"""
control_means = control_probes['mean_value']
corrected_signals = apply_bg_correction(control_means, params)
control_probes['bg_corrected'] = corrected_signals
return control_probes
def apply_bg_correction(mean_values, params):
""" this function won't work with float16 in practice (underflow). limits use to float32 """
if not isinstance(params, BackgroundCorrectionParams):
raise ValueError('params is not a BackgroundCorrectionParams instance')
np.seterr(under='ignore') # 'raise to explore fixing underflow warning here'
bg_mean = params.bg_mean #mu
bg_mad = params.bg_mad #sigma
mean_signal = params.mean_signal #alpha
offset = params.offset
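    # Normal-exponential posterior mean (Norm-Exp deconvolution): the observed
    # intensity is modelled as x = signal + background with background ~ N(mu, sigma^2)
    # and signal ~ Exp(1/alpha). The posterior mean of the true signal is
    #   E[signal | x] = mu_sf + sigma^2 * pdf(0; mu_sf, sigma) / sf(0; mu_sf, sigma)
    # with mu_sf = x - mu - sigma^2 / alpha; the `signal` line below computes it via
    # logpdf/logsf for numerical stability (compare the sesame snippet quoted below).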
mu_sf = mean_values - bg_mean - (bg_mad ** 2) / mean_signal
#try:
# signal_part_one = mu_sf + (bg_mad ** 2)
# signal_part_two = np.exp(norm(mu_sf, bg_mad).logpdf(0) - norm(mu_sf, bg_mad).logsf(0))
# signal = signal_part_one * signal_part_two
#except:
# print(signal_part_one, norm(mu_sf, bg_mad).logpdf(0), norm(mu_sf, bg_mad).logsf(0))
# norm is from scipy.stats
signal = mu_sf + (bg_mad ** 2) * np.exp(norm(mu_sf, bg_mad).logpdf(0) - norm(mu_sf, bg_mad).logsf(0))
""" COMPARE with sesame:
signal <- mu.sf + sigma2 * exp(
dnorm(0, mean = mu.sf, sd = sigma, log = TRUE) -
pnorm(
0, mean = mu.sf, sd = sigma,
lower.tail = FALSE, log.p = TRUE))
"""
# sesame: "Limit of numerical accuracy reached with very low intensity or very high background:
# setting adjusted intensities to small value"
signal = np.maximum(signal, 1e-6)
true_signal = signal + offset
return true_signal
def huber(vector):
"""Huber function. Designed to mirror MASS huber function in R
Parameters
----------
vector: list
list of float values
Returns
-------
local_median: float
calculated mu value
mad_scale: float
calculated s value
"""
num_values = len(vector)
positive_factor = 1.5
convergence_tol = 1.0e-6
mad_scale = robust.mad(vector)
local_median = np.median(vector)
init_local_median = local_median
if not (local_median or mad_scale):
return local_median, mad_scale
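    # Iteratively winsorize the data to within +/- 1.5 * MAD of the current
    # location estimate and take the mean: a Huber M-estimator of location with
    # the scale held fixed at the MAD (mirroring the MASS::huber defaults).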
while True:
yy = np.minimum(
np.maximum(
local_median - positive_factor * mad_scale,
vector,
),
local_median + positive_factor * mad_scale,
)
init_local_median = sum(yy) / num_values
if abs(local_median - init_local_median) < convergence_tol * mad_scale:
return local_median, mad_scale
local_median = init_local_median
def _apply_sesame_quality_mask(data_container):
""" adapted from sesame's qualityMask function, which is applied just after poobah
to remove probes Wanding thinks are sketchy.
    OUTPUT: this pandas DataFrame will have 1.0 for probes to be retained and 0.0 for probes to be excluded (v1.6+ convention; see the note in the code below).
SESAME:
masked <- sesameDataGet(paste0(sset@platform, '.probeInfo'))$mask
to use TCGA masking, only applies to HM450
"""
if data_container.array_type not in (
# ArrayType.ILLUMINA_27K,
ArrayType.ILLUMINA_450K,
ArrayType.ILLUMINA_EPIC,
ArrayType.ILLUMINA_EPIC_PLUS,
ArrayType.ILLUMINA_MOUSE):
LOGGER.info(f"Quality masking is not supported for {data_container.array_type}.")
return
# load set of probes to remove from local file
if data_container.array_type == ArrayType.ILLUMINA_450K:
probes = qualityMask450
elif data_container.array_type == ArrayType.ILLUMINA_EPIC:
probes = qualityMaskEPIC
elif data_container.array_type == ArrayType.ILLUMINA_EPIC_PLUS:
# this is a bit of a hack; probe names don't match epic, so I'm temporarily renaming, then filtering, then reverting.
probes = qualityMaskEPICPLUS
elif data_container.array_type == ArrayType.ILLUMINA_MOUSE:
probes = qualityMaskmouse
# v1.6+: the 1.0s are good probes and the 0.0 are probes to be excluded.
cgs = pd.DataFrame( np.zeros((len(data_container.man.index), 1)), index=data_container.man.index, columns=['quality_mask'])
cgs['quality_mask'] = 1.0
snps = pd.DataFrame( np.zeros((len(data_container.snp_man.index), 1)), index=data_container.snp_man.index, columns=['quality_mask'])
snps['quality_mask'] = 1.0
df = pd.concat([cgs, snps])
df.loc[df.index.isin(probes), 'quality_mask'] = 0
#LOGGER.info(f"DEBUG quality_mask: {df.shape}, {df['quality_mask'].value_counts()} from {probes.shape} probes")
return df
""" ##### DEPRECATED (<v1.5.0) #####
def _old_reprocess_noob_sesame_v144(container, offset=15, debug=False):
''' NOOB pythonized copy of https://github.com/zwdzwd/sesame/blob/master/R/background_correction.R
- The function takes a SigSet and returns a modified SigSet with that background subtracted.
- Background is modelled in a normal distribution and true signal in an exponential distribution.
- The Norm-Exp deconvolution is parameterized using Out-Of-Band (oob) probes.
- includes snps, but not control probes yet
- output should replace the container instead of returning debug dataframes
- II RED and II GREEN both have data, but manifest doesn't have a way to track this, so function tracks it.
'''
# get in-band red and green channel probe means
#ibR <- c(IR(sset), II(sset)[,'U']) # in-band red signal = IR_meth + IR_unmeth + II[unmeth]
#ibG <- c(IG(sset), II(sset)[,'M']) # in-band green signal = IG_meth + IG_unmeth + II[meth]
# cols: mean_value, IlmnID, probe_type (I,II); index: illumina_id
#CHECKED: AddressA or AddressB for each probe subtype matches probes.py
raw = container.snp_methylated.data_frame
snp_IR_meth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Red')][['mean_value','AddressB_ID']]
.reset_index().rename(columns={'AddressB_ID':'illumina_id'}).set_index('illumina_id'))
snp_IR_meth['Channel'] = 'Red'
snp_IG_meth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Grn')][['mean_value','AddressB_ID']]
.reset_index().rename(columns={'AddressB_ID':'illumina_id'}).set_index('illumina_id'))
snp_IG_meth['Channel'] = 'Grn'
snp_II_meth = (raw[(raw['Infinium_Design_Type'] == 'II')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_II_meth['Channel'] = 'Grn'
raw = container.snp_unmethylated.data_frame
snp_IR_unmeth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Red')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_IR_unmeth['Channel'] = 'Red'
snp_IG_unmeth = (raw[(raw['Infinium_Design_Type'] == 'I') & (raw['Color_Channel'] == 'Grn')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_IG_unmeth['Channel'] = 'Grn'
snp_II_unmeth = (raw[(raw['Infinium_Design_Type'] == 'II')][['mean_value','AddressA_ID']]
.reset_index().rename(columns={'AddressA_ID':'illumina_id'}).set_index('illumina_id'))
snp_II_unmeth['Channel'] = 'Red'
if debug:
print('snp probes:', snp_IR_meth.shape, snp_IG_unmeth.shape, snp_II_meth.shape, snp_II_unmeth.shape)
#--> copy over snps, but first get snps with illumina_id in index
# swap index on all snps from IlmnID to illumina_id
## note: 350076 II + 89203 IR + 46298 IG = 485577 (including rs probes, but excl controls)
ibG = container.fg_green # --> self.raw_dataset.get_fg_values(self.manifest, Channel.GREEN)
ibG['Channel'] = 'Grn'
ibG.index.name = 'illumina_id'
ibR = container.fg_red # --> self.raw_dataset.get_fg_values(self.manifest, Channel.RED)
ibR['Channel'] = 'Red'
ibR.index.name = 'illumina_id'
# to match sesame, extra probes are IR_unmeth and IG_unmeth in ibR red and ibG green, respectively.
ibG = pd.concat([ibG,
snp_IG_meth,
snp_IG_unmeth,
snp_II_meth
], sort=True).drop('probe_type', axis=1)
# sort=True, because column order varies
ibR = pd.concat([ibR,
snp_IR_meth,
snp_IR_unmeth,
snp_II_unmeth
], sort=True).drop('probe_type', axis=1)
if debug:
print('in-bound Green:', ibG.shape) # green IG is AddressB, (meth) according to PROBE_SUBSETS
print('in-bound Red:', ibR.shape) # red IR is AddressA (unmeth) according to PROBE_SUBSETS
### at this point, ibG ibR probe counts match sesame EXACTLY
# set minimum intensity to 1
ibR_affected = len(ibR.loc[ ibR['mean_value'] < 1 ].index)
ibG_affected = len(ibG.loc[ ibG['mean_value'] < 1 ].index)
ibR.loc[ ibR['mean_value'] < 1, 'mean_value'] = 1
ibG.loc[ ibG['mean_value'] < 1, 'mean_value'] = 1
if debug:
print(f"IB: Set {ibR_affected} red and {ibG_affected} green to 1.0 ({len(ibR[ ibR['mean_value'] == 1 ].index)}, {len(ibG[ ibG['mean_value'] == 1 ].index)})")
red_dupes = len(ibR.index)-len(ibR.drop_duplicates().index)
grn_dupes = len(ibG.index)-len(ibG.drop_duplicates().index)
if debug and (red_dupes or grn_dupes):
print(f"duplicate probes: {red_dupes} red and {grn_dupes} green")
ref = container.manifest.data_frame # [['Infinium_Design_Type','Color_Channel']]
# using a copy .oobG and .oobR here; does not update the idat or other source data probe_means
# adopted from raw_dataset.filter_oob_probes here
oobR = (container.oobR.merge(container.manifest.data_frame[['AddressB_ID']],
how='left',
left_index=True,
right_index=True)
.reset_index()
.rename(columns={'AddressB_ID':'illumina_id', 'Unnamed: 0': 'IlmnID'})
.set_index('illumina_id')
)
oobR = pd.DataFrame(list(oobR['meth']) + list(oobR['unmeth']), columns=['mean_value'])
oobG = (container.oobG.merge(container.manifest.data_frame[['AddressA_ID']],
how='left',
left_index=True,
right_index=True)
.reset_index()
.rename(columns={'AddressA_ID':'illumina_id', 'Unnamed: 0': 'IlmnID'})
.set_index('illumina_id')
)
oobG = pd.DataFrame(list(oobG['meth']) + list(oobG['unmeth']), columns=['mean_value'])
oobG_affected = len(oobG[ oobG['mean_value'] < 1])
oobG.loc[ oobG.mean_value < 1, 'mean_value'] = 1
oobR_affected = len(oobR[ oobR['mean_value'] < 1])
oobR.loc[ oobR.mean_value < 1, 'mean_value'] = 1
# here: do bg_subtract AND normalization step here ...
## do background correction in each channel; returns "normalized in-band signal"
ibR_nl, params_red = normexp_bg_corrected(ibR, oobR, offset, sample_name=container.sample.name)
#<- .backgroundCorrectionNoobCh1(ibR, oobR(sset), ctl(sset)$R, getBackgroundR(sset, bgR), offset=offset)
ibG_nl, params_green = normexp_bg_corrected(ibG, oobG, offset, sample_name=container.sample.name)
# <- .backgroundCorrectionNoobCh1(ibG, oobG(sset), ctl(sset)$G, getBackgroundG(sset, bgG), offset=offset)
ibG_nl = ibG_nl.round({'bg_corrected':0})
ibR_nl = ibR_nl.round({'bg_corrected':0})
#print('ibG_nl', ibG_nl.shape)
#print('ibR_nl', ibR_nl.shape)
noob_green = ibG_nl
noob_red = ibR_nl
if debug:
print(f"OOB: Set {oobR_affected} red and {oobG_affected} green to 1.0; shapes: {oobG.shape}, {oobR.shape}")
print(f"noob_red with Grn: {len(noob_red[noob_red['Channel'] == 'Grn'])} noob_green with Red: {len(noob_green[noob_green['Channel'] == 'Red'])}")
ref_IG = ref[(ref['Color_Channel']=='Grn') & (ref['Infinium_Design_Type']=='I')]
ref_IR = ref[(ref['Color_Channel']=='Red') & (ref['Infinium_Design_Type']=='I')]
ref_II = ref[ref['Infinium_Design_Type']=='II'] # II channel is NaN, but BOTH channels have data
print(f"from manifest: ref_IG {ref_IG.shape} ref_IR {ref_IR.shape} ref_II {ref_II.shape}")
# Combine and return red (IG + IR + II_unmeth) and green (IG + IR + II_meth)
# ibR_nl has IlmnID and illumina_id (index); ref has IlmnID as index
# ref_meth/ref_unmeth from probes.py
ref_meth = pd.concat([
ref[(ref['Color_Channel'].isna()) & (ref['Infinium_Design_Type']=='II')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Grn') & (ref['Infinium_Design_Type']== 'I')]['AddressB_ID'].reset_index().rename(columns={'AddressB_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Red') & (ref['Infinium_Design_Type']== 'I')]['AddressB_ID'].reset_index().rename(columns={'AddressB_ID':'illumina_id'}),
]) #.set_index('illumina_id') # .drop('illumina_id', axis=1)
ref_unmeth = pd.concat([
ref[(ref['Color_Channel'].isna()) & (ref['Infinium_Design_Type']=='II')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Grn') & (ref['Infinium_Design_Type']== 'I')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
ref[(ref['Color_Channel']=='Red') & (ref['Infinium_Design_Type']== 'I')]['AddressA_ID'].reset_index().rename(columns={'AddressA_ID':'illumina_id'}),
]) #.set_index('illumina_id') # .drop('illumina_id', axis=1)
noob_meth_G = noob_green[noob_green.index.isin(ref_meth['illumina_id'])]
noob_unmeth_G = noob_green[noob_green.index.isin(ref_unmeth['illumina_id'])]
noob_meth_R = noob_red[noob_red.index.isin(ref_meth['illumina_id'])]
noob_unmeth_R = noob_red[noob_red.index.isin(ref_unmeth['illumina_id'])]
noob_meth_dupes = pd.concat([noob_meth_G, noob_meth_R])
noob_unmeth_dupes = pd.concat([noob_unmeth_G, noob_unmeth_R])
# CONFIRMED: this dedupe method below matches sesame's output exactly for noob_meth
noob_meth = (noob_meth_dupes[~noob_meth_dupes.index.duplicated(keep='first')]
.set_index('IlmnID')
.sort_index()
.rename(columns={'bg_corrected':'meth'})
)
# conveniently, the FIRST value of each duplicate probe appears to be the one we want for both meth/unmeth R/G channels
noob_unmeth = (noob_unmeth_dupes[~noob_unmeth_dupes.index.duplicated(keep='first')]
.set_index('IlmnID')
.sort_index()
.rename(columns={'bg_corrected':'unmeth'})
)
# update II, IG, IR, oobR, oobG, ctrl_red, ctrl_green
# --> --> probes.py subsets concatenate these:
# fg_green
# GREEN + AddressA + II
# GREEN + AddressA + IG
# GREEN + AddressB + IG
# oob_green
# RED + AddressA + IR
# fg_red
# RED + AddressA + II
# RED + AddressA + IR
# RED + AddressB + IR
# oob_red
# GREEN + AddressB + IG
#
# methylated
# GREEN + AddressA + II
# GREEN + AddressB + I
# RED + AddressB + I
# unmethylated
# RED + AddressA + II
# GREEN + AddressA + I
# RED + AddressA + I
# RETROFITTING BELOW -- may not work, as sesame works with noob_meth / noob_unmeth instead
try:
container.methylated.set_bg_corrected(noob_green, noob_red)
container.unmethylated.set_bg_corrected(noob_green, noob_red)
container.methylated.set_noob(1.0)
container.unmethylated.set_noob(1.0)
except ValueError as e:
print(e)
if debug:
LOGGER.warning("could not update container methylated / unmethylated noob values, because preprocess_sesame_noob has already run once.")
# output df should have sample meth or unmeth in a column with sample name and IlmnID as index. 485512 rows
if debug:
return {
'noob_meth': noob_meth,
'noob_unmeth': noob_unmeth,
'oobR': oobR,
'oobG': oobG,
'noob_green': noob_green,
'noob_red': noob_red,
'dupe_meth': noob_meth_dupes,
'dupe_unmeth': noob_unmeth_dupes,
}
return # noob_meth, noob_unmeth
"""
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import logging
from typing import List
import numpy as np
from pydantic import BaseModel, validator
from ray.rllib.agents.dqn import ApexTrainer, R2D2Trainer # noqa
from ray.rllib.agents.impala import ImpalaTrainer # noqa
from ray.rllib.agents.ppo import PPOTrainer # noqa
from compiler_gym.datasets import BenchmarkUri
from compiler_gym.envs import CompilerEnv
from compiler_gym.util.timer import Timer
logger = logging.getLogger(__name__)
class InferenceResult(BaseModel):
"""Represents the result of running an RL agent on a problem."""
# The benchmark URI.
benchmark: str
inference_walltime_seconds: float
commandline: str
episode_len: int
instruction_count_init: int
instruction_count_final: int
instruction_count_oz: int
instruction_count_reduction: float
"""The final instruction count, normalized to -Oz."""
object_size_init: int
object_size_final: int
object_size_oz: int
object_size_reduction: float
"""The final object size, normalized to -Oz."""
runtimes_init: List[float]
runtimes_final: List[float]
runtimes_o3: List[float]
runtime_reduction: float
"""The final runtime, normalized to -Oz."""
@classmethod
def from_agent(
cls, env: CompilerEnv, agent, runtime: bool = True, runtimes_count: int = 30
):
# We calculate our own reward at the end, no need for incremental
# rewards during inference.
env.reward_space = None
# Run inference on the environment.
observation, done = env.reset(), False
with Timer() as inference_timer:
while not done:
action = agent.compute_action(observation)
observation, _, done, _ = env.step(action)
instruction_count_init = env.unwrapped.observation["IrInstructionCountO0"]
instruction_count_final = env.unwrapped.observation["IrInstructionCount"]
instruction_count_oz = env.unwrapped.observation["IrInstructionCountOz"]
object_size_init = env.unwrapped.observation["ObjectTextSizeO0"]
object_size_final = env.unwrapped.observation["ObjectTextSizeBytes"]
object_size_oz = env.unwrapped.observation["ObjectTextSizeOz"]
runtimes_init = []
runtimes_o3 = []
runtimes_final = []
try:
if runtime and env.unwrapped.observation["IsRunnable"]:
env.send_param(
"llvm.set_runtimes_per_observation_count", str(runtimes_count)
)
env.unwrapped.observation["Runtime"] # warmup
runtimes_final = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_final) == runtimes_count
), f"{len(runtimes_final)} != {runtimes_count}"
env.reset()
env.send_param(
"llvm.set_runtimes_per_observation_count", str(runtimes_count)
)
env.unwrapped.observation["Runtime"] # warmup
runtimes_init = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_init) == runtimes_count
), f"{len(runtimes_init)} != {runtimes_count}"
env.send_param("llvm.apply_baseline_optimizations", "-O3")
env.unwrapped.observation["Runtime"] # warmup
runtimes_o3 = env.unwrapped.observation["Runtime"].tolist()
assert (
len(runtimes_o3) == runtimes_count
), f"{len(runtimes_o3)} != {runtimes_count}"
except Exception as e: # pylint: disable=broad-except
logger.warning("Failed to compute runtime: %s", e)
return cls(
benchmark=env.benchmark.uri,
inference_walltime_seconds=inference_timer.time,
commandline=env.commandline(),
episode_len=len(env.actions),
instruction_count_init=instruction_count_init,
instruction_count_final=instruction_count_final,
instruction_count_oz=instruction_count_oz,
instruction_count_reduction=instruction_count_oz
/ max(instruction_count_final, 1),
object_size_init=object_size_init,
object_size_final=object_size_final,
object_size_oz=object_size_oz,
object_size_reduction=object_size_oz / max(object_size_final, 1),
runtimes_init=runtimes_init,
runtimes_final=runtimes_final,
runtimes_o3=runtimes_o3,
runtime_reduction=np.median(runtimes_o3 or [0])
/ max(np.median(runtimes_final or [0]), 1),
)
@validator("benchmark", pre=True)
def validate_benchmark(cls, value):
if isinstance(value, BenchmarkUri):
return str(value)
return value
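# --- Editor's sketch (illustrative values, not part of the original module) ---
# How `runtime_reduction` above is derived, using invented runtime lists:
if __name__ == "__main__":
    _o3, _final = [1.2, 1.1, 1.3], [1.5, 1.6, 1.4]
    # median of the -O3 runtimes over the median of the agent's runtimes,
    # clamped exactly as in `from_agent` above
    print(np.median(_o3) / max(np.median(_final), 1))  # -> ~0.8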
| [
"logging.getLogger",
"numpy.median",
"compiler_gym.util.timer.Timer",
"pydantic.validator"
] | [((596, 623), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (613, 623), False, 'import logging\n'), ((4873, 4905), 'pydantic.validator', 'validator', (['"""benchmark"""'], {'pre': '(True)'}), "('benchmark', pre=True)\n", (4882, 4905), False, 'from pydantic import BaseModel, validator\n'), ((1754, 1761), 'compiler_gym.util.timer.Timer', 'Timer', ([], {}), '()\n', (1759, 1761), False, 'from compiler_gym.util.timer import Timer\n'), ((4771, 4800), 'numpy.median', 'np.median', (['(runtimes_o3 or [0])'], {}), '(runtimes_o3 or [0])\n', (4780, 4800), True, 'import numpy as np\n'), ((4819, 4851), 'numpy.median', 'np.median', (['(runtimes_final or [0])'], {}), '(runtimes_final or [0])\n', (4828, 4851), True, 'import numpy as np\n')] |
import re
from array import *
import fileinput
import sys, getopt
import csv
def main(argv):
vlog = ''
top = ''
try:
opts, args = getopt.getopt(argv,"hf:t:",["Vlog=","Top="])
except getopt.GetoptError:
        print ('script_gen.py -f <verilog file> -t <top module name>')
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
            print ('script_gen.py -f <verilog file> -t <top module name>')
sys.exit()
elif opt in ("-f", "--Vlog"):
vlog = arg
elif opt in ("-t", "--Top"):
top = arg
if not top :
top = vlog.replace('.v','')
print ('File :',vlog)
print ('Top_module :', top)
script_str = ''
try:
with open("./template_temp.txt", 'r') as file :
script_str = file.read()
except IOError:
print("template_temp.txt not accessible")
sys.exit(2)
script_str = script_str.replace("template.v", vlog)
script_str = script_str.replace("-top template", '-top '+top)
script_str = script_str.replace("template.json", top+'.json')
if script_str :
with open(top+'.ys', 'w') as file:
file.write(script_str)
if __name__ == "__main__":
main(sys.argv[1:])
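# Example invocation (editor's note; file names are illustrative):
#   python script_gen.py -f design.v -t design_top
# reads ./template_temp.txt and writes design_top.ys with the "template"
# placeholders substituted.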
| [
"getopt.getopt",
"sys.exit"
] | [((151, 198), 'getopt.getopt', 'getopt.getopt', (['argv', '"""hf:t:"""', "['Vlog=', 'Top=']"], {}), "(argv, 'hf:t:', ['Vlog=', 'Top='])\n", (164, 198), False, 'import sys, getopt\n'), ((306, 317), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (314, 317), False, 'import sys, getopt\n'), ((522, 532), 'sys.exit', 'sys.exit', ([], {}), '()\n', (530, 532), False, 'import sys, getopt\n'), ((971, 982), 'sys.exit', 'sys.exit', (['(2)'], {}), '(2)\n', (979, 982), False, 'import sys, getopt\n')] |
# Generated by Django 3.0.8 on 2020-11-16 19:09
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('hello', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('realname', models.CharField(max_length=64)),
('phone', models.CharField(max_length=16)),
('email', models.EmailField(max_length=254)),
('sign', models.BooleanField()),
('create_time', models.DateTimeField(auto_now=True)),
],
options={
'ordering': ['-id'],
},
),
migrations.CreateModel(
name='paperclip',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=200)),
('abstract', models.CharField(max_length=200)),
('publish_time', models.DateTimeField()),
('create_time', models.DateTimeField(auto_now=True)),
('pid', models.CharField(max_length=16)),
],
),
migrations.AlterUniqueTogether(
name='guest',
unique_together=None,
),
migrations.RemoveField(
model_name='guest',
name='event',
),
migrations.DeleteModel(
name='Event',
),
migrations.DeleteModel(
name='Guest',
),
migrations.AddField(
model_name='author',
name='paperclip',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='hello.paperclip'),
),
migrations.AlterUniqueTogether(
name='author',
unique_together={('phone', 'paperclip')},
),
]
| [
"django.db.migrations.AlterUniqueTogether",
"django.db.migrations.DeleteModel",
"django.db.models.EmailField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.AutoField",
"django.db.models.DateTimeField",
"django.db.migrations.RemoveField",
"django.db.models.CharFie... | [((1389, 1455), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""guest"""', 'unique_together': 'None'}), "(name='guest', unique_together=None)\n", (1419, 1455), False, 'from django.db import migrations, models\n'), ((1500, 1556), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""guest"""', 'name': '"""event"""'}), "(model_name='guest', name='event')\n", (1522, 1556), False, 'from django.db import migrations, models\n'), ((1601, 1637), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Event"""'}), "(name='Event')\n", (1623, 1637), False, 'from django.db import migrations, models\n'), ((1670, 1706), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Guest"""'}), "(name='Guest')\n", (1692, 1706), False, 'from django.db import migrations, models\n'), ((1946, 2037), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""author"""', 'unique_together': "{('phone', 'paperclip')}"}), "(name='author', unique_together={('phone',\n 'paperclip')})\n", (1976, 2037), False, 'from django.db import migrations, models\n'), ((1841, 1930), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""hello.paperclip"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'hello.paperclip')\n", (1858, 1930), False, 'from django.db import migrations, models\n'), ((350, 443), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (366, 443), False, 'from django.db import migrations, models\n'), ((471, 502), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(64)'}), '(max_length=64)\n', (487, 502), False, 'from django.db import migrations, models\n'), ((531, 562), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (547, 562), False, 'from django.db import migrations, models\n'), ((591, 624), 'django.db.models.EmailField', 'models.EmailField', ([], {'max_length': '(254)'}), '(max_length=254)\n', (608, 624), False, 'from django.db import migrations, models\n'), ((652, 673), 'django.db.models.BooleanField', 'models.BooleanField', ([], {}), '()\n', (671, 673), False, 'from django.db import migrations, models\n'), ((708, 743), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (728, 743), False, 'from django.db import migrations, models\n'), ((952, 1045), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (968, 1045), False, 'from django.db import migrations, models\n'), ((1070, 1102), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1086, 1102), False, 'from django.db import migrations, models\n'), ((1134, 1166), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (1150, 1166), False, 'from django.db import migrations, models\n'), ((1202, 1224), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (1222, 1224), False, 'from 
django.db import migrations, models\n'), ((1259, 1294), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (1279, 1294), False, 'from django.db import migrations, models\n'), ((1321, 1352), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(16)'}), '(max_length=16)\n', (1337, 1352), False, 'from django.db import migrations, models\n')] |
import multiprocessing as mp
import pytest
from kawadi.text_search import SearchInText
@pytest.fixture()
def input_data():
text_to_find = "String distance algorithm"
text_to_search = """SIFT4 is a general purpose string distance algorithm inspired by JaroWinkler and Longest Common Subsequence. It was developed to produce a distance measure that matches as close as possible to the human perception of string distance. Hence it takes into account elements like character substitution, character distance, longest common subsequence etc. It was developed using experimental testing, and without theoretical background."""
return text_to_find, text_to_search
@pytest.fixture()
def output():
return [
{
"sim_score": 1.0,
"searched_text": "string distance algorithm",
"to_find": "string distance algorithm",
"start": 27,
"end": 52,
}
]
def custom_score(**kwargs):
if kwargs["slide_of_text"] == kwargs["text_to_find"]:
return 1.0
else:
return 0.0
def test_search_in_text(input_data, output) -> None:
search_text = SearchInText()
result = search_text.find_in_text(input_data[0], input_data[1])
assert output == result
# test multiprocessing
search_text = SearchInText(multiprocessing=True, max_workers=4)
result = search_text.find_in_text(input_data[0], input_data[1])
assert output == result
# test threshold
search_text = SearchInText(
score_threshold=0.99, multiprocessing=True, max_workers=4
)
result = search_text.find_in_text("something stupid", input_data[1])
assert result == []
# test max_workers
search_text = SearchInText(score_threshold=0.99, multiprocessing=True)
assert search_text.max_workers == mp.cpu_count()
def test_search_in_text_custom_score(input_data, output) -> None:
search_text = SearchInText(custom_score_func=custom_score)
result = search_text.find_in_text(input_data[0], input_data[1])
assert output == result
# test if invalid output
search_text = SearchInText(score_threshold=1, custom_score_func=custom_score)
with pytest.raises(ValueError):
result = search_text.find_in_text(input_data[0], input_data[1])
def test_search_in_text_sliding_window_errors() -> None:
search_text = SearchInText()
with pytest.raises(Exception):
search_text.find_in_text("ABC", "")
with pytest.raises(Exception):
search_text.find_in_text("", "ABC")
| [
"pytest.fixture",
"kawadi.text_search.SearchInText",
"multiprocessing.cpu_count",
"pytest.raises"
] | [((92, 108), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (106, 108), False, 'import pytest\n'), ((678, 694), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (692, 694), False, 'import pytest\n'), ((1145, 1159), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {}), '()\n', (1157, 1159), False, 'from kawadi.text_search import SearchInText\n'), ((1302, 1351), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {'multiprocessing': '(True)', 'max_workers': '(4)'}), '(multiprocessing=True, max_workers=4)\n', (1314, 1351), False, 'from kawadi.text_search import SearchInText\n'), ((1488, 1559), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {'score_threshold': '(0.99)', 'multiprocessing': '(True)', 'max_workers': '(4)'}), '(score_threshold=0.99, multiprocessing=True, max_workers=4)\n', (1500, 1559), False, 'from kawadi.text_search import SearchInText\n'), ((1713, 1769), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {'score_threshold': '(0.99)', 'multiprocessing': '(True)'}), '(score_threshold=0.99, multiprocessing=True)\n', (1725, 1769), False, 'from kawadi.text_search import SearchInText\n'), ((1909, 1953), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {'custom_score_func': 'custom_score'}), '(custom_score_func=custom_score)\n', (1921, 1953), False, 'from kawadi.text_search import SearchInText\n'), ((2098, 2161), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {'score_threshold': '(1)', 'custom_score_func': 'custom_score'}), '(score_threshold=1, custom_score_func=custom_score)\n', (2110, 2161), False, 'from kawadi.text_search import SearchInText\n'), ((2347, 2361), 'kawadi.text_search.SearchInText', 'SearchInText', ([], {}), '()\n', (2359, 2361), False, 'from kawadi.text_search import SearchInText\n'), ((1808, 1822), 'multiprocessing.cpu_count', 'mp.cpu_count', ([], {}), '()\n', (1820, 1822), True, 'import multiprocessing as mp\n'), ((2171, 2196), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (2184, 2196), False, 'import pytest\n'), ((2371, 2395), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2384, 2395), False, 'import pytest\n'), ((2451, 2475), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (2464, 2475), False, 'import pytest\n')] |
import os
basedir = os.path.abspath(os.path.dirname(__file__))
class Config:
SECRET_KEY = os.environ.get('SECRET_KEY')
DEBUG = False
class DevelopmentConfig(Config):
    DEBUG = True
    SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://levy:Dadiesboy12@localhost/ronchezfitness'
    SQLALCHEMY_TRACK_MODIFICATIONS = False
class ProductionConfig(Config):
    DEBUG = False
    SQLALCHEMY_DATABASE_URI = os.environ.get('postgres_uri')
    SQLALCHEMY_TRACK_MODIFICATIONS = True
class TestingConfig(Config):
DEBUG = True
TESTING = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'flask_test.db')
PRESERVE_CONTEXT_ON_EXCEPTION = False
SQLALCHEMY_TRACK_MODIFICATIONS = False
config_by_name = dict(
dev=DevelopmentConfig,
test=TestingConfig,
prod=ProductionConfig
)
key = Config.SECRET_KEY
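# Typical wiring in an app factory (editor's sketch; `config_name` is an
# illustrative variable, not part of this module):
#   app = Flask(__name__)
#   app.config.from_object(config_by_name[config_name])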
| [
"os.path.join",
"os.path.dirname",
"os.environ.get"
] | [((37, 62), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (52, 62), False, 'import os\n'), ((97, 125), 'os.environ.get', 'os.environ.get', (['"""SECRET_KEY"""'], {}), "('SECRET_KEY')\n", (111, 125), False, 'import os\n'), ((401, 431), 'os.environ.get', 'os.environ.get', (['"""postgres_uri"""'], {}), "('postgres_uri')\n", (415, 431), False, 'import os\n'), ((578, 616), 'os.path.join', 'os.path.join', (['basedir', '"""flask_test.db"""'], {}), "(basedir, 'flask_test.db')\n", (590, 616), False, 'import os\n')] |
import pandas as pd
from crosstab.mega_analysis.pivot_result_to_pixel_intensities import *
def lateralisation_to_pixel_intensities(all_combined_gifs, df,
semiology_term,
quantiles, method='non-linear', scale_factor=10,
intensity_label='lateralised intensity',
use_semiology_dictionary=False):
"""
runs pivot_result_to_pixel_intensities when the input has already been mapped to gifs as a result of
running QUERY_LATERALISATION.
This is the final step in the query_lateralisation pathway.
<NAME> 2019
"""
    # isn't really a pivot_result, but let's use consistent notation:
pivot_result = all_combined_gifs[['pt #s']].T
all_combined_gifs_intensities = pivot_result_to_pixel_intensities(pivot_result, df,
method=method, scale_factor=scale_factor, quantiles=quantiles,
use_main_df_calibration=False)
# now we just need to transpose it and add the other columns back
a2 = all_combined_gifs[['Gif Parcellations']].T
a3 = all_combined_gifs[['Semiology Term']].T
all_combined_gifs_intensities.index = [intensity_label]
all_lateralised_gifs = pd.concat([a3, a2, pivot_result, all_combined_gifs_intensities], sort=False).T
all_lateralised_gifs.loc[0, 'Semiology Term'] = str(semiology_term)
all_lateralised_gifs.loc[1, 'Semiology Term'] = 'use_semiology_dictionary='+str(use_semiology_dictionary)
    return all_lateralised_gifs
| [
"pandas.concat"
] | [((1344, 1420), 'pandas.concat', 'pd.concat', (['[a3, a2, pivot_result, all_combined_gifs_intensities]'], {'sort': '(False)'}), '([a3, a2, pivot_result, all_combined_gifs_intensities], sort=False)\n', (1353, 1420), True, 'import pandas as pd\n')] |
from overrides import overrides
import torch
from allennlp.common.checks import ConfigurationError
from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler
@LearningRateScheduler.register("polynomial_decay")
class PolynomialDecay(LearningRateScheduler):
"""
Implements polynomial decay Learning rate scheduling. The learning rate is first
linearly increased for the first `warmup_steps` training steps. Then it is decayed for
`total_steps` - `warmup_steps` from the initial learning rate to `end_learning_rate` using a polynomial
of degree `power`.
Formally,
`lr` = (`initial_lr` - `end_learning_rate`) *
((`total_steps` - `steps`)/(`total_steps` - `warmup_steps`)) ** `power`
# Parameters
total_steps: `int`, required
The total number of steps to adjust the learning rate for.
warmup_steps : `int`, required
The number of steps to linearly increase the learning rate.
power : `float`, optional (default = `1.0`)
The power of the polynomial used for decaying.
end_learning_rate : `float`, optional (default = `0.0`)
Final learning rate to decay towards.
"""
def __init__(
self,
optimizer: torch.optim.Optimizer,
num_epochs: int,
num_steps_per_epoch: int,
power=1.0,
warmup_steps=0,
end_learning_rate=0.0,
last_epoch: int = -1,
):
super().__init__(optimizer, last_epoch)
# Sanity check here.
if num_steps_per_epoch is None:
raise ConfigurationError(
"'num_steps_per_epoch' is required for this LR scheduler.\n\n"
"If you know how many batches per epoch for your training data, you can set this value "
"directly in your config. Otherwise you'll need to use compatible settings with your data loader "
"so that it can report an accurate number of batches per epoch. "
"If you're using the MultiProcessDataLoader, "
"this means you either need to set 'batches_per_epoch' "
"or leave 'max_instances_in_memory' as None (if your entire dataset can fit into memory)."
)
self.power = power
self.warmup_steps = warmup_steps
self.total_steps = num_epochs * num_steps_per_epoch
self.end_learning_rate = end_learning_rate
self.steps = 0
self.step_batch(0)
@overrides
def get_values(self):
if self.warmup_steps > 0 and self.steps < self.warmup_steps:
f = self.steps / self.warmup_steps
return [f * lr for lr in self.base_values]
if self.steps >= self.total_steps:
return [self.end_learning_rate for _ in self.base_values]
current_decay_steps = self.total_steps - self.steps
total_decay_steps = self.total_steps - self.warmup_steps
f = (current_decay_steps / total_decay_steps) ** self.power
return [
f * (lr - self.end_learning_rate) + self.end_learning_rate for lr in self.base_values
]
@overrides
def step(self, metric: float = None) -> None:
pass
@overrides
def step_batch(self, batch_num_total: int = None) -> None:
if batch_num_total is None:
self.steps += 1
else:
self.steps = batch_num_total
for param_group, lr in zip(self.optimizer.param_groups, self.get_values()):
param_group[self.param_group_field] = lr
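# Editor's sketch (hypothetical setup, not from the original module): with
# num_epochs=2, num_steps_per_epoch=5 (total_steps=10), warmup_steps=2,
# power=1.0 and end_learning_rate=0.0 on a base lr of 1.0:
#   step_batch(1) -> lr = 1/2 = 0.5             (linear warmup)
#   step_batch(8) -> lr = (10-8)/(10-2) = 0.25  (polynomial decay)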
| [
"allennlp.common.checks.ConfigurationError",
"allennlp.training.learning_rate_schedulers.learning_rate_scheduler.LearningRateScheduler.register"
] | [((204, 254), 'allennlp.training.learning_rate_schedulers.learning_rate_scheduler.LearningRateScheduler.register', 'LearningRateScheduler.register', (['"""polynomial_decay"""'], {}), "('polynomial_decay')\n", (234, 254), False, 'from allennlp.training.learning_rate_schedulers.learning_rate_scheduler import LearningRateScheduler\n'), ((1585, 2110), 'allennlp.common.checks.ConfigurationError', 'ConfigurationError', (['"""\'num_steps_per_epoch\' is required for this LR scheduler.\n\nIf you know how many batches per epoch for your training data, you can set this value directly in your config. Otherwise you\'ll need to use compatible settings with your data loader so that it can report an accurate number of batches per epoch. If you\'re using the MultiProcessDataLoader, this means you either need to set \'batches_per_epoch\' or leave \'max_instances_in_memory\' as None (if your entire dataset can fit into memory)."""'], {}), '(\n """\'num_steps_per_epoch\' is required for this LR scheduler.\n\nIf you know how many batches per epoch for your training data, you can set this value directly in your config. Otherwise you\'ll need to use compatible settings with your data loader so that it can report an accurate number of batches per epoch. If you\'re using the MultiProcessDataLoader, this means you either need to set \'batches_per_epoch\' or leave \'max_instances_in_memory\' as None (if your entire dataset can fit into memory)."""\n )\n', (1603, 2110), False, 'from allennlp.common.checks import ConfigurationError\n')] |
"""
TO DO:
1. Lots of edge cases are not accounted for
2. Could use some unit-testing scripts as a sanity check
3. What are the bounds for years?
"""
import mysql.connector
from mysql.connector import Error
import re
import numpy as np
def reject_outliers(data, m = 6.):
d = np.abs(data - np.median(data))
mdev = np.median(d)
s = d/mdev if mdev else 0.
return data[s<m]
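# Worked example (editor's illustration): for data = np.array([1, 2, 3, 100]),
# the median is 2.5, d = [1.5, 0.5, 0.5, 97.5] and mdev = 1.0, so s = d and
# with m = 6 only [1, 2, 3] survive the cut.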
def tag_year():
try:
conn = mysql.connector.connect(host='127.0.0.1',port = 3307,database='explorer_db',user='root',password = '')
if conn.is_connected():
print("Connection successful: ",conn.get_server_info())
cur = conn.cursor(buffered = True)
cur1 = conn.cursor()
cur.execute("SELECT event_key,htext FROM event WHERE htext IS NOT NULL AND event_year IS NULL")
count = 0
cent = {"first":"1st","second":"2nd","third":"3rd","fourth":"4th","fifth":"5th","sixth":"6th","seventh":"7th","eighth":"8th","ninth":"9th","tenth":"10th",
"eleventh":"11th","twelfth":"12th","thirteenth":"13th","fourteenth":"14th","fifteenth":"15th",
"sixteenth":"16th","seventeenth":"17th","eighteenth":"18th","nineteenth":"19th","twentieth":"20th","twentyfirst":"21st"}
mylist = list()
for row in cur:
text = row[1].lower()
pos = text.find("references[edit]")
pos2 = text.find("further reading[edit]")
if pos!=0:
sub1 = text[:pos]
sub2 = text[pos2:]
text = sub1+sub2
#print(text,"\n\n")
if "century" in text:
#print("YES\n\n")
mylist = re.findall("\d+[a-z][a-z]\s*-*century",text)
#print(mylist)
sec_list = re.findall("[f,s,t,e,n][a-z][a-z]+\s*-*century",text)
#print(sec_list)
sec_list = [i.replace(i[:i.find(" century")],cent[i[:i.find(" century")]]) for i in sec_list if (i[:i.find(" century")]) in cent]
mylist = mylist+sec_list
#print(mylist)
mylist = [re.sub(r"[a-z][a-z]\s*-*century","00",i) for i in mylist]
#print(mylist)
years = re.findall('([1][0-9][0-9][0-9])',row[1])
years2 = re.findall('([2][0-1][0-2][0-9])',row[1])
years = years+years2 + mylist
if not years:
allyear = "NULL"
else:
allyear = np.array([int(i) for i in years])
allyear = reject_outliers(allyear)
cur1.execute('''UPDATE event set event_year = %s WHERE event_key = %s''',(str(allyear[0]),row[0]))
#print(allyear)
print(len(allyear),count)
count+=1
conn.commit()
cur.close()
cur1.close()
conn.close()
print(count,"rows")
print("Done check database!")
except Error as e:
print("Error while connecting to MySQL", e)
| [
"re.sub",
"re.findall",
"mysql.connector.connect",
"numpy.median"
] | [((330, 342), 'numpy.median', 'np.median', (['d'], {}), '(d)\n', (339, 342), True, 'import numpy as np\n'), ((438, 544), 'mysql.connector.connect', 'mysql.connector.connect', ([], {'host': '"""127.0.0.1"""', 'port': '(3307)', 'database': '"""explorer_db"""', 'user': '"""root"""', 'password': '""""""'}), "(host='127.0.0.1', port=3307, database='explorer_db',\n user='root', password='')\n", (461, 544), False, 'import mysql\n'), ((302, 317), 'numpy.median', 'np.median', (['data'], {}), '(data)\n', (311, 317), True, 'import numpy as np\n'), ((2220, 2262), 're.findall', 're.findall', (['"""([1][0-9][0-9][0-9])"""', 'row[1]'], {}), "('([1][0-9][0-9][0-9])', row[1])\n", (2230, 2262), False, 'import re\n'), ((2283, 2325), 're.findall', 're.findall', (['"""([2][0-1][0-2][0-9])"""', 'row[1]'], {}), "('([2][0-1][0-2][0-9])', row[1])\n", (2293, 2325), False, 'import re\n'), ((1664, 1711), 're.findall', 're.findall', (['"""\\\\d+[a-z][a-z]\\\\s*-*century"""', 'text'], {}), "('\\\\d+[a-z][a-z]\\\\s*-*century', text)\n", (1674, 1711), False, 'import re\n'), ((1767, 1822), 're.findall', 're.findall', (['"""[f,s,t,e,n][a-z][a-z]+\\\\s*-*century"""', 'text'], {}), "('[f,s,t,e,n][a-z][a-z]+\\\\s*-*century', text)\n", (1777, 1822), False, 'import re\n'), ((2098, 2140), 're.sub', 're.sub', (['"""[a-z][a-z]\\\\s*-*century"""', '"""00"""', 'i'], {}), "('[a-z][a-z]\\\\s*-*century', '00', i)\n", (2104, 2140), False, 'import re\n')] |
import unittest
from pyhmmer.easel import Alphabet
from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch
class TestErrors(unittest.TestCase):
def test_unexpected_error(self):
err = UnexpectedError(1, "p7_ReconfigLength")
self.assertEqual(repr(err), "UnexpectedError(1, 'p7_ReconfigLength')")
self.assertEqual(str(err), "Unexpected error occurred in 'p7_ReconfigLength': eslFAIL (status code 1)")
def test_allocation_error(self):
err = AllocationError("ESL_SQ", 16)
self.assertEqual(repr(err), "AllocationError('ESL_SQ', 16)")
self.assertEqual(str(err), "Could not allocate 16 bytes for type ESL_SQ")
err2 = AllocationError("float", 4, 32)
self.assertEqual(repr(err2), "AllocationError('float', 4, 32)")
self.assertEqual(str(err2), "Could not allocate 128 bytes for an array of 32 float")
def test_easel_error(self):
err = EaselError(1, "failure")
self.assertEqual(repr(err), "EaselError(1, 'failure')")
self.assertEqual(str(err), "Error raised from C code: failure, eslFAIL (status code 1)")
def test_alphabet_mismatch(self):
err = AlphabetMismatch(Alphabet.dna(), Alphabet.rna())
self.assertEqual(repr(err), "AlphabetMismatch(Alphabet.dna(), Alphabet.rna())")
self.assertEqual(str(err), "Expected Alphabet.dna(), found Alphabet.rna()")
self.assertNotEqual(err, 1)
err2 = AlphabetMismatch(Alphabet.dna(), Alphabet.rna())
self.assertEqual(err, err)
self.assertEqual(err, err2)
err3 = AlphabetMismatch(Alphabet.dna(), Alphabet.amino())
self.assertNotEqual(err, err3)
| [
"pyhmmer.easel.Alphabet.amino",
"pyhmmer.errors.AllocationError",
"pyhmmer.easel.Alphabet.dna",
"pyhmmer.errors.UnexpectedError",
"pyhmmer.errors.EaselError",
"pyhmmer.easel.Alphabet.rna"
] | [((233, 272), 'pyhmmer.errors.UnexpectedError', 'UnexpectedError', (['(1)', '"""p7_ReconfigLength"""'], {}), "(1, 'p7_ReconfigLength')\n", (248, 272), False, 'from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch\n'), ((516, 545), 'pyhmmer.errors.AllocationError', 'AllocationError', (['"""ESL_SQ"""', '(16)'], {}), "('ESL_SQ', 16)\n", (531, 545), False, 'from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch\n'), ((713, 744), 'pyhmmer.errors.AllocationError', 'AllocationError', (['"""float"""', '(4)', '(32)'], {}), "('float', 4, 32)\n", (728, 744), False, 'from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch\n'), ((957, 981), 'pyhmmer.errors.EaselError', 'EaselError', (['(1)', '"""failure"""'], {}), "(1, 'failure')\n", (967, 981), False, 'from pyhmmer.errors import UnexpectedError, AllocationError, EaselError, AlphabetMismatch\n'), ((1213, 1227), 'pyhmmer.easel.Alphabet.dna', 'Alphabet.dna', ([], {}), '()\n', (1225, 1227), False, 'from pyhmmer.easel import Alphabet\n'), ((1229, 1243), 'pyhmmer.easel.Alphabet.rna', 'Alphabet.rna', ([], {}), '()\n', (1241, 1243), False, 'from pyhmmer.easel import Alphabet\n'), ((1486, 1500), 'pyhmmer.easel.Alphabet.dna', 'Alphabet.dna', ([], {}), '()\n', (1498, 1500), False, 'from pyhmmer.easel import Alphabet\n'), ((1502, 1516), 'pyhmmer.easel.Alphabet.rna', 'Alphabet.rna', ([], {}), '()\n', (1514, 1516), False, 'from pyhmmer.easel import Alphabet\n'), ((1622, 1636), 'pyhmmer.easel.Alphabet.dna', 'Alphabet.dna', ([], {}), '()\n', (1634, 1636), False, 'from pyhmmer.easel import Alphabet\n'), ((1638, 1654), 'pyhmmer.easel.Alphabet.amino', 'Alphabet.amino', ([], {}), '()\n', (1652, 1654), False, 'from pyhmmer.easel import Alphabet\n')] |
import base64
STANDARD_ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ234567'
CUSTOM_ALPHABET = 'abcdefghjkmnprstuvwxyz0123456789'
ENCODE_TRANS = str.maketrans(STANDARD_ALPHABET, CUSTOM_ALPHABET)
DECODE_TRANS = str.maketrans(CUSTOM_ALPHABET, STANDARD_ALPHABET)
PADDING_LETTER = '='
def encode(buffer):
    assert isinstance(buffer, (bytes, bytearray)), "please pass bytes"
    b32encoded = base64.b32encode(buffer)  # encode bytes to standard base32
    b32str = b32encoded.decode().replace(PADDING_LETTER, "")  # decode to str and strip '=' padding
    return b32str.translate(ENCODE_TRANS)  # translate to the custom alphabet
def decode(b32str):
    assert isinstance(b32str, str), "please pass a str"
# pad to 8's multiple with '='
b32len = len(b32str)
if b32len % 8 > 0:
padded_len = b32len + (8 - b32len % 8)
b32str = b32str.ljust(padded_len, PADDING_LETTER)
# translate and decode
return base64.b32decode(b32str.translate(DECODE_TRANS))
def decode_to_words(b32str):
result = bytearray()
for c in b32str:
result.append(CUSTOM_ALPHABET.index(c))
return result
def encode_words(words):
result = ""
for v in words:
result += CUSTOM_ALPHABET[v]
return result
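# Round-trip sanity check (editor's addition; the sample value is illustrative):
if __name__ == "__main__":
    data = b"hello"
    token = encode(data)  # 'rbw025dt' under the custom alphabet
    assert decode(token) == data
    assert encode_words(decode_to_words(token)) == token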
| [
"base64.b32encode"
] | [((399, 423), 'base64.b32encode', 'base64.b32encode', (['buffer'], {}), '(buffer)\n', (415, 423), False, 'import base64\n')] |
"""
A* grid planning
author: <NAME>(@Atsushi_twi)
<NAME> (<EMAIL>)
See Wikipedia article (https://en.wikipedia.org/wiki/A*_search_algorithm)
"""
import math
from node import Node
from obstacle_map import Position
class AStarPlanner:
def __init__(self, obstacle_map):
"""
        Initialize grid map for A* planning

        obstacle_map: occupancy map used for world<->grid coordinate
        conversion and obstacle checks on candidate nodes
"""
self.obstacle_map = obstacle_map
self.motion = self.get_motion_model()
self._handlers = []
self._all_nodes = dict()
def _create_node(self, *args, **kwargs):
return Node(*args, parent=self, **kwargs)
def add_handler(self, handler):
self._handlers.append(handler)
def node_at(self, world_position):
grid_position = self.obstacle_map.world_to_grid(world_position)
return self.node_at_grid(grid_position)
def node_at_grid(self, grid_position):
try:
node = self._all_nodes[grid_position]
except KeyError:
node = self._create_node(grid_position)
self._all_nodes[grid_position] = node
return node
def plan(self, start_position, goal_position):
"""
        A* path search

        input:
            start_position: start position in world coordinates
            goal_position: goal position in world coordinates

        output:
            list of world positions along the final path, ordered from the
            goal back to the start
"""
start_node = self.node_at(start_position)
goal_node = self.node_at(goal_position)
open_set = {start_node}
closed_set = set()
while open_set:
current = min(
open_set,
key=lambda o: o.cost + self.calc_heuristic(goal_node, o)
)
# Remove the item from the open set, and add it to the closed set
open_set.remove(current)
closed_set.add(current)
# show graph
for handler in self._handlers:
handler.on_position_update(current.world_position)
if current is goal_node:
print("Goal found")
break
# expand_grid search grid based on motion model
for motion in self.motion:
new_cost = current.cost + motion[2]
node = self.node_at_grid(
Position(
current.grid_position.x + motion[0],
current.grid_position.y + motion[1],
)
)
# If the node is not safe, do nothing
if not node.is_ok:
continue
if node in closed_set:
continue
if node not in open_set:
open_set.add(node) # discovered a new node
node.update(cost=new_cost, previous=current)
elif node.cost > new_cost:
# This path is the best until now. record it
node.update(cost=new_cost, previous=current)
path = self.calc_final_path(goal_node)
for handler in self._handlers:
handler.on_final_path(path)
return path
@staticmethod
def calc_final_path(goal_node):
# generate final course
result = list()
node = goal_node
while True:
result.append(node.world_position)
node = node.previous
if not node:
return result
@staticmethod
def calc_heuristic(node_1, node_2):
weight = 1.0 # weight of heuristic
pos_1 = node_1.grid_position
pos_2 = node_2.grid_position
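        # e.g. grid positions (0, 0) and (3, 4) give a Euclidean heuristic of 5.0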
return weight * math.hypot(pos_1.x - pos_2.x, pos_1.y - pos_2.y)
@staticmethod
def get_motion_model():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
return motion
| [
"obstacle_map.Position",
"math.hypot",
"node.Node",
"math.sqrt"
] | [((698, 732), 'node.Node', 'Node', (['*args'], {'parent': 'self'}), '(*args, parent=self, **kwargs)\n', (702, 732), False, 'from node import Node\n'), ((3874, 3922), 'math.hypot', 'math.hypot', (['(pos_1.x - pos_2.x)', '(pos_1.y - pos_2.y)'], {}), '(pos_1.x - pos_2.x, pos_1.y - pos_2.y)\n', (3884, 3922), False, 'import math\n'), ((4138, 4150), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4147, 4150), False, 'import math\n'), ((4179, 4191), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4188, 4191), False, 'import math\n'), ((4220, 4232), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4229, 4232), False, 'import math\n'), ((4260, 4272), 'math.sqrt', 'math.sqrt', (['(2)'], {}), '(2)\n', (4269, 4272), False, 'import math\n'), ((2529, 2615), 'obstacle_map.Position', 'Position', (['(current.grid_position.x + motion[0])', '(current.grid_position.y + motion[1])'], {}), '(current.grid_position.x + motion[0], current.grid_position.y +\n motion[1])\n', (2537, 2615), False, 'from obstacle_map import Position\n')] |
import datetime as dt
import pytest
from cuenca_validations.types import (
EntryType,
SavingCategory,
TransactionStatus,
WalletTransactionType,
)
from cuenca import BalanceEntry, Saving, WalletTransaction
@pytest.mark.vcr
def test_create_wallet_transaction():
wallet_id = 'LAvWUDH6OpQk-ber3E_zUEiQ'
deposit = WalletTransaction.create(
wallet_uri=f'/savings/{wallet_id}',
transaction_type=WalletTransactionType.deposit,
amount=10000,
)
assert deposit.id is not None
assert deposit.transaction_type == WalletTransactionType.deposit
assert deposit.status == TransactionStatus.succeeded
wallet = deposit.wallet
assert wallet.id == wallet_id
@pytest.mark.vcr
def test_retrieve_wallet_transaction():
id = 'LT32GEaFQR03cJRBcqb0p7uI'
transaction = WalletTransaction.retrieve(id)
assert transaction.id == id
assert transaction.status == TransactionStatus.succeeded
@pytest.mark.vcr
def test_query_wallet_transactions():
wallet_uri = '/savings/LAGdf-FVVeQeeKrmYpF5NIfA'
query = WalletTransaction.all(wallet_uri=wallet_uri)
transactions = [txn for txn in query]
assert len(transactions) == 2
@pytest.mark.vcr
def test_complete_flow_wallets():
# create wallet
saving = Saving.create(
name='Ahorros',
category=SavingCategory.travel,
goal_amount=1000000,
goal_date=dt.datetime.now() + dt.timedelta(days=365),
)
assert saving.balance == 0
assert saving.wallet_uri == f'/savings/{saving.id}'
# deposit money in wallet
deposit = WalletTransaction.create(
wallet_uri=saving.wallet_uri,
transaction_type=WalletTransactionType.deposit,
amount=10000,
)
assert deposit.status == TransactionStatus.succeeded
saving.refresh()
assert saving.balance == deposit.amount
deposit_uri = f'/wallet_transactions/{deposit.id}'
# withdraw money from wallet
withdrawal = WalletTransaction.create(
wallet_uri=saving.wallet_uri,
transaction_type=WalletTransactionType.withdrawal,
amount=2000,
)
assert withdrawal.status == TransactionStatus.succeeded
saving.refresh()
assert saving.balance == deposit.amount - withdrawal.amount
withdrawal_uri = f'/wallet_transactions/{withdrawal.id}'
# Check all transactions was created
query = WalletTransaction.all(wallet_uri=saving.wallet_uri)
transactions_db = [wt.id for wt in query]
assert deposit.id in transactions_db
assert withdrawal.id in transactions_db
# check balance entries created for wallet
entries = BalanceEntry.all(wallet_id=saving.id)
wallet_entries = [entry for entry in entries]
assert len(wallet_entries) == 2
# default -> deposit -> wallet (credit in wallet)
credit = [be for be in wallet_entries if be.type == EntryType.credit][0]
assert credit.related_transaction_uri == deposit_uri
assert credit.amount == deposit.amount
# default <- withdrawal <- wallet (debit in wallet)
debit = [be for be in wallet_entries if be.type == EntryType.debit][0]
assert debit.amount == withdrawal.amount
assert debit.related_transaction_uri == withdrawal_uri
# check balance entries created in default, related with wallet
entries = BalanceEntry.all(
wallet_id='default', funding_instrument_uri=saving.wallet_uri
)
default_entries = [entry for entry in entries]
assert len(default_entries) == 2
# default -> deposit -> wallet (debit in default)
debit = [be for be in default_entries if be.type == EntryType.debit][0]
assert debit.related_transaction_uri == deposit_uri
assert debit.amount == deposit.amount
# default <- withdrawal <- wallet (credit in default)
credit = [be for be in default_entries if be.type == EntryType.credit][0]
assert credit.amount == withdrawal.amount
assert credit.related_transaction_uri == withdrawal_uri
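# Editor's summary (derived from the assertions above): every wallet transaction
# books a balanced pair of entries -- a deposit debits 'default' and credits the
# saving wallet, while a withdrawal credits 'default' and debits the saving wallet.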
| [
"cuenca.WalletTransaction.retrieve",
"datetime.datetime.now",
"cuenca.WalletTransaction.all",
"datetime.timedelta",
"cuenca.WalletTransaction.create",
"cuenca.BalanceEntry.all"
] | [((337, 463), 'cuenca.WalletTransaction.create', 'WalletTransaction.create', ([], {'wallet_uri': 'f"""/savings/{wallet_id}"""', 'transaction_type': 'WalletTransactionType.deposit', 'amount': '(10000)'}), "(wallet_uri=f'/savings/{wallet_id}',\n transaction_type=WalletTransactionType.deposit, amount=10000)\n", (361, 463), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((826, 856), 'cuenca.WalletTransaction.retrieve', 'WalletTransaction.retrieve', (['id'], {}), '(id)\n', (852, 856), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((1072, 1116), 'cuenca.WalletTransaction.all', 'WalletTransaction.all', ([], {'wallet_uri': 'wallet_uri'}), '(wallet_uri=wallet_uri)\n', (1093, 1116), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((1587, 1708), 'cuenca.WalletTransaction.create', 'WalletTransaction.create', ([], {'wallet_uri': 'saving.wallet_uri', 'transaction_type': 'WalletTransactionType.deposit', 'amount': '(10000)'}), '(wallet_uri=saving.wallet_uri, transaction_type=\n WalletTransactionType.deposit, amount=10000)\n', (1611, 1708), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((1963, 2086), 'cuenca.WalletTransaction.create', 'WalletTransaction.create', ([], {'wallet_uri': 'saving.wallet_uri', 'transaction_type': 'WalletTransactionType.withdrawal', 'amount': '(2000)'}), '(wallet_uri=saving.wallet_uri, transaction_type=\n WalletTransactionType.withdrawal, amount=2000)\n', (1987, 2086), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((2373, 2424), 'cuenca.WalletTransaction.all', 'WalletTransaction.all', ([], {'wallet_uri': 'saving.wallet_uri'}), '(wallet_uri=saving.wallet_uri)\n', (2394, 2424), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((2618, 2655), 'cuenca.BalanceEntry.all', 'BalanceEntry.all', ([], {'wallet_id': 'saving.id'}), '(wallet_id=saving.id)\n', (2634, 2655), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((3292, 3371), 'cuenca.BalanceEntry.all', 'BalanceEntry.all', ([], {'wallet_id': '"""default"""', 'funding_instrument_uri': 'saving.wallet_uri'}), "(wallet_id='default', funding_instrument_uri=saving.wallet_uri)\n", (3308, 3371), False, 'from cuenca import BalanceEntry, Saving, WalletTransaction\n'), ((1405, 1422), 'datetime.datetime.now', 'dt.datetime.now', ([], {}), '()\n', (1420, 1422), True, 'import datetime as dt\n'), ((1425, 1447), 'datetime.timedelta', 'dt.timedelta', ([], {'days': '(365)'}), '(days=365)\n', (1437, 1447), True, 'import datetime as dt\n')] |
'''
XlPy/inputs
___________
Validates input file selection, configurations, and matches file types.
:copyright: (c) 2015 The Regents of the University of California.
:license: GNU GPL, see licenses/GNU GPLv3.txt for more details.
'''
# load modules
import operator as op
from xldlib.onstart.main import APP
from xldlib.utils import logger
from xldlib.xlpy import wrappers
# CHECKER
# -------
@logger.call('xlpy', 'debug')
@wrappers.threadprogress(3, 2, op.attrgetter('quantitative'))
@wrappers.threadmessage("Checking inputs...")
def checkinputs():
'''Validates the processed input files'''
source = APP.discovererthread
# crosslinkers
source.parameters.checkcrosslinkers()
# files
source.files.checkfile()
source.files.unzipfiles()
source.files.matchfile()
if source.quantitative:
source.files.checkengine()
| [
"xldlib.xlpy.wrappers.threadmessage",
"xldlib.utils.logger.call",
"operator.attrgetter"
] | [((421, 449), 'xldlib.utils.logger.call', 'logger.call', (['"""xlpy"""', '"""debug"""'], {}), "('xlpy', 'debug')\n", (432, 449), False, 'from xldlib.utils import logger\n'), ((513, 557), 'xldlib.xlpy.wrappers.threadmessage', 'wrappers.threadmessage', (['"""Checking inputs..."""'], {}), "('Checking inputs...')\n", (535, 557), False, 'from xldlib.xlpy import wrappers\n'), ((481, 510), 'operator.attrgetter', 'op.attrgetter', (['"""quantitative"""'], {}), "('quantitative')\n", (494, 510), True, 'import operator as op\n')] |
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from scipy.spatial import KDTree
import cv2
import yaml
import math
import numpy as np
STATE_COUNT_THRESHOLD = 3
TL_LOOK_AHEAD = 100
TL_LOOK_BEHIND = 15
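# STATE_COUNT_THRESHOLD: consecutive identical classifications required before a
# detected light state is trusted; TL_LOOK_AHEAD / TL_LOOK_BEHIND: waypoint
# window around the ego vehicle within which a stop line is considered relevant.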
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoints_2d = None
self.camera_image = None
self.lights = []
self.waypoint_tree = None
self.state = TrafficLight.UNKNOWN
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
self.update_traffic_lights()
def is_stop_tl_state(self, tl_state):
return tl_state == TrafficLight.RED or tl_state == TrafficLight.YELLOW
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[w.pose.pose.position.x, w.pose.pose.position.y] for w in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def update_traffic_lights(self):
'''
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
light_wp, state = self.process_traffic_lights()
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
light_wp = light_wp if self.is_stop_tl_state(state) else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
self.state_count += 1
def get_closest_waypoint(self, pose):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
pose (Pose): position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
px = pose.position.x
py = pose.position.y
closest_idx = -1
if self.waypoint_tree is not None:
closest_idx = self.waypoint_tree.query([px, py], 1)[1]
return closest_idx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
return light.state
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
            int: index of waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
if not self.pose:
return -1, TrafficLight.UNKNOWN
stop_line_positions = self.config['stop_line_positions']
car_position = self.get_closest_waypoint(self.pose.pose)
for i, light in enumerate(self.lights):
light_stop_pose = Pose()
light_stop_pose.position.x = stop_line_positions[i][0]
light_stop_pose.position.y = stop_line_positions[i][1]
# get the wp closest to each light_position
light_stop_wp = self.get_closest_waypoint(light_stop_pose)
if car_position - TL_LOOK_BEHIND <= light_stop_wp and light_stop_wp <= car_position + TL_LOOK_AHEAD:
state = self.get_light_state(light)
return light_stop_wp, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
| [
"rospy.logerr",
"rospy.Subscriber",
"rospy.init_node",
"rospy.get_param",
"scipy.spatial.KDTree",
"std_msgs.msg.Int32",
"yaml.load",
"rospy.spin",
"geometry_msgs.msg.Pose",
"rospy.Publisher"
] | [((450, 480), 'rospy.init_node', 'rospy.init_node', (['"""tl_detector"""'], {}), "('tl_detector')\n", (465, 480), False, 'import rospy\n'), ((729, 769), 'rospy.get_param', 'rospy.get_param', (['"""/traffic_light_config"""'], {}), "('/traffic_light_config')\n", (744, 769), False, 'import rospy\n'), ((792, 816), 'yaml.load', 'yaml.load', (['config_string'], {}), '(config_string)\n', (801, 816), False, 'import yaml\n'), ((856, 913), 'rospy.Publisher', 'rospy.Publisher', (['"""/traffic_waypoint"""', 'Int32'], {'queue_size': '(1)'}), "('/traffic_waypoint', Int32, queue_size=1)\n", (871, 913), False, 'import rospy\n'), ((1026, 1086), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/current_pose"""', 'PoseStamped', 'self.pose_cb'], {}), "('/current_pose', PoseStamped, self.pose_cb)\n", (1042, 1086), False, 'import rospy\n'), ((1095, 1155), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/base_waypoints"""', 'Lane', 'self.waypoints_cb'], {}), "('/base_waypoints', Lane, self.waypoints_cb)\n", (1111, 1155), False, 'import rospy\n'), ((1164, 1243), 'rospy.Subscriber', 'rospy.Subscriber', (['"""/vehicle/traffic_lights"""', 'TrafficLightArray', 'self.traffic_cb'], {}), "('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)\n", (1180, 1243), False, 'import rospy\n'), ((1252, 1264), 'rospy.spin', 'rospy.spin', ([], {}), '()\n', (1262, 1264), False, 'import rospy\n'), ((1731, 1756), 'scipy.spatial.KDTree', 'KDTree', (['self.waypoints_2d'], {}), '(self.waypoints_2d)\n', (1737, 1756), False, 'from scipy.spatial import KDTree\n'), ((4117, 4123), 'geometry_msgs.msg.Pose', 'Pose', ([], {}), '()\n', (4121, 4123), False, 'from geometry_msgs.msg import PoseStamped, Pose\n'), ((4743, 4788), 'rospy.logerr', 'rospy.logerr', (['"""Could not start traffic node."""'], {}), "('Could not start traffic node.')\n", (4755, 4788), False, 'import rospy\n'), ((2456, 2471), 'std_msgs.msg.Int32', 'Int32', (['light_wp'], {}), '(light_wp)\n', (2461, 2471), False, 'from std_msgs.msg import Int32\n'), ((2535, 2554), 'std_msgs.msg.Int32', 'Int32', (['self.last_wp'], {}), '(self.last_wp)\n', (2540, 2554), False, 'from std_msgs.msg import Int32\n')] |
# Generated by Django 2.1.7 on 2019-03-29 20:19
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('e_secretary', '0009_profile'),
]
operations = [
migrations.AlterModelOptions(
name='professor',
options={'ordering': ['title']},
),
migrations.RemoveField(
model_name='professor',
name='email',
),
migrations.RemoveField(
model_name='professor',
name='fname',
),
migrations.RemoveField(
model_name='professor',
name='lname',
),
migrations.RemoveField(
model_name='student',
name='email',
),
migrations.RemoveField(
model_name='student',
name='fname',
),
migrations.RemoveField(
model_name='student',
name='lname',
),
migrations.AddField(
model_name='profile',
name='email',
field=models.EmailField(default='<EMAIL>', max_length=254, null=True),
),
migrations.AddField(
model_name='profile',
name='fname',
field=models.CharField(default='First', help_text='First Name', max_length=50),
),
migrations.AddField(
model_name='profile',
name='lname',
field=models.CharField(default='Last', help_text='Last Name', max_length=50),
),
migrations.AlterField(
model_name='profile',
name='grammateia',
field=models.BooleanField(default=False),
),
migrations.DeleteModel(
name='Grammateia',
),
]
| [
"django.db.models.EmailField",
"django.db.migrations.DeleteModel",
"django.db.models.BooleanField",
"django.db.migrations.AlterModelOptions",
"django.db.migrations.RemoveField",
"django.db.models.CharField"
] | [((228, 307), 'django.db.migrations.AlterModelOptions', 'migrations.AlterModelOptions', ([], {'name': '"""professor"""', 'options': "{'ordering': ['title']}"}), "(name='professor', options={'ordering': ['title']})\n", (256, 307), False, 'from django.db import migrations, models\n'), ((352, 412), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""professor"""', 'name': '"""email"""'}), "(model_name='professor', name='email')\n", (374, 412), False, 'from django.db import migrations, models\n'), ((457, 517), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""professor"""', 'name': '"""fname"""'}), "(model_name='professor', name='fname')\n", (479, 517), False, 'from django.db import migrations, models\n'), ((562, 622), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""professor"""', 'name': '"""lname"""'}), "(model_name='professor', name='lname')\n", (584, 622), False, 'from django.db import migrations, models\n'), ((667, 725), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""student"""', 'name': '"""email"""'}), "(model_name='student', name='email')\n", (689, 725), False, 'from django.db import migrations, models\n'), ((770, 828), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""student"""', 'name': '"""fname"""'}), "(model_name='student', name='fname')\n", (792, 828), False, 'from django.db import migrations, models\n'), ((873, 931), 'django.db.migrations.RemoveField', 'migrations.RemoveField', ([], {'model_name': '"""student"""', 'name': '"""lname"""'}), "(model_name='student', name='lname')\n", (895, 931), False, 'from django.db import migrations, models\n'), ((1702, 1743), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Grammateia"""'}), "(name='Grammateia')\n", (1724, 1743), False, 'from django.db import migrations, models\n'), ((1075, 1138), 'django.db.models.EmailField', 'models.EmailField', ([], {'default': '"""<EMAIL>"""', 'max_length': '(254)', 'null': '(True)'}), "(default='<EMAIL>', max_length=254, null=True)\n", (1092, 1138), False, 'from django.db import migrations, models\n'), ((1258, 1330), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""First"""', 'help_text': '"""First Name"""', 'max_length': '(50)'}), "(default='First', help_text='First Name', max_length=50)\n", (1274, 1330), False, 'from django.db import migrations, models\n'), ((1450, 1520), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""Last"""', 'help_text': '"""Last Name"""', 'max_length': '(50)'}), "(default='Last', help_text='Last Name', max_length=50)\n", (1466, 1520), False, 'from django.db import migrations, models\n'), ((1647, 1681), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (1666, 1681), False, 'from django.db import migrations, models\n')] |
"""
Implements Pseudo-outcome based Two-step Nets, namely the DR-learner, the PW-learner and the
RA-learner.
"""
# Author: <NAME>
from typing import Callable, Optional, Tuple
import jax.numpy as jnp
import numpy as onp
import pandas as pd
from sklearn.model_selection import StratifiedKFold
import catenets.logger as log
from catenets.models.constants import (
DEFAULT_AVG_OBJECTIVE,
DEFAULT_BATCH_SIZE,
DEFAULT_CF_FOLDS,
DEFAULT_LAYERS_OUT,
DEFAULT_LAYERS_OUT_T,
DEFAULT_LAYERS_R,
DEFAULT_LAYERS_R_T,
DEFAULT_N_ITER,
DEFAULT_N_ITER_MIN,
DEFAULT_N_ITER_PRINT,
DEFAULT_NONLIN,
DEFAULT_PATIENCE,
DEFAULT_PENALTY_L2,
DEFAULT_SEED,
DEFAULT_STEP_SIZE,
DEFAULT_STEP_SIZE_T,
DEFAULT_UNITS_OUT,
DEFAULT_UNITS_OUT_T,
DEFAULT_UNITS_R,
DEFAULT_UNITS_R_T,
DEFAULT_VAL_SPLIT,
)
from catenets.models.jax.base import BaseCATENet, train_output_net_only
from catenets.models.jax.disentangled_nets import predict_snet3, train_snet3
from catenets.models.jax.flextenet import predict_flextenet, train_flextenet
from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np
from catenets.models.jax.offsetnet import predict_offsetnet, train_offsetnet
from catenets.models.jax.representation_nets import (
predict_snet1,
predict_snet2,
train_snet1,
train_snet2,
)
from catenets.models.jax.snet import predict_snet, train_snet
from catenets.models.jax.tnet import predict_t_net, train_tnet
from catenets.models.jax.transformation_utils import (
DR_TRANSFORMATION,
PW_TRANSFORMATION,
RA_TRANSFORMATION,
_get_transformation_function,
)
T_STRATEGY = "T"
S1_STRATEGY = "Tar"
S2_STRATEGY = "S2"
S3_STRATEGY = "S3"
S_STRATEGY = "S"
OFFSET_STRATEGY = "Offset"
FLEX_STRATEGY = "Flex"
ALL_STRATEGIES = [
T_STRATEGY,
S1_STRATEGY,
S2_STRATEGY,
S3_STRATEGY,
S_STRATEGY,
FLEX_STRATEGY,
OFFSET_STRATEGY,
]
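# Editor's gloss (inferred from the imports above): each strategy name selects the
# first-stage nuisance model -- "T" -> TNet, "Tar" -> TARNet (snet1),
# "S2"/"S3"/"S" -> the SNet variants, "Offset" -> OffsetNet, "Flex" -> FlexTENet.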
class PseudoOutcomeNet(BaseCATENet):
"""
Class implements TwoStepLearners based on pseudo-outcome regression as discussed in
    Curth & <NAME> (2021): RA-learner, PW-learner and DR-learner
Parameters
----------
    first_stage_strategy: str, default 'T'
        which nuisance estimator to use in the first stage
first_stage_args: dict
Any additional arguments to pass to first stage training function
data_split: bool, default False
Whether to split the data in two folds for estimation
cross_fit: bool, default False
Whether to perform cross fitting
n_cf_folds: int
Number of crossfitting folds to use
transformation: str, default 'AIPW'
pseudo-outcome to use ('AIPW' for DR-learner, 'HT' for PW learner, 'RA' for RA-learner)
binary_y: bool, default False
Whether the outcome is binary
n_layers_out: int
First stage Number of hypothesis layers (n_layers_out x n_units_out + 1 x Dense layer)
n_units_out: int
First stage Number of hidden units in each hypothesis layer
n_layers_r: int
First stage Number of representation layers before hypothesis layers (distinction between
hypothesis layers and representation layers is made to match TARNet & SNets)
n_units_r: int
First stage Number of hidden units in each representation layer
n_layers_out_t: int
Second stage Number of hypothesis layers (n_layers_out x n_units_out + 1 x Dense layer)
n_units_out_t: int
Second stage Number of hidden units in each hypothesis layer
n_layers_r_t: int
Second stage Number of representation layers before hypothesis layers (distinction between
hypothesis layers and representation layers is made to match TARNet & SNets)
n_units_r_t: int
Second stage Number of hidden units in each representation layer
penalty_l2: float
First stage l2 (ridge) penalty
penalty_l2_t: float
Second stage l2 (ridge) penalty
step_size: float
First stage learning rate for optimizer
step_size_t: float
Second stage learning rate for optimizer
n_iter: int
Maximum number of iterations
batch_size: int
Batch size
val_split_prop: float
Proportion of samples used for validation split (can be 0)
early_stopping: bool, default True
Whether to use early stopping
    patience: int
        Number of iterations to wait before early stopping once the validation loss stops improving
n_iter_min: int
Minimum number of iterations to go through before starting early stopping
n_iter_print: int
Number of iterations after which to print updates
seed: int
Seed used
nonlin: string, default 'elu'
Nonlinearity to use in NN
"""
def __init__(
self,
first_stage_strategy: str = T_STRATEGY,
first_stage_args: Optional[dict] = None,
data_split: bool = False,
cross_fit: bool = False,
n_cf_folds: int = DEFAULT_CF_FOLDS,
transformation: str = DR_TRANSFORMATION,
binary_y: bool = False,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_r: int = DEFAULT_LAYERS_R,
n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
n_layers_r_t: int = DEFAULT_LAYERS_R_T,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_r: int = DEFAULT_UNITS_R,
n_units_out_t: int = DEFAULT_UNITS_OUT_T,
n_units_r_t: int = DEFAULT_UNITS_R_T,
penalty_l2: float = DEFAULT_PENALTY_L2,
penalty_l2_t: float = DEFAULT_PENALTY_L2,
step_size: float = DEFAULT_STEP_SIZE,
step_size_t: float = DEFAULT_STEP_SIZE_T,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
val_split_prop: float = DEFAULT_VAL_SPLIT,
early_stopping: bool = True,
patience: int = DEFAULT_PATIENCE,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
rescale_transformation: bool = False,
nonlin: str = DEFAULT_NONLIN,
) -> None:
# settings
self.first_stage_strategy = first_stage_strategy
self.first_stage_args = first_stage_args
self.binary_y = binary_y
self.transformation = transformation
self.data_split = data_split
self.cross_fit = cross_fit
self.n_cf_folds = n_cf_folds
# model architecture hyperparams
self.n_layers_out = n_layers_out
self.n_layers_out_t = n_layers_out_t
self.n_layers_r = n_layers_r
self.n_layers_r_t = n_layers_r_t
self.n_units_out = n_units_out
self.n_units_out_t = n_units_out_t
self.n_units_r = n_units_r
self.n_units_r_t = n_units_r_t
self.nonlin = nonlin
# other hyperparameters
self.penalty_l2 = penalty_l2
self.penalty_l2_t = penalty_l2_t
self.step_size = step_size
self.step_size_t = step_size_t
self.n_iter = n_iter
self.batch_size = batch_size
self.n_iter_print = n_iter_print
self.seed = seed
self.val_split_prop = val_split_prop
self.early_stopping = early_stopping
self.patience = patience
self.n_iter_min = n_iter_min
self.rescale_transformation = rescale_transformation
def _get_train_function(self) -> Callable:
return train_pseudooutcome_net
def fit(
self,
X: jnp.ndarray,
y: jnp.ndarray,
w: jnp.ndarray,
p: Optional[jnp.ndarray] = None,
) -> "PseudoOutcomeNet":
        # override BaseCATENet.fit so we can pass p as an extra param
# some quick input checks
X = check_X_is_np(X)
self._check_inputs(w, p)
train_func = self._get_train_function()
train_params = self.get_params()
if "transformation" not in train_params.keys():
train_params.update({"transformation": self.transformation})
if self.rescale_transformation:
self._params, self._predict_funs, self._scale_factor = train_func(
X, y, w, p, **train_params
)
else:
self._params, self._predict_funs = train_func(X, y, w, p, **train_params)
return self
def _get_predict_function(self) -> Callable:
# Two step nets do not need this
pass
def predict(
self, X: jnp.ndarray, return_po: bool = False, return_prop: bool = False
) -> jnp.ndarray:
# check input
if return_po:
raise NotImplementedError(
"TwoStepNets have no Potential outcome predictors."
)
if return_prop:
raise NotImplementedError("TwoStepNets have no Propensity predictors.")
if isinstance(X, pd.DataFrame):
X = X.values
if self.rescale_transformation:
return 1 / self._scale_factor * self._predict_funs(self._params, X)
else:
return self._predict_funs(self._params, X)
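# Note: DRNet, RANet and PWNet below are thin convenience wrappers that pin
# `transformation` to DR_TRANSFORMATION, RA_TRANSFORMATION and
# PW_TRANSFORMATION respectively, forwarding all other arguments unchanged.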
class DRNet(PseudoOutcomeNet):
"""Wrapper for DR-learner using PseudoOutcomeNet"""
def __init__(
self,
first_stage_strategy: str = T_STRATEGY,
data_split: bool = False,
cross_fit: bool = False,
n_cf_folds: int = DEFAULT_CF_FOLDS,
binary_y: bool = False,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_r: int = DEFAULT_LAYERS_R,
n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
n_layers_r_t: int = DEFAULT_LAYERS_R_T,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_r: int = DEFAULT_UNITS_R,
n_units_out_t: int = DEFAULT_UNITS_OUT_T,
n_units_r_t: int = DEFAULT_UNITS_R_T,
penalty_l2: float = DEFAULT_PENALTY_L2,
penalty_l2_t: float = DEFAULT_PENALTY_L2,
step_size: float = DEFAULT_STEP_SIZE,
step_size_t: float = DEFAULT_STEP_SIZE_T,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
val_split_prop: float = DEFAULT_VAL_SPLIT,
early_stopping: bool = True,
patience: int = DEFAULT_PATIENCE,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
rescale_transformation: bool = False,
nonlin: str = DEFAULT_NONLIN,
first_stage_args: Optional[dict] = None,
) -> None:
super().__init__(
first_stage_strategy=first_stage_strategy,
data_split=data_split,
cross_fit=cross_fit,
n_cf_folds=n_cf_folds,
transformation=DR_TRANSFORMATION,
binary_y=binary_y,
n_layers_out=n_layers_out,
n_layers_r=n_layers_r,
n_layers_out_t=n_layers_out_t,
n_layers_r_t=n_layers_r_t,
n_units_out=n_units_out,
n_units_r=n_units_r,
n_units_out_t=n_units_out_t,
n_units_r_t=n_units_r_t,
penalty_l2=penalty_l2,
penalty_l2_t=penalty_l2_t,
step_size=step_size,
step_size_t=step_size_t,
n_iter=n_iter,
batch_size=batch_size,
n_iter_min=n_iter_min,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
rescale_transformation=rescale_transformation,
first_stage_args=first_stage_args,
)
class RANet(PseudoOutcomeNet):
"""Wrapper for RA-learner using PseudoOutcomeNet"""
def __init__(
self,
first_stage_strategy: str = T_STRATEGY,
data_split: bool = False,
cross_fit: bool = False,
n_cf_folds: int = DEFAULT_CF_FOLDS,
binary_y: bool = False,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_r: int = DEFAULT_LAYERS_R,
n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
n_layers_r_t: int = DEFAULT_LAYERS_R_T,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_r: int = DEFAULT_UNITS_R,
n_units_out_t: int = DEFAULT_UNITS_OUT_T,
n_units_r_t: int = DEFAULT_UNITS_R_T,
penalty_l2: float = DEFAULT_PENALTY_L2,
penalty_l2_t: float = DEFAULT_PENALTY_L2,
step_size: float = DEFAULT_STEP_SIZE,
step_size_t: float = DEFAULT_STEP_SIZE_T,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
val_split_prop: float = DEFAULT_VAL_SPLIT,
early_stopping: bool = True,
patience: int = DEFAULT_PATIENCE,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
rescale_transformation: bool = False,
nonlin: str = DEFAULT_NONLIN,
first_stage_args: Optional[dict] = None,
) -> None:
super().__init__(
first_stage_strategy=first_stage_strategy,
data_split=data_split,
cross_fit=cross_fit,
n_cf_folds=n_cf_folds,
transformation=RA_TRANSFORMATION,
binary_y=binary_y,
n_layers_out=n_layers_out,
n_layers_r=n_layers_r,
n_layers_out_t=n_layers_out_t,
n_layers_r_t=n_layers_r_t,
n_units_out=n_units_out,
n_units_r=n_units_r,
n_units_out_t=n_units_out_t,
n_units_r_t=n_units_r_t,
penalty_l2=penalty_l2,
penalty_l2_t=penalty_l2_t,
step_size=step_size,
step_size_t=step_size_t,
n_iter=n_iter,
batch_size=batch_size,
n_iter_min=n_iter_min,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
rescale_transformation=rescale_transformation,
first_stage_args=first_stage_args,
)
class PWNet(PseudoOutcomeNet):
"""Wrapper for PW-learner using PseudoOutcomeNet"""
def __init__(
self,
first_stage_strategy: str = T_STRATEGY,
data_split: bool = False,
cross_fit: bool = False,
n_cf_folds: int = DEFAULT_CF_FOLDS,
binary_y: bool = False,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_r: int = DEFAULT_LAYERS_R,
n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
n_layers_r_t: int = DEFAULT_LAYERS_R_T,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_r: int = DEFAULT_UNITS_R,
n_units_out_t: int = DEFAULT_UNITS_OUT_T,
n_units_r_t: int = DEFAULT_UNITS_R_T,
penalty_l2: float = DEFAULT_PENALTY_L2,
penalty_l2_t: float = DEFAULT_PENALTY_L2,
step_size: float = DEFAULT_STEP_SIZE,
step_size_t: float = DEFAULT_STEP_SIZE_T,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
val_split_prop: float = DEFAULT_VAL_SPLIT,
early_stopping: bool = True,
patience: int = DEFAULT_PATIENCE,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
rescale_transformation: bool = False,
nonlin: str = DEFAULT_NONLIN,
first_stage_args: Optional[dict] = None,
) -> None:
super().__init__(
first_stage_strategy=first_stage_strategy,
data_split=data_split,
cross_fit=cross_fit,
n_cf_folds=n_cf_folds,
transformation=PW_TRANSFORMATION,
binary_y=binary_y,
n_layers_out=n_layers_out,
n_layers_r=n_layers_r,
n_layers_out_t=n_layers_out_t,
n_layers_r_t=n_layers_r_t,
n_units_out=n_units_out,
n_units_r=n_units_r,
n_units_out_t=n_units_out_t,
n_units_r_t=n_units_r_t,
penalty_l2=penalty_l2,
penalty_l2_t=penalty_l2_t,
step_size=step_size,
step_size_t=step_size_t,
n_iter=n_iter,
batch_size=batch_size,
n_iter_min=n_iter_min,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
rescale_transformation=rescale_transformation,
first_stage_args=first_stage_args,
)
def train_pseudooutcome_net(
X: jnp.ndarray,
y: jnp.ndarray,
w: jnp.ndarray,
p: Optional[jnp.ndarray] = None,
first_stage_strategy: str = T_STRATEGY,
data_split: bool = False,
cross_fit: bool = False,
n_cf_folds: int = DEFAULT_CF_FOLDS,
transformation: str = DR_TRANSFORMATION,
binary_y: bool = False,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_r: int = DEFAULT_LAYERS_R,
n_layers_r_t: int = DEFAULT_LAYERS_R_T,
n_layers_out_t: int = DEFAULT_LAYERS_OUT_T,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_r: int = DEFAULT_UNITS_R,
n_units_out_t: int = DEFAULT_UNITS_OUT_T,
n_units_r_t: int = DEFAULT_UNITS_R_T,
penalty_l2: float = DEFAULT_PENALTY_L2,
penalty_l2_t: float = DEFAULT_PENALTY_L2,
step_size: float = DEFAULT_STEP_SIZE,
step_size_t: float = DEFAULT_STEP_SIZE_T,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
val_split_prop: float = DEFAULT_VAL_SPLIT,
early_stopping: bool = True,
patience: int = DEFAULT_PATIENCE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
rescale_transformation: bool = False,
return_val_loss: bool = False,
nonlin: str = DEFAULT_NONLIN,
avg_objective: bool = DEFAULT_AVG_OBJECTIVE,
first_stage_args: Optional[dict] = None,
) -> Tuple:
# get shape of data
n, d = X.shape
if p is not None:
p = check_shape_1d_data(p)
# get transformation function
transformation_function = _get_transformation_function(transformation)
# get strategy name
if first_stage_strategy not in ALL_STRATEGIES:
        raise ValueError(
            "Parameter first_stage_strategy should be in "
            "catenets.models.pseudo_outcome_nets.ALL_STRATEGIES. "
            "You passed {}".format(first_stage_strategy)
        )
    # split data as requested
if p is None or transformation is not PW_TRANSFORMATION:
if not cross_fit:
if not data_split:
log.debug("Training first stage with all data (no data splitting)")
# use all data for both
fit_mask = onp.ones(n, dtype=bool)
pred_mask = onp.ones(n, dtype=bool)
else:
log.debug("Training first stage with half of the data (data splitting)")
# split data in half
fit_idx = onp.random.choice(n, int(onp.round(n / 2)))
fit_mask = onp.zeros(n, dtype=bool)
fit_mask[fit_idx] = 1
pred_mask = ~fit_mask
mu_0, mu_1, pi_hat = _train_and_predict_first_stage(
X,
y,
w,
fit_mask,
pred_mask,
first_stage_strategy=first_stage_strategy,
binary_y=binary_y,
n_layers_out=n_layers_out,
n_layers_r=n_layers_r,
n_units_out=n_units_out,
n_units_r=n_units_r,
penalty_l2=penalty_l2,
step_size=step_size,
n_iter=n_iter,
batch_size=batch_size,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_min=n_iter_min,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
avg_objective=avg_objective,
transformation=transformation,
first_stage_args=first_stage_args,
)
if data_split:
# keep only prediction data
X, y, w = X[pred_mask, :], y[pred_mask, :], w[pred_mask, :]
if p is not None:
p = p[pred_mask, :]
else:
log.debug(f"Training first stage in {n_cf_folds} folds (cross-fitting)")
# do cross fitting
mu_0, mu_1, pi_hat = onp.zeros((n, 1)), onp.zeros((n, 1)), onp.zeros((n, 1))
splitter = StratifiedKFold(
n_splits=n_cf_folds, shuffle=True, random_state=seed
)
fold_count = 1
for train_idx, test_idx in splitter.split(X, w):
log.debug(f"Training fold {fold_count}.")
fold_count = fold_count + 1
pred_mask = onp.zeros(n, dtype=bool)
pred_mask[test_idx] = 1
fit_mask = ~pred_mask
(
mu_0[pred_mask],
mu_1[pred_mask],
pi_hat[pred_mask],
) = _train_and_predict_first_stage(
X,
y,
w,
fit_mask,
pred_mask,
first_stage_strategy=first_stage_strategy,
binary_y=binary_y,
n_layers_out=n_layers_out,
n_layers_r=n_layers_r,
n_units_out=n_units_out,
n_units_r=n_units_r,
penalty_l2=penalty_l2,
step_size=step_size,
n_iter=n_iter,
batch_size=batch_size,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_min=n_iter_min,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
avg_objective=avg_objective,
transformation=transformation,
first_stage_args=first_stage_args,
)
log.debug("Training second stage.")
if p is not None:
# use known propensity score
p = check_shape_1d_data(p)
pi_hat = p
# second stage
y, w = check_shape_1d_data(y), check_shape_1d_data(w)
# transform data and fit on transformed data
if transformation is PW_TRANSFORMATION:
mu_0 = None
mu_1 = None
pseudo_outcome = transformation_function(y=y, w=w, p=pi_hat, mu_0=mu_0, mu_1=mu_1)
if rescale_transformation:
scale_factor = onp.std(y) / onp.std(pseudo_outcome)
if scale_factor > 1:
scale_factor = 1
else:
pseudo_outcome = scale_factor * pseudo_outcome
params, predict_funs = train_output_net_only(
X,
pseudo_outcome,
binary_y=False,
n_layers_out=n_layers_out_t,
n_units_out=n_units_out_t,
n_layers_r=n_layers_r_t,
n_units_r=n_units_r_t,
penalty_l2=penalty_l2_t,
step_size=step_size_t,
n_iter=n_iter,
batch_size=batch_size,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_min=n_iter_min,
n_iter_print=n_iter_print,
seed=seed,
return_val_loss=return_val_loss,
nonlin=nonlin,
avg_objective=avg_objective,
)
return params, predict_funs, scale_factor
else:
return train_output_net_only(
X,
pseudo_outcome,
binary_y=False,
n_layers_out=n_layers_out_t,
n_units_out=n_units_out_t,
n_layers_r=n_layers_r_t,
n_units_r=n_units_r_t,
penalty_l2=penalty_l2_t,
step_size=step_size_t,
n_iter=n_iter,
batch_size=batch_size,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_min=n_iter_min,
n_iter_print=n_iter_print,
seed=seed,
return_val_loss=return_val_loss,
nonlin=nonlin,
avg_objective=avg_objective,
)
def _train_and_predict_first_stage(
X: jnp.ndarray,
y: jnp.ndarray,
w: jnp.ndarray,
fit_mask: jnp.ndarray,
pred_mask: jnp.ndarray,
first_stage_strategy: str,
binary_y: bool = False,
n_layers_out: int = DEFAULT_LAYERS_OUT,
n_layers_r: int = DEFAULT_LAYERS_R,
n_units_out: int = DEFAULT_UNITS_OUT,
n_units_r: int = DEFAULT_UNITS_R,
penalty_l2: float = DEFAULT_PENALTY_L2,
step_size: float = DEFAULT_STEP_SIZE,
n_iter: int = DEFAULT_N_ITER,
batch_size: int = DEFAULT_BATCH_SIZE,
val_split_prop: float = DEFAULT_VAL_SPLIT,
early_stopping: bool = True,
patience: int = DEFAULT_PATIENCE,
n_iter_min: int = DEFAULT_N_ITER_MIN,
n_iter_print: int = DEFAULT_N_ITER_PRINT,
seed: int = DEFAULT_SEED,
nonlin: str = DEFAULT_NONLIN,
avg_objective: bool = False,
transformation: str = DR_TRANSFORMATION,
first_stage_args: Optional[dict] = None,
) -> Tuple:
if len(w.shape) > 1:
w = w.reshape((len(w),))
if first_stage_args is None:
first_stage_args = {}
# split the data
X_fit, y_fit, w_fit = X[fit_mask, :], y[fit_mask], w[fit_mask]
X_pred = X[pred_mask, :]
train_fun: Callable
predict_fun: Callable
if first_stage_strategy == T_STRATEGY:
train_fun, predict_fun = train_tnet, predict_t_net
elif first_stage_strategy == S_STRATEGY:
train_fun, predict_fun = train_snet, predict_snet
elif first_stage_strategy == S1_STRATEGY:
train_fun, predict_fun = train_snet1, predict_snet1
elif first_stage_strategy == S2_STRATEGY:
train_fun, predict_fun = train_snet2, predict_snet2
elif first_stage_strategy == S3_STRATEGY:
train_fun, predict_fun = train_snet3, predict_snet3
elif first_stage_strategy == OFFSET_STRATEGY:
train_fun, predict_fun = train_offsetnet, predict_offsetnet
elif first_stage_strategy == FLEX_STRATEGY:
train_fun, predict_fun = train_flextenet, predict_flextenet
else:
raise ValueError(
"{} is not a valid first stage strategy for a PseudoOutcomeNet".format(
first_stage_strategy
)
)
log.debug("Training PO estimators")
trained_params, pred_fun = train_fun(
X_fit,
y_fit,
w_fit,
binary_y=binary_y,
n_layers_r=n_layers_r,
n_units_r=n_units_r,
n_layers_out=n_layers_out,
n_units_out=n_units_out,
penalty_l2=penalty_l2,
step_size=step_size,
n_iter=n_iter,
batch_size=batch_size,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_min=n_iter_min,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
avg_objective=avg_objective,
**first_stage_args,
)
if first_stage_strategy in [S_STRATEGY, S2_STRATEGY, S3_STRATEGY]:
_, mu_0, mu_1, pi_hat = predict_fun(
X_pred, trained_params, pred_fun, return_po=True, return_prop=True
)
else:
if transformation is not PW_TRANSFORMATION:
_, mu_0, mu_1 = predict_fun(
X_pred, trained_params, pred_fun, return_po=True
)
else:
mu_0, mu_1 = onp.nan, onp.nan
if transformation is not RA_TRANSFORMATION:
log.debug("Training propensity net")
params_prop, predict_fun_prop = train_output_net_only(
X_fit,
w_fit,
binary_y=True,
n_layers_out=n_layers_out,
n_units_out=n_units_out,
n_layers_r=n_layers_r,
n_units_r=n_units_r,
penalty_l2=penalty_l2,
step_size=step_size,
n_iter=n_iter,
batch_size=batch_size,
val_split_prop=val_split_prop,
early_stopping=early_stopping,
patience=patience,
n_iter_min=n_iter_min,
n_iter_print=n_iter_print,
seed=seed,
nonlin=nonlin,
avg_objective=avg_objective,
)
pi_hat = predict_fun_prop(params_prop, X_pred)
else:
pi_hat = onp.nan
return mu_0, mu_1, pi_hat
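if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch, not part of the original
    # module): fit a DR-learner on tiny synthetic data and predict the CATE.
    # Hyperparameters are shrunk so the run finishes quickly.
    onp.random.seed(42)
    n_obs, n_dim = 200, 5
    X_demo = onp.random.normal(size=(n_obs, n_dim))
    w_demo = onp.random.binomial(1, 0.5, size=(n_obs, 1))  # random treatment
    tau = X_demo[:, [0]]  # true effect equals the first covariate
    y_demo = X_demo[:, [1]] + w_demo * tau + 0.1 * onp.random.normal(size=(n_obs, 1))
    learner = DRNet(n_iter=100, batch_size=50, n_iter_print=50)
    learner.fit(X_demo, y_demo, w_demo)
    print("predicted CATE shape:", learner.predict(X_demo).shape)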
| [
"catenets.logger.debug",
"catenets.models.jax.model_utils.check_shape_1d_data",
"numpy.ones",
"catenets.models.jax.transformation_utils._get_transformation_function",
"sklearn.model_selection.StratifiedKFold",
"numpy.zeros",
"numpy.std",
"catenets.models.jax.base.train_output_net_only",
"numpy.round... | [((18080, 18124), 'catenets.models.jax.transformation_utils._get_transformation_function', '_get_transformation_function', (['transformation'], {}), '(transformation)\n', (18108, 18124), False, 'from catenets.models.jax.transformation_utils import DR_TRANSFORMATION, PW_TRANSFORMATION, RA_TRANSFORMATION, _get_transformation_function\n'), ((22247, 22282), 'catenets.logger.debug', 'log.debug', (['"""Training second stage."""'], {}), "('Training second stage.')\n", (22256, 22282), True, 'import catenets.logger as log\n'), ((26636, 26671), 'catenets.logger.debug', 'log.debug', (['"""Training PO estimators"""'], {}), "('Training PO estimators')\n", (26645, 26671), True, 'import catenets.logger as log\n'), ((7687, 7703), 'catenets.models.jax.model_utils.check_X_is_np', 'check_X_is_np', (['X'], {}), '(X)\n', (7700, 7703), False, 'from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np\n'), ((17992, 18014), 'catenets.models.jax.model_utils.check_shape_1d_data', 'check_shape_1d_data', (['p'], {}), '(p)\n', (18011, 18014), False, 'from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np\n'), ((22355, 22377), 'catenets.models.jax.model_utils.check_shape_1d_data', 'check_shape_1d_data', (['p'], {}), '(p)\n', (22374, 22377), False, 'from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np\n'), ((22428, 22450), 'catenets.models.jax.model_utils.check_shape_1d_data', 'check_shape_1d_data', (['y'], {}), '(y)\n', (22447, 22450), False, 'from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np\n'), ((22452, 22474), 'catenets.models.jax.model_utils.check_shape_1d_data', 'check_shape_1d_data', (['w'], {}), '(w)\n', (22471, 22474), False, 'from catenets.models.jax.model_utils import check_shape_1d_data, check_X_is_np\n'), ((22949, 23439), 'catenets.models.jax.base.train_output_net_only', 'train_output_net_only', (['X', 'pseudo_outcome'], {'binary_y': '(False)', 'n_layers_out': 'n_layers_out_t', 'n_units_out': 'n_units_out_t', 'n_layers_r': 'n_layers_r_t', 'n_units_r': 'n_units_r_t', 'penalty_l2': 'penalty_l2_t', 'step_size': 'step_size_t', 'n_iter': 'n_iter', 'batch_size': 'batch_size', 'val_split_prop': 'val_split_prop', 'early_stopping': 'early_stopping', 'patience': 'patience', 'n_iter_min': 'n_iter_min', 'n_iter_print': 'n_iter_print', 'seed': 'seed', 'return_val_loss': 'return_val_loss', 'nonlin': 'nonlin', 'avg_objective': 'avg_objective'}), '(X, pseudo_outcome, binary_y=False, n_layers_out=\n n_layers_out_t, n_units_out=n_units_out_t, n_layers_r=n_layers_r_t,\n n_units_r=n_units_r_t, penalty_l2=penalty_l2_t, step_size=step_size_t,\n n_iter=n_iter, batch_size=batch_size, val_split_prop=val_split_prop,\n early_stopping=early_stopping, patience=patience, n_iter_min=n_iter_min,\n n_iter_print=n_iter_print, seed=seed, return_val_loss=return_val_loss,\n nonlin=nonlin, avg_objective=avg_objective)\n', (22970, 23439), False, 'from catenets.models.jax.base import BaseCATENet, train_output_net_only\n'), ((23741, 24231), 'catenets.models.jax.base.train_output_net_only', 'train_output_net_only', (['X', 'pseudo_outcome'], {'binary_y': '(False)', 'n_layers_out': 'n_layers_out_t', 'n_units_out': 'n_units_out_t', 'n_layers_r': 'n_layers_r_t', 'n_units_r': 'n_units_r_t', 'penalty_l2': 'penalty_l2_t', 'step_size': 'step_size_t', 'n_iter': 'n_iter', 'batch_size': 'batch_size', 'val_split_prop': 'val_split_prop', 'early_stopping': 'early_stopping', 'patience': 'patience', 'n_iter_min': 'n_iter_min', 'n_iter_print': 
'n_iter_print', 'seed': 'seed', 'return_val_loss': 'return_val_loss', 'nonlin': 'nonlin', 'avg_objective': 'avg_objective'}), '(X, pseudo_outcome, binary_y=False, n_layers_out=\n n_layers_out_t, n_units_out=n_units_out_t, n_layers_r=n_layers_r_t,\n n_units_r=n_units_r_t, penalty_l2=penalty_l2_t, step_size=step_size_t,\n n_iter=n_iter, batch_size=batch_size, val_split_prop=val_split_prop,\n early_stopping=early_stopping, patience=patience, n_iter_min=n_iter_min,\n n_iter_print=n_iter_print, seed=seed, return_val_loss=return_val_loss,\n nonlin=nonlin, avg_objective=avg_objective)\n', (23762, 24231), False, 'from catenets.models.jax.base import BaseCATENet, train_output_net_only\n'), ((20378, 20450), 'catenets.logger.debug', 'log.debug', (['f"""Training first stage in {n_cf_folds} folds (cross-fitting)"""'], {}), "(f'Training first stage in {n_cf_folds} folds (cross-fitting)')\n", (20387, 20450), True, 'import catenets.logger as log\n'), ((20594, 20663), 'sklearn.model_selection.StratifiedKFold', 'StratifiedKFold', ([], {'n_splits': 'n_cf_folds', 'shuffle': '(True)', 'random_state': 'seed'}), '(n_splits=n_cf_folds, shuffle=True, random_state=seed)\n', (20609, 20663), False, 'from sklearn.model_selection import StratifiedKFold\n'), ((22750, 22760), 'numpy.std', 'onp.std', (['y'], {}), '(y)\n', (22757, 22760), True, 'import numpy as onp\n'), ((22763, 22786), 'numpy.std', 'onp.std', (['pseudo_outcome'], {}), '(pseudo_outcome)\n', (22770, 22786), True, 'import numpy as onp\n'), ((27821, 27857), 'catenets.logger.debug', 'log.debug', (['"""Training propensity net"""'], {}), "('Training propensity net')\n", (27830, 27857), True, 'import catenets.logger as log\n'), ((27902, 28340), 'catenets.models.jax.base.train_output_net_only', 'train_output_net_only', (['X_fit', 'w_fit'], {'binary_y': '(True)', 'n_layers_out': 'n_layers_out', 'n_units_out': 'n_units_out', 'n_layers_r': 'n_layers_r', 'n_units_r': 'n_units_r', 'penalty_l2': 'penalty_l2', 'step_size': 'step_size', 'n_iter': 'n_iter', 'batch_size': 'batch_size', 'val_split_prop': 'val_split_prop', 'early_stopping': 'early_stopping', 'patience': 'patience', 'n_iter_min': 'n_iter_min', 'n_iter_print': 'n_iter_print', 'seed': 'seed', 'nonlin': 'nonlin', 'avg_objective': 'avg_objective'}), '(X_fit, w_fit, binary_y=True, n_layers_out=\n n_layers_out, n_units_out=n_units_out, n_layers_r=n_layers_r, n_units_r\n =n_units_r, penalty_l2=penalty_l2, step_size=step_size, n_iter=n_iter,\n batch_size=batch_size, val_split_prop=val_split_prop, early_stopping=\n early_stopping, patience=patience, n_iter_min=n_iter_min, n_iter_print=\n n_iter_print, seed=seed, nonlin=nonlin, avg_objective=avg_objective)\n', (27923, 28340), False, 'from catenets.models.jax.base import BaseCATENet, train_output_net_only\n'), ((18573, 18640), 'catenets.logger.debug', 'log.debug', (['"""Training first stage with all data (no data splitting)"""'], {}), "('Training first stage with all data (no data splitting)')\n", (18582, 18640), True, 'import catenets.logger as log\n'), ((18708, 18731), 'numpy.ones', 'onp.ones', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (18716, 18731), True, 'import numpy as onp\n'), ((18760, 18783), 'numpy.ones', 'onp.ones', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (18768, 18783), True, 'import numpy as onp\n'), ((18818, 18890), 'catenets.logger.debug', 'log.debug', (['"""Training first stage with half of the data (data splitting)"""'], {}), "('Training first stage with half of the data (data splitting)')\n", (18827, 18890), True, 'import catenets.logger 
as log\n'), ((19025, 19049), 'numpy.zeros', 'onp.zeros', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (19034, 19049), True, 'import numpy as onp\n'), ((20515, 20532), 'numpy.zeros', 'onp.zeros', (['(n, 1)'], {}), '((n, 1))\n', (20524, 20532), True, 'import numpy as onp\n'), ((20534, 20551), 'numpy.zeros', 'onp.zeros', (['(n, 1)'], {}), '((n, 1))\n', (20543, 20551), True, 'import numpy as onp\n'), ((20553, 20570), 'numpy.zeros', 'onp.zeros', (['(n, 1)'], {}), '((n, 1))\n', (20562, 20570), True, 'import numpy as onp\n'), ((20800, 20841), 'catenets.logger.debug', 'log.debug', (['f"""Training fold {fold_count}."""'], {}), "(f'Training fold {fold_count}.')\n", (20809, 20841), True, 'import catenets.logger as log\n'), ((20915, 20939), 'numpy.zeros', 'onp.zeros', (['n'], {'dtype': 'bool'}), '(n, dtype=bool)\n', (20924, 20939), True, 'import numpy as onp\n'), ((18979, 18995), 'numpy.round', 'onp.round', (['(n / 2)'], {}), '(n / 2)\n', (18988, 18995), True, 'import numpy as onp\n')] |
import tensorflow as tf
class GParrotLogger:
def __init__(self, logdir, ali_path='ali'):
# super(ParrotLogger, self).__init__(logdir)
self.writer = tf.summary.create_file_writer(logdir)
def log_training(self, train_loss, loss_list, accuracy_list, grad_norm, learning_rate, duration, iteration):
(speaker_encoder_loss, gender_autoencoder_loss, gender_classification_loss, gender_adv_loss,
gender_autoencoder_destandardized_loss) = loss_list
speaker_encoder_acc, gender_classification_acc = accuracy_list
with self.writer.as_default():
tf.summary.scalar("training.loss", train_loss, iteration)
tf.summary.scalar("training.loss.spenc", speaker_encoder_loss, iteration)
tf.summary.scalar("training.loss.gauto", gender_autoencoder_loss, iteration)
tf.summary.scalar("training.loss.gautotrue", gender_autoencoder_destandardized_loss, iteration)
tf.summary.scalar("training.loss.gcla", gender_classification_loss, iteration)
tf.summary.scalar("training.loss.gadv", gender_adv_loss, iteration)
tf.summary.scalar('training.acc.spenc', speaker_encoder_acc, iteration)
tf.summary.scalar('training.acc.gcla', gender_classification_acc, iteration)
tf.summary.scalar("grad.norm", grad_norm, iteration)
tf.summary.scalar("learning.rate", learning_rate, iteration)
tf.summary.scalar("duration", duration, iteration)
self.writer.flush()
def log_validation(self, val_loss, loss_list, accuracy_list, iteration):
(speaker_encoder_loss, gender_autoencoder_loss, gender_classification_loss, gender_adv_loss,
gender_autoencoder_destandardized_loss) = loss_list
speaker_encoder_acc, gender_classification_acc = accuracy_list
with self.writer.as_default():
tf.summary.scalar("validation.loss", val_loss, iteration)
tf.summary.scalar("validation.loss.spenc", speaker_encoder_loss, iteration)
tf.summary.scalar("validation.loss.gauto", gender_autoencoder_loss, iteration)
tf.summary.scalar("validation.loss.gautotrue", gender_autoencoder_destandardized_loss, iteration)
tf.summary.scalar("validation.loss.gcla", gender_classification_loss, iteration)
tf.summary.scalar("validation.loss.gadv", gender_adv_loss, iteration)
tf.summary.scalar('validation.acc.spenc', speaker_encoder_acc, iteration)
tf.summary.scalar('validation.acc.gcla', gender_classification_acc, iteration)
self.writer.flush()
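if __name__ == "__main__":
    # Illustrative smoke test (not in the original file): write one step of
    # dummy scalars so the event files can be inspected with TensorBoard.
    logger = GParrotLogger("/tmp/gparrot_logs")
    losses = [0.5, 0.4, 0.3, 0.2, 0.1]  # spenc, gauto, gcla, gadv, gautotrue (order matches the unpacking above)
    accs = [0.9, 0.8]  # speaker-encoder and gender-classifier accuracy
    logger.log_training(1.5, losses, accs, grad_norm=2.0, learning_rate=1e-3,
                        duration=0.12, iteration=1)
    logger.log_validation(1.4, losses, accs, iteration=1)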
| [
"tensorflow.summary.scalar",
"tensorflow.summary.create_file_writer"
] | [((292, 329), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (321, 329), True, 'import tensorflow as tf\n'), ((729, 786), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.loss"""', 'train_loss', 'iteration'], {}), "('training.loss', train_loss, iteration)\n", (746, 786), True, 'import tensorflow as tf\n'), ((799, 872), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.loss.spenc"""', 'speaker_encoder_loss', 'iteration'], {}), "('training.loss.spenc', speaker_encoder_loss, iteration)\n", (816, 872), True, 'import tensorflow as tf\n'), ((885, 961), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.loss.gauto"""', 'gender_autoencoder_loss', 'iteration'], {}), "('training.loss.gauto', gender_autoencoder_loss, iteration)\n", (902, 961), True, 'import tensorflow as tf\n'), ((974, 1073), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.loss.gautotrue"""', 'gender_autoencoder_destandardized_loss', 'iteration'], {}), "('training.loss.gautotrue',\n gender_autoencoder_destandardized_loss, iteration)\n", (991, 1073), True, 'import tensorflow as tf\n'), ((1082, 1160), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.loss.gcla"""', 'gender_classification_loss', 'iteration'], {}), "('training.loss.gcla', gender_classification_loss, iteration)\n", (1099, 1160), True, 'import tensorflow as tf\n'), ((1173, 1240), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.loss.gadv"""', 'gender_adv_loss', 'iteration'], {}), "('training.loss.gadv', gender_adv_loss, iteration)\n", (1190, 1240), True, 'import tensorflow as tf\n'), ((1254, 1325), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.acc.spenc"""', 'speaker_encoder_acc', 'iteration'], {}), "('training.acc.spenc', speaker_encoder_acc, iteration)\n", (1271, 1325), True, 'import tensorflow as tf\n'), ((1338, 1414), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""training.acc.gcla"""', 'gender_classification_acc', 'iteration'], {}), "('training.acc.gcla', gender_classification_acc, iteration)\n", (1355, 1414), True, 'import tensorflow as tf\n'), ((1428, 1480), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""grad.norm"""', 'grad_norm', 'iteration'], {}), "('grad.norm', grad_norm, iteration)\n", (1445, 1480), True, 'import tensorflow as tf\n'), ((1493, 1553), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""learning.rate"""', 'learning_rate', 'iteration'], {}), "('learning.rate', learning_rate, iteration)\n", (1510, 1553), True, 'import tensorflow as tf\n'), ((1566, 1616), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""duration"""', 'duration', 'iteration'], {}), "('duration', duration, iteration)\n", (1583, 1616), True, 'import tensorflow as tf\n'), ((2011, 2068), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.loss"""', 'val_loss', 'iteration'], {}), "('validation.loss', val_loss, iteration)\n", (2028, 2068), True, 'import tensorflow as tf\n'), ((2081, 2156), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.loss.spenc"""', 'speaker_encoder_loss', 'iteration'], {}), "('validation.loss.spenc', speaker_encoder_loss, iteration)\n", (2098, 2156), True, 'import tensorflow as tf\n'), ((2169, 2247), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.loss.gauto"""', 'gender_autoencoder_loss', 'iteration'], {}), "('validation.loss.gauto', gender_autoencoder_loss, iteration)\n", (2186, 2247), True, 'import tensorflow as 
tf\n'), ((2260, 2361), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.loss.gautotrue"""', 'gender_autoencoder_destandardized_loss', 'iteration'], {}), "('validation.loss.gautotrue',\n gender_autoencoder_destandardized_loss, iteration)\n", (2277, 2361), True, 'import tensorflow as tf\n'), ((2370, 2455), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.loss.gcla"""', 'gender_classification_loss', 'iteration'], {}), "('validation.loss.gcla', gender_classification_loss, iteration\n )\n", (2387, 2455), True, 'import tensorflow as tf\n'), ((2463, 2532), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.loss.gadv"""', 'gender_adv_loss', 'iteration'], {}), "('validation.loss.gadv', gender_adv_loss, iteration)\n", (2480, 2532), True, 'import tensorflow as tf\n'), ((2546, 2619), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.acc.spenc"""', 'speaker_encoder_acc', 'iteration'], {}), "('validation.acc.spenc', speaker_encoder_acc, iteration)\n", (2563, 2619), True, 'import tensorflow as tf\n'), ((2632, 2710), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation.acc.gcla"""', 'gender_classification_acc', 'iteration'], {}), "('validation.acc.gcla', gender_classification_acc, iteration)\n", (2649, 2710), True, 'import tensorflow as tf\n')] |
import pyspiel
game = pyspiel.load_game('bridge(use_double_dummy_result=false)')
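# The integers below replay one complete deal (our reading of the encoding,
# not documented in the original snippet): the first 52 actions are chance
# moves dealing the cards, the following values >= 52 are bidding calls, and
# the trailing 52 actions are the card plays.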
line = '30 32 10 35 50 45 21 7 1 42 39 43 0 16 40 20 36 15 22 44 26 6 4 51 47 46 25 14 29 5 34 11 49 31 37 9 41 13 24 8 28 17 48 23 33 18 3 19 38 2 27 12 56 57 52 63 52 52 52 0 32 48 8 3 51 47 15 44 28 16 4 14 50 2 10 49 5 37 9 36 31 24 20 46 22 12 26 13 25 19 1 43 41 17 27 7 33 45 39 40 23 29 6 11 30 18 21 35 38 42 34'
actions = (int(x) for x in line.split(' '))
state = game.new_initial_state()
for a in actions:
    state.apply_action(a)
print(state)
| [
"pyspiel.load_game"
] | [((22, 80), 'pyspiel.load_game', 'pyspiel.load_game', (['"""bridge(use_double_dummy_result=false)"""'], {}), "('bridge(use_double_dummy_result=false)')\n", (39, 80), False, 'import pyspiel\n')] |
import pytest
from gorgona.stages.cleaners import NumberCleaner
@pytest.fixture()
def setup_number_cleaner():
nc = NumberCleaner(
'',
'',
)
return nc
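# Behaviour inferred from the assertions below (our reading, not taken from
# gorgona's docs): NumberCleaner strips number tokens -- optionally signed,
# possibly split by an apostrophe ('), a backtick (`), a space, or a
# Latin/Cyrillic "k"/"к" marker -- while leaving surrounding text intact.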
def test_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("7") == ""
def test_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("3") == ""
def test_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("9'5") == ""
def test_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("0'257175") == ""
def test_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("9`9") == ""
def test_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("0`985776") == ""
def test_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("5 6") == ""
def test_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("3 839118") == ""
def test_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("4k6") == ""
def test_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("3k504421") == ""
def test_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("4к4") == ""
def test_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("5к117864") == ""
def test_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("774464") == ""
def test_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("35655") == ""
def test_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("249910'9") == ""
def test_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("480142'838693") == ""
def test_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("154095`1") == ""
def test_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("85818`184705") == ""
def test_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("306485 3") == ""
def test_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("22721 546337") == ""
def test_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("464830k0") == ""
def test_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("955186k918058") == ""
def test_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("570511к2") == ""
def test_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("564964к869484") == ""
def test_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-4") == ""
def test_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-5") == ""
def test_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-0'0") == ""
def test_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-8'803962") == ""
def test_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-0`5") == ""
def test_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-7`895475") == ""
def test_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-9 8") == ""
def test_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-8 551966") == ""
def test_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-2k5") == ""
def test_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-3k484318") == ""
def test_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-2к5") == ""
def test_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-3к283697") == ""
def test_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-138166") == ""
def test_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-94352") == ""
def test_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-473778'5") == ""
def test_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-787864'453129") == ""
def test_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-911004`4") == ""
def test_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-392620`715189") == ""
def test_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-908466 6") == ""
def test_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-279418 645330") == ""
def test_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-591608k5") == ""
def test_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-997435k133244") == ""
def test_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-172174к1") == ""
def test_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-733910к513370") == ""
def test_left_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 4") == "hello "
def test_left_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 7") == "hello "
def test_left_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 3'5") == "hello "
def test_left_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 1'414237") == "hello "
def test_left_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 2`5") == "hello "
def test_left_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 6`792669") == "hello "
def test_left_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 8 6") == "hello "
def test_left_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 4 732535") == "hello "
def test_left_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 7k2") == "hello "
def test_left_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 9k798422") == "hello "
def test_left_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 0к2") == "hello "
def test_left_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 6к449708") == "hello "
def test_left_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 84908") == "hello "
def test_left_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 434178") == "hello "
def test_left_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 580178'5") == "hello "
def test_left_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 403087'446030") == "hello "
def test_left_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 99510`9") == "hello "
def test_left_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 880343`699877") == "hello "
def test_left_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 525007 2") == "hello "
def test_left_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 872947 296824") == "hello "
def test_left_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 450966k4") == "hello "
def test_left_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 993633k963503") == "hello "
def test_left_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 902081к2") == "hello "
def test_left_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 398410к5738") == "hello "
def test_left_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -6") == "hello "
def test_left_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -6") == "hello "
def test_left_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -6'2") == "hello "
def test_left_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -3'759377") == "hello "
def test_left_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -7`1") == "hello "
def test_left_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -1`502604") == "hello "
def test_left_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -2 3") == "hello "
def test_left_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -1 393569") == "hello "
def test_left_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -6k3") == "hello "
def test_left_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -1k432422") == "hello "
def test_left_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -5к5") == "hello "
def test_left_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -1к68404") == "hello "
def test_left_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -518862") == "hello "
def test_left_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -311825") == "hello "
def test_left_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -13646'6") == "hello "
def test_left_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -155588'658068") == "hello "
def test_left_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -902010`6") == "hello "
def test_left_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -339050`817304") == "hello "
def test_left_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -923620 6") == "hello "
def test_left_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -277075 908827") == "hello "
def test_left_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -770630k5") == "hello "
def test_left_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -543724k219469") == "hello "
def test_left_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -118460к2") == "hello "
def test_left_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -159072к256757") == "hello "
def test_right_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("2 hello") == " hello"
def test_right_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("1 hello") == " hello"
def test_right_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("6'4 hello") == " hello"
def test_right_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("3'58431 hello") == " hello"
def test_right_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("0`5 hello") == " hello"
def test_right_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("5`155738 hello") == " hello"
def test_right_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("5 3 hello") == " hello"
def test_right_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("2 912797 hello") == " hello"
def test_right_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("5k3 hello") == " hello"
def test_right_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("9k911768 hello") == " hello"
def test_right_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("3к3 hello") == " hello"
def test_right_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("3к750248 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("42678 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("215188 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("455258'3 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("806580'611928 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("479352`5 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("519252`685635 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("928184 7 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("489262 493403 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("34773k1 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("675960k827611 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("876524к5 hello") == " hello"
def test_right_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("55243к431074 hello") == " hello"
def test_right_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-7 hello") == " hello"
def test_right_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-1 hello") == " hello"
def test_right_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-5'2 hello") == " hello"
def test_right_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-9'814320 hello") == " hello"
def test_right_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-0`8 hello") == " hello"
def test_right_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-3`877194 hello") == " hello"
def test_right_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-8 6 hello") == " hello"
def test_right_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-3 873345 hello") == " hello"
def test_right_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-8k9 hello") == " hello"
def test_right_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-5k346049 hello") == " hello"
def test_right_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-4к6 hello") == " hello"
def test_right_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-9к703473 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-190239 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-839965 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-517738'9 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-764801'614671 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-634963`9 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-372948`939025 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-760889 7 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-7831 504330 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-837557k3 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-195729k572621 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("-355848к0 hello") == " hello"
def test_right_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("-665426к392704 hello") == " hello"
def test_both_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 4 world") == "hello world"
def test_both_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 8 world") == "hello world"
def test_both_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 6'2 world") == "hello world"
def test_both_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 3'622671 world") == "hello world"
def test_both_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 6`0 world") == "hello world"
def test_both_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 8`757195 world") == "hello world"
def test_both_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 0 1 world") == "hello world"
def test_both_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 7 862462 world") == "hello world"
def test_both_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 8k5 world") == "hello world"
def test_both_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 3k314471 world") == "hello world"
def test_both_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 2к5 world") == "hello world"
def test_both_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 9к486783 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 805686 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 369355 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 163343'0 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 461408'736785 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 864015`2 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 647078`653487 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 222917 9 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 564211 641276 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 440821k8 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 845780k860446 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello 81289к1 world") == "hello world"
def test_both_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello 146234к484167 world") == "hello world"
def test_both_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -4 world") == "hello world"
def test_both_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -0 world") == "hello world"
def test_both_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -4'9 world") == "hello world"
def test_both_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -5'387080 world") == "hello world"
def test_both_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -3`8 world") == "hello world"
def test_both_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -0`385330 world") == "hello world"
def test_both_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -7 7 world") == "hello world"
def test_both_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -1 245555 world") == "hello world"
def test_both_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -4k4 world") == "hello world"
def test_both_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -7k737481 world") == "hello world"
def test_both_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -3к8 world") == "hello world"
def test_both_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -4к979649 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -579549 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -521868 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -494030'8 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -997018'388418 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -48935`6 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -115491`848265 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -373023 5 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -526547 383697 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -304461k5 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -308120k521264 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("hello -230268к9 world") == "hello world"
def test_both_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("hello -695525к628100 world") == "hello world"
def test_inside_text_positive_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he4llo") == "he4llo"
def test_inside_text_positive_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he8llo") == "he8llo"
def test_inside_text_positive_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he0'8llo") == "he0'8llo"
def test_inside_text_positive_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he8'503290llo") == "he8'503290llo"
def test_inside_text_positive_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he3`3llo") == "he3`3llo"
def test_inside_text_positive_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he0`179192llo") == "he0`179192llo"
def test_inside_text_positive_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he2 4llo") == "he2 4llo"
def test_inside_text_positive_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he3 135087llo") == "he3 135087llo"
def test_inside_text_positive_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he8k4llo") == "he8k4llo"
def test_inside_text_positive_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he0k657610llo") == "he0k657610llo"
def test_inside_text_positive_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he9к2llo") == "he9к2llo"
def test_inside_text_positive_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he6к839529llo") == "he6к839529llo"
def test_inside_text_positive_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he513934llo") == "he513934llo"
def test_inside_text_positive_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he424141llo") == "he424141llo"
def test_inside_text_positive_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he757949'6llo") == "he757949'6llo"
def test_inside_text_positive_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he650035'989071llo") == "he650035'989071llo"
def test_inside_text_positive_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he849767`6llo") == "he849767`6llo"
def test_inside_text_positive_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he234327`915339llo") == "he234327`915339llo"
def test_inside_text_positive_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he703293 5llo") == "he703293 5llo"
def test_inside_text_positive_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he409856 70023llo") == "he409856 70023llo"
def test_inside_text_positive_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he744620k6llo") == "he744620k6llo"
def test_inside_text_positive_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he743290k231362llo") == "he743290k231362llo"
def test_inside_text_positive_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he791511к3llo") == "he791511к3llo"
def test_inside_text_positive_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he401092к788202llo") == "he401092к788202llo"
def test_inside_text_negative_integer_single_digit_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-4llo") == "he-4llo"
def test_inside_text_negative_integer_single_digit_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-8llo") == "he-8llo"
def test_inside_text_negative_integer_single_digit_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-3'3llo") == "he-3'3llo"
def test_inside_text_negative_integer_single_digit_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-4'290601llo") == "he-4'290601llo"
def test_inside_text_negative_integer_single_digit_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-7`0llo") == "he-7`0llo"
def test_inside_text_negative_integer_single_digit_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-6`707325llo") == "he-6`707325llo"
def test_inside_text_negative_integer_single_digit_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-9 3llo") == "he-9 3llo"
def test_inside_text_negative_integer_single_digit_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-0 183754llo") == "he-0 183754llo"
def test_inside_text_negative_integer_single_digit_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-1k4llo") == "he-1k4llo"
def test_inside_text_negative_integer_single_digit_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-3k878581llo") == "he-3k878581llo"
def test_inside_text_negative_integer_single_digit_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-0к0llo") == "he-0к0llo"
def test_inside_text_negative_integer_single_digit_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-6к377555llo") == "he-6к377555llo"
def test_inside_text_negative_integer_multiple_digits_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-598986llo") == "he-598986llo"
def test_inside_text_negative_integer_multiple_digits_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-393398llo") == "he-393398llo"
def test_inside_text_negative_integer_multiple_digits_quote_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-890636'7llo") == "he-890636'7llo"
def test_inside_text_negative_integer_multiple_digits_quote_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-834451'288314llo") == "he-834451'288314llo"
def test_inside_text_negative_integer_multiple_digits_apostrophe_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-347856`8llo") == "he-347856`8llo"
def test_inside_text_negative_integer_multiple_digits_apostrophe_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-504475`759252llo") == "he-504475`759252llo"
def test_inside_text_negative_integer_multiple_digits_space_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-349749 9llo") == "he-349749 9llo"
def test_inside_text_negative_integer_multiple_digits_space_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-184038 68144llo") == "he-184038 68144llo"
def test_inside_text_negative_integer_multiple_digits_eng_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-289290k6llo") == "he-289290k6llo"
def test_inside_text_negative_integer_multiple_digits_eng_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-964399k733553llo") == "he-964399k733553llo"
def test_inside_text_negative_integer_multiple_digits_rus_k_single_digit(setup_number_cleaner):
assert setup_number_cleaner("he-63989к5llo") == "he-63989к5llo"
def test_inside_text_negative_integer_multiple_digits_rus_k_multiple_digits(setup_number_cleaner):
assert setup_number_cleaner("he-403175к774771llo") == "he-403175к774771llo"
def test_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("2.9") == ""
def test_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("8.569333") == ""
def test_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("5,0") == ""
def test_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("1,780518") == ""
def test_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("785313.5") == ""
def test_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("537221.74655") == ""
def test_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("391240,8") == ""
def test_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("181004,460352") == ""
def test_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-9.6") == ""
def test_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-8.258030") == ""
def test_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-7,1") == ""
def test_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-0,885164") == ""
def test_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-864605.4") == ""
def test_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-355839.416791") == ""
def test_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-578243,4") == ""
def test_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-98767,817853") == ""
def test_left_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 4.6") == "hello "
def test_left_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 1.74914") == "hello "
def test_left_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 3,5") == "hello "
def test_left_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 2,8995") == "hello "
def test_left_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 128684.7") == "hello "
def test_left_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 832606.932249") == "hello "
def test_left_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 377802,4") == "hello "
def test_left_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 762367,135153") == "hello "
def test_left_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -1.8") == "hello "
def test_left_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -5.792708") == "hello "
def test_left_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -2,5") == "hello "
def test_left_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -5,888953") == "hello "
def test_left_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -486940.5") == "hello "
def test_left_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -716193.653169") == "hello "
def test_left_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -892150,7") == "hello "
def test_left_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -825361,420340") == "hello "
def test_right_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("9.7 hello") == " hello"
def test_right_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("8.668371 hello") == " hello"
def test_right_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("6,9 hello") == " hello"
def test_right_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("9,934089 hello") == " hello"
def test_right_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("243369.1 hello") == " hello"
def test_right_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("424756.17786 hello") == " hello"
def test_right_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("922173,3 hello") == " hello"
def test_right_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("829857,999977 hello") == " hello"
def test_right_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-1.8 hello") == " hello"
def test_right_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-5.743926 hello") == " hello"
def test_right_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-1,9 hello") == " hello"
def test_right_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-3,740022 hello") == " hello"
def test_right_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-746442.5 hello") == " hello"
def test_right_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-796358.785568 hello") == " hello"
def test_right_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("-162965,8 hello") == " hello"
def test_right_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("-510271,12306 hello") == " hello"
def test_both_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 2.6 world") == "hello world"
def test_both_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 6.756683 world") == "hello world"
def test_both_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 6,3 world") == "hello world"
def test_both_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 1,84108 world") == "hello world"
def test_both_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 430035.4 world") == "hello world"
def test_both_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 547739.554345 world") == "hello world"
def test_both_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 26171,1 world") == "hello world"
def test_both_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello 666557,952575 world") == "hello world"
def test_both_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -1.0 world") == "hello world"
def test_both_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -1.445504 world") == "hello world"
def test_both_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -7,7 world") == "hello world"
def test_both_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -3,87658 world") == "hello world"
def test_both_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -477476.4 world") == "hello world"
def test_both_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -541300.867811 world") == "hello world"
def test_both_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -708842,4 world") == "hello world"
def test_both_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("hello -741041,952275 world") == "hello world"
def test_inside_text_positive_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he4.9llo") == "he4.9llo"
def test_inside_text_positive_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he4.605648llo") == "he4.605648llo"
def test_inside_text_positive_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he7,6llo") == "he7,6llo"
def test_inside_text_positive_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he1,640808llo") == "he1,640808llo"
def test_inside_text_positive_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he311010.5llo") == "he311010.5llo"
def test_inside_text_positive_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he593407.960145llo") == "he593407.960145llo"
def test_inside_text_positive_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he318574,7llo") == "he318574,7llo"
def test_inside_text_positive_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he113354,321762llo") == "he113354,321762llo"
def test_inside_text_negative_float_single_digit_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-1.7llo") == "he-1.7llo"
def test_inside_text_negative_float_single_digit_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-5.347666llo") == "he-5.347666llo"
def test_inside_text_negative_float_single_digit_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-1,5llo") == "he-1,5llo"
def test_inside_text_negative_float_single_digit_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-0,785082llo") == "he-0,785082llo"
def test_inside_text_negative_float_multiple_digits_dot_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-19847.2llo") == "he-19847.2llo"
def test_inside_text_negative_float_multiple_digits_dot_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-163691.435539llo") == "he-163691.435539llo"
def test_inside_text_negative_float_multiple_digits_comma_with_single_digit_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-416740,2llo") == "he-416740,2llo"
def test_inside_text_negative_float_multiple_digits_comma_with_multiple_digits_fraction(setup_number_cleaner):
assert setup_number_cleaner("he-117470,870470llo") == "he-117470,870470llo"
| [
"pytest.fixture",
"gorgona.stages.cleaners.NumberCleaner"
] | [((68, 84), 'pytest.fixture', 'pytest.fixture', ([], {}), '()\n', (82, 84), False, 'import pytest\n'), ((122, 143), 'gorgona.stages.cleaners.NumberCleaner', 'NumberCleaner', (['""""""', '""""""'], {}), "('', '')\n", (135, 143), False, 'from gorgona.stages.cleaners import NumberCleaner\n')] |
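The fixture these tests depend on is not shown in the row above; the api and extract_api columns hint at a module-level fixture built from pytest.fixture and NumberCleaner('', ''). A plausible reconstruction follows (the constructor arguments and the instance being callable on a string are assumptions, not confirmed by the source):

import pytest
from gorgona.stages.cleaners import NumberCleaner

@pytest.fixture()
def setup_number_cleaner():
    # Assumed: NumberCleaner('', '') yields an object that is callable on a
    # string and returns the cleaned string, which is how the tests above use it.
    return NumberCleaner('', '')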
#! /usr/bin/env python3
import argparse
import atexit
import json
import os
import subprocess
import sys
import time
import unittest
from test_device import (
Bitcoind,
DeviceEmulator,
DeviceTestCase,
TestDeviceConnect,
TestGetKeypool,
TestGetDescriptors,
TestSignTx,
)
from hwilib.devices.digitalbitbox import BitboxSimulator, send_plain, send_encrypt
class BitBox01Emulator(DeviceEmulator):
def __init__(self, simulator):
try:
os.unlink('bitbox-emulator.stderr')
except FileNotFoundError:
pass
self.simulator = simulator
self.bitbox_log = None
self.simulator_proc = None
self.type = 'digitalbitbox'
self.path = 'udp:127.0.0.1:35345'
self.fingerprint = '<PASSWORD>'
self.master_xpub = "<KEY>"
self.password = "<PASSWORD>"
self.supports_ms_display = False
self.supports_xpub_ms_display = False
self.supports_unsorted_ms = False
self.supports_taproot = False
self.strict_bip48 = False
self.include_xpubs = False
self.supports_device_multiple_multisig = True
def start(self):
super().start()
self.bitbox_log = open('bitbox-emulator.stderr', 'a')
        # Start the Digital Bitbox simulator
self.simulator_proc = subprocess.Popen(
[
'./' + os.path.basename(self.simulator),
'../../tests/sd_files/'
],
cwd=os.path.dirname(self.simulator),
stderr=self.bitbox_log
)
# Wait for simulator to be up
while True:
try:
self.dev = BitboxSimulator('127.0.0.1', 35345)
reply = send_plain(b'{"password":"<PASSWORD>"}', self.dev)
if 'error' not in reply:
break
except Exception:
pass
time.sleep(0.5)
# Set password and load from backup
send_encrypt(json.dumps({"seed": {"source": "backup", "filename": "test_backup.pdf", "key": "key"}}), '0000', self.dev)
atexit.register(self.stop)
def stop(self):
super().stop()
self.simulator_proc.terminate()
self.simulator_proc.wait()
self.bitbox_log.close()
atexit.unregister(self.stop)
# DigitalBitbox specific management command tests
class TestDBBManCommands(DeviceTestCase):
def test_restore(self):
result = self.do_command(self.dev_args + ['-i', 'restore'])
self.assertIn('error', result)
self.assertIn('code', result)
self.assertEqual(result['error'], 'The Digital Bitbox does not support restoring via software')
self.assertEqual(result['code'], -9)
def test_pin(self):
result = self.do_command(self.dev_args + ['promptpin'])
self.assertIn('error', result)
self.assertIn('code', result)
self.assertEqual(result['error'], 'The Digital Bitbox does not need a PIN sent from the host')
self.assertEqual(result['code'], -9)
result = self.do_command(self.dev_args + ['sendpin', '1234'])
self.assertIn('error', result)
self.assertIn('code', result)
self.assertEqual(result['error'], 'The Digital Bitbox does not need a PIN sent from the host')
self.assertEqual(result['code'], -9)
def test_display(self):
result = self.do_command(self.dev_args + ['displayaddress', '--path', 'm/0h'])
self.assertIn('error', result)
self.assertIn('code', result)
self.assertEqual(result['error'], 'The Digital Bitbox does not have a screen to display addresses on')
self.assertEqual(result['code'], -9)
def test_setup_wipe(self):
# Device is init, setup should fail
result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test', '--backup_passphrase', '<PASSWORD>'])
        self.assertEqual(result['code'], -10)
        self.assertEqual(result['error'], 'Device is already initialized. Use wipe first and try again')
# Wipe
result = self.do_command(self.dev_args + ['wipe'])
self.assertTrue(result['success'])
# Check arguments
result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a new Digital Bitbox wallet must be specified and cannot be empty')
        result = self.do_command(self.dev_args + ['-i', 'setup', '--backup_passphrase', '<PASSWORD>'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a new Digital Bitbox wallet must be specified and cannot be empty')
# Setup
result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test', '--backup_passphrase', '<PASSWORD>'])
self.assertTrue(result['success'])
# Reset back to original
result = self.do_command(self.dev_args + ['wipe'])
self.assertTrue(result['success'])
send_plain(b'{"password":"<PASSWORD>"}', self.emulator.dev)
send_encrypt(json.dumps({"seed": {"source": "backup", "filename": "test_backup.pdf", "key": "key"}}), '0000', self.emulator.dev)
# Make sure device is init, setup should fail
result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'setup_test', '--backup_passphrase', '<PASSWORD>'])
        self.assertEqual(result['code'], -10)
        self.assertEqual(result['error'], 'Device is already initialized. Use wipe first and try again')
def test_backup(self):
# Check arguments
result = self.do_command(self.dev_args + ['backup', '--label', 'backup_test'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a Digital Bitbox backup must be specified and cannot be empty')
        result = self.do_command(self.dev_args + ['backup', '--backup_passphrase', 'key'])
        self.assertEqual(result['code'], -7)
        self.assertEqual(result['error'], 'The label and backup passphrase for a Digital Bitbox backup must be specified and cannot be empty')
# Wipe
result = self.do_command(self.dev_args + ['wipe'])
self.assertTrue(result['success'])
# Setup
result = self.do_command(self.dev_args + ['-i', 'setup', '--label', 'backup_test', '--backup_passphrase', '<PASSWORD>'])
self.assertTrue(result['success'])
# make the backup
result = self.do_command(self.dev_args + ['backup', '--label', 'backup_test_backup', '--backup_passphrase', 'testpass'])
self.assertTrue(result['success'])
class TestBitboxGetXpub(DeviceTestCase):
def test_getxpub(self):
self.dev_args.remove('--chain')
self.dev_args.remove('test')
result = self.do_command(self.dev_args + ['--expert', 'getxpub', 'm/44h/0h/0h/3'])
self.assertEqual(result['xpub'], '<KEY>')
self.assertFalse(result['testnet'])
self.assertFalse(result['private'])
self.assertEqual(result['depth'], 4)
self.assertEqual(result['parent_fingerprint'], '31d5e5ea')
self.assertEqual(result['child_num'], 3)
self.assertEqual(result['chaincode'], '7062818c752f878bf96ca668f77630452c3fa033b7415eed3ff568e04ada8104')
self.assertEqual(result['pubkey'], '029078c9ad8421afd958d7bc054a0952874923e2586fc9375604f0479a354ea193')
def digitalbitbox_test_suite(simulator, bitcoind, interface):
dev_emulator = BitBox01Emulator(simulator)
signtx_cases = [
(["legacy"], ["legacy"], True, True),
(["segwit"], ["segwit"], True, True),
(["legacy", "segwit"], ["legacy", "segwit"], True, True),
]
# Generic Device tests
suite = unittest.TestSuite()
suite.addTest(DeviceTestCase.parameterize(TestDBBManCommands, bitcoind, emulator=dev_emulator, interface=interface))
suite.addTest(DeviceTestCase.parameterize(TestBitboxGetXpub, bitcoind, emulator=dev_emulator, interface=interface))
suite.addTest(DeviceTestCase.parameterize(TestDeviceConnect, bitcoind, emulator=dev_emulator, interface=interface, detect_type="digitalbitbox"))
suite.addTest(DeviceTestCase.parameterize(TestDeviceConnect, bitcoind, emulator=dev_emulator, interface=interface, detect_type="digitalbitbox_01_simulator"))
suite.addTest(DeviceTestCase.parameterize(TestGetDescriptors, bitcoind, emulator=dev_emulator, interface=interface))
suite.addTest(DeviceTestCase.parameterize(TestGetKeypool, bitcoind, emulator=dev_emulator, interface=interface))
suite.addTest(DeviceTestCase.parameterize(TestSignTx, bitcoind, emulator=dev_emulator, interface=interface, signtx_cases=signtx_cases))
result = unittest.TextTestRunner(stream=sys.stdout, verbosity=2).run(suite)
return result.wasSuccessful()
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Test Digital Bitbox implementation')
parser.add_argument('simulator', help='Path to simulator binary')
parser.add_argument('bitcoind', help='Path to bitcoind binary')
parser.add_argument('--interface', help='Which interface to send commands over', choices=['library', 'cli', 'bindist'], default='library')
args = parser.parse_args()
# Start bitcoind
bitcoind = Bitcoind.create(args.bitcoind)
sys.exit(not digitalbitbox_test_suite(args.simulator, bitcoind, args.interface))
| [
"unittest.TestSuite",
"argparse.ArgumentParser",
"test_device.DeviceTestCase.parameterize",
"test_device.Bitcoind.create",
"json.dumps",
"unittest.TextTestRunner",
"time.sleep",
"hwilib.devices.digitalbitbox.send_plain",
"os.path.dirname",
"hwilib.devices.digitalbitbox.BitboxSimulator",
"os.unli... | [((7871, 7891), 'unittest.TestSuite', 'unittest.TestSuite', ([], {}), '()\n', (7889, 7891), False, 'import unittest\n'), ((8978, 9051), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Test Digital Bitbox implementation"""'}), "(description='Test Digital Bitbox implementation')\n", (9001, 9051), False, 'import argparse\n'), ((9401, 9431), 'test_device.Bitcoind.create', 'Bitcoind.create', (['args.bitcoind'], {}), '(args.bitcoind)\n', (9416, 9431), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((2115, 2141), 'atexit.register', 'atexit.register', (['self.stop'], {}), '(self.stop)\n', (2130, 2141), False, 'import atexit\n'), ((2301, 2329), 'atexit.unregister', 'atexit.unregister', (['self.stop'], {}), '(self.stop)\n', (2318, 2329), False, 'import atexit\n'), ((5120, 5179), 'hwilib.devices.digitalbitbox.send_plain', 'send_plain', (['b\'{"password":"<PASSWORD>"}\'', 'self.emulator.dev'], {}), '(b\'{"password":"<PASSWORD>"}\', self.emulator.dev)\n', (5130, 5179), False, 'from hwilib.devices.digitalbitbox import BitboxSimulator, send_plain, send_encrypt\n'), ((7910, 8016), 'test_device.DeviceTestCase.parameterize', 'DeviceTestCase.parameterize', (['TestDBBManCommands', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface'}), '(TestDBBManCommands, bitcoind, emulator=\n dev_emulator, interface=interface)\n', (7937, 8016), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((8031, 8136), 'test_device.DeviceTestCase.parameterize', 'DeviceTestCase.parameterize', (['TestBitboxGetXpub', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface'}), '(TestBitboxGetXpub, bitcoind, emulator=\n dev_emulator, interface=interface)\n', (8058, 8136), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((8151, 8285), 'test_device.DeviceTestCase.parameterize', 'DeviceTestCase.parameterize', (['TestDeviceConnect', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface', 'detect_type': '"""digitalbitbox"""'}), "(TestDeviceConnect, bitcoind, emulator=\n dev_emulator, interface=interface, detect_type='digitalbitbox')\n", (8178, 8285), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((8300, 8452), 'test_device.DeviceTestCase.parameterize', 'DeviceTestCase.parameterize', (['TestDeviceConnect', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface', 'detect_type': '"""digitalbitbox_01_simulator"""'}), "(TestDeviceConnect, bitcoind, emulator=\n dev_emulator, interface=interface, detect_type='digitalbitbox_01_simulator'\n )\n", (8327, 8452), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((8462, 8568), 'test_device.DeviceTestCase.parameterize', 'DeviceTestCase.parameterize', (['TestGetDescriptors', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface'}), '(TestGetDescriptors, bitcoind, emulator=\n dev_emulator, interface=interface)\n', (8489, 8568), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((8583, 8684), 'test_device.DeviceTestCase.parameterize', 
'DeviceTestCase.parameterize', (['TestGetKeypool', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface'}), '(TestGetKeypool, bitcoind, emulator=dev_emulator,\n interface=interface)\n', (8610, 8684), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((8700, 8824), 'test_device.DeviceTestCase.parameterize', 'DeviceTestCase.parameterize', (['TestSignTx', 'bitcoind'], {'emulator': 'dev_emulator', 'interface': 'interface', 'signtx_cases': 'signtx_cases'}), '(TestSignTx, bitcoind, emulator=dev_emulator,\n interface=interface, signtx_cases=signtx_cases)\n', (8727, 8824), False, 'from test_device import Bitcoind, DeviceEmulator, DeviceTestCase, TestDeviceConnect, TestGetKeypool, TestGetDescriptors, TestSignTx\n'), ((485, 520), 'os.unlink', 'os.unlink', (['"""bitbox-emulator.stderr"""'], {}), "('bitbox-emulator.stderr')\n", (494, 520), False, 'import os\n'), ((1917, 1932), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1927, 1932), False, 'import time\n'), ((1999, 2090), 'json.dumps', 'json.dumps', (["{'seed': {'source': 'backup', 'filename': 'test_backup.pdf', 'key': 'key'}}"], {}), "({'seed': {'source': 'backup', 'filename': 'test_backup.pdf',\n 'key': 'key'}})\n", (2009, 2090), False, 'import json\n'), ((5201, 5292), 'json.dumps', 'json.dumps', (["{'seed': {'source': 'backup', 'filename': 'test_backup.pdf', 'key': 'key'}}"], {}), "({'seed': {'source': 'backup', 'filename': 'test_backup.pdf',\n 'key': 'key'}})\n", (5211, 5292), False, 'import json\n'), ((8836, 8891), 'unittest.TextTestRunner', 'unittest.TextTestRunner', ([], {'stream': 'sys.stdout', 'verbosity': '(2)'}), '(stream=sys.stdout, verbosity=2)\n', (8859, 8891), False, 'import unittest\n'), ((1496, 1527), 'os.path.dirname', 'os.path.dirname', (['self.simulator'], {}), '(self.simulator)\n', (1511, 1527), False, 'import os\n'), ((1676, 1711), 'hwilib.devices.digitalbitbox.BitboxSimulator', 'BitboxSimulator', (['"""127.0.0.1"""', '(35345)'], {}), "('127.0.0.1', 35345)\n", (1691, 1711), False, 'from hwilib.devices.digitalbitbox import BitboxSimulator, send_plain, send_encrypt\n'), ((1736, 1786), 'hwilib.devices.digitalbitbox.send_plain', 'send_plain', (['b\'{"password":"<PASSWORD>"}\'', 'self.dev'], {}), '(b\'{"password":"<PASSWORD>"}\', self.dev)\n', (1746, 1786), False, 'from hwilib.devices.digitalbitbox import BitboxSimulator, send_plain, send_encrypt\n'), ((1391, 1423), 'os.path.basename', 'os.path.basename', (['self.simulator'], {}), '(self.simulator)\n', (1407, 1423), False, 'import os\n')] |
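The start()/stop() pair on BitBox01Emulator above (together with its atexit registration) maps naturally onto a context manager. A minimal sketch, assuming only that the wrapped object exposes start() and stop(); this helper is not part of HWI:

from contextlib import contextmanager

@contextmanager
def running(emulator):
    # Ensure stop() runs even if the code using the emulator raises.
    emulator.start()
    try:
        yield emulator
    finally:
        emulator.stop()

# Hypothetical usage:
# with running(BitBox01Emulator(simulator_path)) as emu:
#     ...run tests against emu.path...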
import os
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
extensions = []
templates_path = ['_templates']
source_suffix = ['.rst', '.md']
master_doc = 'index'
project = u'fiubar'
copyright = u'2008-2018, <NAME>'
version = '2.0.0'
release = '2.0.0'
exclude_trees = ['_build']
pygments_style = 'sphinx'
html_static_path = ['_static']
if not on_rtd:
import sphinx_rtd_theme
html_theme = 'sphinx_rtd_theme'
html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
source_parsers = {
'.md': 'recommonmark.parser.CommonMarkParser',
}
| [
"sphinx_rtd_theme.get_html_theme_path",
"os.environ.get"
] | [((20, 55), 'os.environ.get', 'os.environ.get', (['"""READTHEDOCS"""', 'None'], {}), "('READTHEDOCS', None)\n", (34, 55), False, 'import os\n'), ((443, 481), 'sphinx_rtd_theme.get_html_theme_path', 'sphinx_rtd_theme.get_html_theme_path', ([], {}), '()\n', (479, 481), False, 'import sphinx_rtd_theme\n')] |
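For context, the source_parsers mapping above is the pre-Sphinx-1.8 way of wiring recommonmark in for Markdown sources; on newer Sphinx versions the equivalent configuration (a sketch, assuming recommonmark is installed) is:

extensions = ['recommonmark']
source_suffix = ['.rst', '.md']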
from io import StringIO
import itertools
import numpy as np
inputfile = './input/day9.txt'
# inputfile = StringIO('''2199943210
# 3987894921
# 9856789892
# 8767896789
# 9899965678''')
def neighbors(ar, i, j):
return {ar[i-1,j],
ar[i+1,j],
ar[i,j-1],
ar[i,j+1]}
a = np.genfromtxt(inputfile, dtype='i', delimiter=1)
nRows, nCols = a.shape
b = np.pad(a, ((1, 1), (1, 1)), constant_values=10)
lowPoints = []
for i, j in itertools.product(range(1, nRows+1), range(1, nCols+1)):
if all(b[i,j] < foo for foo in neighbors(b,i,j)):
lowPoints.append(b[i,j])
print(sum(1+n for n in lowPoints))
| [
"numpy.pad",
"numpy.genfromtxt"
] | [((333, 381), 'numpy.genfromtxt', 'np.genfromtxt', (['inputfile'], {'dtype': '"""i"""', 'delimiter': '(1)'}), "(inputfile, dtype='i', delimiter=1)\n", (346, 381), True, 'import numpy as np\n'), ((409, 456), 'numpy.pad', 'np.pad', (['a', '((1, 1), (1, 1))'], {'constant_values': '(10)'}), '(a, ((1, 1), (1, 1)), constant_values=10)\n', (415, 456), True, 'import numpy as np\n')] |
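The pad-with-10 trick above is what lets neighbors() skip all bounds checks: every border cell gains artificial neighbors of height 10, larger than any real digit, so the border can never win a "lowest" comparison. A self-contained check of the idea on a made-up 3x3 grid (values chosen for illustration, not from the puzzle input):

import numpy as np

demo = np.array([[2, 1, 9],
                 [3, 9, 8],
                 [9, 8, 5]])
padded = np.pad(demo, 1, constant_values=10)  # sentinel border of 10s
lows = [padded[i, j]
        for i in range(1, 4) for j in range(1, 4)
        if all(padded[i, j] < n for n in
               {padded[i-1, j], padded[i+1, j], padded[i, j-1], padded[i, j+1]})]
print(sum(1 + v for v in lows))  # prints 8: the low points are 1 and 5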
"""
Copyright 2019 BBC. Licensed under the terms of the Apache License 2.0.
"""
from unittest.mock import Mock
import pytest
from google.cloud.bigquery import Client
from foxglove.connectors.bigquery import BigQueryConnector
@pytest.fixture
def fake_bq_client():
return Mock(spec=Client(project='test_project'))
@pytest.mark.integration
def test_valid_bigquery_connector_init():
connector = BigQueryConnector(
'test_dataset_id',
'test_table_id',
'test_role'
)
assert connector.bq_dataset_id
assert connector.bq_table_id
assert connector.bq_client
@pytest.mark.integration
def test_write_truncate_ndjson_file(fake_bq_client):
connector = BigQueryConnector(
'test_dataset_id',
'test_table_id',
'test_role'
)
connector.bq_client = fake_bq_client
connector.write_truncate_ndjson_file('test_ndjson_fh')
fake_bq_client.load_table_from_file.assert_called_with(
file_obj='test_ndjson_fh',
destination=connector._bq_table,
job_config=connector._job_config
)
@pytest.mark.integration
def test_bq_table(fake_bq_client):
connector = BigQueryConnector(
'test_dataset_id',
'test_table_id',
'test_role'
)
connector.bq_client = fake_bq_client
_ = connector._bq_table()
connector._bq_dataset.table.assert_called_once()
@pytest.mark.integration
def test_bq_dataset(fake_bq_client):
connector = BigQueryConnector(
'test_dataset_id',
'test_table_id',
'test_role'
)
connector.bq_client = fake_bq_client
_ = connector._bq_dataset()
fake_bq_client.create_dataset.assert_called_once()
def test_bigquery_engine_url_decode():
engine_url='bigquery://projectId=my_project;datasetId=nice_food;tableId=cakes;'
connector = BigQueryConnector(engine_url=engine_url)
project, dataset, table = connector._decode_engine_url()
assert project == 'my_project'
assert dataset == 'nice_food'
assert table == 'cakes'
@pytest.mark.integration
def test_sql_query_with_params():
engine_url='bigquery://projectId=bbc-datalab;datasetId=foxglove_test;tableId=rms_titles;'
connector = BigQueryConnector(engine_url=engine_url)
# check known value in sample data
sql = "SELECT id FROM `bbc-datalab.foxglove_test.rms_titles` WHERE pid=@my_pid"
for row in connector.query(sql=sql, sql_params=[("my_pid", "STRING", "b01qw8tz")]):
assert row.id == 1
| [
"foxglove.connectors.bigquery.BigQueryConnector",
"google.cloud.bigquery.Client"
] | [((406, 472), 'foxglove.connectors.bigquery.BigQueryConnector', 'BigQueryConnector', (['"""test_dataset_id"""', '"""test_table_id"""', '"""test_role"""'], {}), "('test_dataset_id', 'test_table_id', 'test_role')\n", (423, 472), False, 'from foxglove.connectors.bigquery import BigQueryConnector\n'), ((697, 763), 'foxglove.connectors.bigquery.BigQueryConnector', 'BigQueryConnector', (['"""test_dataset_id"""', '"""test_table_id"""', '"""test_role"""'], {}), "('test_dataset_id', 'test_table_id', 'test_role')\n", (714, 763), False, 'from foxglove.connectors.bigquery import BigQueryConnector\n'), ((1154, 1220), 'foxglove.connectors.bigquery.BigQueryConnector', 'BigQueryConnector', (['"""test_dataset_id"""', '"""test_table_id"""', '"""test_role"""'], {}), "('test_dataset_id', 'test_table_id', 'test_role')\n", (1171, 1220), False, 'from foxglove.connectors.bigquery import BigQueryConnector\n'), ((1454, 1520), 'foxglove.connectors.bigquery.BigQueryConnector', 'BigQueryConnector', (['"""test_dataset_id"""', '"""test_table_id"""', '"""test_role"""'], {}), "('test_dataset_id', 'test_table_id', 'test_role')\n", (1471, 1520), False, 'from foxglove.connectors.bigquery import BigQueryConnector\n'), ((1819, 1859), 'foxglove.connectors.bigquery.BigQueryConnector', 'BigQueryConnector', ([], {'engine_url': 'engine_url'}), '(engine_url=engine_url)\n', (1836, 1859), False, 'from foxglove.connectors.bigquery import BigQueryConnector\n'), ((2188, 2228), 'foxglove.connectors.bigquery.BigQueryConnector', 'BigQueryConnector', ([], {'engine_url': 'engine_url'}), '(engine_url=engine_url)\n', (2205, 2228), False, 'from foxglove.connectors.bigquery import BigQueryConnector\n'), ((290, 320), 'google.cloud.bigquery.Client', 'Client', ([], {'project': '"""test_project"""'}), "(project='test_project')\n", (296, 320), False, 'from google.cloud.bigquery import Client\n')] |
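test_bigquery_engine_url_decode above pins down the engine-url format exactly; a minimal decoder satisfying that test might look like the sketch below (offered as an assumption for illustration; the real BigQueryConnector._decode_engine_url may be implemented differently):

def decode_engine_url(engine_url):
    # e.g. 'bigquery://projectId=my_project;datasetId=nice_food;tableId=cakes;'
    _, _, params = engine_url.partition('://')
    kv = dict(pair.split('=', 1) for pair in params.split(';') if pair)
    return kv['projectId'], kv['datasetId'], kv['tableId']

assert decode_engine_url(
    'bigquery://projectId=my_project;datasetId=nice_food;tableId=cakes;'
) == ('my_project', 'nice_food', 'cakes')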
import sys
#"splchar":["!","@","#","$",".",",",":","%","^","*"]
splchar = [chr(i) for i in range(33, 48)]  # ASCII special characters 33-47 ('!' to '/'); chr(i) is the mapping expression evaluated on every iteration
splchar1 = [chr(i) for i in range(58, 65)]  # ASCII 58-64 (':' to '@'); a list comprehension is clearer here than an explicit for loop
splchar += splchar1  # merge both special-character ranges into one list
letter = {"cap": "ABCDEFGHIJKLMNOPQRSTUVWXYZ", "alpha": "abcdefghijklmnopqrstuvwxyz", "digit": "0123456789", "white_space": [" ", "\t", "\n"],
          "splchar": splchar}  # dictionary mapping each category to its membership set
print("Enter your string with multiple lines\n")
print("Ctrl+D to terminate input\n")
a = sys.stdin.read()  # Ctrl+D ends input; sys.stdin.read() captures the whole multiline input as one string
x = list(letter.items())  # .items() returns a dict_items view, so convert it to a list of (key, value) tuples
lineno = [j for j in a if j == "\n"]  # one entry per newline, used to count lines
cap, small, digit, whitespace, spl = 0, 0, 0, 0, 0  # initialise all counters to 0
'''Implementation of a switch-case (of sorts) built from exec'd statement strings'''
d1={"cap":"if i in x[j][-1] and x[j][0]=='cap':cap+=1",
"alpha":"if i in x[j][-1] and x[j][0]=='alpha':small+=1",
"digit":"if i in x[j][-1] and x[j][0]=='digit':digit+=1",
"white_space":"if i in x[j][-1] and x[j][0]=='white_space':whitespace+=1",
"splchar":"if i in x[j][-1] and x[j][0]=='splchar':spl+=1"
}
# To be specific: the "switch" shares its keys with the letter dict, and each value is a statement string executed via exec
for i in a:  # check each character of the input
    for j in range(0, len(x)):  # walk the (key, value) pairs taken from letter
        exec(str(d1.get(x[j][0])))  # look up and execute the matching "case" statement
print("\nThe total number of caps", cap, "\nSmall:", small, "\ndigit:", digit, "\nwhitespaces", whitespace, "\nSpecialCharacters", spl, "\nTotal Alphabets", cap + small,
      "\nTotal lines", len(lineno))
| [
"sys.stdin.read"
] | [((733, 749), 'sys.stdin.read', 'sys.stdin.read', ([], {}), '()\n', (747, 749), False, 'import sys\n')] |
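The exec-based dispatch in the script above works at module scope but is brittle, since the counter names are baked into strings. An exec-free equivalent of the same classification, offered only as a rewrite sketch, not as part of the original script:

from collections import Counter

def classify(ch):
    # Mirrors the categories of the letter dict: ASCII-only letters,
    # digits, whitespace, and the two special-character ranges.
    if ch.isascii() and ch.isupper(): return 'cap'
    if ch.isascii() and ch.islower(): return 'alpha'
    if ch.isdigit(): return 'digit'
    if ch in ' \t\n': return 'white_space'
    if 33 <= ord(ch) < 48 or 58 <= ord(ch) < 65: return 'splchar'
    return 'other'

counts = Counter(classify(ch) for ch in "Hello, World! 42\n")
print(counts)  # alpha: 8, white_space: 3, cap: 2, splchar: 2, digit: 2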
__author__ = 'ashvinder'
import re
import os
import gc
import logger
import time
from TestInput import TestInputSingleton
from backup.backup_base import BackupBaseTest
from remote.remote_util import RemoteMachineShellConnection
from couchbase_helper.documentgenerator import BlobGenerator
from couchbase_helper.documentgenerator import DocumentGenerator
from memcached.helper.kvstore import KVStore
from membase.api.rest_client import RestConnection, Bucket
from couchbase_helper.data_analysis_helper import *
from memcached.helper.data_helper import VBucketAwareMemcached
from view.spatialquerytests import SimpleDataSet
from view.spatialquerytests import SpatialQueryTests
from membase.helper.spatial_helper import SpatialHelper
from couchbase_helper.cluster import Cluster
from membase.helper.bucket_helper import BucketOperationHelper
from couchbase_helper.document import DesignDocument, View
import copy
class IBRTests(BackupBaseTest):
def setUp(self):
super(IBRTests, self).setUp()
self.num_mutate_items = self.input.param("mutate_items", 1000)
gen_load = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a full backup
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
def tearDown(self):
super(IBRTests, self).tearDown()
def restoreAndVerify(self, bucket_names, kvs_before, expected_error=None):
for bucket in self.buckets:
bucket.kvs[1] = kvs_before[bucket.name]
del kvs_before
gc.collect()
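        # The per-bucket KV snapshots now live on bucket.kvs[1]; drop the
        # temporary dict and force a collection so nothing unreferenced lingers.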
errors, outputs = self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
errors.extend(outputs)
error_found = False
if expected_error:
for line in errors:
if line.find(expected_error) != -1:
error_found = True
break
self.assertTrue(error_found, "Expected error not found: %s" % expected_error)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
if expected_error:
for bucket in self.buckets:
bucket.kvs[1] = KVStore()
self.verify_results(self.master)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
def verify_dir_structure(self, total_backups, buckets, nodes):
cmd = 'find ' + self.backup_location + ' -type f'
if self.shell.info.type.lower() == 'windows':
cmd = 'cmd.exe /C "dir /s /b C:\\tmp\\backup"'
output, error = self.shell.execute_command(cmd)
self.log.info("output = {0} error = {1}".format(output,error))
if error:
            raise Exception('Got error {0}'.format(error))
expected_design_json = total_backups * buckets
expected_data_cbb = total_backups * buckets * nodes
expected_meta_json = total_backups * buckets * nodes
expected_failover_json = total_backups * buckets * nodes
        timestamp = r'\d{4}-\d{2}-\d{2}T\d+Z'
        pattern_mode = '(full|accu|diff)'
        timestamp_backup = timestamp + '-' + pattern_mode
        pattern_bucket = r'bucket-\w+'
        pattern_node = r'node-\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}.+'
        path_sep = r'[/\\]'  # backup paths may use unix or windows separators
        pattern_design_json = timestamp + path_sep + timestamp_backup + \
                              path_sep + pattern_bucket
        pattern_backup_files = pattern_design_json + path_sep + pattern_node
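        # Illustrative path these patterns are built to match (hypothetical,
        # not captured from a real backup run):
        #   2018-01-01T120000Z/2018-01-01T120000Z-full/bucket-default/node-10.1.2.3%3A8091/data-0000.cbb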
data_cbb = 0
failover = 0
meta_json = 0
design_json = 0
for line in output:
if 'data-0000.cbb' in line:
if re.search(pattern_backup_files, line):
data_cbb += 1
if 'failover.json' in line:
if re.search(pattern_backup_files, line):
failover += 1
if self.cb_version[:5] != "4.5.1" and 'meta.json' in line:
if re.search(pattern_backup_files, line):
meta_json += 1
if 'design.json' in line:
if re.search(pattern_design_json, line):
design_json += 1
self.log.info("expected_data_cbb {0} data_cbb {1}"
.format(expected_data_cbb, data_cbb))
self.log.info("expected_failover_json {0} failover {1}"
.format(expected_failover_json, failover))
if self.cb_version[:5] != "4.5.1":
self.log.info("expected_meta_json {0} meta_json {1}"
.format(expected_meta_json, meta_json))
""" add json support later in this test
self.log.info("expected_design_json {0} design_json {1}"
.format(expected_design_json, design_json)) """
if self.cb_version[:5] != "4.5.1":
if data_cbb == expected_data_cbb and failover == expected_failover_json and \
meta_json == expected_meta_json:
# add support later in and design_json == expected_design_json:
return True
else:
if data_cbb == expected_data_cbb and failover == expected_failover_json:
return True
return False
def testFullBackupDirStructure(self):
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Full Backup')
def testMultipleFullBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take a full backup
options = self.command_options + [' -m full']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(120)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Full Backup')
def testIncrBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
        # Take an incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Incremental Backup')
def testMultipleIncrBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take an incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.log.info("sleeping for 30 secs")
self.sleep(30)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Incremental Backup')
def testMultipleDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take a differential backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testMultipleIncrDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take an incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Combo Incr and Diff Backup')
def testMultipleFullIncrDiffBackupDirStructure(self):
for count in range(10):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
            # Take an incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
self.sleep(60)
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a full backup
options = self.command_options + [' -m full']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options, delete_backup=False)
self.total_backups += 1
self.sleep(60)
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Combo Full,Incr and Diff Backups')
def testDiffBackupDirStructure(self):
# Update data
gen_update = BlobGenerator('testdata', 'testdata-', self.value_size, end=5)
self._load_all_buckets(self.master, gen_update, "update", 0, 1, self.item_flag, True, batch_size=20000,
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
self.total_backups += 1
if not self.verify_dir_structure(self.total_backups, len(self.buckets), len(self.servers)):
raise Exception('Backup Directory Verification Failed for Differential Backup')
def testIncrementalBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        # Take an incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testDifferentialBackup(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Differential backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a diff backup
options = self.command_options + [' -m diff']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
def testIncrementalBackupConflict(self):
gen_extra = BlobGenerator('zoom', 'zoom-', self.value_size, end=self.num_items)
self.log.info("Starting Incremental backup")
extra_items_deleted_flag = 0
if(self.doc_ops is not None):
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("update" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "update", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
if("delete" in self.doc_ops):
self._load_all_buckets(self.master, gen_extra, "delete", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
extra_items_deleted_flag = 1
if("expire" in self.doc_ops):
if extra_items_deleted_flag == 1:
self._load_all_buckets(self.master, gen_extra, "create", 0, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
self._load_all_buckets(self.master, gen_extra, "update", self.expire_time, 1, self.item_flag, True, batch_size=20000, pause_secs=5, timeout_secs=180)
        # Take an incremental backup
options = self.command_options + [' -m accu']
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
        self.lww = self.input.param("lww_new", False)
self._bucket_creation()
self.sleep(20)
expected_error = self.input.param("expected_error", None)
self.restoreAndVerify(bucket_names, kvs_before, expected_error)
class IBRJsonTests(BackupBaseTest):
def setUp(self):
super(IBRJsonTests, self).setUp()
self.num_mutate_items = self.input.param("mutate_items", 1000)
template = '{{ "mutated" : 0, "age": {0}, "first_name": "{1}" }}'
gen_load = DocumentGenerator('load_by_id_test', template, range(5),\
['james', 'john'], start=0, end=self.num_items)
self._load_all_buckets(self.master, gen_load, "create", 0, 1,\
self.item_flag, True, batch_size=20000,\
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
if self.test_with_view:
view_list = []
bucket = "default"
if self.dev_view:
prefix_ddoc="dev_ddoc"
else:
prefix_ddoc="ddoc"
ddoc_view_map = self.bucket_ddoc_map.pop(bucket, {})
for ddoc_count in xrange(self.num_ddocs):
design_doc_name = prefix_ddoc + str(ddoc_count)
view_list = self.make_default_views("views", self.num_views_per_ddoc)
self.create_views(self.master, design_doc_name, view_list,\
bucket, self.wait_timeout * 2)
ddoc_view_map[design_doc_name] = view_list
self.bucket_ddoc_map[bucket] = ddoc_view_map
#Take a full backup
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info,\
self.backup_location, options)
self.sleep(2)
def testFullBackup(self):
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
    def restoreAndVerify(self, bucket_names, kvs_before):
for bucket in self.buckets:
bucket.kvs[1] = kvs_before[bucket.name]
del kvs_before
gc.collect()
self.shell.restore_backupFile(self.couchbase_login_info,\
self.backup_location, bucket_names)
self.sleep(10)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
self.verify_results(self.master)
self._verify_stats_all_buckets(self.servers[:self.num_servers])
""" add design doc and view """
if self.test_with_view:
result = False
query = {"stale" : "false", "full_set" : "true", \
"connection_timeout" : 60000}
for bucket, ddoc_view_map in self.bucket_ddoc_map.items():
for ddoc_name, view_list in ddoc_view_map.items():
for view in view_list:
try:
result = self.cluster.query_view(self.master,\
ddoc_name, view.name, query,\
self.num_items, timeout=10)
except Exception:
pass
if not result:
self.fail("There is no: View: {0} in Design Doc:"\
" {1} in bucket: {2}"\
.format(view.name, ddoc_name, bucket))
self.log.info("DDoc Data Validation Successful")
def tearDown(self):
super(IBRJsonTests, self).tearDown()
def testMultipleBackups(self):
if not self.command_options:
self.command_options = []
options = self.command_options
if self.backup_type is not None:
if "accu" in self.backup_type:
options = self.command_options + [' -m accu']
if "diff" in self.backup_type:
options = self.command_options + [' -m diff']
diff_backup = [" -m diff"]
accu_backup = [" -m accu"]
current_backup = [" -m diff"]
for count in range(self.number_of_backups):
if "mix" in self.backup_type:
if current_backup == diff_backup:
current_backup = accu_backup
options = self.command_options + accu_backup
elif current_backup == accu_backup:
current_backup = diff_backup
options = self.command_options + diff_backup
# Update data
template = '{{ "mutated" : {0}, "age": {0}, "first_name": "{1}" }}'
gen_update = DocumentGenerator('load_by_id_test', template, range(5),\
['james', 'john'], start=0, end=self.num_items)
self._load_all_buckets(self.master, gen_update, "update", 0, 1,\
self.item_flag, True, batch_size=20000,\
pause_secs=5, timeout_secs=180)
self._wait_for_stats_all_buckets(self.servers[:self.num_servers])
#Take a backup
self.shell.execute_cluster_backup(self.couchbase_login_info,\
self.backup_location, options)
# Save copy of data
kvs_before = {}
for bucket in self.buckets:
kvs_before[bucket.name] = bucket.kvs[1]
bucket_names = [bucket.name for bucket in self.buckets]
# Delete all buckets
self._all_buckets_delete(self.master)
gc.collect()
self._bucket_creation()
self.sleep(20)
self.restoreAndVerify(bucket_names, kvs_before)
class IBRSpatialTests(SpatialQueryTests):
def setUp(self):
self.input = TestInputSingleton.input
self.servers = self.input.servers
self.master = self.servers[0]
self.log = logger.Logger.get_logger()
self.helper = SpatialHelper(self, "default")
self.helper.setup_cluster()
self.cluster = Cluster()
self.default_bucket = self.input.param("default_bucket", True)
self.sasl_buckets = self.input.param("sasl_buckets", 0)
self.standard_buckets = self.input.param("standard_buckets", 0)
self.memcached_buckets = self.input.param("memcached_buckets", 0)
self.servers = self.helper.servers
self.shell = RemoteMachineShellConnection(self.master)
info = self.shell.extract_remote_info()
self.os = info.type.lower()
self.couchbase_login_info = "%s:%s" % (self.input.membase_settings.rest_username,
self.input.membase_settings.rest_password)
self.backup_location = self.input.param("backup_location", "/tmp/backup")
self.command_options = self.input.param("command_options", '')
def tearDown(self):
self.helper.cleanup_cluster()
def test_backup_with_spatial_data(self):
num_docs = self.helper.input.param("num-docs", 5000)
self.log.info("description : Make limit queries on a simple "
"dataset with {0} docs".format(num_docs))
data_set = SimpleDataSet(self.helper, num_docs)
data_set.add_limit_queries()
self._query_test_init(data_set)
if not self.command_options:
self.command_options = []
options = self.command_options + [' -m full']
self.total_backups = 1
self.shell.execute_cluster_backup(self.couchbase_login_info, self.backup_location, options)
time.sleep(2)
self.buckets = RestConnection(self.master).get_buckets()
bucket_names = [bucket.name for bucket in self.buckets]
BucketOperationHelper.delete_all_buckets_or_assert(self.servers, self)
gc.collect()
self.helper._create_default_bucket()
self.shell.restore_backupFile(self.couchbase_login_info, self.backup_location, bucket_names)
SimpleDataSet(self.helper, num_docs)._create_views()
self._query_test_init(data_set)
| [
"re.search",
"view.spatialquerytests.SimpleDataSet",
"couchbase_helper.cluster.Cluster",
"memcached.helper.kvstore.KVStore",
"membase.helper.spatial_helper.SpatialHelper",
"remote.remote_util.RemoteMachineShellConnection",
"time.sleep",
"logger.Logger.get_logger",
"couchbase_helper.documentgenerator... | [((1093, 1168), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (1106, 1168), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((1971, 1983), 'gc.collect', 'gc.collect', ([], {}), '()\n', (1981, 1983), False, 'import gc\n'), ((6874, 6949), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (6887, 6949), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((13471, 13533), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': '(5)'}), "('testdata', 'testdata-', self.value_size, end=5)\n", (13484, 13533), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((14249, 14316), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""zoom"""', '"""zoom-"""', 'self.value_size'], {'end': 'self.num_items'}), "('zoom', 'zoom-', self.value_size, end=self.num_items)\n", (14262, 14316), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((15918, 15930), 'gc.collect', 'gc.collect', ([], {}), '()\n', (15928, 15930), False, 'import gc\n'), ((16104, 16171), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""zoom"""', '"""zoom-"""', 'self.value_size'], {'end': 'self.num_items'}), "('zoom', 'zoom-', self.value_size, end=self.num_items)\n", (16117, 16171), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((17842, 17854), 'gc.collect', 'gc.collect', ([], {}), '()\n', (17852, 17854), False, 'import gc\n'), ((18287, 18299), 'gc.collect', 'gc.collect', ([], {}), '()\n', (18297, 18299), False, 'import gc\n'), ((18478, 18545), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""zoom"""', '"""zoom-"""', 'self.value_size'], {'end': 'self.num_items'}), "('zoom', 'zoom-', self.value_size, end=self.num_items)\n", (18491, 18545), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((20146, 20158), 'gc.collect', 'gc.collect', ([], {}), '()\n', (20156, 20158), False, 'import gc\n'), ((22543, 22555), 'gc.collect', 'gc.collect', ([], {}), '()\n', (22553, 22555), False, 'import gc\n'), ((22845, 22857), 'gc.collect', 'gc.collect', ([], {}), '()\n', (22855, 22857), False, 'import gc\n'), ((26348, 26360), 'gc.collect', 'gc.collect', ([], {}), '()\n', (26358, 26360), False, 'import gc\n'), ((26683, 26709), 'logger.Logger.get_logger', 'logger.Logger.get_logger', ([], {}), '()\n', (26707, 26709), False, 'import logger\n'), ((26732, 26762), 'membase.helper.spatial_helper.SpatialHelper', 'SpatialHelper', (['self', '"""default"""'], {}), "(self, 'default')\n", (26745, 26762), False, 'from membase.helper.spatial_helper import SpatialHelper\n'), ((26822, 26831), 'couchbase_helper.cluster.Cluster', 'Cluster', ([], {}), '()\n', (26829, 26831), False, 'from couchbase_helper.cluster import Cluster\n'), ((27177, 27218), 'remote.remote_util.RemoteMachineShellConnection', 'RemoteMachineShellConnection', (['self.master'], {}), '(self.master)\n', (27205, 27218), False, 'from remote.remote_util import RemoteMachineShellConnection\n'), ((27961, 27997), 
'view.spatialquerytests.SimpleDataSet', 'SimpleDataSet', (['self.helper', 'num_docs'], {}), '(self.helper, num_docs)\n', (27974, 27997), False, 'from view.spatialquerytests import SimpleDataSet\n'), ((28345, 28358), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (28355, 28358), False, 'import time\n'), ((28497, 28567), 'membase.helper.bucket_helper.BucketOperationHelper.delete_all_buckets_or_assert', 'BucketOperationHelper.delete_all_buckets_or_assert', (['self.servers', 'self'], {}), '(self.servers, self)\n', (28547, 28567), False, 'from membase.helper.bucket_helper import BucketOperationHelper\n'), ((28576, 28588), 'gc.collect', 'gc.collect', ([], {}), '()\n', (28586, 28588), False, 'import gc\n'), ((5998, 6073), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (6011, 6073), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((7749, 7824), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (7762, 7824), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((8729, 8804), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (8742, 8804), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((9665, 9740), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (9678, 9740), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((10320, 10395), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (10333, 10395), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((11259, 11334), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (11272, 11334), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((11914, 11989), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (11927, 11989), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((12562, 12637), 'couchbase_helper.documentgenerator.BlobGenerator', 'BlobGenerator', (['"""testdata"""', '"""testdata-"""', 'self.value_size'], {'end': 'self.num_items'}), "('testdata', 'testdata-', self.value_size, end=self.num_items)\n", (12575, 12637), False, 'from couchbase_helper.documentgenerator import BlobGenerator\n'), ((2604, 2613), 'memcached.helper.kvstore.KVStore', 'KVStore', ([], {}), '()\n', (2611, 2613), False, 'from memcached.helper.kvstore import KVStore\n'), ((4077, 4114), 're.search', 're.search', 
(['pattern_backup_files', 'line'], {}), '(pattern_backup_files, line)\n', (4086, 4114), False, 'import re\n'), ((4209, 4246), 're.search', 're.search', (['pattern_backup_files', 'line'], {}), '(pattern_backup_files, line)\n', (4218, 4246), False, 'import re\n'), ((4372, 4409), 're.search', 're.search', (['pattern_backup_files', 'line'], {}), '(pattern_backup_files, line)\n', (4381, 4409), False, 'import re\n'), ((4503, 4539), 're.search', 're.search', (['pattern_design_json', 'line'], {}), '(pattern_design_json, line)\n', (4512, 4539), False, 'import re\n'), ((28383, 28410), 'membase.api.rest_client.RestConnection', 'RestConnection', (['self.master'], {}), '(self.master)\n', (28397, 28410), False, 'from membase.api.rest_client import RestConnection, Bucket\n'), ((28745, 28781), 'view.spatialquerytests.SimpleDataSet', 'SimpleDataSet', (['self.helper', 'num_docs'], {}), '(self.helper, num_docs)\n', (28758, 28781), False, 'from view.spatialquerytests import SimpleDataSet\n')] |
"""Check the feasibility of a bipartite graph by using SSLAP's feasibility module"""
import numpy as np
from sslap import hopcroft_solve
# All 3 methods will use the same input bipartite graph:
# i = 0 connects to j = 0, 1
# i = 1 connects to j = 1, 2
# i = 2 connects to j = 1, 4
# i = 3 connects to j = 2
# i = 4 connects to j = 3
# which has a maximum matching of 5
# eg i:j of 0:0, 1:1, 2:4, 3:2, 4:3
def dict_usage():
lookup = {0: [0, 1], 1: [1, 2], 2: [1, 4], 3: [2], 4: [3]}
res = hopcroft_solve(lookup=lookup)
print(res)
def mat_usage():
    mat = -np.ones((5, 5))  # all invalid, except
mat[[0, 0, 1, 1, 2, 2, 3, 4], [0, 1, 1, 2, 1, 4, 2, 3]] = 1 # for valid edges
res = hopcroft_solve(mat=mat)
print(res)
def loc_usage():
loc = np.array([[0, 0], [0, 1], [1, 1], [1, 2], [2, 1], [2, 4], [3, 2], [4, 3]]) # (i, j) for each edge
res = hopcroft_solve(loc=loc)
print(res)
if __name__ == "__main__":
dict_usage()
mat_usage()
loc_usage()
| [
"numpy.array",
"numpy.ones",
"sslap.hopcroft_solve"
] | [((494, 523), 'sslap.hopcroft_solve', 'hopcroft_solve', ([], {'lookup': 'lookup'}), '(lookup=lookup)\n', (508, 523), False, 'from sslap import hopcroft_solve\n'), ((690, 713), 'sslap.hopcroft_solve', 'hopcroft_solve', ([], {'mat': 'mat'}), '(mat=mat)\n', (704, 713), False, 'from sslap import hopcroft_solve\n'), ((752, 826), 'numpy.array', 'np.array', (['[[0, 0], [0, 1], [1, 1], [1, 2], [2, 1], [2, 4], [3, 2], [4, 3]]'], {}), '([[0, 0], [0, 1], [1, 1], [1, 2], [2, 1], [2, 4], [3, 2], [4, 3]])\n', (760, 826), True, 'import numpy as np\n'), ((858, 881), 'sslap.hopcroft_solve', 'hopcroft_solve', ([], {'loc': 'loc'}), '(loc=loc)\n', (872, 881), False, 'from sslap import hopcroft_solve\n'), ((564, 579), 'numpy.ones', 'np.ones', (['(5, 5)'], {}), '((5, 5))\n', (571, 579), True, 'import numpy as np\n')] |
from django.db import models
# Create your models here.
class HeadlineListing(models.Model):
headline_text = models.CharField(max_length=500)
accessed = models.DateTimeField()
source_url = models.CharField(max_length=200)
author = models.CharField(default="", max_length=200)
source = models.CharField(max_length=200)
| [
"django.db.models.DateTimeField",
"django.db.models.CharField"
] | [((114, 146), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)'}), '(max_length=500)\n', (130, 146), False, 'from django.db import models\n'), ((162, 184), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (182, 184), False, 'from django.db import models\n'), ((202, 234), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (218, 234), False, 'from django.db import models\n'), ((248, 292), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(200)'}), "(default='', max_length=200)\n", (264, 292), False, 'from django.db import models\n'), ((306, 338), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(200)'}), '(max_length=200)\n', (322, 338), False, 'from django.db import models\n')] |
import matplotlib.pyplot as plt
import pandas as pd
def main():
    times = [1, 100, 365, 365*20, 365*100, 100000]
df = pd.read_csv('tmp.csv')
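    # Assumed layout of 'tmp.csv' (file not shown): column 0 is the distance
    # axis and column t holds the modelled MgO profile after t days, so
    # df.iloc[:, t] below picks the profile for each entry of `times`.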
df_ini = pd.read_csv("initial_value.csv")
xan_measured = (df_ini["distance"], df_ini["XAn"])
df_m = pd.read_csv('measured_value.csv')
measured_data = (df_m["Distance(um)"],df_m["MgO"])
    plt.rcParams["font.size"] = 14
    fig = plt.figure()
    ax = fig.add_subplot(111)
ax2 = ax.twinx()
ax.set_title("$T=1000 ^\circ$C (<NAME>, 2014)")
#ax.set_title(str(time_days)+" days in " + str(tempc) + "$^\circ$C")
best_fit = 100000
#ax.plot(*measured_data, "o", color="w", mec="k", label="Measured")
for t in times:
x = df.iloc[:,0]
y = df.iloc[:,t]
plotdata = (x, y)
if t == 1:
ax.plot(*plotdata, "--", color="k", label="Initial")
elif t == best_fit:
ax.plot(*plotdata, "-", color="r", label=str(int(t/365))+"yrs")
else:
if t < 365:
ax.plot(x, y, "-", color="grey", label=str(int(t))+"days")
else:
ax.plot(x, y, "-", color="grey", label=str(int(t/365))+"yrs")
ax2.plot(*xan_measured, "o", color="b", label="XAn")
ax2.set_ylabel("XAn", fontsize=16)
ax2.set_ylim(0.55, 2)
ax.set_ylim(0, 0.7)
ax.set_xlabel("Distance from rim (\u03bcm)", fontsize=16)
ax.set_ylabel("MgO (wt.%)", fontsize=16)
fig.legend(loc=1, fancybox=False, framealpha=1, edgecolor="k", fontsize=10)
fig.savefig('img.jpg', dpi=300, bbox_inches='tight')
if __name__ == "__main__":
main() | [
"matplotlib.pyplot.figure",
"pandas.read_csv"
] | [((126, 148), 'pandas.read_csv', 'pd.read_csv', (['"""tmp.csv"""'], {}), "('tmp.csv')\n", (137, 148), True, 'import pandas as pd\n'), ((167, 199), 'pandas.read_csv', 'pd.read_csv', (['"""initial_value.csv"""'], {}), "('initial_value.csv')\n", (178, 199), True, 'import pandas as pd\n'), ((271, 304), 'pandas.read_csv', 'pd.read_csv', (['"""measured_value.csv"""'], {}), "('measured_value.csv')\n", (282, 304), True, 'import pandas as pd\n'), ((375, 387), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (385, 387), True, 'import matplotlib.pyplot as plt\n'), ((435, 447), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (445, 447), True, 'import matplotlib.pyplot as plt\n')] |
import os
import re
import csv
import sys
import json
import yaml
import time
import socket
import connexion
import postgresql as psql
from flask import current_app
from urllib.parse import urlencode
from hashlib import md5
from bokeh.embed import server_document
from .processes import fetch_process, is_running, process_info
from .utils import column_filter
float_pattern = re.compile(r'^\d*\.\d+$')
int_pattern = re.compile(r'^-?\d+$')
NA_pattern = re.compile(r'^NA$')
queryfilters = re.compile(r'(.+)(<=?|>=?|!=|==)(.+)')
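# Illustrative: a filter string such as 'gene==KRAS' or 'score>=0.5'
# (hypothetical column names) splits into (column, operator, value) groups.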
def init_column_mapping(row, schema):
"""Generate initial estimates of column data types"""
defs = {column_filter(col): 'text' for col in row}
# Apply predefined table schema
defs.update({k: v for (k, v) in schema.items() if k in defs})
for (col, val) in row.items():
col = column_filter(col)
if col not in schema:
if int_pattern.match(val):
try:
int(val)
print("Assigning int to", col, "based on", val)
defs[col] = 'integer'
except ValueError:
print("ERROR: Int mismatch:", val)
elif float_pattern.match(val):
try:
float(val)
print("Assigning float to", col, "based on", val)
defs[col] = 'decimal'
except ValueError:
print("ERROR: Float mismatch:", val)
mapping = {}
for (col, val) in defs.items():
if 'int' in val:
mapping[col] = int
elif val == 'decimal':
mapping[col] = float
else:
mapping[col] = str
return (mapping, defs)
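# Illustrative: a first row such as {'depth': '95', 'score': '0.5', 'gene': 'X'}
# (hypothetical columns outside the predefined schema) yields the mapping
# {'depth': int, 'score': float, 'gene': str}.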
def column_mapping(row, mapping, schema):
"""Apply filtering to the current row.
Detect if column data types need to be changed"""
output = {}
changes = {}
for (col, val) in row.items():
col = column_filter(col)
        if val is None or NA_pattern.match(str(val)):
output[col] = None
continue
if col not in schema and mapping[col] == str:
if int_pattern.match(val):
try:
int(val)
print("Assigning int to", col, "based on", val)
mapping[col] = int
changes[col] = int
except ValueError:
print("ERROR: Int mismatch:", val)
elif float_pattern.match(val):
try:
float(val)
print("Assigning float to", col, "based on", val)
mapping[col] = float
changes[col] = float
except ValueError:
print("ERROR: Float mismatch:", val)
try:
output[col] = mapping[col](val)
except ValueError:
output[col] = None
return (mapping, output, changes)
def old_file_read(db, CREATE_TABLE, tablekey, column_names, reader, mapping):
with db.xact():
db.execute(CREATE_TABLE)
# table marked for insertion during original attempt, so don't need to here
# prepare the insertion query
insert = db.prepare("INSERT INTO %s (%s) VALUES (%s)" % (
tablekey,
','.join(column_names),
','.join('$%d' % i for (_, i) in zip(
column_names, range(1, sys.maxsize)
))
))
update = "ALTER TABLE %s " % tablekey
for row in reader:
# process each row
# We format the data in the row and update column data types, if
# necessary
(mapping, formatted, changes) = column_mapping(row, mapping, current_app.config['schema'])
if len(changes):
#Generate a query to alter the table schema, if any changes are required
alter_cols = []
for (k, v) in changes.items():
# if there were any changes to the data type, update the table
# since we only ever update a text column to int/decimal, then
# it's okay to nullify the data
typ = ''
if v == int:
typ = 'bigint' if k in {'start', 'stop'} else 'integer'
elif v == float:
typ = 'decimal'
alter_cols.append(
"ALTER COLUMN %s SET DATA TYPE %s USING %s::%s" % (
k, typ, k, typ
)
)
# Re-generate the insert statement since the data types changed
print("Alter:", update + ','.join(alter_cols))
db.execute(update + ','.join(alter_cols))
insert = db.prepare("INSERT INTO %s (%s) VALUES (%s)" % (
tablekey,
','.join(column_names),
','.join('$%d' % i for (_, i) in zip(
column_names, range(1, sys.maxsize)
))
))
# insert the row
insert(*[formatted[column] for column in column_names])
def table_transaction(file_permissions, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping):
with db.xact():
db.execute(CREATE_TABLE)
db.prepare("LOCK TABLE %s IN ACCESS EXCLUSIVE MODE" % (tablekey))
copy_query = "COPY %s (%s) FROM '%s' WITH FREEZE NULL 'NA' DELIMITER E'\t' CSV HEADER" % (tablekey, all_tablecolumns, raw_reader.name)
#copy_query may result in psql.exceptions.InsufficientPrivilegeError when run; workaround attempted below
if file_permissions:
#mark the table for deletion when the server shuts down
#don't need to mark table for deletion during second attempt
if 'db-clean' not in current_app.config:
current_app.config['db-clean'] = [tablekey]
else:
current_app.config['db-clean'].append(tablekey)
#attempt file copy
db.execute(copy_query)
else:
import subprocess
filedest = "/tmp/"+os.path.basename(raw_reader.name)
subprocess.run(["mktemp", filedest], stdout=subprocess.DEVNULL)
subprocess.run(["cp", raw_reader.name, filedest])
subprocess.run(["chmod", "666", filedest])
copy_query = "COPY %s (%s) FROM '%s' WITH FREEZE NULL 'NA' DELIMITER E'\t' CSV HEADER" % (tablekey, all_tablecolumns, filedest)
try:
db.execute(copy_query)
print("...Success")
finally:
subprocess.run(["rm", filedest])
col_val_query = "SELECT "
for col_name in column_names:
col_val_query += "(select %s from %s where %s is not null limit 1), "%(col_name, tablekey, col_name)
col_val_query = col_val_query[:-2]
col_values = db.prepare(col_val_query)
values = col_values()[0]
update = "ALTER TABLE %s " % tablekey
row = dict(zip(col_values.column_names, values))
(mapping, formatted, changes) = column_mapping(row, mapping, current_app.config['schema'])
if len(changes):
#Generate a query to alter the table schema, if any changes are required
alter_cols = []
for (k, v) in changes.items():
# if there were any changes to the data type, update the table
# since we only ever update a text column to int/decimal, then
# it's okay to nullify the data
typ = ''
if v == int:
typ = 'bigint' if k in {'start', 'stop'} else 'integer'
elif v == float:
typ = 'decimal'
alter_cols.append(
"ALTER COLUMN %s SET DATA TYPE %s USING %s::%s" % (
k, typ, k, typ
)
)
print("Alter:", update + ','.join(alter_cols))
db.execute(update + ','.join(alter_cols))
def create_table(parentID, fileID, data, tablekey, db):
# Open a reader to cache the file in the database
if parentID != -1:
process = fetch_process(parentID, data, current_app.config['storage']['children'])
if not process[0]:
return (
{
"code": 400,
"message": "The requested process (%d) does not exist" % parentID,
"fields": "parentID"
}, 400
)
if is_running(process):
return (
{
"code": 400,
"message": "The requested process (%d) is still running" % parentID,
"fields": "parentID"
}, 400
)
if str(fileID) not in process[0]['files']:
return (
{
"code": 400,
"message": "The requested fileID (%s) does not exist for this process (%d)" % (fileID, parentID),
"fields": "fileID"
}, 400
)
raw_reader = open(process[0]['files'][fileID]['fullname'])
else:
if str(fileID) not in data['visualize']:
return (
{
"code": 400,
"message": "The requested fileID (%s) does not exist in the visualize" % fileID,
"fields": "fileID"
}, 400
)
raw_reader = open(data['visualize'][str(fileID)]['fullname'])
if not raw_reader.name.endswith('.tsv'):
ext = os.path.splitext(raw_reader.name)[1].lower()
if len(ext) and ext[0] == '.':
ext = ext[1:]
return serve_as(raw_reader, ext)
reader = csv.DictReader(raw_reader, delimiter='\t')
tmp_reader = open(raw_reader.name)
tmp = csv.DictReader(tmp_reader, delimiter='\t')
try:
init = next(tmp)
except StopIteration:
return []
tmp_reader.close()
# Get an initial estimate of column datatypes from the first row
(mapping, column_names) = init_column_mapping(init, current_app.config['schema'])
tablecolumns = "\n".join( # use the estimated types to create the table
"%s %s," % (colname, column_names[colname])
for colname in column_names
)[:-1]
CREATE_TABLE = "CREATE TABLE %s (\
rowid SERIAL PRIMARY KEY NOT NULL,\
%s\
)" % (tablekey, tablecolumns)
all_tablecolumns = ', '.join(column_filter(col) for col in reader.fieldnames)
try:
table_transaction(True, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping)
except psql.exceptions.UniqueError: #If another transaction already created specified table, pass
pass
except psql.exceptions.InsufficientPrivilegeError as e:
#can occur when postgres user unable to open file due to permissions; specifically for travis-ci tests
#check if resulting from postgres user permissions
if e.args[0].startswith("must be superuser"):
print("WARNING: Postgres user is not a super user; visualization time may be slow")
old_file_read(db, CREATE_TABLE, tablekey, column_names, reader, mapping) #use inefficient file-read-to-db method
else:
#attempt to resolve by copying file to /tmp/, changing its permissions, and accessing it there
try:
print("InsufficientPrivilegeError raised in accessing file.\nAttempting workaround...")
table_transaction(False, db, CREATE_TABLE, tablekey, all_tablecolumns, raw_reader, column_names, mapping)
except psql.exceptions.InsufficientPrivilegeError:
print("Postgres could not access file. Check to make sure that both the "
"file and your current postgres user has the appropriate permissions.")
raise
raw_reader.close()
def filterfile(parentID, fileID, count, page, filters, sort, direction):
"""Gets the file ID belonging to the parent.\
For result files, the parentID is the process ID that spawned them.\
For visualize files, the parentID is -1"""
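    # Illustrative call (hypothetical values):
    #   filterfile(-1, 4, count=10, page=0, filters=['depth>=20'],
    #              sort='rowid', direction='ASC')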
data = current_app.config['storage']['loader']()
# first, generate the key
tablekey = "data_%s_%s" % (
(parentID if parentID >= 0 else 'visualize'),
fileID
)
# check if the table exists:
db = psql.open("localhost/pvacseq")
fileID = str(fileID)
with db.xact():
query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
response = query(tablekey)
if not len(response): # table does not exist
table_errors = create_table(parentID, fileID, data, tablekey, db)
            if table_errors is not None:
return table_errors
#with db.synchronizer:
# test_query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
# test_response = query(tablekey)
with db.xact():
typequery = db.prepare(
"SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1"
)
column_defs = typequery(tablekey)
column_maps = {}
for (col, typ) in column_defs:
if 'int' in typ:
column_maps[col] = int
        elif typ == 'numeric' or typ == 'decimal':
column_maps[col] = float
else:
column_maps[col] = str
formatted_filters = []
for i in range(len(filters)):
f = filters[i].strip()
if not len(f):
continue
result = queryfilters.match(f)
if not result:
return ({
"code": 400,
"message": "Encountered an invalid filter (%s)" % f,
"fields": "filters"
}, 400)
colname = column_filter(result.group(1))
if colname not in column_maps:
return ({
"code": 400,
"message": "Unknown column name %s" % result.group(1),
"fields": "filters"
}, 400)
op = result.group(2)
typ = column_maps[colname]
val = None
try:
val = column_maps[colname](
result.group(3)
)
except ValueError:
return ({
"code": 400,
"message": "Value %s cannot be formatted to match the type of column %s (%s)" % (
result.group(3),
result.group(1),
typ
)
}, 400)
if typ == str and (op in {'==', '!='}):
formatted_filters.append(
json.dumps(colname) + (' not ' if '!' in op else ' ') + "LIKE '%s'" % (
json.dumps(val)[1:-1]
)
)
else: # type is numerical
op = op.replace('==', '=')
formatted_filters.append(
'%s %s %s' % (
json.dumps(colname),
op,
json.dumps(val)
)
)
raw_query = "SELECT %s FROM %s" % (
','.join([k[0] for k in column_defs]),
tablekey
)
if len(formatted_filters):
raw_query += " WHERE " + " AND ".join(formatted_filters)
if sort:
if column_filter(sort) not in column_maps:
return ({
'code': 400,
'message': 'Invalid column name %s' % sort,
'fields': 'sort'
}, 400)
raw_query += " ORDER BY %s" % (column_filter(sort))
if direction:
raw_query += " " + direction
if count:
raw_query += " LIMIT %d" % count
if page:
raw_query += " OFFSET %d" % (page * count)
print("Query:", raw_query)
import decimal
with db.xact('SERIALIZABLE', 'READ ONLY DEFERRABLE'):
query = db.prepare(raw_query)
decimalizer = lambda x: (float(x) if type(x) == decimal.Decimal else x)
result = [
{
colname: decimalizer(value) for (colname, value) in zip(
[k[0] for k in column_defs],
[val for val in row]
)
} for row in query.rows()
]
db.close()
return result
def fileschema(parentID, fileID):
data = current_app.config['storage']['loader']()
tablekey = "data_%s_%s" % (
(parentID if parentID >= 0 else 'visualize'),
fileID
)
# check if the table exists:
db = psql.open("localhost/pvacseq")
with db.xact():
query = db.prepare("SELECT 1 FROM information_schema.tables WHERE table_name = $1")
if not len(query(tablekey)): # table does not exist
return ({
'code': 400,
'message': "The requested file has not been loaded into the Postgres database",
'fields': "fileID"
}, 400)
typequery = db.prepare("SELECT column_name, data_type FROM information_schema.columns WHERE table_name = $1")
result = {
key: val for (key, val) in typequery(tablekey)
}
db.close()
return result
def serve_as(reader, filetype):
if filetype == 'json':
return {
'filetype':'json',
'content':json.load(reader)
}
elif filetype == 'yaml' or filetype == 'yml':
return {
'filetype':'yaml',
            'content':yaml.safe_load(reader.read())
}
elif filetype == 'log':
return {
'filetype':'log',
'content':[line.rstrip() for line in reader.readlines()]
}
else:
return {
'filetype':'raw',
'content':reader.read()
}
def visualize(parentID, fileID):
vis = visualize_script(parentID, fileID)
    return '<html><head></head><body>%s</body></html>' % (vis if type(vis) != tuple else vis[0])
def visualize_script(parentID, fileID):
"""Return an HTML document containing the requested table visualization"""
from .files import results_getcols
data = current_app.config['storage']['loader']()
#first call filterfile to load the table if it's not loaded already
result = filterfile(parentID, fileID, 1, 0, '', 'rowid', 'ASC')
if type(result) != list:
return (
{
'code':400,
'message':json.dumps(result),
'fields':'unknown',
},
400
)
if len(result) == 0 or type(result) == dict:
return (
'Results file contains no data - cannot visualize'
)
cols = results_getcols(parentID, fileID)
if type(cols) != dict:
return (
{
'code':400,
'message':json.dumps(cols),
'fields':'unknown'
},
400
)
proc_data = process_info(parentID)
if type(proc_data)==dict and 'parameters' in proc_data and 'sample_name' in proc_data['parameters']:
sample = proc_data['parameters']['sample_name']
elif parentID == -1:
sample = data['visualize'][str(fileID)]['display_name'].rsplit(".", 1)[0]
else:
sample = 'Unknown Sample'
if current_app.PROXY_IP_ADDRESS is not None:
IP = current_app.PROXY_IP_ADDRESS
else:
IP = current_app.IP_ADDRESS
return (
server_document(
url="http://" + IP + ":5006/visualizations",
arguments={
'target-process': parentID,
'target-file': fileID,
'cols': json.dumps(cols),
'samplename': sample
}
)
)
| [
"csv.DictReader",
"re.compile",
"subprocess.run",
"json.dumps",
"os.path.splitext",
"postgresql.open",
"os.path.basename",
"json.load"
] | [((378, 405), 're.compile', 're.compile', (['"""^\\\\d*\\\\.\\\\d+$"""'], {}), "('^\\\\d*\\\\.\\\\d+$')\n", (388, 405), False, 'import re\n'), ((418, 440), 're.compile', 're.compile', (['"""^-?\\\\d+$"""'], {}), "('^-?\\\\d+$')\n", (428, 440), False, 'import re\n'), ((454, 472), 're.compile', 're.compile', (['"""^NA$"""'], {}), "('^NA$')\n", (464, 472), False, 'import re\n'), ((489, 526), 're.compile', 're.compile', (['"""(.+)(<=?|>=?|!=|==)(.+)"""'], {}), "('(.+)(<=?|>=?|!=|==)(.+)')\n", (499, 526), False, 'import re\n'), ((9870, 9912), 'csv.DictReader', 'csv.DictReader', (['raw_reader'], {'delimiter': '"""\t"""'}), "(raw_reader, delimiter='\\t')\n", (9884, 9912), False, 'import csv\n'), ((9963, 10005), 'csv.DictReader', 'csv.DictReader', (['tmp_reader'], {'delimiter': '"""\t"""'}), "(tmp_reader, delimiter='\\t')\n", (9977, 10005), False, 'import csv\n'), ((12525, 12555), 'postgresql.open', 'psql.open', (['"""localhost/pvacseq"""'], {}), "('localhost/pvacseq')\n", (12534, 12555), True, 'import postgresql as psql\n'), ((16663, 16693), 'postgresql.open', 'psql.open', (['"""localhost/pvacseq"""'], {}), "('localhost/pvacseq')\n", (16672, 16693), True, 'import postgresql as psql\n'), ((6251, 6314), 'subprocess.run', 'subprocess.run', (["['mktemp', filedest]"], {'stdout': 'subprocess.DEVNULL'}), "(['mktemp', filedest], stdout=subprocess.DEVNULL)\n", (6265, 6314), False, 'import subprocess\n'), ((6327, 6376), 'subprocess.run', 'subprocess.run', (["['cp', raw_reader.name, filedest]"], {}), "(['cp', raw_reader.name, filedest])\n", (6341, 6376), False, 'import subprocess\n'), ((6389, 6431), 'subprocess.run', 'subprocess.run', (["['chmod', '666', filedest]"], {}), "(['chmod', '666', filedest])\n", (6403, 6431), False, 'import subprocess\n'), ((17438, 17455), 'json.load', 'json.load', (['reader'], {}), '(reader)\n', (17447, 17455), False, 'import json\n'), ((6205, 6238), 'os.path.basename', 'os.path.basename', (['raw_reader.name'], {}), '(raw_reader.name)\n', (6221, 6238), False, 'import os\n'), ((6701, 6733), 'subprocess.run', 'subprocess.run', (["['rm', filedest]"], {}), "(['rm', filedest])\n", (6715, 6733), False, 'import subprocess\n'), ((18515, 18533), 'json.dumps', 'json.dumps', (['result'], {}), '(result)\n', (18525, 18533), False, 'import json\n'), ((18908, 18924), 'json.dumps', 'json.dumps', (['cols'], {}), '(cols)\n', (18918, 18924), False, 'import json\n'), ((19720, 19736), 'json.dumps', 'json.dumps', (['cols'], {}), '(cols)\n', (19730, 19736), False, 'import json\n'), ((9706, 9739), 'os.path.splitext', 'os.path.splitext', (['raw_reader.name'], {}), '(raw_reader.name)\n', (9722, 9739), False, 'import os\n'), ((14805, 14824), 'json.dumps', 'json.dumps', (['colname'], {}), '(colname)\n', (14815, 14824), False, 'import json\n'), ((15114, 15133), 'json.dumps', 'json.dumps', (['colname'], {}), '(colname)\n', (15124, 15133), False, 'import json\n'), ((15179, 15194), 'json.dumps', 'json.dumps', (['val'], {}), '(val)\n', (15189, 15194), False, 'import json\n'), ((14897, 14912), 'json.dumps', 'json.dumps', (['val'], {}), '(val)\n', (14907, 14912), False, 'import json\n')] |
import numpy as np
import chainer
from chainer import cuda, Function, gradient_check, Variable
from chainer import optimizers, serializers, utils
from chainer import Link, Chain, ChainList
import chainer.functions as F
import chainer.links as L
class normalNN(Chain):
def __init__(self, dim):
super().__init__(
l1=L.Linear(dim, 100),
l2=L.Linear(100, 1),
)
self.af = F.relu
def __call__(self, x):
h = self.l1(x)
h = self.af(h)
h = self.l2(h)
return h
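# Minimal usage sketch (illustrative only; assumes float32 NumPy inputs):
#   model = normalNN(dim=10)
#   x = np.zeros((4, 10), dtype=np.float32)
#   score = model(x)  # Variable of shape (4, 1)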
class MultiLayerPerceptron(Chain):
def __init__(self, dim):
super(MultiLayerPerceptron, self).__init__(
l1=L.Linear(dim, 300, nobias=True),
b1=L.BatchNormalization(300),
l2=L.Linear(300, 300, nobias=True),
b2=L.BatchNormalization(300),
l3=L.Linear(300, 300, nobias=True),
b3=L.BatchNormalization(300),
l4=L.Linear(300, 300, nobias=True),
b4=L.BatchNormalization(300),
l5=L.Linear(300, 1))
self.af = F.relu
def __call__(self, x):
h = self.l1(x)
h = self.b1(h)
h = self.af(h)
h = self.l2(h)
h = self.b2(h)
h = self.af(h)
h = self.l3(h)
h = self.b3(h)
h = self.af(h)
h = self.l4(h)
h = self.b4(h)
h = self.af(h)
h = self.l5(h)
return h
class CNN(Chain):
def __init__(self, dim):
super(CNN, self).__init__(
conv1=L.Convolution2D(3, 96, 3, pad=1),
conv2=L.Convolution2D(96, 96, 3, pad=1),
conv3=L.Convolution2D(96, 96, 3, pad=1, stride=2),
conv4=L.Convolution2D(96, 192, 3, pad=1),
conv5=L.Convolution2D(192, 192, 3, pad=1),
conv6=L.Convolution2D(192, 192, 3, pad=1, stride=2),
conv7=L.Convolution2D(192, 192, 3, pad=1),
conv8=L.Convolution2D(192, 192, 1),
conv9=L.Convolution2D(192, 10, 1),
b1=L.BatchNormalization(96),
b2=L.BatchNormalization(96),
b3=L.BatchNormalization(96),
b4=L.BatchNormalization(192),
b5=L.BatchNormalization(192),
b6=L.BatchNormalization(192),
b7=L.BatchNormalization(192),
b8=L.BatchNormalization(192),
b9=L.BatchNormalization(10),
fc1=L.Linear(None, 1000),
fc2=L.Linear(1000, 1000),
fc3=L.Linear(1000, 1),
)
self.af = F.relu
def __call__(self, x):
h = self.conv1(x)
h = self.b1(h)
h = self.af(h)
h = self.conv2(h)
h = self.b2(h)
h = self.af(h)
h = self.conv3(h)
h = self.b3(h)
h = self.af(h)
h = self.conv4(h)
h = self.b4(h)
h = self.af(h)
h = self.conv5(h)
h = self.b5(h)
h = self.af(h)
h = self.conv6(h)
h = self.b6(h)
h = self.af(h)
h = self.conv7(h)
h = self.b7(h)
h = self.af(h)
h = self.conv8(h)
h = self.b8(h)
h = self.af(h)
h = self.conv9(h)
h = self.b9(h)
h = self.af(h)
h = self.fc1(h)
h = self.af(h)
h = self.fc2(h)
h = self.af(h)
h = self.fc3(h)
return h
| [
"chainer.links.BatchNormalization",
"chainer.links.Linear",
"chainer.links.Convolution2D"
] | [((340, 358), 'chainer.links.Linear', 'L.Linear', (['dim', '(100)'], {}), '(dim, 100)\n', (348, 358), True, 'import chainer.links as L\n'), ((375, 391), 'chainer.links.Linear', 'L.Linear', (['(100)', '(1)'], {}), '(100, 1)\n', (383, 391), True, 'import chainer.links as L\n'), ((674, 705), 'chainer.links.Linear', 'L.Linear', (['dim', '(300)'], {'nobias': '(True)'}), '(dim, 300, nobias=True)\n', (682, 705), True, 'import chainer.links as L\n'), ((722, 747), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(300)'], {}), '(300)\n', (742, 747), True, 'import chainer.links as L\n'), ((764, 795), 'chainer.links.Linear', 'L.Linear', (['(300)', '(300)'], {'nobias': '(True)'}), '(300, 300, nobias=True)\n', (772, 795), True, 'import chainer.links as L\n'), ((812, 837), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(300)'], {}), '(300)\n', (832, 837), True, 'import chainer.links as L\n'), ((854, 885), 'chainer.links.Linear', 'L.Linear', (['(300)', '(300)'], {'nobias': '(True)'}), '(300, 300, nobias=True)\n', (862, 885), True, 'import chainer.links as L\n'), ((902, 927), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(300)'], {}), '(300)\n', (922, 927), True, 'import chainer.links as L\n'), ((944, 975), 'chainer.links.Linear', 'L.Linear', (['(300)', '(300)'], {'nobias': '(True)'}), '(300, 300, nobias=True)\n', (952, 975), True, 'import chainer.links as L\n'), ((992, 1017), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(300)'], {}), '(300)\n', (1012, 1017), True, 'import chainer.links as L\n'), ((1034, 1050), 'chainer.links.Linear', 'L.Linear', (['(300)', '(1)'], {}), '(300, 1)\n', (1042, 1050), True, 'import chainer.links as L\n'), ((1522, 1554), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(3)', '(96)', '(3)'], {'pad': '(1)'}), '(3, 96, 3, pad=1)\n', (1537, 1554), True, 'import chainer.links as L\n'), ((1574, 1607), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(96)', '(96)', '(3)'], {'pad': '(1)'}), '(96, 96, 3, pad=1)\n', (1589, 1607), True, 'import chainer.links as L\n'), ((1627, 1670), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(96)', '(96)', '(3)'], {'pad': '(1)', 'stride': '(2)'}), '(96, 96, 3, pad=1, stride=2)\n', (1642, 1670), True, 'import chainer.links as L\n'), ((1690, 1724), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(96)', '(192)', '(3)'], {'pad': '(1)'}), '(96, 192, 3, pad=1)\n', (1705, 1724), True, 'import chainer.links as L\n'), ((1744, 1779), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(192)', '(192)', '(3)'], {'pad': '(1)'}), '(192, 192, 3, pad=1)\n', (1759, 1779), True, 'import chainer.links as L\n'), ((1799, 1844), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(192)', '(192)', '(3)'], {'pad': '(1)', 'stride': '(2)'}), '(192, 192, 3, pad=1, stride=2)\n', (1814, 1844), True, 'import chainer.links as L\n'), ((1864, 1899), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(192)', '(192)', '(3)'], {'pad': '(1)'}), '(192, 192, 3, pad=1)\n', (1879, 1899), True, 'import chainer.links as L\n'), ((1919, 1947), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(192)', '(192)', '(1)'], {}), '(192, 192, 1)\n', (1934, 1947), True, 'import chainer.links as L\n'), ((1967, 1994), 'chainer.links.Convolution2D', 'L.Convolution2D', (['(192)', '(10)', '(1)'], {}), '(192, 10, 1)\n', (1982, 1994), True, 'import chainer.links as L\n'), ((2011, 2035), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(96)'], {}), '(96)\n', (2031, 2035), True, 'import chainer.links as 
L\n'), ((2052, 2076), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(96)'], {}), '(96)\n', (2072, 2076), True, 'import chainer.links as L\n'), ((2093, 2117), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(96)'], {}), '(96)\n', (2113, 2117), True, 'import chainer.links as L\n'), ((2134, 2159), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(192)'], {}), '(192)\n', (2154, 2159), True, 'import chainer.links as L\n'), ((2176, 2201), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(192)'], {}), '(192)\n', (2196, 2201), True, 'import chainer.links as L\n'), ((2218, 2243), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(192)'], {}), '(192)\n', (2238, 2243), True, 'import chainer.links as L\n'), ((2260, 2285), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(192)'], {}), '(192)\n', (2280, 2285), True, 'import chainer.links as L\n'), ((2302, 2327), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(192)'], {}), '(192)\n', (2322, 2327), True, 'import chainer.links as L\n'), ((2344, 2368), 'chainer.links.BatchNormalization', 'L.BatchNormalization', (['(10)'], {}), '(10)\n', (2364, 2368), True, 'import chainer.links as L\n'), ((2386, 2406), 'chainer.links.Linear', 'L.Linear', (['None', '(1000)'], {}), '(None, 1000)\n', (2394, 2406), True, 'import chainer.links as L\n'), ((2424, 2444), 'chainer.links.Linear', 'L.Linear', (['(1000)', '(1000)'], {}), '(1000, 1000)\n', (2432, 2444), True, 'import chainer.links as L\n'), ((2462, 2479), 'chainer.links.Linear', 'L.Linear', (['(1000)', '(1)'], {}), '(1000, 1)\n', (2470, 2479), True, 'import chainer.links as L\n')] |
""" This script is needed to convert gdb scripts from commands to documentation
"""
import os
def convert_commands_to_docs():
commands_dir = os.getcwd() + "/numba_dppy/examples/debug/commands"
examples = os.listdir(commands_dir)
os.chdir(commands_dir + "/docs")
for file in examples:
if file != "docs":
            with open(commands_dir + "/" + file, "r") as open_file:
                read_lines = open_file.readlines()
            if os.path.exists(file):
                os.remove(file)
            with open(file, "a") as write_file:
                for line in read_lines:
                    if (
                        line.startswith("# Expected")
                        or line.startswith("echo Done")
                        or line.startswith("quit")
                        or line.startswith("set trace-commands")
                        or line.startswith("set pagination")
                    ):
                        continue
                    if line.startswith("# Run: "):
                        line = line.replace("# Run:", "$")
                        words = line.split()
                        for i in range(len(words)):
                            if words[i] == "-command" or words[i].startswith("commands"):
                                words[i] = ""
                        line = " ".join(words)
                        line = " ".join(line.split()) + "\n"
                    elif line.startswith("# "):
                        line = line.replace("# ", "")
                    else:
                        line = "(gdb) " + line
                    write_file.write(line)
if __name__ == "__main__":
convert_commands_to_docs()
| [
"os.path.exists",
"os.listdir",
"os.getcwd",
"os.chdir",
"os.remove"
] | [((214, 238), 'os.listdir', 'os.listdir', (['commands_dir'], {}), '(commands_dir)\n', (224, 238), False, 'import os\n'), ((243, 275), 'os.chdir', 'os.chdir', (["(commands_dir + '/docs')"], {}), "(commands_dir + '/docs')\n", (251, 275), False, 'import os\n'), ((147, 158), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (156, 158), False, 'import os\n'), ((452, 472), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (466, 472), False, 'import os\n'), ((490, 505), 'os.remove', 'os.remove', (['file'], {}), '(file)\n', (499, 505), False, 'import os\n')] |
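# A quick illustration of the "# Run:" rewrite rule in convert_commands_to_docs()
# above, applied to a made-up input line (the sample command is an assumption,
# not taken from the real command files):
line = "# Run: gdb -q -command commands/backtrace python\n"  # hypothetical input
line = line.replace("# Run:", "$")
words = line.split()
for i in range(len(words)):
    if words[i] == "-command" or words[i].startswith("commands"):
        words[i] = ""
line = " ".join(" ".join(words).split()) + "\n"
print(line)  # prints: $ gdb -q python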
import json
import requests
from typing import List
from konlpy.tag import Okt
from requests.models import Response
class OktTokenizer:
"""
A POS-tagger based tokenizer functor. Note that these are just examples. The `phrases` function usually gives a better result than an ordinary POS tokenizer.
Example:
tokenizer: OktTokenizer = OktTokenizer()
tokens: List[str] = tokenizer(your_text_here)
"""
okt: Okt = Okt()
def __call__(self, text: str) -> List[str]:
tokens: List[str] = self.okt.phrases(text)
return tokens
class ApiTokenizer:
"""
    An API-based tokenizer functor. It assumes the response body is a JSON-encoded list of `str` tokens.
Example:
tokenizer: ApiTokenizer = ApiTokenizer()
tokens: List[str] = tokenizer(your_text_here)
"""
def __init__(self, endpoint: str) -> None:
self.endpoint: str = endpoint
def __call__(self, text: str) -> List[str]:
body: bytes = text.encode('utf-8')
res: Response = requests.post(self.endpoint, data=body)
tokens: List[str] = json.loads(res.text)
return tokens
| [
"konlpy.tag.Okt",
"json.loads",
"requests.post"
] | [((467, 472), 'konlpy.tag.Okt', 'Okt', ([], {}), '()\n', (470, 472), False, 'from konlpy.tag import Okt\n'), ((1098, 1137), 'requests.post', 'requests.post', (['self.endpoint'], {'data': 'body'}), '(self.endpoint, data=body)\n', (1111, 1137), False, 'import requests\n'), ((1166, 1186), 'json.loads', 'json.loads', (['res.text'], {}), '(res.text)\n', (1176, 1186), False, 'import json\n')] |
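# A hedged usage sketch for the ApiTokenizer above. The endpoint URL is a
# placeholder; the server is assumed to answer a UTF-8 POST body with a
# JSON-encoded list of string tokens, as the class requires.
tokenizer = ApiTokenizer("http://localhost:8000/tokenize")  # hypothetical endpoint
tokens = tokenizer("아버지가 방에 들어가신다")
print(tokens)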
from .alexnet import alexnet_V2
import tensorflow.compat.v1 as tf
import tensorflow.contrib.slim as slim
from utils import montage_tf
from .lci_nets import patch_inpainter, patch_discriminator
import tensorflow.contrib as contrib
# Average pooling params for imagenet linear classifier experiments
AVG_POOL_PARAMS = {'conv_1': (6, 6, 'SAME'), 'conv_2': (4, 4, 'VALID'), 'conv_3': (3, 3, 'SAME'),
'conv_4': (3, 3, 'SAME'), 'conv_5': (2, 2, 'VALID')}
class TRCNet:
def __init__(self, batch_size, im_shape, n_tr_classes=6, lci_patch_sz=64, lci_crop_sz=80, ae_dim=48, n_layers_lci=5,
tag='default', feats_ids=None, feat_pool='AVG', enc_params=None):
if enc_params is None:
enc_params = {}
self.name = 'TRNet_{}'.format(tag)
self.n_tr_classes = n_tr_classes
self.batch_size = batch_size
self.im_shape = im_shape
self.feats_IDs = feats_ids
self.feat_pool = feat_pool
self.enc_params = enc_params
self.lci_patch_sz = lci_patch_sz
self.lci_crop_sz = lci_crop_sz
self.num_LCI_layers = n_layers_lci
self.ae_model = patch_inpainter
self.class_model = alexnet_V2
self.disc_model = patch_discriminator
self.ae_dim = ae_dim
def lci(self, img, enc_scope, dec_scope):
# Extract random patch
patch, jit_x, jit_y = random_crop(img, crop_sz=(self.lci_crop_sz, self.lci_crop_sz))
# Erase the center of the patch
patch_erased, mask_erase = patch_erase(patch, patch_sz=(self.lci_patch_sz, self.lci_patch_sz))
tf.summary.image('imgs/patch_erased', montage_tf(patch_erased, 4, 8), max_outputs=1)
# Perform inpainting/autoencoding
net_in = tf.concat([patch, patch_erased], 0)
net_out, _ = self.ae_model(net_in, depth=self.ae_dim, num_layers=self.num_LCI_layers,
encoder_scope=enc_scope, decoder_scope=dec_scope)
patch_ae, patch_ip = tf.split(net_out, 2)
# Paste inpainted patches
pasted_patch_inpaint, patch_mask = paste_crop(img, patch_ip, jit_x, jit_y)
pasted_patch_ae, _ = paste_crop(img, patch_ae, jit_x, jit_y)
img_lci = img * (1. - patch_mask) + pasted_patch_inpaint
img_patchae = img * (1. - patch_mask) + pasted_patch_ae
return patch_ip, patch_ae, mask_erase, tf.ones_like(mask_erase), patch, img_lci, img_patchae
def ssl_net(self, net, reuse=None, training=True, scope='encoder'):
return self.class_model(net, self.n_tr_classes, reuse, training, scope, **self.enc_params)
def net(self, img, reuse=tf.AUTO_REUSE, training=True):
preds, _ = self.ssl_net(img, reuse, training, scope='features')
return preds
def linear_classifiers(self, img, num_classes, training, reuse=None):
_, feats = self.ssl_net(img, training=False, scope='features')
preds_list = []
with tf.variable_scope('classifier', reuse=reuse):
for feats_id in self.feats_IDs:
p = AVG_POOL_PARAMS[feats_id]
if self.feat_pool == 'AVG':
class_in = slim.avg_pool2d(feats[feats_id], p[0], p[1], p[2])
elif self.feat_pool == 'None':
class_in = feats[feats_id]
print('{} linear classifier input shape: {}'.format(feats_id, class_in.get_shape().as_list()))
preds = linear_classifier(class_in, num_classes, reuse, training, scope=feats_id, wd=5e-4)
preds_list.append(preds)
return preds_list
def patch_disc(self, input, update_collection, disc_scope):
in_1, in_2 = tf.split(input, 2)
input = tf.concat([in_1, in_2], -1)
model, _ = self.disc_model(input,
update_collection=update_collection,
num_layers=self.num_LCI_layers - 1,
scope=disc_scope)
return model
def linear_class_loss(self, scope, preds, labels):
total_loss = 0.
for pred, f_id in zip(preds, self.feats_IDs):
loss = tf.losses.softmax_cross_entropy(labels, pred, scope=scope)
tf.summary.scalar('losses/SCE_{}'.format(f_id), loss)
total_loss += loss
# Compute accuracy
predictions = tf.argmax(pred, 1)
tf.summary.scalar('accuracy/train_accuracy_{}'.format(f_id),
slim.metrics.accuracy(predictions, tf.argmax(labels, 1)))
loss_wd = tf.add_n(tf.losses.get_regularization_losses(), name='loss_wd')
tf.summary.scalar('losses/loss_wd', loss_wd)
total_loss = total_loss + loss_wd
return total_loss
def inpainter_loss(self, preads_fake, imgs, recs_erase, mask_erase, recs_orig, mask_orig):
loss_fake = -tf.reduce_mean(preads_fake)
tf.summary.scalar('losses/generator_fake_loss', loss_fake)
loss_ae_erase = tf.losses.mean_squared_error(imgs, recs_erase, weights=50. * mask_erase)
loss_ae_orig = tf.losses.mean_squared_error(imgs, recs_orig, weights=50. * mask_orig)
tf.summary.scalar('losses/loss_ae_erase', loss_ae_erase)
tf.summary.scalar('losses/loss_ae_orig', loss_ae_orig)
return loss_fake + loss_ae_erase + loss_ae_orig
def discriminator_loss(self, preds_fake, preds_real):
loss_real = tf.reduce_mean(tf.nn.relu(1. - preds_real))
loss_fake = tf.reduce_mean(tf.nn.relu(1. + preds_fake))
loss = loss_real + loss_fake
tf.summary.scalar('losses/disc_fake_loss', loss_fake)
tf.summary.scalar('losses/disc_real_loss', loss_real)
tf.summary.scalar('losses/disc_total_loss', loss)
return loss
def loss_ssl(self, preds, labels):
# Define the loss
loss = tf.losses.softmax_cross_entropy(labels, preds)
tf.summary.scalar('losses/SCE', loss)
# Compute accuracy
predictions = tf.argmax(preds, 1)
tf.summary.scalar('accuracy/train_accuracy',
slim.metrics.accuracy(predictions, tf.argmax(labels, 1)))
bs = self.batch_size
tf.summary.scalar('accuracy/train_accuracy_real_noae',
slim.metrics.accuracy(predictions[:bs // 2], tf.argmax(labels[:bs // 2], 1)))
tf.summary.scalar('accuracy/train_accuracy_real_ae',
slim.metrics.accuracy(predictions[bs // 2:bs], tf.argmax(labels[bs // 2:bs], 1)))
tf.summary.scalar('accuracy/train_accuracy_lci',
slim.metrics.accuracy(predictions[bs:2 * bs], tf.argmax(labels[bs:2 * bs], 1)))
tf.summary.scalar('accuracy/train_accuracy_rot',
slim.metrics.accuracy(predictions[2 * bs:-bs], tf.argmax(labels[2 * bs:-bs], 1)))
tf.summary.scalar('accuracy/train_accuracy_warp',
slim.metrics.accuracy(predictions[-bs:], tf.argmax(labels[-bs:], 1)))
return loss
def loss_lci_adv(self, preds, labels_tf):
loss = tf.losses.softmax_cross_entropy(labels_tf, preds)
return loss
def linear_classifier(net, num_out, reuse=None, training=True, scope='classifier', wd=5e-4):
with tf.variable_scope(scope, reuse=reuse):
net = slim.batch_norm(net, decay=0.975, is_training=training, fused=True, center=False, scale=False)
net = slim.flatten(net)
net = slim.fully_connected(net, num_out,
weights_initializer=contrib.layers.variance_scaling_initializer(),
weights_regularizer=slim.l2_regularizer(wd),
activation_fn=None, normalizer_fn=None)
return net
def patch_erase(img, patch_sz=(16, 16)):
im_shape = img.get_shape()
pad_sz = [im_shape[1] - patch_sz[0], im_shape[2] - patch_sz[1]]
patch_mask = tf.ones([im_shape[0], patch_sz[0], patch_sz[1], im_shape[3]])
patch_mask = tf.pad(patch_mask,
[[0, 0], [pad_sz[0] // 2, pad_sz[0] // 2], [pad_sz[1] // 2, pad_sz[1] // 2], [0, 0]])
return img * (1. - patch_mask) + 0.1 * patch_mask * tf.random_normal(im_shape), 1. - patch_mask
def random_crop(img, crop_sz=(20, 20)):
im_shape = img.get_shape().as_list()
bsz = im_shape[0]
dx = (im_shape[1] - crop_sz[0]) // 2
dy = (im_shape[2] - crop_sz[1]) // 2
base = tf.constant(
[1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
base = tf.tile(base, [bsz, 1])
mask_x = tf.constant(
[0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_x = tf.tile(mask_x, [bsz, 1])
mask_y = tf.constant(
[0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_y = tf.tile(mask_y, [bsz, 1])
jit_x = tf.random_uniform([bsz, 8], minval=-dx + 1, maxval=dx, dtype=tf.int32)
jit_x = tf.cast(jit_x, tf.float32)
jit_y = tf.random_uniform([bsz, 8], minval=-dy + 1, maxval=dy, dtype=tf.int32)
jit_y = tf.cast(jit_y, tf.float32)
xforms = base + jit_x * mask_x + jit_y * mask_y
processed_data = contrib.image.transform(
images=img, transforms=xforms
)
cropped_data = processed_data[:, dx:dx + crop_sz[0], dy:dy + crop_sz[1], :]
return cropped_data, jit_x, jit_y
def paste_crop(img, crop, jit_x, jit_y):
im_shape = tf.shape(img)
crop_shape = tf.shape(crop)
bsz = im_shape[0]
dx_1 = (im_shape[1] - crop_shape[1]) // 2
dy_1 = (im_shape[2] - crop_shape[2]) // 2
dx_2 = im_shape[1] - crop_shape[1] - dx_1
dy_2 = im_shape[2] - crop_shape[2] - dy_1
patch_mask = tf.ones_like(crop)
crop = tf.pad(crop, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])
patch_mask = tf.pad(patch_mask, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])
base = tf.constant(
[1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
base = tf.tile(base, [bsz, 1])
mask_x = tf.constant(
[0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_x = tf.tile(mask_x, [bsz, 1])
mask_y = tf.constant(
[0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32
)
mask_y = tf.tile(mask_y, [bsz, 1])
xforms = base - jit_x * mask_x - jit_y * mask_y
transformed_crop = contrib.image.transform(
images=crop, transforms=xforms
)
transformed_mask = contrib.image.transform(
images=patch_mask, transforms=xforms
)
return transformed_crop, transformed_mask
| [
"tensorflow.compat.v1.ones_like",
"tensorflow.compat.v1.shape",
"tensorflow.compat.v1.pad",
"tensorflow.contrib.layers.variance_scaling_initializer",
"tensorflow.compat.v1.losses.mean_squared_error",
"tensorflow.compat.v1.concat",
"tensorflow.compat.v1.nn.relu",
"tensorflow.compat.v1.split",
"tensor... | [((7919, 7980), 'tensorflow.compat.v1.ones', 'tf.ones', (['[im_shape[0], patch_sz[0], patch_sz[1], im_shape[3]]'], {}), '([im_shape[0], patch_sz[0], patch_sz[1], im_shape[3]])\n', (7926, 7980), True, 'import tensorflow.compat.v1 as tf\n'), ((7998, 8106), 'tensorflow.compat.v1.pad', 'tf.pad', (['patch_mask', '[[0, 0], [pad_sz[0] // 2, pad_sz[0] // 2], [pad_sz[1] // 2, pad_sz[1] // 2],\n [0, 0]]'], {}), '(patch_mask, [[0, 0], [pad_sz[0] // 2, pad_sz[0] // 2], [pad_sz[1] //\n 2, pad_sz[1] // 2], [0, 0]])\n', (8004, 8106), True, 'import tensorflow.compat.v1 as tf\n'), ((8427, 8496), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 0, 0, 0, 1, 0, 0, 0]'], {'shape': '[1, 8]', 'dtype': 'tf.float32'}), '([1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32)\n', (8438, 8496), True, 'import tensorflow.compat.v1 as tf\n'), ((8522, 8545), 'tensorflow.compat.v1.tile', 'tf.tile', (['base', '[bsz, 1]'], {}), '(base, [bsz, 1])\n', (8529, 8545), True, 'import tensorflow.compat.v1 as tf\n'), ((8560, 8629), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 0, 1, 0, 0, 0, 0, 0]'], {'shape': '[1, 8]', 'dtype': 'tf.float32'}), '([0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32)\n', (8571, 8629), True, 'import tensorflow.compat.v1 as tf\n'), ((8657, 8682), 'tensorflow.compat.v1.tile', 'tf.tile', (['mask_x', '[bsz, 1]'], {}), '(mask_x, [bsz, 1])\n', (8664, 8682), True, 'import tensorflow.compat.v1 as tf\n'), ((8697, 8766), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 0, 0, 0, 0, 1, 0, 0]'], {'shape': '[1, 8]', 'dtype': 'tf.float32'}), '([0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32)\n', (8708, 8766), True, 'import tensorflow.compat.v1 as tf\n'), ((8794, 8819), 'tensorflow.compat.v1.tile', 'tf.tile', (['mask_y', '[bsz, 1]'], {}), '(mask_y, [bsz, 1])\n', (8801, 8819), True, 'import tensorflow.compat.v1 as tf\n'), ((8833, 8903), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[bsz, 8]'], {'minval': '(-dx + 1)', 'maxval': 'dx', 'dtype': 'tf.int32'}), '([bsz, 8], minval=-dx + 1, maxval=dx, dtype=tf.int32)\n', (8850, 8903), True, 'import tensorflow.compat.v1 as tf\n'), ((8916, 8942), 'tensorflow.compat.v1.cast', 'tf.cast', (['jit_x', 'tf.float32'], {}), '(jit_x, tf.float32)\n', (8923, 8942), True, 'import tensorflow.compat.v1 as tf\n'), ((8956, 9026), 'tensorflow.compat.v1.random_uniform', 'tf.random_uniform', (['[bsz, 8]'], {'minval': '(-dy + 1)', 'maxval': 'dy', 'dtype': 'tf.int32'}), '([bsz, 8], minval=-dy + 1, maxval=dy, dtype=tf.int32)\n', (8973, 9026), True, 'import tensorflow.compat.v1 as tf\n'), ((9039, 9065), 'tensorflow.compat.v1.cast', 'tf.cast', (['jit_y', 'tf.float32'], {}), '(jit_y, tf.float32)\n', (9046, 9065), True, 'import tensorflow.compat.v1 as tf\n'), ((9140, 9194), 'tensorflow.contrib.image.transform', 'contrib.image.transform', ([], {'images': 'img', 'transforms': 'xforms'}), '(images=img, transforms=xforms)\n', (9163, 9194), True, 'import tensorflow.contrib as contrib\n'), ((9385, 9398), 'tensorflow.compat.v1.shape', 'tf.shape', (['img'], {}), '(img)\n', (9393, 9398), True, 'import tensorflow.compat.v1 as tf\n'), ((9416, 9430), 'tensorflow.compat.v1.shape', 'tf.shape', (['crop'], {}), '(crop)\n', (9424, 9430), True, 'import tensorflow.compat.v1 as tf\n'), ((9657, 9675), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['crop'], {}), '(crop)\n', (9669, 9675), True, 'import tensorflow.compat.v1 as tf\n'), ((9687, 9745), 'tensorflow.compat.v1.pad', 'tf.pad', (['crop', '[[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]]'], {}), 
'(crop, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])\n', (9693, 9745), True, 'import tensorflow.compat.v1 as tf\n'), ((9763, 9827), 'tensorflow.compat.v1.pad', 'tf.pad', (['patch_mask', '[[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]]'], {}), '(patch_mask, [[0, 0], [dx_1, dx_2], [dy_1, dy_2], [0, 0]])\n', (9769, 9827), True, 'import tensorflow.compat.v1 as tf\n'), ((9840, 9909), 'tensorflow.compat.v1.constant', 'tf.constant', (['[1, 0, 0, 0, 1, 0, 0, 0]'], {'shape': '[1, 8]', 'dtype': 'tf.float32'}), '([1, 0, 0, 0, 1, 0, 0, 0], shape=[1, 8], dtype=tf.float32)\n', (9851, 9909), True, 'import tensorflow.compat.v1 as tf\n'), ((9935, 9958), 'tensorflow.compat.v1.tile', 'tf.tile', (['base', '[bsz, 1]'], {}), '(base, [bsz, 1])\n', (9942, 9958), True, 'import tensorflow.compat.v1 as tf\n'), ((9973, 10042), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 0, 1, 0, 0, 0, 0, 0]'], {'shape': '[1, 8]', 'dtype': 'tf.float32'}), '([0, 0, 1, 0, 0, 0, 0, 0], shape=[1, 8], dtype=tf.float32)\n', (9984, 10042), True, 'import tensorflow.compat.v1 as tf\n'), ((10070, 10095), 'tensorflow.compat.v1.tile', 'tf.tile', (['mask_x', '[bsz, 1]'], {}), '(mask_x, [bsz, 1])\n', (10077, 10095), True, 'import tensorflow.compat.v1 as tf\n'), ((10110, 10179), 'tensorflow.compat.v1.constant', 'tf.constant', (['[0, 0, 0, 0, 0, 1, 0, 0]'], {'shape': '[1, 8]', 'dtype': 'tf.float32'}), '([0, 0, 0, 0, 0, 1, 0, 0], shape=[1, 8], dtype=tf.float32)\n', (10121, 10179), True, 'import tensorflow.compat.v1 as tf\n'), ((10207, 10232), 'tensorflow.compat.v1.tile', 'tf.tile', (['mask_y', '[bsz, 1]'], {}), '(mask_y, [bsz, 1])\n', (10214, 10232), True, 'import tensorflow.compat.v1 as tf\n'), ((10309, 10364), 'tensorflow.contrib.image.transform', 'contrib.image.transform', ([], {'images': 'crop', 'transforms': 'xforms'}), '(images=crop, transforms=xforms)\n', (10332, 10364), True, 'import tensorflow.contrib as contrib\n'), ((10402, 10463), 'tensorflow.contrib.image.transform', 'contrib.image.transform', ([], {'images': 'patch_mask', 'transforms': 'xforms'}), '(images=patch_mask, transforms=xforms)\n', (10425, 10463), True, 'import tensorflow.contrib as contrib\n'), ((1754, 1789), 'tensorflow.compat.v1.concat', 'tf.concat', (['[patch, patch_erased]', '(0)'], {}), '([patch, patch_erased], 0)\n', (1763, 1789), True, 'import tensorflow.compat.v1 as tf\n'), ((1998, 2018), 'tensorflow.compat.v1.split', 'tf.split', (['net_out', '(2)'], {}), '(net_out, 2)\n', (2006, 2018), True, 'import tensorflow.compat.v1 as tf\n'), ((3676, 3694), 'tensorflow.compat.v1.split', 'tf.split', (['input', '(2)'], {}), '(input, 2)\n', (3684, 3694), True, 'import tensorflow.compat.v1 as tf\n'), ((3711, 3738), 'tensorflow.compat.v1.concat', 'tf.concat', (['[in_1, in_2]', '(-1)'], {}), '([in_1, in_2], -1)\n', (3720, 3738), True, 'import tensorflow.compat.v1 as tf\n'), ((4637, 4681), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/loss_wd"""', 'loss_wd'], {}), "('losses/loss_wd', loss_wd)\n", (4654, 4681), True, 'import tensorflow.compat.v1 as tf\n'), ((4905, 4963), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/generator_fake_loss"""', 'loss_fake'], {}), "('losses/generator_fake_loss', loss_fake)\n", (4922, 4963), True, 'import tensorflow.compat.v1 as tf\n'), ((4989, 5062), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['imgs', 'recs_erase'], {'weights': '(50.0 * mask_erase)'}), '(imgs, recs_erase, weights=50.0 * mask_erase)\n', (5017, 5062), True, 'import tensorflow.compat.v1 as 
tf\n'), ((5085, 5156), 'tensorflow.compat.v1.losses.mean_squared_error', 'tf.losses.mean_squared_error', (['imgs', 'recs_orig'], {'weights': '(50.0 * mask_orig)'}), '(imgs, recs_orig, weights=50.0 * mask_orig)\n', (5113, 5156), True, 'import tensorflow.compat.v1 as tf\n'), ((5165, 5221), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/loss_ae_erase"""', 'loss_ae_erase'], {}), "('losses/loss_ae_erase', loss_ae_erase)\n", (5182, 5221), True, 'import tensorflow.compat.v1 as tf\n'), ((5230, 5284), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/loss_ae_orig"""', 'loss_ae_orig'], {}), "('losses/loss_ae_orig', loss_ae_orig)\n", (5247, 5284), True, 'import tensorflow.compat.v1 as tf\n'), ((5576, 5629), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/disc_fake_loss"""', 'loss_fake'], {}), "('losses/disc_fake_loss', loss_fake)\n", (5593, 5629), True, 'import tensorflow.compat.v1 as tf\n'), ((5638, 5691), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/disc_real_loss"""', 'loss_real'], {}), "('losses/disc_real_loss', loss_real)\n", (5655, 5691), True, 'import tensorflow.compat.v1 as tf\n'), ((5700, 5749), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/disc_total_loss"""', 'loss'], {}), "('losses/disc_total_loss', loss)\n", (5717, 5749), True, 'import tensorflow.compat.v1 as tf\n'), ((5851, 5897), 'tensorflow.compat.v1.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['labels', 'preds'], {}), '(labels, preds)\n', (5882, 5897), True, 'import tensorflow.compat.v1 as tf\n'), ((5906, 5943), 'tensorflow.compat.v1.summary.scalar', 'tf.summary.scalar', (['"""losses/SCE"""', 'loss'], {}), "('losses/SCE', loss)\n", (5923, 5943), True, 'import tensorflow.compat.v1 as tf\n'), ((5994, 6013), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['preds', '(1)'], {}), '(preds, 1)\n', (6003, 6013), True, 'import tensorflow.compat.v1 as tf\n'), ((7081, 7130), 'tensorflow.compat.v1.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', (['labels_tf', 'preds'], {}), '(labels_tf, preds)\n', (7112, 7130), True, 'import tensorflow.compat.v1 as tf\n'), ((7255, 7292), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['scope'], {'reuse': 'reuse'}), '(scope, reuse=reuse)\n', (7272, 7292), True, 'import tensorflow.compat.v1 as tf\n'), ((7308, 7407), 'tensorflow.contrib.slim.batch_norm', 'slim.batch_norm', (['net'], {'decay': '(0.975)', 'is_training': 'training', 'fused': '(True)', 'center': '(False)', 'scale': '(False)'}), '(net, decay=0.975, is_training=training, fused=True, center=\n False, scale=False)\n', (7323, 7407), True, 'import tensorflow.contrib.slim as slim\n'), ((7417, 7434), 'tensorflow.contrib.slim.flatten', 'slim.flatten', (['net'], {}), '(net)\n', (7429, 7434), True, 'import tensorflow.contrib.slim as slim\n'), ((1647, 1677), 'utils.montage_tf', 'montage_tf', (['patch_erased', '(4)', '(8)'], {}), '(patch_erased, 4, 8)\n', (1657, 1677), False, 'from utils import montage_tf\n'), ((2383, 2407), 'tensorflow.compat.v1.ones_like', 'tf.ones_like', (['mask_erase'], {}), '(mask_erase)\n', (2395, 2407), True, 'import tensorflow.compat.v1 as tf\n'), ((2947, 2991), 'tensorflow.compat.v1.variable_scope', 'tf.variable_scope', (['"""classifier"""'], {'reuse': 'reuse'}), "('classifier', reuse=reuse)\n", (2964, 2991), True, 'import tensorflow.compat.v1 as tf\n'), ((4152, 4210), 'tensorflow.compat.v1.losses.softmax_cross_entropy', 'tf.losses.softmax_cross_entropy', 
(['labels', 'pred'], {'scope': 'scope'}), '(labels, pred, scope=scope)\n', (4183, 4210), True, 'import tensorflow.compat.v1 as tf\n'), ((4366, 4384), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['pred', '(1)'], {}), '(pred, 1)\n', (4375, 4384), True, 'import tensorflow.compat.v1 as tf\n'), ((4574, 4611), 'tensorflow.compat.v1.losses.get_regularization_losses', 'tf.losses.get_regularization_losses', ([], {}), '()\n', (4609, 4611), True, 'import tensorflow.compat.v1 as tf\n'), ((4869, 4896), 'tensorflow.compat.v1.reduce_mean', 'tf.reduce_mean', (['preads_fake'], {}), '(preads_fake)\n', (4883, 4896), True, 'import tensorflow.compat.v1 as tf\n'), ((5436, 5464), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['(1.0 - preds_real)'], {}), '(1.0 - preds_real)\n', (5446, 5464), True, 'import tensorflow.compat.v1 as tf\n'), ((5500, 5528), 'tensorflow.compat.v1.nn.relu', 'tf.nn.relu', (['(1.0 + preds_fake)'], {}), '(1.0 + preds_fake)\n', (5510, 5528), True, 'import tensorflow.compat.v1 as tf\n'), ((6128, 6148), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (6137, 6148), True, 'import tensorflow.compat.v1 as tf\n'), ((6315, 6345), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels[:bs // 2]', '(1)'], {}), '(labels[:bs // 2], 1)\n', (6324, 6345), True, 'import tensorflow.compat.v1 as tf\n'), ((6482, 6514), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels[bs // 2:bs]', '(1)'], {}), '(labels[bs // 2:bs], 1)\n', (6491, 6514), True, 'import tensorflow.compat.v1 as tf\n'), ((6646, 6677), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels[bs:2 * bs]', '(1)'], {}), '(labels[bs:2 * bs], 1)\n', (6655, 6677), True, 'import tensorflow.compat.v1 as tf\n'), ((6810, 6842), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels[2 * bs:-bs]', '(1)'], {}), '(labels[2 * bs:-bs], 1)\n', (6819, 6842), True, 'import tensorflow.compat.v1 as tf\n'), ((6970, 6996), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels[-bs:]', '(1)'], {}), '(labels[-bs:], 1)\n', (6979, 6996), True, 'import tensorflow.compat.v1 as tf\n'), ((7539, 7584), 'tensorflow.contrib.layers.variance_scaling_initializer', 'contrib.layers.variance_scaling_initializer', ([], {}), '()\n', (7582, 7584), True, 'import tensorflow.contrib as contrib\n'), ((7641, 7664), 'tensorflow.contrib.slim.l2_regularizer', 'slim.l2_regularizer', (['wd'], {}), '(wd)\n', (7660, 7664), True, 'import tensorflow.contrib.slim as slim\n'), ((8183, 8209), 'tensorflow.compat.v1.random_normal', 'tf.random_normal', (['im_shape'], {}), '(im_shape)\n', (8199, 8209), True, 'import tensorflow.compat.v1 as tf\n'), ((3158, 3208), 'tensorflow.contrib.slim.avg_pool2d', 'slim.avg_pool2d', (['feats[feats_id]', 'p[0]', 'p[1]', 'p[2]'], {}), '(feats[feats_id], p[0], p[1], p[2])\n', (3173, 3208), True, 'import tensorflow.contrib.slim as slim\n'), ((4523, 4543), 'tensorflow.compat.v1.argmax', 'tf.argmax', (['labels', '(1)'], {}), '(labels, 1)\n', (4532, 4543), True, 'import tensorflow.compat.v1 as tf\n')] |
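# The mask construction inside patch_erase() above is easier to inspect outside
# TensorFlow. The NumPy re-derivation below mirrors the padding and noise scale
# of the TF code, but it is an illustrative sketch, not the implementation.
import numpy as np

def patch_erase_np(img, patch_sz=(16, 16)):
    # img: (N, H, W, C); build a centered block of ones, zero-pad to image size.
    n, h, w, c = img.shape
    pad_h, pad_w = h - patch_sz[0], w - patch_sz[1]
    mask = np.ones((n, patch_sz[0], patch_sz[1], c), dtype=img.dtype)
    mask = np.pad(mask, [(0, 0), (pad_h // 2, pad_h - pad_h // 2),
                  (pad_w // 2, pad_w - pad_w // 2), (0, 0)])
    noise = 0.1 * np.random.randn(*img.shape).astype(img.dtype)
    return img * (1.0 - mask) + noise * mask, 1.0 - mask

erased, keep_mask = patch_erase_np(np.ones((1, 32, 32, 3), dtype=np.float32))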
#!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Author: pirogue
Purpose: operations on the whitelist port table
Site: http://pirogue.org
Created: 2018-08-03 17:32:54
"""
from dbs.initdb import DBSession
from dbs.models.Whiteport import Whiteport
from sqlalchemy import desc,asc
from sqlalchemy.exc import InvalidRequestError
# import sys
# sys.path.append("..")
class WhitePort:
"""增删改查"""
def __init__(self):
self.session=DBSession
    # Query the dst_port column of the whitelist table
def select_white_port(self):
try:
white_port_res = self.session.query(Whiteport.dst_port).all()
return white_port_res
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
    # Insert a port into the whitelist
def insert_white_port(self, dst_port):
try:
wip_insert = Whiteport(dst_port=dst_port)
self.session.merge(wip_insert)
self.session.commit()
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close()
    # Delete all rows from the whitelist port table
def delete_white_port(self):
try:
self.session.query(Whiteport).delete()
self.session.commit()
except InvalidRequestError:
self.session.rollback()
except Exception as e:
print(e)
finally:
self.session.close() | [
"dbs.models.Whiteport.Whiteport"
] | [((884, 912), 'dbs.models.Whiteport.Whiteport', 'Whiteport', ([], {'dst_port': 'dst_port'}), '(dst_port=dst_port)\n', (893, 912), False, 'from dbs.models.Whiteport import Whiteport\n')] |
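# Short usage sketch for the WhitePort helper above (assumes DBSession and the
# Whiteport model are configured as in the imports):
wp = WhitePort()
wp.insert_white_port(443)       # merge a port into the whitelist
ports = wp.select_white_port()  # list of (dst_port,) one-tuples
wp.delete_white_port()          # wipe the table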
#
# Copyright (C) 2018 <NAME> <<EMAIL>>
# License: MIT
#
r"""Singleton class
.. versionadded:: 0.9.8
- Add to make a kind of manager instancne later to manage plugins.
"""
from __future__ import absolute_import
import threading
class Singleton(object):
"""Singleton utilizes __new__ special method.
.. note:: Inherited classes are equated with base class inherit this.
"""
__instance = None
__lock = threading.RLock()
def __new__(cls):
if cls.__instance is None:
cls.__lock.acquire()
if cls.__instance is None:
try:
cls.__instance = object.__new__(cls)
finally:
cls.__lock.release()
return cls.__instance
# vim:sw=4:ts=4:et:
| [
"threading.RLock"
] | [((428, 445), 'threading.RLock', 'threading.RLock', ([], {}), '()\n', (443, 445), False, 'import threading\n')] |
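# Minimal, runnable check of the Singleton above: every instantiation -- even of
# a subclass, since __instance is name-mangled onto the base class -- yields the
# same object, matching the note in the class docstring.
class Config(Singleton):
    pass

a, b = Config(), Config()
assert a is b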
import tvm
from functools import reduce
from ..utils import to_int, to_int_or_None
def get_need_tile(need_tile):
return [True if x.value == 1 else False for x in need_tile]
def get_factors(split_factor_entities):
return [[x.value for x in factors.factors] for factors in split_factor_entities]
def tile_axis(stage, axis, factors, inner_to_outer=False):
ret = []
if inner_to_outer:
factors = list(reversed(factors))
for f in factors[:-1]:
axis, inner = stage.split(axis, f)
ret.append(inner)
ret.append(axis)
ret = list(reversed(ret))
else:
for f in factors[:-1]:
outer, axis = stage.split(axis, nparts=f)
ret.append(outer)
ret.append(axis)
return ret
def tile_axes(sch, op, axes, need_tile, split_factors, inner_to_outer=False):
"""Tile axes according to need_tile and split_factors
"""
axis_map = {}
count_axis = 0
split_axis_list = []
split_factor_list = []
for axis, need_tile, factors in zip(axes, need_tile, split_factors):
if need_tile:
split_axis = tile_axis(sch[op], axis, factors, inner_to_outer=inner_to_outer)
split_axis_list.append(split_axis)
split_factor_list.append(factors)
axis_map[count_axis] = split_axis
else:
axis_map[count_axis] = axis
count_axis += 1
return axis_map, split_axis_list, split_factor_list
def get_bind_spec(binding_entity):
ret = []
for b in binding_entity:
tmp = []
for bb in b:
tmp.append([bb[0].value, bb[1].value])
ret.append(tmp)
return ret
def bind_axes(sch, op, axis_map, bind, to_bind, already_bind=None, factors=None, extents=None):
"""The bind function will fuse some axes,
which is dangerous because this is not updated
to the schedule state. For now it shouldn't be
a problem because the fusion should only happen
on blockIdx.z
"""
ret = []
for part in bind:
to_fuse = []
to_fuse_extent = 1
for ele in part:
if ele[1] < 0:
axis = axis_map[ele[0]]
if already_bind is not None:
to_fuse_extent *= extents[ele[0]]
else:
axis = axis_map[ele[0]][ele[1]]
if already_bind is not None:
to_fuse_extent *= factors[ele[0]][ele[1]]
to_fuse.append(axis)
if len(to_fuse) > 1:
sch[op].reorder(*to_fuse)
fused_axis = sch[op].fuse(*to_fuse)
else:
fused_axis = to_fuse[0]
ret.append(fused_axis)
sch[op].bind(fused_axis, to_bind)
if already_bind is not None:
already_bind["extent"] = to_fuse_extent
return ret
def get_move_to_inner(move):
return [x.value for x in move]
def reorder_spatial_and_reduce_axes(sch, op, axis_map, split_axis_list, reduce_split_axis_list, extents_info=None):
"""Reorder spatial and reduce axes
"""
pre = []
ones = []
for k, v in axis_map.items():
if not isinstance(v, (list, tuple)):
if v.dom is None:
ext = None
else:
ext = to_int_or_None(v.dom.extent)
            if ext is None:
                if extents_info is not None and v in extents_info:
                    ext = extents_info[v]
                else:
                    raise RuntimeError("Can't decide extent for %s" % str(v))
if ext > 1:
pre.append(v)
else:
ones.append(v)
# perform local reorder
num_axis_parts = len(split_axis_list[0]) if len(split_axis_list) > 0 else 0
num_reduce_axis_parts = len(reduce_split_axis_list[0]) if len(reduce_split_axis_list) > 0 else 0
leveled_axes = []
reduce_leveled_axes = []
local_order = []
def _inner(axis_list, leveled, nparts):
for i in range(nparts):
leveled.append([])
for part in axis_list:
for i, axis in enumerate(part):
leveled[i].append(axis)
_inner(split_axis_list, leveled_axes, num_axis_parts)
_inner(reduce_split_axis_list, reduce_leveled_axes, num_reduce_axis_parts)
if len(leveled_axes) >= 1:
# GPU specific reorder choice
# put the inner part as inner-most axes
local_order = list(reduce(lambda x, y: x + y, leveled_axes[:-1], []))
local_order += list(reduce(lambda x, y: x + y, reduce_leveled_axes, []))
local_order += leveled_axes[-1]
else:
local_order += list(reduce(lambda x, y: x + y, reduce_leveled_axes, []))
if len(local_order) > 0:
sch[op].reorder(*ones, *pre, *local_order)
return leveled_axes, reduce_leveled_axes
| [
"functools.reduce"
] | [((3933, 3982), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'leveled_axes[:-1]', '[]'], {}), '(lambda x, y: x + y, leveled_axes[:-1], [])\n', (3939, 3982), False, 'from functools import reduce\n'), ((4008, 4059), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'reduce_leveled_axes', '[]'], {}), '(lambda x, y: x + y, reduce_leveled_axes, [])\n', (4014, 4059), False, 'from functools import reduce\n'), ((4129, 4180), 'functools.reduce', 'reduce', (['(lambda x, y: x + y)', 'reduce_leveled_axes', '[]'], {}), '(lambda x, y: x + y, reduce_leveled_axes, [])\n', (4135, 4180), False, 'from functools import reduce\n')] |
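# To make the splitting arithmetic of tile_axis(..., inner_to_outer=False)
# concrete: each factor but the last becomes an nparts-style outer loop, and the
# remainder becomes the next axis. Plain-Python sketch of the resulting extents
# (illustrative only; real extents come from the TVM schedule itself):
def tiled_extents(extent, factors):
    exts = []
    for f in factors[:-1]:
        exts.append(f)
        extent = -(-extent // f)  # ceil division, like stage.split(nparts=f)
    exts.append(extent)
    return exts

print(tiled_extents(1024, [4, 8, 32]))  # -> [4, 8, 32]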
"""
mcpython - a minecraft clone written in python licenced under the MIT-licence
(https://github.com/mcpython4-coding/core)
Contributors: uuk, xkcdjerry (inactive)
Based on the game of fogleman (https://github.com/fogleman/Minecraft), licenced under the MIT-licence
Original game "minecraft" by Mojang Studios (www.minecraft.net), licenced under the EULA
(https://account.mojang.com/documents/minecraft_eula)
Mod loader inspired by "Minecraft Forge" (https://github.com/MinecraftForge/MinecraftForge) and similar
This project is not official by mojang and does not relate to it.
"""
import asyncio
import mcpython.engine.ResourceLoader as ResourceLoader
import mcpython.util.texture
import PIL.Image
from mcpython.common.data.gen.DataGeneratorManager import (
DataGeneratorInstance,
IDataGenerator,
)
from mcpython.engine import logger
class TextureConstructor(IDataGenerator):
"""
    a generator system for composing textures from layered image operations
"""
def __init__(self, name: str, image_size: tuple = None):
"""
        will create a new TextureConstructor instance
:param name: the name of the texture address as "{group}/{path without .png}"
:param image_size: the size of the image to create
"""
self.name = name
self.image_size = image_size
self.actions = []
def add_image_layer_top(self, location_or_image, position=(0, 0), rescale=(1, 1)):
"""
        will alpha-composite an image on top of all previous actions
:param location_or_image: the image to add
:param position: the position to add on
:param rescale: rescale of the image
"""
try:
self.actions.append(
(
0,
location_or_image
if type(location_or_image) == PIL.Image.Image
else asyncio.get_event_loop().run_until_complete(ResourceLoader.read_image(location_or_image)),
position,
rescale,
)
)
except:
logger.print_exception(
"[ERROR] failed to add image layer from file {}".format(
location_or_image
)
)
return self
def add_coloring_layer(
self, location_or_image, color: tuple, position=(0, 0), rescale=(1, 1)
):
"""
        will alpha-composite an image (colorized first) on top of all previous actions
:param location_or_image: the image to add
:param color: the color to colorize with
:param position: the position to add on
:param rescale: rescale of the image
"""
try:
if type(location_or_image) != PIL.Image.Image:
location_or_image = asyncio.get_event_loop().run_until_complete(ResourceLoader.read_image(location_or_image))
self.actions.append(
(
1,
location_or_image,
color,
position,
rescale,
)
)
except:
logger.print_exception(
"[ERROR] failed to add colorized layer from file {} with color {}".format(
location_or_image, color
)
)
return self
def scaled(self, scale: tuple):
self.actions.append((3, scale))
return self
def crop(self, region: tuple):
self.actions.append((4, region))
return self
def add_alpha_composite_layer(self, location_or_image, position=(0, 0)):
try:
self.actions.append(
(
2,
location_or_image
if type(location_or_image) == PIL.Image.Image
else asyncio.get_event_loop().run_until_complete(ResourceLoader.read_image(location_or_image)),
position,
)
)
except:
logger.print_exception("failed to add alpha composite layer")
return self
def write(self, generator: "DataGeneratorInstance", name: str):
file = self.get_default_location(generator, name)
if self.image_size is None:
for action, *data in self.actions:
if action == 0:
                    self.image_size = data[0].size
break
else:
logger.println(
"[ERROR] failed to texture-gen as image size could not get loaded for"
" generator named {} to store at {}!".format(self.name, file)
)
return
image = PIL.Image.new("RGBA", self.image_size, (0, 0, 0, 0))
for action, *data in self.actions:
if action == 0:
sx, sy = data[0].size
px, py = data[2]
image.alpha_composite(
data[0]
.resize((sx * px, sy * py), PIL.Image.NEAREST)
.convert("RGBA"),
data[1],
)
elif action == 1:
i = mcpython.util.texture.colorize(data[0], data[1])
sx, sy = i.size
px, py = data[3]
image.alpha_composite(
i.resize((sx * px, sy * py), PIL.Image.NEAREST).convert("RGBA"),
data[2],
)
elif action == 2:
image.alpha_composite(data[0], data[1])
elif action == 3:
size = image.size
scale = data[0]
image = image.resize(tuple([scale[i] * size[i] for i in range(2)]))
elif action == 4:
size = image.size
region = data[0]
image = image.crop(tuple([region[i] * size[i % 2] for i in range(4)]))
image.save(generator.get_full_path(file))
| [
"mcpython.engine.ResourceLoader.read_image",
"mcpython.engine.logger.print_exception",
"asyncio.get_event_loop"
] | [((4030, 4091), 'mcpython.engine.logger.print_exception', 'logger.print_exception', (['"""failed to add alpha composite layer"""'], {}), "('failed to add alpha composite layer')\n", (4052, 4091), False, 'from mcpython.engine import logger\n'), ((2844, 2888), 'mcpython.engine.ResourceLoader.read_image', 'ResourceLoader.read_image', (['location_or_image'], {}), '(location_or_image)\n', (2869, 2888), True, 'import mcpython.engine.ResourceLoader as ResourceLoader\n'), ((2800, 2824), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (2822, 2824), False, 'import asyncio\n'), ((1913, 1957), 'mcpython.engine.ResourceLoader.read_image', 'ResourceLoader.read_image', (['location_or_image'], {}), '(location_or_image)\n', (1938, 1957), True, 'import mcpython.engine.ResourceLoader as ResourceLoader\n'), ((3893, 3937), 'mcpython.engine.ResourceLoader.read_image', 'ResourceLoader.read_image', (['location_or_image'], {}), '(location_or_image)\n', (3918, 3937), True, 'import mcpython.engine.ResourceLoader as ResourceLoader\n'), ((1869, 1893), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (1891, 1893), False, 'import asyncio\n'), ((3849, 3873), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3871, 3873), False, 'import asyncio\n')] |
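# The action-0 branch of TextureConstructor.write() above reduces to plain PIL
# compositing. The standalone snippet below reproduces it with synthetic images,
# so it runs without the mcpython resource loader (the file name is a placeholder):
import PIL.Image

base = PIL.Image.new("RGBA", (16, 16), (0, 0, 0, 0))
layer = PIL.Image.new("RGBA", (8, 8), (255, 0, 0, 255))
sx, sy = layer.size
px, py = 2, 2  # the rescale factors carried in the action tuple
base.alpha_composite(layer.resize((sx * px, sy * py), PIL.Image.NEAREST).convert("RGBA"), (0, 0))
base.save("composited.png")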
from setuptools import setup
setup(name='goofy',
version='0.1',
description='A goofy ebay bot.',
url='github.com/elcolumbio/goofy',
author='<NAME>',
author_email='<EMAIL>',
license='Apache License, Version 2.0 (the "License")',
packages=['goofy'])
| [
"setuptools.setup"
] | [((30, 257), 'setuptools.setup', 'setup', ([], {'name': '"""goofy"""', 'version': '"""0.1"""', 'description': '"""A goofy ebay bot."""', 'url': '"""github.com/elcolumbio/goofy"""', 'author': '"""<NAME>"""', 'author_email': '"""<EMAIL>"""', 'license': '"""Apache License, Version 2.0 (the "License")"""', 'packages': "['goofy']"}), '(name=\'goofy\', version=\'0.1\', description=\'A goofy ebay bot.\', url=\n \'github.com/elcolumbio/goofy\', author=\'<NAME>\', author_email=\'<EMAIL>\',\n license=\'Apache License, Version 2.0 (the "License")\', packages=[\'goofy\'])\n', (35, 257), False, 'from setuptools import setup\n')] |
import unittest
import translator
class TestEnglishToFrench(unittest.TestCase):
def test_love(self):
self.assertEqual(translator.english_to_french('Love'), 'Amour')
def test_sun(self):
self.assertEqual(translator.english_to_french('Sun'), 'Soleil')
def test_null(self):
self.assertRaises(ValueError, translator.english_to_french, None)
def test_hello(self):
self.assertEqual(translator.english_to_french('Hello'), 'Bonjour')
class TestFrenchToEnglish(unittest.TestCase):
def test_love(self):
self.assertEqual(translator.french_to_english('Amour'), 'Love')
def test_sun(self):
self.assertEqual(translator.french_to_english('Soleil'), 'Sun')
def test_null(self):
self.assertRaises(ValueError, translator.french_to_english, None)
def test_hello(self):
self.assertEqual(translator.french_to_english('Bonjour'), 'Hello')
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"translator.french_to_english",
"translator.english_to_french"
] | [((972, 987), 'unittest.main', 'unittest.main', ([], {}), '()\n', (985, 987), False, 'import unittest\n'), ((131, 167), 'translator.english_to_french', 'translator.english_to_french', (['"""Love"""'], {}), "('Love')\n", (159, 167), False, 'import translator\n'), ((228, 263), 'translator.english_to_french', 'translator.english_to_french', (['"""Sun"""'], {}), "('Sun')\n", (256, 263), False, 'import translator\n'), ((435, 472), 'translator.english_to_french', 'translator.english_to_french', (['"""Hello"""'], {}), "('Hello')\n", (463, 472), False, 'import translator\n'), ((586, 623), 'translator.french_to_english', 'translator.french_to_english', (['"""Amour"""'], {}), "('Amour')\n", (614, 623), False, 'import translator\n'), ((683, 721), 'translator.french_to_english', 'translator.french_to_english', (['"""Soleil"""'], {}), "('Soleil')\n", (711, 721), False, 'import translator\n'), ((890, 929), 'translator.french_to_english', 'translator.french_to_english', (['"""Bonjour"""'], {}), "('Bonjour')\n", (918, 929), False, 'import translator\n')] |
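# The tests above pin down the translator contract: exact capitalised outputs
# and ValueError on None. A hypothetical dictionary-backed stand-in that
# satisfies them -- the real translator module presumably calls a translation
# service, so treat this purely as a test double:
_EN_FR = {"Love": "Amour", "Sun": "Soleil", "Hello": "Bonjour"}
_FR_EN = {value: key for key, value in _EN_FR.items()}

def english_to_french(text):
    if text is None:
        raise ValueError("text must not be None")
    return _EN_FR[text]

def french_to_english(text):
    if text is None:
        raise ValueError("text must not be None")
    return _FR_EN[text]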
from flask import (Flask, jsonify)
from gevent import (pywsgi, sleep)
from geventwebsocket.handler import WebSocketHandler
from . import __version__
from .logs import logger
class FlaskApp(object):
def __init__(self, host='', port=8080):
self.app = Flask(__name__)
self._register_routes()
self._socket_app = None
self._host = host
self._port = port
self._server = pywsgi.WSGIServer((self._host, self._port), self.app, handler_class=WebSocketHandler)
self._serving = False
@property
def socket_app(self):
return self._socket_app
@socket_app.setter
def socket_app(self, socket_app):
self._socket_app = socket_app
@property
def socket_clients(self):
if self._socket_app is not None:
return len(self._socket_app)
else:
return 0
@property
def is_serving(self):
return self._serving
def _register_routes(self):
@self.app.route("/", methods=['GET'])
def root():
return "200 OK", 200
# Tesseract requires at least a /status endpoint to verify that the app is running.
@self.app.route("/status", methods=['GET'])
def status():
return jsonify({
"status": "up",
"version": __version__,
"clients": self.socket_clients
}), 200
def serve_forever(self):
logger.debug('Serving Forever!')
try:
            logger.debug('Serving on %s:%s' % (self._host, self._port))
self._server.serve_forever()
except KeyboardInterrupt:
print("Keyboard Interrupt, Exiting...")
exit(0)
def start(self):
logger.debug('Starting Server...')
self._serving = True
self._server.start()
def stop(self):
logger.debug('Stopping Server...')
self._server.stop()
self._serving = False
def run_in_loop(self, actions, *args, **kwargs):
if not self._serving:
self.start()
while self._serving:
actions(*args, **kwargs)
sleep(0)
| [
"flask.jsonify",
"gevent.sleep",
"gevent.pywsgi.WSGIServer",
"flask.Flask"
] | [((264, 279), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (269, 279), False, 'from flask import Flask, jsonify\n'), ((419, 509), 'gevent.pywsgi.WSGIServer', 'pywsgi.WSGIServer', (['(self._host, self._port)', 'self.app'], {'handler_class': 'WebSocketHandler'}), '((self._host, self._port), self.app, handler_class=\n WebSocketHandler)\n', (436, 509), False, 'from gevent import pywsgi, sleep\n'), ((2140, 2148), 'gevent.sleep', 'sleep', (['(0)'], {}), '(0)\n', (2145, 2148), False, 'from gevent import pywsgi, sleep\n'), ((1258, 1344), 'flask.jsonify', 'jsonify', (["{'status': 'up', 'version': __version__, 'clients': self.socket_clients}"], {}), "({'status': 'up', 'version': __version__, 'clients': self.\n socket_clients})\n", (1265, 1344), False, 'from flask import Flask, jsonify\n')] |
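# Hedged usage sketch for FlaskApp above; the port and the per-tick worker are
# placeholders, not part of the original module:
app = FlaskApp(host="0.0.0.0", port=8080)

def heartbeat():
    pass  # hypothetical periodic work, e.g. polling self.socket_app

app.run_in_loop(heartbeat)  # starts the gevent server, then loops the worker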
from django.views.generic import ListView, CreateView, UpdateView
from django.utils.decorators import method_decorator
from django.contrib.admin.views.decorators import staff_member_required
from django.shortcuts import get_object_or_404, redirect, reverse
from django.urls import reverse_lazy
from django.contrib import messages
from django.template.loader import render_to_string
from django.http import JsonResponse
from django.db.models import Sum
from django_tables2 import RequestConfig
from .models import Order, OrderItem, CURRENCY
from .forms import OrderCreateForm, OrderEditForm
from product.models import Product, Category
from .tables import ProductTable, OrderItemTable, OrderTable
import datetime
@method_decorator(staff_member_required, name='dispatch')
class HomepageView(ListView):
template_name = 'index.html'
model = Order
queryset = Order.objects.all()[:10]
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
orders = Order.objects.all()
total_sales = orders.aggregate(Sum('final_value'))['final_value__sum'] if orders.exists() else 0
paid_value = orders.filter(is_paid=True).aggregate(Sum('final_value'))['final_value__sum']\
if orders.filter(is_paid=True).exists() else 0
remaining = total_sales - paid_value
        divisor = total_sales if total_sales > 0 else 1
        paid_percent, remain_percent = round((paid_value/divisor)*100, 1), round((remaining/divisor)*100, 1)
total_sales = f'{total_sales} {CURRENCY}'
paid_value = f'{paid_value} {CURRENCY}'
remaining = f'{remaining} {CURRENCY}'
orders = OrderTable(orders)
RequestConfig(self.request).configure(orders)
context.update(locals())
return context
@staff_member_required
def auto_create_order_view(request):
new_order = Order.objects.create(
title='Order 66',
date=datetime.datetime.now()
)
new_order.title = f'Order - {new_order.id}'
new_order.save()
return redirect(new_order.get_edit_url())
@method_decorator(staff_member_required, name='dispatch')
class OrderListView(ListView):
template_name = 'list.html'
model = Order
paginate_by = 50
def get_queryset(self):
qs = Order.objects.all()
if self.request.GET:
qs = Order.filter_data(self.request, qs)
return qs
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
orders = OrderTable(self.object_list)
RequestConfig(self.request).configure(orders)
context.update(locals())
return context
@method_decorator(staff_member_required, name='dispatch')
class CreateOrderView(CreateView):
template_name = 'form.html'
form_class = OrderCreateForm
model = Order
def get_success_url(self):
self.new_object.refresh_from_db()
return reverse('update_order', kwargs={'pk': self.new_object.id})
def form_valid(self, form):
        obj = form.save()
        obj.refresh_from_db()
        self.new_object = obj
return super().form_valid(form)
@method_decorator(staff_member_required, name='dispatch')
class OrderUpdateView(UpdateView):
model = Order
template_name = 'order_update.html'
form_class = OrderEditForm
def get_success_url(self):
return reverse('update_order', kwargs={'pk': self.object.id})
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
instance = self.object
qs_p = Product.objects.filter(active=True)[:12]
products = ProductTable(qs_p)
order_items = OrderItemTable(instance.order_items.all())
RequestConfig(self.request).configure(products)
RequestConfig(self.request).configure(order_items)
context.update(locals())
return context
@staff_member_required
def delete_order(request, pk):
instance = get_object_or_404(Order, id=pk)
instance.delete()
messages.warning(request, 'The order is deleted!')
return redirect(reverse('homepage'))
@staff_member_required
def done_order_view(request, pk):
instance = get_object_or_404(Order, id=pk)
instance.is_paid = True
instance.save()
return redirect(reverse('homepage'))
@staff_member_required
def ajax_add_product(request, pk, dk):
instance = get_object_or_404(Order, id=pk)
product = get_object_or_404(Product, id=dk)
order_item, created = OrderItem.objects.get_or_create(order=instance, product=product)
if created:
order_item.qty = 1
order_item.price = product.value
order_item.discount_price = product.discount_value
else:
order_item.qty += 1
order_item.save()
product.qty -= 1
product.save()
instance.refresh_from_db()
order_items = OrderItemTable(instance.order_items.all())
RequestConfig(request).configure(order_items)
data = dict()
data['result'] = render_to_string(template_name='include/order_container.html',
request=request,
context={'instance': instance,
'order_items': order_items
}
)
return JsonResponse(data)
@staff_member_required
def ajax_modify_order_item(request, pk, action):
order_item = get_object_or_404(OrderItem, id=pk)
product = order_item.product
instance = order_item.order
    if action == 'remove':
        if order_item.qty > 1:  # don't decrement below one item
            order_item.qty -= 1
            product.qty += 1
    if action == 'add':
        order_item.qty += 1
        product.qty -= 1
    product.save()  # persist the stock change for both actions
    order_item.save()
if action == 'delete':
order_item.delete()
data = dict()
instance.refresh_from_db()
order_items = OrderItemTable(instance.order_items.all())
RequestConfig(request).configure(order_items)
data['result'] = render_to_string(template_name='include/order_container.html',
request=request,
context={
'instance': instance,
'order_items': order_items
}
)
return JsonResponse(data)
@staff_member_required
def ajax_search_products(request, pk):
instance = get_object_or_404(Order, id=pk)
q = request.GET.get('q', None)
products = Product.broswer.active().filter(title__startswith=q) if q else Product.broswer.active()
products = products[:12]
products = ProductTable(products)
RequestConfig(request).configure(products)
data = dict()
data['products'] = render_to_string(template_name='include/product_container.html',
request=request,
context={
'products': products,
'instance': instance
})
return JsonResponse(data)
@staff_member_required
def order_action_view(request, pk, action):
instance = get_object_or_404(Order, id=pk)
if action == 'is_paid':
instance.is_paid = True
instance.save()
if action == 'delete':
instance.delete()
return redirect(reverse('homepage'))
@staff_member_required
def ajax_calculate_results_view(request):
orders = Order.filter_data(request, Order.objects.all())
total_value, total_paid_value, remaining_value, data = 0, 0, 0, dict()
if orders.exists():
total_value = orders.aggregate(Sum('final_value'))['final_value__sum']
total_paid_value = orders.filter(is_paid=True).aggregate(Sum('final_value'))['final_value__sum'] if\
orders.filter(is_paid=True) else 0
remaining_value = total_value - total_paid_value
total_value, total_paid_value, remaining_value = f'{total_value} {CURRENCY}',\
f'{total_paid_value} {CURRENCY}', f'{remaining_value} {CURRENCY}'
data['result'] = render_to_string(template_name='include/result_container.html',
request=request,
context=locals())
return JsonResponse(data)
@staff_member_required
def ajax_calculate_category_view(request):
orders = Order.filter_data(request, Order.objects.all())
order_items = OrderItem.objects.filter(order__in=orders)
category_analysis = order_items.values_list('product__category__title').annotate(qty=Sum('qty'),
total_incomes=Sum('total_price')
)
data = dict()
category, currency = True, CURRENCY
data['result'] = render_to_string(template_name='include/result_container.html',
request=request,
context=locals()
)
return JsonResponse(data)
| [
"django.http.JsonResponse",
"django.contrib.messages.warning",
"django.shortcuts.get_object_or_404",
"product.models.Product.objects.filter",
"django.utils.decorators.method_decorator",
"product.models.Product.broswer.active",
"datetime.datetime.now",
"django.shortcuts.reverse",
"django_tables2.Requ... | [((716, 772), 'django.utils.decorators.method_decorator', 'method_decorator', (['staff_member_required'], {'name': '"""dispatch"""'}), "(staff_member_required, name='dispatch')\n", (732, 772), False, 'from django.utils.decorators import method_decorator\n'), ((2079, 2135), 'django.utils.decorators.method_decorator', 'method_decorator', (['staff_member_required'], {'name': '"""dispatch"""'}), "(staff_member_required, name='dispatch')\n", (2095, 2135), False, 'from django.utils.decorators import method_decorator\n'), ((2655, 2711), 'django.utils.decorators.method_decorator', 'method_decorator', (['staff_member_required'], {'name': '"""dispatch"""'}), "(staff_member_required, name='dispatch')\n", (2671, 2711), False, 'from django.utils.decorators import method_decorator\n'), ((3149, 3205), 'django.utils.decorators.method_decorator', 'method_decorator', (['staff_member_required'], {'name': '"""dispatch"""'}), "(staff_member_required, name='dispatch')\n", (3165, 3205), False, 'from django.utils.decorators import method_decorator\n'), ((3960, 3991), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Order'], {'id': 'pk'}), '(Order, id=pk)\n', (3977, 3991), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((4018, 4068), 'django.contrib.messages.warning', 'messages.warning', (['request', '"""The order is deleted!"""'], {}), "(request, 'The order is deleted!')\n", (4034, 4068), False, 'from django.contrib import messages\n'), ((4184, 4215), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Order'], {'id': 'pk'}), '(Order, id=pk)\n', (4201, 4215), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((4384, 4415), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Order'], {'id': 'pk'}), '(Order, id=pk)\n', (4401, 4415), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((4430, 4463), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Product'], {'id': 'dk'}), '(Product, id=dk)\n', (4447, 4463), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((4979, 5123), 'django.template.loader.render_to_string', 'render_to_string', ([], {'template_name': '"""include/order_container.html"""', 'request': 'request', 'context': "{'instance': instance, 'order_items': order_items}"}), "(template_name='include/order_container.html', request=\n request, context={'instance': instance, 'order_items': order_items})\n", (4995, 5123), False, 'from django.template.loader import render_to_string\n'), ((5338, 5356), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (5350, 5356), False, 'from django.http import JsonResponse\n'), ((5448, 5483), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['OrderItem'], {'id': 'pk'}), '(OrderItem, id=pk)\n', (5465, 5483), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((6033, 6177), 'django.template.loader.render_to_string', 'render_to_string', ([], {'template_name': '"""include/order_container.html"""', 'request': 'request', 'context': "{'instance': instance, 'order_items': order_items}"}), "(template_name='include/order_container.html', request=\n request, context={'instance': instance, 'order_items': order_items})\n", (6049, 6177), False, 'from django.template.loader import render_to_string\n'), ((6423, 6441), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (6435, 6441), False, 'from django.http import 
JsonResponse\n'), ((6521, 6552), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Order'], {'id': 'pk'}), '(Order, id=pk)\n', (6538, 6552), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((6846, 6986), 'django.template.loader.render_to_string', 'render_to_string', ([], {'template_name': '"""include/product_container.html"""', 'request': 'request', 'context': "{'products': products, 'instance': instance}"}), "(template_name='include/product_container.html', request=\n request, context={'products': products, 'instance': instance})\n", (6862, 6986), False, 'from django.template.loader import render_to_string\n'), ((7203, 7221), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (7215, 7221), False, 'from django.http import JsonResponse\n'), ((7306, 7337), 'django.shortcuts.get_object_or_404', 'get_object_or_404', (['Order'], {'id': 'pk'}), '(Order, id=pk)\n', (7323, 7337), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((8444, 8462), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (8456, 8462), False, 'from django.http import JsonResponse\n'), ((9265, 9283), 'django.http.JsonResponse', 'JsonResponse', (['data'], {}), '(data)\n', (9277, 9283), False, 'from django.http import JsonResponse\n'), ((2919, 2977), 'django.shortcuts.reverse', 'reverse', (['"""update_order"""'], {'kwargs': "{'pk': self.new_object.id}"}), "('update_order', kwargs={'pk': self.new_object.id})\n", (2926, 2977), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((3377, 3431), 'django.shortcuts.reverse', 'reverse', (['"""update_order"""'], {'kwargs': "{'pk': self.object.id}"}), "('update_order', kwargs={'pk': self.object.id})\n", (3384, 3431), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((4089, 4108), 'django.shortcuts.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (4096, 4108), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((4284, 4303), 'django.shortcuts.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (4291, 4303), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((6666, 6690), 'product.models.Product.broswer.active', 'Product.broswer.active', ([], {}), '()\n', (6688, 6690), False, 'from product.models import Product, Category\n'), ((7495, 7514), 'django.shortcuts.reverse', 'reverse', (['"""homepage"""'], {}), "('homepage')\n", (7502, 7514), False, 'from django.shortcuts import get_object_or_404, redirect, reverse\n'), ((1930, 1953), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (1951, 1953), False, 'import datetime\n'), ((3574, 3609), 'product.models.Product.objects.filter', 'Product.objects.filter', ([], {'active': '(True)'}), '(active=True)\n', (3596, 3609), False, 'from product.models import Product, Category\n'), ((4894, 4916), 'django_tables2.RequestConfig', 'RequestConfig', (['request'], {}), '(request)\n', (4907, 4916), False, 'from django_tables2 import RequestConfig\n'), ((5966, 5988), 'django_tables2.RequestConfig', 'RequestConfig', (['request'], {}), '(request)\n', (5979, 5988), False, 'from django_tables2 import RequestConfig\n'), ((6762, 6784), 'django_tables2.RequestConfig', 'RequestConfig', (['request'], {}), '(request)\n', (6775, 6784), False, 'from django_tables2 import RequestConfig\n'), ((8742, 8752), 'django.db.models.Sum', 'Sum', (['"""qty"""'], {}), "('qty')\n", (8745, 8752), False, 'from 
django.db.models import Sum\n'), ((8854, 8872), 'django.db.models.Sum', 'Sum', (['"""total_price"""'], {}), "('total_price')\n", (8857, 8872), False, 'from django.db.models import Sum\n'), ((1689, 1716), 'django_tables2.RequestConfig', 'RequestConfig', (['self.request'], {}), '(self.request)\n', (1702, 1716), False, 'from django_tables2 import RequestConfig\n'), ((2550, 2577), 'django_tables2.RequestConfig', 'RequestConfig', (['self.request'], {}), '(self.request)\n', (2563, 2577), False, 'from django_tables2 import RequestConfig\n'), ((3726, 3753), 'django_tables2.RequestConfig', 'RequestConfig', (['self.request'], {}), '(self.request)\n', (3739, 3753), False, 'from django_tables2 import RequestConfig\n'), ((3782, 3809), 'django_tables2.RequestConfig', 'RequestConfig', (['self.request'], {}), '(self.request)\n', (3795, 3809), False, 'from django_tables2 import RequestConfig\n'), ((6603, 6627), 'product.models.Product.broswer.active', 'Product.broswer.active', ([], {}), '()\n', (6625, 6627), False, 'from product.models import Product, Category\n'), ((7782, 7800), 'django.db.models.Sum', 'Sum', (['"""final_value"""'], {}), "('final_value')\n", (7785, 7800), False, 'from django.db.models import Sum\n'), ((1066, 1084), 'django.db.models.Sum', 'Sum', (['"""final_value"""'], {}), "('final_value')\n", (1069, 1084), False, 'from django.db.models import Sum\n'), ((1191, 1209), 'django.db.models.Sum', 'Sum', (['"""final_value"""'], {}), "('final_value')\n", (1194, 1209), False, 'from django.db.models import Sum\n'), ((7887, 7905), 'django.db.models.Sum', 'Sum', (['"""final_value"""'], {}), "('final_value')\n", (7890, 7905), False, 'from django.db.models import Sum\n')] |
import caffe
import torch
import numpy as np
import argparse
from collections import OrderedDict
from torch.autograd import Variable
import torch.nn as nn
def arg_parse():
parser = argparse.ArgumentParser()
parser.add_argument('--model', '-m', default='alexnet')
    parser.add_argument('--decimal', '-d', type=int, default=2)
parser.add_argument('--gpu', '-gpu', action='store_true')
args = parser.parse_args()
return args
def generate_random(shape, gpu=False):
data_np = np.random.rand(np.prod(shape)).reshape(shape)
data_torch = Variable(torch.Tensor(data_np))
if gpu:
data_torch = data_torch.cuda()
return [data_np], [data_torch]
def get_input_size(caffe_net):
input_name = caffe_net.inputs[0]
return caffe_net.blobs[input_name].data.shape
def forward_torch(net, data):
blobs = OrderedDict()
module2name = {}
for layer_name, m in net.named_modules():
layer_name = layer_name.replace('.', '_')
module2name[m] = layer_name
        # turn off all in-place operations so intermediate outputs are not overwritten
if hasattr(m, 'inplace'):
m.inplace = False
def forward_hook(m, i, o):
o_np = o.data.cpu().numpy()
blobs[module2name[m]] = o_np
for m in net.modules():
m.register_forward_hook(forward_hook)
output = net.forward(*data)
if isinstance(output, tuple):
outputs = []
for o in output:
outputs.append(o.data.cpu().numpy())
else:
outputs = [output.data.cpu().numpy()]
return blobs, outputs
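# The hook mechanism above is the standard PyTorch way to capture intermediate
# activations: registering a forward hook on every module makes each forward()
# call record its output. A minimal standalone sketch of the same idea
# (hypothetical `net`, not part of this script):
#     acts = {}
#     for name, m in net.named_modules():
#         m.register_forward_hook(lambda m, i, o, n=name: acts.update({n: o.detach()}))
#     net(torch.zeros(1, 3, 224, 224))  # acts now maps module names to outputs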
def forward_caffe(net, data):
for input_name, d in zip(net.inputs, data):
net.blobs[input_name].data[...] = d
rst = net.forward()
blobs = OrderedDict()
blob2layer = {}
for layer_name, tops in net.top_names.items():
for top in tops:
blob2layer[top] = layer_name
for name, value in net.blobs.items():
layer_name = blob2layer[name]
value = value.data
if layer_name in blobs:
blobs[layer_name].append(value)
else:
blobs[layer_name] = [value]
outputs = []
for output_name in net.outputs:
outputs.append(rst[output_name])
return blobs, outputs
def test(net_caffe, net_torch, data_np, data_torch, args):
blobs_caffe, rsts_caffe = forward_caffe(net_caffe, data_np)
blobs_torch, rsts_torchs = forward_torch(net_torch, data_torch)
# test the output of every layer
for layer, value in blobs_caffe.items():
if layer in blobs_torch:
value_torch = blobs_torch[layer]
value = value[0]
if value.size != value_torch.size: continue
if 'relu' in layer: continue
try:
np.testing.assert_almost_equal(value, value_torch, decimal=args.decimal)
print("TEST layer {}: PASS".format(layer))
            except AssertionError:
print("TEST layer {}: FAIL".format(layer))
# np.testing.assert_almost_equal(np.clip(value, min=0), np.clip(value_torch, min=0))
# test the output
print("TEST output")
for rst_caffe, rst_torch in zip(rsts_caffe, rsts_torchs):
np.testing.assert_almost_equal(rst_caffe, rst_torch, decimal=args.decimal)
print("TEST output: PASS")
if __name__ == '__main__':
args = arg_parse()
if args.model == 'alexnet':
# Alexnet example
from torchvision.models.alexnet import alexnet
net_torch = alexnet(True).eval()
if args.gpu:
net_torch.cuda()
try:
net_caffe = caffe.Net('alexnet.prototxt', 'alexnet.caffemodel', caffe.TEST)
        except Exception as e:
            # raising a plain string is invalid in Python 3; raise a real exception
            raise RuntimeError("Please run alexnet_pytorch_to_caffe.py first") from e
shape = get_input_size(net_caffe)
data_np, data_torch = generate_random(shape, args.gpu)
test(net_caffe, net_torch, data_np, data_torch, args)
elif args.model == 'resnet18':
# ResNet example
from torchvision.models.resnet import resnet18
net_torch = resnet18(True).eval()
if args.gpu:
net_torch.cuda()
net_caffe = caffe.Net('resnet18.prototxt', 'resnet18.caffemodel', caffe.TEST)
shape = get_input_size(net_caffe)
data_np, data_torch = generate_random(shape, args.gpu)
test(net_caffe, net_torch, data_np, data_torch, args)
elif args.model == 'inception_v3':
# Inception_v3 example
from torchvision.models.inception import inception_v3
net_torch = inception_v3(True, transform_input=False).eval()
if args.gpu:
net_torch.cuda()
net_caffe = caffe.Net('inception_v3.prototxt', 'inception_v3.caffemodel', caffe.TEST)
shape = get_input_size(net_caffe)
data_np, data_torch = generate_random(shape, args.gpu)
test(net_caffe, net_torch, data_np, data_torch, args)
else:
raise NotImplementedError()
| [
"numpy.prod",
"collections.OrderedDict",
"torchvision.models.inception.inception_v3",
"argparse.ArgumentParser",
"torchvision.models.resnet.resnet18",
"torch.Tensor",
"numpy.testing.assert_almost_equal",
"caffe.Net",
"torchvision.models.alexnet.alexnet"
] | [((187, 212), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (210, 212), False, 'import argparse\n'), ((836, 849), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (847, 849), False, 'from collections import OrderedDict\n'), ((1695, 1708), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (1706, 1708), False, 'from collections import OrderedDict\n'), ((563, 584), 'torch.Tensor', 'torch.Tensor', (['data_np'], {}), '(data_np)\n', (575, 584), False, 'import torch\n'), ((3144, 3218), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['rst_caffe', 'rst_torch'], {'decimal': 'args.decimal'}), '(rst_caffe, rst_torch, decimal=args.decimal)\n', (3174, 3218), True, 'import numpy as np\n'), ((3545, 3608), 'caffe.Net', 'caffe.Net', (['"""alexnet.prototxt"""', '"""alexnet.caffemodel"""', 'caffe.TEST'], {}), "('alexnet.prototxt', 'alexnet.caffemodel', caffe.TEST)\n", (3554, 3608), False, 'import caffe\n'), ((4088, 4153), 'caffe.Net', 'caffe.Net', (['"""resnet18.prototxt"""', '"""resnet18.caffemodel"""', 'caffe.TEST'], {}), "('resnet18.prototxt', 'resnet18.caffemodel', caffe.TEST)\n", (4097, 4153), False, 'import caffe\n'), ((506, 520), 'numpy.prod', 'np.prod', (['shape'], {}), '(shape)\n', (513, 520), True, 'import numpy as np\n'), ((2715, 2787), 'numpy.testing.assert_almost_equal', 'np.testing.assert_almost_equal', (['value', 'value_torch'], {'decimal': 'args.decimal'}), '(value, value_torch, decimal=args.decimal)\n', (2745, 2787), True, 'import numpy as np\n'), ((3437, 3450), 'torchvision.models.alexnet.alexnet', 'alexnet', (['(True)'], {}), '(True)\n', (3444, 3450), False, 'from torchvision.models.alexnet import alexnet\n'), ((4593, 4666), 'caffe.Net', 'caffe.Net', (['"""inception_v3.prototxt"""', '"""inception_v3.caffemodel"""', 'caffe.TEST'], {}), "('inception_v3.prototxt', 'inception_v3.caffemodel', caffe.TEST)\n", (4602, 4666), False, 'import caffe\n'), ((3996, 4010), 'torchvision.models.resnet.resnet18', 'resnet18', (['(True)'], {}), '(True)\n', (4004, 4010), False, 'from torchvision.models.resnet import resnet18\n'), ((4474, 4515), 'torchvision.models.inception.inception_v3', 'inception_v3', (['(True)'], {'transform_input': '(False)'}), '(True, transform_input=False)\n', (4486, 4515), False, 'from torchvision.models.inception import inception_v3\n')] |
# coding=utf-8
import logging
import numpy as np
import pandas as pd
from sklearn import metrics
from sklearn.linear_model import LogisticRegression
from sklearn.multiclass import OneVsRestClassifier
from sklearn.preprocessing import LabelBinarizer
from sklearn.utils import shuffle
logging.basicConfig(format='%(levelname)s:%(message)s', level=logging.ERROR)
def classification(embedding, lbl_path, split_ratio=0.7, loop=100):
eval_dict = {
'acc': 0.0,
'f1-micro': 0.0,
'f1-macro': 0.0,
}
label = pd.read_csv(lbl_path, header=None, sep=' ').values
for _ in range(loop):
labels_np = shuffle(label)
nodes = labels_np[:, 0]
labels = labels_np[:, 1]
lb = LabelBinarizer()
labels = lb.fit_transform(labels)
train_size = int(labels_np.shape[0] * split_ratio)
features = embedding[nodes]
train_x = features[:train_size, :]
train_y = labels[:train_size, :]
test_x = features[train_size:, :]
test_y = labels[train_size:, :]
clf = OneVsRestClassifier(
LogisticRegression(class_weight='balanced', solver='liblinear', n_jobs=-1))
clf.fit(train_x, train_y)
y_pred = clf.predict_proba(test_x)
y_pred = lb.transform(np.argmax(y_pred, 1))
acc = np.sum(np.argmax(y_pred, 1) == np.argmax(test_y, 1)) / len(y_pred)
eval_dict['acc'] += acc
eval_dict['f1-micro'] += metrics.f1_score(np.argmax(test_y, 1), np.argmax(y_pred, 1),
average='micro')
eval_dict['f1-macro'] += metrics.f1_score(np.argmax(test_y, 1), np.argmax(y_pred, 1),
average='macro')
for key in eval_dict.keys():
eval_dict[key] = round(1.0 * eval_dict[key] / loop, 4)
print('split_ratio: {}'.format(split_ratio))
print(eval_dict)
return eval_dict
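# Example usage (hypothetical file names, not from the original source):
#     embedding = np.load('node_embeddings.npy')   # shape: (num_nodes, dim)
#     classification(embedding, 'labels.txt', split_ratio=0.7, loop=10)
# where labels.txt holds space-separated "node_id label" pairs, one per line.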
def _k_precision(embedding, lbl_path, k, lbl):
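    # Precision@k for one class: for every node carrying label `lbl`, find its
    # k nearest neighbours by Euclidean distance and measure what fraction of
    # them carry the same label; the result is averaged over all such nodes.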
label = pd.read_csv(lbl_path, header=None, sep=' ').values
nodes = label[np.where(label[:, 1] == lbl)][:, 0]
acc = 0.0
for node in nodes:
distance = {}
for i in range(embedding.shape[0]):
if i == node:
continue
distance[i] = np.linalg.norm(embedding[i] - embedding[node])
distance = sorted(distance.items(), key=lambda x: x[1])
distance = np.array(distance)[:k]
acc += distance[np.isin(distance[:, 0], nodes)].shape[0] / k
acc /= len(nodes)
return acc
def k_precision(embedding, lbl_path, k=50):
eval_dict = {
'precision': k,
'bots_acc': _k_precision(embedding, lbl_path, k, 1),
'admins_acc': _k_precision(embedding, lbl_path, k, 2)
}
print(eval_dict)
| [
"logging.basicConfig",
"sklearn.preprocessing.LabelBinarizer",
"pandas.read_csv",
"numpy.where",
"sklearn.utils.shuffle",
"numpy.argmax",
"numpy.isin",
"sklearn.linear_model.LogisticRegression",
"numpy.array",
"numpy.linalg.norm"
] | [((285, 361), 'logging.basicConfig', 'logging.basicConfig', ([], {'format': '"""%(levelname)s:%(message)s"""', 'level': 'logging.ERROR'}), "(format='%(levelname)s:%(message)s', level=logging.ERROR)\n", (304, 361), False, 'import logging\n'), ((538, 581), 'pandas.read_csv', 'pd.read_csv', (['lbl_path'], {'header': 'None', 'sep': '""" """'}), "(lbl_path, header=None, sep=' ')\n", (549, 581), True, 'import pandas as pd\n'), ((635, 649), 'sklearn.utils.shuffle', 'shuffle', (['label'], {}), '(label)\n', (642, 649), False, 'from sklearn.utils import shuffle\n'), ((729, 745), 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), '()\n', (743, 745), False, 'from sklearn.preprocessing import LabelBinarizer\n'), ((1984, 2027), 'pandas.read_csv', 'pd.read_csv', (['lbl_path'], {'header': 'None', 'sep': '""" """'}), "(lbl_path, header=None, sep=' ')\n", (1995, 2027), True, 'import pandas as pd\n'), ((1096, 1170), 'sklearn.linear_model.LogisticRegression', 'LogisticRegression', ([], {'class_weight': '"""balanced"""', 'solver': '"""liblinear"""', 'n_jobs': '(-1)'}), "(class_weight='balanced', solver='liblinear', n_jobs=-1)\n", (1114, 1170), False, 'from sklearn.linear_model import LogisticRegression\n'), ((1279, 1299), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1288, 1299), True, 'import numpy as np\n'), ((1464, 1484), 'numpy.argmax', 'np.argmax', (['test_y', '(1)'], {}), '(test_y, 1)\n', (1473, 1484), True, 'import numpy as np\n'), ((1486, 1506), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1495, 1506), True, 'import numpy as np\n'), ((1625, 1645), 'numpy.argmax', 'np.argmax', (['test_y', '(1)'], {}), '(test_y, 1)\n', (1634, 1645), True, 'import numpy as np\n'), ((1647, 1667), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1656, 1667), True, 'import numpy as np\n'), ((2053, 2081), 'numpy.where', 'np.where', (['(label[:, 1] == lbl)'], {}), '(label[:, 1] == lbl)\n', (2061, 2081), True, 'import numpy as np\n'), ((2269, 2315), 'numpy.linalg.norm', 'np.linalg.norm', (['(embedding[i] - embedding[node])'], {}), '(embedding[i] - embedding[node])\n', (2283, 2315), True, 'import numpy as np\n'), ((2399, 2417), 'numpy.array', 'np.array', (['distance'], {}), '(distance)\n', (2407, 2417), True, 'import numpy as np\n'), ((1322, 1342), 'numpy.argmax', 'np.argmax', (['y_pred', '(1)'], {}), '(y_pred, 1)\n', (1331, 1342), True, 'import numpy as np\n'), ((1346, 1366), 'numpy.argmax', 'np.argmax', (['test_y', '(1)'], {}), '(test_y, 1)\n', (1355, 1366), True, 'import numpy as np\n'), ((2446, 2476), 'numpy.isin', 'np.isin', (['distance[:, 0]', 'nodes'], {}), '(distance[:, 0], nodes)\n', (2453, 2476), True, 'import numpy as np\n')] |
import os
import glob
import argparse as ap
import shutil as sh
import re
def main():
parser = ap.ArgumentParser(description="""Uses minimum basis term file to extract the data for a
simulation that used the minimum number of basis terms for each frequency""")
parser.add_argument('min_file',type=str,help="""Path to file containing frequency in first
column and minimum number of basis terms in second column. Comma separated""")
parser.add_argument('s4_dir',type=str,help="""Path to top level dir containing all the frequency
subdirectories""")
parser.add_argument('dest_dir',type=str,help="""Path to top level dir which all files will be
moved to""")
args = parser.parse_args()
s4_dir = os.path.abspath(args.s4_dir)
min_file = os.path.abspath(args.min_file)
dest_dir = os.path.abspath(args.dest_dir)
if not os.path.isdir(args.s4_dir):
print("S4 dir does not exist")
quit()
if not os.path.isfile(min_file):
print('Min file does not exists')
quit()
try:
os.makedirs(dest_dir)
except OSError:
pass
with open(min_file,'r') as f:
data = [('{:G}'.format(float(line.split(',')[0])),str(line.split(',')[1].strip('\n'))) for line in f.readlines()[1:]]
print(data)
dir_glob = os.path.join(s4_dir,"frequency*")
freq_dirs = glob.glob(dir_glob)
for fdir in freq_dirs:
for freq, numbasis in data:
print('Frequency {} has minimum basis of {}'.format(freq,numbasis))
dat = freq.split('E')
            regex = dat[0][0:4]+"[0-9]+E\\"+dat[1]
            regex = regex.replace('.', '\\.')  # escape the decimal point; '\.' is an invalid escape in a plain string
m = re.search(regex,fdir)
if m:
print(m.group(0))
print('Frequency {} found in directory {}'.format(freq,fdir))
basis_glob = os.path.join(fdir,'numbasis_{}*'.format(numbasis))
basis_path = glob.glob(basis_glob)
assert len(basis_path) == 1
basis_path = basis_path[0]
if os.path.isdir(basis_path):
print('Found min basis path {}'.format(basis_path))
new_path = os.path.join(dest_dir,os.path.basename(fdir))
print('Copying {} to {}'.format(basis_path,new_path))
sh.copytree(basis_path,new_path)
else:
print('Missing {} !!!!'.format(basis_path))
break
if __name__ == '__main__':
main()
| [
"argparse.ArgumentParser",
"os.makedirs",
"os.path.join",
"os.path.isfile",
"shutil.copytree",
"os.path.isdir",
"os.path.basename",
"os.path.abspath",
"glob.glob",
"re.search"
] | [((100, 279), 'argparse.ArgumentParser', 'ap.ArgumentParser', ([], {'description': '"""Uses minimum basis term file to extract the data for a\n simulation that used the minimum number of basis terms for each frequency"""'}), '(description=\n """Uses minimum basis term file to extract the data for a\n simulation that used the minimum number of basis terms for each frequency"""\n )\n', (117, 279), True, 'import argparse as ap\n'), ((731, 759), 'os.path.abspath', 'os.path.abspath', (['args.s4_dir'], {}), '(args.s4_dir)\n', (746, 759), False, 'import os\n'), ((775, 805), 'os.path.abspath', 'os.path.abspath', (['args.min_file'], {}), '(args.min_file)\n', (790, 805), False, 'import os\n'), ((821, 851), 'os.path.abspath', 'os.path.abspath', (['args.dest_dir'], {}), '(args.dest_dir)\n', (836, 851), False, 'import os\n'), ((1308, 1342), 'os.path.join', 'os.path.join', (['s4_dir', '"""frequency*"""'], {}), "(s4_dir, 'frequency*')\n", (1320, 1342), False, 'import os\n'), ((1358, 1377), 'glob.glob', 'glob.glob', (['dir_glob'], {}), '(dir_glob)\n', (1367, 1377), False, 'import glob\n'), ((863, 889), 'os.path.isdir', 'os.path.isdir', (['args.s4_dir'], {}), '(args.s4_dir)\n', (876, 889), False, 'import os\n'), ((956, 980), 'os.path.isfile', 'os.path.isfile', (['min_file'], {}), '(min_file)\n', (970, 980), False, 'import os\n'), ((1056, 1077), 'os.makedirs', 'os.makedirs', (['dest_dir'], {}), '(dest_dir)\n', (1067, 1077), False, 'import os\n'), ((1666, 1688), 're.search', 're.search', (['regex', 'fdir'], {}), '(regex, fdir)\n', (1675, 1688), False, 'import re\n'), ((1927, 1948), 'glob.glob', 'glob.glob', (['basis_glob'], {}), '(basis_glob)\n', (1936, 1948), False, 'import glob\n'), ((2055, 2080), 'os.path.isdir', 'os.path.isdir', (['basis_path'], {}), '(basis_path)\n', (2068, 2080), False, 'import os\n'), ((2325, 2358), 'shutil.copytree', 'sh.copytree', (['basis_path', 'new_path'], {}), '(basis_path, new_path)\n', (2336, 2358), True, 'import shutil as sh\n'), ((2207, 2229), 'os.path.basename', 'os.path.basename', (['fdir'], {}), '(fdir)\n', (2223, 2229), False, 'import os\n')] |
# turingmachine.py - implementation of the Turing machine model
#
# Copyright 2014 <NAME>.
#
# This file is part of turingmachine.
#
# turingmachine is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation, either version 3 of the License, or (at your
# option) any later version.
#
# turingmachine is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# turingmachine. If not, see <http://www.gnu.org/licenses/>.
"""Provides an implementation of the Turing machine model."""
import logging
import os.path
# Create and configure the logger which logs debugging information by default.
logger = logging.getLogger(__name__)
handler = logging.StreamHandler()
formatter = logging.Formatter('[%(levelname)s] %(message)s')
handler.setFormatter(formatter)
logger.addHandler(handler)
logger.setLevel(level=logging.DEBUG)
#: Represents a movement of the read/write head of the Turing machine to the
#: left.
L = -1
#: Represents a movement of the read/write head of the Turing machine to the
#: right.
R = +1
#Stores the loaded TM, output and logger state
tm=[None]
output=[None]
logs=[True]
class UnknownSymbol(Exception):
"""This exception is raised when the Turing machine encounters a symbol
that does not appear in the transition dictionary.
"""
pass
class UnknownState(Exception):
"""This exception is raised when the Turing machine enters a state that
does not appear in the transition dictionary.
"""
pass
class BadSymbol(Exception):
"""This exception is raised when the user attempts to specify a tape
alphabet that includes strings of length not equal to one.
"""
pass
class BadFile(Exception):
"""This exception is raised when the file given by the user is
inconsistent with the rules for it.
"""
pass
class NoMachine(Exception):
"""This exception is raised when the user tries to run without having
loaded any machine.
"""
pass
def load(file, max_steps=10000):
'''Import parameters from given file, that has to be in the following
structure:
1st line: "ATM"
2nd line: Any comment you might want
3rd line: Input alphabet
4th line: Tape alphabet
5th line: Number of tapes (must be 1 yet)
6th line: Number of trails on each tape (must be 1 yet)
7th line: Number of directions on which the tapes are infinite (must be 2 yet)
8th line: Initial state
9th line: Final state
10th line and beyond: transitions in the following way:
current state
symbol read
next state
symbol to be written
direction to move
last line: "end"
Comments are made with "//"
'''
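    # A minimal hypothetical ATM file (illustrative only, not shipped with this
    # code) that flips every bit of its input:
    #     ATM
    #     bit flipper                      // free-form comment
    #     0 1
    #     0 1 B
    #     1
    #     1
    #     2
    #     q0
    #     qf
    #     q0 0 q0 1 R
    #     q0 1 q0 0 R
    #     q0 B qf B L
    #     end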
#Check if file exists, open it and separate lines
    if not os.path.exists(file): raise BadFile("File does not exist.")
    with open(file, "r") as handle:
        file = handle.read()
    file = file.split("\n")
#Create new list removing every comment
raw=[]
for line in file:
i=0
while i<len(line):
if line[i]=="/"and line[i+1]=="/":
raw.append(line[:i])
break
i+=1
if i==len(line): raw.append(line)
#Create descriptor separating words and removing null strings/lists
desc=[]
    for line in raw:
        # strings are immutable, so replace tabs with spaces instead of
        # assigning to individual characters (which would raise TypeError)
        line = line.replace("\t", " ")
        desc.append(line.split(" "))
i=0
while i<len(desc[-1]):
if desc[-1][i]=="":
desc[-1].pop(i)
i-=1
i+=1
line=0
while line<len(desc):
if len(desc[line])==0:
desc.pop(line)
line-=1
line+=1
#Check static values in code and raise exceptions
if desc[0][0]!="ATM": raise BadFile("First line is wrong.")
if desc[4][0]!="1": raise BadFile("Number of tapes must be 1.")
if desc[5][0]!="1": raise BadFile("Number of trails must be 1.")
if desc[6][0]!="2": raise BadFile("Number of infinite directions must be 2.")
if desc[-1][0]!="end": raise BadFile("Last line is wrong.")
#Get the input and tape alphabets
in_alph=desc[2]
tp_alph=desc[3]
#Get initial and final states
    if len(desc[7])>1: raise BadFile("Initial state must be a single token.")
    ini=desc[7][0]
    if len(desc[8])>1: raise BadFile("Final state must be a single token.")
fin=desc[8][0]
    #Remove the preamble and the last line from the descriptor
desc=desc[9:-1]
#generate transitions dictionary with the remaining lines
transitions={}
states=[]
i=0
while i<len(desc):
#Check if the line sintax is correct and raise exceptions
if len(desc[i])!=5: raise BadFile("Transition "+str(i+1)+" badly formulated.")
        if desc[i][1] not in tp_alph: raise BadFile("All symbols must be declared in the tape alphabet.")
        if desc[i][3] not in tp_alph: raise BadFile("All symbols must be declared in the tape alphabet.")
if desc[i][4]!="R" and desc[i][4]!="L": raise BadFile("Directions must be either R or L.")
#Add transition
#If state alredy exist in states list, just add the transition depending on direction
if desc[i][0] in states:
if desc[i][4]=="R":transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],R)
else:transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],L)
#Otherwise add state and dictionary for it in transitions and add the transition
else:
states.append(desc[i][0])
transitions[desc[i][0]]={}
if desc[i][4]=="R":transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],R)
else:transitions[desc[i][0]][desc[i][1]]=(desc[i][2],desc[i][3],L)
#If destination state does not exist, create it and it's dictionary in transitions
if not desc[i][2] in states:
states.append(desc[i][2])
transitions[desc[i][2]]={}
i+=1
#Check if initial and final states are in the transitions and raise errors
if not ini in states: raise BadFile("Initial state must be in transitions.")
if not fin in states: raise BadFile("Final state must be in transitions")
#Return the TM class
tm[0]=TuringMachine(states, in_alph, ini, fin, transitions, max_steps)
def run(string):
'''Runs the Turing Machine with given string
'''
    if tm[0] is None: raise NoMachine("You must first load a machine.")
print(tm[0](string))
def test(io_file):
'''Test all the cases in given file and return correctness percentage
'''
if not os.path.exists(io_file): raise BadFile(io_file+" does not exist.")
    with open(io_file) as handle:
        file = handle.read()
    file = file.split("\n")
line=0
while line<len(file):
file[line]=file[line].split(" ")
item=0
while item<len(file[line]):
if file[line][item]=="":
file[line].pop(item)
item-=1
item+=1
if len(file[line])==0 or file[line][0][0]=="#":
file.pop(line)
line-=1
line+=1
tests=[]
for line in file:
tests.append([line[0], line[2]])
logs[0]=False
cont=0
n=1
for test in tests:
res=tm[0](test[0])
if res:
string=output[0]
while string[0]=="B": string=string[1:]
while string[-1]=="B": string=string[:-1]
if string==test[1]:
cont+=1
print("test "+str(n)+": "+"\033[92m {}\033[00m" .format("Correct"))
else:
print("test "+str(n)+": "+"\033[91m {}\033[00m" .format("Wrong"))
elif test[1]=="STOP_FAIL":
cont+=1
print("test "+str(n)+": "+"\033[92m {}\033[00m" .format("Correct"))
else:
print("test "+str(n)+": "+"\033[91m {}\033[00m" .format("Wrong"))
n+=1
logs[0]=True
print(f"Nota final: {round(10*cont/len(tests), 2)}")
class TuringMachine(object):
"""An implementation of the Turing machine model.
Once instantiated, the Turing machine can be executed by calling it, and it
can be reset to its initial state by calling :meth:`reset`.
`states` is a set of states. A "state" can be anything, but usually simple
integers suffice.
`initial_state` is the state of the machine before it starts reading
symbols from an input string. This state must be a member of `states`. When
:meth:`reset` is called, the state of the machine will be set to this
state.
`accept_state` is the state that will cause the machine to halt and accept
(that is, return ``True``). This set must be a member of `states`.
`transition` is a two-dimensional dictionary specifying how the
"configuration" of the machine (that is, the head location, state, and
string) changes each time it reads from its input string. The dictionary is
indexed first by state, then by symbol. Each entry in this two-dimensional
dictionary must be a three-tuple, *(new_state, new_symbol, direction)*,
where *new_state* is the next state in which the Turing machine will be,
*new_symbol* is the symbol that will be written in the current location on
the string, and *direction* is either :data:`L` or :data:`R`, representing
    movement of the head left or right, respectively.
    `max_steps` is the maximum number of steps the machine may take before it
    is assumed to be stuck in an infinite loop, at which point ``False`` is
    returned.
The transition dictionary need not have an entry for the accept and reject
states. For example, the accept and reject states need not be in
`transition`, because the implementation of :meth:`__call__` checks if this
Turing machine has entered one of these states and immediately halts
execution.
    Although they would otherwise be necessary in the formal mathematical
definition of a Turing machine, this class requires the user to specify
neither the input alphabet nor the tape alphabet.
"""
def __init__(self, states, in_alph, initial_state, accept_state,
transition, max_steps, *args, **kw):
self.states = states
self.in_alph=in_alph
self.accept_state = accept_state
self.initial_state = initial_state
self.transition = transition
self.max_steps=max_steps
def _log_state(self, string, head_location, current_state):
"""Logs a visual representation of the current head location, state,
and contents of the tape of this Turing machine.
For example, if the Turing machine has ``'_010_'`` on its input tape
(that is, if `string` is ``'_010_'``), is in state ``4``, and has
read/write head at the location of the ``1`` symbol, this method would
log the following messages, one line at a time.
_010_
^
4
The caret represents the current location of the read/write head, and
the number beneath it represents the current state of the machine.
This method should be called from :meth:`__call__`, during the
execution of the Turing machine on a particular string.
"""
logger.debug('')
logger.debug(string)
logger.debug(' ' * head_location + '^')
logger.debug(' ' * head_location + str(current_state))
def __call__(self, string):
"""Runs the computer program specified by this Turing machine on
`string`.
        `string` must be a Python string over the input alphabet. The
        character ``'B'`` represents a blank on the theoretical infinite
        input tape; a blank is appended to each end of the string
        automatically before execution starts.
The initial head location of the Turing machine is the left-most
non-blank character of the string.
Calling this Turing machine executes the program specified by its
transition function and returns ``True`` if the Turing machine halts
        and accepts and ``False`` if the Turing machine halts and rejects. If
        the machine runs for more than `max_steps` steps, it is treated as
        looping forever and ``False`` is returned.
"""
#Check if string is valid
for char in string:
if not char in self.in_alph: raise UnknownSymbol("String does not correspond to given input alphabet.")
current_state = self.initial_state
string="B"+string+"B"
        # A blank was just added on each side of the string, so the head
        # starts at position 1, the first non-blank character.
head_location = 1
steps=0
        # loop until the machine accepts, hits a missing transition, or exceeds max_steps
while True:
# If the head has moved too far to the left or right, add a blank
# to the current string in the appropriate location. If a blank is
# added to the left, the head location must be incremented, since
# the string has now essentially been shifted right by one cell.
if head_location < 0:
string = 'B' + string
head_location += 1
elif head_location >= len(string):
string += 'B'
if logs[0]: self._log_state(string, head_location, current_state)
# check for accepting or rejecting configurations
if current_state == self.accept_state:
output[0]=string
return True
if steps>self.max_steps:
return False
# for the sake of brevity, rename some verbose variables
h = head_location
q = current_state
s = string[h]
# if the current_state is not in the transition table, raise error
if q not in self.transition:
raise UnknownState('{} is not in transition'
' dictionary'.format(q))
# check if the transition table has an entry for the current symbol
if s not in self.transition[q]:
return False
# compute the new configuration from the transition function
new_state, new_symbol, direction = self.transition[q][s]
# assert that the symbol to write is a string of length one
if len(new_symbol) != 1:
raise BadSymbol('tape alphabet must only include symbols of'
' length 1 ({})'.format(new_symbol))
# write the specified new symbol to the string; Python strings are
# immutable, so we must create a new one
string = string[:h] + new_symbol + string[h + 1:]
# set the new state and head location
current_state = new_state
# direction is either L or R, which are defined to be -1 and +1
head_location += direction
steps+=1
raise Exception('Turing machine somehow halted without accepting or'
' rejecting.')
| [
"logging.getLogger",
"logging.Formatter",
"logging.StreamHandler"
] | [((951, 978), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (968, 978), False, 'import logging\n'), ((989, 1012), 'logging.StreamHandler', 'logging.StreamHandler', ([], {}), '()\n', (1010, 1012), False, 'import logging\n'), ((1025, 1073), 'logging.Formatter', 'logging.Formatter', (['"""[%(levelname)s] %(message)s"""'], {}), "('[%(levelname)s] %(message)s')\n", (1042, 1073), False, 'import logging\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jul 5 23:04:10 2020
@author: y3
749e8aa818a63c61c31acd7ee948d6d8
"""
import requests
api_address = "https://api.openweathermap.org/data/2.5/weather?q="
api_key_url = "&APPID=749e8aa818a63c61c31acd7ee948d6d8"
city_name = "Bet shemesh,IL"
weather_data = requests.get(api_address+city_name+api_key_url).json()
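# Note: OpenWeatherMap returns temperatures in Kelvin by default; appending
# "&units=metric" (Celsius) or "&units=imperial" (Fahrenheit) to the request
# URL changes the unit.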
print(weather_data["main"]["temp"]) | [
"requests.get"
] | [((323, 374), 'requests.get', 'requests.get', (['(api_address + city_name + api_key_url)'], {}), '(api_address + city_name + api_key_url)\n', (335, 374), False, 'import requests\n')] |
#!/usr/bin/python3
import os
import click
import sys
import csv
import time
import pandas as pd
import country_converter as coco
import hashlib
import phonenumbers
from tqdm import tqdm
from uszipcode import SearchEngine
HEADER_TRANSLATIONS = {
"email1": "Email",
"phone1": "Phone",
"person_country": "Country",
}
REQUIRED_HEADERS = {"<NAME>", "<NAME>", "Phone", "Email", "Country", "Zip"}
OPTIONAL_HEADERS = set() # TODO: Add optional headers that can be uploaded.
# All headers that can be in a Customer Match CSV.
ALL_HEADERS = REQUIRED_HEADERS.union(OPTIONAL_HEADERS)
DO_NOT_HASH = {"Country", "Zip"}
# ANSI codes to color/format terminal prints.
ANSI = {
"YELLOW": "\u001b[33m",
"RED": "\u001b[31m",
"CYAN": "\u001b[36m",
"BOLD": "\u001b[1m",
"RESET": "\u001b[0m",
}
class Error(ValueError):
"""Base class for other custom exceptions"""
pass
class FormatError(Error):
"""Raised when a file is not in the correct format."""
pass
class NoZipError(FormatError):
"""Raised when a zip code is not found in a spreadsheet. Sometimes recoverable."""
pass
# ==========================
# Formatted console prints
# ==========================
def warn(message: str):
tqdm.write(f"{ANSI['BOLD'] + ANSI['YELLOW']}WARNING:{ANSI['RESET']} {message}")
def notify(message: str):
tqdm.write(f"{ANSI['BOLD'] + ANSI['CYAN']}INFO:{ANSI['RESET']} {message}")
def check_path(filepath: str):
"""Checks that the path to a file exists. To check if a path to the file and the file itself exists,
use check_csv
Args:
filepath (str): The path to the file
Raises:
ValueError: If the path to the file does not exist
"""
path = os.path.dirname(filepath)
if path.strip() and not os.path.exists(path):
raise ValueError(f"The path {path} does not exist.")
def check_csv(filepath: str) -> csv.Dialect:
"""Runs checks on a CSV file, such as whether it exists and if it can be parsed, and returns
its dialect object
Args:
filepath (str): Path to the CSV file
Raises:
ValueError: If the path does not exist, or the file cannot be read as a CSV
Returns:
csv.Dialect: Parsed CSV dialect from the file
"""
# Check that the file exists, and is a file.
basename = os.path.basename(filepath)
if not os.path.exists(filepath):
raise ValueError(f"The path {filepath} does not exist.")
if not os.path.isfile(filepath):
raise ValueError(f"{basename} is not a file.")
# Try to open the file and verify it can be read as a CSV.
try:
file = open(filepath, encoding="utf8")
dialect = csv.Sniffer().sniff(file.read(100000))
file.seek(0)
file.close()
return dialect
except csv.Error as e:
raise ValueError(
f"Could not get a CSV dialect for file {basename}. Is it a CSV file? Is it maybe too large?"
)
def parse_google_fields(filepath: str, ignore_zip: bool = False) -> dict:
"""Parse the header of the CSV to get the Google field names.
Args:
filepath (str): Path to the CSV file.
ignore_zip (bool): Flag to ignore the zip code column, and not throw an error if it is missing.
Raises:
ValueError: If not all required headers can be found
Returns:
dict: A map from the field name that was found in the CSV to Google's field name.
eg: "first_name": "<NAME>"
"""
field_map = {}
found_headers = []
with open(filepath, "r", encoding="utf8") as file:
reader = csv.DictReader(file)
field_names = reader.fieldnames
# For each field in the header column, try to translate
# them to a header recognized by Google.
for field in field_names:
header = None
# Check if there is a direct translation first:
if field in HEADER_TRANSLATIONS:
header = HEADER_TRANSLATIONS[field]
# Otherwise attempt to translate snake case:
elif (translated_field := field.replace("_", " ").title()) in ALL_HEADERS:
header = translated_field
# If we have not found this header yet, add it to the map.
# Otherwise, if we have found the header already, warn the user.
if header is not None and header not in found_headers:
notify(f"Detected header name '{header}' as '{field}' in CSV file")
field_map[field] = header
found_headers.append(header)
elif header in found_headers:
warn(
f"Duplicate header name '{header}' was extracted as '{field}'. Keeping column with header '{field_map[header]}'"
)
# Check if we have all required headers.
# All required headers are found if the required headers set is a subset of the headers found.
if not REQUIRED_HEADERS.issubset(field_map.values()):
missing_headers = REQUIRED_HEADERS.difference(field_map.values())
if len(missing_headers) == 1 and list(missing_headers)[0] == "Zip":
if not ignore_zip:
raise NoZipError(field_map)
else:
raise FormatError(
f"Not all required headers found. Missing: {', '.join(missing_headers)}"
)
return field_map
def parse_location_fields(filepath: str) -> dict:
"""Parse a header of a CSV file to get the country and city.
Args:
filepath (str): Path to the CSV file
Raises:
FormatError: If the city, country or both columns cannot be found.
Returns:
dict: A map from the field name that was found in the CSV to the standardized name.
eg: "person_city": "City"
"""
WANTED_FIELDS = {"state", "city"}
found_translations = []
field_map = {}
with open(filepath, "r", encoding="utf8") as file:
reader = csv.DictReader(file)
field_names = reader.fieldnames
for field in field_names:
# Salesql CSVs prefix state and city by person_.
field = field.lower()
salesql_field = field.replace("person_", "")
possible_fields = {field, salesql_field}
if found_set := WANTED_FIELDS.intersection(possible_fields):
translation = list(found_set)[0]
notify(f"Detected header name '{translation}' as '{field}' in CSV file")
found_translations.append(translation)
field_map[field] = translation
if not WANTED_FIELDS.issubset(field_map.values()):
missing_fields = WANTED_FIELDS.difference(field_map.values())
raise FormatError(
f"Could not find state and city columns. Missing: {', '.join(missing_fields)}"
)
return field_map
def hash_element(element: any) -> str:
"""Produces a sha256 hash of an element of data.
Args:
element (any): The data to be hashed
Returns:
str: The sha256 hash hex digest
"""
element = str(element).encode("utf-8")
return hashlib.sha256(element).hexdigest()
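# For example, hash_element("test@example.com") yields the 64-character hex
# SHA256 digest of that string's UTF-8 bytes. Customer Match matching only
# works if values are normalized (trimmed, lower-cased) before hashing, which
# format_for_hashing() below takes care of.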
def hash_series(series: pd.Series):
"""Hashes a series, usually represnting columns in a CSV.
Args:
series (pd.Series): [description]
Returns:
[type]: [description]
"""
# If the name of the series is a field
# that shouldn't be hashed (eg: Zip), don't hash it.
if series.name in DO_NOT_HASH:
return series
else:
return series.map(hash_element)
def hash_dataframe(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Hashes all elements in a Pandas dataframe.
Args:
dataframe (pd.DataFrame): The dataframe to be hashed
Returns:
pd.DataFrame: The dataframe with all elements hashed
"""
notify(f"Hashing {dataframe.size} elements...")
start = time.time()
dataframe = dataframe.apply(hash_series, axis=0)
notify(
f"Finished hashing {dataframe.size} elements in {time.time() - start} seconds."
)
return dataframe
def get_dataframe(filepath: str) -> pd.DataFrame:
"""Gets a dataframe for a given CSV file.
Args:
filepath (str): Path to the CSV file.
Returns:
        pd.DataFrame: The parsed CSV contents, with all values read as strings.
"""
dialect = check_csv(filepath)
return pd.read_csv(
filepath,
warn_bad_lines=False,
error_bad_lines=False,
sep=dialect.delimiter,
low_memory=False,
dtype=str,
)
def translate_dataframe(dataframe: pd.DataFrame, field_map: dict) -> pd.DataFrame:
"""Translates a CSV file to use Google's desired field names in the header.
Any columns with field names that are not recognized by the Customer Match
specification are removed.
Args:
dataframe (pd.DataFrame): The DataFrame of the CSV file.
Returns:
pd.DataFrame: The pandas dataframe that was translated.
Can be exported to a CSV with the save_csv function.
"""
# Parse the headers into a field_map.
# Keep only the columns that have matching headers.
dataframe = dataframe[field_map.keys()]
# Reverse the map to rename columns to Google's expectation.
dataframe = dataframe.rename(columns=field_map)
return dataframe
def save_csv(dataframe: pd.DataFrame, output: str):
"""Saves a dataframe to a CSV file.
Args:
dataframe (pd.DataFrame): The dataframe to be saved
output (str): The filepath to be saved to
"""
dataframe.to_csv(output, index=False, encoding="utf-8")
notify(f"Succesfully saved Customer Match data file to {os.path.abspath(output)}.")
def get_zip(row: pd.Series, search: SearchEngine) -> str:
"""Get the zip code for a row in a dataframe with the city and state.
Args:
row (pd.Series): A series containing a city and state field.
search (SearchEngine): The search engine object to lookup the zipcode.
Returns:
str: The zipcode if found. None otherwise.
"""
try:
if row.count() == 2:
res = search.by_city_and_state(city=row["city"], state=row["state"])
return res[0].zipcode
else:
warn(f"NaN detected for {row['city']}, {row['state']}.")
return ""
except (AttributeError, IndexError):
warn(f"Zip lookup for {row['city']}, {row['state']} failed.")
return ""
def get_zips(dataframe: pd.DataFrame) -> pd.Series:
"""Gets the zips for a dataframe with city and state columns.
Args:
dataframe (pd.DataFrame): The dataframe, must have city and state columns.
Returns:
pd.Series: A series of zip codes correlating to the zips for each city and state.
"""
search = SearchEngine()
tqdm.pandas(desc="Getting zipcodes")
zips = dataframe.progress_apply(lambda row: get_zip(row, search), axis=1)
zips = zips.rename("Zip")
return zips
def convert_to_iso(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Converts a dataframe's Country column to ISO2 format (United States => US)
Args:
dataframe (pd.DataFrame): A dataframe with a Country column.
Returns:
pd.DataFrame: The dataframe with the Country column in ISO2 format.
"""
notify(f"Converting {len(dataframe.index)} countries to ISO2 format...")
start = time.time()
iso2_names = coco.convert(names=dataframe["Country"], to="ISO2", not_found=None)
dataframe["Country"] = pd.Series(iso2_names)
notify(
f"Finished converting countries to ISO2 format in {time.time() - start} seconds."
)
return dataframe
def normalize_series(column: pd.Series) -> pd.Series:
"""Formats a series (usually a column) of strings to be all lowercase and without whitespace.
Args:
column (pd.Series): The series of strings to be normalized
Returns:
pd.Series: The same series, with normalized strings.
"""
def format(el: str) -> str:
el = el.strip()
el = el.lower()
return el
return column.map(format)
def get_e164(row: pd.Series) -> str:
"""Takes a series containing a Phone and Country column and returns the
phone number in E.164 format.
Args:
row (pd.Series): A series containing at least a Phone and Country column.
Returns:
str: The phone number in E.164 format, if it could be formatted.
None otherwise.
"""
if row.count() == 2:
try:
number = phonenumbers.parse(row["Phone"], row["Country"])
return phonenumbers.format_number(
number, phonenumbers.PhoneNumberFormat.E164
)
except phonenumbers.NumberParseException:
warn(
f"Can't parse phone number {row['Phone']} for country {row['Country']}. It is not recognized as a valid number."
)
return None
else:
# warn(
# f"Can't convert phone number {row['Phone']} for country {row['Country']} due to missing data."
# )
return None
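# Illustrative call (reserved fictional US number, not real data):
#     get_e164(pd.Series({"Phone": "(202) 555-0175", "Country": "US"}))  # -> '+12025550175'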
def convert_to_e164(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Converts a dataframe's Phone column to E.164. Requires a Country column.
Args:
dataframe (pd.DataFrame): A dataframe with a Phone and Country column
Returns:
pd.DataFrame: The same dataframe with the Phone column reformatted to E.164.
"""
tqdm.pandas(desc="Converting phone numbers to E.164 format")
numbers = dataframe[["Country", "Phone"]].progress_apply(get_e164, axis=1)
dataframe["Phone"] = numbers
return dataframe
def format_for_hashing(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Performs formatting on a dataframe necessary for accurate hashing.
Will convert the Country column to ISO, normalize all strings, and convert
the phone number column to E.164 format.
Args:
dataframe (pd.DataFrame): A dataframe to be formatted
Returns:
pd.DataFrame: The same dataframe formatted. May have many NaN values!
"""
notify("Formatting file for hashing...")
dataframe = dataframe.apply(normalize_series, axis=0)
dataframe = convert_to_iso(dataframe)
dataframe = convert_to_e164(dataframe)
notify("Done formatting file.")
return dataframe
def prune(dataframe: pd.DataFrame) -> pd.DataFrame:
"""Drops any rows in a dataframe that contain NaN, and prints
how many rows were affected.
Args:
dataframe (pd.DataFrame): Dataframe to be pruned
Returns:
pd.DataFrame: Same dataframe without rows that have NaN.
"""
total_rows = len(dataframe.index)
notify(f"Removing rows with empty values...")
dataframe = dataframe.dropna()
pruned_rows = len(dataframe.index)
notify(f"Removed {total_rows - pruned_rows} rows with empty values.")
return dataframe
@click.command(
help="Generates a Google Ads Customer Match compliant CSV file from a (potentially large) CSV file in another format."
)
@click.option("-o", "--output", default="result.csv", help="Path to output file.")
@click.option(
"--hash",
"do_hash",
help="SHA256 hash each element in the resulting CSV.",
is_flag=True,
)
@click.option(
"--ignore-empty",
help="Don't remove rows with empty elements.",
is_flag=True,
)
@click.option(
"--format",
help="Format the document as it would before hashing with E.164 phone numbers and lowercase names. Will remove a significant amount of rows.",
is_flag=True,
)
@click.argument("filepath")
def main(
filepath: str, output: str, do_hash: bool, ignore_empty: bool, format: bool
):
try:
file = None
# Attempt to translate to Google's standard.
try:
check_path(output)
file = get_dataframe(filepath)
field_map = parse_google_fields(filepath)
file = translate_dataframe(file, field_map)
# If the no zip is found, it is possible to lookup zip
# codes. Ask the user if they want to try.
except NoZipError:
warn(
"A zip code column could not be found in the CSV file. If there is a state and city column, the zip codes may be able to be automatically detected. This may take hours, depending on your file size."
)
if click.confirm("Would you like to try to detect zip codes?"):
field_map = parse_location_fields(filepath)
states_and_cities = translate_dataframe(file, field_map)
zip_codes = get_zips(states_and_cities)
field_map = parse_google_fields(filepath, ignore_zip=True)
translated = translate_dataframe(file, field_map)
file = pd.concat([translated, zip_codes], axis=1)
else:
sys.exit()
if not ignore_empty:
file = prune(file)
# Format the file for hashing if we are going to hash.
# Country codes are converted to ISO as a step in hashing, so
# we only have to convert if we are not hashing.
if do_hash or format:
file = format_for_hashing(file)
else:
file = convert_to_iso(file)
# Check again for empty values, if phone numbers can't be formatted
# or ISO formats can't be found.
if not ignore_empty:
file = prune(file)
# Hashing must be the last step, or else NaN will be hashed.
if do_hash:
file = hash_dataframe(file)
save_csv(file, output)
return 0
except ValueError as e:
sys.exit(f"{ANSI['BOLD'] + ANSI['RED']}ERROR:{ANSI['RESET']} {e}")
if __name__ == "__main__":
main() | [
"csv.DictReader",
"pandas.read_csv",
"country_converter.convert",
"csv.Sniffer",
"sys.exit",
"os.path.exists",
"tqdm.tqdm.write",
"click.option",
"click.command",
"click.argument",
"hashlib.sha256",
"click.confirm",
"uszipcode.SearchEngine",
"os.path.isfile",
"os.path.dirname",
"phonen... | [((14903, 15046), 'click.command', 'click.command', ([], {'help': '"""Generates a Google Ads Customer Match compliant CSV file from a (potentially large) CSV file in another format."""'}), "(help=\n 'Generates a Google Ads Customer Match compliant CSV file from a (potentially large) CSV file in another format.'\n )\n", (14916, 15046), False, 'import click\n'), ((15044, 15130), 'click.option', 'click.option', (['"""-o"""', '"""--output"""'], {'default': '"""result.csv"""', 'help': '"""Path to output file."""'}), "('-o', '--output', default='result.csv', help=\n 'Path to output file.')\n", (15056, 15130), False, 'import click\n'), ((15127, 15234), 'click.option', 'click.option', (['"""--hash"""', '"""do_hash"""'], {'help': '"""SHA256 hash each element in the resulting CSV."""', 'is_flag': '(True)'}), "('--hash', 'do_hash', help=\n 'SHA256 hash each element in the resulting CSV.', is_flag=True)\n", (15139, 15234), False, 'import click\n'), ((15250, 15346), 'click.option', 'click.option', (['"""--ignore-empty"""'], {'help': '"""Don\'t remove rows with empty elements."""', 'is_flag': '(True)'}), '(\'--ignore-empty\', help=\n "Don\'t remove rows with empty elements.", is_flag=True)\n', (15262, 15346), False, 'import click\n'), ((15358, 15549), 'click.option', 'click.option', (['"""--format"""'], {'help': '"""Format the document as it would before hashing with E.164 phone numbers and lowercase names. Will remove a significant amount of rows."""', 'is_flag': '(True)'}), "('--format', help=\n 'Format the document as it would before hashing with E.164 phone numbers and lowercase names. Will remove a significant amount of rows.'\n , is_flag=True)\n", (15370, 15549), False, 'import click\n'), ((15556, 15582), 'click.argument', 'click.argument', (['"""filepath"""'], {}), "('filepath')\n", (15570, 15582), False, 'import click\n'), ((1239, 1318), 'tqdm.tqdm.write', 'tqdm.write', (['f"""{ANSI[\'BOLD\'] + ANSI[\'YELLOW\']}WARNING:{ANSI[\'RESET\']} {message}"""'], {}), '(f"{ANSI[\'BOLD\'] + ANSI[\'YELLOW\']}WARNING:{ANSI[\'RESET\']} {message}")\n', (1249, 1318), False, 'from tqdm import tqdm\n'), ((1351, 1425), 'tqdm.tqdm.write', 'tqdm.write', (['f"""{ANSI[\'BOLD\'] + ANSI[\'CYAN\']}INFO:{ANSI[\'RESET\']} {message}"""'], {}), '(f"{ANSI[\'BOLD\'] + ANSI[\'CYAN\']}INFO:{ANSI[\'RESET\']} {message}")\n', (1361, 1425), False, 'from tqdm import tqdm\n'), ((1733, 1758), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (1748, 1758), False, 'import os\n'), ((2334, 2360), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (2350, 2360), False, 'import os\n'), ((7896, 7907), 'time.time', 'time.time', ([], {}), '()\n', (7905, 7907), False, 'import time\n'), ((8346, 8469), 'pandas.read_csv', 'pd.read_csv', (['filepath'], {'warn_bad_lines': '(False)', 'error_bad_lines': '(False)', 'sep': 'dialect.delimiter', 'low_memory': '(False)', 'dtype': 'str'}), '(filepath, warn_bad_lines=False, error_bad_lines=False, sep=\n dialect.delimiter, low_memory=False, dtype=str)\n', (8357, 8469), True, 'import pandas as pd\n'), ((10785, 10799), 'uszipcode.SearchEngine', 'SearchEngine', ([], {}), '()\n', (10797, 10799), False, 'from uszipcode import SearchEngine\n'), ((10804, 10840), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {'desc': '"""Getting zipcodes"""'}), "(desc='Getting zipcodes')\n", (10815, 10840), False, 'from tqdm import tqdm\n'), ((11377, 11388), 'time.time', 'time.time', ([], {}), '()\n', (11386, 11388), False, 'import time\n'), ((11406, 11473), 
'country_converter.convert', 'coco.convert', ([], {'names': "dataframe['Country']", 'to': '"""ISO2"""', 'not_found': 'None'}), "(names=dataframe['Country'], to='ISO2', not_found=None)\n", (11418, 11473), True, 'import country_converter as coco\n'), ((11501, 11522), 'pandas.Series', 'pd.Series', (['iso2_names'], {}), '(iso2_names)\n', (11510, 11522), True, 'import pandas as pd\n'), ((13446, 13506), 'tqdm.tqdm.pandas', 'tqdm.pandas', ([], {'desc': '"""Converting phone numbers to E.164 format"""'}), "(desc='Converting phone numbers to E.164 format')\n", (13457, 13506), False, 'from tqdm import tqdm\n'), ((2372, 2396), 'os.path.exists', 'os.path.exists', (['filepath'], {}), '(filepath)\n', (2386, 2396), False, 'import os\n'), ((2474, 2498), 'os.path.isfile', 'os.path.isfile', (['filepath'], {}), '(filepath)\n', (2488, 2498), False, 'import os\n'), ((3611, 3631), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (3625, 3631), False, 'import csv\n'), ((5961, 5981), 'csv.DictReader', 'csv.DictReader', (['file'], {}), '(file)\n', (5975, 5981), False, 'import csv\n'), ((1787, 1807), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1801, 1807), False, 'import os\n'), ((7116, 7139), 'hashlib.sha256', 'hashlib.sha256', (['element'], {}), '(element)\n', (7130, 7139), False, 'import hashlib\n'), ((12530, 12578), 'phonenumbers.parse', 'phonenumbers.parse', (["row['Phone']", "row['Country']"], {}), "(row['Phone'], row['Country'])\n", (12548, 12578), False, 'import phonenumbers\n'), ((12598, 12669), 'phonenumbers.format_number', 'phonenumbers.format_number', (['number', 'phonenumbers.PhoneNumberFormat.E164'], {}), '(number, phonenumbers.PhoneNumberFormat.E164)\n', (12624, 12669), False, 'import phonenumbers\n'), ((17633, 17699), 'sys.exit', 'sys.exit', (['f"""{ANSI[\'BOLD\'] + ANSI[\'RED\']}ERROR:{ANSI[\'RESET\']} {e}"""'], {}), '(f"{ANSI[\'BOLD\'] + ANSI[\'RED\']}ERROR:{ANSI[\'RESET\']} {e}")\n', (17641, 17699), False, 'import sys\n'), ((2693, 2706), 'csv.Sniffer', 'csv.Sniffer', ([], {}), '()\n', (2704, 2706), False, 'import csv\n'), ((9665, 9688), 'os.path.abspath', 'os.path.abspath', (['output'], {}), '(output)\n', (9680, 9688), False, 'import os\n'), ((16358, 16417), 'click.confirm', 'click.confirm', (['"""Would you like to try to detect zip codes?"""'], {}), "('Would you like to try to detect zip codes?')\n", (16371, 16417), False, 'import click\n'), ((8030, 8041), 'time.time', 'time.time', ([], {}), '()\n', (8039, 8041), False, 'import time\n'), ((11594, 11605), 'time.time', 'time.time', ([], {}), '()\n', (11603, 11605), False, 'import time\n'), ((16772, 16814), 'pandas.concat', 'pd.concat', (['[translated, zip_codes]'], {'axis': '(1)'}), '([translated, zip_codes], axis=1)\n', (16781, 16814), True, 'import pandas as pd\n'), ((16849, 16859), 'sys.exit', 'sys.exit', ([], {}), '()\n', (16857, 16859), False, 'import sys\n')] |
import io
from PIL import Image
from torchvision import models
import torch
import torchvision.transforms as transforms
import torch.nn as nn
import torch.nn.functional as F
import urllib
import os
def get_model_from_global_agent():
global_model = models.squeezenet1_1(pretrained=True)
global_model.classifier[1] = nn.Conv2d(512, 5, kernel_size=(1,1), stride=(1,1))
global_model.num_classes = 5
global_model.to(torch.device('cpu'))
    map_location = torch.device('cpu')
    model_weights_link = 'https://drive.google.com/uc?id=11pb2yJKXgyYC9XnB9cd6HlNCFNxnlY1D'
    model_weights_path = './model/squeezenet_0.pt'
    urllib.request.urlretrieve(model_weights_link, model_weights_path)
    global_model.load_state_dict(torch.load(model_weights_path, map_location=map_location))
os.remove(model_weights_path)
global_model.eval()
return global_model
def transform_image(image_bytes):
apply_transform = transforms.Compose([transforms.Resize(265),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
image = Image.open(io.BytesIO(image_bytes)).convert('RGB')
return apply_transform(image).unsqueeze(0)
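# Hedged usage sketch (hypothetical file name, not part of the original module):
#     model = get_model_from_global_agent()
#     with open('fundus.jpg', 'rb') as f:
#         tensor = transform_image(f.read())
#     grade = model(tensor).argmax(dim=1).item()  # predicted DR grade, 0-4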
# change to DR dataset format
def format_class_name(class_name):
class_name = class_name.replace('_', ' ')
class_name = class_name.title()
return class_name
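# e.g. format_class_name("no_diabetic_retinopathy") -> "No Diabetic Retinopathy"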
| [
"torchvision.transforms.CenterCrop",
"torch.device",
"urllib.request.urlretrieve",
"io.BytesIO",
"torch.nn.Conv2d",
"torchvision.transforms.Normalize",
"torchvision.transforms.Resize",
"torchvision.transforms.ToTensor",
"torchvision.models.squeezenet1_1",
"os.remove"
] | [((253, 290), 'torchvision.models.squeezenet1_1', 'models.squeezenet1_1', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (273, 290), False, 'from torchvision import models\n'), ((324, 376), 'torch.nn.Conv2d', 'nn.Conv2d', (['(512)', '(5)'], {'kernel_size': '(1, 1)', 'stride': '(1, 1)'}), '(512, 5, kernel_size=(1, 1), stride=(1, 1))\n', (333, 376), True, 'import torch.nn as nn\n'), ((466, 485), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (478, 485), False, 'import torch\n'), ((633, 699), 'urllib.request.urlretrieve', 'urllib.request.urlretrieve', (['model_weights_link', 'model_weights_path'], {}), '(model_weights_link, model_weights_path)\n', (659, 699), False, 'import urllib\n'), ((810, 839), 'os.remove', 'os.remove', (['model_weights_path'], {}), '(model_weights_path)\n', (819, 839), False, 'import os\n'), ((428, 447), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (440, 447), False, 'import torch\n'), ((966, 988), 'torchvision.transforms.Resize', 'transforms.Resize', (['(265)'], {}), '(265)\n', (983, 988), True, 'import torchvision.transforms as transforms\n'), ((1020, 1046), 'torchvision.transforms.CenterCrop', 'transforms.CenterCrop', (['(224)'], {}), '(224)\n', (1041, 1046), True, 'import torchvision.transforms as transforms\n'), ((1078, 1099), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (1097, 1099), True, 'import torchvision.transforms as transforms\n'), ((1134, 1188), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['(0.5, 0.5, 0.5)', '(0.5, 0.5, 0.5)'], {}), '((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))\n', (1154, 1188), True, 'import torchvision.transforms as transforms\n'), ((784, 803), 'torch.device', 'torch.device', (['"""cpu"""'], {}), "('cpu')\n", (796, 803), False, 'import torch\n'), ((1214, 1237), 'io.BytesIO', 'io.BytesIO', (['image_bytes'], {}), '(image_bytes)\n', (1224, 1237), False, 'import io\n')] |
"""
main(terminal).py
Author: <NAME>
"""
from solver import print_grid, solve
def main():
sudoku_grid = [ [0,8,0, 0,0,0, 2,0,0],
[0,0,0, 0,8,4, 0,9,0],
[0,0,6, 3,2,0, 0,1,0],
[0,9,7, 0,0,0, 0,8,0],
[8,0,0, 9,0,3, 0,0,2],
[0,1,0, 0,0,0, 9,5,0],
[0,7,0, 0,4,5, 8,0,0],
[0,3,0, 7,1,0, 0,0,0],
[0,0,8, 0,0,0, 0,4,0]
]
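    # NOTE: the reassignment just below replaces the puzzle above, so only the
    # second grid is actually solved.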
sudoku_grid = [
[2,5,0, 0,9,7, 3,0,6],
[0,0,7, 3,0,0, 1,0,2],
[0,3,1, 4,0,5, 8,0,0],
[0,6,0, 8,0,0, 0,2,7],
[0,2,4, 0,0,1, 0,3,8],
[0,8,0, 9,0,0, 6,1,0],
[3,0,5, 0,0,4, 0,0,1],
[0,0,6, 0,0,9, 7,0,0],
[0,7,0, 5,1,0, 4,0,3]
]
print_grid(sudoku_grid)
print('....................................')
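    # "copy" is only a second name for the same list object (no deep copy),
    # so solve() below also mutates sudoku_grid in place.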
copy = sudoku_grid
solve(copy, 9)
print_grid(copy)
if __name__ == "__main__":
main() | [
"solver.solve",
"solver.print_grid"
] | [((872, 895), 'solver.print_grid', 'print_grid', (['sudoku_grid'], {}), '(sudoku_grid)\n', (882, 895), False, 'from solver import print_grid, solve\n'), ((973, 987), 'solver.solve', 'solve', (['copy', '(9)'], {}), '(copy, 9)\n', (978, 987), False, 'from solver import print_grid, solve\n'), ((992, 1008), 'solver.print_grid', 'print_grid', (['copy'], {}), '(copy)\n', (1002, 1008), False, 'from solver import print_grid, solve\n')] |
import subprocess
import os
import requests
import pyttsx3
from bs4 import BeautifulSoup
class Commander:
def __init__(self):
self.confirm = ["yes", "ok", "go on", "sure", "do it", "yeah", "yaa", "Imm", "confirm", "of course"]
self.cancel = ["nope", "no", "noo", "not yet", "don't", "do not", "stop", "wait", "hold on", "not now"]
def discover(self, text):
if "what" in text:
if "my name" in text:
self.respond("You haven't told me your name yet")
if "your name" in text:
self.respond(" I am Personal assistant. May i help you ? ")
else:
params = {"q": text}
r = requests.get("https://www.bing.com/search", params=params)
soup = BeautifulSoup(r.text, "html.parser")
results = soup.find_all("div", class_="dc_mn")
for result in results:
print(result.get_text())
if "tell me about" in text:
con = text.split(" ", 3)[-1] # expression in python 1 equals the second word
self.respond("Wait a minute, let me think about " + con)
self.respond("Ok, i got it ")
URL = 'https://en.wikipedia.org/wiki/' + con
content = requests.get(URL)
soup = BeautifulSoup(content.text, 'html.parser')
try:
results = soup.find('div', id='mw-content-text').find('div', class_="mw-parser-output").find_all('p', limit=5)
except:
results = ""
if results == "":
self.respond("Sorry, try asking something else")
else:
for result in results:
self.respond(result.get_text().rstrip())
if "I don't like you" in text:
self.respond("Ok go on, i don't give a fuck!")
if "*** you" in text:
self.respond("So am I, fuck you triple x time")
def respond(self, response):
print(response)
engine = pyttsx3.init()
        engine.setProperty('rate', 150)  # speaking rate in words per minute (pyttsx3 default is ~200)
engine.setProperty('volume', 0.9) # Volume 0-1
engine.say(response)
engine.runAndWait()
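# Minimal usage sketch (illustrative only; the speech-recognition front end
# that would normally supply `text` is assumed, not shown here):
if __name__ == "__main__":
    Commander().discover("tell me about Python")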
| [
"pyttsx3.init",
"bs4.BeautifulSoup",
"requests.get"
] | [((2034, 2048), 'pyttsx3.init', 'pyttsx3.init', ([], {}), '()\n', (2046, 2048), False, 'import pyttsx3\n'), ((1282, 1299), 'requests.get', 'requests.get', (['URL'], {}), '(URL)\n', (1294, 1299), False, 'import requests\n'), ((1319, 1361), 'bs4.BeautifulSoup', 'BeautifulSoup', (['content.text', '"""html.parser"""'], {}), "(content.text, 'html.parser')\n", (1332, 1361), False, 'from bs4 import BeautifulSoup\n'), ((698, 756), 'requests.get', 'requests.get', (['"""https://www.bing.com/search"""'], {'params': 'params'}), "('https://www.bing.com/search', params=params)\n", (710, 756), False, 'import requests\n'), ((780, 816), 'bs4.BeautifulSoup', 'BeautifulSoup', (['r.text', '"""html.parser"""'], {}), "(r.text, 'html.parser')\n", (793, 816), False, 'from bs4 import BeautifulSoup\n')] |
# Generated by Django 2.2.12 on 2020-04-29 18:42
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('cmsplugin_remote_form', '0004_remoteform_notification_emails'),
]
operations = [
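        # Auto-generated migration: every operation below is an AlterField
        # that refreshes choices, defaults, help_text and lengths in place.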
migrations.AlterField(
model_name='extrafield',
name='fieldType',
field=models.CharField(choices=[('CharField', 'CharField'), ('BooleanField', 'BooleanField'), ('EmailField', 'EmailField'), ('DecimalField', 'DecimalField'), ('FloatField', 'FloatField'), ('IntegerField', 'IntegerField'), ('FileField', 'FileField'), ('ImageField', 'ImageField'), ('USStateSelect', 'US State Selector'), ('IPAddressField', 'IPAddressField'), ('MathCaptcha', 'Math Captcha'), ('auto_Textarea', 'CharField as Textarea'), ('auto_hidden_input', 'CharField as HiddenInput'), ('auto_referral_page', 'Referral page as HiddenInput'), ('auto_GET_parameter', 'GET parameter as HiddenInput'), ('CharFieldWithValidator', 'CharFieldWithValidator'), ('ChoiceField', 'ChoiceField'), ('ReCaptcha', 'reCAPTCHA')], max_length=100),
),
migrations.AlterField(
model_name='extrafield',
name='initial',
field=models.CharField(blank=True, max_length=4096, null=True),
),
migrations.AlterField(
model_name='extrafield',
name='name',
field=models.CharField(default='', max_length=100, verbose_name='Name'),
),
migrations.AlterField(
model_name='remoteform',
name='error_notification_emails',
field=models.CharField(blank=True, help_text='multiple emails separated by commas', max_length=250, null=True, verbose_name='Email Errors To:'),
),
migrations.AlterField(
model_name='remoteform',
name='on_submit',
field=models.CharField(blank=True, help_text='Google Analytics Code', max_length=400, null=True),
),
migrations.AlterField(
model_name='remoteform',
name='post_url',
field=models.CharField(default='#remoteURL', max_length=200, null=True, verbose_name='Remote URL'),
),
migrations.AlterField(
model_name='remoteform',
name='template',
field=models.CharField(choices=[('cmsplugin_remote_form_templates/default.html', 'Default'), ('cmsplugin_remote_form_templates/vertical_onecol.html', 'Vertical - One Col')], default='cmsplugin_remote_form/default.html', max_length=255),
),
]
| [
"django.db.models.CharField"
] | [((370, 1138), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('CharField', 'CharField'), ('BooleanField', 'BooleanField'), (\n 'EmailField', 'EmailField'), ('DecimalField', 'DecimalField'), (\n 'FloatField', 'FloatField'), ('IntegerField', 'IntegerField'), (\n 'FileField', 'FileField'), ('ImageField', 'ImageField'), (\n 'USStateSelect', 'US State Selector'), ('IPAddressField',\n 'IPAddressField'), ('MathCaptcha', 'Math Captcha'), ('auto_Textarea',\n 'CharField as Textarea'), ('auto_hidden_input',\n 'CharField as HiddenInput'), ('auto_referral_page',\n 'Referral page as HiddenInput'), ('auto_GET_parameter',\n 'GET parameter as HiddenInput'), ('CharFieldWithValidator',\n 'CharFieldWithValidator'), ('ChoiceField', 'ChoiceField'), ('ReCaptcha',\n 'reCAPTCHA')]", 'max_length': '(100)'}), "(choices=[('CharField', 'CharField'), ('BooleanField',\n 'BooleanField'), ('EmailField', 'EmailField'), ('DecimalField',\n 'DecimalField'), ('FloatField', 'FloatField'), ('IntegerField',\n 'IntegerField'), ('FileField', 'FileField'), ('ImageField',\n 'ImageField'), ('USStateSelect', 'US State Selector'), (\n 'IPAddressField', 'IPAddressField'), ('MathCaptcha', 'Math Captcha'), (\n 'auto_Textarea', 'CharField as Textarea'), ('auto_hidden_input',\n 'CharField as HiddenInput'), ('auto_referral_page',\n 'Referral page as HiddenInput'), ('auto_GET_parameter',\n 'GET parameter as HiddenInput'), ('CharFieldWithValidator',\n 'CharFieldWithValidator'), ('ChoiceField', 'ChoiceField'), ('ReCaptcha',\n 'reCAPTCHA')], max_length=100)\n", (386, 1138), False, 'from django.db import migrations, models\n'), ((1219, 1275), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'max_length': '(4096)', 'null': '(True)'}), '(blank=True, max_length=4096, null=True)\n', (1235, 1275), False, 'from django.db import migrations, models\n'), ((1399, 1464), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(100)', 'verbose_name': '"""Name"""'}), "(default='', max_length=100, verbose_name='Name')\n", (1415, 1464), False, 'from django.db import migrations, models\n'), ((1609, 1755), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""multiple emails separated by commas"""', 'max_length': '(250)', 'null': '(True)', 'verbose_name': '"""Email Errors To:"""'}), "(blank=True, help_text=\n 'multiple emails separated by commas', max_length=250, null=True,\n verbose_name='Email Errors To:')\n", (1625, 1755), False, 'from django.db import migrations, models\n'), ((1875, 1970), 'django.db.models.CharField', 'models.CharField', ([], {'blank': '(True)', 'help_text': '"""Google Analytics Code"""', 'max_length': '(400)', 'null': '(True)'}), "(blank=True, help_text='Google Analytics Code', max_length=\n 400, null=True)\n", (1891, 1970), False, 'from django.db import migrations, models\n'), ((2093, 2189), 'django.db.models.CharField', 'models.CharField', ([], {'default': '"""#remoteURL"""', 'max_length': '(200)', 'null': '(True)', 'verbose_name': '"""Remote URL"""'}), "(default='#remoteURL', max_length=200, null=True,\n verbose_name='Remote URL')\n", (2109, 2189), False, 'from django.db import migrations, models\n'), ((2313, 2554), 'django.db.models.CharField', 'models.CharField', ([], {'choices': "[('cmsplugin_remote_form_templates/default.html', 'Default'), (\n 'cmsplugin_remote_form_templates/vertical_onecol.html',\n 'Vertical - One Col')]", 'default': '"""cmsplugin_remote_form/default.html"""', 'max_length': '(255)'}), 
"(choices=[('cmsplugin_remote_form_templates/default.html',\n 'Default'), ('cmsplugin_remote_form_templates/vertical_onecol.html',\n 'Vertical - One Col')], default='cmsplugin_remote_form/default.html',\n max_length=255)\n", (2329, 2554), False, 'from django.db import migrations, models\n')] |
import csv
import cv2
import numpy as np
import pandas as pd
import sys
from datetime import datetime
from numpy.random import RandomState
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.callbacks import ModelCheckpoint
from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D
def DrivingNetV1():
model = Sequential()
model.add( Cropping2D( cropping=( (90,20), (0,0) ), input_shape=( 160, 320, 3 ) ) )
model.add( Lambda( lambda x: (x/255.0) - 0.5 ) )
model.add( Flatten( ) )
model.add( Dense(1) )
return model
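#NOTE: DrivingNetV1 above is a bare flatten->dense baseline and is not
#reachable from the model-selection chain at the bottom of this script.
#The functions below all follow the NVIDIA PilotNet-style layout (five conv
#layers feeding a small dense head), differing only in activations, dropout
#and pooling.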
def NVIDIANetV0( lr=1e-3):
model = Sequential( name="NVIDIANetV0" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(100, activation='linear' ) )
model.add( Dense(50, activation='linear' ) )
model.add( Dense(10, activation='linear' ) )
model.add( Dense(1, activation='linear') )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV1( lr=1e-3):
model = Sequential( name="NVIDIANetV1" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(100, activation='tanh' ) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
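    # e.g. a network output of r = 0.1 (1/m) maps to atan(2*0.1)*57.3 ≈ 11.3 deg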
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV2( lr=1e-3):
model = Sequential( name="NVIDIANetV2" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dense(100, activation='linear' ) )
model.add( Dense(50, activation='linear' ) )
model.add( Dense(10, activation='linear' ) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV3( lr=1e-3):
model = Sequential( name="NVIDIANetV3" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV4( lr=1e-3):
model = Sequential( name="NVIDIANetV4" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dropout(0.125) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV5( lr=1e-3):
model = Sequential( name="NVIDIANetV5" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def NVIDIANetV6( lr=1e-3):
model = Sequential( name="NVIDIANetV6" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
model.add( Conv2D( 24, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 36, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 48, 5, 2, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Conv2D( 64, 3, activation='relu', padding='valid' ) )
model.add( Flatten( ) )
model.add( Dropout(0.5) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def ModNVIDIANetV1( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV1" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
    #Keeping padding as "same" and applying a max pool
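    #stride-1 convs keep full-resolution activations; the 2x2 max pools after
    #each conv now do the downsampling instead of the conv strides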
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
    model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing this output to become nearly flat
model.add( Flatten( ) )
#model.add( Dense(1164, activation='relu' ) )
#model.add( Dropout(0.2))
model.add( Dense(300, activation='tanh' ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear' ) )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def ModNVIDIANetV2( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV2" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
    #Keeping padding as "same" and applying a max pool
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
    model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing this output to become nearly flat
model.add( Flatten( ) )
model.add( Dense(300, activation='linear' ) )
model.add( Dense(100, activation='linear' ) )
model.add( Dense(50, activation='linear' ) )
model.add( Dense(10, activation='linear' ) )
model.add( Dense(1, activation='linear' ) )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
def ModNVIDIANetV3( lr=1e-3):
model = Sequential( name = "ModNVIDIANetV3" )
model.add( Lambda( lambda x: (x/255.0) - 0.5, input_shape=( 160, 320, 3 ) ) )
model.add( Cropping2D( cropping=( (70,25), (0,0) ) ) )
    #Keeping padding as "same" and applying a max pool
model.add( Conv2D( 24, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 36, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 48, 5, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
model.add( MaxPool2D( ) )
model.add( Conv2D( 64, 3, 1, activation='relu', padding='same' ) )
    model.add( MaxPool2D( pool_size=(4, 2) ) ) #forcing this output to become nearly flat
model.add( Flatten( ) )
model.add( Dense(100, activation='tanh' ) )
model.add( Dropout(0.5) )
model.add( Dense(50, activation='tanh' ) )
model.add( Dropout(0.25) )
model.add( Dense(10, activation='tanh' ) )
model.add( Dense(1, activation='linear') )
    #Converting curvature to angle, assuming a wheelbase of 2 meters, then going from rad to deg
    #The network output is assumed to be 1/R (r); the layer below converts it to a steering angle (alpha) [deg]
    # alpha = atan(l*r)*57.3, where l = wheelbase, assumed to be 2 meters
model.add( Lambda( lambda x: tf.multiply( tf.atan( tf.multiply( x, 2 ) ), 57.3 ) ) )
opt = keras.optimizers.Adam(learning_rate=lr )
model.compile( loss='mse', optimizer=opt )
return model
#Hyper parameters
BATCH_SIZE=64
LEARNING_RATE=1e-4
EPOCHS=5
model_name = sys.argv[1]
model = Sequential()
if( model_name == 'NVIDIANetV0'):
model = NVIDIANetV0( LEARNING_RATE )
elif( model_name == 'NVIDIANetV1'):
model = NVIDIANetV1( LEARNING_RATE )
elif( model_name == 'NVIDIANetV2' ):
model = NVIDIANetV2( LEARNING_RATE )
elif( model_name == 'NVIDIANetV3' ):
model = NVIDIANetV3( LEARNING_RATE )
elif( model_name == 'NVIDIANetV4' ):
model = NVIDIANetV4( LEARNING_RATE )
elif( model_name == 'NVIDIANetV5' ):
model = NVIDIANetV5( LEARNING_RATE )
elif( model_name == 'NVIDIANetV6' ):
model = NVIDIANetV6( LEARNING_RATE )
elif( model_name == 'ModNVIDIANetV1' ):
model = ModNVIDIANetV1( LEARNING_RATE )
elif( model_name == 'ModNVIDIANetV2' ):
model = ModNVIDIANetV2( LEARNING_RATE )
elif( model_name == 'ModNVIDIANetV3' ):
model = ModNVIDIANetV3( LEARNING_RATE )
else:
raise Exception('Invalid model name')
#Load data. Split data into train and validation
df = pd.read_csv('data/driving_log.csv', names=['center', 'left', 'right', 'measurement', '1', '2', '3'])
rng = RandomState()
train = df.sample( frac=0.7, random_state=rng )
valid = df.loc[~df.index.isin(train.index) ]
NUM_TRAIN_IMAGES = train.shape[0]
NUM_TEST_IMAGES = valid.shape[0]
#Defining the data generator
def load_data( df, batch_size, augment=False ):
i = 0
while True:
images = []
measurements = []
while len(images) < batch_size:
image_path = df.iloc[i,:]['center'].split('/')[-1]
current_path = './data/IMG/' + image_path
measurement = float( df.iloc[i,:]['measurement'] )
image = cv2.imread( current_path )
measurements.append( measurement )
images.append( image )
            if( augment ):
flipped_image = cv2.flip( image, 1 )
images.append( flipped_image )
measurements.append( -1.0*measurement )
# image_path = df.iloc[i,:]['left'].split('/')[-1]
# current_path = './data/IMG/' + image_path
# measurement = float( +0.9 )
# image = cv2.imread( current_path )
# measurements.append( measurement )
# images.append( image )
# image_path = df.iloc[i,:]['right'].split('/')[-1]
# current_path = './data/IMG/' + image_path
# measurement = float( -0.9 )
# image = cv2.imread( current_path )
# measurements.append( measurement )
# images.append( image )
i += 1
if( i == df.shape[0] ):
                i = 0
yield ( np.array( images ), np.array( measurements ) )
#Define the generators
trainGen = load_data( train, BATCH_SIZE, True)
validGen = load_data( valid, BATCH_SIZE )
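#Flipping in the generator doubles the effective training-set size; the
#validation set is not augmented, so its count stays unchanged.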
NUM_TRAIN_IMAGES = 2*NUM_TRAIN_IMAGES
NUM_TEST_IMAGES = NUM_TEST_IMAGES
model.summary()
#Using tensorboard
logdir = "logs/scalars/" + model.name
#defining the TensorBoard callback
tensorboard_callback = keras.callbacks.TensorBoard(log_dir=logdir)
model.fit(
x=trainGen,
steps_per_epoch=NUM_TRAIN_IMAGES//BATCH_SIZE,
verbose=1,
validation_data=validGen,
validation_steps=NUM_TEST_IMAGES//BATCH_SIZE, epochs=EPOCHS,
callbacks=[tensorboard_callback] )
model.save( model.name + '.h5')
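#To inspect the logged curves afterwards, point the standard TensorBoard CLI
#at the log directory used above:
#  tensorboard --logdir logs/scalars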
| [
"keras.optimizers.Adam",
"keras.layers.Conv2D",
"keras.layers.Flatten",
"pandas.read_csv",
"cv2.flip",
"keras.layers.Lambda",
"tensorflow.multiply",
"keras.callbacks.TensorBoard",
"keras.models.Sequential",
"numpy.array",
"keras.layers.Dropout",
"keras.layers.Dense",
"keras.layers.MaxPool2D"... | [((14357, 14369), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (14367, 14369), False, 'from keras.models import Sequential\n'), ((15282, 15386), 'pandas.read_csv', 'pd.read_csv', (['"""data/driving_log.csv"""'], {'names': "['center', 'left', 'right', 'measurement', '1', '2', '3']"}), "('data/driving_log.csv', names=['center', 'left', 'right',\n 'measurement', '1', '2', '3'])\n", (15293, 15386), True, 'import pandas as pd\n'), ((15389, 15402), 'numpy.random.RandomState', 'RandomState', ([], {}), '()\n', (15400, 15402), False, 'from numpy.random import RandomState\n'), ((17059, 17102), 'keras.callbacks.TensorBoard', 'keras.callbacks.TensorBoard', ([], {'log_dir': 'logdir'}), '(log_dir=logdir)\n', (17086, 17102), False, 'import keras\n'), ((378, 390), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (388, 390), False, 'from keras.models import Sequential\n'), ((627, 657), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV0"""'}), "(name='NVIDIANetV0')\n", (637, 657), False, 'from keras.models import Sequential\n'), ((1448, 1487), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (1469, 1487), False, 'import keras\n'), ((1585, 1615), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV1"""'}), "(name='NVIDIANetV1')\n", (1595, 1615), False, 'from keras.models import Sequential\n'), ((2757, 2796), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (2778, 2796), False, 'import keras\n'), ((2897, 2927), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV2"""'}), "(name='NVIDIANetV2')\n", (2907, 2927), False, 'from keras.models import Sequential\n'), ((4080, 4119), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (4101, 4119), False, 'import keras\n'), ((4223, 4253), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV3"""'}), "(name='NVIDIANetV3')\n", (4233, 4253), False, 'from keras.models import Sequential\n'), ((5401, 5440), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (5422, 5440), False, 'import keras\n'), ((5538, 5568), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV4"""'}), "(name='NVIDIANetV4')\n", (5548, 5568), False, 'from keras.models import Sequential\n'), ((6794, 6833), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (6815, 6833), False, 'import keras\n'), ((6931, 6961), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV5"""'}), "(name='NVIDIANetV5')\n", (6941, 6961), False, 'from keras.models import Sequential\n'), ((8083, 8122), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (8104, 8122), False, 'import keras\n'), ((8220, 8250), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""NVIDIANetV6"""'}), "(name='NVIDIANetV6')\n", (8230, 8250), False, 'from keras.models import Sequential\n'), ((9399, 9438), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (9420, 9438), False, 'import keras\n'), ((9539, 9572), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""ModNVIDIANetV1"""'}), "(name='ModNVIDIANetV1')\n", (9549, 9572), False, 'from keras.models import Sequential\n'), ((10996, 11035), 'keras.optimizers.Adam', 
'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (11017, 11035), False, 'import keras\n'), ((11139, 11172), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""ModNVIDIANetV2"""'}), "(name='ModNVIDIANetV2')\n", (11149, 11172), False, 'from keras.models import Sequential\n'), ((12625, 12664), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (12646, 12664), False, 'import keras\n'), ((12771, 12804), 'keras.models.Sequential', 'Sequential', ([], {'name': '"""ModNVIDIANetV3"""'}), "(name='ModNVIDIANetV3')\n", (12781, 12804), False, 'from keras.models import Sequential\n'), ((14161, 14200), 'keras.optimizers.Adam', 'keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), '(learning_rate=lr)\n', (14182, 14200), False, 'import keras\n'), ((403, 469), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((90, 20), (0, 0))', 'input_shape': '(160, 320, 3)'}), '(cropping=((90, 20), (0, 0)), input_shape=(160, 320, 3))\n', (413, 469), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((489, 522), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {}), '(lambda x: x / 255.0 - 0.5)\n', (495, 522), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((539, 548), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (546, 548), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((564, 572), 'keras.layers.Dense', 'Dense', (['(1)'], {}), '(1)\n', (569, 572), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((674, 734), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (680, 734), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((753, 792), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (763, 792), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((811, 863), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (817, 863), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((882, 934), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (888, 934), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((953, 1005), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (959, 1005), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1024, 1073), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (1030, 1073), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1093, 1142), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), 
"(64, 3, activation='relu', padding='valid')\n", (1099, 1142), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1163, 1172), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1170, 1172), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1263, 1294), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""linear"""'}), "(100, activation='linear')\n", (1268, 1294), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1311, 1341), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""linear"""'}), "(50, activation='linear')\n", (1316, 1341), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1358, 1388), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""linear"""'}), "(10, activation='linear')\n", (1363, 1388), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1406, 1435), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (1411, 1435), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1632, 1692), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (1638, 1692), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1711, 1750), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (1721, 1750), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1769, 1821), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (1775, 1821), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1840, 1892), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (1846, 1892), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1911, 1963), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (1917, 1963), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((1982, 2031), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (1988, 2031), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2051, 2100), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (2057, 2100), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2121, 2130), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (2128, 2130), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2221, 2250), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': 
'"""tanh"""'}), "(100, activation='tanh')\n", (2226, 2250), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2267, 2295), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (2272, 2295), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2312, 2340), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (2317, 2340), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2358, 2387), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (2363, 2387), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((2950, 3010), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (2956, 3010), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3032, 3071), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (3042, 3071), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3096, 3148), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (3102, 3148), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3171, 3223), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (3177, 3223), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3246, 3298), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (3252, 3298), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3321, 3370), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (3327, 3370), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3396, 3445), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (3402, 3445), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3472, 3481), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3479, 3481), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3500, 3531), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""linear"""'}), "(100, activation='linear')\n", (3505, 3531), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3556, 3586), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""linear"""'}), "(50, activation='linear')\n", (3561, 3586), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3608, 3638), 
'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""linear"""'}), "(10, activation='linear')\n", (3613, 3638), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((3660, 3689), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (3665, 3689), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4270, 4330), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (4276, 4330), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4349, 4388), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (4359, 4388), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4407, 4459), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (4413, 4459), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4478, 4530), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (4484, 4530), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4549, 4601), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (4555, 4601), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4620, 4669), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (4626, 4669), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4689, 4738), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (4695, 4738), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4759, 4768), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (4766, 4768), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4784, 4813), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""'}), "(100, activation='tanh')\n", (4789, 4813), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4830, 4842), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4837, 4842), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4857, 4885), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (4862, 4885), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4902, 4914), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4909, 4914), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4929, 4957), 'keras.layers.Dense', 'Dense', (['(10)'], 
{'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (4934, 4957), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((4975, 4987), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (4982, 4987), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5002, 5031), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (5007, 5031), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5585, 5645), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (5591, 5645), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5664, 5703), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (5674, 5703), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5722, 5774), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (5728, 5774), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5793, 5845), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (5799, 5845), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5864, 5916), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (5870, 5916), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((5935, 5984), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (5941, 5984), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6004, 6053), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (6010, 6053), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6074, 6083), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (6081, 6083), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6174, 6203), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""'}), "(100, activation='tanh')\n", (6179, 6203), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6220, 6232), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (6227, 6232), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6247, 6275), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (6252, 6275), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6292, 6305), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (6299, 6305), False, 
'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6320, 6348), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (6325, 6348), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6366, 6380), 'keras.layers.Dropout', 'Dropout', (['(0.125)'], {}), '(0.125)\n', (6373, 6380), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6395, 6424), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (6400, 6424), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((6978, 7038), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (6984, 7038), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7057, 7096), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (7067, 7096), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7115, 7167), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (7121, 7167), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7186, 7238), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (7192, 7238), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7257, 7309), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (7263, 7309), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7328, 7377), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (7334, 7377), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7397, 7446), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (7403, 7446), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7467, 7476), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (7474, 7476), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7492, 7521), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""'}), "(100, activation='tanh')\n", (7497, 7521), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7538, 7550), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (7545, 7550), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7565, 7593), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (7570, 7593), False, 'from keras.layers import Flatten, Dense, 
Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7610, 7623), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (7617, 7623), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7638, 7666), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (7643, 7666), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((7684, 7713), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (7689, 7713), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8267, 8327), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (8273, 8327), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8346, 8385), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (8356, 8385), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8404, 8456), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(24, 5, 2, activation='relu', padding='valid')\n", (8410, 8456), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8475, 8527), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(36, 5, 2, activation='relu', padding='valid')\n", (8481, 8527), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8546, 8598), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(2)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(48, 5, 2, activation='relu', padding='valid')\n", (8552, 8598), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8617, 8666), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (8623, 8666), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8686, 8735), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)'], {'activation': '"""relu"""', 'padding': '"""valid"""'}), "(64, 3, activation='relu', padding='valid')\n", (8692, 8735), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8756, 8765), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (8763, 8765), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8781, 8793), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8788, 8793), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8808, 8837), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""'}), "(100, activation='tanh')\n", (8813, 8837), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8854, 8866), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (8861, 8866), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8881, 8909), 
'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (8886, 8909), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8926, 8939), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (8933, 8939), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((8954, 8982), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (8959, 8982), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9000, 9029), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (9005, 9029), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9591, 9651), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (9597, 9651), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9670, 9709), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (9680, 9709), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9775, 9826), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(24, 5, 1, activation='relu', padding='same')\n", (9781, 9826), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9844, 9855), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (9853, 9855), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9871, 9922), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(36, 5, 1, activation='relu', padding='same')\n", (9877, 9922), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9940, 9951), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (9949, 9951), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((9967, 10018), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(48, 5, 1, activation='relu', padding='same')\n", (9973, 10018), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10036, 10047), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (10045, 10047), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10063, 10114), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, 3, 1, activation='relu', padding='same')\n", (10069, 10114), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10132, 10143), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (10141, 10143), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10159, 10210), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, 3, 1, activation='relu', padding='same')\n", (10165, 
10210), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10228, 10255), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(4, 2)'}), '(pool_size=(4, 2))\n', (10237, 10255), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10317, 10326), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (10324, 10326), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10416, 10445), 'keras.layers.Dense', 'Dense', (['(300)'], {'activation': '"""tanh"""'}), "(300, activation='tanh')\n", (10421, 10445), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10462, 10491), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""'}), "(100, activation='tanh')\n", (10467, 10491), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10508, 10536), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (10513, 10536), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10553, 10581), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (10558, 10581), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((10598, 10627), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (10603, 10627), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11197, 11257), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (11203, 11257), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11279, 11318), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (11289, 11318), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11393, 11444), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(24, 5, 1, activation='relu', padding='same')\n", (11399, 11444), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11465, 11476), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (11474, 11476), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11495, 11546), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(36, 5, 1, activation='relu', padding='same')\n", (11501, 11546), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11567, 11578), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (11576, 11578), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11597, 11648), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(48, 5, 1, activation='relu', padding='same')\n", (11603, 11648), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11669, 11680), 
'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (11678, 11680), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11699, 11750), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, 3, 1, activation='relu', padding='same')\n", (11705, 11750), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11771, 11782), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (11780, 11782), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11801, 11852), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, 3, 1, activation='relu', padding='same')\n", (11807, 11852), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11873, 11900), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(4, 2)'}), '(pool_size=(4, 2))\n', (11882, 11900), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11966, 11975), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (11973, 11975), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((11994, 12025), 'keras.layers.Dense', 'Dense', (['(300)'], {'activation': '"""linear"""'}), "(300, activation='linear')\n", (11999, 12025), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((12046, 12077), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""linear"""'}), "(100, activation='linear')\n", (12051, 12077), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((12098, 12128), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""linear"""'}), "(50, activation='linear')\n", (12103, 12128), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((12150, 12180), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""linear"""'}), "(10, activation='linear')\n", (12155, 12180), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((12206, 12235), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (12211, 12235), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((12822, 12882), 'keras.layers.Lambda', 'Lambda', (['(lambda x: x / 255.0 - 0.5)'], {'input_shape': '(160, 320, 3)'}), '(lambda x: x / 255.0 - 0.5, input_shape=(160, 320, 3))\n', (12828, 12882), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((12901, 12940), 'keras.layers.Cropping2D', 'Cropping2D', ([], {'cropping': '((70, 25), (0, 0))'}), '(cropping=((70, 25), (0, 0)))\n', (12911, 12940), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13005, 13056), 'keras.layers.Conv2D', 'Conv2D', (['(24)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(24, 5, 1, activation='relu', padding='same')\n", (13011, 13056), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13074, 13085), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (13083, 13085), False, 'from keras.layers import 
Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13101, 13152), 'keras.layers.Conv2D', 'Conv2D', (['(36)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(36, 5, 1, activation='relu', padding='same')\n", (13107, 13152), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13170, 13181), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (13179, 13181), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13197, 13248), 'keras.layers.Conv2D', 'Conv2D', (['(48)', '(5)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(48, 5, 1, activation='relu', padding='same')\n", (13203, 13248), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13266, 13277), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (13275, 13277), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13293, 13344), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, 3, 1, activation='relu', padding='same')\n", (13299, 13344), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13362, 13373), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {}), '()\n', (13371, 13373), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13389, 13440), 'keras.layers.Conv2D', 'Conv2D', (['(64)', '(3)', '(1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), "(64, 3, 1, activation='relu', padding='same')\n", (13395, 13440), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13458, 13485), 'keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(4, 2)'}), '(pool_size=(4, 2))\n', (13467, 13485), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13548, 13557), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (13555, 13557), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13573, 13602), 'keras.layers.Dense', 'Dense', (['(100)'], {'activation': '"""tanh"""'}), "(100, activation='tanh')\n", (13578, 13602), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13619, 13631), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (13626, 13631), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13646, 13674), 'keras.layers.Dense', 'Dense', (['(50)'], {'activation': '"""tanh"""'}), "(50, activation='tanh')\n", (13651, 13674), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13691, 13704), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (13698, 13704), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13719, 13747), 'keras.layers.Dense', 'Dense', (['(10)'], {'activation': '"""tanh"""'}), "(10, activation='tanh')\n", (13724, 13747), False, 'from keras.layers import Flatten, Dense, Lambda, Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((13765, 13794), 'keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""linear"""'}), "(1, activation='linear')\n", (13770, 13794), False, 'from keras.layers import Flatten, Dense, Lambda, 
Cropping2D, Conv2D, Dropout, MaxPool2D\n'), ((15903, 15927), 'cv2.imread', 'cv2.imread', (['current_path'], {}), '(current_path)\n', (15913, 15927), False, 'import cv2\n'), ((16035, 16053), 'cv2.flip', 'cv2.flip', (['image', '(1)'], {}), '(image, 1)\n', (16043, 16053), False, 'import cv2\n'), ((16686, 16702), 'numpy.array', 'np.array', (['images'], {}), '(images)\n', (16694, 16702), True, 'import numpy as np\n'), ((16706, 16728), 'numpy.array', 'np.array', (['measurements'], {}), '(measurements)\n', (16714, 16728), True, 'import numpy as np\n'), ((2712, 2729), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (2723, 2729), True, 'import tensorflow as tf\n'), ((4029, 4046), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (4040, 4046), True, 'import tensorflow as tf\n'), ((5356, 5373), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (5367, 5373), True, 'import tensorflow as tf\n'), ((6749, 6766), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (6760, 6766), True, 'import tensorflow as tf\n'), ((8038, 8055), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (8049, 8055), True, 'import tensorflow as tf\n'), ((9354, 9371), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (9365, 9371), True, 'import tensorflow as tf\n'), ((10953, 10970), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (10964, 10970), True, 'import tensorflow as tf\n'), ((12576, 12593), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (12587, 12593), True, 'import tensorflow as tf\n'), ((14119, 14136), 'tensorflow.multiply', 'tf.multiply', (['x', '(2)'], {}), '(x, 2)\n', (14130, 14136), True, 'import tensorflow as tf\n')] |
import torch
from torch import nn
from configs import ANCHOR_SIZES
class PostRes(nn.Module):
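    """3D residual block: two 3x3x3 conv+BN layers with a 1x1x1 projection shortcut
    when the stride or the channel count changes."""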
def __init__(self, n_in, n_out, stride=1):
super(PostRes, self).__init__()
self.conv1 = nn.Conv3d(n_in, n_out, kernel_size=3, stride=stride, padding=1)
self.bn1 = nn.BatchNorm3d(n_out)
self.relu = nn.ReLU(inplace=True)
self.conv2 = nn.Conv3d(n_out, n_out, kernel_size=3, padding=1)
self.bn2 = nn.BatchNorm3d(n_out)
if stride != 1 or n_out != n_in:
self.shortcut = nn.Sequential(
nn.Conv3d(n_in, n_out, kernel_size=1, stride=stride),
nn.BatchNorm3d(n_out))
else:
self.shortcut = None
def forward(self, x):
residual = x
if self.shortcut is not None:
residual = self.shortcut(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += residual
out = self.relu(out)
return out
class Net(nn.Module):
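    """Encoder-decoder 3D detection network: four PostRes stages with max-pooling on
    the way down, two ConvTranspose3d paths back up with skip connections, and a
    1x1x1 head that predicts 5 values per anchor in ANCHOR_SIZES at each output voxel."""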
def __init__(self):
super(Net, self).__init__()
self.preBlock = nn.Sequential(
nn.Conv3d(1, 24, kernel_size=3, padding=1),
nn.BatchNorm3d(24),
nn.ReLU(inplace=True),
nn.Conv3d(24, 24, kernel_size=3, padding=1),
nn.BatchNorm3d(24),
nn.ReLU(inplace=True))
num_blocks_forw = [2, 2, 3, 3]
num_blocks_back = [3, 3]
self.featureNum_forw = [24, 32, 64, 64, 64]
self.featureNum_back = [128, 64, 64]
for i in range(len(num_blocks_forw)):
blocks = []
for j in range(num_blocks_forw[i]):
if j == 0:
blocks.append(PostRes(self.featureNum_forw[i], self.featureNum_forw[i + 1]))
else:
blocks.append(PostRes(self.featureNum_forw[i + 1], self.featureNum_forw[i + 1]))
setattr(self, 'forw' + str(i + 1), nn.Sequential(*blocks))
for i in range(len(num_blocks_back)):
blocks = []
for j in range(num_blocks_back[i]):
if j == 0:
if i == 0:
addition = 3
else:
addition = 0
blocks.append(PostRes(self.featureNum_back[i + 1] + self.featureNum_forw[i + 2] + addition,
self.featureNum_back[i]))
else:
blocks.append(PostRes(self.featureNum_back[i], self.featureNum_back[i]))
setattr(self, 'back' + str(i + 2), nn.Sequential(*blocks))
self.maxpool1 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool2 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool3 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.maxpool4 = nn.MaxPool3d(kernel_size=2, stride=2, return_indices=True)
self.unmaxpool1 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.unmaxpool2 = nn.MaxUnpool3d(kernel_size=2, stride=2)
self.path1 = nn.Sequential(
nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True))
self.path2 = nn.Sequential(
nn.ConvTranspose3d(64, 64, kernel_size=2, stride=2),
nn.BatchNorm3d(64),
nn.ReLU(inplace=True))
self.drop = nn.Dropout3d(p=0.5, inplace=False)
self.output = nn.Sequential(nn.Conv3d(self.featureNum_back[0], 64, kernel_size=1),
nn.ReLU(),
nn.Conv3d(64, 5 * len(ANCHOR_SIZES), kernel_size=1))
def forward(self, x, coord):
        out = self.preBlock(x) # 24
out_pool, indices0 = self.maxpool1(out)
out1 = self.forw1(out_pool) # 32
out1_pool, indices1 = self.maxpool2(out1)
out2 = self.forw2(out1_pool) # 64
out2_pool, indices2 = self.maxpool3(out2)
        out3 = self.forw3(out2_pool) # 64
        out3_pool, indices3 = self.maxpool4(out3)
        out4 = self.forw4(out3_pool) # 64
        rev3 = self.path1(out4)
        comb3 = self.back3(torch.cat((rev3, out3), 1)) # 64+64
        rev2 = self.path2(comb3)
        comb2 = self.back2(torch.cat((rev2, out2, coord), 1)) # 64+64+3
comb2 = self.drop(comb2)
out = self.output(comb2)
size = out.size()
out = out.view(out.size(0), out.size(1), -1)
out = out.transpose(1, 2).contiguous().view(size[0], size[2], size[3], size[4], len(ANCHOR_SIZES), 5)
return out
| [
"torch.nn.ReLU",
"torch.nn.BatchNorm3d",
"torch.nn.ConvTranspose3d",
"torch.nn.Dropout3d",
"torch.nn.Sequential",
"torch.nn.MaxPool3d",
"torch.nn.MaxUnpool3d",
"torch.cat",
"torch.nn.Conv3d"
] | [((203, 266), 'torch.nn.Conv3d', 'nn.Conv3d', (['n_in', 'n_out'], {'kernel_size': '(3)', 'stride': 'stride', 'padding': '(1)'}), '(n_in, n_out, kernel_size=3, stride=stride, padding=1)\n', (212, 266), False, 'from torch import nn\n'), ((286, 307), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['n_out'], {}), '(n_out)\n', (300, 307), False, 'from torch import nn\n'), ((328, 349), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (335, 349), False, 'from torch import nn\n'), ((371, 420), 'torch.nn.Conv3d', 'nn.Conv3d', (['n_out', 'n_out'], {'kernel_size': '(3)', 'padding': '(1)'}), '(n_out, n_out, kernel_size=3, padding=1)\n', (380, 420), False, 'from torch import nn\n'), ((440, 461), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['n_out'], {}), '(n_out)\n', (454, 461), False, 'from torch import nn\n'), ((2688, 2746), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'return_indices': '(True)'}), '(kernel_size=2, stride=2, return_indices=True)\n', (2700, 2746), False, 'from torch import nn\n'), ((2771, 2829), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'return_indices': '(True)'}), '(kernel_size=2, stride=2, return_indices=True)\n', (2783, 2829), False, 'from torch import nn\n'), ((2854, 2912), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'return_indices': '(True)'}), '(kernel_size=2, stride=2, return_indices=True)\n', (2866, 2912), False, 'from torch import nn\n'), ((2937, 2995), 'torch.nn.MaxPool3d', 'nn.MaxPool3d', ([], {'kernel_size': '(2)', 'stride': '(2)', 'return_indices': '(True)'}), '(kernel_size=2, stride=2, return_indices=True)\n', (2949, 2995), False, 'from torch import nn\n'), ((3022, 3061), 'torch.nn.MaxUnpool3d', 'nn.MaxUnpool3d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (3036, 3061), False, 'from torch import nn\n'), ((3088, 3127), 'torch.nn.MaxUnpool3d', 'nn.MaxUnpool3d', ([], {'kernel_size': '(2)', 'stride': '(2)'}), '(kernel_size=2, stride=2)\n', (3102, 3127), False, 'from torch import nn\n'), ((3485, 3519), 'torch.nn.Dropout3d', 'nn.Dropout3d', ([], {'p': '(0.5)', 'inplace': '(False)'}), '(p=0.5, inplace=False)\n', (3497, 3519), False, 'from torch import nn\n'), ((1180, 1222), 'torch.nn.Conv3d', 'nn.Conv3d', (['(1)', '(24)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(1, 24, kernel_size=3, padding=1)\n', (1189, 1222), False, 'from torch import nn\n'), ((1236, 1254), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(24)'], {}), '(24)\n', (1250, 1254), False, 'from torch import nn\n'), ((1268, 1289), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1275, 1289), False, 'from torch import nn\n'), ((1303, 1346), 'torch.nn.Conv3d', 'nn.Conv3d', (['(24)', '(24)'], {'kernel_size': '(3)', 'padding': '(1)'}), '(24, 24, kernel_size=3, padding=1)\n', (1312, 1346), False, 'from torch import nn\n'), ((1360, 1378), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(24)'], {}), '(24)\n', (1374, 1378), False, 'from torch import nn\n'), ((1392, 1413), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (1399, 1413), False, 'from torch import nn\n'), ((3177, 3228), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['(64)', '(64)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(64, 64, kernel_size=2, stride=2)\n', (3195, 3228), False, 'from torch import nn\n'), ((3242, 3260), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {}), '(64)\n', (3256, 3260), False, 'from torch 
import nn\n'), ((3274, 3295), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3281, 3295), False, 'from torch import nn\n'), ((3345, 3396), 'torch.nn.ConvTranspose3d', 'nn.ConvTranspose3d', (['(64)', '(64)'], {'kernel_size': '(2)', 'stride': '(2)'}), '(64, 64, kernel_size=2, stride=2)\n', (3363, 3396), False, 'from torch import nn\n'), ((3410, 3428), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['(64)'], {}), '(64)\n', (3424, 3428), False, 'from torch import nn\n'), ((3442, 3463), 'torch.nn.ReLU', 'nn.ReLU', ([], {'inplace': '(True)'}), '(inplace=True)\n', (3449, 3463), False, 'from torch import nn\n'), ((3556, 3609), 'torch.nn.Conv3d', 'nn.Conv3d', (['self.featureNum_back[0]', '(64)'], {'kernel_size': '(1)'}), '(self.featureNum_back[0], 64, kernel_size=1)\n', (3565, 3609), False, 'from torch import nn\n'), ((3647, 3656), 'torch.nn.ReLU', 'nn.ReLU', ([], {}), '()\n', (3654, 3656), False, 'from torch import nn\n'), ((4246, 4272), 'torch.cat', 'torch.cat', (['(rev3, out3)', '(1)'], {}), '((rev3, out3), 1)\n', (4255, 4272), False, 'import torch\n'), ((4343, 4376), 'torch.cat', 'torch.cat', (['(rev2, out2, coord)', '(1)'], {}), '((rev2, out2, coord), 1)\n', (4352, 4376), False, 'import torch\n'), ((563, 615), 'torch.nn.Conv3d', 'nn.Conv3d', (['n_in', 'n_out'], {'kernel_size': '(1)', 'stride': 'stride'}), '(n_in, n_out, kernel_size=1, stride=stride)\n', (572, 615), False, 'from torch import nn\n'), ((633, 654), 'torch.nn.BatchNorm3d', 'nn.BatchNorm3d', (['n_out'], {}), '(n_out)\n', (647, 654), False, 'from torch import nn\n'), ((1996, 2018), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (2009, 2018), False, 'from torch import nn\n'), ((2639, 2661), 'torch.nn.Sequential', 'nn.Sequential', (['*blocks'], {}), '(*blocks)\n', (2652, 2661), False, 'from torch import nn\n')] |
from django.urls import path
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
TokenVerifyView,
)
from .views import *
urlpatterns = [
path('register/', UserRegisterView.as_view()),
path('logout/', LogoutView.as_view()),
path('token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
path('token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
path('token/verify/', TokenVerifyView.as_view(), name='token_verify'),
path('change_password/', ChangePasswordView.as_view()),
]
| [
"rest_framework_simplejwt.views.TokenVerifyView.as_view",
"rest_framework_simplejwt.views.TokenObtainPairView.as_view",
"rest_framework_simplejwt.views.TokenRefreshView.as_view"
] | [((297, 326), 'rest_framework_simplejwt.views.TokenObtainPairView.as_view', 'TokenObtainPairView.as_view', ([], {}), '()\n', (324, 326), False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n'), ((382, 408), 'rest_framework_simplejwt.views.TokenRefreshView.as_view', 'TokenRefreshView.as_view', ([], {}), '()\n', (406, 408), False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n'), ((459, 484), 'rest_framework_simplejwt.views.TokenVerifyView.as_view', 'TokenVerifyView.as_view', ([], {}), '()\n', (482, 484), False, 'from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView\n')] |
from __future__ import print_function
import os
import sys
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from collections import OrderedDict
import numpy as np
from edgeml_pytorch.trainer.drocc_trainer import DROCCTrainer
class MLP(nn.Module):
"""
Multi-layer perceptron with single hidden layer.
"""
def __init__(self,
input_dim=2,
num_classes=1,
num_hidden_nodes=20):
super(MLP, self).__init__()
self.input_dim = input_dim
self.num_classes = num_classes
self.num_hidden_nodes = num_hidden_nodes
activ = nn.ReLU(True)
self.feature_extractor = nn.Sequential(OrderedDict([
('fc', nn.Linear(self.input_dim, self.num_hidden_nodes)),
('relu1', activ)]))
self.size_final = self.num_hidden_nodes
self.classifier = nn.Sequential(OrderedDict([
('fc1', nn.Linear(self.size_final, self.num_classes))]))
def forward(self, input):
features = self.feature_extractor(input)
logits = self.classifier(features.view(-1, self.size_final))
return logits
def adjust_learning_rate(epoch, total_epochs, only_ce_epochs, learning_rate, optimizer):
"""Adjust learning rate during training.
Parameters
----------
epoch: Current training epoch.
total_epochs: Total number of epochs for training.
only_ce_epochs: Number of epochs for initial pretraining.
learning_rate: Initial learning rate for training.
"""
    # We don't want to count the initial CE-only epochs in the LR schedule
epoch = epoch - only_ce_epochs
drocc_epochs = total_epochs - only_ce_epochs
# lr = learning_rate
if epoch <= drocc_epochs:
lr = learning_rate * 0.001
if epoch <= 0.90 * drocc_epochs:
lr = learning_rate * 0.01
if epoch <= 0.60 * drocc_epochs:
lr = learning_rate * 0.1
if epoch <= 0.30 * drocc_epochs:
lr = learning_rate
for param_group in optimizer.param_groups:
param_group['lr'] = lr
return optimizer
class CustomDataset(Dataset):
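    """Minimal Dataset over numpy feature/label arrays; __getitem__ also returns a
    dummy tensor as a third element."""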
def __init__(self, data, labels):
self.data = data
self.labels = labels
def __len__(self):
return len(self.data)
def __getitem__(self, idx):
if torch.is_tensor(idx):
idx = idx.tolist()
return torch.from_numpy(self.data[idx]), (self.labels[idx]), torch.tensor([0])
def load_data(path):
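    """Load train/test .npy arrays from `path`. All training points are labelled
    positive (one-class setting) and both splits are standardised with the
    training mean/std."""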
train_data = np.load(os.path.join(path, 'train_data.npy'), allow_pickle = True)
train_lab = np.ones((train_data.shape[0])) #All positive labelled data points collected
test_data = np.load(os.path.join(path, 'test_data.npy'), allow_pickle = True)
test_lab = np.load(os.path.join(path, 'test_labels.npy'), allow_pickle = True)
## preprocessing
    mean = np.mean(train_data, 0)
    std = np.std(train_data, 0)
    train_data = (train_data - mean) / (std + 1e-4)
num_features = train_data.shape[1]
test_data = (test_data - mean)/(std + 1e-4)
train_samples = train_data.shape[0]
test_samples = test_data.shape[0]
print("Train Samples: ", train_samples)
print("Test Samples: ", test_samples)
return CustomDataset(train_data, train_lab), CustomDataset(test_data, test_lab), num_features
def main():
train_dataset, test_dataset, num_features = load_data(args.data_path)
train_loader = DataLoader(train_dataset, args.batch_size, shuffle=True)
test_loader = DataLoader(test_dataset, args.batch_size, shuffle=True)
model = MLP(input_dim=num_features, num_hidden_nodes=args.hd, num_classes=1).to(device)
if args.optim == 1:
optimizer = optim.SGD(model.parameters(),
lr=args.lr,
momentum=args.mom)
print("using SGD")
else:
optimizer = optim.Adam(model.parameters(),
lr=args.lr)
print("using Adam")
# Training the model
trainer = DROCCTrainer(model, optimizer, args.lamda, args.radius, args.gamma, device)
# Restore from checkpoint
if args.restore == 1:
if os.path.exists(os.path.join(args.model_dir, 'model.pt')):
trainer.load(args.model_dir)
print("Saved Model Loaded")
trainer.train(train_loader, test_loader, args.lr, adjust_learning_rate, args.epochs,
metric=args.metric, ascent_step_size=args.ascent_step_size, only_ce_epochs = args.only_ce_epochs)
trainer.save(args.model_dir)
if __name__ == '__main__':
torch.set_printoptions(precision=5)
parser = argparse.ArgumentParser(description='PyTorch Simple Training')
parser.add_argument('--batch_size', type=int, default=128, metavar='N',
help='batch size for training')
parser.add_argument('--epochs', type=int, default=100, metavar='N',
help='number of epochs to train')
    parser.add_argument('-oce', '--only_ce_epochs', type=int, default=50, metavar='N',
help='number of epochs to train with only CE loss')
parser.add_argument('--ascent_num_steps', type=int, default=50, metavar='N',
help='Number of gradient ascent steps')
parser.add_argument('--hd', type=int, default=128, metavar='N',
                        help='Number of hidden nodes in the MLP model')
parser.add_argument('--lr', type=float, default=0.001, metavar='LR',
help='learning rate')
parser.add_argument('--ascent_step_size', type=float, default=0.001, metavar='LR',
help='step size of gradient ascent')
parser.add_argument('--mom', type=float, default=0.99, metavar='M',
help='momentum')
parser.add_argument('--model_dir', default='log',
help='path where to save checkpoint')
parser.add_argument('--one_class_adv', type=int, default=1, metavar='N',
help='adv loss to be used or not, 1:use 0:not use(only CE)')
parser.add_argument('--radius', type=float, default=0.2, metavar='N',
help='radius corresponding to the definition of set N_i(r)')
parser.add_argument('--lamda', type=float, default=1, metavar='N',
help='Weight to the adversarial loss')
parser.add_argument('--reg', type=float, default=0, metavar='N',
help='weight reg')
parser.add_argument('--restore', type=int, default=0, metavar='N',
help='whether to load a pretrained model, 1: load 0: train from scratch')
parser.add_argument('--optim', type=int, default=0, metavar='N',
help='0 : Adam 1: SGD')
parser.add_argument('--gamma', type=float, default=2.0, metavar='N',
help='r to gamma * r projection for the set N_i(r)')
parser.add_argument('-d', '--data_path', type=str, default='.')
parser.add_argument('--metric', type=str, default='F1')
args = parser.parse_args()
# settings
#Checkpoint store path
model_dir = args.model_dir
if not os.path.exists(model_dir):
os.makedirs(model_dir)
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
main()
| [
"numpy.mean",
"edgeml_pytorch.trainer.drocc_trainer.DROCCTrainer",
"torch.nn.ReLU",
"os.path.exists",
"numpy.ones",
"torch.set_printoptions",
"argparse.ArgumentParser",
"os.makedirs",
"numpy.std",
"os.path.join",
"torch.from_numpy",
"torch.is_tensor",
"torch.tensor",
"torch.cuda.is_availab... | [((2779, 2807), 'numpy.ones', 'np.ones', (['train_data.shape[0]'], {}), '(train_data.shape[0])\n', (2786, 2807), True, 'import numpy as np\n'), ((3052, 3074), 'numpy.mean', 'np.mean', (['train_data', '(0)'], {}), '(train_data, 0)\n', (3059, 3074), True, 'import numpy as np\n'), ((3082, 3103), 'numpy.std', 'np.std', (['train_data', '(0)'], {}), '(train_data, 0)\n', (3088, 3103), True, 'import numpy as np\n'), ((3611, 3667), 'torch.utils.data.DataLoader', 'DataLoader', (['train_dataset', 'args.batch_size'], {'shuffle': '(True)'}), '(train_dataset, args.batch_size, shuffle=True)\n', (3621, 3667), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((3686, 3741), 'torch.utils.data.DataLoader', 'DataLoader', (['test_dataset', 'args.batch_size'], {'shuffle': '(True)'}), '(test_dataset, args.batch_size, shuffle=True)\n', (3696, 3741), False, 'from torch.utils.data import DataLoader, Dataset\n'), ((4215, 4290), 'edgeml_pytorch.trainer.drocc_trainer.DROCCTrainer', 'DROCCTrainer', (['model', 'optimizer', 'args.lamda', 'args.radius', 'args.gamma', 'device'], {}), '(model, optimizer, args.lamda, args.radius, args.gamma, device)\n', (4227, 4290), False, 'from edgeml_pytorch.trainer.drocc_trainer import DROCCTrainer\n'), ((4765, 4800), 'torch.set_printoptions', 'torch.set_printoptions', ([], {'precision': '(5)'}), '(precision=5)\n', (4787, 4800), False, 'import torch\n'), ((4819, 4881), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""PyTorch Simple Training"""'}), "(description='PyTorch Simple Training')\n", (4842, 4881), False, 'import argparse\n'), ((7455, 7480), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (7478, 7480), False, 'import torch\n'), ((7494, 7537), 'torch.device', 'torch.device', (["('cuda' if use_cuda else 'cpu')"], {}), "('cuda' if use_cuda else 'cpu')\n", (7506, 7537), False, 'import torch\n'), ((718, 731), 'torch.nn.ReLU', 'nn.ReLU', (['(True)'], {}), '(True)\n', (725, 731), True, 'import torch.nn as nn\n'), ((2517, 2537), 'torch.is_tensor', 'torch.is_tensor', (['idx'], {}), '(idx)\n', (2532, 2537), False, 'import torch\n'), ((2704, 2740), 'os.path.join', 'os.path.join', (['path', '"""train_data.npy"""'], {}), "(path, 'train_data.npy')\n", (2716, 2740), False, 'import os\n'), ((2879, 2914), 'os.path.join', 'os.path.join', (['path', '"""test_data.npy"""'], {}), "(path, 'test_data.npy')\n", (2891, 2914), False, 'import os\n'), ((2960, 2997), 'os.path.join', 'os.path.join', (['path', '"""test_labels.npy"""'], {}), "(path, 'test_labels.npy')\n", (2972, 2997), False, 'import os\n'), ((7382, 7407), 'os.path.exists', 'os.path.exists', (['model_dir'], {}), '(model_dir)\n', (7396, 7407), False, 'import os\n'), ((7417, 7439), 'os.makedirs', 'os.makedirs', (['model_dir'], {}), '(model_dir)\n', (7428, 7439), False, 'import os\n'), ((2585, 2617), 'torch.from_numpy', 'torch.from_numpy', (['self.data[idx]'], {}), '(self.data[idx])\n', (2601, 2617), False, 'import torch\n'), ((2639, 2656), 'torch.tensor', 'torch.tensor', (['[0]'], {}), '([0])\n', (2651, 2656), False, 'import torch\n'), ((4379, 4419), 'os.path.join', 'os.path.join', (['args.model_dir', '"""model.pt"""'], {}), "(args.model_dir, 'model.pt')\n", (4391, 4419), False, 'import os\n'), ((812, 860), 'torch.nn.Linear', 'nn.Linear', (['self.input_dim', 'self.num_hidden_nodes'], {}), '(self.input_dim, self.num_hidden_nodes)\n', (821, 860), True, 'import torch.nn as nn\n'), ((1018, 1062), 'torch.nn.Linear', 'nn.Linear', (['self.size_final', 
'self.num_classes'], {}), '(self.size_final, self.num_classes)\n', (1027, 1062), True, 'import torch.nn as nn\n')] |
from django import forms
class ContactForm(forms.Form):
user_name = forms.CharField(max_length=60, label='', required=True, widget=forms.TextInput(attrs={'placeholder': '<NAME>'}))
user_email = forms.EmailField(label='', required=True)
message = forms.CharField(label='', required=True, widget=forms.Textarea(attrs={'placeholder': 'Message', 'rows': '4'}))
def __init__(self, *args, **kwargs):
super(ContactForm, self).__init__(*args, **kwargs)
for visible in self.visible_fields():
if visible.name == "user_email":
visible.field.widget.attrs['class'] = 'validate-required validate-email field-error'
visible.field.widget.attrs['placeholder'] = 'Email Address'
else:
visible.field.widget.attrs['class'] = 'validate-required field-error'
| [
"django.forms.Textarea",
"django.forms.EmailField",
"django.forms.TextInput"
] | [((205, 246), 'django.forms.EmailField', 'forms.EmailField', ([], {'label': '""""""', 'required': '(True)'}), "(label='', required=True)\n", (221, 246), False, 'from django import forms\n'), ((138, 186), 'django.forms.TextInput', 'forms.TextInput', ([], {'attrs': "{'placeholder': '<NAME>'}"}), "(attrs={'placeholder': '<NAME>'})\n", (153, 186), False, 'from django import forms\n'), ((309, 370), 'django.forms.Textarea', 'forms.Textarea', ([], {'attrs': "{'placeholder': 'Message', 'rows': '4'}"}), "(attrs={'placeholder': 'Message', 'rows': '4'})\n", (323, 370), False, 'from django import forms\n')] |
#coding:utf-8
import ftplib
import os
from ...core import constants
from . import base
class Configurer(base.Configurer):
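    """FTP configurer: wraps ftplib.FTP with recursive upload/download/delete
    helpers, caching the current remote directory to cut down on cwd round-trips."""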
def __init__(self,config):
self._config = config
self._key = constants.KEY_CONFIGURER_INSTANCES
self._results = {}
self.instance = ftplib.FTP()
self.instance.set_debuglevel(0)
self.lines = []
self._ftp_log = []
def connect(self,_config):
self._ftp_log.append(self.instance.connect(_config.get('host'),int(_config.get('port',21))))
        _pasv_mode = _config.get('mode') == 1
self.instance.set_pasv(_pasv_mode)
self._ftp_log.append(self.instance.login(_config.get('username'),_config.get('password')))
if 'defaut_path' in _config:
self.opendir(_config.get('defaut_path'))
self.__current_dir = self.instance.pwd()
def parse(self):
_config = self._config.get('config_body')
self.connect(_config)
self._results[self._config.get('name')] = self
return [(self._key,self._results)]
def close(self):
if self.instance:
self.instance.quit()
def upload(self,remotepath, localpath):
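        """Upload a single file, or a directory tree recursively; files in a directory
        are sent before descending into subdirectories to avoid re-opening parents."""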
self._ftp_log = []
if os.path.isdir(localpath):
_files = os.listdir(localpath)
_dirs = []
for _file in _files:
_path = os.path.join(localpath,_file)
_remotepath = '{}/{}'.format(remotepath.rstrip('/'),_file)
if os.path.isdir(_path):
_dirs.append((_remotepath,_path))
else:
self.__upload_file(_remotepath,_path)
            # Process subdirectories after files so we don't repeatedly re-open child and parent directories when the listing order is mixed
for _remotepath,_path in _dirs:
self.upload(_remotepath, _path)
else:
self.__upload_file(remotepath, localpath)
def __upload_file(self,remotepath, localpath):
bufsize = 1024
dirname,basename = self.__split(remotepath)
self.opendir(dirname)
fp = open(localpath, 'rb')
_result = self.instance.storbinary('STOR ' + basename, fp, bufsize)
print('__upload_file_1',type(_result),_result)
fp.close()
def download(self,remotepath, localpath):
self._ftp_log = []
try:
            _result = self.instance.cwd(remotepath)
            self.__current_dir = self.instance.pwd()  # keep the directory cache in sync with the probe
print('download',type(_result), _result)
self.__download_dir(remotepath,localpath)
except ftplib.error_perm:
self.__download_file(remotepath, localpath)
def __download_file(self,remotepath, localpath):
bufsize = 1024
dirname, basename = self.__split(remotepath)
self.opendir(dirname)
fp = open(localpath, 'wb')
_result = self.instance.retrbinary('RETR ' + basename, fp.write, bufsize)
print('__download_file_1', type(_result), _result)
fp.close()
def __download_dir(self,remotepath, localpath):
if not os.path.exists(localpath):
os.makedirs(localpath)
self.__clear_lines()
self.opendir(remotepath)
_result = self.instance.retrlines("LIST", callback=self.__save_line)
print('__download_dir_1', type(_result), _result)
for line in self.lines:
name = line.split(" ")[-1]
if name in ['.','..']:
continue
_remote_path = '{}/{}'.format(remotepath.rstrip('/'),name)
_local_path = os.path.join(localpath,name)
if line[0] == "d":
self.__download_dir(_remote_path,_local_path)
else:
self.__download_file(_remote_path,_local_path)
def delete(self,remotepath):
self._ftp_log = []
try:
            self.instance.cwd(remotepath)
            self.__current_dir = self.instance.pwd()  # keep the directory cache in sync with the probe
self.delete_dir(remotepath)
except ftplib.error_perm:
_result = self.instance.delete(remotepath)
print('__delete_1', type(_result), _result)
def delete_dir(self,remotepath):
self.__clear_lines()
self.opendir(remotepath)
self.instance.retrlines("LIST", callback=self.__save_line)
for line in self.lines:
name = line.split(" ")[-1]
if name in ['.','..']:
continue
_path = remotepath + "/" + name
if line[0] == "d":
self.delete_dir(_path)
else:
_result = self.instance.delete(_path)
print('delete_dir1', type(_result), _result)
if remotepath !='/':
_result = self.instance.rmd(remotepath)
print('delete_dir_2', type(_result), _result)
def opendir(self,remotepath):
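        """Change to `remotepath`, creating missing directories along the way; only
        the path components that differ from the cached current directory are traversed."""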
remotepath = remotepath.rstrip(r'/')
if not remotepath or self.__compare_path(remotepath,self.__current_dir):
return True
if self.__current_dir.find(remotepath) ==0:
self.instance.cwd(remotepath)
else:
            # str.lstrip/rstrip strip character sets, not prefixes; use the common-path helper instead
            _common = self.__common_path(remotepath,self.__current_dir) or r'/'
            _diff = remotepath[len(_common.rstrip(r'/')):].lstrip(r'/')
            if not self.__compare_path(_common,self.__current_dir):
                self._ftp_log.append(self.instance.cwd(_common))
            dir_lists = [_dir for _dir in _diff.split(r'/') if _dir]
for _dir in dir_lists:
try:
self._ftp_log.append(self.instance.cwd(_dir))
except ftplib.error_perm:
try:
_result = self.instance.mkd(_dir)
self._ftp_log.append('mkdir dir {} successful'.format(_result))
except ftplib.error_perm:
pass
self.instance.cwd(_dir)
self.__current_dir = self.instance.pwd()
return True
def __clear_lines(self):
self.lines = []
def __save_line(self, line):
self.lines.append(line)
def __split(self,path):
while(path.find('//') !=-1):
path = path.replace('//','/')
index = path.rfind('/')
if index == -1:
return '',path
return path[:index],path[index + 1:]
    def __common_path(self,remotepath,oldpath):
        # component-wise common prefix, so '/a/bc' vs '/a/bd' gives '/a' and not '/a/b'
        _parts = []
        for _a,_b in zip(remotepath.split(r'/'), oldpath.split(r'/')):
            if _a != _b:
                break
            _parts.append(_a)
        return r'/'.join(_parts).rstrip(r'/')
def __compare_path(self,path,t_path):
return path.rstrip(r'/') == t_path.rstrip(r'/')
| [
"os.path.exists",
"os.listdir",
"ftplib.FTP",
"os.makedirs",
"os.path.join",
"os.path.isdir"
] | [((290, 302), 'ftplib.FTP', 'ftplib.FTP', ([], {}), '()\n', (300, 302), False, 'import ftplib\n'), ((1235, 1259), 'os.path.isdir', 'os.path.isdir', (['localpath'], {}), '(localpath)\n', (1248, 1259), False, 'import os\n'), ((1282, 1303), 'os.listdir', 'os.listdir', (['localpath'], {}), '(localpath)\n', (1292, 1303), False, 'import os\n'), ((2972, 2997), 'os.path.exists', 'os.path.exists', (['localpath'], {}), '(localpath)\n', (2986, 2997), False, 'import os\n'), ((3011, 3033), 'os.makedirs', 'os.makedirs', (['localpath'], {}), '(localpath)\n', (3022, 3033), False, 'import os\n'), ((3459, 3488), 'os.path.join', 'os.path.join', (['localpath', 'name'], {}), '(localpath, name)\n', (3471, 3488), False, 'import os\n'), ((1384, 1414), 'os.path.join', 'os.path.join', (['localpath', '_file'], {}), '(localpath, _file)\n', (1396, 1414), False, 'import os\n'), ((1508, 1528), 'os.path.isdir', 'os.path.isdir', (['_path'], {}), '(_path)\n', (1521, 1528), False, 'import os\n')] |
# Name: load.py
# Date: June 2019
# Function: goes trough a bookmark file checking the status of each URL
# Input: bookmark file in json format
# Output: new text and json files including those URLs according with their status
import os
import sys
import ast
try:
import requests
except:
sys.stderr.write("%s: Please install the required module 'requests'.\n" % sys.argv[0])
sys.exit(1)
try:
import json
except:
# Python < 2.6
try:
import simplejson as json
except:
sys.stderr.write("%s: Please install the required module 'simplejson'.\n" % sys.argv[0])
sys.exit(1)
DIRNAME = "output/"
JSONIN = DIRNAME + "chrome_bookmarks.json"
JSONOK = DIRNAME + "OK.json"
URLERROR = DIRNAME + "error.url"
URL404 = DIRNAME + "404.url"
URLOK = DIRNAME + "OK.url"
RED='\033[0;31m'
NC='\033[0m' # No Color
# Read source bookmark file
input_filename = open(JSONIN, "r")
bookmark_data = json.load(input_filename)
input_filename.close()
# Compute number of elements, including categories and end nodes
elements = len(bookmark_data)
print("Checking", str(elements), "entries in bookmark data")
# Create output/ directory if not exists
try:
os.mkdir(DIRNAME)
print("Directory" , DIRNAME , "created ")
except:
print("Directory" , DIRNAME , "preserved")
# Defining output files
urlError = open(URLERROR,"w")
jsonOK = open(JSONOK,"w")
urlOK = open(URLOK,"w")
url404 = open(URL404,"w")
jsonOK.write("[")
count = 1
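# Walk every bookmark entry: folders (no url) are recorded in the parent map; links get a
# HEAD request and are logged to the error/404 url files on failure. Every entry is also
# copied to the output JSON so the resulting array stays well-formed.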
for entry in bookmark_data:
    # Shredding entry into variables
    id = str(entry["id"])
    dateAddedLocal = str(entry["dateAddedLocal"])
    dateAddedUTC = str(entry["dateAddedUTC"])
    index = str(entry["index"])
    parentId = entry["parentId"]
    string = str(entry["title"])
    try:
        url = entry["url"]
    except KeyError:
        url = ""
# Tweak title here
title = string.replace('"', '')
#
print("@@@@@@@@@@@@", id)
#print(" L ", dateAddedLocal)
#print(" U ", dateAddedUTC)
#print(" I ", index)
#print(" P ", parentId)
# if there is something in url
if url:
print(" T ", title)
# Try here to access that URL
try:
try:
folder = parent[parentId]
except:
folder = "1"
print(" > [", folder, "] ", url)
req = requests.head(url, timeout=10)
# Attends all & timeout
except:
print(RED + "XXX" + NC)
urlError.write(url + "\n")
else:
status = req.status_code
if status == 404:
print(RED + "404" + NC)
url404.write(url + "\n")
else:
print(" + ", status)
# Original json entries pasted here
# Approach from scratch
# Write to file
jsonOK.write('{\n')
jsonOK.write(' "id": ' + id + ',\n')
jsonOK.write(' "dateAddedLocal": "' + dateAddedLocal + '",\n')
jsonOK.write(' "dateAddedUTC": "' + dateAddedUTC + '",\n')
jsonOK.write(' "index": ' + index + ',\n')
jsonOK.write(' "parentId": ' + parentId + ',\n')
jsonOK.write(' "title": "' + title + '",\n')
jsonOK.write(' "url": "' + url + '"\n')
if count<elements:
jsonOK.write('},\n')
else:
jsonOK.write('}]\n')
urlOK.write(url + '\n')
# When it is only a bookmark folder
# Original json entries be pasted here
else:
lastTitle = "[" + title + "]"
print(lastTitle)
# Create parent dictionary
parent = {}
parent[id] = title
# Write to file
jsonOK.write('{\n')
jsonOK.write(' "id": ' + id + ',\n')
jsonOK.write(' "dateAddedLocal": "' + dateAddedLocal + '",\n')
jsonOK.write(' "dateAddedUTC": "' + dateAddedUTC + '",\n')
jsonOK.write(' "index": ' + index + ',\n')
jsonOK.write(' "parentId": ' + parentId + ',\n')
jsonOK.write(' "title": "' + title + '"\n')
if count<elements:
jsonOK.write('},\n')
else:
jsonOK.write('}]\n')
urlOK.write(url + '\n')
count += 1
jsonOK.close()
urlError.close()
url404.close()
urlOK.close()
| [
"simplejson.load",
"requests.head",
"os.mkdir"
] | [((915, 940), 'simplejson.load', 'json.load', (['input_filename'], {}), '(input_filename)\n', (924, 940), True, 'import simplejson as json\n'), ((1172, 1189), 'os.mkdir', 'os.mkdir', (['DIRNAME'], {}), '(DIRNAME)\n', (1180, 1189), False, 'import os\n'), ((2281, 2311), 'requests.head', 'requests.head', (['url'], {'timeout': '(10)'}), '(url, timeout=10)\n', (2294, 2311), False, 'import requests\n')] |
from tir import Webapp
import unittest
from tir.technologies.apw_internal import ApwInternal
import datetime
import time
DateSystem = datetime.datetime.today().strftime('%d/%m/%Y')
DateVal = datetime.datetime(2120, 5, 17)
"""-------------------------------------------------------------------
/*/{Protheus.doc} PLSA809TestCase
TIR - Test cases for the Provider Referral (Indicacao de Prestador) via CallCenter routine
@author <NAME>
@since 10/2020
@version 12
-------------------------------------------------------------------"""
class PLSA809(unittest.TestCase):
@classmethod
def setUpClass(inst):
inst.oHelper = Webapp()
inst.oHelper.Setup("SIGAPLS","13/10/2020","T1","M SP 01","33")
inst.oHelper.Program("PLSA809")
inst.oHelper.AddParameter("MV_PLCALPG","" , "2")
inst.oHelper.AddParameter("MV_PL809VL","" , ".F.")
inst.oHelper.SetParameters()
def test_PLSA809_001(self):
        # INSERT (Incluir)
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("M SP 01 ")
self.oHelper.SetValue("B9Y_CARTEI","00010100000001024", check_value = False)
self.oHelper.SetValue("B9Y_CRMCGC","41226834671", check_value = False)
time.sleep(10)
self.oHelper.SetValue("B9Y_NOME","PLS DSAUPC TIR INCLUSAO")
self.oHelper.SetValue("B9Y_EMAIL","<EMAIL>")
self.oHelper.SetValue("B9Y_TEL","11332220000", check_value = False)
self.oHelper.SetValue("B9Y_TIPOAT", "3 - Ambos")
self.oHelper.SetValue("B9Y_OBS", "TESTE 2 TIR INCLUSAO")
        # Addresses grid (Enderecos)
self.oHelper.ClickGridCell("Cód Logr",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
self.oHelper.SetValue("B9V_CODLOG","008")
self.oHelper.ClickGridCell("Endereço",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(10)
self.oHelper.SetValue("B9V_ENDER","ALBERT BARTHOLOME")
self.oHelper.ClickGridCell("Nº",row=1, grid_number=1)
self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(10)
self.oHelper.SetValue("B9V_NUMERO","434")
#self.oHelper.ClickGridCell("Complemento",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_COMEND","SALA 10")
#self.oHelper.ClickGridCell("Bairro",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_BAIRRO","BUTANTA")
#self.oHelper.ClickGridCell("Cód Cidade",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_CODCID","3550308")
#self.oHelper.ClickGridCell("CEP",row=1, grid_number=1)
#self.oHelper.SetKey("Enter", grid=True, grid_number=1)
time.sleep(30)
#self.oHelper.SetValue("B9V_CEP","05541000", check_value = False)
        # Specialties grid (Especialidades)
self.oHelper.ClickGridCell("Cod Espec",row=1, grid_number=2)
self.oHelper.SetKey("Enter", grid=True, grid_number=2)
time.sleep(10)
self.oHelper.SetValue("B9Q_CODESP","002")
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar") # "O beneficiário não possui email cadastrado na base de dados, favor informar o protocolo a ele para que seja possível acompanhar a indicação feita"
self.oHelper.SetButton("Fechar") # "Registro inserido com sucesso."
# VISUALIZAR
self.oHelper.SetButton("Visualizar")
self.oHelper.CheckResult("B9Y_CRMCGC","41226834671")
self.oHelper.SetButton("Fechar")
        # INSERT WITH THE SAME CRM/CNPJ
self.oHelper.SetButton("Incluir")
self.oHelper.SetBranch("M SP 01 ")
self.oHelper.SetValue("B9Y_CARTEI","00010100000001024", check_value = False)
self.oHelper.SetValue("B9Y_CRMCGC","41226834671", check_value = False)
time.sleep(10)
self.oHelper.SetValue("B9Y_NOME","PLS DSAUPC TIR INCLUSAO 2")
self.oHelper.SetValue("B9Y_EMAIL","<EMAIL>")
self.oHelper.SetValue("B9Y_TEL","11333331234", check_value = False)
self.oHelper.SetValue("B9Y_TIPOAT", "2 - Assistencial")
self.oHelper.SetValue("B9Y_OBS", "TESTE 2 TIR INCLUSAO COM MESMO CRM/CNPJ")
        # Specialties grid (Especialidades)
self.oHelper.ClickGridCell("Indicar",row=1, grid_number=2)
self.oHelper.SetKey("Enter", grid=True, grid_number=2)
self.oHelper.SetButton("Confirmar")
self.oHelper.SetButton("Fechar") # "O beneficiário não possui email cadastrado na base de dados, favor informar o protocolo a ele para que seja possível acompanhar a indicação feita"
self.oHelper.SetButton("Fechar") # "Registro inserido com sucesso."
self.oHelper.SetButton('x')
self.oHelper.AssertTrue()
@classmethod
def tearDownClass(inst):
inst.oHelper.TearDown()
if __name__ == '__main__':
unittest.main() | [
"datetime.datetime",
"unittest.main",
"time.sleep",
"tir.Webapp",
"datetime.datetime.today"
] | [((192, 222), 'datetime.datetime', 'datetime.datetime', (['(2120)', '(5)', '(17)'], {}), '(2120, 5, 17)\n', (209, 222), False, 'import datetime\n'), ((4574, 4589), 'unittest.main', 'unittest.main', ([], {}), '()\n', (4587, 4589), False, 'import unittest\n'), ((135, 160), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (158, 160), False, 'import datetime\n'), ((601, 609), 'tir.Webapp', 'Webapp', ([], {}), '()\n', (607, 609), False, 'from tir import Webapp\n'), ((1113, 1127), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1123, 1127), False, 'import time\n'), ((1722, 1736), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1732, 1736), False, 'import time\n'), ((1910, 1924), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (1920, 1924), False, 'import time\n'), ((2096, 2110), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2106, 2110), False, 'import time\n'), ((2282, 2296), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2292, 2296), False, 'import time\n'), ((2472, 2486), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2482, 2486), False, 'import time\n'), ((2655, 2669), 'time.sleep', 'time.sleep', (['(30)'], {}), '(30)\n', (2665, 2669), False, 'import time\n'), ((2885, 2899), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (2895, 2899), False, 'import time\n'), ((3648, 3662), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (3658, 3662), False, 'import time\n')] |
from __future__ import print_function
from P13pt.mascril.measurement import MeasurementBase
from P13pt.mascril.parameter import Sweep, String, Folder, Boolean
from P13pt.drivers.bilt import Bilt, BiltVoltageSource, BiltVoltMeter
from P13pt.drivers.zilockin import ZILockin
import time
import numpy as np
import os
class Measurement(MeasurementBase):
params = {
'Vg1s': Sweep([0.0]),
'Vg2s': Sweep([0.0]),
'commongate': Boolean(False),
'Rg1': 100e3,
'Rg2': 100e3,
'Rds': 2.2e3,
'stabilise_time': 0.5,
'comment': String(''),
'data_dir': Folder(r'D:\meso\Desktop\testdata')
}
observables = ['Vg1', 'Vg1m', 'Ileak1', 'Vg2', 'Vg2m', 'Ileak2', 'Vds', 'Vdsm', 'Vdsm_std', 'Rs']
alarms = [
['np.abs(Ileak1) > 1e-8', MeasurementBase.ALARM_CALLCOPS],
['np.abs(Ileak2) > 1e-8', MeasurementBase.ALARM_CALLCOPS],
['np.abs(Vg1-Vg2)', MeasurementBase.ALARM_SHOWVALUE] # useful if we just want to know how much voltage
# is applied between the two gates
]
def measure(self, data_dir, Vg1s, Vg2s, commongate, Rg1, Rg2, Rds, stabilise_time, **kwargs):
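        """Sweep both gate voltages; each point reads back the gate voltages (leak
        currents follow from the drops over Rg1/Rg2) and the lock-in drain-source
        voltage, from which the sample resistance follows via the Rds voltage
        divider: Rs = Rds*Vdsm/(Vds - Vdsm)."""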
print("===================================")
print("Starting acquisition script...")
# initialise instruments
try:
print("Setting up DC sources and voltmeters...")
bilt = Bilt('TCPIP0::192.168.0.2::5025::SOCKET')
self.sourceVg1 = sourceVg1 = BiltVoltageSource(bilt, "I2", "12", "1", 0.01, "Vg1")
self.sourceVg2 = sourceVg2 = BiltVoltageSource(bilt, "I3", "12", "1", 0.01, "Vg2")
self.meterVg1 = meterVg1 = BiltVoltMeter(bilt, "I5;C2", "2", "Vg1m")
self.meterVg2 = meterVg2 = BiltVoltMeter(bilt, "I5;C3", "2", "Vg2m")
print("DC sources and voltmeters are set up.")
except:
print("There has been an error setting up DC sources and voltmeters.")
raise
try:
print("Setting up lock-in amplifier")
self.lockin = lockin = ZILockin()
print("Lock in amplifier is set up.")
except:
print("There has been an error setting up the lock-in amplifier.")
raise
timestamp = time.strftime('%Y-%m-%d_%Hh%Mm%Ss')
# save lock in settings (in case we need to check something later)
lockin.save_settings(os.path.join(data_dir, 'ZIsettings', timestamp+'.ZIsettings.txt'))
# prepare saving data
filename = timestamp + '.txt'
self.prepare_saving(os.path.join(data_dir, filename))
# loops
Vds = lockin.rms_amp
for Vg2 in Vg2s:
sourceVg2.set_voltage(Vg2)
for Vg1 in Vg1s:
if self.flags['quit_requested']:
return locals()
sourceVg1.set_voltage(Vg1)
# stabilise
time.sleep(stabilise_time)
# measure
Vg1m = meterVg1.get_voltage()
Vg2m = meterVg2.get_voltage()
Vdsm, Vdsm_std = lockin.poll_data()
# do calculations
Ileak1 = (Vg1-Vg1m)/Rg1
Ileak2 = (Vg2-Vg2m)/Rg2
Rs = Rds*Vdsm/(Vds-Vdsm)
# save data
self.save_row(locals())
print("Acquisition done.")
return locals()
def tidy_up(self):
self.end_saving()
print("Driving all voltages back to zero...")
self.sourceVg1.set_voltage(0.)
self.sourceVg2.set_voltage(0.)
self.lockin.tidy_up()
if __name__ == "__main__":
m = Measurement()
m.run() | [
"P13pt.mascril.parameter.Folder",
"P13pt.mascril.parameter.String",
"P13pt.drivers.zilockin.ZILockin",
"P13pt.mascril.parameter.Sweep",
"time.strftime",
"os.path.join",
"time.sleep",
"P13pt.mascril.parameter.Boolean",
"P13pt.drivers.bilt.Bilt",
"P13pt.drivers.bilt.BiltVoltageSource",
"P13pt.driv... | [((383, 395), 'P13pt.mascril.parameter.Sweep', 'Sweep', (['[0.0]'], {}), '([0.0])\n', (388, 395), False, 'from P13pt.mascril.parameter import Sweep, String, Folder, Boolean\n'), ((413, 425), 'P13pt.mascril.parameter.Sweep', 'Sweep', (['[0.0]'], {}), '([0.0])\n', (418, 425), False, 'from P13pt.mascril.parameter import Sweep, String, Folder, Boolean\n'), ((449, 463), 'P13pt.mascril.parameter.Boolean', 'Boolean', (['(False)'], {}), '(False)\n', (456, 463), False, 'from P13pt.mascril.parameter import Sweep, String, Folder, Boolean\n'), ((581, 591), 'P13pt.mascril.parameter.String', 'String', (['""""""'], {}), "('')\n", (587, 591), False, 'from P13pt.mascril.parameter import Sweep, String, Folder, Boolean\n'), ((613, 650), 'P13pt.mascril.parameter.Folder', 'Folder', (['"""D:\\\\meso\\\\Desktop\\\\testdata"""'], {}), "('D:\\\\meso\\\\Desktop\\\\testdata')\n", (619, 650), False, 'from P13pt.mascril.parameter import Sweep, String, Folder, Boolean\n'), ((2338, 2373), 'time.strftime', 'time.strftime', (['"""%Y-%m-%d_%Hh%Mm%Ss"""'], {}), "('%Y-%m-%d_%Hh%Mm%Ss')\n", (2351, 2373), False, 'import time\n'), ((1462, 1503), 'P13pt.drivers.bilt.Bilt', 'Bilt', (['"""TCPIP0::192.168.0.2::5025::SOCKET"""'], {}), "('TCPIP0::192.168.0.2::5025::SOCKET')\n", (1466, 1503), False, 'from P13pt.drivers.bilt import Bilt, BiltVoltageSource, BiltVoltMeter\n'), ((1545, 1598), 'P13pt.drivers.bilt.BiltVoltageSource', 'BiltVoltageSource', (['bilt', '"""I2"""', '"""12"""', '"""1"""', '(0.01)', '"""Vg1"""'], {}), "(bilt, 'I2', '12', '1', 0.01, 'Vg1')\n", (1562, 1598), False, 'from P13pt.drivers.bilt import Bilt, BiltVoltageSource, BiltVoltMeter\n'), ((1640, 1693), 'P13pt.drivers.bilt.BiltVoltageSource', 'BiltVoltageSource', (['bilt', '"""I3"""', '"""12"""', '"""1"""', '(0.01)', '"""Vg2"""'], {}), "(bilt, 'I3', '12', '1', 0.01, 'Vg2')\n", (1657, 1693), False, 'from P13pt.drivers.bilt import Bilt, BiltVoltageSource, BiltVoltMeter\n'), ((1733, 1774), 'P13pt.drivers.bilt.BiltVoltMeter', 'BiltVoltMeter', (['bilt', '"""I5;C2"""', '"""2"""', '"""Vg1m"""'], {}), "(bilt, 'I5;C2', '2', 'Vg1m')\n", (1746, 1774), False, 'from P13pt.drivers.bilt import Bilt, BiltVoltageSource, BiltVoltMeter\n'), ((1814, 1855), 'P13pt.drivers.bilt.BiltVoltMeter', 'BiltVoltMeter', (['bilt', '"""I5;C3"""', '"""2"""', '"""Vg2m"""'], {}), "(bilt, 'I5;C3', '2', 'Vg2m')\n", (1827, 1855), False, 'from P13pt.drivers.bilt import Bilt, BiltVoltageSource, BiltVoltMeter\n'), ((2143, 2153), 'P13pt.drivers.zilockin.ZILockin', 'ZILockin', ([], {}), '()\n', (2151, 2153), False, 'from P13pt.drivers.zilockin import ZILockin\n'), ((2479, 2546), 'os.path.join', 'os.path.join', (['data_dir', '"""ZIsettings"""', "(timestamp + '.ZIsettings.txt')"], {}), "(data_dir, 'ZIsettings', timestamp + '.ZIsettings.txt')\n", (2491, 2546), False, 'import os\n'), ((2643, 2675), 'os.path.join', 'os.path.join', (['data_dir', 'filename'], {}), '(data_dir, filename)\n', (2655, 2675), False, 'import os\n'), ((2990, 3016), 'time.sleep', 'time.sleep', (['stabilise_time'], {}), '(stabilise_time)\n', (3000, 3016), False, 'import time\n')] |
# NOTE: Following example requires boto3 package.
import boto3
from InquirerPy import prompt
from InquirerPy.exceptions import InvalidArgument
from InquirerPy.validator import PathValidator
client = boto3.client("s3")
def get_bucket(_):
return [bucket["Name"] for bucket in client.list_buckets()["Buckets"]]
def walk_s3_bucket(result):
response = []
paginator = client.get_paginator("list_objects")
for result in paginator.paginate(Bucket=result["bucket"]):
for file in result["Contents"]:
response.append(file["Key"])
return response
def is_upload(result):
return result[0] == "Upload"
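# InquirerPy keys each answer by the question's "name" when given, otherwise by
# its index in the questions list, which is why is_upload() reads the action as result[0].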
questions = [
{
"message": "Select an S3 action:",
"type": "list",
"choices": ["Upload", "Download"],
},
{
"message": "Enter the filepath to upload:",
"type": "filepath",
"when": is_upload,
"validate": PathValidator(),
"only_files": True,
},
{
"message": "Select a bucket:",
"type": "fuzzy",
"choices": get_bucket,
"name": "bucket",
"spinner_enable": True,
},
{
"message": "Select files to download:",
"type": "fuzzy",
"when": lambda _: not is_upload(_),
"choices": walk_s3_bucket,
"multiselect": True,
"spinner_enable": True,
},
{
"message": "Enter destination folder:",
"type": "filepath",
"when": lambda _: not is_upload(_),
"only_directories": True,
"validate": PathValidator(),
},
{"message": "Confirm?", "type": "confirm", "default": False},
]
try:
result = prompt(questions, vi_mode=True)
except InvalidArgument:
print("No available choices")
# Download or Upload the file based on result ...
| [
"InquirerPy.prompt",
"InquirerPy.validator.PathValidator",
"boto3.client"
] | [((201, 219), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (213, 219), False, 'import boto3\n'), ((1646, 1677), 'InquirerPy.prompt', 'prompt', (['questions'], {'vi_mode': '(True)'}), '(questions, vi_mode=True)\n', (1652, 1677), False, 'from InquirerPy import prompt\n'), ((911, 926), 'InquirerPy.validator.PathValidator', 'PathValidator', ([], {}), '()\n', (924, 926), False, 'from InquirerPy.validator import PathValidator\n'), ((1535, 1550), 'InquirerPy.validator.PathValidator', 'PathValidator', ([], {}), '()\n', (1548, 1550), False, 'from InquirerPy.validator import PathValidator\n')] |
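For illustration, the elided final step above might look roughly like the sketch below. It is hypothetical: it assumes `prompt()` returned successfully, that answers to unnamed questions are keyed by their question index, and that the bucket answer is keyed by its `name`.
import os

if result[5]:  # the trailing confirm question
    if result[0] == "Upload":
        client.upload_file(result[1], result["bucket"], os.path.basename(result[1]))
    else:
        for key in result[3]:  # object keys picked in the multiselect
            dest = os.path.join(result[4], os.path.basename(key))
            client.download_file(result["bucket"], key, dest)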
#!/bin/sh
''''[ ! -z $VIRTUAL_ENV ] && exec python -u -- "$0" ${1+"$@"}; command -v python3 > /dev/null && exec python3 -u -- "$0" ${1+"$@"}; exec python2 -u -- "$0" ${1+"$@"} # '''
import sys
import os
import argparse
HERE = os.path.dirname(__file__)
ROOT = os.path.abspath(os.path.join(HERE, ".."))
sys.path.insert(0, ROOT)
import paella
#----------------------------------------------------------------------------------------------
class SystemSetup(paella.Setup):
def __init__(self, nop=False):
paella.Setup.__init__(self, nop)
def common_first(self):
# self.install("")
# self.group_install("")
# self.setup_pip()
# self.pip_install("")
print("common_first")
def debian_compat(self):
print("debian_compat")
def redhat_compat(self):
print("redhat_compat")
def fedora(self):
print("fedora")
def macos(self):
print("macos")
def common_last(self):
print("common_last")
#----------------------------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Set up system for build.')
parser.add_argument('-n', '--nop', action="store_true", help='no operation')
# parser.add_argument('--bool', action="store_true", help="flag")
# parser.add_argument('--int', type=int, default=1, help='number')
# parser.add_argument('--str', type=str, default='str', help='string')
args = parser.parse_args()
SystemSetup(nop=args.nop).setup()
| [
"sys.path.insert",
"argparse.ArgumentParser",
"os.path.join",
"os.path.dirname",
"paella.Setup.__init__"
] | [((228, 253), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (243, 253), False, 'import os\n'), ((303, 327), 'sys.path.insert', 'sys.path.insert', (['(0)', 'ROOT'], {}), '(0, ROOT)\n', (318, 327), False, 'import sys\n'), ((1104, 1167), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Set up system for build."""'}), "(description='Set up system for build.')\n", (1127, 1167), False, 'import argparse\n'), ((277, 301), 'os.path.join', 'os.path.join', (['HERE', '""".."""'], {}), "(HERE, '..')\n", (289, 301), False, 'import os\n'), ((516, 548), 'paella.Setup.__init__', 'paella.Setup.__init__', (['self', 'nop'], {}), '(self, nop)\n', (537, 548), False, 'import paella\n')] |
import pyHook
from threading import Timer
import win32gui
import logging
class blockInput():
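    # Swallows all keyboard and mouse events via pyHook hooks, with a timed
    # auto-release so input can never stay blocked indefinitely.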
def OnKeyboardEvent(self,event):
return False
def OnMouseEvent(self,event):
return False
def unblock(self):
logging.info(" -- Unblock!")
if self.t.is_alive():
self.t.cancel()
        # ignore errors here: unhooking fails if the hook was never installed
        try:
            self.hm.UnhookKeyboard()
        except Exception:
            pass
        try:
            self.hm.UnhookMouse()
        except Exception:
            pass
def block(self, timeout = 10, keyboard = True, mouse = True):
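        # watchdog: release the hooks after `timeout` seconds even if
        # unblock() is never called explicitly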
self.t = Timer(timeout, self.unblock)
self.t.start()
logging.info(" -- Block!")
if mouse:
self.hm.MouseAll = self.OnMouseEvent
self.hm.HookMouse()
if keyboard:
self.hm.KeyAll = self.OnKeyboardEvent
self.hm.HookKeyboard()
win32gui.PumpWaitingMessages()
def __init__(self):
self.hm = pyHook.HookManager()
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
block = blockInput()
block.block()
import time
t0 = time.time()
while time.time() - t0 < 10:
time.sleep(1)
print(time.time() - t0)
block.unblock()
logging.info("Done.")
| [
"logging.basicConfig",
"win32gui.PumpWaitingMessages",
"threading.Timer",
"pyHook.HookManager",
"logging.info",
"time.sleep",
"time.time"
] | [((992, 1031), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (1011, 1031), False, 'import logging\n'), ((1108, 1119), 'time.time', 'time.time', ([], {}), '()\n', (1117, 1119), False, 'import time\n'), ((1238, 1259), 'logging.info', 'logging.info', (['"""Done."""'], {}), "('Done.')\n", (1250, 1259), False, 'import logging\n'), ((253, 281), 'logging.info', 'logging.info', (['""" -- Unblock!"""'], {}), "(' -- Unblock!')\n", (265, 281), False, 'import logging\n'), ((548, 576), 'threading.Timer', 'Timer', (['timeout', 'self.unblock'], {}), '(timeout, self.unblock)\n', (553, 576), False, 'from threading import Timer\n'), ((612, 638), 'logging.info', 'logging.info', (['""" -- Block!"""'], {}), "(' -- Block!')\n", (624, 638), False, 'import logging\n'), ((859, 889), 'win32gui.PumpWaitingMessages', 'win32gui.PumpWaitingMessages', ([], {}), '()\n', (887, 889), False, 'import win32gui\n'), ((936, 956), 'pyHook.HookManager', 'pyHook.HookManager', ([], {}), '()\n', (954, 956), False, 'import pyHook\n'), ((1163, 1176), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1173, 1176), False, 'import time\n'), ((1131, 1142), 'time.time', 'time.time', ([], {}), '()\n', (1140, 1142), False, 'import time\n'), ((1192, 1203), 'time.time', 'time.time', ([], {}), '()\n', (1201, 1203), False, 'import time\n')] |
"""
This Python code demonstrates an edge-based active contour model as an application of the
Distance Regularized Level Set Evolution (DRLSE) formulation in the following paper:
<NAME>, <NAME>, <NAME>, <NAME>, "Distance Regularized Level Set Evolution and Its Application to Image Segmentation",
IEEE Trans. Image Processing, vol. 19 (12), pp. 3243-3254, 2010.
Author: <NAME>
E-mail: <EMAIL>
Released Under MIT License
"""
import numpy as np
from skimage.io import imread
from lv_set.find_lsf import find_lsf
from lv_set.potential_func import *
from lv_set.show_fig import draw_all
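# The two parameter builders below differ only in the input image, the initial
# zero-level region, and the solver coefficients handed to find_lsf().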
def gourd_params():
img = imread('gourd.bmp', True)
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# initialize LSF as binary step function
c0 = 2
initial_lsf = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initial_lsf[24:35, 19:25] = -c0
initial_lsf[24:35, 39:50] = -c0
# parameters
return {
'img': img,
'initial_lsf': initial_lsf,
'timestep': 1, # time step
'iter_inner': 10,
'iter_outer': 30,
'lmda': 5, # coefficient of the weighted length term L(phi)
'alfa': -3, # coefficient of the weighted area term A(phi)
'epsilon': 1.5, # parameter that specifies the width of the DiracDelta function
'sigma': 0.8, # scale parameter in Gaussian kernel
'potential_function': DOUBLE_WELL,
}
def two_cells_params():
img = imread('twocells.bmp', True)
img = np.interp(img, [np.min(img), np.max(img)], [0, 255])
# initialize LSF as binary step function
c0 = 2
initial_lsf = c0 * np.ones(img.shape)
# generate the initial region R0 as two rectangles
initial_lsf[9:55, 9:75] = -c0
# parameters
return {
'img': img,
'initial_lsf': initial_lsf,
'timestep': 5, # time step
'iter_inner': 5,
'iter_outer': 40,
'lmda': 5, # coefficient of the weighted length term L(phi)
'alfa': 1.5, # coefficient of the weighted area term A(phi)
'epsilon': 1.5, # parameter that specifies the width of the DiracDelta function
'sigma': 1.5, # scale parameter in Gaussian kernel
'potential_function': DOUBLE_WELL,
}
params = gourd_params()
# params = two_cells_params()
phi = find_lsf(**params)
print('Show final output')
draw_all(phi, params['img'], 10)
| [
"numpy.ones",
"numpy.max",
"skimage.io.imread",
"lv_set.find_lsf.find_lsf",
"numpy.min",
"lv_set.show_fig.draw_all"
] | [((2340, 2358), 'lv_set.find_lsf.find_lsf', 'find_lsf', ([], {}), '(**params)\n', (2348, 2358), False, 'from lv_set.find_lsf import find_lsf\n'), ((2387, 2419), 'lv_set.show_fig.draw_all', 'draw_all', (['phi', "params['img']", '(10)'], {}), "(phi, params['img'], 10)\n", (2395, 2419), False, 'from lv_set.show_fig import draw_all\n'), ((627, 652), 'skimage.io.imread', 'imread', (['"""gourd.bmp"""', '(True)'], {}), "('gourd.bmp', True)\n", (633, 652), False, 'from skimage.io import imread\n'), ((1488, 1516), 'skimage.io.imread', 'imread', (['"""twocells.bmp"""', '(True)'], {}), "('twocells.bmp', True)\n", (1494, 1516), False, 'from skimage.io import imread\n'), ((796, 814), 'numpy.ones', 'np.ones', (['img.shape'], {}), '(img.shape)\n', (803, 814), True, 'import numpy as np\n'), ((1660, 1678), 'numpy.ones', 'np.ones', (['img.shape'], {}), '(img.shape)\n', (1667, 1678), True, 'import numpy as np\n'), ((679, 690), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (685, 690), True, 'import numpy as np\n'), ((692, 703), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (698, 703), True, 'import numpy as np\n'), ((1543, 1554), 'numpy.min', 'np.min', (['img'], {}), '(img)\n', (1549, 1554), True, 'import numpy as np\n'), ((1556, 1567), 'numpy.max', 'np.max', (['img'], {}), '(img)\n', (1562, 1567), True, 'import numpy as np\n')] |
#!/usr/bin/env python3
# vim: set fileencoding=utf-8 :
# Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Web-based Vogon configuration file editor.
This utility starts a web server and opens a local web page in the user's
default browser, which contains a GUI to edit the JSON configuration file.
"""
from os import path
import sys
program_dir = path.abspath(path.dirname(__file__))
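# prepend the vendored bottle copy so it wins over any system-wide install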
sys.path.insert(0, program_dir + '/third_party/bottle/')
import argparse
from bottle import get, post, delete, request, route, run, static_file, response
import codecs
from io import StringIO
import csv
from distutils.dir_util import copy_tree
import http.client
import json
import os
import platform
import re
import shutil
import subprocess
import tempfile
import threading
import time
import urllib
import zipfile
import vogon
import yt_api
import google_ads_editor_csv as g_ads_editor
################################################################################
# YOUTUBE AUTHENTICATION
################################################################################
@post('/api/youtube_auth/get_device_code')
def get_device_code():
yt_status, yt_response = yt_api.get_device_code()
response.status = yt_status
return yt_response
@post('/api/youtube_auth/check_device_authorization')
def check_device_authorization():
yt_status, yt_response = yt_api.check_device_authorization(request.json['code'])
response.status = yt_status
return yt_response
@post('/api/youtube/list_channels')
def list_channels():
_, refresh_token_response = yt_api.refresh_access_token(request.json['refresh_token'])
new_access_token = json.loads(refresh_token_response)['access_token']
yt_status, yt_response = yt_api.list_channels(new_access_token)
yt_response_content = json.loads(yt_response)
response.status = yt_status
yt_response_content['access_token'] = new_access_token
yt_response_content['refresh_token'] = request.json['refresh_token']
return yt_response_content
@post('/api/youtube/start_video_upload')
def start_video_upload():
return yt_api.start_video_upload(request.json)
@post('/api/youtube/remove_uploaded_videos')
def remove_uploaded_videos():
return yt_api.remove_uploaded_videos(request.json)
@get('/api/youtube/read_log/<project_id>')
def read_log(project_id):
return yt_api.read_log(project_id, 1)
################################################################################
# CONFIG ACTIONS
################################################################################
@get('/api/projects/<project_folder>/config')
def get_config(project_folder):
config_file = os.path.join("projects", project_folder, "config.json")
return static_file(config_file, root='./')
@get('/api/sheets_client_id')
def get_secrest_json():
secret_file = "credentials/oauth_2_client_secret.json"
with open(secret_file) as s_file:
ctn = json.loads(s_file.read())
s_file.close()
return json.dumps(ctn["web"]["client_id"])
@post('/api/projects/<project_folder>/config')
def post_config(project_folder):
config_file = os.path.join("projects", project_folder, "config.json")
with open(config_file, 'w') as f:
json.dump(request.json, f, indent=2)
f.close()
################################################################################
# VIDEO GENERATION ACTIONS
################################################################################
@get('/api/projects/<project_folder>/preview/row/<index>')
def generate_preview(project_folder, index):
config_file = os.path.join("projects", project_folder, "config.json")
video = vogon.generate_preview(config_file, int(index),
project_dir=project_folder)
return static_file(video, root='./', download=video)
@post('/api/projects/<project_id>/generate_all_videos')
def generate_all_variations(project_id):
arg = (project_id,)
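  # generate on a background thread so the HTTP request can return immediately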
t = threading.Thread(target=vogon.generate_all_video_variations, args=arg)
t.start()
return json.dumps("Started")
@get('/api/projects/<project_id>/cancel_video_generation')
def cancel_video_generation(project_id):
vogon.stop_video_generation(project_id)
return json.dumps("Canceled")
@get('/api/projects/<project_id>/update_on_video_generation')
def update_on_video_generation(project_id):
started_at, current_state = vogon.get_video_generation_percent(project_id)
current_state = current_state.decode('utf-8') if current_state != "--" else ""
return json.dumps({
"started_at": str(started_at),
"current_state": current_state
})
################################################################################
# PROJECT MANAGEMENT ACTIONS
################################################################################
@get('/api/projects/list')
def get_available_projects():
if not os.path.exists("projects"):
os.makedirs("projects")
dirs = os.listdir("projects")
output = []
for d in dirs:
if d[0] != ".":
output.append({
"name": d,
"size": du("projects/"+d)
})
return json.dumps(output)
@post('/api/projects/new/name/<project_folder>')
def copy_base_project(project_folder):
project_folder = re.sub(r'[^\w_]', '', project_folder)
project_dir = os.path.join("projects", project_folder)
base_dir = "base_project/"
is_taken = os.path.isdir(project_dir)
if not is_taken:
# copies base project
copy_tree(base_dir, project_dir)
# fixes config file
conf_file_path = os.path.join(project_dir, "config.json")
data = ""
with open(conf_file_path, 'r') as config_file:
data = config_file.read().replace("{{project_id}}", project_folder)
config_file.close()
with open(conf_file_path, 'w') as config_file:
config_file.write(data)
time.sleep(2)
# returns success
return json.dumps({"success":True, "project": project_folder})
else:
return json.dumps({"success":False, "project": project_folder})
@post('/api/projects/<project_folder>/clear')
def clear_project(project_folder):
project_folder = os.path.join("projects", project_folder, "output")
shutil.rmtree(project_folder)
os.mkdir(project_folder)
return json.dumps("True")
@post('/api/projects/<project_folder>/delete')
def delete_project(project_folder):
project_folder = os.path.join("projects", project_folder)
shutil.rmtree(project_folder)
return json.dumps("True")
################################################################################
# ASSETS MANAGEMENT ACTIONS
################################################################################
@get('/api/projects/<project_id>/google_ads_editor_file')
def generate_and_download_editor_file(project_id):
  (uploaded, missing), error = g_ads_editor.build_csv(project_id)
if error is not None:
return json.dumps({"msg": "ERROR generating CSV: %s" % error})
  elif missing > 0:
return json.dumps({
"msg": "CSV file has %s of %s videos, please make sure to generate "
"all videos and upload all of them to YouTube Before "
"downloading the Editor CSV." % (uploaded, missing)
})
else:
feed_name = "google_ads_editor.csv"
feed_path = os.path.join("projects", project_id)
file_path = os.path.join(feed_path, feed_name)
filename = str(os.path.basename(file_path))
# renders to browser as file to download, not to display.
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-Disposition'] = 'attachment; filename="%s"'
response.headers['Content-Disposition'] %= (filename)
return static_file(feed_name, root=feed_path, download=filename)
@post('/api/projects/<project_id>/feed_content_upload')
def feed_content_upload(project_id):
feed_uri = "projects/%s/feed.csv" % project_id
feed_data = json.loads(request.body.read())['feed_data']
queue = StringIO()
writer = csv.writer(queue, dialect=csv.excel)
encoder = codecs.getincrementalencoder('utf-8')()
with open(feed_uri,'wb') as feed_file:
for feed_row in feed_data:
writer.writerow([v for v in feed_row])
data = queue.getvalue()
feed_file.write(encoder.encode(data))
      # reset the buffer for the next row; truncate alone would leave the
      # write position past the end and pad subsequent rows with NULs
      queue.seek(0)
      queue.truncate(0)
feed_file.close()
return json.dumps({'success': True})
@get('/api/projects/<project_id>/fonts')
def get_font_list(project_id):
matches = []
font_dirs = []
font_dirs.append("projects/%s/assets" % project_id)
font_dirs.append("/Library/Fonts")
font_dirs.append("/System/Library/Fonts")
font_dirs.append("/usr/share/fonts")
font_dirs.append("~/fonts")
font_dirs.append("~/.fonts")
for font_dir in font_dirs:
try:
for root, dirnames, filenames in os.walk(font_dir):
for filename in filenames:
font_file = os.path.join(root, filename)
if filename[-4:] in (".ttf",".otf"):
beaut_name = filename[:-4].replace("_"," ").replace("-"," ")
beaut_name = " ".join(re.findall('[A-Z][^A-Z]*', beaut_name))
if not beaut_name:
beaut_name = filename[:-4].replace("_"," ").replace("-"," ").split(" ")
beaut_name = " ".join([w.capitalize() for w in beaut_name])
matches.append([beaut_name, font_file])
except Exception as e:
print("error loading fonts: "+e)
return json.dumps(sorted(matches))
@get('/api/projects/<project_id>/assets')
def get_assets_list(project_id):
assets_path = "projects/%s/assets" % project_id
matches = []
for root, dirnames, filenames in os.walk(assets_path):
for filename in filenames:
asset = os.path.join(root, filename)
asset = asset.replace(assets_path, '')[1:]
matches.append(asset)
return json.dumps(sorted(matches))
@post('/api/projects/<project_id>/assets')
def post_single_asset(project_id):
assets_path = "projects/%s/assets/" % project_id
with PostedFileWriter(request) as file_path:
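    # zip archives are unpacked into assets/; any other upload is copied in as a single file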
if file_path[-4:] == '.zip':
with zipfile.ZipFile(file_path, 'r') as f:
f.extractall(assets_path)
else:
_, asset_name = os.path.split(file_path)
new_asset_path = assets_path + asset_name
shutil.copy(file_path, new_asset_path)
return get_assets_list(project_id)
def move_file(origin_path, dest_path):
  # ngFileUpload posts the content in the "file" field; save it straight to dest_path
  upload = request.files.get('file')
  upload.save(dest_path)
  return 1
@delete('/api/projects/<project_id>/assets/')
def delete_asset(project_id):
asset_name = request.query['asset_path']
try:
assets_path = "projects/%s/assets/" % project_id
asset_full_path = os.path.join(assets_path, asset_name)
os.unlink(asset_full_path)
except Exception as e: # pylint: disable=broad-except
error_msg = 'Error unlinking file %s.\nError: %s.'
error_msg %= (request.body, e)
print(error_msg)
return get_assets_list(project_id)
@get('/api/projects/<project_id>/download/assets/')
def download_asset(project_id):
asset_name = request.query['asset_path']
assets_path = "projects/%s/assets/" % project_id
file_path = os.path.join(assets_path, asset_name)
filename = str(os.path.basename(file_path))
# renders to browser as file to download, not to display.
response.headers['Content-Type'] = 'application/octet-stream'
response.headers['Content-Disposition'] = 'attachment; filename="%s"'
response.headers['Content-Disposition'] %= (filename)
return static_file(asset_name, root=assets_path, download=filename)
################################################################################
# Main page Actions
################################################################################
@get('/#!/project/<project_folder>')
def get_project_index(project_folder):
filename = 'html/index.html'
return get_static(filename)
@get('/')
def get_index():
filename = 'html/index.html'
return get_static(filename)
################################################################################
# Static Files
################################################################################
@get('/static/<filepath:path>')
def get_static(filepath):
static_dir = program_dir + '/static/'
return static_file(filepath, root=static_dir)
################################################################################
# Helpers
################################################################################
def du(path):
"""disk usage in human readable format (e.g. '2,1GB')"""
return subprocess.check_output(['du','-sh', path]).split()[0].decode('utf-8')
class PostedFileWriter(object):
"""Defines a resource to use on 'with' statements to clean up after upload."""
# not used
def __init__(self, request):
self.request = request
# not used
def __enter__(self):
self.temp_dir = tempfile.mkdtemp()
# ngFileUpload sends the content in the "file" parameter by default
input_file = request.files.get("file")
if input_file.filename:
file_name = input_file.filename
file_path = os.path.join(self.temp_dir, file_name)
buffer_size = 2**16 # 65k
with open(file_path, 'wb') as output_file:
buf = input_file.file.read(buffer_size)
while buf:
output_file.write(buf)
buf = input_file.file.read(buffer_size)
output_file.close()
return file_path
else:
return None
# not used
def __exit__(self, type_, value, traceback_):
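    # remove the temp dir even if the body of the `with` block raised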
shutil.rmtree(self.temp_dir)
################################################################################
# Main
################################################################################
def main():
parser = argparse.ArgumentParser()
parser.add_argument("--debug",
help="Enable debug mode",
action="store_true")
args = parser.parse_args()
run(host='0.0.0.0', port=8080, debug=args.debug)
if __name__=='__main__':
main()
| [
"sys.path.insert",
"distutils.dir_util.copy_tree",
"zipfile.ZipFile",
"yt_api.remove_uploaded_videos",
"time.sleep",
"yt_api.start_video_upload",
"bottle.get",
"os.walk",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"bottle.post",
"json.dumps",
"os.path.split",
"os.path.isd... | [((931, 987), 'sys.path.insert', 'sys.path.insert', (['(0)', "(program_dir + '/third_party/bottle/')"], {}), "(0, program_dir + '/third_party/bottle/')\n", (946, 987), False, 'import sys\n'), ((1611, 1652), 'bottle.post', 'post', (['"""/api/youtube_auth/get_device_code"""'], {}), "('/api/youtube_auth/get_device_code')\n", (1615, 1652), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((1782, 1834), 'bottle.post', 'post', (['"""/api/youtube_auth/check_device_authorization"""'], {}), "('/api/youtube_auth/check_device_authorization')\n", (1786, 1834), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((2006, 2040), 'bottle.post', 'post', (['"""/api/youtube/list_channels"""'], {}), "('/api/youtube/list_channels')\n", (2010, 2040), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((2530, 2569), 'bottle.post', 'post', (['"""/api/youtube/start_video_upload"""'], {}), "('/api/youtube/start_video_upload')\n", (2534, 2569), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((2648, 2691), 'bottle.post', 'post', (['"""/api/youtube/remove_uploaded_videos"""'], {}), "('/api/youtube/remove_uploaded_videos')\n", (2652, 2691), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((2777, 2818), 'bottle.get', 'get', (['"""/api/youtube/read_log/<project_id>"""'], {}), "('/api/youtube/read_log/<project_id>')\n", (2780, 2818), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((3067, 3111), 'bottle.get', 'get', (['"""/api/projects/<project_folder>/config"""'], {}), "('/api/projects/<project_folder>/config')\n", (3070, 3111), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((3263, 3291), 'bottle.get', 'get', (['"""/api/sheets_client_id"""'], {}), "('/api/sheets_client_id')\n", (3266, 3291), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((3511, 3556), 'bottle.post', 'post', (['"""/api/projects/<project_folder>/config"""'], {}), "('/api/projects/<project_folder>/config')\n", (3515, 3556), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((3957, 4014), 'bottle.get', 'get', (['"""/api/projects/<project_folder>/preview/row/<index>"""'], {}), "('/api/projects/<project_folder>/preview/row/<index>')\n", (3960, 4014), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((4308, 4362), 'bottle.post', 'post', (['"""/api/projects/<project_id>/generate_all_videos"""'], {}), "('/api/projects/<project_id>/generate_all_videos')\n", (4312, 4362), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((4548, 4605), 'bottle.get', 'get', (['"""/api/projects/<project_id>/cancel_video_generation"""'], {}), "('/api/projects/<project_id>/cancel_video_generation')\n", (4551, 4605), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((4723, 4783), 'bottle.get', 'get', (['"""/api/projects/<project_id>/update_on_video_generation"""'], {}), "('/api/projects/<project_id>/update_on_video_generation')\n", (4726, 4783), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((5280, 5305), 'bottle.get', 'get', (['"""/api/projects/list"""'], {}), "('/api/projects/list')\n", 
(5283, 5305), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((5598, 5645), 'bottle.post', 'post', (['"""/api/projects/new/name/<project_folder>"""'], {}), "('/api/projects/new/name/<project_folder>')\n", (5602, 5645), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((6467, 6511), 'bottle.post', 'post', (['"""/api/projects/<project_folder>/clear"""'], {}), "('/api/projects/<project_folder>/clear')\n", (6471, 6511), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((6707, 6752), 'bottle.post', 'post', (['"""/api/projects/<project_folder>/delete"""'], {}), "('/api/projects/<project_folder>/delete')\n", (6711, 6752), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((7103, 7159), 'bottle.get', 'get', (['"""/api/projects/<project_id>/google_ads_editor_file"""'], {}), "('/api/projects/<project_id>/google_ads_editor_file')\n", (7106, 7159), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((8201, 8255), 'bottle.post', 'post', (['"""/api/projects/<project_id>/feed_content_upload"""'], {}), "('/api/projects/<project_id>/feed_content_upload')\n", (8205, 8255), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((8800, 8839), 'bottle.get', 'get', (['"""/api/projects/<project_id>/fonts"""'], {}), "('/api/projects/<project_id>/fonts')\n", (8803, 8839), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((9862, 9902), 'bottle.get', 'get', (['"""/api/projects/<project_id>/assets"""'], {}), "('/api/projects/<project_id>/assets')\n", (9865, 9902), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((10248, 10289), 'bottle.post', 'post', (['"""/api/projects/<project_id>/assets"""'], {}), "('/api/projects/<project_id>/assets')\n", (10252, 10289), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((11008, 11052), 'bottle.delete', 'delete', (['"""/api/projects/<project_id>/assets/"""'], {}), "('/api/projects/<project_id>/assets/')\n", (11014, 11052), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((11484, 11534), 'bottle.get', 'get', (['"""/api/projects/<project_id>/download/assets/"""'], {}), "('/api/projects/<project_id>/download/assets/')\n", (11487, 11534), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((12267, 12302), 'bottle.get', 'get', (['"""/#!/project/<project_folder>"""'], {}), "('/#!/project/<project_folder>')\n", (12270, 12302), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((12401, 12409), 'bottle.get', 'get', (['"""/"""'], {}), "('/')\n", (12404, 12409), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((12672, 12702), 'bottle.get', 'get', (['"""/static/<filepath:path>"""'], {}), "('/static/<filepath:path>')\n", (12675, 12702), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((907, 929), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (919, 929), False, 'from os import path\n'), ((1703, 1727), 'yt_api.get_device_code', 'yt_api.get_device_code', ([], {}), '()\n', (1725, 1727), False, 'import yt_api\n'), ((1896, 1951), 
'yt_api.check_device_authorization', 'yt_api.check_device_authorization', (["request.json['code']"], {}), "(request.json['code'])\n", (1929, 1951), False, 'import yt_api\n'), ((2092, 2150), 'yt_api.refresh_access_token', 'yt_api.refresh_access_token', (["request.json['refresh_token']"], {}), "(request.json['refresh_token'])\n", (2119, 2150), False, 'import yt_api\n'), ((2251, 2289), 'yt_api.list_channels', 'yt_api.list_channels', (['new_access_token'], {}), '(new_access_token)\n', (2271, 2289), False, 'import yt_api\n'), ((2314, 2337), 'json.loads', 'json.loads', (['yt_response'], {}), '(yt_response)\n', (2324, 2337), False, 'import json\n'), ((2605, 2644), 'yt_api.start_video_upload', 'yt_api.start_video_upload', (['request.json'], {}), '(request.json)\n', (2630, 2644), False, 'import yt_api\n'), ((2731, 2774), 'yt_api.remove_uploaded_videos', 'yt_api.remove_uploaded_videos', (['request.json'], {}), '(request.json)\n', (2760, 2774), False, 'import yt_api\n'), ((2854, 2884), 'yt_api.read_log', 'yt_api.read_log', (['project_id', '(1)'], {}), '(project_id, 1)\n', (2869, 2884), False, 'import yt_api\n'), ((3160, 3215), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_folder', '"""config.json"""'], {}), "('projects', project_folder, 'config.json')\n", (3172, 3215), False, 'import os\n'), ((3225, 3260), 'bottle.static_file', 'static_file', (['config_file'], {'root': '"""./"""'}), "(config_file, root='./')\n", (3236, 3260), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((3473, 3508), 'json.dumps', 'json.dumps', (["ctn['web']['client_id']"], {}), "(ctn['web']['client_id'])\n", (3483, 3508), False, 'import json\n'), ((3608, 3663), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_folder', '"""config.json"""'], {}), "('projects', project_folder, 'config.json')\n", (3620, 3663), False, 'import os\n'), ((4076, 4131), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_folder', '"""config.json"""'], {}), "('projects', project_folder, 'config.json')\n", (4088, 4131), False, 'import os\n'), ((4260, 4305), 'bottle.static_file', 'static_file', (['video'], {'root': '"""./"""', 'download': 'video'}), "(video, root='./', download=video)\n", (4271, 4305), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((4432, 4502), 'threading.Thread', 'threading.Thread', ([], {'target': 'vogon.generate_all_video_variations', 'args': 'arg'}), '(target=vogon.generate_all_video_variations, args=arg)\n', (4448, 4502), False, 'import threading\n'), ((4524, 4545), 'json.dumps', 'json.dumps', (['"""Started"""'], {}), "('Started')\n", (4534, 4545), False, 'import json\n'), ((4649, 4688), 'vogon.stop_video_generation', 'vogon.stop_video_generation', (['project_id'], {}), '(project_id)\n', (4676, 4688), False, 'import vogon\n'), ((4698, 4720), 'json.dumps', 'json.dumps', (['"""Canceled"""'], {}), "('Canceled')\n", (4708, 4720), False, 'import json\n'), ((4858, 4904), 'vogon.get_video_generation_percent', 'vogon.get_video_generation_percent', (['project_id'], {}), '(project_id)\n', (4892, 4904), False, 'import vogon\n'), ((5410, 5432), 'os.listdir', 'os.listdir', (['"""projects"""'], {}), "('projects')\n", (5420, 5432), False, 'import os\n'), ((5577, 5595), 'json.dumps', 'json.dumps', (['output'], {}), '(output)\n', (5587, 5595), False, 'import json\n'), ((5704, 5741), 're.sub', 're.sub', (['"""[^\\\\w_]"""', '""""""', 'project_folder'], {}), "('[^\\\\w_]', '', project_folder)\n", (5710, 5741), False, 'import 
re\n'), ((5758, 5798), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_folder'], {}), "('projects', project_folder)\n", (5770, 5798), False, 'import os\n'), ((5841, 5867), 'os.path.isdir', 'os.path.isdir', (['project_dir'], {}), '(project_dir)\n', (5854, 5867), False, 'import os\n'), ((6566, 6616), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_folder', '"""output"""'], {}), "('projects', project_folder, 'output')\n", (6578, 6616), False, 'import os\n'), ((6619, 6648), 'shutil.rmtree', 'shutil.rmtree', (['project_folder'], {}), '(project_folder)\n', (6632, 6648), False, 'import shutil\n'), ((6651, 6675), 'os.mkdir', 'os.mkdir', (['project_folder'], {}), '(project_folder)\n', (6659, 6675), False, 'import os\n'), ((6685, 6703), 'json.dumps', 'json.dumps', (['"""True"""'], {}), "('True')\n", (6695, 6703), False, 'import json\n'), ((6808, 6848), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_folder'], {}), "('projects', project_folder)\n", (6820, 6848), False, 'import os\n'), ((6851, 6880), 'shutil.rmtree', 'shutil.rmtree', (['project_folder'], {}), '(project_folder)\n', (6864, 6880), False, 'import shutil\n'), ((6890, 6908), 'json.dumps', 'json.dumps', (['"""True"""'], {}), "('True')\n", (6900, 6908), False, 'import json\n'), ((7243, 7277), 'google_ads_editor_csv.build_csv', 'g_ads_editor.build_csv', (['project_id'], {}), '(project_id)\n', (7265, 7277), True, 'import google_ads_editor_csv as g_ads_editor\n'), ((8411, 8421), 'io.StringIO', 'StringIO', ([], {}), '()\n', (8419, 8421), False, 'from io import StringIO\n'), ((8433, 8469), 'csv.writer', 'csv.writer', (['queue'], {'dialect': 'csv.excel'}), '(queue, dialect=csv.excel)\n', (8443, 8469), False, 'import csv\n'), ((8768, 8797), 'json.dumps', 'json.dumps', (["{'success': True}"], {}), "({'success': True})\n", (8778, 8797), False, 'import json\n'), ((10036, 10056), 'os.walk', 'os.walk', (['assets_path'], {}), '(assets_path)\n', (10043, 10056), False, 'import os\n'), ((11675, 11712), 'os.path.join', 'os.path.join', (['assets_path', 'asset_name'], {}), '(assets_path, asset_name)\n', (11687, 11712), False, 'import os\n'), ((12021, 12081), 'bottle.static_file', 'static_file', (['asset_name'], {'root': 'assets_path', 'download': 'filename'}), '(asset_name, root=assets_path, download=filename)\n', (12032, 12081), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((12782, 12820), 'bottle.static_file', 'static_file', (['filepath'], {'root': 'static_dir'}), '(filepath, root=static_dir)\n', (12793, 12820), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((14249, 14274), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (14272, 14274), False, 'import argparse\n'), ((14416, 14464), 'bottle.run', 'run', ([], {'host': '"""0.0.0.0"""', 'port': '(8080)', 'debug': 'args.debug'}), "(host='0.0.0.0', port=8080, debug=args.debug)\n", (14419, 14464), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((2172, 2206), 'json.loads', 'json.loads', (['refresh_token_response'], {}), '(refresh_token_response)\n', (2182, 2206), False, 'import json\n'), ((3710, 3746), 'json.dump', 'json.dump', (['request.json', 'f'], {'indent': '(2)'}), '(request.json, f, indent=2)\n', (3719, 3746), False, 'import json\n'), ((5345, 5371), 'os.path.exists', 'os.path.exists', (['"""projects"""'], {}), "('projects')\n", (5359, 5371), False, 'import os\n'), ((5377, 5400), 'os.makedirs', 
'os.makedirs', (['"""projects"""'], {}), "('projects')\n", (5388, 5400), False, 'import os\n'), ((5917, 5949), 'distutils.dir_util.copy_tree', 'copy_tree', (['base_dir', 'project_dir'], {}), '(base_dir, project_dir)\n', (5926, 5949), False, 'from distutils.dir_util import copy_tree\n'), ((5995, 6035), 'os.path.join', 'os.path.join', (['project_dir', '"""config.json"""'], {}), "(project_dir, 'config.json')\n", (6007, 6035), False, 'import os\n'), ((6286, 6299), 'time.sleep', 'time.sleep', (['(2)'], {}), '(2)\n', (6296, 6299), False, 'import time\n'), ((6333, 6389), 'json.dumps', 'json.dumps', (["{'success': True, 'project': project_folder}"], {}), "({'success': True, 'project': project_folder})\n", (6343, 6389), False, 'import json\n'), ((6408, 6465), 'json.dumps', 'json.dumps', (["{'success': False, 'project': project_folder}"], {}), "({'success': False, 'project': project_folder})\n", (6418, 6465), False, 'import json\n'), ((7317, 7372), 'json.dumps', 'json.dumps', (["{'msg': 'ERROR generating CSV: %s' % error}"], {}), "({'msg': 'ERROR generating CSV: %s' % error})\n", (7327, 7372), False, 'import json\n'), ((8482, 8519), 'codecs.getincrementalencoder', 'codecs.getincrementalencoder', (['"""utf-8"""'], {}), "('utf-8')\n", (8510, 8519), False, 'import codecs\n'), ((11208, 11245), 'os.path.join', 'os.path.join', (['assets_path', 'asset_name'], {}), '(assets_path, asset_name)\n', (11220, 11245), False, 'import os\n'), ((11250, 11276), 'os.unlink', 'os.unlink', (['asset_full_path'], {}), '(asset_full_path)\n', (11259, 11276), False, 'import os\n'), ((11730, 11757), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (11746, 11757), False, 'import os\n'), ((13390, 13408), 'tempfile.mkdtemp', 'tempfile.mkdtemp', ([], {}), '()\n', (13406, 13408), False, 'import tempfile\n'), ((13498, 13523), 'bottle.request.files.get', 'request.files.get', (['"""file"""'], {}), "('file')\n", (13515, 13523), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((14023, 14051), 'shutil.rmtree', 'shutil.rmtree', (['self.temp_dir'], {}), '(self.temp_dir)\n', (14036, 14051), False, 'import shutil\n'), ((7407, 7600), 'json.dumps', 'json.dumps', (["{'msg': \n 'CSV file has %s of %s videos, please make sure to generate all videos and upload all of them to YouTube Before downloading the Editor CSV.'\n % (uploaded, missing)}"], {}), "({'msg': \n 'CSV file has %s of %s videos, please make sure to generate all videos and upload all of them to YouTube Before downloading the Editor CSV.'\n % (uploaded, missing)})\n", (7417, 7600), False, 'import json\n'), ((7719, 7755), 'os.path.join', 'os.path.join', (['"""projects"""', 'project_id'], {}), "('projects', project_id)\n", (7731, 7755), False, 'import os\n'), ((7774, 7808), 'os.path.join', 'os.path.join', (['feed_path', 'feed_name'], {}), '(feed_path, feed_name)\n', (7786, 7808), False, 'import os\n'), ((8141, 8198), 'bottle.static_file', 'static_file', (['feed_name'], {'root': 'feed_path', 'download': 'filename'}), '(feed_name, root=feed_path, download=filename)\n', (8152, 8198), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((8367, 8386), 'bottle.request.body.read', 'request.body.read', ([], {}), '()\n', (8384, 8386), False, 'from bottle import get, post, delete, request, route, run, static_file, response\n'), ((9215, 9232), 'os.walk', 'os.walk', (['font_dir'], {}), '(font_dir)\n', (9222, 9232), False, 'import os\n'), ((10103, 10131), 'os.path.join', 
'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (10115, 10131), False, 'import os\n'), ((10571, 10595), 'os.path.split', 'os.path.split', (['file_path'], {}), '(file_path)\n', (10584, 10595), False, 'import os\n'), ((10650, 10688), 'shutil.copy', 'shutil.copy', (['file_path', 'new_asset_path'], {}), '(file_path, new_asset_path)\n', (10661, 10688), False, 'import shutil\n'), ((13608, 13646), 'os.path.join', 'os.path.join', (['self.temp_dir', 'file_name'], {}), '(self.temp_dir, file_name)\n', (13620, 13646), False, 'import os\n'), ((7830, 7857), 'os.path.basename', 'os.path.basename', (['file_path'], {}), '(file_path)\n', (7846, 7857), False, 'import os\n'), ((10467, 10498), 'zipfile.ZipFile', 'zipfile.ZipFile', (['file_path', '"""r"""'], {}), "(file_path, 'r')\n", (10482, 10498), False, 'import zipfile\n'), ((9291, 9319), 'os.path.join', 'os.path.join', (['root', 'filename'], {}), '(root, filename)\n', (9303, 9319), False, 'import os\n'), ((13078, 13122), 'subprocess.check_output', 'subprocess.check_output', (["['du', '-sh', path]"], {}), "(['du', '-sh', path])\n", (13101, 13122), False, 'import subprocess\n'), ((9474, 9512), 're.findall', 're.findall', (['"""[A-Z][^A-Z]*"""', 'beaut_name'], {}), "('[A-Z][^A-Z]*', beaut_name)\n", (9484, 9512), False, 'import re\n')] |
#!/usr/bin/env python
# coding: utf-8
# # Lab 02
#
# ## Solving a system of nonlinear equations
#
# ### <NAME>, Б01-818
#
# IV.12.7.д
# $$\begin{cases} x^7 - 5x^2y^4 + 1510 = 0 \\ y^3 - 3x^4y - 105 = 0 \end{cases}$$
# $$\begin{cases} x_{n+1} = \sqrt{\frac{x_n^7 + 1510}{5y_n^4}} \\ y_{n+1} = \sqrt[3]{3x_{n}^4y_{n}+105} \end{cases}$$
# $$J=\begin{pmatrix}7x^6-10xy^4 & -20x^2y^3\\-12x^3y & 3y^2-3x^4\end{pmatrix}$$
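# The Newton class below applies the analytic inverse of this Jacobian:
# $$x_{n+1} = x_n - J^{-1}(x_n)F(x_n)$$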
# In[1]:
import unittest
import logging
import numpy as np
import pandas as pd
# In[2]:
#logging.basicConfig(level=logging.DEBUG)
# In[3]:
class FPI:
def __init__(self, f_vec):
self.__f_vec = f_vec
self.iter = 0
self.log = logging.getLogger("FPI")
def __is_stop(self, next_x, cur_x, q, delta):
if next_x == cur_x:
return False
if sum(np.abs((next_x[i] - cur_x[i])) for i in range(len(cur_x))) <= delta * (1 - q):
return True
return False
def solve(self, init_x, q, delta):
cur_x = init_x
next_x = init_x
while not self.__is_stop(next_x, cur_x, q, delta):
cur_x = next_x
next_x = cur_x[:]
for i in range(len(self.__f_vec)):
next_x[i] = self.__f_vec[i](cur_x)
self.log.debug(f"Iter[{self.iter}]: Init: {cur_x} Next: {next_x}")
self.iter = self.iter + 1
return next_x
# In[4]:
class Newton:
def __init__(self, f_vec, J):
self.__f_vec = f_vec
self.__J = J
self.iter = 0
self.log = logging.getLogger("Newton")
def __J_mul_f(self, x, i):
return sum(self.__f_vec[j](x) * self.__J[i][j](x) for j in range(len(self.__f_vec)))
def __is_stop(self, next_x, cur_x, M2, m1, delta):
if next_x == cur_x:
return False
if sum(np.abs(next_x[i] - cur_x[i]) for i in range(len(cur_x))) < np.sqrt(2*delta*m1/M2):
return True
return False
def solve(self, init_x, M2, m1, delta):
self.iter = 0
cur_x = init_x
next_x = init_x
while not self.__is_stop(next_x, cur_x, M2, m1, delta):
cur_x = next_x
next_x = cur_x[:]
for i in range(len(self.__f_vec)):
next_x[i] = cur_x[i] - self.__J_mul_f(cur_x, i)
self.log.debug(f"Iter[{self.iter}]: Init: {cur_x} Next: {next_x}")
self.iter = self.iter + 1
return next_x
# In[5]:
def fpi_f1(x):
return np.sqrt((x[0]**7 + 1510)/(5 * (x[1]**4)))
def fpi_f2(x):
return np.cbrt(3*(x[0]**4)*x[1] + 105)
fpi = FPI([fpi_f1, fpi_f2])
# In[6]:
def newton_f1(x):
return x[0]**7-5*(x[0]**2)*(x[1]**4)+1510
def newton_f2(x):
return x[1]**3-3*(x[0]**4)*x[1]-105
def J00(x):
return 7*(x[0]**6)-10*x[0]*(x[1]**4)
def J01(x):
return -20*(x[0]**2)*(x[1]**3)
def J10(x):
return -12*(x[0]**3)*x[1]
def J11(x):
return 3*(x[1]**2) - 3*(x[0]**4)
def J(x):
return [[J00(x), J01(x)], [J10(x), J11(x)]]
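# analytic inverse of the 2x2 Jacobian via the adjugate/determinant formula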
def J00_inv(x):
return J11(x)/(J00(x)*J11(x)-J10(x)*J01(x))
def J01_inv(x):
return - J01(x)/(J00(x)*J11(x)-J10(x)*J01(x))
def J10_inv(x):
return - J10(x)/(J00(x)*J11(x)-J10(x)*J01(x))
def J11_inv(x):
return J00(x)/(J00(x)*J11(x)-J10(x)*J01(x))
J_inv = [[J00_inv, J01_inv], [J10_inv, J11_inv]]
newton = Newton([newton_f1, newton_f2], J_inv)
# In[7]:
log = logging.getLogger()
x_init_vec_fpi = [[1,5], [3, -4], [-1, 5]]
x_init_vec_newton = [[1,5], [3, -4], [-1, 5], [-4, 0], [-2, -2]]
delta = 10**-5
q = 0.5
m1 = 1
M2 = 1
fpi_results = []
fpi_iterations = []
newton_results = []
newton_iterations = []
for x in x_init_vec_fpi:
fpi_results.append(fpi.solve(x, q, delta))
fpi_iterations.append(fpi.iter)
for x in x_init_vec_newton:
newton_results.append(newton.solve(x, M2, m1, delta))
newton_iterations.append(newton.iter)
# In[8]:
fpi_dt = pd.DataFrame({"Начальное приближение": x_init_vec_fpi, "Результат": fpi_results, "Итераций": fpi_iterations})
newton_dt = pd.DataFrame({"Начальное приближение": x_init_vec_newton, "Результат": newton_results, "Итераций": newton_iterations})
print("Метод простых итераций")
print(fpi_dt)
print("\nМетод Ньютона")
print(newton_dt)
| [
"logging.getLogger",
"numpy.abs",
"numpy.sqrt",
"pandas.DataFrame",
"numpy.cbrt"
] | [((3476, 3495), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (3493, 3495), False, 'import logging\n'), ((3984, 4097), 'pandas.DataFrame', 'pd.DataFrame', (["{'Начальное приближение': x_init_vec_fpi, 'Результат': fpi_results,\n 'Итераций': fpi_iterations}"], {}), "({'Начальное приближение': x_init_vec_fpi, 'Результат':\n fpi_results, 'Итераций': fpi_iterations})\n", (3996, 4097), True, 'import pandas as pd\n'), ((4106, 4228), 'pandas.DataFrame', 'pd.DataFrame', (["{'Начальное приближение': x_init_vec_newton, 'Результат': newton_results,\n 'Итераций': newton_iterations}"], {}), "({'Начальное приближение': x_init_vec_newton, 'Результат':\n newton_results, 'Итераций': newton_iterations})\n", (4118, 4228), True, 'import pandas as pd\n'), ((2576, 2621), 'numpy.sqrt', 'np.sqrt', (['((x[0] ** 7 + 1510) / (5 * x[1] ** 4))'], {}), '((x[0] ** 7 + 1510) / (5 * x[1] ** 4))\n', (2583, 2621), True, 'import numpy as np\n'), ((2645, 2680), 'numpy.cbrt', 'np.cbrt', (['(3 * x[0] ** 4 * x[1] + 105)'], {}), '(3 * x[0] ** 4 * x[1] + 105)\n', (2652, 2680), True, 'import numpy as np\n'), ((684, 708), 'logging.getLogger', 'logging.getLogger', (['"""FPI"""'], {}), "('FPI')\n", (701, 708), False, 'import logging\n'), ((1600, 1627), 'logging.getLogger', 'logging.getLogger', (['"""Newton"""'], {}), "('Newton')\n", (1617, 1627), False, 'import logging\n'), ((1944, 1972), 'numpy.sqrt', 'np.sqrt', (['(2 * delta * m1 / M2)'], {}), '(2 * delta * m1 / M2)\n', (1951, 1972), True, 'import numpy as np\n'), ((841, 869), 'numpy.abs', 'np.abs', (['(next_x[i] - cur_x[i])'], {}), '(next_x[i] - cur_x[i])\n', (847, 869), True, 'import numpy as np\n'), ((1885, 1913), 'numpy.abs', 'np.abs', (['(next_x[i] - cur_x[i])'], {}), '(next_x[i] - cur_x[i])\n', (1891, 1913), True, 'import numpy as np\n')] |
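As a quick sanity check (a hypothetical addition, not required by the lab), the reported roots can be substituted back into the original system; both residuals should be near zero:
for x in newton_results:
    print(newton_f1(x), newton_f2(x))  # |f1|, |f2| ~ 0 at a true root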
#!/usr/bin/env python3
"""PenIn setup script."""
from setuptools import setup, find_packages
from penin.core.version import get_version
VERSION = get_version()
readme_file = open("README.md", "r")
LONG_DESCRIPTION = readme_file.read()
readme_file.close()
setup(
name="penin",
version=VERSION,
description="Information gathering and penetration testing framework",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
author="<NAME>",
author_email="<EMAIL>",
url="https://github.com/fabaff/penin",
license="Apache 2.0",
packages=find_packages(exclude=["ez_setup", "tests*"]),
package_data={"penin": ["templates/*"]},
include_package_data=True,
install_requires=["cement", "pyyaml", "colorlog", "jinja2", "tinydb"],
entry_points={"console_scripts": ["penin = penin.main:main"]},
)
| [
"setuptools.find_packages",
"penin.core.version.get_version"
] | [((147, 160), 'penin.core.version.get_version', 'get_version', ([], {}), '()\n', (158, 160), False, 'from penin.core.version import get_version\n'), ((600, 645), 'setuptools.find_packages', 'find_packages', ([], {'exclude': "['ez_setup', 'tests*']"}), "(exclude=['ez_setup', 'tests*'])\n", (613, 645), False, 'from setuptools import setup, find_packages\n')] |
# -*- coding: utf-8 -*-
"""
Copyright (c) 2012 University of Oxford
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from webob import Request
import zope.interface
from repoze.who.classifiers import default_request_classifier
from repoze.who.interfaces import IRequestClassifier
import ConfigParser
from pylons import config
def custom_request_classifier(environ):
""" Returns one of the classifiers 'app', 'browser' or any
standard classifiers returned by
repoze.who.classifiers:default_request_classifier
"""
classifier = default_request_classifier(environ)
if classifier == 'browser':
login_form_url = '/login'
login_handler = '/login_handler'
logout_handler = '/logout_handler'
logout_url = '/logout'
# Decide if the client is a (user-driven) browser or an application
if config.has_key("who.config_file"):
config_file = config["who.config_file"]
config_who = ConfigParser.ConfigParser()
config_who.readfp(open(config_file))
login_form_url = config_who.get("plugin:friendlyform", "login_form_url")
login_handler = config_who.get("plugin:friendlyform", "login_handler_path")
logout_handler = config_who.get("plugin:friendlyform", "logout_handler_path")
logout_url = config_who.get("plugin:friendlyform", "post_logout_url")
path_info = environ['PATH_INFO']
#request = Request(environ)
#if not request.accept.best_match(['application/xhtml+xml', 'text/html']):
# # In our view, any client who doesn't support HTML/XHTML is an "app",
# # not a (user-driven) "browser".
# classifier = 'app'
if not path_info in [login_form_url, login_handler, logout_handler, logout_url]:
# In our view, any client who hasn't come in from the login url is an app
classifier = 'app'
return classifier
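# advertise the function as an IRequestClassifier so repoze.who accepts it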
zope.interface.directlyProvides(custom_request_classifier, IRequestClassifier)
| [
"ConfigParser.ConfigParser",
"repoze.who.classifiers.default_request_classifier",
"pylons.config.has_key"
] | [((1530, 1565), 'repoze.who.classifiers.default_request_classifier', 'default_request_classifier', (['environ'], {}), '(environ)\n', (1556, 1565), False, 'from repoze.who.classifiers import default_request_classifier\n'), ((1834, 1867), 'pylons.config.has_key', 'config.has_key', (['"""who.config_file"""'], {}), "('who.config_file')\n", (1848, 1867), False, 'from pylons import config\n'), ((1946, 1973), 'ConfigParser.ConfigParser', 'ConfigParser.ConfigParser', ([], {}), '()\n', (1971, 1973), False, 'import ConfigParser\n')] |
#!/usr/bin/env python3
#------------------------------------------------------------------------------#
# Filename: apod_linux_config.py / \ #
# Project : APOD_Linux | () | #
# Date : 06/23/2021 | | #
# Author : <NAME> | \____/ | #
# License : WTFPLv2 \ / #
#------------------------------------------------------------------------------#
# imports
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
import logging
import os
import subprocess
str_prog_name = "apod_linux"
# find the config file
home_dir = os.path.expanduser("~")
pic_dir = os.path.join(home_dir, "." + str_prog_name)
conf_file = os.path.join(pic_dir, str_prog_name + ".conf")
# get log file name`
log_file = os.path.join(pic_dir, str_prog_name + ".log")
# set up logging
logging.basicConfig(filename = log_file, level = logging.DEBUG,
format = "%(asctime)s - %(message)s")
# set defaults for first tab
def_enabled = True
def_delay = 30
def_caption = True
def_position = "BR"
# set defaults for second tab
def_text_r = 255
def_text_g = 255
def_text_b = 255
def_text_a = 100
def_bg_r = 0
def_bg_g = 0
def_bg_b = 0
def_bg_a = 75
# set defaults for third tab
def_width = 500
def_font_size = 15
def_corner = 15
def_border = 20
def_top_pad = 50
def_bottom_pad = 10
def_side_pad = 10
# default strings
str_title = "APOD_Linux"
str_label_enabled = "Enable " + str_title + ":"
str_tooltip_enabled = "Enables or disables the " + str_title + " program"
str_label_delay = "Delay (0-60):"
str_tooltip_delay = "How long to wait (in seconds) for an internet connection \
before downloading"
str_label_caption = "Use caption:"
str_tooltip_caption = "Enables or disables the caption on top of the wallpaper"
str_tab_general = "General"
str_label_text = "<b>Text</b>"
str_label_text_r = "Red (0-255):"
str_tooltip_text_r = "The red value for the caption text"
str_label_text_g = "Green (0-255):"
str_tooltip_text_g = "The green value for the caption text"
str_label_text_b = "Blue (0-255):"
str_tooltip_text_b = "The blue value for the caption text"
str_label_text_a = "Alpha % (0-100):"
str_tooltip_text_a = "The alpha (transparency) value for the caption text"
str_label_bg = "<b>Background</b>"
str_label_bg_r = "Red (0-255):"
str_tooltip_bg_r = "The red value for the caption background"
str_label_bg_g = "Green (0-255):"
str_tooltip_bg_g = "The green value for the caption background"
str_label_bg_b = "Blue (0-255):"
str_tooltip_bg_b = "The blue value for the caption background"
str_label_bg_a = "Alpha % (0-100):"
str_tooltip_bg_a = "The alpha (transparency) value for the caption background"
str_tab_colors = "Colors"
str_label_position = "Position:"
str_tooltip_position = "The position of the caption relative to the screen"
str_label_width = "Width (0-1000):"
str_tooltip_width = "The width of the caption bubble"
str_label_font_size = "Font size (0-50):"
str_tooltip_font_size = "The font size of the caption"
str_label_corner = "Corner radius (0-50):"
str_tooltip_corner = "The corner radius of the caption bubble"
str_label_border = "Border (0-50):"
str_tooltip_border = "The spacing between the caption text and the background \
bubble"
str_label_top_pad = "Top padding (0-100):"
str_tooltip_top_pad = "The spacing between the caption and the top of the \
screen"
str_label_bottom_pad = "Bottom padding (0-100):"
str_tooltip_bottom_pad = "The spacing between the caption and the bottom of \
the screen"
str_label_side_pad = "Side padding (0-100):"
str_tooltip_side_pad = "The spacing between the caption and the sides of the \
screen"
str_tab_sizes = "Sizes"
str_button_ok = "OK"
str_button_cancel = "Cancel"
str_button_apply = "Apply"
str_tl = "Top Left"
str_tr = "Top Right"
str_bl = "Bottom Left"
str_br = "Bottom Right"
str_c = "Center"
# map short names to display strings
position_map = {
"TL" : str_tl,
"TR" : str_tr,
"BL" : str_bl,
"BR" : str_br,
"C" : str_c
}
run_prog_cmd = "python3 /usr/bin/apod_linux.py & disown"
# the main window class
class MyWindow(Gtk.Window):
# constructor
def __init__(self):
# call super constructor
Gtk.Window.__init__(self, title=str_title)
# the padding between the window edge and the content
self.set_border_width(20)
# set new width and default (fit) height
self.set_default_size(600, -1)
# don't allow resizing of window
self.set_resizable(False)
# create the stack BEFORE switcher and set props
stack = Gtk.Stack()
stack.set_transition_type(Gtk.StackTransitionType.NONE)
# create the switcher and attach stack
stack_switcher = Gtk.StackSwitcher()
stack_switcher.set_stack(stack)
# create a box for the switcher that keeps it centered horizontally
# resize box to fill parent but do not resize child (switcher)
# in an hbox, the child automatically fills vertically but is centered
# horizontally
hbox_switcher = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL)
hbox_switcher.pack_start(stack_switcher, True, False, 0)
# the first tab
        # create a grid with inter-spacing
grid_general = Gtk.Grid()
grid_general.set_row_spacing(20)
grid_general.set_column_spacing(20)
# add a label and switch
label_enabled = Gtk.Label(label=str_label_enabled)
label_enabled.set_alignment(1, 0)
grid_general.attach(label_enabled, 0, 0, 1, 1)
self.switch_enabled = Gtk.Switch()
self.switch_enabled.connect("notify::active",
self.switch_enabled_clicked)
self.switch_enabled.set_tooltip_text(str_tooltip_enabled)
hbox_enabled = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL)
hbox_enabled.pack_start(self.switch_enabled, False, False, 0)
grid_general.attach(hbox_enabled, 1, 0, 1, 1)
# add a label
label_delay = Gtk.Label(label=str_label_delay)
label_delay.set_alignment(1, 0)
grid_general.attach(label_delay, 0, 1, 1, 1)
# add a spinbox that grows horizontally
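        # Gtk.Adjustment positional args: value, lower, upper,
        # step_increment, page_increment, page_size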
adj_delay = Gtk.Adjustment(
0.0,
0.0,
60.0,
1.0,
5.0,
0.0
)
self.spin_delay = Gtk.SpinButton(adjustment=adj_delay, hexpand=True)
self.spin_delay.set_numeric(True)
self.spin_delay.set_tooltip_text(str_tooltip_delay)
grid_general.attach(self.spin_delay, 1, 1, 1, 1)
# add a label and switch
label_caption = Gtk.Label(label=str_label_caption)
label_caption.set_alignment(1, 0)
grid_general.attach(label_caption, 0, 2, 1, 1)
self.switch_caption = Gtk.Switch()
self.switch_caption.connect("notify::active",
self.switch_caption_clicked)
self.switch_caption.set_tooltip_text(str_tooltip_caption)
hbox_caption = Gtk.Box(orientation = Gtk.Orientation.HORIZONTAL)
hbox_caption.pack_start(self.switch_caption, False, False, 0)
grid_general.attach(hbox_caption, 1, 2, 1, 1)
label_position = Gtk.Label(label=str_label_position)
label_position.set_alignment(1, 0)
grid_general.attach(label_position, 0, 3, 1, 1)
        # combos can take keys and vals and will only display vals
self.combo_position = Gtk.ComboBoxText()
self.combo_position.set_tooltip_text(str_tooltip_position)
grid_general.attach(self.combo_position, 1, 3, 1, 1)
for key, val in position_map.items():
self.combo_position.append(key, val)
# add the grid to the stack with a name and a title
stack.add_titled(grid_general, "general", str_tab_general)
# the second tab
        # create a grid with inter-spacing
grid_colors = Gtk.Grid()
grid_colors.set_row_spacing(20)
grid_colors.set_column_spacing(20)
label_text = Gtk.Label()
label_text.set_markup(str_label_text)
sep_text = Gtk.HSeparator()
# a box to vertically center the separator
# resize the box to fill the cell but do not resize child
# in a vbox, the child automatically fills the width but is centered
# vertically
vbox_sep_text = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
vbox_sep_text.pack_start(sep_text, True, False, 0)
# a box to contain label and separator box
# label is F, F to make it as small as possible
# box is T, T to make it as big as possible
hbox_text = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox_text.pack_start(label_text, False, False, 0)
hbox_text.pack_start(vbox_sep_text, True, True, 0)
grid_colors.attach(hbox_text, 0, 0, 2, 1)
# right-align labels, set spin min/max, and numeric only
label_text_r = Gtk.Label(label=str_label_text_r)
label_text_r.set_alignment(1, 0)
grid_colors.attach(label_text_r, 0, 1, 1, 1)
adj_text_r = Gtk.Adjustment(
0.0,
0.0,
255.0,
1.0,
5.0,
0.0
)
self.spin_text_r = Gtk.SpinButton(adjustment=adj_text_r, hexpand=True)
self.spin_text_r.set_numeric(True)
self.spin_text_r.set_tooltip_text(str_tooltip_text_r)
grid_colors.attach(self.spin_text_r, 1, 1, 1, 1)
label_text_g = Gtk.Label(label=str_label_text_g)
label_text_g.set_alignment(1, 0)
grid_colors.attach(label_text_g, 0, 2, 1, 1)
adj_text_g = Gtk.Adjustment(
0.0,
0.0,
255.0,
1.0,
5.0,
0.0
)
self.spin_text_g = Gtk.SpinButton(adjustment=adj_text_g, hexpand=True)
self.spin_text_g.set_numeric(True)
self.spin_text_g.set_tooltip_text(str_tooltip_text_g)
grid_colors.attach(self.spin_text_g, 1, 2, 1, 1)
label_text_b = Gtk.Label(label=str_label_text_b)
label_text_b.set_alignment(1, 0)
grid_colors.attach(label_text_b, 0, 3, 1, 1)
adj_text_b = Gtk.Adjustment(
0.0,
0.0,
255.0,
1.0,
5.0,
0.0
)
self.spin_text_b = Gtk.SpinButton(adjustment=adj_text_b, hexpand=True)
self.spin_text_b.set_numeric(True)
self.spin_text_b.set_tooltip_text(str_tooltip_text_b)
grid_colors.attach(self.spin_text_b, 1, 3, 1, 1)
label_text_a = Gtk.Label(label=str_label_text_a)
label_text_a.set_alignment(1, 0)
grid_colors.attach(label_text_a, 0, 4, 1, 1)
adj_text_a = Gtk.Adjustment(
0.0,
0.0,
100.0,
1.0,
5.0,
0.0
)
self.spin_text_a = Gtk.SpinButton(adjustment=adj_text_a, hexpand=True)
self.spin_text_a.set_numeric(True)
self.spin_text_a.set_tooltip_text(str_tooltip_text_a)
grid_colors.attach(self.spin_text_a, 1, 4, 1, 1)
label_bg = Gtk.Label()
label_bg.set_markup(str_label_bg)
sep_bg = Gtk.HSeparator()
# a box to vertically center the separator
# resize the box to fill the cell but do not resize child
# in a vbox, the child automatically fills the width but is centered
# vertically
vbox_sep_bg = Gtk.Box(orientation=Gtk.Orientation.VERTICAL)
vbox_sep_bg.pack_start(sep_bg, True, False, 0)
# a box to contain label and separator box
# label is F, F to make it as small as possible
# box is T, T to make it as big as possible
hbox_bg = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)
hbox_bg.pack_start(label_bg, False, False, 0)
hbox_bg.pack_start(vbox_sep_bg, True, True, 0)
grid_colors.attach(hbox_bg, 0, 5, 2, 1)
# right-align labels, set spin min/max, and numeric only
label_bg_r = Gtk.Label(label=str_label_bg_r)
label_bg_r.set_alignment(1, 0)
grid_colors.attach(label_bg_r, 0, 6, 1, 1)
adj_bg_r = Gtk.Adjustment(
0.0,
0.0,
255.0,
1.0,
5.0,
0.0
)
self.spin_bg_r = Gtk.SpinButton(adjustment=adj_bg_r, hexpand=True)
self.spin_bg_r.set_numeric(True)
self.spin_bg_r.set_tooltip_text(str_tooltip_bg_r)
grid_colors.attach(self.spin_bg_r, 1, 6, 1, 1)
label_bg_g = Gtk.Label(label=str_label_bg_g)
label_bg_g.set_alignment(1, 0)
grid_colors.attach(label_bg_g, 0, 7, 1, 1)
adj_bg_g = Gtk.Adjustment(
0.0,
0.0,
255.0,
1.0,
5.0,
0.0
)
self.spin_bg_g = Gtk.SpinButton(adjustment=adj_bg_g, hexpand=True)
self.spin_bg_g.set_numeric(True)
self.spin_bg_g.set_tooltip_text(str_tooltip_bg_g)
grid_colors.attach(self.spin_bg_g, 1, 7, 1, 1)
label_bg_b = Gtk.Label(label=str_label_bg_b)
label_bg_b.set_alignment(1, 0)
grid_colors.attach(label_bg_b, 0, 8, 1, 1)
adj_bg_b = Gtk.Adjustment(
0.0,
0.0,
255.0,
1.0,
5.0,
0.0
)
self.spin_bg_b = Gtk.SpinButton(adjustment=adj_bg_b, hexpand=True)
self.spin_bg_b.set_numeric(True)
self.spin_bg_b.set_tooltip_text(str_tooltip_bg_b)
grid_colors.attach(self.spin_bg_b, 1, 8, 1, 1)
label_bg_a = Gtk.Label(label=str_label_bg_a)
label_bg_a.set_alignment(1, 0)
grid_colors.attach(label_bg_a, 0, 9, 1, 1)
adj_bg_a = Gtk.Adjustment(
0.0,
0.0,
100.0,
1.0,
5.0,
0.0
)
self.spin_bg_a = Gtk.SpinButton(adjustment=adj_bg_a, hexpand=True)
self.spin_bg_a.set_numeric(True)
self.spin_bg_a.set_tooltip_text(str_tooltip_bg_a)
grid_colors.attach(self.spin_bg_a, 1, 9, 1, 1)
# add the grid to the stack with a name and a title
stack.add_titled(grid_colors, "colors", str_tab_colors)
# the third tab
        # create a grid with inter-spacing
grid_sizes = Gtk.Grid()
grid_sizes.set_row_spacing(20)
grid_sizes.set_column_spacing(20)
# create all the labels and spins with adjustments and numeric only
label_width = Gtk.Label(label=str_label_width)
label_width.set_alignment(1, 0)
grid_sizes.attach(label_width, 0, 0, 1, 1)
adj_width = Gtk.Adjustment(
0.0,
0.0,
1000.0,
1.0,
5.0,
0.0
)
self.spin_width = Gtk.SpinButton(adjustment=adj_width, hexpand=True)
self.spin_width.set_numeric(True)
self.spin_width.set_tooltip_text(str_tooltip_width)
grid_sizes.attach(self.spin_width, 1, 0, 1, 1)
label_font_size = Gtk.Label(label=str_label_font_size)
label_font_size.set_alignment(1, 0)
grid_sizes.attach(label_font_size, 0, 1, 1, 1)
adj_font_size = Gtk.Adjustment(
0.0,
0.0,
50.0,
1.0,
5.0,
0.0
)
self.spin_font_size = Gtk.SpinButton(adjustment=adj_font_size,
hexpand=True)
self.spin_font_size.set_numeric(True)
self.spin_font_size.set_tooltip_text(str_tooltip_font_size)
grid_sizes.attach(self.spin_font_size, 1, 1, 1, 1)
label_corner = Gtk.Label(label=str_label_corner)
label_corner.set_alignment(1, 0)
grid_sizes.attach(label_corner, 0, 2, 1, 1)
adj_corner = Gtk.Adjustment(
0.0,
0.0,
50.0,
1.0,
5.0,
0.0
)
self.spin_corner = Gtk.SpinButton(adjustment=adj_corner, hexpand=True)
self.spin_corner.set_numeric(True)
self.spin_corner.set_tooltip_text(str_tooltip_corner)
grid_sizes.attach(self.spin_corner, 1, 2, 1, 1)
label_border = Gtk.Label(label=str_label_border)
label_border.set_alignment(1, 0)
grid_sizes.attach(label_border, 0, 3, 1, 1)
adj_border = Gtk.Adjustment(
0.0,
0.0,
50.0,
1.0,
5.0,
0.0
)
self.spin_border = Gtk.SpinButton(adjustment=adj_border, hexpand=True)
self.spin_border.set_numeric(True)
self.spin_border.set_tooltip_text(str_tooltip_border)
grid_sizes.attach(self.spin_border, 1, 3, 1, 1)
label_top_pad = Gtk.Label(label=str_label_top_pad)
label_top_pad.set_alignment(1, 0)
grid_sizes.attach(label_top_pad, 0, 4, 1, 1)
adj_top_pad = Gtk.Adjustment(
0.0,
0.0,
100.0,
1.0,
5.0,
0.0
)
self.spin_top_pad = Gtk.SpinButton(adjustment=adj_top_pad, hexpand=True)
self.spin_top_pad.set_numeric(True)
self.spin_top_pad.set_tooltip_text(str_tooltip_top_pad)
grid_sizes.attach(self.spin_top_pad, 1, 4, 1, 1)
label_bottom_pad = Gtk.Label(label=str_label_bottom_pad)
label_bottom_pad.set_alignment(1, 0)
grid_sizes.attach(label_bottom_pad, 0, 5, 1, 1)
adj_bottom_pad = Gtk.Adjustment(
0.0,
0.0,
100.0,
1.0,
5.0,
0.0
)
self.spin_bottom_pad = Gtk.SpinButton(adjustment=adj_bottom_pad,
hexpand=True)
self.spin_bottom_pad.set_numeric(True)
self.spin_bottom_pad.set_tooltip_text(str_tooltip_bottom_pad)
grid_sizes.attach(self.spin_bottom_pad, 1, 5, 1, 1)
label_side_pad = Gtk.Label(label=str_label_side_pad)
label_side_pad.set_alignment(1, 0)
grid_sizes.attach(label_side_pad, 0, 6, 1, 1)
adj_side_pad = Gtk.Adjustment(
0.0,
0.0,
100.0,
1.0,
5.0,
0.0
)
self.spin_side_pad = Gtk.SpinButton(adjustment=adj_side_pad,
hexpand=True)
self.spin_side_pad.set_numeric(True)
self.spin_side_pad.set_tooltip_text(str_tooltip_side_pad)
grid_sizes.attach(self.spin_side_pad, 1, 6, 1, 1)
# add the grid to the stack with a name and a title
stack.add_titled(grid_sizes, "sizes", str_tab_sizes)
# create a box for the buttons
hbox_buttons = Gtk.Box(orientation=Gtk.Orientation.HORIZONTAL,
spacing=20)
# create the buttons
button_ok = Gtk.Button(label=str_button_ok)
button_ok.connect("clicked", self.button_ok_clicked)
hbox_buttons.pack_start(button_ok, True, True, 0)
button_cancel = Gtk.Button(label=str_button_cancel)
button_cancel.connect("clicked", self.button_cancel_clicked)
hbox_buttons.pack_start(button_cancel, True, True, 0)
button_apply = Gtk.Button(label=str_button_apply)
button_apply.connect("clicked", self.button_apply_clicked)
hbox_buttons.pack_start(button_apply, True, True, 0)
# create a vbox for the switcher box, stack, and button box and add it
# as main window's content
vbox_content = Gtk.Box(orientation=Gtk.Orientation.VERTICAL, spacing=50)
self.add(vbox_content)
# add the switcher's box, the stack, and button box as content
# do not resize switcher's box (horizontal fill is implicit)
# fully resize stack
# do not resize button box either
vbox_content.pack_start(hbox_switcher, False, False, 0)
vbox_content.pack_start(stack, True, True, 0)
vbox_content.pack_start(hbox_buttons, False, False, 0)
# load props or defaults
self.load_config()
# do switch routines
self.switch_caption_clicked(self.switch_caption, 0)
self.switch_enabled_clicked(self.switch_enabled, 0)
# load values from config file
def load_config(self):
# set defaults
self.switch_enabled.set_active(int(def_enabled))
self.spin_delay.set_value(int(def_delay))
self.switch_caption.set_active(int(def_caption))
self.spin_text_r.set_value(int(def_text_r))
self.spin_text_g.set_value(int(def_text_g))
self.spin_text_b.set_value(int(def_text_b))
self.spin_text_a.set_value(int(def_text_a))
self.spin_bg_r.set_value(int(def_bg_r))
self.spin_bg_g.set_value(int(def_bg_g))
self.spin_bg_b.set_value(int(def_bg_b))
self.spin_bg_a.set_value(int(def_bg_a))
for short_pos, long_pos in position_map.items():
if def_position == short_pos:
self.combo_position.set_active_id(short_pos)
self.spin_width.set_value(int(def_width))
self.spin_font_size.set_value(int(def_font_size))
self.spin_corner.set_value(int(def_corner))
self.spin_border.set_value(int(def_border))
self.spin_top_pad.set_value(int(def_top_pad))
self.spin_bottom_pad.set_value(int(def_bottom_pad))
self.spin_side_pad.set_value(int(def_side_pad))
# check if config file exists
if os.path.exists(conf_file):
# open config file and get all lines
with open(conf_file, "r") as f:
lines = f.readlines()
# try to find a value in the conf file
for line in lines:
line_clean = line.strip().upper()
            # ignore comment and blank lines
if line_clean.startswith("#") or line_clean == "":
continue
# split key off at equals
key_val = line_clean.split("=")
key = key_val[0].strip()
# split val off ignoring trailing comments
val = ""
if (len(key_val) > 1):
val_array = key_val[1].split("#")
val = val_array[0].strip()
# set values for keys
if key == "ENABLED":
if val != "":
self.switch_enabled.set_active(int(val))
if key == "DELAY":
if val != "":
self.spin_delay.set_value(int(val))
if key == "CAPTION":
if val != "":
self.switch_caption.set_active(int(val))
if key == "POSITION":
for short_pos, long_pos in position_map.items():
if val == short_pos:
self.combo_position.set_active_id(short_pos)
if key == "TEXT_R":
if val != "":
self.spin_text_r.set_value(int(val))
if key == "TEXT_G":
if val != "":
self.spin_text_g.set_value(int(val))
if key == "TEXT_B":
if val != "":
self.spin_text_b.set_value(int(val))
if key == "TEXT_A":
if val != "":
self.spin_text_a.set_value(int(val))
if key == "BG_R":
if val != "":
self.spin_bg_r.set_value(int(val))
if key == "BG_G":
if val != "":
self.spin_bg_g.set_value(int(val))
            if key == "BG_B":
if val != "":
self.spin_bg_b.set_value(int(val))
if key == "BG_A":
if val != "":
self.spin_bg_a.set_value(int(val))
if key == "WIDTH":
if val != "":
self.spin_width.set_value(int(val))
if key == "FONT_SIZE":
if val != "":
self.spin_font_size.set_value(int(val))
if key == "CORNER_RADIUS":
if val != "":
self.spin_corner.set_value(int(val))
if key == "BORDER":
if val != "":
self.spin_border.set_value(int(val))
if key == "TOP_PADDING":
                if val != "":
                    self.spin_top_pad.set_value(int(val))
if key == "BOTTOM_PADDING":
if val != "":
self.spin_bottom_pad.set_value(int(val))
if key == "SIDE_PADDING":
if val != "":
self.spin_side_pad.set_value(int(val))
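    # Illustrative sketch of the conf file written by save_config below
    # (values shown are the defaults above, not output from an actual run):
    #   ENABLED=1
    #   DELAY=30
    #   CAPTION=1
    #   POSITION=BR
    #   TEXT_R=255
    #   ...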
def save_config(self):
# open or create config file
with open(conf_file, "w+") as f:
# TODO: find line for key, replace value instead of overwriting
# whole file
f.write("# DO NOT EDIT THIS FILE BY HAND!\n\n")
# start writing options
f.write("ENABLED=" + str(int(self.switch_enabled.get_active())) +
"\n")
f.write("DELAY=" + str(int(self.spin_delay.get_value())) + "\n")
f.write("CAPTION=" + str(int(self.switch_caption.get_active())) +
"\n")
# fudge the position option from the array
val = self.combo_position.get_active_text()
for short_pos, long_pos in position_map.items():
if val == long_pos:
f.write("POSITION=" + short_pos + "\n")
break
f.write("TEXT_R=" + str(int(self.spin_text_r.get_value())) + "\n")
f.write("TEXT_G=" + str(int(self.spin_text_g.get_value())) + "\n")
f.write("TEXT_B=" + str(int(self.spin_text_b.get_value())) + "\n")
f.write("TEXT_A=" + str(int(self.spin_text_a.get_value())) + "\n")
f.write("BG_R=" + str(int(self.spin_bg_r.get_value())) + "\n")
f.write("BG_G=" + str(int(self.spin_bg_g.get_value())) + "\n")
f.write("BG_B=" + str(int(self.spin_bg_b.get_value())) + "\n")
f.write("BG_A=" + str(int(self.spin_bg_a.get_value())) + "\n")
f.write("WIDTH=" + str(int(self.spin_width.get_value())) + "\n")
f.write("FONT_SIZE=" + str(int(self.spin_font_size.get_value())) +
"\n")
f.write("CORNER_RADIUS=" + str(int(self.spin_corner.get_value())) +
"\n")
f.write("BORDER=" + str(int(self.spin_border.get_value())) + "\n")
f.write("TOP_PADDING=" + str(int(self.spin_top_pad.get_value())) +
"\n")
f.write("BOTTOM_PADDING=" +
str(int(self.spin_bottom_pad.get_value())) + "\n")
f.write("SIDE_PADDING=" + str(int(self.spin_side_pad.get_value())) +
"\n")
def run_prog(self):
logging.debug('GUI')
# only run once, no listener
cmd_array = run_prog_cmd.split()
# non-blocking subprocess
subprocess.Popen(cmd_array)
def switch_enabled_clicked(self, widget, gparam):
if widget.get_active():
self.spin_delay.set_sensitive(True)
self.switch_caption.set_sensitive(True)
self.switch_caption_clicked(self.switch_caption, 0)
else:
self.spin_delay.set_sensitive(False)
self.switch_caption.set_sensitive(False)
self.spin_text_r.set_sensitive(False)
self.spin_text_g.set_sensitive(False)
self.spin_text_b.set_sensitive(False)
self.spin_text_a.set_sensitive(False)
self.spin_bg_r.set_sensitive(False)
self.spin_bg_g.set_sensitive(False)
self.spin_bg_b.set_sensitive(False)
self.spin_bg_a.set_sensitive(False)
self.combo_position.set_sensitive(False)
self.spin_width.set_sensitive(False)
self.spin_font_size.set_sensitive(False)
self.spin_corner.set_sensitive(False)
self.spin_border.set_sensitive(False)
self.spin_top_pad.set_sensitive(False)
self.spin_bottom_pad.set_sensitive(False)
self.spin_side_pad.set_sensitive(False)
def switch_caption_clicked(self, widget, gparam):
if widget.get_active():
self.spin_text_r.set_sensitive(True)
self.spin_text_g.set_sensitive(True)
self.spin_text_b.set_sensitive(True)
self.spin_text_a.set_sensitive(True)
self.spin_bg_r.set_sensitive(True)
self.spin_bg_g.set_sensitive(True)
self.spin_bg_b.set_sensitive(True)
self.spin_bg_a.set_sensitive(True)
self.combo_position.set_sensitive(True)
self.spin_width.set_sensitive(True)
self.spin_font_size.set_sensitive(True)
self.spin_corner.set_sensitive(True)
self.spin_border.set_sensitive(True)
self.spin_top_pad.set_sensitive(True)
self.spin_bottom_pad.set_sensitive(True)
self.spin_side_pad.set_sensitive(True)
else:
self.spin_text_r.set_sensitive(False)
self.spin_text_g.set_sensitive(False)
self.spin_text_b.set_sensitive(False)
self.spin_text_a.set_sensitive(False)
self.spin_bg_r.set_sensitive(False)
self.spin_bg_g.set_sensitive(False)
self.spin_bg_b.set_sensitive(False)
self.spin_bg_a.set_sensitive(False)
self.combo_position.set_sensitive(False)
self.spin_width.set_sensitive(False)
self.spin_font_size.set_sensitive(False)
self.spin_corner.set_sensitive(False)
self.spin_border.set_sensitive(False)
self.spin_top_pad.set_sensitive(False)
self.spin_bottom_pad.set_sensitive(False)
self.spin_side_pad.set_sensitive(False)
def button_ok_clicked(self, widget):
self.save_config()
self.run_prog()
self.destroy()
def button_cancel_clicked(self, widget):
self.destroy()
def button_apply_clicked(self, widget):
self.save_config()
self.run_prog()
win = MyWindow()
win.connect("destroy", Gtk.main_quit)
win.show_all()
Gtk.main()
# -)
| [
"logging.debug",
"gi.repository.Gtk.Grid",
"gi.repository.Gtk.Button",
"gi.repository.Gtk.Adjustment",
"gi.repository.Gtk.main",
"os.path.exists",
"gi.repository.Gtk.Stack",
"gi.repository.Gtk.SpinButton",
"subprocess.Popen",
"gi.repository.Gtk.Window.__init__",
"os.path.expanduser",
"gi.requi... | [((607, 639), 'gi.require_version', 'gi.require_version', (['"""Gtk"""', '"""3.0"""'], {}), "('Gtk', '3.0')\n", (625, 639), False, 'import gi\n'), ((778, 801), 'os.path.expanduser', 'os.path.expanduser', (['"""~"""'], {}), "('~')\n", (796, 801), False, 'import os\n'), ((812, 855), 'os.path.join', 'os.path.join', (['home_dir', "('.' + str_prog_name)"], {}), "(home_dir, '.' + str_prog_name)\n", (824, 855), False, 'import os\n'), ((868, 914), 'os.path.join', 'os.path.join', (['pic_dir', "(str_prog_name + '.conf')"], {}), "(pic_dir, str_prog_name + '.conf')\n", (880, 914), False, 'import os\n'), ((948, 993), 'os.path.join', 'os.path.join', (['pic_dir', "(str_prog_name + '.log')"], {}), "(pic_dir, str_prog_name + '.log')\n", (960, 993), False, 'import os\n'), ((1012, 1112), 'logging.basicConfig', 'logging.basicConfig', ([], {'filename': 'log_file', 'level': 'logging.DEBUG', 'format': '"""%(asctime)s - %(message)s"""'}), "(filename=log_file, level=logging.DEBUG, format=\n '%(asctime)s - %(message)s')\n", (1031, 1112), False, 'import logging\n'), ((31270, 31280), 'gi.repository.Gtk.main', 'Gtk.main', ([], {}), '()\n', (31278, 31280), False, 'from gi.repository import Gtk\n'), ((4343, 4385), 'gi.repository.Gtk.Window.__init__', 'Gtk.Window.__init__', (['self'], {'title': 'str_title'}), '(self, title=str_title)\n', (4362, 4385), False, 'from gi.repository import Gtk\n'), ((4722, 4733), 'gi.repository.Gtk.Stack', 'Gtk.Stack', ([], {}), '()\n', (4731, 4733), False, 'from gi.repository import Gtk\n'), ((4871, 4890), 'gi.repository.Gtk.StackSwitcher', 'Gtk.StackSwitcher', ([], {}), '()\n', (4888, 4890), False, 'from gi.repository import Gtk\n'), ((5205, 5252), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.HORIZONTAL'}), '(orientation=Gtk.Orientation.HORIZONTAL)\n', (5212, 5252), False, 'from gi.repository import Gtk\n'), ((5409, 5419), 'gi.repository.Gtk.Grid', 'Gtk.Grid', ([], {}), '()\n', (5417, 5419), False, 'from gi.repository import Gtk\n'), ((5563, 5597), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_enabled'}), '(label=str_label_enabled)\n', (5572, 5597), False, 'from gi.repository import Gtk\n'), ((5726, 5738), 'gi.repository.Gtk.Switch', 'Gtk.Switch', ([], {}), '()\n', (5736, 5738), False, 'from gi.repository import Gtk\n'), ((5928, 5975), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.HORIZONTAL'}), '(orientation=Gtk.Orientation.HORIZONTAL)\n', (5935, 5975), False, 'from gi.repository import Gtk\n'), ((6148, 6180), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_delay'}), '(label=str_label_delay)\n', (6157, 6180), False, 'from gi.repository import Gtk\n'), ((6343, 6388), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(60.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 60.0, 1.0, 5.0, 0.0)\n', (6357, 6388), False, 'from gi.repository import Gtk\n'), ((6521, 6571), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_delay', 'hexpand': '(True)'}), '(adjustment=adj_delay, hexpand=True)\n', (6535, 6571), False, 'from gi.repository import Gtk\n'), ((6789, 6823), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_caption'}), '(label=str_label_caption)\n', (6798, 6823), False, 'from gi.repository import Gtk\n'), ((6952, 6964), 'gi.repository.Gtk.Switch', 'Gtk.Switch', ([], {}), '()\n', (6962, 6964), False, 'from gi.repository import Gtk\n'), ((7154, 7201), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 
'Gtk.Orientation.HORIZONTAL'}), '(orientation=Gtk.Orientation.HORIZONTAL)\n', (7161, 7201), False, 'from gi.repository import Gtk\n'), ((7355, 7390), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_position'}), '(label=str_label_position)\n', (7364, 7390), False, 'from gi.repository import Gtk\n'), ((7587, 7605), 'gi.repository.Gtk.ComboBoxText', 'Gtk.ComboBoxText', ([], {}), '()\n', (7603, 7605), False, 'from gi.repository import Gtk\n'), ((8048, 8058), 'gi.repository.Gtk.Grid', 'Gtk.Grid', ([], {}), '()\n', (8056, 8058), False, 'from gi.repository import Gtk\n'), ((8164, 8175), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {}), '()\n', (8173, 8175), False, 'from gi.repository import Gtk\n'), ((8242, 8258), 'gi.repository.Gtk.HSeparator', 'Gtk.HSeparator', ([], {}), '()\n', (8256, 8258), False, 'from gi.repository import Gtk\n'), ((8499, 8544), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.VERTICAL'}), '(orientation=Gtk.Orientation.VERTICAL)\n', (8506, 8544), False, 'from gi.repository import Gtk\n'), ((8784, 8842), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.HORIZONTAL', 'spacing': '(5)'}), '(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)\n', (8791, 8842), False, 'from gi.repository import Gtk\n'), ((9099, 9132), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_text_r'}), '(label=str_label_text_r)\n', (9108, 9132), False, 'from gi.repository import Gtk\n'), ((9249, 9295), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(255.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 255.0, 1.0, 5.0, 0.0)\n', (9263, 9295), False, 'from gi.repository import Gtk\n'), ((9429, 9480), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_text_r', 'hexpand': '(True)'}), '(adjustment=adj_text_r, hexpand=True)\n', (9443, 9480), False, 'from gi.repository import Gtk\n'), ((9667, 9700), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_text_g'}), '(label=str_label_text_g)\n', (9676, 9700), False, 'from gi.repository import Gtk\n'), ((9817, 9863), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(255.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 255.0, 1.0, 5.0, 0.0)\n', (9831, 9863), False, 'from gi.repository import Gtk\n'), ((9997, 10048), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_text_g', 'hexpand': '(True)'}), '(adjustment=adj_text_g, hexpand=True)\n', (10011, 10048), False, 'from gi.repository import Gtk\n'), ((10235, 10268), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_text_b'}), '(label=str_label_text_b)\n', (10244, 10268), False, 'from gi.repository import Gtk\n'), ((10385, 10431), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(255.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 255.0, 1.0, 5.0, 0.0)\n', (10399, 10431), False, 'from gi.repository import Gtk\n'), ((10565, 10616), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_text_b', 'hexpand': '(True)'}), '(adjustment=adj_text_b, hexpand=True)\n', (10579, 10616), False, 'from gi.repository import Gtk\n'), ((10803, 10836), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_text_a'}), '(label=str_label_text_a)\n', (10812, 10836), False, 'from gi.repository import Gtk\n'), ((10953, 10999), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(100.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 100.0, 1.0, 5.0, 0.0)\n', (10967, 
10999), False, 'from gi.repository import Gtk\n'), ((11133, 11184), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_text_a', 'hexpand': '(True)'}), '(adjustment=adj_text_a, hexpand=True)\n', (11147, 11184), False, 'from gi.repository import Gtk\n'), ((11367, 11378), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {}), '()\n', (11376, 11378), False, 'from gi.repository import Gtk\n'), ((11439, 11455), 'gi.repository.Gtk.HSeparator', 'Gtk.HSeparator', ([], {}), '()\n', (11453, 11455), False, 'from gi.repository import Gtk\n'), ((11694, 11739), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.VERTICAL'}), '(orientation=Gtk.Orientation.VERTICAL)\n', (11701, 11739), False, 'from gi.repository import Gtk\n'), ((11973, 12031), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.HORIZONTAL', 'spacing': '(5)'}), '(orientation=Gtk.Orientation.HORIZONTAL, spacing=5)\n', (11980, 12031), False, 'from gi.repository import Gtk\n'), ((12276, 12307), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_bg_r'}), '(label=str_label_bg_r)\n', (12285, 12307), False, 'from gi.repository import Gtk\n'), ((12418, 12464), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(255.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 255.0, 1.0, 5.0, 0.0)\n', (12432, 12464), False, 'from gi.repository import Gtk\n'), ((12596, 12645), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_bg_r', 'hexpand': '(True)'}), '(adjustment=adj_bg_r, hexpand=True)\n', (12610, 12645), False, 'from gi.repository import Gtk\n'), ((12822, 12853), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_bg_g'}), '(label=str_label_bg_g)\n', (12831, 12853), False, 'from gi.repository import Gtk\n'), ((12964, 13010), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(255.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 255.0, 1.0, 5.0, 0.0)\n', (12978, 13010), False, 'from gi.repository import Gtk\n'), ((13142, 13191), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_bg_g', 'hexpand': '(True)'}), '(adjustment=adj_bg_g, hexpand=True)\n', (13156, 13191), False, 'from gi.repository import Gtk\n'), ((13368, 13399), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_bg_b'}), '(label=str_label_bg_b)\n', (13377, 13399), False, 'from gi.repository import Gtk\n'), ((13510, 13556), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(255.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 255.0, 1.0, 5.0, 0.0)\n', (13524, 13556), False, 'from gi.repository import Gtk\n'), ((13688, 13737), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_bg_b', 'hexpand': '(True)'}), '(adjustment=adj_bg_b, hexpand=True)\n', (13702, 13737), False, 'from gi.repository import Gtk\n'), ((13914, 13945), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_bg_a'}), '(label=str_label_bg_a)\n', (13923, 13945), False, 'from gi.repository import Gtk\n'), ((14056, 14102), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(100.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 100.0, 1.0, 5.0, 0.0)\n', (14070, 14102), False, 'from gi.repository import Gtk\n'), ((14234, 14283), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_bg_a', 'hexpand': '(True)'}), '(adjustment=adj_bg_a, hexpand=True)\n', (14248, 14283), False, 'from gi.repository import Gtk\n'), ((14652, 14662), 
'gi.repository.Gtk.Grid', 'Gtk.Grid', ([], {}), '()\n', (14660, 14662), False, 'from gi.repository import Gtk\n'), ((14843, 14875), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_width'}), '(label=str_label_width)\n', (14852, 14875), False, 'from gi.repository import Gtk\n'), ((14988, 15035), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(1000.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 1000.0, 1.0, 5.0, 0.0)\n', (15002, 15035), False, 'from gi.repository import Gtk\n'), ((15168, 15218), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_width', 'hexpand': '(True)'}), '(adjustment=adj_width, hexpand=True)\n', (15182, 15218), False, 'from gi.repository import Gtk\n'), ((15403, 15439), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_font_size'}), '(label=str_label_font_size)\n', (15412, 15439), False, 'from gi.repository import Gtk\n'), ((15564, 15609), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(50.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 50.0, 1.0, 5.0, 0.0)\n', (15578, 15609), False, 'from gi.repository import Gtk\n'), ((15746, 15800), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_font_size', 'hexpand': '(True)'}), '(adjustment=adj_font_size, hexpand=True)\n', (15760, 15800), False, 'from gi.repository import Gtk\n'), ((16014, 16047), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_corner'}), '(label=str_label_corner)\n', (16023, 16047), False, 'from gi.repository import Gtk\n'), ((16163, 16208), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(50.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 50.0, 1.0, 5.0, 0.0)\n', (16177, 16208), False, 'from gi.repository import Gtk\n'), ((16342, 16393), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_corner', 'hexpand': '(True)'}), '(adjustment=adj_corner, hexpand=True)\n', (16356, 16393), False, 'from gi.repository import Gtk\n'), ((16579, 16612), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_border'}), '(label=str_label_border)\n', (16588, 16612), False, 'from gi.repository import Gtk\n'), ((16728, 16773), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(50.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 50.0, 1.0, 5.0, 0.0)\n', (16742, 16773), False, 'from gi.repository import Gtk\n'), ((16907, 16958), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_border', 'hexpand': '(True)'}), '(adjustment=adj_border, hexpand=True)\n', (16921, 16958), False, 'from gi.repository import Gtk\n'), ((17145, 17179), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_top_pad'}), '(label=str_label_top_pad)\n', (17154, 17179), False, 'from gi.repository import Gtk\n'), ((17298, 17344), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(100.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 100.0, 1.0, 5.0, 0.0)\n', (17312, 17344), False, 'from gi.repository import Gtk\n'), ((17479, 17531), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_top_pad', 'hexpand': '(True)'}), '(adjustment=adj_top_pad, hexpand=True)\n', (17493, 17531), False, 'from gi.repository import Gtk\n'), ((17725, 17762), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_bottom_pad'}), '(label=str_label_bottom_pad)\n', (17734, 17762), False, 'from gi.repository import Gtk\n'), ((17890, 17936), 'gi.repository.Gtk.Adjustment', 
'Gtk.Adjustment', (['(0.0)', '(0.0)', '(100.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 100.0, 1.0, 5.0, 0.0)\n', (17904, 17936), False, 'from gi.repository import Gtk\n'), ((18074, 18129), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_bottom_pad', 'hexpand': '(True)'}), '(adjustment=adj_bottom_pad, hexpand=True)\n', (18088, 18129), False, 'from gi.repository import Gtk\n'), ((18349, 18384), 'gi.repository.Gtk.Label', 'Gtk.Label', ([], {'label': 'str_label_side_pad'}), '(label=str_label_side_pad)\n', (18358, 18384), False, 'from gi.repository import Gtk\n'), ((18506, 18552), 'gi.repository.Gtk.Adjustment', 'Gtk.Adjustment', (['(0.0)', '(0.0)', '(100.0)', '(1.0)', '(5.0)', '(0.0)'], {}), '(0.0, 0.0, 100.0, 1.0, 5.0, 0.0)\n', (18520, 18552), False, 'from gi.repository import Gtk\n'), ((18688, 18741), 'gi.repository.Gtk.SpinButton', 'Gtk.SpinButton', ([], {'adjustment': 'adj_side_pad', 'hexpand': '(True)'}), '(adjustment=adj_side_pad, hexpand=True)\n', (18702, 18741), False, 'from gi.repository import Gtk\n'), ((19112, 19171), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.HORIZONTAL', 'spacing': '(20)'}), '(orientation=Gtk.Orientation.HORIZONTAL, spacing=20)\n', (19119, 19171), False, 'from gi.repository import Gtk\n'), ((19238, 19269), 'gi.repository.Gtk.Button', 'Gtk.Button', ([], {'label': 'str_button_ok'}), '(label=str_button_ok)\n', (19248, 19269), False, 'from gi.repository import Gtk\n'), ((19414, 19449), 'gi.repository.Gtk.Button', 'Gtk.Button', ([], {'label': 'str_button_cancel'}), '(label=str_button_cancel)\n', (19424, 19449), False, 'from gi.repository import Gtk\n'), ((19605, 19639), 'gi.repository.Gtk.Button', 'Gtk.Button', ([], {'label': 'str_button_apply'}), '(label=str_button_apply)\n', (19615, 19639), False, 'from gi.repository import Gtk\n'), ((19906, 19963), 'gi.repository.Gtk.Box', 'Gtk.Box', ([], {'orientation': 'Gtk.Orientation.VERTICAL', 'spacing': '(50)'}), '(orientation=Gtk.Orientation.VERTICAL, spacing=50)\n', (19913, 19963), False, 'from gi.repository import Gtk\n'), ((21844, 21869), 'os.path.exists', 'os.path.exists', (['conf_file'], {}), '(conf_file)\n', (21858, 21869), False, 'import os\n'), ((27878, 27898), 'logging.debug', 'logging.debug', (['"""GUI"""'], {}), "('GUI')\n", (27891, 27898), False, 'import logging\n'), ((28021, 28048), 'subprocess.Popen', 'subprocess.Popen', (['cmd_array'], {}), '(cmd_array)\n', (28037, 28048), False, 'import subprocess\n')] |
import vtk
class Scene(object):
def __init__(self):
self.sceneSources = list()
self.sceneMappers = list()
self.sceneActors = list()
self.sceneLights = list()
self.sceneSources.append(vtk.vtkCubeSource())
self.sceneSources[-1].SetXLength(50000)
self.sceneSources[-1].SetYLength(50000)
self.sceneSources[-1].SetZLength(5)
# self.sceneMappers.append(vtk.vtkPolyDataMapper())
# self.sceneMappers[-1].SetInputConnection(self.sceneSources[-1].GetOutputPort())
reader = vtk.vtkJPEGReader()
reader.SetFileName("blackandwhite.jpg")
# reader.SetFileName("white.jpg")
# Create texture object
texture = vtk.vtkTexture()
texture.SetInputConnection(reader.GetOutputPort())
texture.RepeatOn()
#Map texture coordinates
map_to_plane = vtk.vtkTextureMapToPlane()
map_to_plane.SetInputConnection(self.sceneSources[-1].GetOutputPort())
# Create mapper and set the mapped texture as input
mapperplane = vtk.vtkPolyDataMapper()
mapperplane.SetInputConnection(map_to_plane.GetOutputPort())
self.sceneActors.append(vtk.vtkActor())
self.sceneActors[-1].RotateX(90)
self.sceneActors[-1].SetPosition(1300,-800,2500) # -1200
self.sceneActors[-1].SetMapper(mapperplane)
self.sceneActors[-1].SetTexture(texture)
# self.sceneActors[-1].GetProperty().SetColor(1,1,1)
self.addLight(1.0, 1.0, 1.0, 1000, 1000, -1000, 0.75, 180, 0.75)
self.addLight(1.0, 1.0, 1.0, -1000, 500, 1000, 0.5, 180, 0.0)
        self.addLight(1.0, 1.0, 1.0, -1000, 500, -1000, 0.5, 180, 0.0)
def addLight(self, cR, cG, cB, pX, pY, pZ, Intensity, ConeAngle, Attenuation):
self.sceneLights.append(vtk.vtkLight())
self.sceneLights[-1].SetColor(cR, cG, cB)
self.sceneLights[-1].SetPosition(pX, pY, pZ)
self.sceneLights[-1].SetIntensity(Intensity)
self.sceneLights[-1].SetConeAngle(ConeAngle)
self.sceneLights[-1].SetShadowAttenuation(Attenuation)
self.sceneLights[-1].SetLightTypeToSceneLight()
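# Hedged usage sketch (renderer wiring assumed, not part of this file):
#   scene = Scene()
#   renderer = vtk.vtkRenderer()
#   for actor in scene.sceneActors:
#       renderer.AddActor(actor)
#   for light in scene.sceneLights:
#       renderer.AddLight(light)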
| [
"vtk.vtkJPEGReader",
"vtk.vtkTexture",
"vtk.vtkPolyDataMapper",
"vtk.vtkActor",
"vtk.vtkLight",
"vtk.vtkCubeSource",
"vtk.vtkTextureMapToPlane"
] | [((561, 580), 'vtk.vtkJPEGReader', 'vtk.vtkJPEGReader', ([], {}), '()\n', (578, 580), False, 'import vtk\n'), ((722, 738), 'vtk.vtkTexture', 'vtk.vtkTexture', ([], {}), '()\n', (736, 738), False, 'import vtk\n'), ((882, 908), 'vtk.vtkTextureMapToPlane', 'vtk.vtkTextureMapToPlane', ([], {}), '()\n', (906, 908), False, 'import vtk\n'), ((1071, 1094), 'vtk.vtkPolyDataMapper', 'vtk.vtkPolyDataMapper', ([], {}), '()\n', (1092, 1094), False, 'import vtk\n'), ((230, 249), 'vtk.vtkCubeSource', 'vtk.vtkCubeSource', ([], {}), '()\n', (247, 249), False, 'import vtk\n'), ((1197, 1211), 'vtk.vtkActor', 'vtk.vtkActor', ([], {}), '()\n', (1209, 1211), False, 'import vtk\n'), ((1812, 1826), 'vtk.vtkLight', 'vtk.vtkLight', ([], {}), '()\n', (1824, 1826), False, 'import vtk\n')] |
import sys
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import random
from cobras_ts.querier import Querier
from IPython import display
def _query_yes_no(question, default="yes"):
    """Ask a yes/no question via input() and return the answer.
"question" is a string that is presented to the user.
"default" is the presumed answer if the user just hits <Enter>.
It must be "yes" (the default), "no" or None (meaning
an answer is required of the user).
The "answer" return value is True for "yes" or False for "no".
Taken from: http://code.activestate.com/recipes/577058/
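    Example: _query_yes_no("Continue?") keeps prompting until the reply maps
    to True/False; a bare Enter returns the default ("yes" -> True).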
"""
valid = {"yes": True, "y": True, "ye": True,
"no": False, "n": False}
if default is None:
prompt = " [y/n] "
elif default == "yes":
prompt = " [Y/n] "
elif default == "no":
prompt = " [y/N] "
else:
raise ValueError("invalid default answer: '%s'" % default)
while True:
sys.stdout.write(question + prompt)
choice = input().lower()
if default is not None and choice == '':
return valid[default]
elif choice in valid:
return valid[choice]
else:
sys.stdout.write("Please respond with 'yes' or 'no' "
"(or 'y' or 'n').\n")
class NotebookQuerierImages(Querier):
def __init__(self, fns):
super(NotebookQuerierImages, self).__init__()
self.fns = fns
plt.figure(figsize=(20,20))
def query_points(self, idx1, idx2):
plt.clf()
plt.subplot(1,2,1)
print(idx1)
img = mpimg.imread(self.fns[idx1])
imgplot = plt.imshow(img)
plt.subplot(1,2,2)
img = mpimg.imread(self.fns[idx2])
imgplot = plt.imshow(img)
display.clear_output(wait=True)
display.display(plt.gcf())
return _query_yes_no(
"Should the following instances be in the same cluster? " + str(idx1) + " and " + str(idx2))
def update_clustering(self, clustering):
plt.clf()
plt.subplots_adjust(wspace=0.2, hspace=0.0)
n_clusters = len(clustering.clusters)
for cluster_idx, cluster in enumerate(clustering.clusters):
idxs = cluster.get_all_points()
n_to_plot = min(5, len(idxs))
random_selection = random.sample(idxs, n_to_plot)
for idx, pt_idx in enumerate(random_selection):
plt.subplot(len(clustering.clusters),5,cluster_idx * 5 + idx+1)
img = mpimg.imread(self.fns[pt_idx])
imgplot = plt.imshow(img)
#plt.subplot(1,n_clusters,cluster_idx+1)
#plt.plot(self.fns[clusterid, :], alpha=0.5)
display.clear_output(wait=True)
display.display(plt.gcf())
return _query_yes_no("Continue querying?") | [
"matplotlib.pyplot.imshow",
"random.sample",
"matplotlib.pyplot.gcf",
"matplotlib.image.imread",
"matplotlib.pyplot.clf",
"IPython.display.clear_output",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.subplots_adjust",
"sys.stdout.write"
] | [((990, 1025), 'sys.stdout.write', 'sys.stdout.write', (['(question + prompt)'], {}), '(question + prompt)\n', (1006, 1025), False, 'import sys\n'), ((1490, 1518), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 20)'}), '(figsize=(20, 20))\n', (1500, 1518), True, 'import matplotlib.pyplot as plt\n'), ((1567, 1576), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (1574, 1576), True, 'import matplotlib.pyplot as plt\n'), ((1585, 1605), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (1596, 1605), True, 'import matplotlib.pyplot as plt\n'), ((1638, 1666), 'matplotlib.image.imread', 'mpimg.imread', (['self.fns[idx1]'], {}), '(self.fns[idx1])\n', (1650, 1666), True, 'import matplotlib.image as mpimg\n'), ((1685, 1700), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1695, 1700), True, 'import matplotlib.pyplot as plt\n'), ((1709, 1729), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (1720, 1729), True, 'import matplotlib.pyplot as plt\n'), ((1742, 1770), 'matplotlib.image.imread', 'mpimg.imread', (['self.fns[idx2]'], {}), '(self.fns[idx2])\n', (1754, 1770), True, 'import matplotlib.image as mpimg\n'), ((1789, 1804), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (1799, 1804), True, 'import matplotlib.pyplot as plt\n'), ((1813, 1844), 'IPython.display.clear_output', 'display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (1833, 1844), False, 'from IPython import display\n'), ((2071, 2080), 'matplotlib.pyplot.clf', 'plt.clf', ([], {}), '()\n', (2078, 2080), True, 'import matplotlib.pyplot as plt\n'), ((2089, 2132), 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'wspace': '(0.2)', 'hspace': '(0.0)'}), '(wspace=0.2, hspace=0.0)\n', (2108, 2132), True, 'import matplotlib.pyplot as plt\n'), ((2757, 2788), 'IPython.display.clear_output', 'display.clear_output', ([], {'wait': '(True)'}), '(wait=True)\n', (2777, 2788), False, 'from IPython import display\n'), ((1869, 1878), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (1876, 1878), True, 'import matplotlib.pyplot as plt\n'), ((2364, 2394), 'random.sample', 'random.sample', (['idxs', 'n_to_plot'], {}), '(idxs, n_to_plot)\n', (2377, 2394), False, 'import random\n'), ((2813, 2822), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (2820, 2822), True, 'import matplotlib.pyplot as plt\n'), ((1231, 1303), 'sys.stdout.write', 'sys.stdout.write', (['"""Please respond with \'yes\' or \'no\' (or \'y\' or \'n\').\n"""'], {}), '("Please respond with \'yes\' or \'no\' (or \'y\' or \'n\').\\n")\n', (1247, 1303), False, 'import sys\n'), ((2558, 2588), 'matplotlib.image.imread', 'mpimg.imread', (['self.fns[pt_idx]'], {}), '(self.fns[pt_idx])\n', (2570, 2588), True, 'import matplotlib.image as mpimg\n'), ((2615, 2630), 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {}), '(img)\n', (2625, 2630), True, 'import matplotlib.pyplot as plt\n')] |
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from dataclasses import dataclass
from argparse import ArgumentParser
from tqdm import tqdm
from torch.utils.data import DataLoader
from data import VCTKAudio
from model import WaveNet
def set_option():
parser = ArgumentParser()
parser.add_argument('--DEVICE', default='cuda', type=str)
parser.add_argument('--epoch', default=20, type=int)
parser.add_argument('--lr', default=1e-3, type=float)
parser.add_argument('--batch_sz', default=32, type=int)
parser.add_argument('--num_class', default=256, type=int)
parser.add_argument('--clip', default=1.0, type=float)
parser.add_argument('--max_itr', default=10_000, type=int)
parser.add_argument('--src_len', default=1024 + 64, type=int)
parser.add_argument('--tgt_len', default=64, type=int)
parser.add_argument('--num_block', default=4, type=int)
parser.add_argument('--num_layer', default=10, type=int)
parser.add_argument('--residual_dim', default=32, type=int)
parser.add_argument('--dilation_dim', default=128, type=int)
parser.add_argument('--skip_dim', default=256, type=int)
parser.add_argument('--kernel_size', default=2, type=int)
parser.add_argument('--bias', default=False, type=bool)
parser.add_argument('--loss_update_itr', default=100, type=int)
parser.add_argument('--ckpt_dir', default='ckpt', type=str)
parser.add_argument('--ckpt_name', default='', type=str)
parser.add_argument('--dataset_path', default='dataset.npz', type=str)
return parser.parse_args()
def save_ckpt(ckpt_path, model, optimizer, misc=None):
is_train = model.training
device = next(model.parameters()).device
# eval mode and cpu
model.eval()
model.cpu()
# save checkpoint
torch.save({
'model_state_dict': model.state_dict(),
'optimizer_state_dict': optimizer.state_dict(),
'last_epoch': misc['epoch'],
'losses': misc['losses'],
}, ckpt_path)
# recover mode and device
if is_train:
model.train()
model.to(device)
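# Hedged restore sketch (mirrors the checkpoint-loading block in __main__ below):
#   ckpt = torch.load('ckpt/0.pt')
#   model.load_state_dict(ckpt['model_state_dict'])
#   optimizer.load_state_dict(ckpt['optimizer_state_dict'])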
if __name__ == '__main__':
opt = set_option()
os.makedirs(opt.ckpt_dir, exist_ok=True)
# prepare dataset and dataloader
dataset = VCTKAudio(opt.dataset_path, opt.src_len, opt.tgt_len, opt.num_class) # TODO: give parameter accordingly
dataloader = DataLoader(dataset,
batch_size=opt.batch_sz,
shuffle=True,
num_workers=2)
pbar = tqdm(range(opt.epoch * min(opt.max_itr, len(dataloader))))
# prepare model
model = WaveNet(
num_block = opt.num_block,
num_layer = opt.num_layer, # 10,
class_dim = opt.num_class,
residual_dim = opt.residual_dim,
dilation_dim = opt.dilation_dim,
skip_dim = opt.skip_dim,
kernel_size = opt.kernel_size,
bias=opt.bias
)
# prepare optimizer
loss_fn = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=opt.lr)
losses = []
last_epoch = 0
# load from checkpoint
if opt.ckpt_name:
ckpt = torch.load(os.path.join(opt.ckpt_dir, opt.ckpt_name))
model.load_state_dict(ckpt['model_state_dict'])
optimizer.load_state_dict(ckpt['optimizer_state_dict'])
last_epoch = ckpt['last_epoch']
losses = ckpt['losses']
# load model to device
model.train()
model.to(opt.DEVICE)
# train
for e in range(last_epoch, opt.epoch):
accum_loss = 0
for idx, batch in enumerate(dataloader):
src, tgt = batch['src'].to(opt.DEVICE), batch['tgt'].to(opt.DEVICE)
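            # keep only the last tgt_len outputs: the leading part of src only
            # provides receptive-field context for the model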
            pred = model(src)[:, :, -opt.tgt_len:]
loss = loss_fn(pred, tgt)
optimizer.zero_grad()
loss.backward()
torch.nn.utils.clip_grad_norm_(model.parameters(), opt.clip)
optimizer.step()
accum_loss += loss.item()
pbar.update()
if (idx + 1) % opt.loss_update_itr == 0:
avg_loss = accum_loss / opt.loss_update_itr
pbar.set_description(f"Epoch {round(e + idx / min(opt.max_itr, len(dataloader)), 2)} | Loss: {round(avg_loss, 5)}")
losses.append(avg_loss)
accum_loss = 0
if idx + 1 == opt.max_itr:
break
# save checkpoint
save_ckpt(os.path.join(opt.ckpt_dir, str(e) + '.pt'),
model,
optimizer,
{"epoch": e + 1,
"losses": losses})
| [
"data.VCTKAudio",
"os.makedirs",
"argparse.ArgumentParser",
"torch.nn.CrossEntropyLoss",
"os.path.join",
"torch.utils.data.DataLoader",
"model.WaveNet"
] | [((324, 340), 'argparse.ArgumentParser', 'ArgumentParser', ([], {}), '()\n', (338, 340), False, 'from argparse import ArgumentParser\n'), ((2195, 2235), 'os.makedirs', 'os.makedirs', (['opt.ckpt_dir'], {'exist_ok': '(True)'}), '(opt.ckpt_dir, exist_ok=True)\n', (2206, 2235), False, 'import os\n'), ((2288, 2356), 'data.VCTKAudio', 'VCTKAudio', (['opt.dataset_path', 'opt.src_len', 'opt.tgt_len', 'opt.num_class'], {}), '(opt.dataset_path, opt.src_len, opt.tgt_len, opt.num_class)\n', (2297, 2356), False, 'from data import VCTKAudio\n'), ((2412, 2485), 'torch.utils.data.DataLoader', 'DataLoader', (['dataset'], {'batch_size': 'opt.batch_sz', 'shuffle': '(True)', 'num_workers': '(2)'}), '(dataset, batch_size=opt.batch_sz, shuffle=True, num_workers=2)\n', (2422, 2485), False, 'from torch.utils.data import DataLoader\n'), ((2676, 2896), 'model.WaveNet', 'WaveNet', ([], {'num_block': 'opt.num_block', 'num_layer': 'opt.num_layer', 'class_dim': 'opt.num_class', 'residual_dim': 'opt.residual_dim', 'dilation_dim': 'opt.dilation_dim', 'skip_dim': 'opt.skip_dim', 'kernel_size': 'opt.kernel_size', 'bias': 'opt.bias'}), '(num_block=opt.num_block, num_layer=opt.num_layer, class_dim=opt.\n num_class, residual_dim=opt.residual_dim, dilation_dim=opt.dilation_dim,\n skip_dim=opt.skip_dim, kernel_size=opt.kernel_size, bias=opt.bias)\n', (2683, 2896), False, 'from model import WaveNet\n'), ((3023, 3044), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (3042, 3044), True, 'import torch.nn as nn\n'), ((3215, 3256), 'os.path.join', 'os.path.join', (['opt.ckpt_dir', 'opt.ckpt_name'], {}), '(opt.ckpt_dir, opt.ckpt_name)\n', (3227, 3256), False, 'import os\n')] |
import os
def remove_comments_and_crlf(inp_path, comment_string=';', overwrite=False):
tmpfilename = os.path.splitext(os.path.basename(inp_path))[0] + '_mod.inp'
tmpfilepath = os.path.join(os.path.dirname(inp_path), tmpfilename)
    with open(inp_path) as oldf:
with open(tmpfilepath, 'w') as newf:
for line in oldf:
                if comment_string in line:
#remove the comments
if line.strip()[0] == comment_string:
#skip the whole line
pass
else:
#write the line to the left of the comment
                        non_comment_line = line.split(comment_string)[0]
newf.write(non_comment_line + '\n')
elif line == '\n':
pass
else:
newf.write(line)
if overwrite:
os.remove(inp_path)
os.rename(tmpfilepath, inp_path)
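# Hedged usage sketch (file names are placeholders):
#   remove_comments_and_crlf('model.inp')                  # writes model_mod.inp next to the input
#   remove_comments_and_crlf('model.inp', overwrite=True)  # cleans model.inp in place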
def line_by_line(path1, path2, outfile):
"""
given paths to two INP files, return a text file showing where differences
occur in line-by-line fashion. If the order of elements do not match, this
will be recorded as a difference.
ignores any spaces in a file such that lines with more or less white space
having the same non-whitespace will be considered equal.
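    e.g. line_by_line('run1.inp', 'run2.inp', 'diff.txt') records each line of
    path1 whose non-whitespace content differs from the corresponding line of
    path2 (the .inp names here are placeholders).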
"""
#outfile =r"P:\06_Tools\v_control\Testing\cleaned\linebyline.txt"
with open(outfile, 'w') as diff_file:
        with open(path1) as f1:
with open(path2) as f2:
                line1 = next(f1, None)  # default of None avoids StopIteration at EOF
                line2 = next(f2, None)
                while line1 and line2:
                    #replace all white space to check only actual content
                    if line1.replace(" ", "") != line2.replace(" ", ""):
                        diff_file.write(line1)
                    line1 = next(f1, None)
                    line2 = next(f2, None)
| [
"os.path.dirname",
"os.rename",
"os.path.basename",
"os.remove"
] | [((199, 224), 'os.path.dirname', 'os.path.dirname', (['inp_path'], {}), '(inp_path)\n', (214, 224), False, 'import os\n'), ((916, 935), 'os.remove', 'os.remove', (['inp_path'], {}), '(inp_path)\n', (925, 935), False, 'import os\n'), ((944, 976), 'os.rename', 'os.rename', (['tmpfilepath', 'inp_path'], {}), '(tmpfilepath, inp_path)\n', (953, 976), False, 'import os\n'), ((124, 150), 'os.path.basename', 'os.path.basename', (['inp_path'], {}), '(inp_path)\n', (140, 150), False, 'import os\n')] |
import re
import sys  # needed for sys.exit below
import pprint
pp = pprint.PrettyPrinter(indent=4)
from sys import version_info # py3, for checking type of input
def combine_messages(messages):
""" Combines messages that have one or more integers in them, such as
"trial001" "trial002", into a single message like "trial# (#=1-2)".
This is to reduce the number of messages required to be displayed.
Operates by creating the following structure, named "ti" for "template info":
{
't2tn': {} - maps each template (containing "#") to a template number (tn)
'tn2t': [] - list of templates, indexed by the template number
'm2tns': {} - maps each message number (index in messages) to
array of template numbers (tns)
'tn2dm': {} - maps each template number to a dictionary that has as keys the digits
used to make the template, and with value the message number used to make the template
with those digits. i.e.:
{ tn1: {d1: m1, d2: m2}, tn2: {d3: m3, d4: m4}, tn2: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
'tn2md': {} - maps each template number of a dictionary that has keys the message number
and value the digits used to make the message. These reverse the key-values in 'tn2dm', e.g.:
{ tn1: {m1: d1, m2: d2}, tn2: {m3: d3, m4: d4}, tn2: { ...}}
where:
tn - template number
d: m - digits used to make template from message number m
This array is used to dynamically remove entries in 'tn2dm' as each message in a
template is displayed so that structure always has an accurate list of remaining messages.
'mout': [] - messages to display (output), formed by combining messages
'mfin': [] - set of message numbers "finished" (already included in mout).
}
This function works by first creating everything except mout and mfin, then
going through each message, finding the template numbers that have the most
digits, and using those to make the combined message.
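    Example (hedged sketch of the intended behavior, not from the original tests):
        combine_messages(["trial001 ok", "trial002 ok", "no digits here"])
        -> ["trial# ok (#=1-2)", "no digits here"]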
"""
ti = {}
ti['t2tn'] = {}
ti['tn2t'] = []
ti['m2tns'] = {}
ti['tn2dm'] = {}
ti['tn2md'] = {}
# debug_msg = "/acquisition/timeseries/fov_15002_17/data"
# debug_mn = -1
for mn in range(len(messages)):
msg = messages[mn]
if version_info[0] > 2:
assert isinstance(msg, str), "in Python 3, messages must be str (unicode) type"
# if msg.startswith(debug_msg):
# debug_mn = mn
        found_nums = re.findall(r"\d+", msg)
if not found_nums:
# no numbers found, don't process
continue
# remove any duplicates
found_nums = list(set(found_nums))
for digits in found_nums:
            pattern = r"(?<!\d)%s(?!\d)" % digits # substitute only if digits not surrounded by other digits
template = re.sub(pattern, "#", msg) # make template for this message and digits
if template not in ti['t2tn']:
tn = len(ti['tn2t']) # template number
ti['tn2t'].append(template) # add template to list of templates
ti['t2tn'][template] = tn # add entry to map of template to template number
else:
tn = ti['t2tn'][template]
# save template number (tn) in 'm2tns'
if mn not in ti['m2tns']:
ti['m2tns'][mn] = [tn,]
else:
ti['m2tns'][mn].append(tn)
# save template number, digits and message number in 'tn2dm'
idigits = int(digits)
if tn not in ti['tn2dm']:
ti['tn2dm'][tn] = {idigits: mn}
ti['tn2md'][tn] = {mn: idigits}
else:
                if idigits in ti['tn2dm'][tn]:
print ("duplicate message found: %s" % msg)
break
ti['tn2dm'][tn][idigits] = mn
ti['tn2md'][tn][mn] = idigits
# done building needed structures. Now generate 'output' (i.e. ti['mfin'] and ti['mout']
ti['mout'] = []
ti['mfin'] = set([])
for mn in range(len(messages)):
# if mn == debug_mn:
# print ("found mn %i '%s'" % (debug_mn, debug_msg))
# import pdb; pdb.set_trace()
if mn in ti['mfin']:
# message has already been displayed (using a template)
continue
if mn not in ti['m2tns']:
# no digits found in this message, just display as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
        # this message has at least one pattern.  Find the template shared by the largest
        # number of other messages that have not been displayed yet
# build list of pairs, (a, b); a - template number, b - number of messages in template
tn_nm_pairs = [ (tn, len(ti['tn2dm'][tn])) for tn in ti['m2tns'][mn] ]
# get those pairs that have the largest number of messages
ltn_nm_pairs = largest_pairs(tn_nm_pairs)
# nmax = 0
# for tn in ti['m2tns'][mn]:
# dm = ti['tn2dm'][tn]
# num_messages = len(ti['tn2dm'][tn]) # num messages associated with this template
# if num_messages > nmax:
# max_tn = [tn]
# nmax = num_messages
# elif num_messages == nmax:
# # multiple templates have the same number of messages, will need to select
# # one in a deterministic way
# max_tn.append(tn)
# # if no other messages use pattern, just display as is
# if nmax == 1:
if ltn_nm_pairs[0][1] == 1:
            # only one message uses this pattern, just display it as is
ti['mout'].append(messages[mn])
ti['mfin'].add(mn)
continue
# if len(max_tn) > 1:
if len(ltn_nm_pairs) == 1:
# only one template found that has maximal number of messages. use it.
max_tn = ltn_nm_pairs[0][0]
else:
# multiple templates have the same maximal number of messages. Select the one
# with the rightmost position of '#' in the template
# build list of pairs, (a,b): a - template number, b - index of '#' in template
tn_ix_pairs = [ (ltn_nm_pairs[i][0], ti['tn2t'][ltn_nm_pairs[i][0]].index('#'))
for i in range(len(ltn_nm_pairs))]
tn_ix_pairs = largest_pairs(tn_ix_pairs)
if len(tn_ix_pairs) > 1:
# should never happen since templates made for the same message cannot have
# the same position for the '#'
sys.exit("found multiple templates with same maximal number of messages and same template")
# use the template found
max_tn = tn_ix_pairs[0][0]
        # other messages use this template.  Get the message numbers and digits that share it
s_digits = list(ti['tn2dm'][max_tn].keys()) # shared digits
s_mns = list(ti['tn2dm'][max_tn].values()) # shared message numbers
# update tn2dm to remove messages that will be displayed shortly (in this template)
        for smn in s_mns:  # distinct name so the outer 'mn' loop variable is not shadowed
            for tn in ti['m2tns'][smn]:
                idigit = ti['tn2md'][tn][smn]
                del ti['tn2dm'][tn][idigit]
# make new message by combining shared digits with template
template = ti['tn2t'][max_tn]
        # digits are already stored as ints; just sort them
        i_digits = sorted(s_digits)
# make string representing ranges of digits
prevn = i_digits[0] # initialize previous number to first
sr = str(prevn) # string of ranges being generated
in_range = False
for i in range(1, len(i_digits)):
newn = i_digits[i]
if newn == prevn + 1:
# in a range
in_range = True
else:
                # not in a range; but if we were previously, save the end of that range
if in_range:
sr = "%s-%i" % (sr, prevn)
in_range = False
# save new number
sr = "%s,%i" % (sr, newn)
prevn = newn
        # close the final range if the last numbers were consecutive
if in_range:
sr = "%s-%i" % (sr, newn)
new_message = template + " (#=%s)" % sr
ti['mout'].append(new_message)
# add all messages that share this template to ti['mfin'] so they are not displayed again
ti['mfin'].update(s_mns)
# return list of combined messages
return ti['mout']
def largest_pairs(pairs):
""""Input is a list of two-element tuples, e.g. [(5, 4), (2, 7), ...]
Output is list of those, which have the largest 2nd element, e.g. [(2,7)]"""
    largest = None
    lpairs = []  # initialize so an empty input returns an empty list
    for pair in pairs:
        a, b = pair
        if largest is None or b > largest:
            largest = b
            lpairs = [pair]
        elif b == largest:
            lpairs.append(pair)
    return lpairs
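# Usage example (illustrative):
#   largest_pairs([(5, 4), (2, 7), (9, 7)])  ->  [(2, 7), (9, 7)]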
def test_combine_messages():
""" tests combine_messages function"""
messages = [
"some prefix trial-none",
"some prefix trial23",
"some prefix trial23/timestamps",
"some prefix trial23 timestamps",
"some prefix trial23\ntimestamps",
"some prefix 32-bits, trial32",
"some prefix 32-bits, trial33",
"some prefix 32-bits, trial34",
"some prefix 32-bits, trial35",
"some prefix trial-11",
"some prefix trial23 and trial23 again",
"some prefix trial27",
"some prefix trial27/timestamps",
"some prefix trial27 timestamps",
"some prefix trial27\ntimestamps",
"some prefix 32-bits, trial27",
"some prefix trial27 and trial27 again"]
cm = combine_messages(messages)
pp.pprint(cm)
if __name__ == '__main__':
test_combine_messages()
| [
"re.sub",
"re.findall",
"pprint.PrettyPrinter"
] | [((29, 59), 'pprint.PrettyPrinter', 'pprint.PrettyPrinter', ([], {'indent': '(4)'}), '(indent=4)\n', (49, 59), False, 'import pprint\n'), ((2601, 2624), 're.findall', 're.findall', (['"""\\\\d+"""', 'msg'], {}), "('\\\\d+', msg)\n", (2611, 2624), False, 'import re\n'), ((2959, 2984), 're.sub', 're.sub', (['pattern', '"""#"""', 'msg'], {}), "(pattern, '#', msg)\n", (2965, 2984), False, 'import re\n')] |
# Generated by Django 3.0.1 on 2020-01-02 22:21
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='CustomerProfile',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('account_number', models.CharField(max_length=8)),
('userid', models.CharField(max_length=6)),
('account_encrypted', models.BinaryField(max_length=4096)),
],
),
]
| [
"django.db.models.AutoField",
"django.db.models.CharField",
"django.db.models.BinaryField"
] | [((311, 404), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (327, 404), False, 'from django.db import migrations, models\n'), ((438, 468), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(8)'}), '(max_length=8)\n', (454, 468), False, 'from django.db import migrations, models\n'), ((498, 528), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(6)'}), '(max_length=6)\n', (514, 528), False, 'from django.db import migrations, models\n'), ((569, 604), 'django.db.models.BinaryField', 'models.BinaryField', ([], {'max_length': '(4096)'}), '(max_length=4096)\n', (587, 604), False, 'from django.db import migrations, models\n')] |
import logging
logging.basicConfig(level=logging.INFO)
from flask import Flask
from application.config import Config
app = Flask(__name__)
app.config.from_object(Config)
from application.models.classifiers.CNNClassifier import CNNClassifier
from application.models.classifiers.MLPClassifier import MLPClassifier
from application.models.classifiers.NaiveBayesClassifier import NaiveBayesClassifier
from application.models.classifiers.SVMClassifier import SVMClassifier
from application.models.detectors.CasClasDetector import CasClasDetector
from application.models.detectors.MTCNNDetector import MTCNNDetector
from application.utils import get_urls_list
logging.info("Loading models...")
MODELS = {"mtcnn": MTCNNDetector(),
"casclas": CasClasDetector(app.config["PRETRAINED_CASCLAS"]),
"mlp": MLPClassifier(app.config["MLP_WEIGHTS"]),
"svm": SVMClassifier(app.config["SVM"]),
"cnn": CNNClassifier(app.config["CNN_WEIGHTS"]),
"nb": NaiveBayesClassifier(app.config["CATEGORICAL_NB"])}
IMG_URLS = get_urls_list(app.config["OFFLINE_IMG_URLS"])
from application import routes
| [
"logging.basicConfig",
"application.utils.get_urls_list",
"flask.Flask",
"application.models.classifiers.MLPClassifier.MLPClassifier",
"application.models.classifiers.CNNClassifier.CNNClassifier",
"application.models.detectors.CasClasDetector.CasClasDetector",
"application.models.classifiers.NaiveBayesC... | [((15, 54), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (34, 54), False, 'import logging\n'), ((127, 142), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (132, 142), False, 'from flask import Flask\n'), ((661, 694), 'logging.info', 'logging.info', (['"""Loading models..."""'], {}), "('Loading models...')\n", (673, 694), False, 'import logging\n'), ((1052, 1097), 'application.utils.get_urls_list', 'get_urls_list', (["app.config['OFFLINE_IMG_URLS']"], {}), "(app.config['OFFLINE_IMG_URLS'])\n", (1065, 1097), False, 'from application.utils import get_urls_list\n'), ((714, 729), 'application.models.detectors.MTCNNDetector.MTCNNDetector', 'MTCNNDetector', ([], {}), '()\n', (727, 729), False, 'from application.models.detectors.MTCNNDetector import MTCNNDetector\n'), ((752, 801), 'application.models.detectors.CasClasDetector.CasClasDetector', 'CasClasDetector', (["app.config['PRETRAINED_CASCLAS']"], {}), "(app.config['PRETRAINED_CASCLAS'])\n", (767, 801), False, 'from application.models.detectors.CasClasDetector import CasClasDetector\n'), ((820, 860), 'application.models.classifiers.MLPClassifier.MLPClassifier', 'MLPClassifier', (["app.config['MLP_WEIGHTS']"], {}), "(app.config['MLP_WEIGHTS'])\n", (833, 860), False, 'from application.models.classifiers.MLPClassifier import MLPClassifier\n'), ((879, 911), 'application.models.classifiers.SVMClassifier.SVMClassifier', 'SVMClassifier', (["app.config['SVM']"], {}), "(app.config['SVM'])\n", (892, 911), False, 'from application.models.classifiers.SVMClassifier import SVMClassifier\n'), ((930, 970), 'application.models.classifiers.CNNClassifier.CNNClassifier', 'CNNClassifier', (["app.config['CNN_WEIGHTS']"], {}), "(app.config['CNN_WEIGHTS'])\n", (943, 970), False, 'from application.models.classifiers.CNNClassifier import CNNClassifier\n'), ((988, 1038), 'application.models.classifiers.NaiveBayesClassifier.NaiveBayesClassifier', 'NaiveBayesClassifier', (["app.config['CATEGORICAL_NB']"], {}), "(app.config['CATEGORICAL_NB'])\n", (1008, 1038), False, 'from application.models.classifiers.NaiveBayesClassifier import NaiveBayesClassifier\n')] |
import numpy as np
import unittest
from itertools import product
from ml_techniques.svm import *
class PermutationDataTest(unittest.TestCase):
    def test_proper_shape(self):
data = np.random.random((10, 4))
labels = np.random.randint(0, 2, 10)*2-1
data_per = permut_data(data)
self.assertEqual(data_per.shape, data.shape)
data_per, labels_per = permut_data(data, labels)
self.assertEqual(data_per.shape, data.shape)
self.assertEqual(labels_per.shape, labels.shape)
class BatchCreatorTest(unittest.TestCase):
def test_run_batch_iterator(self):
data_size = 100
batch_size = 9
for init, endit in batch_size_iter(data_size, batch_size):
self.assertTrue(init != endit)
self.assertTrue(init < endit)
self.assertEqual(endit, data_size)
data_size = 100
batch_size = 10
for init, endit in batch_size_iter(data_size, batch_size):
self.assertTrue(init != endit)
self.assertTrue(init < endit)
self.assertEqual(endit, data_size)
class RegularizationTest(unittest.TestCase):
def assert_regularization(self, reg):
reg.parameters
reg.regularize(np.random.randn(10), 1)
reg.gradient_regularization(np.random.randn(10), 1)
def test_abstractregularization(self):
reg = Regularization.create_regularization('l2', 1.)
self.assert_regularization(reg)
reg = Regularization.create_regularization(reg)
self.assert_regularization(reg)
reg = Regularization.create_regularization(Null_Regularization)
self.assert_regularization(reg)
    def test_l2_regularization(self):
        reg = L2_Regularization(1.)
self.assert_regularization(reg)
def test_l1_regularization(self):
reg = L1_Regularization(1.)
self.assert_regularization(reg)
class AccuracyFunctionTest(unittest.TestCase):
def test_order_independency(self):
n = 10
n_tests = 20
for i in range(n_tests):
y0 = np.random.randint(0, 2, n)
y1 = np.random.randint(0, 2, n)
reindices = np.random.permutation(n)
self.assertEqual(accuracy(y0, y1),
accuracy(y0[reindices], y1[reindices]))
def test_symetry(self):
n = 10
n_tests = 20
for i in range(n_tests):
y0 = np.random.randint(0, 2, n)
y1 = np.random.randint(0, 2, n)
self.assertEqual(accuracy(y0, y1), accuracy(y1, y0))
class LossFunctionTest(unittest.TestCase):
def _generator_labels(self, n):
return np.random.randint(0, 2, n)*2-1
def test_abstractloss(self):
lossf = LossFunction.create_lossfunction('Hinge')
lossf = LossFunction.create_lossfunction(lossf)
lossf = LossFunction.create_lossfunction(Hinge)
def test_loss(self):
n = 20
y0 = np.random.random(n)*2-1
y1 = self._generator_labels(n)
thresholds = [0, 1, 2]
for thr in thresholds:
lossf = Hinge(thr)
lossf.loss(y0, y1)
def test_gradient(self):
n, n_feats = 20, 10
y0 = np.random.random(n)*2-1
y1 = self._generator_labels(n)
x = np.random.random((n, n_feats))
thresholds = [0, 1, 2]
for thr in thresholds:
lossf = Hinge(thr)
grad_w, grad_w0 = lossf.gradient_loss(y0, y1, x)
self.assertEqual(len(grad_w), n_feats)
class Modeltest(unittest.TestCase):
def setUp(self):
n = 100
self.create_X = lambda n_feats: np.random.random((n, n_feats))
def assert_linearmodel(self, linearmodel):
w, w0 = linearmodel.parameters
if w is not None:
linearmodel.compute(self.create_X(len(w)))
linearmodel.reset_model()
def test_abstractmodel(self):
mod = Model.create_model('svm', np.random.randn(10), 0.)
Model.create_model(mod)
Model.create_model(LinearModel, np.random.randn(10), 0.)
def test_linearmodel(self):
lm = LinearModel(None)
self.assert_linearmodel(lm)
lm = LinearModel(np.random.randn(10), 0.)
self.assert_linearmodel(lm)
lm = LinearModel.weights_initialization(10, 'gauss')
self.assert_linearmodel(lm)
lm = LinearModel.weights_initialization(10, 'zeros')
self.assert_linearmodel(lm)
class SVMTest(unittest.TestCase):
def setUp(self):
loss = ['Hinge', Hinge()]
reg_pars = [0.01, 1., 10.]
batch_size = [10]
n_epochs = [0, 100]
learning_rate = [0.001, 1.]
stop_step = [.00001, 100]
history = [True, False]
verbose = [True, False]
self.var_names = ['loss', 'reg_pars', 'batch_size', 'n_epochs',
'learning_rate', 'stop_step', 'history', 'verbose']
self.possibilities = [loss, reg_pars, batch_size, n_epochs,
learning_rate, stop_step, history, verbose]
def test_initialization(self):
n, n_feats = 100, 20
data = np.random.random((n, n_feats))
labels = np.random.randint(0, 2, n)*2-1
for p in product(*self.possibilities):
solver = SVM(**dict(zip(self.var_names, p)))
## General asserts
self.assertEqual(solver.optimizer, 'SGD')
self.assertEqual(solver.batch_size, p[2])
self.assertEqual(solver.n_epochs, p[3])
self.assertEqual(solver.learning_rate, p[4])
self.assertEqual(solver.stop_step, p[5])
## Special cases
if not p[6]:
self.assertIsNone(solver.train_loss_history)
self.assertIsNone(solver.test_loss_history)
self.assertIsNone(solver.train_accuracy_history)
self.assertIsNone(solver.test_accuracy_history)
## Weights initialization
solver.model = solver.model.weights_initialization(n_feats)
solver._reset_history()
## Batch creation testing
for x_batch, y_batch in solver._batch_generator(data, labels):
self.assertTrue(len(x_batch) >= p[2])
## Computer functions
if p[7]:
# model._initialization_weights(n_feats, init_type='gauss')
solver.compute_epoch_measures(data, labels, None, None)
solver.compute_epoch_measures(data, labels, data, labels)
def test_fitmodel(self):
n, n_feats = 100, 5
data = np.random.random((n, n_feats))
labels = np.random.randint(0, 2, n)*2-1
for p in product(*self.possibilities):
solver = SVM(**dict(zip(self.var_names, p)))
solver.report_results()
solver.n_epochs = 100
solver.fit(data, labels)
solver.fit(data, labels, data, labels)
solver.predict(data)
solver.score(data, labels)
if p[6]:
self.assertEqual(solver.epoch_learned,
len(solver.train_loss_history))
self.assertEqual(solver.epoch_learned,
len(solver.train_accuracy_history))
self.assertEqual(solver.epoch_learned,
len(solver.test_loss_history))
self.assertEqual(solver.epoch_learned,
len(solver.test_accuracy_history))
solver.report_results()
| [
"numpy.random.random",
"itertools.product",
"numpy.random.randint",
"numpy.random.randn",
"numpy.random.permutation"
] | [((193, 218), 'numpy.random.random', 'np.random.random', (['(10, 4)'], {}), '((10, 4))\n', (209, 218), True, 'import numpy as np\n'), ((3285, 3315), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (3301, 3315), True, 'import numpy as np\n'), ((5136, 5166), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (5152, 5166), True, 'import numpy as np\n'), ((5233, 5261), 'itertools.product', 'product', (['*self.possibilities'], {}), '(*self.possibilities)\n', (5240, 5261), False, 'from itertools import product\n'), ((6591, 6621), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (6607, 6621), True, 'import numpy as np\n'), ((6688, 6716), 'itertools.product', 'product', (['*self.possibilities'], {}), '(*self.possibilities)\n', (6695, 6716), False, 'from itertools import product\n'), ((1234, 1253), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (1249, 1253), True, 'import numpy as np\n'), ((1294, 1313), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (1309, 1313), True, 'import numpy as np\n'), ((2077, 2103), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2094, 2103), True, 'import numpy as np\n'), ((2121, 2147), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2138, 2147), True, 'import numpy as np\n'), ((2172, 2196), 'numpy.random.permutation', 'np.random.permutation', (['n'], {}), '(n)\n', (2193, 2196), True, 'import numpy as np\n'), ((2429, 2455), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2446, 2455), True, 'import numpy as np\n'), ((2473, 2499), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2490, 2499), True, 'import numpy as np\n'), ((3638, 3668), 'numpy.random.random', 'np.random.random', (['(n, n_feats)'], {}), '((n, n_feats))\n', (3654, 3668), True, 'import numpy as np\n'), ((3946, 3965), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (3961, 3965), True, 'import numpy as np\n'), ((4043, 4062), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (4058, 4062), True, 'import numpy as np\n'), ((4193, 4212), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (4208, 4212), True, 'import numpy as np\n'), ((236, 263), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', '(10)'], {}), '(0, 2, 10)\n', (253, 263), True, 'import numpy as np\n'), ((2662, 2688), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (2679, 2688), True, 'import numpy as np\n'), ((2951, 2970), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (2967, 2970), True, 'import numpy as np\n'), ((3210, 3229), 'numpy.random.random', 'np.random.random', (['n'], {}), '(n)\n', (3226, 3229), True, 'import numpy as np\n'), ((5184, 5210), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (5201, 5210), True, 'import numpy as np\n'), ((6639, 6665), 'numpy.random.randint', 'np.random.randint', (['(0)', '(2)', 'n'], {}), '(0, 2, n)\n', (6656, 6665), True, 'import numpy as np\n')] |
import collections
from numpy.core.defchararray import lower
import streamlit as st
import numpy as np
import pandas as pd
from pages import utils
def app():
st.title("Data Storyteller Application")
st.markdown("## Data Upload")
# Upload the dataset and save as csv
st.markdown("### Upload a csv file for analysis.")
st.write("\n")
# Code to read a single file
uploaded_file = st.file_uploader("Choose a file", type = ['csv', 'xlsx'])
global data
if uploaded_file is not None:
try:
data = pd.read_csv(uploaded_file)
except Exception as e:
print(e)
data = pd.read_excel(uploaded_file)
st.set_option('deprecation.showfileUploaderEncoding', False)
''' Load the data and save the columns with categories as a dataframe.
This section also allows changes in the numerical and categorical columns. '''
if st.button("Load Data"):
# Raw data
st.dataframe(data)
data.to_csv('data/main_data.csv', index=False)
# Collect the categorical and numerical columns
numeric_cols = data.select_dtypes(include=np.number).columns.tolist()
categorical_cols = list(set(list(data.columns)) - set(numeric_cols))
# Save the columns as a dataframe or dictionary
columns = []
# Iterate through the numerical and categorical columns and save in columns
columns = utils.genMetaData(data)
# Save the columns as a dataframe with categories
# Here column_name is the name of the field and the type is whether it's numerical or categorical
columns_df = pd.DataFrame(columns, columns = ['column_name', 'type'])
columns_df.to_csv('data/metadata/column_type_desc.csv', index = False)
# Display columns
st.markdown("**Column Name**-**Type**")
for i in range(columns_df.shape[0]):
st.write(f"{i+1}. **{columns_df.iloc[i]['column_name']}** - {columns_df.iloc[i]['type']}")
st.markdown("""The above are the automated column types detected by the application in the data.
In case you wish to change the column types, head over to the **Column Change** section. """) | [
"streamlit.markdown",
"pandas.read_csv",
"streamlit.file_uploader",
"streamlit.write",
"streamlit.button",
"streamlit.dataframe",
"pandas.read_excel",
"pandas.DataFrame",
"streamlit.set_option",
"pages.utils.genMetaData",
"streamlit.title"
] | [((173, 213), 'streamlit.title', 'st.title', (['"""Data Storyteller Application"""'], {}), "('Data Storyteller Application')\n", (181, 213), True, 'import streamlit as st\n'), ((219, 248), 'streamlit.markdown', 'st.markdown', (['"""## Data Upload"""'], {}), "('## Data Upload')\n", (230, 248), True, 'import streamlit as st\n'), ((296, 346), 'streamlit.markdown', 'st.markdown', (['"""### Upload a csv file for analysis."""'], {}), "('### Upload a csv file for analysis.')\n", (307, 346), True, 'import streamlit as st\n'), ((353, 367), 'streamlit.write', 'st.write', (['"""\n"""'], {}), "('\\n')\n", (361, 367), True, 'import streamlit as st\n'), ((426, 481), 'streamlit.file_uploader', 'st.file_uploader', (['"""Choose a file"""'], {'type': "['csv', 'xlsx']"}), "('Choose a file', type=['csv', 'xlsx'])\n", (442, 481), True, 'import streamlit as st\n'), ((707, 767), 'streamlit.set_option', 'st.set_option', (['"""deprecation.showfileUploaderEncoding"""', '(False)'], {}), "('deprecation.showfileUploaderEncoding', False)\n", (720, 767), True, 'import streamlit as st\n'), ((939, 961), 'streamlit.button', 'st.button', (['"""Load Data"""'], {}), "('Load Data')\n", (948, 961), True, 'import streamlit as st\n'), ((1003, 1021), 'streamlit.dataframe', 'st.dataframe', (['data'], {}), '(data)\n', (1015, 1021), True, 'import streamlit as st\n'), ((1501, 1524), 'pages.utils.genMetaData', 'utils.genMetaData', (['data'], {}), '(data)\n', (1518, 1524), False, 'from pages import utils\n'), ((1724, 1778), 'pandas.DataFrame', 'pd.DataFrame', (['columns'], {'columns': "['column_name', 'type']"}), "(columns, columns=['column_name', 'type'])\n", (1736, 1778), True, 'import pandas as pd\n'), ((1900, 1939), 'streamlit.markdown', 'st.markdown', (['"""**Column Name**-**Type**"""'], {}), "('**Column Name**-**Type**')\n", (1911, 1939), True, 'import streamlit as st\n'), ((2109, 2318), 'streamlit.markdown', 'st.markdown', (['"""The above are the automated column types detected by the application in the data. \n In case you wish to change the column types, head over to the **Column Change** section. """'], {}), '(\n """The above are the automated column types detected by the application in the data. \n In case you wish to change the column types, head over to the **Column Change** section. """\n )\n', (2120, 2318), True, 'import streamlit as st\n'), ((570, 596), 'pandas.read_csv', 'pd.read_csv', (['uploaded_file'], {}), '(uploaded_file)\n', (581, 596), True, 'import pandas as pd\n'), ((1999, 2101), 'streamlit.write', 'st.write', (['f"""{i + 1}. **{columns_df.iloc[i][\'column_name\']}** - {columns_df.iloc[i][\'type\']}"""'], {}), '(\n f"{i + 1}. **{columns_df.iloc[i][\'column_name\']}** - {columns_df.iloc[i][\'type\']}"\n )\n', (2007, 2101), True, 'import streamlit as st\n'), ((671, 699), 'pandas.read_excel', 'pd.read_excel', (['uploaded_file'], {}), '(uploaded_file)\n', (684, 699), True, 'import pandas as pd\n')] |
# -*- coding:utf-8 -*-
import paddle.fluid as fluid
def cnn_net(data,
dict_dim,
emb_dim=128,
hid_dim=128,
hid_dim2=96,
class_dim=2,
win_size=3):
"""
Conv net
"""
# embedding layer
emb = fluid.layers.embedding(
input=data,
size=[dict_dim, emb_dim],
param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@embedding_0.w_0"))
# convolution layer
conv_3 = fluid.nets.sequence_conv_pool(
input=emb,
num_filters=hid_dim,
filter_size=win_size,
act="tanh",
pool_type="max",
param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@sequence_conv_0.w_0"),
bias_attr=fluid.ParamAttr(name="@HUB_senta_cnn@sequence_conv_0.b_0"))
# full connect layer
fc_1 = fluid.layers.fc(
input=[conv_3],
size=hid_dim2,
param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_0.w_0"),
bias_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_0.b_0"))
# softmax layer
prediction = fluid.layers.fc(
input=[fc_1],
size=class_dim,
act="softmax",
param_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_1.w_0"),
bias_attr=fluid.ParamAttr(name="@HUB_senta_cnn@fc_1.b_0"))
return prediction, fc_1
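if __name__ == "__main__":
    # Illustrative wiring sketch (assumes the legacy static-graph fluid API;
    # the input name "words" and the dict_dim value are placeholders, not part
    # of the module above).
    words = fluid.layers.data(name="words", shape=[1], dtype="int64", lod_level=1)
    prediction, feature = cnn_net(words, dict_dim=10000)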
| [
"paddle.fluid.ParamAttr"
] | [((377, 431), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@embedding_0.w_0"""'}), "(name='@HUB_senta_cnn@embedding_0.w_0')\n", (392, 431), True, 'import paddle.fluid as fluid\n'), ((644, 702), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@sequence_conv_0.w_0"""'}), "(name='@HUB_senta_cnn@sequence_conv_0.w_0')\n", (659, 702), True, 'import paddle.fluid as fluid\n'), ((722, 780), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@sequence_conv_0.b_0"""'}), "(name='@HUB_senta_cnn@sequence_conv_0.b_0')\n", (737, 780), True, 'import paddle.fluid as fluid\n'), ((901, 948), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@fc_0.w_0"""'}), "(name='@HUB_senta_cnn@fc_0.w_0')\n", (916, 948), True, 'import paddle.fluid as fluid\n'), ((968, 1015), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@fc_0.b_0"""'}), "(name='@HUB_senta_cnn@fc_0.b_0')\n", (983, 1015), True, 'import paddle.fluid as fluid\n'), ((1160, 1207), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@fc_1.w_0"""'}), "(name='@HUB_senta_cnn@fc_1.w_0')\n", (1175, 1207), True, 'import paddle.fluid as fluid\n'), ((1227, 1274), 'paddle.fluid.ParamAttr', 'fluid.ParamAttr', ([], {'name': '"""@HUB_senta_cnn@fc_1.b_0"""'}), "(name='@HUB_senta_cnn@fc_1.b_0')\n", (1242, 1274), True, 'import paddle.fluid as fluid\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import runpy
import os
import pytest
import glob
THIS_FILES_DIR_PATH = os.path.realpath(os.path.dirname(__file__))
def get_paths_of_scripts():
exclude_sub_strings = ["do_not_execute"]
plot_script_paths = glob.glob(
os.path.join(
os.path.dirname(THIS_FILES_DIR_PATH), "docs", "source", "notebooks", "*.py"
)
)
plot_script_paths_sorted = sorted(plot_script_paths)
plot_script_paths_sorted_reduced = [
p
for p in plot_script_paths_sorted
if not any(sub in p for sub in exclude_sub_strings)
]
return plot_script_paths_sorted_reduced
class Test_scripts:
@pytest.mark.parametrize(
"path_script",
get_paths_of_scripts(),
)
def test_execute_scripts(self, path_script):
print(f"Execut script:\n{path_script}")
runpy.run_path(path_script, init_globals={}, run_name="__main__")
| [
"os.path.dirname",
"runpy.run_path"
] | [((136, 161), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (151, 161), False, 'import os\n'), ((878, 943), 'runpy.run_path', 'runpy.run_path', (['path_script'], {'init_globals': '{}', 'run_name': '"""__main__"""'}), "(path_script, init_globals={}, run_name='__main__')\n", (892, 943), False, 'import runpy\n'), ((307, 343), 'os.path.dirname', 'os.path.dirname', (['THIS_FILES_DIR_PATH'], {}), '(THIS_FILES_DIR_PATH)\n', (322, 343), False, 'import os\n')] |
# -*- coding: utf-8 -*-
"""
Inspired by:
* https://gist.github.com/shirriff/c9fb5d98e6da79d9a772#file-merkle-py
* https://github.com/richardkiss/pycoin
"""
from __future__ import absolute_import, division, unicode_literals
from builtins import range
import binascii
import hashlib
def merkleroot(hashes):
"""
Args:
hashes: reversed binary form of transactions hashes, e.g.:
``binascii.unhexlify(h)[::-1] for h in block['tx']]``
Returns:
merkle root in hexadecimal form
"""
if len(hashes) == 1:
return binascii.hexlify(bytearray(reversed(hashes[0])))
if len(hashes) % 2 == 1:
hashes.append(hashes[-1])
parent_hashes = []
for i in range(0, len(hashes)-1, 2):
first_round_hash = hashlib.sha256(hashes[i] + hashes[i+1]).digest()
second_round_hash = hashlib.sha256(first_round_hash).digest()
parent_hashes.append(second_round_hash)
return merkleroot(parent_hashes)
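if __name__ == "__main__":
    # Minimal usage sketch (illustrative): two placeholder transaction ids in
    # hexadecimal, reversed into the internal byte order the docstring expects.
    txids = ["a" * 64, "b" * 64]
    print(merkleroot([binascii.unhexlify(h)[::-1] for h in txids]))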
| [
"hashlib.sha256"
] | [((773, 814), 'hashlib.sha256', 'hashlib.sha256', (['(hashes[i] + hashes[i + 1])'], {}), '(hashes[i] + hashes[i + 1])\n', (787, 814), False, 'import hashlib\n'), ((850, 882), 'hashlib.sha256', 'hashlib.sha256', (['first_round_hash'], {}), '(first_round_hash)\n', (864, 882), False, 'import hashlib\n')] |
from django.contrib import admin
from simple_history.admin import SimpleHistoryAdmin
from .models import MainMetadata
# Register your models here.
admin.site.register(MainMetadata, SimpleHistoryAdmin) | [
"django.contrib.admin.site.register"
] | [((149, 202), 'django.contrib.admin.site.register', 'admin.site.register', (['MainMetadata', 'SimpleHistoryAdmin'], {}), '(MainMetadata, SimpleHistoryAdmin)\n', (168, 202), False, 'from django.contrib import admin\n')] |
"""
The CPTPState class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.modelmembers.states.densestate import DenseState as _DenseState
from pygsti.modelmembers.states.state import State as _State
from pygsti.evotypes import Evotype as _Evotype
from pygsti.baseobjs import statespace as _statespace
from pygsti.baseobjs.basis import Basis as _Basis
IMAG_TOL = 1e-7 # tolerance for imaginary part being considered zero
class CPTPState(_DenseState):
"""
TODO: update docstring
    A state vector constrained to correspond to a positive density matrix.
    This state vector is parameterized through the Cholesky decomposition of
    its standard-basis representation as a density matrix (not a Liouville
vector). The resulting state vector thus represents a positive density
matrix, and additional constraints on the parameters also guarantee that the
trace == 1. This state vector is meant for use with CPTP processes, hence
the name.
Parameters
----------
vec : array_like or State
        a 1D numpy array representing the state. The
        shape of this array sets the dimension of the state.
basis : {"std", "gm", "pp", "qt"} or Basis
        The basis `vec` is in. Needed because this parameterization
        requires us to construct the density matrix corresponding to
        the Liouville vector `vec`.
    truncate : bool, optional
Whether or not a non-positive, trace=1 `vec` should
be truncated to force a successful construction.
evotype : Evotype or str, optional
The evolution type. The special value `"default"` is equivalent
to specifying the value of `pygsti.evotypes.Evotype.default_evotype`.
state_space : StateSpace, optional
The state space for this operation. If `None` a default state space
with the appropriate number of qubits is used.
"""
def __init__(self, vec, basis, truncate=False, evotype="default", state_space=None):
vector = _State._to_vector(vec)
basis = _Basis.cast(basis, len(vector))
self.basis = basis
self.basis_mxs = basis.elements # shape (len(vec), dmDim, dmDim)
self.basis_mxs = _np.rollaxis(self.basis_mxs, 0, 3) # shape (dmDim, dmDim, len(vec))
assert(self.basis_mxs.shape[-1] == len(vector))
# set self.params and self.dmDim
self._set_params_from_vector(vector, truncate)
#parameter labels (parameter encode the Cholesky Lmx)
labels = []
for i, ilbl in enumerate(basis.labels[1:]):
for j, jlbl in enumerate(basis.labels[1:]):
if i == j: labels.append("%s diagonal element of density matrix Cholesky decomp" % ilbl)
elif j < i: labels.append("Re[(%s,%s) element of density matrix Cholesky decomp]" % (ilbl, jlbl))
else: labels.append("Im[(%s,%s) element of density matrix Cholesky decomp]" % (ilbl, jlbl))
#scratch space
self.Lmx = _np.zeros((self.dmDim, self.dmDim), 'complex')
state_space = _statespace.default_space_for_dim(len(vector)) if (state_space is None) \
else _statespace.StateSpace.cast(state_space)
evotype = _Evotype.cast(evotype)
_DenseState.__init__(self, vector, evotype, state_space)
self._paramlbls = _np.array(labels, dtype=object)
def _set_params_from_vector(self, vector, truncate):
density_mx = _np.dot(self.basis_mxs, vector)
density_mx = density_mx.squeeze()
dmDim = density_mx.shape[0]
assert(dmDim == density_mx.shape[1]), "Density matrix must be square!"
trc = _np.trace(density_mx)
assert(truncate or _np.isclose(trc, 1.0)), \
"`vec` must correspond to a trace-1 density matrix (truncate == False)!"
if not _np.isclose(trc, 1.0): # truncate to trace == 1
density_mx -= _np.identity(dmDim, 'd') / dmDim * (trc - 1.0)
#push any slightly negative evals of density_mx positive
# so that the Cholesky decomp will work.
evals, U = _np.linalg.eig(density_mx)
Ui = _np.linalg.inv(U)
assert(truncate or all([ev >= -1e-12 for ev in evals])), \
"`vec` must correspond to a positive density matrix (truncate == False)!"
pos_evals = evals.clip(1e-16, 1e100)
density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
try:
Lmx = _np.linalg.cholesky(density_mx)
        except _np.linalg.LinAlgError:  # Lmx not positive definite?
pos_evals = evals.clip(1e-12, 1e100) # try again with 1e-12
density_mx = _np.dot(U, _np.dot(_np.diag(pos_evals), Ui))
Lmx = _np.linalg.cholesky(density_mx)
#check TP condition: that diagonal els of Lmx squared add to 1.0
Lmx_norm = _np.trace(_np.dot(Lmx.T.conjugate(), Lmx)) # sum of magnitude^2 of all els
assert(_np.isclose(Lmx_norm, 1.0)), \
"Cholesky decomp didn't preserve trace=1!"
self.dmDim = dmDim
self.params = _np.empty(dmDim**2, 'd')
for i in range(dmDim):
assert(_np.linalg.norm(_np.imag(Lmx[i, i])) < IMAG_TOL)
self.params[i * dmDim + i] = Lmx[i, i].real # / paramNorm == 1 as asserted above
for j in range(i):
self.params[i * dmDim + j] = Lmx[i, j].real
self.params[j * dmDim + i] = Lmx[i, j].imag
def _construct_vector(self):
dmDim = self.dmDim
# params is an array of length dmDim^2 that
# encodes a lower-triangular matrix "Lmx" via:
# Lmx[i,i] = params[i*dmDim + i] / param-norm # i = 0...dmDim-2
# *last diagonal el is given by sqrt(1.0 - sum(L[i,j]**2))
# Lmx[i,j] = params[i*dmDim + j] + 1j*params[j*dmDim+i] (i > j)
param2Sum = _np.vdot(self.params, self.params) # or "dot" would work, since params are real
paramNorm = _np.sqrt(param2Sum) # also the norm of *all* Lmx els
for i in range(dmDim):
self.Lmx[i, i] = self.params[i * dmDim + i] / paramNorm
for j in range(i):
self.Lmx[i, j] = (self.params[i * dmDim + j] + 1j * self.params[j * dmDim + i]) / paramNorm
Lmx_norm = _np.trace(_np.dot(self.Lmx.T.conjugate(), self.Lmx)) # sum of magnitude^2 of all els
assert(_np.isclose(Lmx_norm, 1.0)), "Violated trace=1 condition!"
#The (complex, Hermitian) density matrix is build by
# assuming Lmx is its Cholesky decomp, which makes
# the density matrix is pos-def.
density_mx = _np.dot(self.Lmx, self.Lmx.T.conjugate())
assert(_np.isclose(_np.trace(density_mx), 1.0)), "density matrix must be trace == 1"
# write density matrix in given basis: = sum_i alpha_i B_i
# ASSUME that basis is orthogonal, i.e. Tr(Bi^dag*Bj) = delta_ij
basis_mxs = _np.rollaxis(self.basis_mxs, 2) # shape (dmDim, dmDim, len(vec))
vec = _np.array([_np.trace(_np.dot(M.T.conjugate(), density_mx)) for M in basis_mxs])
#for now, assume Liouville vector should always be real (TODO: add 'real' flag later?)
assert(_np.linalg.norm(_np.imag(vec)) < IMAG_TOL)
vec = _np.real(vec)
self._ptr.flags.writeable = True
self._ptr[:] = vec[:] # so shape is (dim,1) - the convention for spam vectors
self._ptr.flags.writeable = False
def set_dense(self, vec):
"""
Set the dense-vector value of this state vector.
Attempts to modify this state vector's parameters so that the raw
state vector becomes `vec`. Will raise ValueError if this operation
is not possible.
Parameters
----------
vec : array_like or State
A numpy array representing a state vector, or a State object.
Returns
-------
None
"""
try:
self._set_params_from_vector(vec, truncate=False)
self.dirty = True
except AssertionError as e:
raise ValueError("Error initializing the parameters of this "
"CPTPState object: " + str(e))
@property
def num_params(self):
"""
Get the number of independent parameters which specify this state vector.
Returns
-------
int
the number of independent parameters.
"""
assert(self.dmDim**2 == self.dim) # should at least be true without composite bases...
return self.dmDim**2
def to_vector(self):
"""
Get the state vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
return self.params
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the state vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of state vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this state vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
assert(len(v) == self.num_params)
self.params[:] = v[:]
self._construct_vector()
self.dirty = dirty_value
def deriv_wrt_params(self, wrt_filter=None):
"""
The element-wise derivative this state vector.
Construct a matrix whose columns are the derivatives of the state vector
with respect to a single param. Thus, each column is of length
dimension and there is one column per state vector parameter.
Parameters
----------
wrt_filter : list or numpy.ndarray
List of parameter indices to take derivative with respect to.
            (None means to use all of this state vector's parameters.)
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
dmDim = self.dmDim
nP = len(self.params)
assert(nP == dmDim**2) # number of parameters
# v_i = trace( B_i^dag * Lmx * Lmx^dag )
# d(v_i) = trace( B_i^dag * (dLmx * Lmx^dag + Lmx * (dLmx)^dag) ) #trace = linear so commutes w/deriv
        # where dLmx/d[ab] has a single nonzero element: 1 at position (a,b) when
        # a >= b (a real parameter), or 1j at position (b,a) when a < b (an
        # imaginary parameter), matching the parameter encoding used above.
L, Lbar = self.Lmx, self.Lmx.conjugate()
F1 = _np.tril(_np.ones((dmDim, dmDim), 'd'))
F2 = _np.triu(_np.ones((dmDim, dmDim), 'd'), 1) * 1j
conj_basis_mxs = self.basis_mxs.conjugate()
# Derivative of vector wrt params; shape == [vecLen,dmDim,dmDim] *not dealing with TP condition yet*
# (first get derivative assuming last diagonal el of Lmx *is* a parameter, then use chain rule)
dVdp = _np.einsum('aml,mb,ab->lab', conj_basis_mxs, Lbar, F1) # only a >= b nonzero (F1)
dVdp += _np.einsum('mal,mb,ab->lab', conj_basis_mxs, L, F1) # ditto
dVdp += _np.einsum('bml,ma,ab->lab', conj_basis_mxs, Lbar, F2) # only b > a nonzero (F2)
dVdp += _np.einsum('mbl,ma,ab->lab', conj_basis_mxs, L, F2.conjugate()) # ditto
dVdp.shape = [dVdp.shape[0], nP] # jacobian with respect to "p" params,
# which don't include normalization for TP-constraint
#Now get jacobian of actual params wrt the params used above. Denote the actual
# params "P" in variable names, so p_ij = P_ij / sqrt(sum(P_xy**2))
param2Sum = _np.vdot(self.params, self.params)
        paramNorm = _np.sqrt(param2Sum)  # norm of *all* Lmx els
dpdP = _np.identity(nP, 'd')
# all p_ij params == P_ij / paramNorm = P_ij / sqrt(sum(P_xy**2))
# and so have derivs wrt *all* Pxy elements.
for ij in range(nP):
for kl in range(nP):
if ij == kl:
# dp_ij / dP_ij = 1.0 / (sum(P_xy**2))^(1/2) - 0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_ij
# = 1.0 / (sum(P_xy**2))^(1/2) - P_ij^2 / (sum(P_xy**2))^(3/2)
dpdP[ij, ij] = 1.0 / paramNorm - self.params[ij]**2 / paramNorm**3
else:
# dp_ij / dP_kl = -0.5 * P_ij / (sum(P_xy**2))^(3/2) * 2*P_kl
# = - P_ij * P_kl / (sum(P_xy**2))^(3/2)
dpdP[ij, kl] = - self.params[ij] * self.params[kl] / paramNorm**3
#Apply the chain rule to get dVdP:
dVdP = _np.dot(dVdp, dpdP) # shape (vecLen, nP) - the jacobian!
dVdp = dpdP = None # free memory!
assert(_np.linalg.norm(_np.imag(dVdP)) < IMAG_TOL)
derivMx = _np.real(dVdP)
if wrt_filter is None:
return derivMx
else:
return _np.take(derivMx, wrt_filter, axis=1)
def has_nonzero_hessian(self):
"""
Whether this state vector has a non-zero Hessian with respect to its parameters.
Returns
-------
bool
"""
return True
def hessian_wrt_params(self, wrt_filter1=None, wrt_filter2=None):
"""
Construct the Hessian of this state vector with respect to its parameters.
        This function returns a tensor whose first axis corresponds to the
        flattened state vector and whose 2nd and 3rd axes correspond to the
        parameters that are differentiated with respect to.
Parameters
----------
wrt_filter1 : list or numpy.ndarray
List of parameter indices to take 1st derivatives with respect to.
            (None means to use all of this state vector's parameters.)
wrt_filter2 : list or numpy.ndarray
List of parameter indices to take 2nd derivatives with respect to.
            (None means to use all of this state vector's parameters.)
Returns
-------
numpy array
Hessian with shape (dimension, num_params1, num_params2)
"""
raise NotImplementedError("TODO: add hessian computation for CPTPState")
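if __name__ == "__main__":
    # Illustrative numerical check (not part of pyGSTi's API): the parameterization
    # above writes the density matrix as rho = Lmx @ Lmx^dagger, with the Cholesky
    # factor normalized so that Tr(rho) = sum(|Lmx_ij|^2) = 1, which makes rho
    # positive semidefinite with unit trace by construction.
    Lmx = _np.tril(_np.random.randn(3, 3) + 1j * _np.random.randn(3, 3))
    Lmx /= _np.sqrt(_np.trace(_np.dot(Lmx.conj().T, Lmx)).real)  # Tr(Lmx Lmx^dag) == 1
    rho = _np.dot(Lmx, Lmx.conj().T)
    assert _np.isclose(_np.trace(rho).real, 1.0)
    assert _np.all(_np.linalg.eigvalsh(rho) >= -1e-12)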
| [
"numpy.trace",
"numpy.sqrt",
"pygsti.modelmembers.states.densestate.DenseState.__init__",
"numpy.rollaxis",
"numpy.array",
"numpy.einsum",
"numpy.imag",
"numpy.take",
"numpy.real",
"numpy.dot",
"numpy.empty",
"pygsti.evotypes.Evotype.cast",
"pygsti.modelmembers.states.state.State._to_vector"... | [((2705, 2727), 'pygsti.modelmembers.states.state.State._to_vector', '_State._to_vector', (['vec'], {}), '(vec)\n', (2722, 2727), True, 'from pygsti.modelmembers.states.state import State as _State\n'), ((2903, 2937), 'numpy.rollaxis', '_np.rollaxis', (['self.basis_mxs', '(0)', '(3)'], {}), '(self.basis_mxs, 0, 3)\n', (2915, 2937), True, 'import numpy as _np\n'), ((3686, 3732), 'numpy.zeros', '_np.zeros', (['(self.dmDim, self.dmDim)', '"""complex"""'], {}), "((self.dmDim, self.dmDim), 'complex')\n", (3695, 3732), True, 'import numpy as _np\n'), ((3907, 3929), 'pygsti.evotypes.Evotype.cast', '_Evotype.cast', (['evotype'], {}), '(evotype)\n', (3920, 3929), True, 'from pygsti.evotypes import Evotype as _Evotype\n'), ((3938, 3994), 'pygsti.modelmembers.states.densestate.DenseState.__init__', '_DenseState.__init__', (['self', 'vector', 'evotype', 'state_space'], {}), '(self, vector, evotype, state_space)\n', (3958, 3994), True, 'from pygsti.modelmembers.states.densestate import DenseState as _DenseState\n'), ((4021, 4052), 'numpy.array', '_np.array', (['labels'], {'dtype': 'object'}), '(labels, dtype=object)\n', (4030, 4052), True, 'import numpy as _np\n'), ((4132, 4163), 'numpy.dot', '_np.dot', (['self.basis_mxs', 'vector'], {}), '(self.basis_mxs, vector)\n', (4139, 4163), True, 'import numpy as _np\n'), ((4336, 4357), 'numpy.trace', '_np.trace', (['density_mx'], {}), '(density_mx)\n', (4345, 4357), True, 'import numpy as _np\n'), ((4768, 4794), 'numpy.linalg.eig', '_np.linalg.eig', (['density_mx'], {}), '(density_mx)\n', (4782, 4794), True, 'import numpy as _np\n'), ((4808, 4825), 'numpy.linalg.inv', '_np.linalg.inv', (['U'], {}), '(U)\n', (4822, 4825), True, 'import numpy as _np\n'), ((5602, 5628), 'numpy.isclose', '_np.isclose', (['Lmx_norm', '(1.0)'], {}), '(Lmx_norm, 1.0)\n', (5613, 5628), True, 'import numpy as _np\n'), ((5738, 5764), 'numpy.empty', '_np.empty', (['(dmDim ** 2)', '"""d"""'], {}), "(dmDim ** 2, 'd')\n", (5747, 5764), True, 'import numpy as _np\n'), ((6517, 6551), 'numpy.vdot', '_np.vdot', (['self.params', 'self.params'], {}), '(self.params, self.params)\n', (6525, 6551), True, 'import numpy as _np\n'), ((6618, 6637), 'numpy.sqrt', '_np.sqrt', (['param2Sum'], {}), '(param2Sum)\n', (6626, 6637), True, 'import numpy as _np\n'), ((7032, 7058), 'numpy.isclose', '_np.isclose', (['Lmx_norm', '(1.0)'], {}), '(Lmx_norm, 1.0)\n', (7043, 7058), True, 'import numpy as _np\n'), ((7570, 7601), 'numpy.rollaxis', '_np.rollaxis', (['self.basis_mxs', '(2)'], {}), '(self.basis_mxs, 2)\n', (7582, 7601), True, 'import numpy as _np\n'), ((7898, 7911), 'numpy.real', '_np.real', (['vec'], {}), '(vec)\n', (7906, 7911), True, 'import numpy as _np\n'), ((11960, 12014), 'numpy.einsum', '_np.einsum', (['"""aml,mb,ab->lab"""', 'conj_basis_mxs', 'Lbar', 'F1'], {}), "('aml,mb,ab->lab', conj_basis_mxs, Lbar, F1)\n", (11970, 12014), True, 'import numpy as _np\n'), ((12059, 12110), 'numpy.einsum', '_np.einsum', (['"""mal,mb,ab->lab"""', 'conj_basis_mxs', 'L', 'F1'], {}), "('mal,mb,ab->lab', conj_basis_mxs, L, F1)\n", (12069, 12110), True, 'import numpy as _np\n'), ((12138, 12192), 'numpy.einsum', '_np.einsum', (['"""bml,ma,ab->lab"""', 'conj_basis_mxs', 'Lbar', 'F2'], {}), "('bml,ma,ab->lab', conj_basis_mxs, Lbar, F2)\n", (12148, 12192), True, 'import numpy as _np\n'), ((12638, 12672), 'numpy.vdot', '_np.vdot', (['self.params', 'self.params'], {}), '(self.params, self.params)\n', (12646, 12672), True, 'import numpy as _np\n'), ((12693, 12712), 
'numpy.sqrt', '_np.sqrt', (['param2Sum'], {}), '(param2Sum)\n', (12701, 12712), True, 'import numpy as _np\n'), ((12770, 12791), 'numpy.identity', '_np.identity', (['nP', '"""d"""'], {}), "(nP, 'd')\n", (12782, 12791), True, 'import numpy as _np\n'), ((13630, 13649), 'numpy.dot', '_np.dot', (['dVdp', 'dpdP'], {}), '(dVdp, dpdP)\n', (13637, 13649), True, 'import numpy as _np\n'), ((13809, 13823), 'numpy.real', '_np.real', (['dVdP'], {}), '(dVdP)\n', (13817, 13823), True, 'import numpy as _np\n'), ((3847, 3887), 'pygsti.baseobjs.statespace.StateSpace.cast', '_statespace.StateSpace.cast', (['state_space'], {}), '(state_space)\n', (3874, 3887), True, 'from pygsti.baseobjs import statespace as _statespace\n'), ((4385, 4406), 'numpy.isclose', '_np.isclose', (['trc', '(1.0)'], {}), '(trc, 1.0)\n', (4396, 4406), True, 'import numpy as _np\n'), ((4512, 4533), 'numpy.isclose', '_np.isclose', (['trc', '(1.0)'], {}), '(trc, 1.0)\n', (4523, 4533), True, 'import numpy as _np\n'), ((5123, 5154), 'numpy.linalg.cholesky', '_np.linalg.cholesky', (['density_mx'], {}), '(density_mx)\n', (5142, 5154), True, 'import numpy as _np\n'), ((7343, 7364), 'numpy.trace', '_np.trace', (['density_mx'], {}), '(density_mx)\n', (7352, 7364), True, 'import numpy as _np\n'), ((11587, 11616), 'numpy.ones', '_np.ones', (['(dmDim, dmDim)', '"""d"""'], {}), "((dmDim, dmDim), 'd')\n", (11595, 11616), True, 'import numpy as _np\n'), ((13916, 13953), 'numpy.take', '_np.take', (['derivMx', 'wrt_filter'], {'axis': '(1)'}), '(derivMx, wrt_filter, axis=1)\n', (13924, 13953), True, 'import numpy as _np\n'), ((5066, 5085), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (5074, 5085), True, 'import numpy as _np\n'), ((5386, 5417), 'numpy.linalg.cholesky', '_np.linalg.cholesky', (['density_mx'], {}), '(density_mx)\n', (5405, 5417), True, 'import numpy as _np\n'), ((7857, 7870), 'numpy.imag', '_np.imag', (['vec'], {}), '(vec)\n', (7865, 7870), True, 'import numpy as _np\n'), ((11640, 11669), 'numpy.ones', '_np.ones', (['(dmDim, dmDim)', '"""d"""'], {}), "((dmDim, dmDim), 'd')\n", (11648, 11669), True, 'import numpy as _np\n'), ((13763, 13777), 'numpy.imag', '_np.imag', (['dVdP'], {}), '(dVdP)\n', (13771, 13777), True, 'import numpy as _np\n'), ((4587, 4611), 'numpy.identity', '_np.identity', (['dmDim', '"""d"""'], {}), "(dmDim, 'd')\n", (4599, 4611), True, 'import numpy as _np\n'), ((5829, 5848), 'numpy.imag', '_np.imag', (['Lmx[i, i]'], {}), '(Lmx[i, i])\n', (5837, 5848), True, 'import numpy as _np\n'), ((5342, 5361), 'numpy.diag', '_np.diag', (['pos_evals'], {}), '(pos_evals)\n', (5350, 5361), True, 'import numpy as _np\n')] |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 30 11:58:20 2021
@author: <NAME>
"""
import sys
sys.path.append("...")
import macheval as me
class IMSettingsHandler(me.SettingsHandler):
    def getSettings(self):
        raise NotImplementedError  # TODO: implement settings functionality
| [
"sys.path.append"
] | [((97, 119), 'sys.path.append', 'sys.path.append', (['"""..."""'], {}), "('...')\n", (112, 119), False, 'import sys\n')] |
import datetime as dt
import json
import os
import pandas as pd
from sqlalchemy import Column, Integer, String, Float, DateTime, Boolean, func
from iotfunctions.preprocessor import BaseTransformer
from iotfunctions.bif import IoTExpression
from iotfunctions.metadata import EntityType, make_sample_entity
from iotfunctions.db import Database
from iotfunctions.estimator import SimpleAnomaly
#replace with a credentials dictionary or provide a credentials file
with open('credentials.json', encoding='utf-8') as F:
credentials = json.loads(F.read())
#create a sample entity to work with
db_schema = None #set if you are not using the default
db = Database(credentials=credentials)
numeric_columns = ['fill_time','temp','humidity','wait_time','size_sd']
table_name = 'as_sample_cereal'
entity = make_sample_entity(db=db, schema = db_schema,
float_cols = numeric_columns,
name = table_name,
register = True)
entity.name
#examine the sample entity
df = db.read_table(entity.name,schema=db_schema)
df.head(1).transpose()
#configure an expression function
expression = '510 + 15*df["temp"] + 5*df["humidity"]'
mass_fn = IoTExpression(expression=expression, output_name='fill_mass')
df = entity.exec_pipeline(mass_fn)
df.head(1).transpose()
#build an anomaly model
features = ['temp', 'humidity', 'wait_time']
targets = ['fill_mass']
anomaly_fn = SimpleAnomaly(features=['temp','humidity','fill_time'],targets=['fill_mass'],threshold=0.01)
df = entity.exec_pipeline(mass_fn,anomaly_fn)
df.head(1).transpose()
| [
"iotfunctions.db.Database",
"iotfunctions.estimator.SimpleAnomaly",
"iotfunctions.metadata.make_sample_entity",
"iotfunctions.bif.IoTExpression"
] | [((652, 685), 'iotfunctions.db.Database', 'Database', ([], {'credentials': 'credentials'}), '(credentials=credentials)\n', (660, 685), False, 'from iotfunctions.db import Database\n'), ((799, 906), 'iotfunctions.metadata.make_sample_entity', 'make_sample_entity', ([], {'db': 'db', 'schema': 'db_schema', 'float_cols': 'numeric_columns', 'name': 'table_name', 'register': '(True)'}), '(db=db, schema=db_schema, float_cols=numeric_columns,\n name=table_name, register=True)\n', (817, 906), False, 'from iotfunctions.metadata import EntityType, make_sample_entity\n'), ((1206, 1267), 'iotfunctions.bif.IoTExpression', 'IoTExpression', ([], {'expression': 'expression', 'output_name': '"""fill_mass"""'}), "(expression=expression, output_name='fill_mass')\n", (1219, 1267), False, 'from iotfunctions.bif import IoTExpression\n'), ((1433, 1534), 'iotfunctions.estimator.SimpleAnomaly', 'SimpleAnomaly', ([], {'features': "['temp', 'humidity', 'fill_time']", 'targets': "['fill_mass']", 'threshold': '(0.01)'}), "(features=['temp', 'humidity', 'fill_time'], targets=[\n 'fill_mass'], threshold=0.01)\n", (1446, 1534), False, 'from iotfunctions.estimator import SimpleAnomaly\n')] |
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi archive policy rule."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GnocchiArchivePolicyRule.list_archive_policy_rule")
class ListArchivePolicyRule(gnocchiutils.GnocchiBase):
def run(self):
"""List archive policy rules."""
self.gnocchi.list_archive_policy_rule()
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
name="GnocchiArchivePolicyRule.create_archive_policy_rule")
class CreateArchivePolicyRule(gnocchiutils.GnocchiBase):
def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
"""Create archive policy rule.
:param metric_pattern: Pattern for matching metrics
:param archive_policy_name: Archive policy name
"""
name = self.generate_random_name()
self.admin_gnocchi.create_archive_policy_rule(
name,
metric_pattern=metric_pattern,
archive_policy_name=archive_policy_name)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(
context={"admin_cleanup@openstack": ["gnocchi.archive_policy_rule"]},
name="GnocchiArchivePolicyRule.create_delete_archive_policy_rule")
class CreateDeleteArchivePolicyRule(gnocchiutils.GnocchiBase):
def run(self, metric_pattern="cpu_*", archive_policy_name="low"):
"""Create archive policy rule and then delete it.
:param metric_pattern: Pattern for matching metrics
:param archive_policy_name: Archive policy name
"""
name = self.generate_random_name()
self.admin_gnocchi.create_archive_policy_rule(
name,
metric_pattern=metric_pattern,
archive_policy_name=archive_policy_name)
self.admin_gnocchi.delete_archive_policy_rule(name)
| [
"rally.task.validation.add",
"rally_openstack.task.scenario.configure"
] | [((875, 945), 'rally.task.validation.add', 'validation.add', (['"""required_services"""'], {'services': '[consts.Service.GNOCCHI]'}), "('required_services', services=[consts.Service.GNOCCHI])\n", (889, 945), False, 'from rally.task import validation\n'), ((947, 1016), 'rally.task.validation.add', 'validation.add', (['"""required_platform"""'], {'platform': '"""openstack"""', 'users': '(True)'}), "('required_platform', platform='openstack', users=True)\n", (961, 1016), False, 'from rally.task import validation\n'), ((1018, 1094), 'rally_openstack.task.scenario.configure', 'scenario.configure', ([], {'name': '"""GnocchiArchivePolicyRule.list_archive_policy_rule"""'}), "(name='GnocchiArchivePolicyRule.list_archive_policy_rule')\n", (1036, 1094), False, 'from rally_openstack.task import scenario\n'), ((1262, 1332), 'rally.task.validation.add', 'validation.add', (['"""required_services"""'], {'services': '[consts.Service.GNOCCHI]'}), "('required_services', services=[consts.Service.GNOCCHI])\n", (1276, 1332), False, 'from rally.task import validation\n'), ((1334, 1403), 'rally.task.validation.add', 'validation.add', (['"""required_platform"""'], {'platform': '"""openstack"""', 'admin': '(True)'}), "('required_platform', platform='openstack', admin=True)\n", (1348, 1403), False, 'from rally.task import validation\n'), ((1405, 1563), 'rally_openstack.task.scenario.configure', 'scenario.configure', ([], {'context': "{'admin_cleanup@openstack': ['gnocchi.archive_policy_rule']}", 'name': '"""GnocchiArchivePolicyRule.create_archive_policy_rule"""'}), "(context={'admin_cleanup@openstack': [\n 'gnocchi.archive_policy_rule']}, name=\n 'GnocchiArchivePolicyRule.create_archive_policy_rule')\n", (1423, 1563), False, 'from rally_openstack.task import scenario\n'), ((2074, 2144), 'rally.task.validation.add', 'validation.add', (['"""required_services"""'], {'services': '[consts.Service.GNOCCHI]'}), "('required_services', services=[consts.Service.GNOCCHI])\n", (2088, 2144), False, 'from rally.task import validation\n'), ((2146, 2215), 'rally.task.validation.add', 'validation.add', (['"""required_platform"""'], {'platform': '"""openstack"""', 'admin': '(True)'}), "('required_platform', platform='openstack', admin=True)\n", (2160, 2215), False, 'from rally.task import validation\n'), ((2217, 2382), 'rally_openstack.task.scenario.configure', 'scenario.configure', ([], {'context': "{'admin_cleanup@openstack': ['gnocchi.archive_policy_rule']}", 'name': '"""GnocchiArchivePolicyRule.create_delete_archive_policy_rule"""'}), "(context={'admin_cleanup@openstack': [\n 'gnocchi.archive_policy_rule']}, name=\n 'GnocchiArchivePolicyRule.create_delete_archive_policy_rule')\n", (2235, 2382), False, 'from rally_openstack.task import scenario\n')] |
import queue
import textwrap
import threading
import time
import urllib.parse
import pychromecast
def create_notify_url(text: str, lang: str, ttsspeed: float):
payload = {
"ie": "UTF-8",
"q": text,
"tl": lang,
"total": 1,
"idx": 0,
"textlen": len(text),
"client": "tw-ob",
"prev": "input",
"ttsspeed" : ttsspeed
}
params = urllib.parse.urlencode(payload, quote_via = urllib.parse.quote)
url = "https://translate.google.com/translate_tts?{}".format(params)
return url
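# Worked example (values illustrative, output abbreviated): the helper only
# URL-encodes the payload, so
#
#     create_notify_url("hello world", "en", 1.0)
#     # -> "https://translate.google.com/translate_tts?ie=UTF-8&q=hello%20world&tl=en&..."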
def split_text(text: str, lang: str):
max_split_text_len = 200
return textwrap.wrap(text, width = max_split_text_len)
class GoogleHome(pychromecast.Chromecast):
def __init__(self, host, port = None, device = None):
self.thread = None
self.mp3_url_queue = queue.Queue()
super().__init__(host, port, device)
def _play_mp3(self, timeout: int):
if self.mp3_url_queue.empty():
self.thread = None
return
url = self.mp3_url_queue.get()
self.media_controller.play_media(url, "audio/mp3")
        # wait for playback to start
time.sleep(1)
self._block_while_playing_queue(timeout)
# play next mp3
self._play_mp3(timeout)
def _block_while_playing_queue(self, timeout: int):
self.media_controller.block_until_active()
t1 = time.time()
while True:
status = self.media_controller.status
player_state = status.player_state
if player_state != "PLAYING":
break
if timeout > 0:
t2 = time.time()
if t2 - t1 >= timeout:
break
time.sleep(0.5)
def notify(self, text: str, lang: str = "en", ttsspeed: float = 1.0, timeout: int = 0):
for line in split_text(text, lang):
url = create_notify_url(line, lang, ttsspeed)
self.mp3_url_queue.put(url)
        if self.thread is None:
            self.thread = threading.Thread(target = self._play_mp3, args = (timeout,))
            self.thread.start()
def play(self, url: str, timeout: int = 0):
        if url is not None:
            self.mp3_url_queue.put(url)
        if self.thread is None:
            self.thread = threading.Thread(target = self._play_mp3, args = (timeout,))
            self.thread.start()
def pause(self):
self.media_controller.pause()
def resume(self):
self.media_controller.play()
def block_while_playing(self, timeout: int = 0):
t1 = time.time()
        while not self.mp3_url_queue.empty():
            if timeout > 0:
                t2 = time.time()
                elapsed_t = t2 - t1
                if elapsed_t >= timeout:
                    break
            # yield briefly instead of busy-spinning while the queue drains
            time.sleep(0.1)
        if self.thread is not None:
            self.thread.join()
    def is_playing(self):
        if not self.mp3_url_queue.empty():
            return True
        if self.thread is not None:
            return self.thread.is_alive()
return False
def get_googlehomes(
friendly_name = None,
ipaddr = None,
uuid = None,
tries = None,
retry_wait = None,
timeout = None
):
    if ipaddr is not None:
        # get from ipaddress
        googlehome = GoogleHome(ipaddr)
        # check friendly_name and uuid
        if friendly_name is not None and googlehome.name != friendly_name:
            return []
        if uuid is not None and str(googlehome.uuid) != uuid:
            return []
return [googlehome]
ccs, browser = pychromecast.get_chromecasts(tries, retry_wait, timeout)
googlehomes = []
for cc in ccs:
# check friendly_name and uuid
        if friendly_name is not None and cc.name != friendly_name:
            continue
        if uuid is not None and str(cc.uuid) != uuid:
            continue
cc.wait()
googlehome = GoogleHome(
host = cc.socket_client.host,
port = cc.socket_client.port,
device = cc.device,
)
googlehomes.append(googlehome)
return googlehomes
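# Minimal usage sketch (hedged: the device name, text, and timeout are
# illustrative; a matching Google Home must be reachable on the LAN):
#
#     homes = get_googlehomes(friendly_name="Living Room")
#     if homes:
#         homes[0].wait()
#         homes[0].notify("Dinner is ready", lang="en")
#         homes[0].block_while_playing(timeout=30)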
| [
"time.sleep",
"pychromecast.get_chromecasts",
"textwrap.wrap",
"threading.Thread",
"queue.Queue",
"time.time"
] | [((684, 729), 'textwrap.wrap', 'textwrap.wrap', (['text'], {'width': 'max_split_text_len'}), '(text, width=max_split_text_len)\n', (697, 729), False, 'import textwrap\n'), ((3656, 3712), 'pychromecast.get_chromecasts', 'pychromecast.get_chromecasts', (['tries', 'retry_wait', 'timeout'], {}), '(tries, retry_wait, timeout)\n', (3684, 3712), False, 'import pychromecast\n'), ((891, 904), 'queue.Queue', 'queue.Queue', ([], {}), '()\n', (902, 904), False, 'import queue\n'), ((1215, 1228), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (1225, 1228), False, 'import time\n'), ((1456, 1467), 'time.time', 'time.time', ([], {}), '()\n', (1465, 1467), False, 'import time\n'), ((2644, 2655), 'time.time', 'time.time', ([], {}), '()\n', (2653, 2655), False, 'import time\n'), ((1787, 1802), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (1797, 1802), False, 'import time\n'), ((2097, 2152), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._play_mp3', 'args': '[timeout]'}), '(target=self._play_mp3, args=[timeout])\n', (2113, 2152), False, 'import threading\n'), ((2363, 2418), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._play_mp3', 'args': '[timeout]'}), '(target=self._play_mp3, args=[timeout])\n', (2379, 2418), False, 'import threading\n'), ((1698, 1709), 'time.time', 'time.time', ([], {}), '()\n', (1707, 1709), False, 'import time\n'), ((2751, 2762), 'time.time', 'time.time', ([], {}), '()\n', (2760, 2762), False, 'import time\n')] |
from taurex.log import Logger
class LinesReader:
def __init__(self, lines):
self._lines = lines
self._count = 0
def skip(self, num=1):
self._count += num
def read_int(self, skip=1):
val = int(self._lines[self._count])
self.skip(skip)
return val
def read_float(self, skip=1):
val = float(self._lines[self._count])
self.skip(skip)
return val
def read_float_array(self, skip=1):
line = self.read_string()
split = line.split()
return [float(s) for s in split]
def read_string(self, skip=1):
val = self._lines[self._count]
self.skip(skip)
return val
def read_bool(self, skip=1):
val = int(self._lines[self._count])
self.skip(skip)
return val == 1
    def reset(self):
self._count = 0
class BroadenerData:
def __init__(self, molecule, filename, Jmax, default_gamma, default_n):
self._molecule = molecule.strip()
self._filename = filename
self._default_gamma = default_gamma
self._default_n = default_n
self._Jmax = Jmax
self._avail_codes = []
        self._quanta = {}
def add_code(self, quanta_code, quanta):
self._avail_codes.append(quanta_code)
quanta.insert(0,'J"')
self._quanta[quanta_code] = quanta
@property
def molecule(self):
return self._molecule
@property
def availableCodes(self):
return self._avail_codes
def generate_input(self, maximum_model='JJ', broadener_path='.'):
from .exocrosswriter import BroadenerInput
import os
bb = BroadenerInput(self._molecule, self._default_gamma,
self._default_n, filename=os.path.join(broadener_path, self._filename),
broadener_type='JJ' if 'a1' in self._avail_codes else 'J')
return bb
def generate_exomolbroadener(self, filename=None):
from .exomolbroads import ExomolBroadener
return ExomolBroadener(self._default_gamma, self._default_n,label_defs=self._quanta,filename=filename)
class ExomolDef(Logger):
def __init__(self, exomol_def_file):
super().__init__(self.__class__.__name__)
self.info(f'Opening {exomol_def_file}')
with open(exomol_def_file, 'r') as f:
unclean_exocross_lines = f.read().splitlines()
self.exocross_lines = [s.split('#')[0].strip()
for s in unclean_exocross_lines]
self.parse_definition()
def parse_definition(self):
lr = LinesReader(self.exocross_lines)
if lr.read_string() != 'EXOMOL.def':
raise IOError('Incorrect EXOMOL def header')
lr.skip(1)
self._molecule_slug = lr.read_string()
self._linelist_name = lr.read_string()
self._version_number = lr.read_string()
self._inchikey = lr.read_string()
self._natoms = lr.read_int()
self.info(f'Molecule is {self._molecule_slug}')
self.info(f'Linelist: {self._linelist_name} '
f'Version: {self._version_number}')
        while True:
try:
arr = lr.read_float_array()
self._mass = arr[0]
test = arr[1]
break
except (IndexError, ValueError, ):
continue
self._symmetry_group = lr.read_string()
num_irr = lr.read_int()
lr.skip(num_irr * 3)
self._max_temp = lr.read_float()
self._num_broadeners = lr.read_int()
self._dipole_avail = lr.read_bool()
self._no_cross = lr.read_int()
self._no_ktab = lr.read_int()
self._life_avail = lr.read_bool()
self._landeg_avail = lr.read_bool()
self._num_states = lr.read_int()
num_cases = lr.read_int()
self._quanta_cases = {}
for case in range(num_cases):
case_label = lr.read_string()
no_quanta = lr.read_int()
quanta_definition = []
for q in range(no_quanta):
label = lr.read_string()
                form = lr.read_string()
form = form.split()[1].strip()
descrp = lr.read_string()
quanta_definition.append((label, form, descrp))
self._quanta_cases[case_label] = quanta_definition
self._total_transitions = lr.read_int()
self._num_trans_files = lr.read_int()
self._max_wavenumber = lr.read_float()
self._highest_complete = lr.read_float()
self._max_temp_q = lr.read_float()
self._t_step = lr.read_float(skip=2)
self._default_gamma = lr.read_float()
self._default_n = lr.read_float()
self._broadener_defs = {}
if self._num_broadeners > 0:
for b in range(self._num_broadeners):
broadener_label = lr.read_string()
self.info(f'Reading broadener {broadener_label}')
broadener_filename = lr.read_string()
jmax = lr.read_int()
default_gamma = lr.read_float()
default_n = lr.read_float()
new_broad = BroadenerData(broadener_label, broadener_filename,
jmax, default_gamma, default_n)
self._broadener_defs[broadener_label] = new_broad
n_broad_quanta_set = lr.read_int()
for x in range(n_broad_quanta_set):
code_label = lr.read_string(skip=2)
no_quanta = lr.read_int()
                quanta = [lr.read_string() for x in range(no_quanta)]
new_broad.add_code(code_label, quanta)
def _pandas_state_fwf(self):
import re
import numpy as np
widths = [12,12,6,7]
headers = ['i', 'E', 'g_tot', 'J']
if self._life_avail:
widths.append(12)
headers.append('lftime')
if self._landeg_avail:
widths.append(12)
headers.append('lande-g')
# Let pandas auto determine above types
# We will be specific about the quanta
dtype = {}
form_conv = {'d' : np.int64,
'f' : np.float64,
's' : str,
'i' : np.int64 }
for case in self._quanta_cases.values():
for label, form, desc in case:
headers.append(label)
                wid = re.findall(r'\d+', form)[0]
widths.append(int(wid))
typ = form[-1].strip()
dtype[label] = form_conv[typ]
widths[1:-1] = [w+1 for w in widths[1:-1]]
        return headers, widths, dtype
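    # Worked example of the width/type parsing above (format string
    # illustrative): for a quanta format such as '%12d',
    #     re.findall(r'\d+', '%12d')[0]  -> '12'  (column width)
    #     '%12d'[-1].strip()             -> 'd'   (mapped to np.int64)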
def read_state(self, state_filename):
from .exomolstate import ExomolStates
return ExomolStates(state_filename, self._pandas_state_fwf())
@property
def maximumTemperature(self):
return self._max_temp
@property
def maximumPartitionTemperature(self):
return self._max_temp_q
@property
def maximumWavenumber(self):
return self._max_wavenumber
@property
def filePrefix(self):
return f'{self._molecule_slug}__{self._linelist_name}'
@property
def availableBroadeners(self):
return list(self._broadener_defs.keys())
def create_broadeners(self, broadener):
if broadener not in self.availableBroadeners:
raise KeyError(f'Broadener with name {broadener} not available')
else:
return self._broadener_defs[broadener].generate_input()
def create_exocross_input(self, path='.'):
from .exocrosswriter import ExocrossInput
ex = ExocrossInput(self._molecule_slug, linelist=self._linelist_name,
path=path, file_prefix=self.filePrefix)
ex.set_molar_mass(self._mass)
ex.set_range([0.01, self.maximumWavenumber])
return ex
| [
"re.findall",
"os.path.join"
] | [((1800, 1844), 'os.path.join', 'os.path.join', (['broadener_path', 'self._filename'], {}), '(broadener_path, self._filename)\n', (1812, 1844), False, 'import os\n'), ((6644, 6668), 're.findall', 're.findall', (['"""\\\\d+"""', 'form'], {}), "('\\\\d+', form)\n", (6654, 6668), False, 'import re\n')] |
import os
import json
from flask_sqlalchemy import SQLAlchemy
from flask import Flask, request, jsonify
from flask.views import MethodView
from flask.ext.cors import CORS
from database import ElasticStorage, RedisClient
from article import Article as ESArticle
app = Flask(__name__)
CORS(app)
#sql_config = json.loads(os.getenv('VCAP_SERVICES'))
#app.config['SQLALCHEMY_DATABASE_URI'] = sql_config['sqldb'][0]['credentials']['uri']
app.config['SQLALCHEMY_DATABASE_URI'] = 'postgresql+psycopg2://alius_admin:<EMAIL>:5432/alius'
db = SQLAlchemy(app)
source_map = {
'cnn.com': 'CNN', 'nytimes.com': 'New York Times',
'huffingtonpost.com': 'The Huffington Post',
'huffingtonpost.ca': 'The Huffington Post', 'theguardian.com': 'The Guardian',
'foxnews.com': 'Fox News', 'forbes.com': 'Forbes',
'timesofindia.indiatimes.com': 'The Times of India', 'bbc.co.uk': 'BBC',
'usatoday.com': 'USA Today', 'bloomberg.com': 'Bloomberg',
'wsj.com': 'The Wall Street Journal', 'reuters.com': 'Reuters',
'nbcnews.com': 'NBC News', 'money.cnn.com': 'CNN Money',
'indianexpress.com': 'The Indian Express', 'cbsnews.com': 'CBS News',
'abcnews.go.com': 'ABC News', 'latimes.com': 'LA Times',
'time.com': 'Time', 'nypost.com': 'NY Post', 'cnbc.com': 'CNBC',
'thehindu.com': 'The Hindu', 'chron.com': 'CHRON',
'theatlantic.com': 'The Atlantic', 'breitbart.com': 'Breitbart',
'sfgate.com': 'SF Gate', 'usnews.com': 'US News',
'hindustantimes.com': 'Hindustan Times', 'hollywoodreporter.com': 'The Hollywood Reporter',
'fortune.com': 'Fortune', 'chicagotribune.com': 'Chicago Tribune',
'news.com.au': 'news.com.au'
}
class Users(db.Model):
user_id = db.Column(db.String(1024), primary_key=True)
anger = db.Column(db.Float)
disgust = db.Column(db.Float)
fear = db.Column(db.Float)
joy = db.Column(db.Float)
sadness = db.Column(db.Float)
total_articles = db.Column(db.Integer)
def __init__(self, user_id, article_id):
self.user_id = user_id
es = ElasticStorage.get_instance(dev=False)
doc = ESArticle.get(article_id)
self.anger = doc.tone.anger
self.disgust = doc.tone.disgust
self.fear = doc.tone.fear
        self.joy = doc.tone.joy
        self.sadness = doc.tone.sadness
self.total_articles = 1
def to_dict(self):
return {'user_id': self.user_id, 'anger': self.anger, 'disgust': self.disgust, 'fear': self.fear,
'joy': self.joy, 'sadness': self.sadness, 'total_articles': self.total_articles}
class Articles(db.Model):
article_id = db.Column(db.String(1024), primary_key=True)
clicks = db.Column(db.Integer)
def __init__(self, article_id):
self.article_id = article_id
self.clicks = 1
def to_dict(self):
return {'article_id': self.article_id, 'clicks': self.clicks}
class UserAPI(MethodView):
def get(self):
all_users = Users.query.all()
return json.dumps([x.to_dict() for x in all_users])
def post(self):
        data = json.loads(request.data.decode('utf-8'))
        user_id = data['user_id']
        article_id = data['article_id']
user = Users.query.filter_by(user_id=user_id).first()
if user:
es = ElasticStorage.get_instance(dev=False)
doc = ESArticle.get(article_id)
user.anger += doc.tone.anger
user.disgust += doc.tone.disgust
user.fear += doc.tone.fear
            user.joy += doc.tone.joy
            user.sadness += doc.tone.sadness
user.total_articles += 1
else:
u = Users(user_id, article_id)
db.session.add(u)
db.session.commit()
return ('', 204)
app.add_url_rule('/users/', view_func=UserAPI.as_view('users'))
class ArticlesAPI(MethodView):
def get(self):
all_articles = Articles.query.all()
return json.dumps([x.to_dict() for x in all_articles])
def post(self):
article_id = json.loads(request.data.decode('utf-8'))['article_id']
article = Articles.query.filter_by(article_id=article_id).first()
if article:
article.clicks += 1
db.session.add(article)
else:
a = Articles(article_id)
db.session.add(a)
db.session.commit()
return ('', 204)
app.add_url_rule('/articles/', view_func=ArticlesAPI.as_view('articles'))
@app.route('/search', methods=['POST'])
def search():
data = json.loads(request.data.decode('utf-8'))
query = data['q']
prefs = data['prefs']
es = ElasticStorage.get_instance(dev=False)
r = RedisClient.get_instance(dev=False)
if r.hexists('popular', query.lower()):
r.hincrby('popular', query.lower())
else:
r.hset('popular', query.lower(), 1)
articles = es.query_articles(query, prefs)
articles = list(articles)
articles = list({article['title']:article for article in articles}.values())
for article in articles:
for key, value in source_map.items():
if key in article['url']:
article['source'] = value
return jsonify(
articles=articles
)
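# Hedged client sketch (assumes the app is served locally on port 5000; the
# query and preference values are illustrative):
#
#     import json, requests
#     resp = requests.post("http://localhost:5000/search",
#                          data=json.dumps({"q": "elections", "prefs": []}))
#     print(resp.json()["articles"])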
@app.route('/popular')
def popular():
r = RedisClient.get_instance(dev=False)
pop = r.hgetall('popular')
sorted_searches = sorted(pop.items(), key=lambda x:int(x[1]), reverse=True)[0:10]
final_dict = {}
for sorted_search in sorted_searches:
final_dict[sorted_search[0].decode('utf-8')] = int(sorted_search[1].decode('utf-8'))
return jsonify(final_dict)
if __name__ == "__main__":
port = os.getenv('VCAP_APP_PORT', '5000')
app.run(host='0.0.0.0', port=int(port), debug=True)
| [
"os.getenv",
"flask.Flask",
"database.RedisClient.get_instance",
"flask.ext.cors.CORS",
"flask.request.data.decode",
"database.ElasticStorage.get_instance",
"article.Article.get",
"flask_sqlalchemy.SQLAlchemy",
"flask.jsonify"
] | [((268, 283), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (273, 283), False, 'from flask import Flask, request, jsonify\n'), ((284, 293), 'flask.ext.cors.CORS', 'CORS', (['app'], {}), '(app)\n', (288, 293), False, 'from flask.ext.cors import CORS\n'), ((534, 549), 'flask_sqlalchemy.SQLAlchemy', 'SQLAlchemy', (['app'], {}), '(app)\n', (544, 549), False, 'from flask_sqlalchemy import SQLAlchemy\n'), ((4611, 4649), 'database.ElasticStorage.get_instance', 'ElasticStorage.get_instance', ([], {'dev': '(False)'}), '(dev=False)\n', (4638, 4649), False, 'from database import ElasticStorage, RedisClient\n'), ((4658, 4693), 'database.RedisClient.get_instance', 'RedisClient.get_instance', ([], {'dev': '(False)'}), '(dev=False)\n', (4682, 4693), False, 'from database import ElasticStorage, RedisClient\n'), ((5163, 5189), 'flask.jsonify', 'jsonify', ([], {'articles': 'articles'}), '(articles=articles)\n', (5170, 5189), False, 'from flask import Flask, request, jsonify\n'), ((5252, 5287), 'database.RedisClient.get_instance', 'RedisClient.get_instance', ([], {'dev': '(False)'}), '(dev=False)\n', (5276, 5287), False, 'from database import ElasticStorage, RedisClient\n'), ((5573, 5592), 'flask.jsonify', 'jsonify', (['final_dict'], {}), '(final_dict)\n', (5580, 5592), False, 'from flask import Flask, request, jsonify\n'), ((5633, 5667), 'os.getenv', 'os.getenv', (['"""VCAP_APP_PORT"""', '"""5000"""'], {}), "('VCAP_APP_PORT', '5000')\n", (5642, 5667), False, 'import os\n'), ((2039, 2077), 'database.ElasticStorage.get_instance', 'ElasticStorage.get_instance', ([], {'dev': '(False)'}), '(dev=False)\n', (2066, 2077), False, 'from database import ElasticStorage, RedisClient\n'), ((2092, 2117), 'article.Article.get', 'ESArticle.get', (['article_id'], {}), '(article_id)\n', (2105, 2117), True, 'from article import Article as ESArticle\n'), ((4523, 4551), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (4542, 4551), False, 'from flask import Flask, request, jsonify\n'), ((3285, 3323), 'database.ElasticStorage.get_instance', 'ElasticStorage.get_instance', ([], {'dev': '(False)'}), '(dev=False)\n', (3312, 3323), False, 'from database import ElasticStorage, RedisClient\n'), ((3342, 3367), 'article.Article.get', 'ESArticle.get', (['article_id'], {}), '(article_id)\n', (3355, 3367), True, 'from article import Article as ESArticle\n'), ((3072, 3100), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (3091, 3100), False, 'from flask import Flask, request, jsonify\n'), ((3145, 3173), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (3164, 3173), False, 'from flask import Flask, request, jsonify\n'), ((4030, 4058), 'flask.request.data.decode', 'request.data.decode', (['"""utf-8"""'], {}), "('utf-8')\n", (4049, 4058), False, 'from flask import Flask, request, jsonify\n')] |
import os
from qaviton.utils import filer
from qaviton.utils import path
from qaviton.utils.operating_system import s
from qaviton.version import __version__
cwd = os.getcwd()
examples = path.of(__file__)('examples')
def initial_msg(f):
def dec(*args, **kwargs):
print("""
QAVITON VERSION {}
creating qaviton framework and test examples
""".format(__version__))
f(*args, **kwargs)
return dec
def install_is_done(tests_dir):
print("""
@@@@@@@@@@@@@@@@@@@@@
@ installation done @
@@@@@@@@@@@@@@@@@@@@@
# use pip install, uninstall & freeze
# to manage your dependencies:
(venv) path/to/project> pip freeze > requirements-test.txt
# to install your requirements on a new machine(consider using git):
(venv) path/to/project> pip install -r requirements-test.txt
* your testing framework is done!
* start testing like a boss ⚛
* ______________
* / __________ \ ______
* / / \ \ / ____ \
* / / \ / \ \ / / \ \ __ __ _ ___________ _______ _ _
* | | O \ / O | | / |______| \ \ \ / / |_| |____ ____| / _____ \ | \ | |
* | | | | | ________ | \ \ / / |-| | | | | | | | \ | |
* \ \ \________/ / \ | | | | \ \ / / | | | | | | | | | | \ | |
* \ \____________/ /\ \_ | | | | \ \/ / | | | | | |_____| | | |\ \| |
* \________________/ \__| |_| |_| \__/ |_| |_| \_______/ |_| \___|
""")
def add_readme(tests_dir):
with open(cwd + s + tests_dir + s + 'README.rst', 'w+') as f:
f.write("this should be changed to a custom README file for your project\n"
"you have a nice starting point from here.\n"
"\n"
"requirements\n"
"------------\n"
"python 3.7 and above\n"
"pytest latest\n"
"\n"
"\n"
"testing examples\n"
"----------------\n"
"checkout under execute_tests/end_to_end_tests to see testing examples.\n"
"\n"
"\n"
"model-based examples\n"
"--------------------\n"
"check out the pages directory for page model examples\n"
"and services/app for a model-based-app service for testing.\n"
"\n"
"\n"
"conftest & pytest.ini\n"
"---------------------\n"
"look at the conftest.py to see how to add model-based fixtures\n"
"you can set parallel testing & reporting with pytest.ini file\n"
"\n"
"\n"
"setup your hub\n"
"--------------\n"
"check out in the data dir to set your secret user key and remote hub\n"
"and customize your supported platforms under the data/supported_platfoms.py file.\n"
"\n"
"\n"
"local hub\n"
"---------\n"
"install docker:\n"
"https://docs.docker.com/install/\n"
"\n"
"install selenoid:\n"
"go to option 2 to install with docker\n"
"https://github.com/aerokube/selenoid/blob/master/docs/quick-start-guide.adoc\n"
"\n"
"go to your secret file and change your hub url to local host:\n"
"/project/tests/data/secret.py\n"
"hub='http://localhost:4444/wd/hub'\n"
"\n"
"\n"
"run tests examples\n"
"------------------\n"
"cd to your project(in my case it's called myapp and my tests are under tests) and simply\n"
"(env) C:\\Users\\user\\PycharmProjects\\myapp>python -m pytest tests\n"
"\n"
"or run with pycharm:\n"
"https://www.jetbrains.com/pycharm/download/#section=windows\n"
"https://www.jetbrains.com/help/pycharm/pytest.html")
def add_gitignore(tests_dir):
if not filer.os.path.exists(cwd + s + '.gitignore'):
with open(cwd + s + tests_dir + s + '.gitignore', 'w+') as f:
f.write("# Byte-compiled / optimized / DLL files\n"
"__pycache__/\n"
"*.py[cod]\n"
"*$py.class\n"
"\n"
"# C extensions\n"
"*.so\n"
"\n"
"# Distribution / packaging\n"
".Python\n"
"build/\n"
"develop-eggs/\n"
"dist/\n"
"downloads/\n"
"eggs/\n"
".eggs/\n"
"lib/\n"
"lib64/\n"
"parts/\n"
"sdist/\n"
"var/\n"
"wheels/\n"
"*.egg-info/\n"
".installed.cfg\n"
"*.egg\n"
"MANIFEST\n"
"\n"
"# PyInstaller\n"
"# Usually these files are written by a python script from a template\n"
"# before PyInstaller builds the exe, so as to inject date/other infos into it.\n"
"*.manifest\n"
"*.spec\n"
"\n"
"# Installer logs\n"
"pip-log.txt\n"
"pip-delete-this-directory.txt\n"
"\n"
"# Unit test / coverage reports\n"
"htmlcov/\n"
".tox/\n"
".coverage\n"
".coverage.*\n"
".cache\n"
"nosetests.xml\n"
"coverage.xml\n"
"*.cover\n"
".hypothesis/\n"
".pytest_cache/\n"
"\n"
"# Translations\n"
"*.mo\n"
"*.pot\n"
"\n"
"# Django stuff:\n"
"*.log\n"
"\n"
"# Scrapy stuff:\n"
".scrapy\n"
"\n"
"# Sphinx documentation\n"
"docs/_build/\n"
"\n"
"# PyBuilder\n"
"target/\n"
"\n"
"# Jupyter Notebook\n"
".ipynb_checkpoints\n"
"\n"
"# pyenv\n"
".python-version\n"
"\n"
"# celery beat schedule file\n"
"celerybeat-schedule\n"
"\n"
"# SageMath parsed files\n"
"*.sage.py\n"
"\n"
"# Environments\n"
".env\n"
".venv\n"
"env/\n"
"venv/\n"
"ENV/\n"
"env.bak/\n"
"venv.bak/\n"
"\n"
"# Spyder project settings\n"
".spyderproject\n"
".spyproject\n"
"\n"
"# Rope project settings\n"
".ropeproject\n"
"\n"
"# mkdocs documentation\n"
"/site\n"
"\n"
"# mypy\n"
".mypy_cache/\n"
"\n"
"# private\n"
"*secret*")
def add_pytest_ini(tests_dir):
with open(cwd + s + tests_dir + s + 'pytest.ini', 'w+') as f:
f.write("[pytest]\n"
";addopts = -n 3\n"
";addopts = --html=report.html\n"
";addopts = --junitxml=\path\\to\\reports\n"
";addopts = --collect-only\n"
";addopts = --cov=your_app")
def add_requirements(tests_dir):
if not filer.os.path.exists(cwd + s + 'requirements-test.txt'):
open(cwd + s + 'requirements-test.txt', 'w+').close()
os.system('pip freeze > requirements-test.txt')
# TODO: add more content for different frameworks
@initial_msg
def framework(frameworks, tests_dir, params):
if '--example' in params:
filer.copy_directory(examples + s + 'simple_web', cwd + s + tests_dir)
add_readme(tests_dir)
else:
filer.copy_directory(examples + s + 'new_project', cwd + s + tests_dir)
if tests_dir != 'tests':
filer.find_replace(cwd + s + tests_dir, 'from tests.', 'from ' + tests_dir + '.', "*.py")
add_pytest_ini(tests_dir)
add_gitignore(tests_dir)
add_requirements(tests_dir)
install_is_done(tests_dir)
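# Hedged invocation sketch: note that `frameworks` is never read by the
# function body, so a caller (argument values illustrative) could bootstrap
# the example project with:
#
#     framework(None, 'tests', ['--example'])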
| [
"qaviton.utils.filer.find_replace",
"qaviton.utils.path.of",
"os.getcwd",
"qaviton.utils.filer.os.path.exists",
"os.system",
"qaviton.utils.filer.copy_directory"
] | [((165, 176), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (174, 176), False, 'import os\n'), ((188, 205), 'qaviton.utils.path.of', 'path.of', (['__file__'], {}), '(__file__)\n', (195, 205), False, 'from qaviton.utils import path\n'), ((4869, 4913), 'qaviton.utils.filer.os.path.exists', 'filer.os.path.exists', (["(cwd + s + '.gitignore')"], {}), "(cwd + s + '.gitignore')\n", (4889, 4913), False, 'from qaviton.utils import filer\n'), ((8971, 9026), 'qaviton.utils.filer.os.path.exists', 'filer.os.path.exists', (["(cwd + s + 'requirements-test.txt')"], {}), "(cwd + s + 'requirements-test.txt')\n", (8991, 9026), False, 'from qaviton.utils import filer\n'), ((9098, 9145), 'os.system', 'os.system', (['"""pip freeze > requirements-test.txt"""'], {}), "('pip freeze > requirements-test.txt')\n", (9107, 9145), False, 'import os\n'), ((9295, 9365), 'qaviton.utils.filer.copy_directory', 'filer.copy_directory', (["(examples + s + 'simple_web')", '(cwd + s + tests_dir)'], {}), "(examples + s + 'simple_web', cwd + s + tests_dir)\n", (9315, 9365), False, 'from qaviton.utils import filer\n'), ((9415, 9486), 'qaviton.utils.filer.copy_directory', 'filer.copy_directory', (["(examples + s + 'new_project')", '(cwd + s + tests_dir)'], {}), "(examples + s + 'new_project', cwd + s + tests_dir)\n", (9435, 9486), False, 'from qaviton.utils import filer\n'), ((9525, 9618), 'qaviton.utils.filer.find_replace', 'filer.find_replace', (['(cwd + s + tests_dir)', '"""from tests."""', "('from ' + tests_dir + '.')", '"""*.py"""'], {}), "(cwd + s + tests_dir, 'from tests.', 'from ' + tests_dir +\n '.', '*.py')\n", (9543, 9618), False, 'from qaviton.utils import filer\n')] |
"""
Tests for the reference loader for Buyback Authorizations.
"""
from functools import partial
from unittest import TestCase
import blaze as bz
from blaze.compute.core import swap_resources_into_scope
from contextlib2 import ExitStack
import pandas as pd
from six import iteritems
from zipline.pipeline.common import (
BUYBACK_ANNOUNCEMENT_FIELD_NAME,
CASH_FIELD_NAME,
DAYS_SINCE_PREV,
PREVIOUS_BUYBACK_ANNOUNCEMENT,
PREVIOUS_BUYBACK_CASH,
PREVIOUS_BUYBACK_SHARE_COUNT,
SHARE_COUNT_FIELD_NAME,
SID_FIELD_NAME,
TS_FIELD_NAME)
from zipline.pipeline.data import (CashBuybackAuthorizations,
ShareBuybackAuthorizations)
from zipline.pipeline.factors.events import (
BusinessDaysSinceCashBuybackAuth,
BusinessDaysSinceShareBuybackAuth
)
from zipline.pipeline.loaders.buyback_auth import \
CashBuybackAuthorizationsLoader, ShareBuybackAuthorizationsLoader
from zipline.pipeline.loaders.blaze import (
BlazeCashBuybackAuthorizationsLoader,
BlazeShareBuybackAuthorizationsLoader,
)
from zipline.utils.test_utils import (
tmp_asset_finder,
)
from .base import EventLoaderCommonMixin, DATE_FIELD_NAME
buyback_authorizations = [
    # Notation (inferred from the rename to BUYBACK_ANNOUNCEMENT_FIELD_NAME
    # below): K = knowledge/timestamp date, A = announcement date, with each
    # pattern giving their relative ordering per case.
    # K1--K2--A1--A2.
pd.DataFrame({
SHARE_COUNT_FIELD_NAME: [1, 15],
CASH_FIELD_NAME: [10, 20]
}),
# K1--K2--A2--A1.
pd.DataFrame({
SHARE_COUNT_FIELD_NAME: [7, 13],
CASH_FIELD_NAME: [10, 22]
}),
# K1--A1--K2--A2.
pd.DataFrame({
SHARE_COUNT_FIELD_NAME: [3, 1],
CASH_FIELD_NAME: [4, 7]
}),
# K1 == K2.
pd.DataFrame({
SHARE_COUNT_FIELD_NAME: [6, 23],
CASH_FIELD_NAME: [1, 2]
}),
pd.DataFrame(
columns=[SHARE_COUNT_FIELD_NAME,
CASH_FIELD_NAME],
dtype='datetime64[ns]'
),
]
def create_buyback_auth_tst_frame(cases, field_to_drop):
buyback_auth_df = {
sid:
pd.concat([df, buyback_authorizations[sid]], axis=1).drop(
field_to_drop, 1)
for sid, df
in enumerate(case.rename(columns={DATE_FIELD_NAME:
BUYBACK_ANNOUNCEMENT_FIELD_NAME}
)
for case in cases
)
}
return buyback_auth_df
class CashBuybackAuthLoaderTestCase(TestCase, EventLoaderCommonMixin):
"""
Test for cash buyback authorizations dataset.
"""
pipeline_columns = {
PREVIOUS_BUYBACK_CASH:
CashBuybackAuthorizations.cash_amount.latest,
PREVIOUS_BUYBACK_ANNOUNCEMENT:
CashBuybackAuthorizations.announcement_date.latest,
DAYS_SINCE_PREV:
BusinessDaysSinceCashBuybackAuth(),
}
@classmethod
def setUpClass(cls):
cls._cleanup_stack = stack = ExitStack()
cls.finder = stack.enter_context(
tmp_asset_finder(equities=cls.equity_info),
)
cls.cols = {}
cls.dataset = create_buyback_auth_tst_frame(cls.event_dates_cases,
SHARE_COUNT_FIELD_NAME)
cls.loader_type = CashBuybackAuthorizationsLoader
@classmethod
def tearDownClass(cls):
cls._cleanup_stack.close()
def setup(self, dates):
zip_with_floats_dates = partial(self.zip_with_floats, dates)
num_days_between_dates = partial(self.num_days_between, dates)
_expected_previous_cash = pd.DataFrame({
0: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-14') +
[10] * num_days_between_dates('2014-01-15', '2014-01-19') +
[20] * num_days_between_dates('2014-01-20', None)
),
1: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-14') +
[22] * num_days_between_dates('2014-01-15', '2014-01-19') +
[10] * num_days_between_dates('2014-01-20', None)
),
2: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-09') +
[4] * num_days_between_dates('2014-01-10', '2014-01-19') +
[7] * num_days_between_dates('2014-01-20', None)
),
3: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-09') +
[1] * num_days_between_dates('2014-01-10', '2014-01-14') +
[2] * num_days_between_dates('2014-01-15', None)
),
4: zip_with_floats_dates(['NaN'] * len(dates)),
}, index=dates)
self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT] = \
self.get_expected_previous_event_dates(dates)
self.cols[PREVIOUS_BUYBACK_CASH] = _expected_previous_cash
self.cols[DAYS_SINCE_PREV] = self._compute_busday_offsets(
self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT]
)
class ShareBuybackAuthLoaderTestCase(TestCase, EventLoaderCommonMixin):
"""
Test for share buyback authorizations dataset.
"""
pipeline_columns = {
PREVIOUS_BUYBACK_SHARE_COUNT:
ShareBuybackAuthorizations.share_count.latest,
PREVIOUS_BUYBACK_ANNOUNCEMENT:
ShareBuybackAuthorizations.announcement_date.latest,
DAYS_SINCE_PREV:
BusinessDaysSinceShareBuybackAuth(),
}
@classmethod
def setUpClass(cls):
cls._cleanup_stack = stack = ExitStack()
cls.finder = stack.enter_context(
tmp_asset_finder(equities=cls.equity_info),
)
cls.cols = {}
cls.dataset = create_buyback_auth_tst_frame(cls.event_dates_cases,
CASH_FIELD_NAME)
cls.loader_type = ShareBuybackAuthorizationsLoader
@classmethod
def tearDownClass(cls):
cls._cleanup_stack.close()
def setup(self, dates):
zip_with_floats_dates = partial(self.zip_with_floats, dates)
num_days_between_dates = partial(self.num_days_between, dates)
_expected_previous_buyback_share_count = pd.DataFrame({
0: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-14') +
[1] * num_days_between_dates('2014-01-15', '2014-01-19') +
[15] * num_days_between_dates('2014-01-20', None)
),
1: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-14') +
[13] * num_days_between_dates('2014-01-15', '2014-01-19') +
[7] * num_days_between_dates('2014-01-20', None)
),
2: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-09') +
[3] * num_days_between_dates('2014-01-10', '2014-01-19') +
[1] * num_days_between_dates('2014-01-20', None)
),
3: zip_with_floats_dates(
['NaN'] * num_days_between_dates(None, '2014-01-09') +
[6] * num_days_between_dates('2014-01-10', '2014-01-14') +
[23] * num_days_between_dates('2014-01-15', None)
),
4: zip_with_floats_dates(['NaN'] * len(dates)),
}, index=dates)
self.cols[
PREVIOUS_BUYBACK_SHARE_COUNT
] = _expected_previous_buyback_share_count
self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT] = \
self.get_expected_previous_event_dates(dates)
self.cols[DAYS_SINCE_PREV] = self._compute_busday_offsets(
self.cols[PREVIOUS_BUYBACK_ANNOUNCEMENT]
)
class BlazeCashBuybackAuthLoaderTestCase(CashBuybackAuthLoaderTestCase):
""" Test case for loading via blaze.
"""
@classmethod
def setUpClass(cls):
super(BlazeCashBuybackAuthLoaderTestCase, cls).setUpClass()
cls.loader_type = BlazeCashBuybackAuthorizationsLoader
def loader_args(self, dates):
_, mapping = super(
BlazeCashBuybackAuthLoaderTestCase,
self,
).loader_args(dates)
return (bz.Data(pd.concat(
pd.DataFrame({
BUYBACK_ANNOUNCEMENT_FIELD_NAME:
frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],
CASH_FIELD_NAME:
frame[CASH_FIELD_NAME],
TS_FIELD_NAME:
frame[TS_FIELD_NAME],
SID_FIELD_NAME: sid,
})
for sid, frame in iteritems(mapping)
).reset_index(drop=True)),)
class BlazeShareBuybackAuthLoaderTestCase(ShareBuybackAuthLoaderTestCase):
""" Test case for loading via blaze.
"""
@classmethod
def setUpClass(cls):
super(BlazeShareBuybackAuthLoaderTestCase, cls).setUpClass()
cls.loader_type = BlazeShareBuybackAuthorizationsLoader
def loader_args(self, dates):
_, mapping = super(
BlazeShareBuybackAuthLoaderTestCase,
self,
).loader_args(dates)
return (bz.Data(pd.concat(
pd.DataFrame({
BUYBACK_ANNOUNCEMENT_FIELD_NAME:
frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],
SHARE_COUNT_FIELD_NAME:
frame[SHARE_COUNT_FIELD_NAME],
TS_FIELD_NAME:
frame[TS_FIELD_NAME],
SID_FIELD_NAME: sid,
})
for sid, frame in iteritems(mapping)
).reset_index(drop=True)),)
class BlazeShareBuybackAuthLoaderNotInteractiveTestCase(
BlazeShareBuybackAuthLoaderTestCase):
"""Test case for passing a non-interactive symbol and a dict of resources.
"""
def loader_args(self, dates):
(bound_expr,) = super(
BlazeShareBuybackAuthLoaderNotInteractiveTestCase,
self,
).loader_args(dates)
return swap_resources_into_scope(bound_expr, {})
class BlazeCashBuybackAuthLoaderNotInteractiveTestCase(
BlazeCashBuybackAuthLoaderTestCase):
"""Test case for passing a non-interactive symbol and a dict of resources.
"""
def loader_args(self, dates):
(bound_expr,) = super(
BlazeCashBuybackAuthLoaderNotInteractiveTestCase,
self,
).loader_args(dates)
return swap_resources_into_scope(bound_expr, {})
| [
"zipline.pipeline.factors.events.BusinessDaysSinceCashBuybackAuth",
"zipline.utils.test_utils.tmp_asset_finder",
"blaze.compute.core.swap_resources_into_scope",
"contextlib2.ExitStack",
"zipline.pipeline.factors.events.BusinessDaysSinceShareBuybackAuth",
"functools.partial",
"pandas.DataFrame",
"six.i... | [((1243, 1317), 'pandas.DataFrame', 'pd.DataFrame', (['{SHARE_COUNT_FIELD_NAME: [1, 15], CASH_FIELD_NAME: [10, 20]}'], {}), '({SHARE_COUNT_FIELD_NAME: [1, 15], CASH_FIELD_NAME: [10, 20]})\n', (1255, 1317), True, 'import pandas as pd\n'), ((1367, 1441), 'pandas.DataFrame', 'pd.DataFrame', (['{SHARE_COUNT_FIELD_NAME: [7, 13], CASH_FIELD_NAME: [10, 22]}'], {}), '({SHARE_COUNT_FIELD_NAME: [7, 13], CASH_FIELD_NAME: [10, 22]})\n', (1379, 1441), True, 'import pandas as pd\n'), ((1491, 1562), 'pandas.DataFrame', 'pd.DataFrame', (['{SHARE_COUNT_FIELD_NAME: [3, 1], CASH_FIELD_NAME: [4, 7]}'], {}), '({SHARE_COUNT_FIELD_NAME: [3, 1], CASH_FIELD_NAME: [4, 7]})\n', (1503, 1562), True, 'import pandas as pd\n'), ((1606, 1678), 'pandas.DataFrame', 'pd.DataFrame', (['{SHARE_COUNT_FIELD_NAME: [6, 23], CASH_FIELD_NAME: [1, 2]}'], {}), '({SHARE_COUNT_FIELD_NAME: [6, 23], CASH_FIELD_NAME: [1, 2]})\n', (1618, 1678), True, 'import pandas as pd\n'), ((1706, 1798), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': '[SHARE_COUNT_FIELD_NAME, CASH_FIELD_NAME]', 'dtype': '"""datetime64[ns]"""'}), "(columns=[SHARE_COUNT_FIELD_NAME, CASH_FIELD_NAME], dtype=\n 'datetime64[ns]')\n", (1718, 1798), True, 'import pandas as pd\n'), ((2746, 2780), 'zipline.pipeline.factors.events.BusinessDaysSinceCashBuybackAuth', 'BusinessDaysSinceCashBuybackAuth', ([], {}), '()\n', (2778, 2780), False, 'from zipline.pipeline.factors.events import BusinessDaysSinceCashBuybackAuth, BusinessDaysSinceShareBuybackAuth\n'), ((2868, 2879), 'contextlib2.ExitStack', 'ExitStack', ([], {}), '()\n', (2877, 2879), False, 'from contextlib2 import ExitStack\n'), ((3361, 3397), 'functools.partial', 'partial', (['self.zip_with_floats', 'dates'], {}), '(self.zip_with_floats, dates)\n', (3368, 3397), False, 'from functools import partial\n'), ((3431, 3468), 'functools.partial', 'partial', (['self.num_days_between', 'dates'], {}), '(self.num_days_between, dates)\n', (3438, 3468), False, 'from functools import partial\n'), ((5374, 5409), 'zipline.pipeline.factors.events.BusinessDaysSinceShareBuybackAuth', 'BusinessDaysSinceShareBuybackAuth', ([], {}), '()\n', (5407, 5409), False, 'from zipline.pipeline.factors.events import BusinessDaysSinceCashBuybackAuth, BusinessDaysSinceShareBuybackAuth\n'), ((5497, 5508), 'contextlib2.ExitStack', 'ExitStack', ([], {}), '()\n', (5506, 5508), False, 'from contextlib2 import ExitStack\n'), ((5984, 6020), 'functools.partial', 'partial', (['self.zip_with_floats', 'dates'], {}), '(self.zip_with_floats, dates)\n', (5991, 6020), False, 'from functools import partial\n'), ((6054, 6091), 'functools.partial', 'partial', (['self.num_days_between', 'dates'], {}), '(self.num_days_between, dates)\n', (6061, 6091), False, 'from functools import partial\n'), ((9878, 9919), 'blaze.compute.core.swap_resources_into_scope', 'swap_resources_into_scope', (['bound_expr', '{}'], {}), '(bound_expr, {})\n', (9903, 9919), False, 'from blaze.compute.core import swap_resources_into_scope\n'), ((10299, 10340), 'blaze.compute.core.swap_resources_into_scope', 'swap_resources_into_scope', (['bound_expr', '{}'], {}), '(bound_expr, {})\n', (10324, 10340), False, 'from blaze.compute.core import swap_resources_into_scope\n'), ((2934, 2976), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', ([], {'equities': 'cls.equity_info'}), '(equities=cls.equity_info)\n', (2950, 2976), False, 'from zipline.utils.test_utils import tmp_asset_finder\n'), ((5563, 5605), 'zipline.utils.test_utils.tmp_asset_finder', 'tmp_asset_finder', ([], 
{'equities': 'cls.equity_info'}), '(equities=cls.equity_info)\n', (5579, 5605), False, 'from zipline.utils.test_utils import tmp_asset_finder\n'), ((1944, 1996), 'pandas.concat', 'pd.concat', (['[df, buyback_authorizations[sid]]'], {'axis': '(1)'}), '([df, buyback_authorizations[sid]], axis=1)\n', (1953, 1996), True, 'import pandas as pd\n'), ((8153, 8354), 'pandas.DataFrame', 'pd.DataFrame', (['{BUYBACK_ANNOUNCEMENT_FIELD_NAME: frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],\n CASH_FIELD_NAME: frame[CASH_FIELD_NAME], TS_FIELD_NAME: frame[\n TS_FIELD_NAME], SID_FIELD_NAME: sid}'], {}), '({BUYBACK_ANNOUNCEMENT_FIELD_NAME: frame[\n BUYBACK_ANNOUNCEMENT_FIELD_NAME], CASH_FIELD_NAME: frame[\n CASH_FIELD_NAME], TS_FIELD_NAME: frame[TS_FIELD_NAME], SID_FIELD_NAME: sid}\n )\n', (8165, 8354), True, 'import pandas as pd\n'), ((9071, 9285), 'pandas.DataFrame', 'pd.DataFrame', (['{BUYBACK_ANNOUNCEMENT_FIELD_NAME: frame[BUYBACK_ANNOUNCEMENT_FIELD_NAME],\n SHARE_COUNT_FIELD_NAME: frame[SHARE_COUNT_FIELD_NAME], TS_FIELD_NAME:\n frame[TS_FIELD_NAME], SID_FIELD_NAME: sid}'], {}), '({BUYBACK_ANNOUNCEMENT_FIELD_NAME: frame[\n BUYBACK_ANNOUNCEMENT_FIELD_NAME], SHARE_COUNT_FIELD_NAME: frame[\n SHARE_COUNT_FIELD_NAME], TS_FIELD_NAME: frame[TS_FIELD_NAME],\n SID_FIELD_NAME: sid})\n', (9083, 9285), True, 'import pandas as pd\n'), ((8509, 8527), 'six.iteritems', 'iteritems', (['mapping'], {}), '(mapping)\n', (8518, 8527), False, 'from six import iteritems\n'), ((9441, 9459), 'six.iteritems', 'iteritems', (['mapping'], {}), '(mapping)\n', (9450, 9459), False, 'from six import iteritems\n')] |
import pymongo
from bson.objectid import ObjectId
# MongoDB insert examples
def main():
client = pymongo.MongoClient(host='172.16.17.32', port=27017)
db = client.test
collection = db.students
    # Insert a single document
student = {
'id': '20170101',
'name': 'Kevin',
'age': 20,
'gender': 'male'
}
    # Every document has an _id attribute that uniquely identifies it.
    # If it is not set explicitly, MongoDB generates an _id of type ObjectId automatically.
    # The insert() method returns the _id value after it runs.
result = collection.insert(student)
print(result)
    # Insert multiple documents
student1 = {
'id': '20170101',
'name': 'Jordan',
'age': 20,
'gender': 'male'
}
student2 = {
'id': '20170202',
'name': 'Mike',
'age': 20,
'gender': 'male'
}
    # Pass the documents in as a list here;
    # the return value is a list as well.
result = collection.insert([student1, student2])
print(result)
    # The official pymongo docs discourage insert();
    # insert_one and insert_many are recommended for inserting one or many documents.
student = {
'id': '20170101',
'name': 'Jordan',
'age': 20,
'gender': 'male'
}
result = collection.insert_one(student)
print(result)
print(result.inserted_id)
student1 = {
'id': '20170101',
'name': 'Jordan',
'age': 20,
'gender': 'male'
}
student2 = {
'id': '20170202',
'name': 'Mike',
'age': 20,
'gender': 'male'
}
result = collection.insert_many([student1, student2])
print(result)
print(result.inserted_ids)
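    # Hedged follow-up (not in the original): each inserted_id is an ObjectId,
    # so the otherwise-unused ObjectId import above lets you round-trip a
    # string id back into a query, e.g.:
    #
    #     doc = collection.find_one({'_id': ObjectId(str(result.inserted_ids[0]))})
    #     print(doc)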
if __name__ == '__main__':
main() | [
"pymongo.MongoClient"
] | [((89, 141), 'pymongo.MongoClient', 'pymongo.MongoClient', ([], {'host': '"""172.16.17.32"""', 'port': '(27017)'}), "(host='172.16.17.32', port=27017)\n", (108, 141), False, 'import pymongo\n')] |
#
# Copyright (C) 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from gerrymander.model import ModelChange
from gerrymander.model import ModelEvent
class OperationBase(object):
def __init__(self, client):
self.client = client
class OperationQuery(OperationBase):
PATCHES_NONE = "none"
PATCHES_CURRENT = "current"
PATCHES_ALL = "all"
STATUS_SUBMITTED = "submitted"
STATUS_REVIEWED = "reviewed"
STATUS_MERGED = "merged"
STATUS_ABANDONED = "abandoned"
STATUS_OPEN = "open"
STATUS_CLOSED = "closed"
def __init__(self, client, terms={}, rawquery=None, patches=PATCHES_NONE,
approvals=False, files=False, comments=False):
OperationBase.__init__(self, client)
self.terms = terms
self.rawquery = rawquery
self.patches = patches
self.approvals = approvals
self.files = files
self.comments = comments
if self.patches == OperationQuery.PATCHES_NONE:
if self.approvals:
raise Exception("approvals cannot be requested without patches")
if self.files:
raise Exception("files cannot be requested without patches")
def get_args(self, limit=None, sortkey=None):
args = ["query", "--format=JSON"]
if self.patches == OperationQuery.PATCHES_CURRENT:
args.append("--current-patch-set")
elif self.patches == OperationQuery.PATCHES_ALL:
args.append("--patch-sets")
if self.approvals:
args.append("--all-approvals")
if self.files:
args.append("--files")
if self.comments:
args.append("--comments")
clauses = []
if limit is not None:
clauses.append("limit:" + str(limit))
if sortkey is not None:
clauses.append("resume_sortkey:" + sortkey)
if self.rawquery is not None:
clauses.append("(" + self.rawquery + ")")
terms = list(self.terms.keys())
terms.sort()
for term in terms:
            negateAll = False
            # use a distinct name so the list driving the loop isn't shadowed
            values = self.terms[term]
            if len(values) > 0 and values[0] == "!":
                negateAll = True
                values = values[1:]
            if len(values) == 0:
                continue
            subclauses = []
            for value in values:
                subclauses.append("%s:%s" % (term, value))
clause = " OR ".join(subclauses)
if negateAll:
clause = "( NOT ( " + clause + " ) )"
else:
clause = "( " + clause + " )"
clauses.append(clause)
args.append(" AND ".join(clauses))
return args
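    # Worked example of the clause assembly above (terms illustrative):
    # terms={'owner': ['!', 'bob'], 'project': ['foo']} yields a final
    # argument of
    #     ( NOT ( owner:bob ) ) AND ( project:foo )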
def run(self, cb, limit=None):
class tracker(object):
def __init__(self):
self.gotany = True
self.count = 0
self.sortkey = None
c = tracker()
def mycb(line):
if 'rowCount' in line:
return
if 'type' in line and line['type'] == "error":
raise Exception(line['message'])
change = ModelChange.from_json(line)
if "sortKey" in line:
c.sortkey = line["sortKey"]
c.gotany = True
c.count = c.count + 1
cb(change)
if limit is None:
while c.gotany:
c.gotany = False
self.client.run(self.get_args(500, c.sortkey), mycb)
else:
while c.count < limit and c.gotany:
want = limit - c.count
if want > 500:
want = 500
c.gotany = False
self.client.run(self.get_args(want, c.sortkey), mycb)
return 0
class OperationWatch(OperationBase):
def __init__(self, client):
OperationBase.__init__(self, client)
def run(self, cb):
def mycb(line):
event = ModelEvent.from_json(line)
if event:
cb(event)
return self.client.run(["stream-events"], mycb)
| [
"gerrymander.model.ModelEvent.from_json",
"gerrymander.model.ModelChange.from_json"
] | [((3660, 3687), 'gerrymander.model.ModelChange.from_json', 'ModelChange.from_json', (['line'], {}), '(line)\n', (3681, 3687), False, 'from gerrymander.model import ModelChange\n'), ((4476, 4502), 'gerrymander.model.ModelEvent.from_json', 'ModelEvent.from_json', (['line'], {}), '(line)\n', (4496, 4502), False, 'from gerrymander.model import ModelEvent\n')] |
import rospy
import numpy as np
import cv2
class ScalarStable(object):
"""Represents a stabilized scalar"""
def __init__(self,
x=.0,
vx=.0,
p_cov=.03, m_cov=.01,
time=None):
"""ScalarStabilized constructor"""
self.x = x
self.vx = vx
self.p_cov = p_cov
self.m_cov = m_cov
self.filter = cv2.KalmanFilter(2, 1)
self.filter.statePost = self.to_array()
        # measure position only: H maps the [x, vx] state to the scalar x
        self.filter.measurementMatrix = np.array([[1, 0]], np.float32)
self.__update_noise_cov(p_cov, m_cov)
if time is None:
self.last_update = rospy.Time().now()
else:
self.last_update = time
def from_array(self, array):
"""Updates the scalar stabilized state from array"""
assert array.shape == (2, 1)
        # statePost is shape (2, 1); keep x and vx as plain floats
        self.x = float(array[0])
        self.vx = float(array[1])
self.filter.statePre = self.filter.statePost
def to_array(self):
"""Returns the scalar stabilizer state array representation"""
return np.array([[self.x], [self.vx]], np.float32)
def position(self):
"""Returns the scalar's position"""
return self.x
def velocity(self):
"""Returns the scalar's velocity"""
return self.vx
def update(self, x, time=None, m_cov=None):
"""Updates/Filter the scalar"""
if m_cov is not None:
self.__update_noise_cov(self.p_cov, m_cov)
self.__update_time(time=time)
self.filter.predict()
measurement = np.array([[np.float32(x)]])
assert measurement.shape == (1, 1)
self.filter.correct(measurement)
self.from_array(self.filter.statePost)
def predict(self, time=None):
"""Predicts the scalar state"""
self.__update_time(time=time)
self.filter.predict()
self.from_array(self.filter.statePost)
def __update_noise_cov(self, p_cov, m_cov):
"""Updates the process and measurement covariances"""
self.filter.processNoiseCov = np.array([[1, 0],
[0, 1]], np.float32) * p_cov
self.filter.measurementNoiseCov = np.array([[1]], np.float32) * m_cov
def __update_transition(self, dt):
self.filter.transitionMatrix = np.array([[1, dt],
[0, 1]], np.float32)
def __update_time(self, time=None):
if time is None:
now = rospy.Time().now()
else:
now = time
elapsed_time = now - self.last_update
self.last_update = now
self.__update_transition(elapsed_time.to_sec())
def __len__(self):
return 1
def __add__(self, scalar):
return self.x + scalar.x
def __sub__(self, scalar):
return self.x - scalar.x
def __str__(self):
        return "{}".format(self.to_array())
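# Minimal usage sketch (hedged: the measurement values are illustrative, and
# explicit rospy.Time stamps avoid needing a running ROS node):
#
#     s = ScalarStable(x=0.0, time=rospy.Time.from_sec(0.0))
#     s.update(1.0, time=rospy.Time.from_sec(0.1))
#     s.predict(time=rospy.Time.from_sec(0.2))
#     print(s.position(), s.velocity())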
| [
"numpy.array",
"rospy.Time",
"numpy.float32",
"cv2.KalmanFilter"
] | [((411, 433), 'cv2.KalmanFilter', 'cv2.KalmanFilter', (['(2)', '(1)'], {}), '(2, 1)\n', (427, 433), False, 'import cv2\n'), ((522, 552), 'numpy.array', 'np.array', (['[[1, 1]]', 'np.float32'], {}), '([[1, 1]], np.float32)\n', (530, 552), True, 'import numpy as np\n'), ((1073, 1116), 'numpy.array', 'np.array', (['[[self.x], [self.vx]]', 'np.float32'], {}), '([[self.x], [self.vx]], np.float32)\n', (1081, 1116), True, 'import numpy as np\n'), ((2315, 2354), 'numpy.array', 'np.array', (['[[1, dt], [0, 1]]', 'np.float32'], {}), '([[1, dt], [0, 1]], np.float32)\n', (2323, 2354), True, 'import numpy as np\n'), ((2062, 2100), 'numpy.array', 'np.array', (['[[1, 0], [0, 1]]', 'np.float32'], {}), '([[1, 0], [0, 1]], np.float32)\n', (2070, 2100), True, 'import numpy as np\n'), ((2200, 2227), 'numpy.array', 'np.array', (['[[1]]', 'np.float32'], {}), '([[1]], np.float32)\n', (2208, 2227), True, 'import numpy as np\n'), ((655, 667), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (665, 667), False, 'import rospy\n'), ((1575, 1588), 'numpy.float32', 'np.float32', (['x'], {}), '(x)\n', (1585, 1588), True, 'import numpy as np\n'), ((2488, 2500), 'rospy.Time', 'rospy.Time', ([], {}), '()\n', (2498, 2500), False, 'import rospy\n')] |
#!/usr/bin/env python
import os
import sys
import sqlite3
import pandas as pd
import numpy as np
from scraper import create_data_folder, read_config
from collections import OrderedDict
def main():
"""
Mainly for debugging purposes.
"""
config_file = read_config()
# Pick a file
try:
csv_name = os.listdir(config_file["downloaded_data_path"])[0]
except:
print("Could not read csv file.. Please check you've downloaded data beforehand using scraper.py.")
exit(1)
# Read the data
df = read_data(csv_name, config_file)
# Extract information
sanitized_dataframe = extract_event_information(df)
# Save extracted information
create_data_folder(config_file["extracted_data_path"])
save_dataframe(sanitized_dataframe, "test", config_file)
def save_dataframe(df, df_root_name, config_file):
"""
Handles all the saving process into SQL and CSV formats.
@Param df: dataframe to save.
@Param df_root_name: name of the file to create without the extension.
@Param config_file: Configuration file.
"""
sqlite_read_path = os.path.join(config_file["extracted_data_path"] , f"{df_root_name}.db")
csv_save_path = os.path.join(config_file["extracted_data_path"] , f"{df_root_name}.csv")
save_dataframe_to_sqlite(df, sqlite_read_path)
save_dataframe_to_csv(sqlite_read_path, csv_save_path)
def save_dataframe_to_csv(db_path, save_path):
"""
Saves the data as csv in the given path by reading the sqlite3 database.
Makes sure to merge the values with those already existing at the same
location (event, latitude, location).
@Param db_path: path to the sqlite3 database.
@Param save_path: path to the csv file to create.
"""
# Read the SQL database
db = sqlite3.connect(db_path)
db_df = pd.read_sql_query("SELECT * FROM events", db)
# Transforming columns to make them compatible with storing multiple values
db_df["event_document"] = db_df["event_document"].apply(lambda x: [x])
db_df["event_date"] = db_df["event_date"].apply(lambda x: [x])
db_df["event_importance"] = db_df["event_importance"].apply(lambda x: [x])
db_df["event_source_name"] = db_df["event_source_name"].apply(lambda x: [x])
# merge lines with identical position and event.
db_df = db_df.groupby(["event", "event_latitude", "event_longitude"], as_index=False).aggregate({'event_document':np.sum, "event_importance": np.sum, "event_date": np.sum, "event_source_name": np.sum})
# Storing the information
db_df.to_csv(save_path, mode='w', index=False)
# Closing the database connexion
db.commit()
db.close()
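# Hedged illustration of the merge above: because every list-valued column is
# aggregated with np.sum, Python list concatenation does the merging — two
# rows sharing (event, latitude, longitude) with event_date [20200101] and
# [20200102] collapse into one row with event_date [20200101, 20200102].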
def read_data(csv_name, config_file, add_root_dir=True):
"""
Reads the csv file given and returns the associated dataframe.
@Param csv_name: Name of the csv file to read.
    @Param config_file: Configuration file.
    @Param add_root_dir: If True, prepend the configured download folder to csv_name.
    @Return: Dataframe containing the csv information.
"""
print("Reading the csv file...")
csv = csv_name
if add_root_dir:
data_dir = config_file["downloaded_data_path"]
csv = os.path.join(data_dir, csv_name)
pd.set_option('display.float_format', lambda x: '%.3f' % x) # Avoid scientific notation
dataframe = pd.read_csv(csv,
delimiter = "\t",
names=["ID", "event_date", "source_identifier", "source_name", "document_id", "V1Counts_10", "V2_1Counts", "V1Themes", "V2EnhancedThemes", "V1Locations", "V2EnhancedLocations", "V1Persons",
"V2EnhancedPersons", "V1organizations", "V2EnhancedOrganizations", "V1_5tone", "V2_1EnhancedDates", "V2GCam", "V2_1SharingImage", "V2_1RelatedImages", "V2_1SocialImageEmbeds", "V2_1SocialVideoEmbeds",
"V2_1Quotations", "V2_1AllNames", "V2_1Amounts", "V2_1TranslationInfo", "V2ExtrasXML"],
encoding="ISO-8859-1")
return dataframe
def extract_event_information(dataframe):
"""
Extracts the information related to the events from the dataframe and returns a transformed dataframe.
The new dataframe contains information related to the event type, its importance and position (lat, long).
@Params dataframe: represents all the information contained in the initial csv.
@Return: dataframe containing the extracted information regarding the events.
"""
print("Extracting information from the csv file...")
events_columns = ["event", "event_importance", "event_latitude", "event_longitude"]
sanitized_dataframe = pd.DataFrame(columns=events_columns)
# Removing NaN events
main_dataframe = dataframe[["event_date", "V1Counts_10", "source_name", "document_id"]].copy()
main_series = main_dataframe.dropna(0)
for idx, row in main_series.iterrows():
event_date = row[0]
event_source_name = row[2]
event_document = row[3]
event_details = row[1].split("#")
event_dict = OrderedDict()
event_dict["event_date"] = event_date
event_dict["event_source_name"] = event_source_name
event_dict["event_document"] = event_document
event_dict["event"] = event_details[0]
event_dict["event_importance"] = event_details[1]
event_dict["event_latitude"] = event_details[7]
event_dict["event_longitude"] = event_details[8]
sanitized_dataframe = sanitized_dataframe.append(event_dict, ignore_index=True)
return sanitized_dataframe
def save_dataframe_to_sqlite(sanitized_dataframe, destination_file):
"""
Saves the dataframe information to a sqlite3 database.
@Param sanitized_dataframe: Dataframe containing the information to save.
@Param destination_file: Path to the database to save the information in.
If the database doesn't exist, creates it.
"""
conn = sqlite3.connect(destination_file)
c = conn.cursor()
# Create table
try:
c.execute('''CREATE TABLE events
(event text, event_importance text, event_latitude real, event_longitude real, event_date integer, event_document text, event_source_name text, unique(event_date, event, event_importance, event_latitude, event_longitude))''')
print("Created event table")
except Exception as e:
print(e)
# Populating the database
for idx, row in sanitized_dataframe.iterrows():
try:
# Before adding, we check if the element has been reported in the same day.
            if row[2] == "":
                row[2] = 0
            if row[3] == "":
                row[3] = 0
c.execute(f"SELECT event, event_importance, event_latitude, event_longitude FROM events WHERE event='{row[0]}' AND event_importance={int(row[1])} AND event_latitude={float(row[2])} AND event_longitude={float(row[3])}")
result = c.fetchall()
if len(result) == 0:
try:
c.execute(f"INSERT INTO events VALUES ('{row[0]}', '{row[1]}', '{row[2]}', '{row[3]}', '{row[4]}', '{row[5]}', '{row[6]}')")
except sqlite3.IntegrityError as e:
# Duplicated row
pass
except:
print("Unexpected error:", sys.exc_info()[0])
exit(1)
except Exception as e:
print("Unexpected error:", sys.exc_info()[0], e)
exit(1)
# Save (commit) the changes
conn.commit()
conn.close()
def save_dataframe_to_txt(sanitized_dataframe, destination_file):
"""
Saves the dataframe information to a txt file.
@Param sanitized_dataframe: Dataframe containing the information to save.
@Param destination_file: Path to the file to save the information in.
"""
# TODO: Change to a sqlite database ?
print("Storing the event information into a txt file...")
np.savetxt(destination_file, sanitized_dataframe.values, fmt='%s', delimiter="\t",
header="event\tevent_importance\tevent_latitude\tevent_longitude")
if __name__ == "__main__":
main()
| [
"pandas.read_sql_query",
"scraper.read_config",
"collections.OrderedDict",
"os.listdir",
"sqlite3.connect",
"pandas.read_csv",
"os.path.join",
"pandas.set_option",
"sys.exc_info",
"scraper.create_data_folder",
"numpy.savetxt",
"pandas.DataFrame"
] | [((269, 282), 'scraper.read_config', 'read_config', ([], {}), '()\n', (280, 282), False, 'from scraper import create_data_folder, read_config\n'), ((701, 755), 'scraper.create_data_folder', 'create_data_folder', (["config_file['extracted_data_path']"], {}), "(config_file['extracted_data_path'])\n", (719, 755), False, 'from scraper import create_data_folder, read_config\n'), ((1123, 1193), 'os.path.join', 'os.path.join', (["config_file['extracted_data_path']", 'f"""{df_root_name}.db"""'], {}), "(config_file['extracted_data_path'], f'{df_root_name}.db')\n", (1135, 1193), False, 'import os\n'), ((1215, 1286), 'os.path.join', 'os.path.join', (["config_file['extracted_data_path']", 'f"""{df_root_name}.csv"""'], {}), "(config_file['extracted_data_path'], f'{df_root_name}.csv')\n", (1227, 1286), False, 'import os\n'), ((1799, 1823), 'sqlite3.connect', 'sqlite3.connect', (['db_path'], {}), '(db_path)\n', (1814, 1823), False, 'import sqlite3\n'), ((1836, 1881), 'pandas.read_sql_query', 'pd.read_sql_query', (['"""SELECT * FROM events"""', 'db'], {}), "('SELECT * FROM events', db)\n", (1853, 1881), True, 'import pandas as pd\n'), ((3154, 3213), 'pandas.set_option', 'pd.set_option', (['"""display.float_format"""', "(lambda x: '%.3f' % x)"], {}), "('display.float_format', lambda x: '%.3f' % x)\n", (3167, 3213), True, 'import pandas as pd\n'), ((3259, 3825), 'pandas.read_csv', 'pd.read_csv', (['csv'], {'delimiter': '"""\t"""', 'names': "['ID', 'event_date', 'source_identifier', 'source_name', 'document_id',\n 'V1Counts_10', 'V2_1Counts', 'V1Themes', 'V2EnhancedThemes',\n 'V1Locations', 'V2EnhancedLocations', 'V1Persons', 'V2EnhancedPersons',\n 'V1organizations', 'V2EnhancedOrganizations', 'V1_5tone',\n 'V2_1EnhancedDates', 'V2GCam', 'V2_1SharingImage', 'V2_1RelatedImages',\n 'V2_1SocialImageEmbeds', 'V2_1SocialVideoEmbeds', 'V2_1Quotations',\n 'V2_1AllNames', 'V2_1Amounts', 'V2_1TranslationInfo', 'V2ExtrasXML']", 'encoding': '"""ISO-8859-1"""'}), "(csv, delimiter='\\t', names=['ID', 'event_date',\n 'source_identifier', 'source_name', 'document_id', 'V1Counts_10',\n 'V2_1Counts', 'V1Themes', 'V2EnhancedThemes', 'V1Locations',\n 'V2EnhancedLocations', 'V1Persons', 'V2EnhancedPersons',\n 'V1organizations', 'V2EnhancedOrganizations', 'V1_5tone',\n 'V2_1EnhancedDates', 'V2GCam', 'V2_1SharingImage', 'V2_1RelatedImages',\n 'V2_1SocialImageEmbeds', 'V2_1SocialVideoEmbeds', 'V2_1Quotations',\n 'V2_1AllNames', 'V2_1Amounts', 'V2_1TranslationInfo', 'V2ExtrasXML'],\n encoding='ISO-8859-1')\n", (3270, 3825), True, 'import pandas as pd\n'), ((4587, 4623), 'pandas.DataFrame', 'pd.DataFrame', ([], {'columns': 'events_columns'}), '(columns=events_columns)\n', (4599, 4623), True, 'import pandas as pd\n'), ((5895, 5928), 'sqlite3.connect', 'sqlite3.connect', (['destination_file'], {}), '(destination_file)\n', (5910, 5928), False, 'import sqlite3\n'), ((7915, 8073), 'numpy.savetxt', 'np.savetxt', (['destination_file', 'sanitized_dataframe.values'], {'fmt': '"""%s"""', 'delimiter': '"""\t"""', 'header': '"""event\tevent_importance\tevent_latitude\tevent_longitude"""'}), "(destination_file, sanitized_dataframe.values, fmt='%s',\n delimiter='\\t', header=\n 'event\\tevent_importance\\tevent_latitude\\tevent_longitude')\n", (7925, 8073), True, 'import numpy as np\n'), ((3116, 3148), 'os.path.join', 'os.path.join', (['data_dir', 'csv_name'], {}), '(data_dir, csv_name)\n', (3128, 3148), False, 'import os\n'), ((4997, 5010), 'collections.OrderedDict', 'OrderedDict', ([], {}), '()\n', (5008, 5010), False, 'from collections import OrderedDict\n'), ((330, 377), 'os.listdir', 'os.listdir', (["config_file['downloaded_data_path']"], {}), "(config_file['downloaded_data_path'])\n", (340, 377), False, 'import os\n'), ((7406, 7420), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7418, 7420), False, 'import sys\n'), ((7288, 7302), 'sys.exc_info', 'sys.exc_info', ([], {}), '()\n', (7300, 7302), False, 'import sys\n')] |
import nengo
import pytest
from nengo_spinnaker.builder import Model
from nengo_spinnaker.builder.ports import OutputPort, InputPort
from nengo_spinnaker.node_io import ethernet as ethernet_io
from nengo_spinnaker.operators import SDPReceiver, SDPTransmitter


@pytest.mark.parametrize("transmission_period", [0.001, 0.002])
def test_Ethernet_init(transmission_period):
    """Test that the Ethernet initialisation creates an empty host network and
    stores the given transmission period.
    """
# Create the EthernetIO
io = ethernet_io.Ethernet(transmission_period=transmission_period)
# Check that we stored the transmission period
assert io.transmission_period == transmission_period
    # Check that there is an (empty) host network
assert io.host_network.all_objects == list()
assert io.host_network.all_connections == list()
assert io.host_network.all_probes == list()
# Check that the node input dictionary and lock are present
with io.node_input_lock:
assert io.node_input == dict()


def test_get_spinnaker_source_for_node():
    """Check that getting the SpiNNaker source for a Node returns an SDP Rx
    operator as the source object, with OutputPort.standard as the port, and
    that the spec marks the connection as latching.
    """
with nengo.Network():
a = nengo.Node(lambda t: t**2, size_out=1)
b = nengo.Ensemble(100, 1)
a_b = nengo.Connection(a, b)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec = io.get_node_source(model, a_b)
assert isinstance(spec.target.obj, SDPReceiver)
assert spec.target.port is OutputPort.standard
assert spec.latching
assert model.extra_operators == [spec.target.obj]


def test_get_spinnaker_source_for_node_repeated():
"""Getting the source twice for the same Node should return the same
object.
"""
with nengo.Network():
a = nengo.Node(lambda t: t**2, size_out=1)
b = nengo.Ensemble(100, 1)
a_b0 = nengo.Connection(a, b)
a_b1 = nengo.Connection(a, b, transform=-0.5)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec0 = io.get_node_source(model, a_b0)
spec1 = io.get_node_source(model, a_b1)
assert spec0.target.obj is spec1.target.obj
assert model.extra_operators == [spec0.target.obj]


def test_get_spinnaker_sink_for_node():
"""Check that getting the SpiNNaker sink for a Node returns an SDP Tx
operator as the sink object with InputPort.standard as the port.
"""
with nengo.Network():
a = nengo.Ensemble(100, 1)
b = nengo.Node(lambda t, x: None, size_in=1)
a_b = nengo.Connection(a, b)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec = io.get_node_sink(model, a_b)
assert isinstance(spec.target.obj, SDPTransmitter)
assert spec.target.port is InputPort.standard
assert model.extra_operators == [spec.target.obj]


def test_get_spinnaker_sink_for_node_repeated():
"""Check that getting the SpiNNaker sink for a Node twice returns the same
target.
"""
with nengo.Network():
a = nengo.Ensemble(100, 1)
b = nengo.Node(lambda t, x: None, size_in=1)
a_b0 = nengo.Connection(a, b)
a_b1 = nengo.Connection(a, b, synapse=0.3)
# Create an empty model and an Ethernet object
model = Model()
io = ethernet_io.Ethernet()
spec0 = io.get_node_sink(model, a_b0)
spec1 = io.get_node_sink(model, a_b1)
assert spec0.target.obj is spec1.target.obj
assert model.extra_operators == [spec0.target.obj]
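

# A hedged sketch (not part of the original suite): a small helper restating
# the reuse property exercised above for any number of outgoing connections.
# It assumes only the API already used in these tests, namely that
# io.get_node_source(model, conn) returns a spec whose target.obj is the
# shared SDPReceiver operator.
def _sources_share_operator(model, io, connections):
    """Return True iff all connections map onto a single source operator."""
    specs = [io.get_node_source(model, conn) for conn in connections]
    return len({id(spec.target.obj) for spec in specs}) == 1

# For example, in test_get_spinnaker_source_for_node_repeated above this would
# hold as: _sources_share_operator(model, io, [a_b0, a_b1])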
| [
"nengo.Network",
"nengo_spinnaker.node_io.ethernet.Ethernet",
"nengo.Ensemble",
"nengo_spinnaker.builder.Model",
"nengo.Node",
"pytest.mark.parametrize",
"nengo.Connection"
] | [((263, 325), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""transmission_period"""', '[0.001, 0.002]'], {}), "('transmission_period', [0.001, 0.002])\n", (286, 325), False, 'import pytest\n'), ((518, 579), 'nengo_spinnaker.node_io.ethernet.Ethernet', 'ethernet_io.Ethernet', ([], {'transmission_period': 'transmission_period'}), '(transmission_period=transmission_period)\n', (538, 579), True, 'from nengo_spinnaker.node_io import ethernet as ethernet_io\n'), ((1505, 1512), 'nengo_spinnaker.builder.Model', 'Model', ([], {}), '()\n', (1510, 1512), False, 'from nengo_spinnaker.builder import Model\n'), ((1522, 1544), 'nengo_spinnaker.node_io.ethernet.Ethernet', 'ethernet_io.Ethernet', ([], {}), '()\n', (1542, 1544), True, 'from nengo_spinnaker.node_io import ethernet as ethernet_io\n'), ((2184, 2191), 'nengo_spinnaker.builder.Model', 'Model', ([], {}), '()\n', (2189, 2191), False, 'from nengo_spinnaker.builder import Model\n'), ((2201, 2223), 'nengo_spinnaker.node_io.ethernet.Ethernet', 'ethernet_io.Ethernet', ([], {}), '()\n', (2221, 2223), True, 'from nengo_spinnaker.node_io import ethernet as ethernet_io\n'), ((2824, 2831), 'nengo_spinnaker.builder.Model', 'Model', ([], {}), '()\n', (2829, 2831), False, 'from nengo_spinnaker.builder import Model\n'), ((2841, 2863), 'nengo_spinnaker.node_io.ethernet.Ethernet', 'ethernet_io.Ethernet', ([], {}), '()\n', (2861, 2863), True, 'from nengo_spinnaker.node_io import ethernet as ethernet_io\n'), ((3481, 3488), 'nengo_spinnaker.builder.Model', 'Model', ([], {}), '()\n', (3486, 3488), False, 'from nengo_spinnaker.builder import Model\n'), ((3498, 3520), 'nengo_spinnaker.node_io.ethernet.Ethernet', 'ethernet_io.Ethernet', ([], {}), '()\n', (3518, 3520), True, 'from nengo_spinnaker.node_io import ethernet as ethernet_io\n'), ((1301, 1316), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (1314, 1316), False, 'import nengo\n'), ((1330, 1370), 'nengo.Node', 'nengo.Node', (['(lambda t: t ** 2)'], {'size_out': '(1)'}), '(lambda t: t ** 2, size_out=1)\n', (1340, 1370), False, 'import nengo\n'), ((1381, 1403), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (1395, 1403), False, 'import nengo\n'), ((1418, 1440), 'nengo.Connection', 'nengo.Connection', (['a', 'b'], {}), '(a, b)\n', (1434, 1440), False, 'import nengo\n'), ((1925, 1940), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (1938, 1940), False, 'import nengo\n'), ((1954, 1994), 'nengo.Node', 'nengo.Node', (['(lambda t: t ** 2)'], {'size_out': '(1)'}), '(lambda t: t ** 2, size_out=1)\n', (1964, 1994), False, 'import nengo\n'), ((2005, 2027), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (2019, 2027), False, 'import nengo\n'), ((2043, 2065), 'nengo.Connection', 'nengo.Connection', (['a', 'b'], {}), '(a, b)\n', (2059, 2065), False, 'import nengo\n'), ((2081, 2119), 'nengo.Connection', 'nengo.Connection', (['a', 'b'], {'transform': '(-0.5)'}), '(a, b, transform=-0.5)\n', (2097, 2119), False, 'import nengo\n'), ((2618, 2633), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (2631, 2633), False, 'import nengo\n'), ((2647, 2669), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (2661, 2669), False, 'import nengo\n'), ((2682, 2722), 'nengo.Node', 'nengo.Node', (['(lambda t, x: None)'], {'size_in': '(1)'}), '(lambda t, x: None, size_in=1)\n', (2692, 2722), False, 'import nengo\n'), ((2737, 2759), 'nengo.Connection', 'nengo.Connection', (['a', 'b'], {}), '(a, b)\n', (2753, 2759), False, 'import nengo\n'), 
((3223, 3238), 'nengo.Network', 'nengo.Network', ([], {}), '()\n', (3236, 3238), False, 'import nengo\n'), ((3252, 3274), 'nengo.Ensemble', 'nengo.Ensemble', (['(100)', '(1)'], {}), '(100, 1)\n', (3266, 3274), False, 'import nengo\n'), ((3287, 3327), 'nengo.Node', 'nengo.Node', (['(lambda t, x: None)'], {'size_in': '(1)'}), '(lambda t, x: None, size_in=1)\n', (3297, 3327), False, 'import nengo\n'), ((3343, 3365), 'nengo.Connection', 'nengo.Connection', (['a', 'b'], {}), '(a, b)\n', (3359, 3365), False, 'import nengo\n'), ((3381, 3416), 'nengo.Connection', 'nengo.Connection', (['a', 'b'], {'synapse': '(0.3)'}), '(a, b, synapse=0.3)\n', (3397, 3416), False, 'import nengo\n')] |
from rest_framework import serializers
from api.model.foodComment import FoodComment
from api.model.food import Food
from django.contrib.auth.models import User
from api.serializer.user import UserSerializer
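

# Write-facing serializer: clients submit `user` and `food` as primary keys,
# which are validated against the existing User and Food rows; `photo` is
# optional.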
class FoodCommentSerializer(serializers.ModelSerializer):
comment = serializers.CharField(max_length=255)
photo = serializers.CharField(max_length=255, allow_null=True, required=False)
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
food = serializers.PrimaryKeyRelatedField(queryset=Food.objects.all())
class Meta:
model = FoodComment
fields = '__all__'
depth = 1
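

# Read-facing serializer: embeds the full related user record via a nested
# UserSerializer instead of exposing only its primary key.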
class FoodCommentReadSerializer(serializers.ModelSerializer):
user = UserSerializer()
class Meta:
model = FoodComment
fields = '__all__'
depth = 1
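

# Minimal write serializer: exposes only the fields required to attach a
# comment to a food item.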
class FoodCommentPureSerializer(serializers.ModelSerializer):
user = serializers.PrimaryKeyRelatedField(queryset=User.objects.all())
food = serializers.PrimaryKeyRelatedField(queryset=Food.objects.all())
class Meta:
model = FoodComment
fields = ('comment', 'user', 'food')
depth = 1
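

# A hedged usage sketch (not part of the original module): _example_round_trip
# is hypothetical and assumes already-saved User and Food instances supplied by
# the caller; it shows the standard DRF validate-then-save round trip using the
# write-facing serializer defined above.
def _example_round_trip(user, food, text='Tasty!'):
    serializer = FoodCommentSerializer(data={
        'comment': text,
        'user': user.pk,  # primary key of an existing User (assumption)
        'food': food.pk,  # primary key of an existing Food (assumption)
    })
    serializer.is_valid(raise_exception=True)  # raises on invalid input
    return serializer.save()  # creates and returns a FoodComment instance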
| [
"api.model.food.Food.objects.all",
"rest_framework.serializers.CharField",
"django.contrib.auth.models.User.objects.all",
"api.serializer.user.UserSerializer"
] | [((284, 321), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(255)'}), '(max_length=255)\n', (305, 321), False, 'from rest_framework import serializers\n'), ((334, 404), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(255)', 'allow_null': '(True)', 'required': '(False)'}), '(max_length=255, allow_null=True, required=False)\n', (355, 404), False, 'from rest_framework import serializers\n'), ((722, 738), 'api.serializer.user.UserSerializer', 'UserSerializer', ([], {}), '()\n', (736, 738), False, 'from api.serializer.user import UserSerializer\n'), ((461, 479), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (477, 479), False, 'from django.contrib.auth.models import User\n'), ((536, 554), 'api.model.food.Food.objects.all', 'Food.objects.all', ([], {}), '()\n', (552, 554), False, 'from api.model.food import Food\n'), ((949, 967), 'django.contrib.auth.models.User.objects.all', 'User.objects.all', ([], {}), '()\n', (965, 967), False, 'from django.contrib.auth.models import User\n'), ((1024, 1042), 'api.model.food.Food.objects.all', 'Food.objects.all', ([], {}), '()\n', (1040, 1042), False, 'from api.model.food import Food\n')] |