code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import tkinter
from tkinter import ttk
import mqtt_remote_method_calls as mqtt
import m2_laptop_code as m2
import m3_laptop_code as m3
def get_my_frame(root, window, mqtt_sender):
# Construct your frame:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame_label = ttk.Label(frame, text="Zixin")
frame_label.grid()
# DONE 2: Put your name in the above.
# Add the rest of your GUI to your frame:
# DONE: Put your GUI onto your frame (using sub-frames if you wish).
entry_distance = ttk.Entry(frame, width = 8)
entry_distance.grid()
entry_distance.insert(0, '12')
entry_speed = ttk.Entry(frame, width = 8)
entry_speed.grid()
entry_speed.insert(0, '100')
entry_delta = ttk.Entry(frame, width = 8)
entry_delta.grid()
entry_delta.insert(0, 5)
button_forward = ttk.Button(frame, text = "Forward")
button_forward.grid()
button_forward['command'] = lambda: Handle_forward(mqtt_sender, entry_distance, entry_speed, entry_delta)
button_backward = ttk.Button(frame, text = "Backward")
button_backward.grid()
button_backward['command'] = lambda : Handle_backward(mqtt_sender, entry_distance, entry_speed, entry_delta)
entry_until_distance = ttk.Entry(frame, width = 8)
entry_until_distance.grid()
entry_until_distance.insert(0, '40')
button_until_distance = ttk.Button(frame, text = "Go until Distance")
button_until_distance.grid()
button_until_distance['command'] = lambda: Handle_go_until_distance(mqtt_sender, entry_until_distance, entry_speed, entry_delta)
# Return your frame:
return frame
def Handle_forward(mqtt_sender, entry_distance, entry_speed, entry_delta):
distance = int(entry_distance.get())
print('The robot goes forward', distance)
speed = int(entry_speed.get())
print('with speed', speed)
print()
delta = int(entry_delta.get())
mqtt_sender.send_message('Forward_or_Backward', [distance, speed, delta])
def Handle_backward(mqtt_sender, entry_distance, entry_speed, entry_delta):
distance = int(entry_distance.get())
print('The robot goes backward', distance)
speed = int(entry_speed.get())
print('with speed', speed)
print()
delta = int(entry_delta.get())
mqtt_sender.send_message('Forward_or_Backward', [- distance, - speed, delta])
def Handle_go_until_distance(mqtt_sender, entry_until_distance, entry_speed, entry_delta):
until_distance = int(entry_until_distance.get())
print('The robot goes until distance', until_distance)
speed = int(entry_speed.get())
print('with initial speed', speed)
print()
delta = int(entry_delta.get())
mqtt_sender.send_message('Go_until_distance', [until_distance, speed, delta])
class MyLaptopDelegate(object):
"""
Defines methods that are called by the MQTT listener when that listener
gets a message (name of the method, plus its arguments)
from the ROBOT via MQTT.
"""
def __init__(self, root):
self.root = root # type: tkinter.Tk
self.mqtt_sender = None # type: mqtt.MqttClient
def set_mqtt_sender(self, mqtt_sender):
self.mqtt_sender = mqtt_sender
# TODO: Add methods here as needed.
# TODO: Add functions here as needed. | src/m1_laptop_code.py |
import tkinter
from tkinter import ttk
import mqtt_remote_method_calls as mqtt
import m2_laptop_code as m2
import m3_laptop_code as m3
def get_my_frame(root, window, mqtt_sender):
# Construct your frame:
frame = ttk.Frame(window, padding=10, borderwidth=5, relief="ridge")
frame_label = ttk.Label(frame, text="Zixin")
frame_label.grid()
# DONE 2: Put your name in the above.
# Add the rest of your GUI to your frame:
# DONE: Put your GUI onto your frame (using sub-frames if you wish).
entry_distance = ttk.Entry(frame, width = 8)
entry_distance.grid()
entry_distance.insert(0, '12')
entry_speed = ttk.Entry(frame, width = 8)
entry_speed.grid()
entry_speed.insert(0, '100')
entry_delta = ttk.Entry(frame, width = 8)
entry_delta.grid()
entry_delta.insert(0, 5)
button_forward = ttk.Button(frame, text = "Forward")
button_forward.grid()
button_forward['command'] = lambda: Handle_forward(mqtt_sender, entry_distance, entry_speed, entry_delta)
button_backward = ttk.Button(frame, text = "Backward")
button_backward.grid()
button_backward['command'] = lambda : Handle_backward(mqtt_sender, entry_distance, entry_speed, entry_delta)
entry_until_distance = ttk.Entry(frame, width = 8)
entry_until_distance.grid()
entry_until_distance.insert(0, '40')
button_until_distance = ttk.Button(frame, text = "Go until Distance")
button_until_distance.grid()
button_until_distance['command'] = lambda: Handle_go_until_distance(mqtt_sender, entry_until_distance, entry_speed, entry_delta)
# Return your frame:
return frame
def Handle_forward(mqtt_sender, entry_distance, entry_speed, entry_delta):
distance = int(entry_distance.get())
print('The robot goes forward', distance)
speed = int(entry_speed.get())
print('with speed', speed)
print()
delta = int(entry_delta.get())
mqtt_sender.send_message('Forward_or_Backward', [distance, speed, delta])
def Handle_backward(mqtt_sender, entry_distance, entry_speed, entry_delta):
distance = int(entry_distance.get())
print('The robot goes backward', distance)
speed = int(entry_speed.get())
print('with speed', speed)
print()
delta = int(entry_delta.get())
mqtt_sender.send_message('Forward_or_Backward', [- distance, - speed, delta])
def Handle_go_until_distance(mqtt_sender, entry_until_distance, entry_speed, entry_delta):
until_distance = int(entry_until_distance.get())
print('The robot goes until distance', until_distance)
speed = int(entry_speed.get())
print('with initial speed', speed)
print()
delta = int(entry_delta.get())
mqtt_sender.send_message('Go_until_distance', [until_distance, speed, delta])
class MyLaptopDelegate(object):
"""
Defines methods that are called by the MQTT listener when that listener
gets a message (name of the method, plus its arguments)
from the ROBOT via MQTT.
"""
def __init__(self, root):
self.root = root # type: tkinter.Tk
self.mqtt_sender = None # type: mqtt.MqttClient
def set_mqtt_sender(self, mqtt_sender):
self.mqtt_sender = mqtt_sender
# TODO: Add methods here as needed.
# TODO: Add functions here as needed. | 0.509032 | 0.234889 |
import numpy as np
import visgeom as vg
from camera import PerspectiveCamera
from measurements import PrecalibratedCameraMeasurementsFixedWorld
from optim import levenberg_marquardt
from visualise_ba import visualise_moba
"""Example 1 - Motion-only Bundle Adjustment"""
class PrecalibratedMotionOnlyBAObjective:
"""Implements linearisation of motion-only BA objective function"""
def __init__(self, measurement):
"""Constructs the objective
:param measurement: A PrecalibratedCameraMeasurementsFixedWorld object.
"""
self.measurement = measurement
@staticmethod
def extract_measurement_jacobian(point_index, pose_state_c_w, measurement):
"""Computes the measurement Jacobian for a specific point and camera measurement.
:param point_index: Index of current point.
:param pose_state_c_w: Current pose state given as the pose of the world in the camera frame.
:param measurement: The measurement
:return: The measurement Jacobian
"""
A = measurement.sqrt_inv_covs[point_index] @ \
measurement.camera.jac_project_world_to_normalised_wrt_pose_w_c(pose_state_c_w,
measurement.x_w[:, [point_index]])
return A
@staticmethod
def extract_measurement_error(point_index, pose_state_c_w, measurement):
"""Computes the measurement error for a specific point and camera measurement.
:param point_index: Index of current point.
:param pose_state_c_w: Current pose state given as the pose of the world in the camera frame.
:param measurement: The measurement
:return: The measurement error
"""
b = measurement.sqrt_inv_covs[point_index] @ \
measurement.camera.reprojection_error_normalised(pose_state_c_w * measurement.x_w[:, [point_index]],
measurement.xn[:, [point_index]])
return b
def linearise(self, pose_state_w_c):
"""Linearises the objective over all states and measurements
:param pose_state_w_c: The current camera pose state in the world frame.
:return:
A - The full measurement Jacobian
b - The full measurement error
cost - The current cost
"""
num_points = self.measurement.num
A = np.zeros((2 * num_points, 6))
b = np.zeros((2 * num_points, 1))
pose_state_c_w = pose_state_w_c.inverse()
for j in range(num_points):
rows = slice(j * 2, (j + 1) * 2)
A[rows, :] = self.extract_measurement_jacobian(j, pose_state_c_w, self.measurement)
b[rows, :] = self.extract_measurement_error(j, pose_state_c_w, self.measurement)
return A, b, b.T.dot(b)
def main():
# World box.
points_w = vg.utils.generate_box()
# Define common camera.
w = 640
h = 480
focal_lengths = 0.75 * h * np.ones((2, 1))
principal_point = 0.5 * np.array([[w, h]]).T
camera = PerspectiveCamera(focal_lengths, principal_point)
# Generate a set of camera measurements.
true_pose_w_c = PerspectiveCamera.looks_at_pose(np.array([[3, -4, 0]]).T, np.zeros((3, 1)), np.array([[0, 0, 1]]).T)
measurement = PrecalibratedCameraMeasurementsFixedWorld.generate(camera, true_pose_w_c, points_w)
# Construct model from measurements.
model = PrecalibratedMotionOnlyBAObjective(measurement)
# Perturb camera pose and use as initial state.
init_pose_wc = true_pose_w_c + 0.3 * np.random.randn(6, 1)
# Estimate pose in the world frame from point correspondences.
x, cost, A, b = levenberg_marquardt(init_pose_wc, model)
cov_x_final = np.linalg.inv(A.T @ A)
# Print covariance.
with np.printoptions(precision=3, suppress=True):
print('Covariance:')
print(cov_x_final)
# Visualise
visualise_moba(true_pose_w_c, points_w, measurement, x, cost)
if __name__ == "__main__":
main() | ex_1_motion_only_ba.py | import numpy as np
import visgeom as vg
from camera import PerspectiveCamera
from measurements import PrecalibratedCameraMeasurementsFixedWorld
from optim import levenberg_marquardt
from visualise_ba import visualise_moba
"""Example 1 - Motion-only Bundle Adjustment"""
class PrecalibratedMotionOnlyBAObjective:
"""Implements linearisation of motion-only BA objective function"""
def __init__(self, measurement):
"""Constructs the objective
:param measurement: A PrecalibratedCameraMeasurementsFixedWorld object.
"""
self.measurement = measurement
@staticmethod
def extract_measurement_jacobian(point_index, pose_state_c_w, measurement):
"""Computes the measurement Jacobian for a specific point and camera measurement.
:param point_index: Index of current point.
:param pose_state_c_w: Current pose state given as the pose of the world in the camera frame.
:param measurement: The measurement
:return: The measurement Jacobian
"""
A = measurement.sqrt_inv_covs[point_index] @ \
measurement.camera.jac_project_world_to_normalised_wrt_pose_w_c(pose_state_c_w,
measurement.x_w[:, [point_index]])
return A
@staticmethod
def extract_measurement_error(point_index, pose_state_c_w, measurement):
"""Computes the measurement error for a specific point and camera measurement.
:param point_index: Index of current point.
:param pose_state_c_w: Current pose state given as the pose of the world in the camera frame.
:param measurement: The measurement
:return: The measurement error
"""
b = measurement.sqrt_inv_covs[point_index] @ \
measurement.camera.reprojection_error_normalised(pose_state_c_w * measurement.x_w[:, [point_index]],
measurement.xn[:, [point_index]])
return b
def linearise(self, pose_state_w_c):
"""Linearises the objective over all states and measurements
:param pose_state_w_c: The current camera pose state in the world frame.
:return:
A - The full measurement Jacobian
b - The full measurement error
cost - The current cost
"""
num_points = self.measurement.num
A = np.zeros((2 * num_points, 6))
b = np.zeros((2 * num_points, 1))
pose_state_c_w = pose_state_w_c.inverse()
for j in range(num_points):
rows = slice(j * 2, (j + 1) * 2)
A[rows, :] = self.extract_measurement_jacobian(j, pose_state_c_w, self.measurement)
b[rows, :] = self.extract_measurement_error(j, pose_state_c_w, self.measurement)
return A, b, b.T.dot(b)
def main():
# World box.
points_w = vg.utils.generate_box()
# Define common camera.
w = 640
h = 480
focal_lengths = 0.75 * h * np.ones((2, 1))
principal_point = 0.5 * np.array([[w, h]]).T
camera = PerspectiveCamera(focal_lengths, principal_point)
# Generate a set of camera measurements.
true_pose_w_c = PerspectiveCamera.looks_at_pose(np.array([[3, -4, 0]]).T, np.zeros((3, 1)), np.array([[0, 0, 1]]).T)
measurement = PrecalibratedCameraMeasurementsFixedWorld.generate(camera, true_pose_w_c, points_w)
# Construct model from measurements.
model = PrecalibratedMotionOnlyBAObjective(measurement)
# Perturb camera pose and use as initial state.
init_pose_wc = true_pose_w_c + 0.3 * np.random.randn(6, 1)
# Estimate pose in the world frame from point correspondences.
x, cost, A, b = levenberg_marquardt(init_pose_wc, model)
cov_x_final = np.linalg.inv(A.T @ A)
# Print covariance.
with np.printoptions(precision=3, suppress=True):
print('Covariance:')
print(cov_x_final)
# Visualise
visualise_moba(true_pose_w_c, points_w, measurement, x, cost)
if __name__ == "__main__":
main() | 0.908425 | 0.82748 |
import os
import sys
import numpy as np
import argparse
import functools
import paddle
import paddle.fluid as fluid
from utility import add_arguments, print_arguments
from se_resnext import SE_ResNeXt
import reader
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 1, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('test_list', str, '', "The testing data lists.")
add_arg('num_layers', int, 50, "How many layers for SE-ResNeXt model.")
add_arg('model_dir', str, '', "The model path.")
# yapf: enable
def infer(args):
class_dim = 1000
image_shape = [3, 224, 224]
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
out = SE_ResNeXt(input=image, class_dim=class_dim, layers=args.num_layers)
out = fluid.layers.softmax(input=out)
inference_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
if not os.path.exists(args.model_dir):
raise ValueError("The model path [%s] does not exist." %
(args.model_dir))
if not os.path.exists(args.test_list):
raise ValueError("The test lists [%s] does not exist." %
(args.test_list))
def if_exist(var):
return os.path.exists(os.path.join(args.model_dir, var.name))
fluid.io.load_vars(exe, args.model_dir, predicate=if_exist)
test_reader = paddle.batch(
reader.infer(args.test_list), batch_size=args.batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image])
fetch_list = [out]
TOPK = 1
for batch_id, data in enumerate(test_reader()):
result = exe.run(inference_program,
feed=feeder.feed(data),
fetch_list=fetch_list)
result = result[0]
pred_label = np.argsort(result)[::-1][0][0]
print("Test {0}-score {1}, class {2}: "
.format(batch_id, result[0][pred_label], pred_label))
sys.stdout.flush()
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
infer(args) | fluid/image_classification/infer.py | import os
import sys
import numpy as np
import argparse
import functools
import paddle
import paddle.fluid as fluid
from utility import add_arguments, print_arguments
from se_resnext import SE_ResNeXt
import reader
parser = argparse.ArgumentParser(description=__doc__)
add_arg = functools.partial(add_arguments, argparser=parser)
# yapf: disable
add_arg('batch_size', int, 1, "Minibatch size.")
add_arg('use_gpu', bool, True, "Whether to use GPU or not.")
add_arg('test_list', str, '', "The testing data lists.")
add_arg('num_layers', int, 50, "How many layers for SE-ResNeXt model.")
add_arg('model_dir', str, '', "The model path.")
# yapf: enable
def infer(args):
class_dim = 1000
image_shape = [3, 224, 224]
image = fluid.layers.data(name='image', shape=image_shape, dtype='float32')
out = SE_ResNeXt(input=image, class_dim=class_dim, layers=args.num_layers)
out = fluid.layers.softmax(input=out)
inference_program = fluid.default_main_program().clone(for_test=True)
place = fluid.CUDAPlace(0) if args.use_gpu else fluid.CPUPlace()
exe = fluid.Executor(place)
if not os.path.exists(args.model_dir):
raise ValueError("The model path [%s] does not exist." %
(args.model_dir))
if not os.path.exists(args.test_list):
raise ValueError("The test lists [%s] does not exist." %
(args.test_list))
def if_exist(var):
return os.path.exists(os.path.join(args.model_dir, var.name))
fluid.io.load_vars(exe, args.model_dir, predicate=if_exist)
test_reader = paddle.batch(
reader.infer(args.test_list), batch_size=args.batch_size)
feeder = fluid.DataFeeder(place=place, feed_list=[image])
fetch_list = [out]
TOPK = 1
for batch_id, data in enumerate(test_reader()):
result = exe.run(inference_program,
feed=feeder.feed(data),
fetch_list=fetch_list)
result = result[0]
pred_label = np.argsort(result)[::-1][0][0]
print("Test {0}-score {1}, class {2}: "
.format(batch_id, result[0][pred_label], pred_label))
sys.stdout.flush()
if __name__ == '__main__':
args = parser.parse_args()
print_arguments(args)
infer(args) | 0.362179 | 0.161122 |
import asyncio
import itertools
import os
import random
import re
import subprocess
from collections import UserDict
from typing import Any, Iterable, List, Optional
HASH_BYTES = 32
def get_package_path():
"""
Returns ROOT_PATH s.t. $ROOT_PATH/starkware is the package folder.
"""
import starkware.python
return os.path.abspath(os.path.join(os.path.dirname(starkware.python.__file__), '../../'))
def get_build_dir_path(rel_path=''):
"""
Returns a path to a file inside the build directory (or the docker).
rel_path is the relative path of the file with respect to the build directory.
"""
build_root = os.environ['BUILD_ROOT']
return os.path.join(build_root, rel_path)
def get_source_dir_path(rel_path=''):
"""
Returns a path to a file inside the source directory. Does not work in docker.
rel_path is the relative path of the file with respect to the source directory.
"""
source_root = os.path.join(os.environ['BUILD_ROOT'], '../../')
assert os.path.exists(os.path.join(source_root, 'src'))
return os.path.join(source_root, rel_path)
def assert_same_and_get(*args):
"""
Verifies that all the arguments are the same, and returns this value.
For example, assert_same_and_get(5, 5, 5) will return 5, and assert_same_and_get(0, 1) will
raise an AssertionError.
"""
assert len(set(args)) == 1, 'Values are not the same (%s)' % (args,)
return args[0]
def unique(x):
"""
Removes duplicates while preserving order.
"""
return list(dict.fromkeys(x).keys())
def unique_ordered_union(x, y):
"""
Returns a list containing the union of 'x' and 'y', preserving order and removing duplicates.
"""
return list(dict.fromkeys(list(x) + list(y)).keys())
def add_counters(x, y):
"""
Given two dicts x, y, returns a dict d s.t.
d[a] = d[x] + d[y]
"""
return {k: x.get(k, 0) + y.get(k, 0) for k in unique_ordered_union(x.keys(), y.keys())}
def sub_counters(x, y):
"""
Given two dicts x, y, returns a dict d s.t.
d[a] = d[x] - d[y]
"""
return {k: x.get(k, 0) - y.get(k, 0) for k in unique_ordered_union(x.keys(), y.keys())}
def indent(code, indentation):
"""
Indent code by 'indentation' spaces.
For example, indent('hello\nworld\n', 2) -> ' hello\n world\n'.
"""
if len(code) == 0:
return code
if isinstance(indentation, int):
indentation = ' ' * indentation
elif not isinstance(indentation, str):
raise TypeError(f'Supports only int or str, got {type(indentation).__name__}')
# Replace every occurrence of \n, with \n followed by indentation,
# unless the \n is the last characther of the string or is followed by another \n.
# We enforce the "not followed by ..." condition using negative lookahead (?!\n|$),
# looking for end of string ($) or another \n.
return indentation + re.sub(r'\n(?!\n|$)', '\n' + indentation, code)
def get_random_instance() -> random.Random:
"""
Returns the Random instance in the random module level.
"""
return random._inst # type: ignore[attr-defined]
def initialize_random(
random_object: Optional[random.Random] = None, seed: Optional[int] = None) -> random.Random:
"""
Returns a Random object initialized according to the given parameters.
If both are None, the Random instance instantiated in the random module is returned.
"""
if random_object is not None:
return random_object
return random.Random(seed) if seed is not None else get_random_instance()
def get_random_bytes(random_object: Optional[random.Random] = None, *, n: int):
"""
Returns a random bytes object of length n.
NOTE: This function is unsafe and should only be used for testing.
"""
r = initialize_random(random_object=random_object)
return bytes(r.getrandbits(8) for _ in range(n))
def compare_files(src, dst, fix):
"""
If 'fix' is False, checks that the files are the same.
If 'fix' is True, overrides dst with src.
"""
subprocess.check_call(['cp' if fix else 'diff', src, dst])
def remove_trailing_spaces(code):
"""
Removes spaces from end of lines.
For example, remove_trailing_spaces('hello \nworld \n') -> 'hello\nworld\n'.
"""
return re.sub(' +$', '', code, flags=re.MULTILINE)
def should_discard_key(key, exclude: List[str]) -> bool:
return any(field_to_discard in key for field_to_discard in exclude)
def discard_key(d: dict, key, to_replace_by: Optional[str]):
if to_replace_by is None:
del d[key]
else:
d[key] = to_replace_by
class WriteOnceDict(UserDict):
"""
Write once dictionary.
A Dict that enforces that each key is set only once.
Trying to set an existing key to its current value also raises an AssertionError.
"""
def __setitem__(self, key, value):
assert key not in self.data, \
f"Trying to set key={key} to '{value}' but key={key} is already set to '{self[key]}'."
self.data[key] = value
def camel_to_snake_case(camel_case_name: str) -> str:
"""
Converts a name with Capital first letters to lower case with '_' as separators.
For example, CamelToSnakeCase -> camel_to_snake_case.
"""
return (camel_case_name[0] + re.sub(r'([A-Z])', r'_\1', camel_case_name[1:])).lower()
def snake_to_camel_case(snake_case_name: str) -> str:
"""
Converts the first letter to upper case (if possible) and all the '_l' to 'L'.
For example snake_to_camel_case -> SnakeToCamelCase.
"""
return re.subn(r'(^|_)([a-z])', lambda m: m.group(2).upper(), snake_case_name)[0]
async def cancel_futures(*futures: asyncio.Future):
"""
Cancels given futures and awaits on them in order to reveal exceptions.
Used in a process' teardown.
"""
for future in futures:
future.cancel()
for future in futures:
try:
await future
except asyncio.CancelledError:
pass
def safe_zip(*iterables: Iterable[Any]) -> Iterable:
"""
Zips iterables. Makes sure the lengths of all iterables are equal.
"""
sentinel = object()
for combo in itertools.zip_longest(*iterables, fillvalue=sentinel):
assert sentinel not in combo, 'Iterables to safe_zip are not equal in length.'
yield combo
def composite(*funcs):
"""
Returns the composition of all the given functions, which is a function that runs the last
function with the input args, and then runs the function before that with the return value of
the last function and so on. Finally, the composition function will return the return value of
the first function.
Every function beside the last function should receive one argument.
For example:
f = composite(lambda x: x * 5, lambda x, y: x + y)
assert f(2, 3) == (2 + 3) * 5
"""
assert len(funcs) > 0
def composition_function(*args, **kwargs):
return_value: Any = funcs[-1](*args, **kwargs)
for func in reversed(funcs[:-1]):
return_value = func(return_value)
return return_value
return composition_function
def to_bytes(value: int, length: Optional[int] = None, byte_order: Optional[str] = None) -> bytes:
"""
Converts the given integer to a bytes object of given length and byte order.
The default values are 32B width (which is the hash result width) and 'big', respectively.
"""
if length is None:
length = HASH_BYTES
if byte_order is None:
byte_order = 'big'
return int.to_bytes(value, length=length, byteorder=byte_order)
def from_bytes(value: bytes, byte_order: Optional[str] = None) -> int:
"""
Converts the given bytes object (parsed according to the given byte order) to an integer.
Default byte order is 'big'.
"""
if byte_order is None:
byte_order = 'big'
return int.from_bytes(value, byteorder=byte_order)
def blockify(data, chunk_size: int):
"""
Returns the given data partitioned to chunks of chunks_size (last chunk might be smaller).
"""
return (data[i:i + chunk_size] for i in range(0, len(data), chunk_size)) | src/starkware/python/utils.py | import asyncio
import itertools
import os
import random
import re
import subprocess
from collections import UserDict
from typing import Any, Iterable, List, Optional
HASH_BYTES = 32
def get_package_path():
"""
Returns ROOT_PATH s.t. $ROOT_PATH/starkware is the package folder.
"""
import starkware.python
return os.path.abspath(os.path.join(os.path.dirname(starkware.python.__file__), '../../'))
def get_build_dir_path(rel_path=''):
"""
Returns a path to a file inside the build directory (or the docker).
rel_path is the relative path of the file with respect to the build directory.
"""
build_root = os.environ['BUILD_ROOT']
return os.path.join(build_root, rel_path)
def get_source_dir_path(rel_path=''):
"""
Returns a path to a file inside the source directory. Does not work in docker.
rel_path is the relative path of the file with respect to the source directory.
"""
source_root = os.path.join(os.environ['BUILD_ROOT'], '../../')
assert os.path.exists(os.path.join(source_root, 'src'))
return os.path.join(source_root, rel_path)
def assert_same_and_get(*args):
"""
Verifies that all the arguments are the same, and returns this value.
For example, assert_same_and_get(5, 5, 5) will return 5, and assert_same_and_get(0, 1) will
raise an AssertionError.
"""
assert len(set(args)) == 1, 'Values are not the same (%s)' % (args,)
return args[0]
def unique(x):
"""
Removes duplicates while preserving order.
"""
return list(dict.fromkeys(x).keys())
def unique_ordered_union(x, y):
"""
Returns a list containing the union of 'x' and 'y', preserving order and removing duplicates.
"""
return list(dict.fromkeys(list(x) + list(y)).keys())
def add_counters(x, y):
"""
Given two dicts x, y, returns a dict d s.t.
d[a] = d[x] + d[y]
"""
return {k: x.get(k, 0) + y.get(k, 0) for k in unique_ordered_union(x.keys(), y.keys())}
def sub_counters(x, y):
"""
Given two dicts x, y, returns a dict d s.t.
d[a] = d[x] - d[y]
"""
return {k: x.get(k, 0) - y.get(k, 0) for k in unique_ordered_union(x.keys(), y.keys())}
def indent(code, indentation):
"""
Indent code by 'indentation' spaces.
For example, indent('hello\nworld\n', 2) -> ' hello\n world\n'.
"""
if len(code) == 0:
return code
if isinstance(indentation, int):
indentation = ' ' * indentation
elif not isinstance(indentation, str):
raise TypeError(f'Supports only int or str, got {type(indentation).__name__}')
# Replace every occurrence of \n, with \n followed by indentation,
# unless the \n is the last characther of the string or is followed by another \n.
# We enforce the "not followed by ..." condition using negative lookahead (?!\n|$),
# looking for end of string ($) or another \n.
return indentation + re.sub(r'\n(?!\n|$)', '\n' + indentation, code)
def get_random_instance() -> random.Random:
"""
Returns the Random instance in the random module level.
"""
return random._inst # type: ignore[attr-defined]
def initialize_random(
random_object: Optional[random.Random] = None, seed: Optional[int] = None) -> random.Random:
"""
Returns a Random object initialized according to the given parameters.
If both are None, the Random instance instantiated in the random module is returned.
"""
if random_object is not None:
return random_object
return random.Random(seed) if seed is not None else get_random_instance()
def get_random_bytes(random_object: Optional[random.Random] = None, *, n: int):
"""
Returns a random bytes object of length n.
NOTE: This function is unsafe and should only be used for testing.
"""
r = initialize_random(random_object=random_object)
return bytes(r.getrandbits(8) for _ in range(n))
def compare_files(src, dst, fix):
"""
If 'fix' is False, checks that the files are the same.
If 'fix' is True, overrides dst with src.
"""
subprocess.check_call(['cp' if fix else 'diff', src, dst])
def remove_trailing_spaces(code):
"""
Removes spaces from end of lines.
For example, remove_trailing_spaces('hello \nworld \n') -> 'hello\nworld\n'.
"""
return re.sub(' +$', '', code, flags=re.MULTILINE)
def should_discard_key(key, exclude: List[str]) -> bool:
return any(field_to_discard in key for field_to_discard in exclude)
def discard_key(d: dict, key, to_replace_by: Optional[str]):
if to_replace_by is None:
del d[key]
else:
d[key] = to_replace_by
class WriteOnceDict(UserDict):
"""
Write once dictionary.
A Dict that enforces that each key is set only once.
Trying to set an existing key to its current value also raises an AssertionError.
"""
def __setitem__(self, key, value):
assert key not in self.data, \
f"Trying to set key={key} to '{value}' but key={key} is already set to '{self[key]}'."
self.data[key] = value
def camel_to_snake_case(camel_case_name: str) -> str:
"""
Converts a name with Capital first letters to lower case with '_' as separators.
For example, CamelToSnakeCase -> camel_to_snake_case.
"""
return (camel_case_name[0] + re.sub(r'([A-Z])', r'_\1', camel_case_name[1:])).lower()
def snake_to_camel_case(snake_case_name: str) -> str:
"""
Converts the first letter to upper case (if possible) and all the '_l' to 'L'.
For example snake_to_camel_case -> SnakeToCamelCase.
"""
return re.subn(r'(^|_)([a-z])', lambda m: m.group(2).upper(), snake_case_name)[0]
async def cancel_futures(*futures: asyncio.Future):
"""
Cancels given futures and awaits on them in order to reveal exceptions.
Used in a process' teardown.
"""
for future in futures:
future.cancel()
for future in futures:
try:
await future
except asyncio.CancelledError:
pass
def safe_zip(*iterables: Iterable[Any]) -> Iterable:
"""
Zips iterables. Makes sure the lengths of all iterables are equal.
"""
sentinel = object()
for combo in itertools.zip_longest(*iterables, fillvalue=sentinel):
assert sentinel not in combo, 'Iterables to safe_zip are not equal in length.'
yield combo
def composite(*funcs):
"""
Returns the composition of all the given functions, which is a function that runs the last
function with the input args, and then runs the function before that with the return value of
the last function and so on. Finally, the composition function will return the return value of
the first function.
Every function beside the last function should receive one argument.
For example:
f = composite(lambda x: x * 5, lambda x, y: x + y)
assert f(2, 3) == (2 + 3) * 5
"""
assert len(funcs) > 0
def composition_function(*args, **kwargs):
return_value: Any = funcs[-1](*args, **kwargs)
for func in reversed(funcs[:-1]):
return_value = func(return_value)
return return_value
return composition_function
def to_bytes(value: int, length: Optional[int] = None, byte_order: Optional[str] = None) -> bytes:
"""
Converts the given integer to a bytes object of given length and byte order.
The default values are 32B width (which is the hash result width) and 'big', respectively.
"""
if length is None:
length = HASH_BYTES
if byte_order is None:
byte_order = 'big'
return int.to_bytes(value, length=length, byteorder=byte_order)
def from_bytes(value: bytes, byte_order: Optional[str] = None) -> int:
"""
Converts the given bytes object (parsed according to the given byte order) to an integer.
Default byte order is 'big'.
"""
if byte_order is None:
byte_order = 'big'
return int.from_bytes(value, byteorder=byte_order)
def blockify(data, chunk_size: int):
"""
Returns the given data partitioned to chunks of chunks_size (last chunk might be smaller).
"""
return (data[i:i + chunk_size] for i in range(0, len(data), chunk_size)) | 0.786869 | 0.366363 |
import csv
import h5py
import logging
import logging.config
import numpy as np
import os
from os import path
import pandas as pd
import pathlib
import re
import xlsxwriter
class CreateCsv:
    """Class combines csv linking IDs to csv containing feature info.

    Merge linked track and object IDs to corresponding feature data to
    produce a csv output file that can be visualized in FlowJo.
    """

    def __init__(self, ims_filename, dir_name, meta_dir_name, logger=None):
        """Open .ims file for reading; h5py.File acts like a dictionary.

        Args:
            ims_filename: Path of the selected Imaris file
            dir_name: Output csv collection directory
            meta_dir_name: Output metadata directory
            logger: Unused; a module-level logger is configured instead.
        """
        #: Set up the logger
        logging.basicConfig(
            format='%(asctime)s-%(name)s-%(levelname)s-%(message)s',
            datefmt='%b-%d-%y %H:%M:%S')
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        #: NOTE(review): dir_name/meta_dir_name are later joined with the
        #: `/` operator and `.unlink()` is called on the results, so they
        #: are expected to be pathlib.Path objects — confirm with callers.
        self.ims_filename = ims_filename
        self.dir_name = dir_name
        self.meta_dir_name = meta_dir_name
        #: Keep the .ims handle; create_csv_fun() re-opens it as well.
        self.f = h5py.File(self.ims_filename, 'r')
def round_to_six(self, num):
"""Round values to six significant figures
Args:
num (:obj:`int`): Num to be rounded to 6 significant figures
Returns:
A number rounded to six significant figures
"""
if num != 0:
if np.isnan(num) != True:
num = np.round(num, -int(np.floor(np.log10(abs(num)))) + 5)
elif num == 0:
pass
return num
    def get_df_from_csv(self, dirname, chan, chan_name, csv_substring):
        """Read one intermediate csv file into a dataframe, then delete it.

        Reads feature data (``extract_ims_data.py`` output) or ID data
        (``link_ims_ids.py`` output) for a channel. ``chan`` is the
        attribute name inside ``Scene8/Content`` (e.g. ``Points0``),
        while ``chan_name`` is the human-readable name read from the
        Imaris metadata; ``chan_name`` is suffixed onto every column
        except ``ID_Time`` so channels can later be merged safely.

        Args:
            dirname: Output csv collection directory (pathlib.Path —
                it is joined with `/` and the file is `.unlink()`-ed)
            chan (:obj:`str`): attribute name in ``Scene8/Content``
            chan_name (:obj:`str`): Channel name entered in Imaris
            csv_substring (:obj:`str`): Temp .csv prefix (``trackdf_``,
                ``objectdf_``, or empty string for the ID-link file)

        Returns:
            Pandas dataframe created from the intermediate csv file.
        """
        #: Columns that must NOT get the channel-name suffix
        keepsame = {'ID_Time'}
        #: Read csv
        temp_string = csv_substring + chan + ".csv"
        temp_path = dirname/temp_string
        df = pd.read_csv(temp_path)
        #: Remove "Unnamed" columns (index columns written by to_csv):
        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
        #: Suffix chan name to all headers except those in `keepsame`
        if len(df.columns) > 0:
            df.columns = ['{}{}'.format(
                c, '' if c in keepsame else chan_name) for c in df.columns]
            #: Collapse double underscores introduced by the suffixing
            df.columns = df.columns.str.replace('__', '_')
        #: Remove the intermediate csv file — it has been consumed.
        self.logger.debug("Remove line file_to_remove.unlink() to debug.")
        self.logger.debug(
            "CSV files can indicate where in the process an issue begins.")
        file_to_remove = temp_path
        file_to_remove.unlink()
        return df
def get_overall(self, overall_df, chan_name):
"""Extract overall data from object data
This reads the output from the ``get_df_from_csv()`` function
and extracts overall data from the df containing non-track data.
Note that in the second module, ``extract_ims_data.py``,
data tagged `Overall` was assigned an `ID_Object` of -1
Args:
overall_df: Df obtained `ID_Object` of object df < 0.
chan_name (:obj:`str`): Channel name entered in Imaris
Returns:
DF containing overall data; formatted like Imaris version
"""
overall_df.dropna(axis=1, how='all', inplace=True)
#: All ID_Objects == -1 belong to Overall. Replace with np.NaN
overall_df['ID_Object' + chan_name] = \
overall_df['ID_Object'+chan_name].replace(
-1.0, np.NaN, inplace=True)
#: Replace time = -1.0 with np.NaN
overall_df['ID_Time'].replace(-1.0, np.NaN, inplace=True)
overall_df.reset_index()
#: Rearrange df to match exact format exported by Imaris file
overall_df = pd.melt(
overall_df, id_vars=['ID_Time', 'ID_Object' + chan_name],
var_name='Variable', value_name='Value')
overall_df = overall_df[
['Variable','Value','ID_Time','ID_Object'+chan_name]]
overall_df.rename(
{'ID_Time': 'Time', 'ID_Object' + chan_name: 'ID'},
axis='columns', inplace=True)
overall_df.dropna(subset=['Value'], inplace=True)
overall_df['Variable'] = overall_df['Variable'].str.replace('_', ' ')
overall_df=overall_df.dropna(axis=1,how='all')
return overall_df
def create_overall_xlsx(self,imaris_filename,meta_dirname,all_overall_dict):
"""Create overall xlsx. Each sheet represents one channel.
This function merges all Overall DFs together and write each
channel to an xlsx notebook that uses sheets to represent
individual channels
Args:
imaris_filename (:obj:`str`): Filename of Imaris file
meta_dirname (:obj:`str`): Output metadata directory
all_overall_dict: Dict key=Imaris channel, value=overall df
"""
#: Get basename from imaris filename, to prepend to Overall.xlsx
imaris_basename = imaris_filename.stem
#: Remove .ims extension
imaris_basename = imaris_basename[:-4]
#: Create a Pandas Excel writer using XlsxWriter as the engine
temp_string = imaris_basename + "_" + 'Overall.xlsx'
temp_path = meta_dirname/temp_string
writer = pd.ExcelWriter(temp_path, engine='xlsxwriter')
count = 1
for chan_name, overall_df_list in all_overall_dict.items():
for i in range(0, len(overall_df_list)):
str_i = "_"
if i >= 1:
str_i = "_" + str(i) + "_"
str_channel_name = re.sub('[^A-Za-z0-9]+', '_', chan_name)
#: Convert the dataframe to an XlsxWriter Excel object
str_channel_name_shortened = ""
if len(str_channel_name) > 25:
str_channel_name_shortened = str_channel_name[:25]
else:
str_channel_name_shortened = str_channel_name
#: Round Overall "Values" column to 6 significant digits
self.logger.debug("Converting data to 6 significant figures...")
overall_df_list[i]['Value'] = overall_df_list[i]\
['Value'].apply(self.round_to_six)
overall_df_list[i].to_excel(
writer,
sheet_name=str_channel_name_shortened + str_i + str(count),
index=False, startrow=2, startcol=0)
#: Get the xlsxwriter workbook and worksheet objects
worksheet = writer.sheets[
str_channel_name_shortened + str_i + str(count)]
#: Add original, unmodified channel name to first row
worksheet.write(0, 0, chan_name)
#: Set the column width and format.
worksheet.set_column(0, 0, 50) #: 1st, last col, width
#: Close the Pandas Excel writer and output the Excel file.
count = count + 1
writer.save()
    def create_final_output(self, imaris_filename, non_overall_dfs, dirname):
        """Write per-channel csv files of the non-overall data.

        Stores remaining non-overall data with `TrackID` (if applicable),
        `ID_Object` (if applicable), and feature data, and exports one
        csv per dataframe; repeated channel names get a "_copy N" suffix.

        Args:
            imaris_filename: pathlib.Path of the Imaris file
            non_overall_dfs: dict key=channel name, value=list of dfs
            dirname: Output csv collection directory (pathlib.Path)
        """
        #: Get basename from imaris filename, to prepend to channel.csv
        imaris_basename = imaris_filename.stem
        #: Remove .ims extension
        #: NOTE(review): .stem already drops the extension — same
        #: assumption as in create_overall_xlsx; confirm inputs.
        imaris_basename = imaris_basename[:-4]
        for chan_name, non_ov in non_overall_dfs.items():
            #: Replace special characters from channel name (key) with _
            chan_mod = re.sub('[^0-9a-zA-Z]+', '_', chan_name)
            for i in range(0, len(non_ov)):
                #: Suffix every duplicate after the first with "_copy N"
                str_i = ""
                if i == 1:
                    str_i = "_copy"
                if i > 1:
                    str_i = "_copy " + str(i)
                #: Remove _ from the front of file (due to some plugins)
                for col in non_ov[i].columns:
                    if col[:1] == "_":
                        col_mod = col[1:]
                        non_ov[i].rename(columns={col:col_mod}, inplace=True)
                #: Sort header names alphabetically
                header_names = non_ov[i].columns
                header_names = header_names.sort_values()
                non_ov[i] = non_ov[i][header_names]
                for c in non_ov[i].columns:
                    #: Round all but ID, TrackID, Time to 6 sigfigs
                    if c != "TrackID_"+chan_mod and c != "ID_Object_"+chan_mod:
                        if c!="ID_Time" and "TrackID" not in c:
                            non_ov[i][c]=non_ov[i][c].apply(self.round_to_six)
                #: Collapse repeated underscores left by suffixing, then
                #: rename ID columns to the names FlowJo expects
                non_ov[i].columns = non_ov[i].columns.str.replace("___", "_")
                non_ov[i].columns = non_ov[i].columns.str.replace("__", "_")
                non_ov[i].columns = non_ov[i].columns.str.replace(
                    "ID_Time", "Time")
                non_ov[i].columns = non_ov[i].columns.str.replace(
                    "ID_Object", "ID")
                #: Display np.NaN values as 'NaN' so FlowJo can view
                temp_string = imaris_basename + "_" + chan_name + str_i + ".csv"
                temp_path = dirname/temp_string
                non_ov[i].to_csv(temp_path, index=False, na_rep='NaN')
def create_csv_fun(self):
"""Main function; combines intermediate files to produce output.
This function combines all intermediate files
(``extract_ims_data.py`` and ``link_ims_ids.py`` outputs)
to produce csv files that link IDs to features for each channel
and an xlsx file containing overall summary statistics.
It takes in as inputs the csv files created from
``link_ims_ids.py`` and ``extract_ims_data.py``. It outputs an
``Overall.xlsx`` file containing summary data for each channel.
The remaining feature data is exported within individual csv
files for each channel. For example: ``Red.csv``, ``Green.csv``,
and ``ColocSurfaces.csv``
"""
#: Open the file for reading; h5py.File acts like a dictionary
self.logger.debug(
"Opening .ims file {}...".format(str(self.ims_filename)))
self.f = h5py.File(self.ims_filename, 'r')
#: Determine # of groups (channel_names) in 'Scene8/Content'
logging.debug("Counting channel names in Scene8/Content...")
channel_names = list(self.f['Scene8']['Content'].keys())
# Ignore irrelevant channel types
channel_names = [
chan for chan in channel_names if chan.startswith(
"Points") or chan.startswith("MegaSurfaces")]
#: Combine objectdf, trackdf, track_id_object_df csv into 1 df
all_overall_dfs = {}
non_overall_dfs = {}
for i in range(0,len(channel_names)):
#: Loop through each attribute in Scene8/Content/
self.logger.debug(
"\n\nITERATION {}/{} OF FILE {}".format(
i+1, len(channel_names), self.ims_filename))
current_channel = channel_names[i]
self.logger.debug("Reading {}...".format(current_channel))
#: Read 'Name' attribute of each channel to get channel name
chan_name=self.f['Scene8']['Content'][current_channel].attrs['Name']
chan_name = chan_name.tostring(order='C')
#: Convert channel name from class byte to string
chan_name = str(chan_name, "utf-8")
excel_channel = chan_name
#: Remove special characters from channel name using regex
regex = re.compile('[^a-zA-Z0-9]+')
#: Replaces special characters with _
chan_name = regex.sub('_', chan_name)
chan_name = "_" + chan_name
#: Skip empty channels
if chan_name == "__":
pass
#: Read the required input files
else:
temp_string1 = "trackdf_" + current_channel + ".csv"
path1 = self.dir_name / temp_string1
temp_string2 = "objectdf_" + current_channel + ".csv"
path2 = self.dir_name / temp_string2
if path.exists(path1)==True and path.exists(path2)==True:
#: Load Track Data
track_df = self.get_df_from_csv(
self.dir_name, current_channel, chan_name, "trackdf_")
#: Load Object Data
object_df = self.get_df_from_csv(
self.dir_name, current_channel, chan_name, "objectdf_")
#: Load Track ID: Object ID data
track_id_object_df = self.get_df_from_csv(
self.dir_name, current_channel, chan_name, "")
has_track = True
has_object = True
has_track_id_object = True
#: Determine if track_df or object_df is empty.
if track_df.empty == True:
#: If so, set has_object or has_track to False.
has_track = False
if object_df.empty == True:
has_object = False
if track_id_object_df.empty == True:
track_id_object_df = pd.DataFrame(
{'TrackID' + chan_name:np.NaN, 'ID_Object' + \
chan_name:np.NaN}, index=[0])
has_track_id_object == True
#: Isolate "Overall" data
if (has_track_id_object == True and has_object == True) or \
(has_track_id_object == True and has_object == False):
#: Add 1 to all time chans (sets t=0 to t=1)
object_df['ID_Time'] = object_df['ID_Time'] + 1
#: Where Object ID < 0, save as "Overall"
overall_df = object_df.loc[object_df[
'ID_Object' + chan_name] < 0].copy()
#: Where Object ID > -1, save as "Object"
object_df = object_df.loc[
object_df['ID_Object' + chan_name] >= 0]
#: Flag empty dfs after moving object to overall
if object_df.empty == True:
has_object = False
overall_df = self.get_overall(overall_df, chan_name)
#: Make dict key=.ims channel, val=overall df
if excel_channel in all_overall_dfs:
all_overall_dfs[excel_channel].append(overall_df)
else:
all_overall_dfs[excel_channel] = []
all_overall_dfs[excel_channel].append(overall_df)
#: Merge dict of IDs and tracks/objects together
if has_object == True:
#: Wherever Object ID >= 1, save as object data
object_df=object_df[object_df['ID_Object'+chan_name]>=0]
object_df.dropna(axis=1, how='all', inplace=True)
#: Combine ID dictionary, Track, and/or Object data
if has_object == True and has_track == False:
track_id_object_df = pd.merge(
track_id_object_df, object_df,
how='outer', on='ID_Object' + chan_name)
track_id_object_df.dropna(
axis=0, how='all', inplace=True)
track_id_object_df.dropna(
axis=1, how='all', inplace=True)
#: Resolve overwrite for files sharing chan name
if excel_channel in non_overall_dfs:
non_overall_dfs[excel_channel].append(
track_id_object_df)
else:
non_overall_dfs[excel_channel] = []
non_overall_dfs[excel_channel].append(
track_id_object_df)
elif has_object == False and has_track == True:
track_id_object_df = pd.merge(
track_id_object_df, track_df, how='outer',
on='TrackID' + chan_name)
if excel_channel in non_overall_dfs:
non_overall_dfs[excel_channel].append(
track_id_object_df)
else:
non_overall_dfs[excel_channel] = []
non_overall_dfs[excel_channel].append(
track_id_object_df)
#: Fix issue overwrite for files sharing chan name
elif has_object == True and has_track == True:
#: First merge ID dictionary to objects
merged_object = pd.merge(
object_df, track_id_object_df, how='outer',
on='ID_Object' + chan_name)
#: Second merge above df to tracks
features_merged = pd.merge(
merged_object, track_df, how='outer',
on='TrackID' + chan_name)
if excel_channel in non_overall_dfs:
non_overall_dfs[excel_channel].append(
features_merged)
else:
non_overall_dfs[excel_channel] = []
non_overall_dfs[excel_channel].append(
features_merged)
if all_overall_dfs:
#: Export overall data as xlsx file
self.create_overall_xlsx(
self.ims_filename, self.meta_dir_name, all_overall_dfs)
#: Create final output
self.logger.info("Creating final output (stage 3/3)...")
self.create_final_output(
self.ims_filename, non_overall_dfs, self.dir_name)
self.logger.info("{} complete!".format(str(self.ims_filename))) | formats/polus-imaris-parser-plugin/src/merge_ids_to_features.py | import csv
import h5py
import logging
import logging.config
import numpy as np
import os
from os import path
import pandas as pd
import pathlib
import re
import xlsxwriter
class CreateCsv:
    """Class combines csv linking IDs to csv containing feature info.

    Merge linked track and object IDs to corresponding feature data to
    produce a csv output file that can be visualized in FlowJo.
    """

    def __init__(self, ims_filename, dir_name, meta_dir_name, logger=None):
        """Open .ims file for reading; h5py.File acts like a dictionary.

        Args:
            ims_filename: Path of the selected Imaris file
            dir_name: Output csv collection directory
            meta_dir_name: Output metadata directory
            logger: Unused; a module-level logger is configured instead.
        """
        #: Set up the logger
        logging.basicConfig(
            format='%(asctime)s-%(name)s-%(levelname)s-%(message)s',
            datefmt='%b-%d-%y %H:%M:%S')
        self.logger = logging.getLogger(__name__)
        self.logger.setLevel(logging.INFO)
        #: NOTE(review): dir_name/meta_dir_name are later joined with the
        #: `/` operator and `.unlink()` is called on the results, so they
        #: are expected to be pathlib.Path objects — confirm with callers.
        self.ims_filename = ims_filename
        self.dir_name = dir_name
        self.meta_dir_name = meta_dir_name
        #: Keep the .ims handle; create_csv_fun() re-opens it as well.
        self.f = h5py.File(self.ims_filename, 'r')
def round_to_six(self, num):
"""Round values to six significant figures
Args:
num (:obj:`int`): Num to be rounded to 6 significant figures
Returns:
A number rounded to six significant figures
"""
if num != 0:
if np.isnan(num) != True:
num = np.round(num, -int(np.floor(np.log10(abs(num)))) + 5)
elif num == 0:
pass
return num
    def get_df_from_csv(self, dirname, chan, chan_name, csv_substring):
        """Read one intermediate csv file into a dataframe, then delete it.

        Reads feature data (``extract_ims_data.py`` output) or ID data
        (``link_ims_ids.py`` output) for a channel. ``chan`` is the
        attribute name inside ``Scene8/Content`` (e.g. ``Points0``),
        while ``chan_name`` is the human-readable name read from the
        Imaris metadata; ``chan_name`` is suffixed onto every column
        except ``ID_Time`` so channels can later be merged safely.

        Args:
            dirname: Output csv collection directory (pathlib.Path —
                it is joined with `/` and the file is `.unlink()`-ed)
            chan (:obj:`str`): attribute name in ``Scene8/Content``
            chan_name (:obj:`str`): Channel name entered in Imaris
            csv_substring (:obj:`str`): Temp .csv prefix (``trackdf_``,
                ``objectdf_``, or empty string for the ID-link file)

        Returns:
            Pandas dataframe created from the intermediate csv file.
        """
        #: Columns that must NOT get the channel-name suffix
        keepsame = {'ID_Time'}
        #: Read csv
        temp_string = csv_substring + chan + ".csv"
        temp_path = dirname/temp_string
        df = pd.read_csv(temp_path)
        #: Remove "Unnamed" columns (index columns written by to_csv):
        df = df.loc[:, ~df.columns.str.contains('^Unnamed')]
        #: Suffix chan name to all headers except those in `keepsame`
        if len(df.columns) > 0:
            df.columns = ['{}{}'.format(
                c, '' if c in keepsame else chan_name) for c in df.columns]
            #: Collapse double underscores introduced by the suffixing
            df.columns = df.columns.str.replace('__', '_')
        #: Remove the intermediate csv file — it has been consumed.
        self.logger.debug("Remove line file_to_remove.unlink() to debug.")
        self.logger.debug(
            "CSV files can indicate where in the process an issue begins.")
        file_to_remove = temp_path
        file_to_remove.unlink()
        return df
def get_overall(self, overall_df, chan_name):
"""Extract overall data from object data
This reads the output from the ``get_df_from_csv()`` function
and extracts overall data from the df containing non-track data.
Note that in the second module, ``extract_ims_data.py``,
data tagged `Overall` was assigned an `ID_Object` of -1
Args:
overall_df: Df obtained `ID_Object` of object df < 0.
chan_name (:obj:`str`): Channel name entered in Imaris
Returns:
DF containing overall data; formatted like Imaris version
"""
overall_df.dropna(axis=1, how='all', inplace=True)
#: All ID_Objects == -1 belong to Overall. Replace with np.NaN
overall_df['ID_Object' + chan_name] = \
overall_df['ID_Object'+chan_name].replace(
-1.0, np.NaN, inplace=True)
#: Replace time = -1.0 with np.NaN
overall_df['ID_Time'].replace(-1.0, np.NaN, inplace=True)
overall_df.reset_index()
#: Rearrange df to match exact format exported by Imaris file
overall_df = pd.melt(
overall_df, id_vars=['ID_Time', 'ID_Object' + chan_name],
var_name='Variable', value_name='Value')
overall_df = overall_df[
['Variable','Value','ID_Time','ID_Object'+chan_name]]
overall_df.rename(
{'ID_Time': 'Time', 'ID_Object' + chan_name: 'ID'},
axis='columns', inplace=True)
overall_df.dropna(subset=['Value'], inplace=True)
overall_df['Variable'] = overall_df['Variable'].str.replace('_', ' ')
overall_df=overall_df.dropna(axis=1,how='all')
return overall_df
def create_overall_xlsx(self,imaris_filename,meta_dirname,all_overall_dict):
"""Create overall xlsx. Each sheet represents one channel.
This function merges all Overall DFs together and write each
channel to an xlsx notebook that uses sheets to represent
individual channels
Args:
imaris_filename (:obj:`str`): Filename of Imaris file
meta_dirname (:obj:`str`): Output metadata directory
all_overall_dict: Dict key=Imaris channel, value=overall df
"""
#: Get basename from imaris filename, to prepend to Overall.xlsx
imaris_basename = imaris_filename.stem
#: Remove .ims extension
imaris_basename = imaris_basename[:-4]
#: Create a Pandas Excel writer using XlsxWriter as the engine
temp_string = imaris_basename + "_" + 'Overall.xlsx'
temp_path = meta_dirname/temp_string
writer = pd.ExcelWriter(temp_path, engine='xlsxwriter')
count = 1
for chan_name, overall_df_list in all_overall_dict.items():
for i in range(0, len(overall_df_list)):
str_i = "_"
if i >= 1:
str_i = "_" + str(i) + "_"
str_channel_name = re.sub('[^A-Za-z0-9]+', '_', chan_name)
#: Convert the dataframe to an XlsxWriter Excel object
str_channel_name_shortened = ""
if len(str_channel_name) > 25:
str_channel_name_shortened = str_channel_name[:25]
else:
str_channel_name_shortened = str_channel_name
#: Round Overall "Values" column to 6 significant digits
self.logger.debug("Converting data to 6 significant figures...")
overall_df_list[i]['Value'] = overall_df_list[i]\
['Value'].apply(self.round_to_six)
overall_df_list[i].to_excel(
writer,
sheet_name=str_channel_name_shortened + str_i + str(count),
index=False, startrow=2, startcol=0)
#: Get the xlsxwriter workbook and worksheet objects
worksheet = writer.sheets[
str_channel_name_shortened + str_i + str(count)]
#: Add original, unmodified channel name to first row
worksheet.write(0, 0, chan_name)
#: Set the column width and format.
worksheet.set_column(0, 0, 50) #: 1st, last col, width
#: Close the Pandas Excel writer and output the Excel file.
count = count + 1
writer.save()
    def create_final_output(self, imaris_filename, non_overall_dfs, dirname):
        """Write per-channel csv files of the non-overall data.

        Stores remaining non-overall data with `TrackID` (if applicable),
        `ID_Object` (if applicable), and feature data, and exports one
        csv per dataframe; repeated channel names get a "_copy N" suffix.

        Args:
            imaris_filename: pathlib.Path of the Imaris file
            non_overall_dfs: dict key=channel name, value=list of dfs
            dirname: Output csv collection directory (pathlib.Path)
        """
        #: Get basename from imaris filename, to prepend to channel.csv
        imaris_basename = imaris_filename.stem
        #: Remove .ims extension
        #: NOTE(review): .stem already drops the extension — same
        #: assumption as in create_overall_xlsx; confirm inputs.
        imaris_basename = imaris_basename[:-4]
        for chan_name, non_ov in non_overall_dfs.items():
            #: Replace special characters from channel name (key) with _
            chan_mod = re.sub('[^0-9a-zA-Z]+', '_', chan_name)
            for i in range(0, len(non_ov)):
                #: Suffix every duplicate after the first with "_copy N"
                str_i = ""
                if i == 1:
                    str_i = "_copy"
                if i > 1:
                    str_i = "_copy " + str(i)
                #: Remove _ from the front of file (due to some plugins)
                for col in non_ov[i].columns:
                    if col[:1] == "_":
                        col_mod = col[1:]
                        non_ov[i].rename(columns={col:col_mod}, inplace=True)
                #: Sort header names alphabetically
                header_names = non_ov[i].columns
                header_names = header_names.sort_values()
                non_ov[i] = non_ov[i][header_names]
                for c in non_ov[i].columns:
                    #: Round all but ID, TrackID, Time to 6 sigfigs
                    if c != "TrackID_"+chan_mod and c != "ID_Object_"+chan_mod:
                        if c!="ID_Time" and "TrackID" not in c:
                            non_ov[i][c]=non_ov[i][c].apply(self.round_to_six)
                #: Collapse repeated underscores left by suffixing, then
                #: rename ID columns to the names FlowJo expects
                non_ov[i].columns = non_ov[i].columns.str.replace("___", "_")
                non_ov[i].columns = non_ov[i].columns.str.replace("__", "_")
                non_ov[i].columns = non_ov[i].columns.str.replace(
                    "ID_Time", "Time")
                non_ov[i].columns = non_ov[i].columns.str.replace(
                    "ID_Object", "ID")
                #: Display np.NaN values as 'NaN' so FlowJo can view
                temp_string = imaris_basename + "_" + chan_name + str_i + ".csv"
                temp_path = dirname/temp_string
                non_ov[i].to_csv(temp_path, index=False, na_rep='NaN')
def create_csv_fun(self):
"""Main function; combines intermediate files to produce output.
This function combines all intermediate files
(``extract_ims_data.py`` and ``link_ims_ids.py`` outputs)
to produce csv files that link IDs to features for each channel
and an xlsx file containing overall summary statistics.
It takes in as inputs the csv files created from
``link_ims_ids.py`` and ``extract_ims_data.py``. It outputs an
``Overall.xlsx`` file containing summary data for each channel.
The remaining feature data is exported within individual csv
files for each channel. For example: ``Red.csv``, ``Green.csv``,
and ``ColocSurfaces.csv``
"""
#: Open the file for reading; h5py.File acts like a dictionary
self.logger.debug(
"Opening .ims file {}...".format(str(self.ims_filename)))
self.f = h5py.File(self.ims_filename, 'r')
#: Determine # of groups (channel_names) in 'Scene8/Content'
logging.debug("Counting channel names in Scene8/Content...")
channel_names = list(self.f['Scene8']['Content'].keys())
# Ignore irrelevant channel types
channel_names = [
chan for chan in channel_names if chan.startswith(
"Points") or chan.startswith("MegaSurfaces")]
#: Combine objectdf, trackdf, track_id_object_df csv into 1 df
all_overall_dfs = {}
non_overall_dfs = {}
for i in range(0,len(channel_names)):
#: Loop through each attribute in Scene8/Content/
self.logger.debug(
"\n\nITERATION {}/{} OF FILE {}".format(
i+1, len(channel_names), self.ims_filename))
current_channel = channel_names[i]
self.logger.debug("Reading {}...".format(current_channel))
#: Read 'Name' attribute of each channel to get channel name
chan_name=self.f['Scene8']['Content'][current_channel].attrs['Name']
chan_name = chan_name.tostring(order='C')
#: Convert channel name from class byte to string
chan_name = str(chan_name, "utf-8")
excel_channel = chan_name
#: Remove special characters from channel name using regex
regex = re.compile('[^a-zA-Z0-9]+')
#: Replaces special characters with _
chan_name = regex.sub('_', chan_name)
chan_name = "_" + chan_name
#: Skip empty channels
if chan_name == "__":
pass
#: Read the required input files
else:
temp_string1 = "trackdf_" + current_channel + ".csv"
path1 = self.dir_name / temp_string1
temp_string2 = "objectdf_" + current_channel + ".csv"
path2 = self.dir_name / temp_string2
if path.exists(path1)==True and path.exists(path2)==True:
#: Load Track Data
track_df = self.get_df_from_csv(
self.dir_name, current_channel, chan_name, "trackdf_")
#: Load Object Data
object_df = self.get_df_from_csv(
self.dir_name, current_channel, chan_name, "objectdf_")
#: Load Track ID: Object ID data
track_id_object_df = self.get_df_from_csv(
self.dir_name, current_channel, chan_name, "")
has_track = True
has_object = True
has_track_id_object = True
#: Determine if track_df or object_df is empty.
if track_df.empty == True:
#: If so, set has_object or has_track to False.
has_track = False
if object_df.empty == True:
has_object = False
if track_id_object_df.empty == True:
track_id_object_df = pd.DataFrame(
{'TrackID' + chan_name:np.NaN, 'ID_Object' + \
chan_name:np.NaN}, index=[0])
has_track_id_object == True
#: Isolate "Overall" data
if (has_track_id_object == True and has_object == True) or \
(has_track_id_object == True and has_object == False):
#: Add 1 to all time chans (sets t=0 to t=1)
object_df['ID_Time'] = object_df['ID_Time'] + 1
#: Where Object ID < 0, save as "Overall"
overall_df = object_df.loc[object_df[
'ID_Object' + chan_name] < 0].copy()
#: Where Object ID > -1, save as "Object"
object_df = object_df.loc[
object_df['ID_Object' + chan_name] >= 0]
#: Flag empty dfs after moving object to overall
if object_df.empty == True:
has_object = False
overall_df = self.get_overall(overall_df, chan_name)
#: Make dict key=.ims channel, val=overall df
if excel_channel in all_overall_dfs:
all_overall_dfs[excel_channel].append(overall_df)
else:
all_overall_dfs[excel_channel] = []
all_overall_dfs[excel_channel].append(overall_df)
#: Merge dict of IDs and tracks/objects together
if has_object == True:
#: Wherever Object ID >= 1, save as object data
object_df=object_df[object_df['ID_Object'+chan_name]>=0]
object_df.dropna(axis=1, how='all', inplace=True)
#: Combine ID dictionary, Track, and/or Object data
if has_object == True and has_track == False:
track_id_object_df = pd.merge(
track_id_object_df, object_df,
how='outer', on='ID_Object' + chan_name)
track_id_object_df.dropna(
axis=0, how='all', inplace=True)
track_id_object_df.dropna(
axis=1, how='all', inplace=True)
#: Resolve overwrite for files sharing chan name
if excel_channel in non_overall_dfs:
non_overall_dfs[excel_channel].append(
track_id_object_df)
else:
non_overall_dfs[excel_channel] = []
non_overall_dfs[excel_channel].append(
track_id_object_df)
elif has_object == False and has_track == True:
track_id_object_df = pd.merge(
track_id_object_df, track_df, how='outer',
on='TrackID' + chan_name)
if excel_channel in non_overall_dfs:
non_overall_dfs[excel_channel].append(
track_id_object_df)
else:
non_overall_dfs[excel_channel] = []
non_overall_dfs[excel_channel].append(
track_id_object_df)
#: Fix issue overwrite for files sharing chan name
elif has_object == True and has_track == True:
#: First merge ID dictionary to objects
merged_object = pd.merge(
object_df, track_id_object_df, how='outer',
on='ID_Object' + chan_name)
#: Second merge above df to tracks
features_merged = pd.merge(
merged_object, track_df, how='outer',
on='TrackID' + chan_name)
if excel_channel in non_overall_dfs:
non_overall_dfs[excel_channel].append(
features_merged)
else:
non_overall_dfs[excel_channel] = []
non_overall_dfs[excel_channel].append(
features_merged)
if all_overall_dfs:
#: Export overall data as xlsx file
self.create_overall_xlsx(
self.ims_filename, self.meta_dir_name, all_overall_dfs)
#: Create final output
self.logger.info("Creating final output (stage 3/3)...")
self.create_final_output(
self.ims_filename, non_overall_dfs, self.dir_name)
self.logger.info("{} complete!".format(str(self.ims_filename))) | 0.692122 | 0.272285 |
import os
import sys
import unittest
from click.testing import CliRunner
from dirindex._cli import cli
from dirindex.version import VERSION
class Test1(unittest.TestCase):
    """End-to-end CLI tests for dirindex, driven through click's CliRunner."""

    def test1(self):
        """Harness sanity check; keeps the suite non-empty."""
        self.assertFalse(False, "False is False")
        # self.assertTrue(False, "not Implemented")

    def testUsage(self):
        """Bare invocation prints usage plus both subcommand names."""
        result = CliRunner().invoke(cli)
        self.assertIn("Usage", result.output)
        self.assertIn("make", result.output)
        self.assertIn("read-resource", result.output)

    def testVersion(self):
        """--version reports the package name and VERSION string."""
        result = CliRunner().invoke(cli, args=["--version"])
        self.assertIn("dirindex", result.output)
        self.assertIn("version", result.output)
        self.assertIn(VERSION, result.output)

    def testmake1(self):
        """`make --help` lists the documented options."""
        result = CliRunner().invoke(cli, args=["make", "--help"])
        self.assertIn("Usage", result.output)
        self.assertIn("make", result.output)
        self.assertIn("template", result.output)
        self.assertIn("hide", result.output)
        self.assertIn("filename", result.output)

    def testreadrsc1(self):
        """The bundled 'apache' resource template is readable."""
        result = CliRunner().invoke(cli, args=["read-resource", "apache"])
        self.assertIn("Index", result.output)

    def testreadrsc2(self):
        """The bundled '.gitignore' resource is readable."""
        result = CliRunner().invoke(cli, args=["read-resource", ".gitignore"])
        self.assertIn("lib", result.output)
        self.assertIn("cover", result.output)

    @unittest.skipIf(sys.platform.startswith("win"), "Skip Windows")
    def testmake2(self):
        """Exercise `make` flag combinations in an isolated tree.

        Builds test.txt plus testdir/{test2.txt,ignore.txt}, runs `make`
        with --recursive/--single/--hide/--pattern combinations, and
        checks each generated `lsl` listing. The ls-l template output is
        matched via the POSIX mode string, hence the Windows skip.
        """
        runner = CliRunner()
        with runner.isolated_filesystem():
            #: Fixture: one file at the root, two inside a subdirectory.
            with open("test.txt", "w") as f:
                f.write("hello world\n")
            os.mkdir("testdir")
            with open("testdir/test2.txt", "w") as f:
                f.write("hello world\n")
            with open("testdir/ignore.txt", "w") as f:
                f.write("hello world\n")
            #: Plain make: index only the current directory.
            runner.invoke(
                cli, args=["make", "--template", "ls-l", "--filename", "lsl", "."])
            with open("lsl") as f:
                output = f.read()
            self.assertIn("-rw-r--r--", output)
            self.assertIn("test.txt", output)
            self.assertIn("testdir", output)
            #: --recursive: an index file appears inside the subdirectory.
            runner.invoke(
                cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive"])
            with open("testdir/lsl") as f:
                output = f.read()
            self.assertIn("-rw-r--r--", output)
            self.assertIn("test2.txt", output)
            #: --single: one combined index with relative paths.
            runner.invoke(
                cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive", "--single"])
            with open("lsl") as f:
                output = f.read()
            self.assertIn("-rw-r--r--", output)
            self.assertIn("test.txt", output)
            self.assertIn("testdir", output)
            self.assertIn("testdir/test2.txt", output)
            #: --hide: names matching the glob are excluded.
            runner.invoke(
                cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive", "--single", "--hide", "ignore.*"])
            with open("lsl") as f:
                output = f.read()
            self.assertIn("-rw-r--r--", output)
            self.assertIn("test.txt", output)
            self.assertIn("testdir", output)
            self.assertIn("testdir/test2.txt", output)
            self.assertNotIn("ignore", output)
            #: --pattern: restrict the index to matching names
            #: (asserts below only check inclusion of the kept files).
            runner.invoke(
                cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive", "--single", "--hide", "ignore.*", "--pattern", "*.txt", "--pattern", "testdir"])
            with open("lsl") as f:
                output = f.read()
            self.assertIn("-rw-r--r--", output)
            self.assertIn("test.txt", output)
            self.assertIn("testdir", output)
            self.assertIn("testdir/test2.txt", output)
self.assertNotIn("ignore", output) | tests/test_1.py | import os
import sys
import unittest
from click.testing import CliRunner
from dirindex._cli import cli
from dirindex.version import VERSION
class Test1(unittest.TestCase):
def test1(self):
self.assertFalse(False, "False is False")
# self.assertTrue(False, "not Implemented")
def testUsage(self):
result = CliRunner().invoke(cli)
self.assertIn("Usage", result.output)
self.assertIn("make", result.output)
self.assertIn("read-resource", result.output)
def testVersion(self):
result = CliRunner().invoke(cli, args=["--version"])
self.assertIn("dirindex", result.output)
self.assertIn("version", result.output)
self.assertIn(VERSION, result.output)
def testmake1(self):
result = CliRunner().invoke(cli, args=["make", "--help"])
self.assertIn("Usage", result.output)
self.assertIn("make", result.output)
self.assertIn("template", result.output)
self.assertIn("hide", result.output)
self.assertIn("filename", result.output)
def testreadrsc1(self):
result = CliRunner().invoke(cli, args=["read-resource", "apache"])
self.assertIn("Index", result.output)
def testreadrsc2(self):
result = CliRunner().invoke(cli, args=["read-resource", ".gitignore"])
self.assertIn("lib", result.output)
self.assertIn("cover", result.output)
@unittest.skipIf(sys.platform.startswith("win"), "Skip Windows")
def testmake2(self):
runner = CliRunner()
with runner.isolated_filesystem():
with open("test.txt", "w") as f:
f.write("hello world\n")
os.mkdir("testdir")
with open("testdir/test2.txt", "w") as f:
f.write("hello world\n")
with open("testdir/ignore.txt", "w") as f:
f.write("hello world\n")
runner.invoke(
cli, args=["make", "--template", "ls-l", "--filename", "lsl", "."])
with open("lsl") as f:
output = f.read()
self.assertIn("-rw-r--r--", output)
self.assertIn("test.txt", output)
self.assertIn("testdir", output)
runner.invoke(
cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive"])
with open("testdir/lsl") as f:
output = f.read()
self.assertIn("-rw-r--r--", output)
self.assertIn("test2.txt", output)
runner.invoke(
cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive", "--single"])
with open("lsl") as f:
output = f.read()
self.assertIn("-rw-r--r--", output)
self.assertIn("test.txt", output)
self.assertIn("testdir", output)
self.assertIn("testdir/test2.txt", output)
runner.invoke(
cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive", "--single", "--hide", "ignore.*"])
with open("lsl") as f:
output = f.read()
self.assertIn("-rw-r--r--", output)
self.assertIn("test.txt", output)
self.assertIn("testdir", output)
self.assertIn("testdir/test2.txt", output)
self.assertNotIn("ignore", output)
runner.invoke(
cli, args=["make", "--template", "ls-l", "--filename", "lsl", ".", "--recursive", "--single", "--hide", "ignore.*", "--pattern", "*.txt", "--pattern", "testdir"])
with open("lsl") as f:
output = f.read()
self.assertIn("-rw-r--r--", output)
self.assertIn("test.txt", output)
self.assertIn("testdir", output)
self.assertIn("testdir/test2.txt", output)
self.assertNotIn("ignore", output) | 0.364438 | 0.529507 |
from django.db import models
from allianceauth.services.hooks import get_extension_logger
from esi.models import Token
from bravado.exception import HTTPUnauthorized, HTTPForbidden
from eveuniverse.models import EveSolarSystem
from . import __title__
from .utils import LoggerAddTag
from .helpers import esi_fetch
# Create your managers here.
logger = LoggerAddTag(get_extension_logger(__name__), __title__)
class LocationManager(models.Manager):
STATION_ID_START = 60000000
STATION_ID_END = 69999999
def get_or_create_from_esi(
self, token: Token, location_id: int, add_unknown: bool = True
) -> tuple:
"""gets or creates location object with data fetched from ESI"""
from .models import Location
try:
location = self.get(id=location_id)
created = False
except Location.DoesNotExist:
location, created = self.update_or_create_from_esi(
token=token, location_id=location_id, add_unknown=add_unknown
)
return location, created
def update_or_create_from_esi(
self, token: Token, location_id: int, add_unknown: bool = True
) -> tuple:
"""updates or creates location object with data fetched from ESI"""
from .models import Location
if location_id >= self.STATION_ID_START and location_id <= self.STATION_ID_END:
logger.info("Fetching station from ESI")
try:
station = esi_fetch(
"Universe.get_universe_stations_station_id",
args={"station_id": location_id},
)
eve_solar_system, _ = EveSolarSystem.objects.get_or_create_esi(
id=station["system_id"]
)
location, created = self.update_or_create(
id=location_id,
defaults={
"name": station["name"],
"eve_solar_system": eve_solar_system,
"category_id": Location.CATEGORY_STATION_ID,
},
)
except Exception as ex:
logger.exception("Failed to load station: {}".format(ex))
raise ex
else:
try:
structure = esi_fetch(
"Universe.get_universe_structures_structure_id",
args={"structure_id": location_id},
token=token,
)
eve_solar_system, _ = EveSolarSystem.objects.get_or_create_esi(
id=structure["solar_system_id"]
)
location, created = self.update_or_create(
id=location_id,
defaults={
"name": structure["name"],
"eve_solar_system": eve_solar_system,
"category_id": Location.CATEGORY_STRUCTURE_ID,
},
)
except (HTTPUnauthorized, HTTPForbidden) as ex:
logger.warning("No access to this structure: {}".format(ex))
if add_unknown:
location, created = self.get_or_create(
id=location_id,
defaults={
"name": "Unknown structure {}".format(location_id),
"category_id": Location.CATEGORY_STRUCTURE_ID,
},
)
else:
raise ex
except Exception as ex:
logger.exception("Failed to load structure: {}".format(ex))
raise ex
return location, created | buybacks/managers.py | from django.db import models
from allianceauth.services.hooks import get_extension_logger
from esi.models import Token
from bravado.exception import HTTPUnauthorized, HTTPForbidden
from eveuniverse.models import EveSolarSystem
from . import __title__
from .utils import LoggerAddTag
from .helpers import esi_fetch
# Create your managers here.
logger = LoggerAddTag(get_extension_logger(__name__), __title__)
class LocationManager(models.Manager):
STATION_ID_START = 60000000
STATION_ID_END = 69999999
def get_or_create_from_esi(
self, token: Token, location_id: int, add_unknown: bool = True
) -> tuple:
"""gets or creates location object with data fetched from ESI"""
from .models import Location
try:
location = self.get(id=location_id)
created = False
except Location.DoesNotExist:
location, created = self.update_or_create_from_esi(
token=token, location_id=location_id, add_unknown=add_unknown
)
return location, created
def update_or_create_from_esi(
self, token: Token, location_id: int, add_unknown: bool = True
) -> tuple:
"""updates or creates location object with data fetched from ESI"""
from .models import Location
if location_id >= self.STATION_ID_START and location_id <= self.STATION_ID_END:
logger.info("Fetching station from ESI")
try:
station = esi_fetch(
"Universe.get_universe_stations_station_id",
args={"station_id": location_id},
)
eve_solar_system, _ = EveSolarSystem.objects.get_or_create_esi(
id=station["system_id"]
)
location, created = self.update_or_create(
id=location_id,
defaults={
"name": station["name"],
"eve_solar_system": eve_solar_system,
"category_id": Location.CATEGORY_STATION_ID,
},
)
except Exception as ex:
logger.exception("Failed to load station: {}".format(ex))
raise ex
else:
try:
structure = esi_fetch(
"Universe.get_universe_structures_structure_id",
args={"structure_id": location_id},
token=token,
)
eve_solar_system, _ = EveSolarSystem.objects.get_or_create_esi(
id=structure["solar_system_id"]
)
location, created = self.update_or_create(
id=location_id,
defaults={
"name": structure["name"],
"eve_solar_system": eve_solar_system,
"category_id": Location.CATEGORY_STRUCTURE_ID,
},
)
except (HTTPUnauthorized, HTTPForbidden) as ex:
logger.warning("No access to this structure: {}".format(ex))
if add_unknown:
location, created = self.get_or_create(
id=location_id,
defaults={
"name": "Unknown structure {}".format(location_id),
"category_id": Location.CATEGORY_STRUCTURE_ID,
},
)
else:
raise ex
except Exception as ex:
logger.exception("Failed to load structure: {}".format(ex))
raise ex
return location, created | 0.387574 | 0.112844 |
import pandas as pd
import pytest
import numpy as np
from designs import conduction_1d
from designs.conduction_1d import Config
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_INSULATED
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[
conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_INSULATED,
conduction_1d.BOUNDARY_CONSTANT
])
def boundary_conditions_start(request):
return request.param
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_INSULATED
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[
conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_INSULATED,
conduction_1d.BOUNDARY_CONSTANT
])
def boundary_conditions_end(request):
return request.param
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_CONSTANT])
def boundaries_start_requiring_temperature(request):
return request.param
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_CONSTANT])
def boundaries_end_requiring_temperature(request):
return request.param
@pytest.fixture
def form_data_defaults() -> dict:
return {
"thermal_conductivity": 0.6,
"heat_capacity": 4200,
"density": 1000,
"grid_spacing_meters": 0.001,
"number_of_grid_nodes": 10,
"simulation_time_seconds": 1000,
"boundary_temperature_start": 100,
"boundary_temperature_end": 100,
"starting_temperature": 20,
"heat_trans_coeff_start": 5,
"heat_trans_coeff_end": 5,
"boundary_condition_start": {
"value": conduction_1d.BOUNDARY_CONSTANT
},
"boundary_condition_end": {
"value": conduction_1d.BOUNDARY_CONSTANT
}
}
@pytest.fixture()
def conf(form_data_defaults):
return conduction_1d._package_form_data(form_data_defaults)
def test_returns_correct_response_with_different_boundary_conditions(
form_data_defaults, boundary_conditions_start, boundary_conditions_end):
form_data_defaults["boundary_condition_start"] = boundary_conditions_start
form_data_defaults["boundary_condition_end"] = boundary_conditions_end
result = conduction_1d.main(form_data_defaults)
assert list(result) == ['plot_results', 'output_table']
for value in result.values():
assert value['action']
assert value['value']
@pytest.mark.parametrize("boundary_key",
["boundary_condition_start", "boundary_condition_end"])
def test_missing_convective_heat_trans_coeff_raises_warning(boundary_key, form_data_defaults):
form_data_defaults[boundary_key] = {"value": conduction_1d.BOUNDARY_CONVECTIVE}
form_data_defaults["heat_trans_coeff_start"] = None
form_data_defaults["heat_trans_coeff_end"] = None
with pytest.raises(Warning) as error_info:
conduction_1d.main(form_data_defaults)
assert str(
error_info.value
) == "Convective heat transfer coefficient must be supplied for this boundary condition."
def test_heat_transfer_coeff_end_can_be_none(form_data_defaults):
form_data_defaults["boundary_condition_start"] = {"value": conduction_1d.BOUNDARY_CONVECTIVE}
form_data_defaults["boundary_condition_end"] = {"value": conduction_1d.BOUNDARY_INSULATED}
form_data_defaults["heat_trans_coeff_start"] = 5
form_data_defaults["heat_trans_coeff_end"] = None
conduction_1d.main(form_data_defaults)
def test_missing_boundary_temperature_start_raises_warning(
form_data_defaults, boundaries_start_requiring_temperature, boundary_conditions_end):
form_data_defaults["boundary_condition_start"] = boundaries_start_requiring_temperature
form_data_defaults["boundary_condition_end"] = boundary_conditions_end
del form_data_defaults["boundary_temperature_start"]
with pytest.raises(Warning) as error_info:
conduction_1d.main(form_data_defaults)
assert str(error_info.value).startswith(
"Missing boundary temperature start for boundary condition")
def test_missing_boundary_temperature_end_raises_warning(form_data_defaults,
boundary_conditions_start,
boundaries_end_requiring_temperature):
form_data_defaults["boundary_condition_start"] = boundary_conditions_start
form_data_defaults["boundary_condition_end"] = boundaries_end_requiring_temperature
del form_data_defaults["boundary_temperature_end"]
with pytest.raises(Warning) as error_info:
conduction_1d.main(form_data_defaults)
assert str(
error_info.value).startswith("Missing boundary temperature end for boundary condition")
def test_can_ignore_boundary_temperature_end_for_insulated_end_boundary(
form_data_defaults, boundary_conditions_start):
form_data_defaults["boundary_condition_end"] = {"value": conduction_1d.BOUNDARY_INSULATED}
form_data_defaults["boundary_conditions_start"] = boundary_conditions_start
del form_data_defaults["boundary_temperature_end"]
result = conduction_1d.main(form_data_defaults)
assert list(result) == ['plot_results', 'output_table']
for value in result.values():
assert value['action']
assert value['value']
def test_can_ignore_boundary_temperature_start_for_insulated_start_boundary(
form_data_defaults, boundary_conditions_end):
form_data_defaults["boundary_condition_start"] = {
"value": conduction_1d.BOUNDARY_INSULATED
}
form_data_defaults["boundary_condition_end"] = boundary_conditions_end
del form_data_defaults["boundary_temperature_start"]
result = conduction_1d.main(form_data_defaults)
assert list(result) == ['plot_results', 'output_table']
for value in result.values():
assert value['action']
assert value['value']
def test_convective_energy_equal_to_accumulation(conf: Config):
# Performing energy balance check where temperature change is approximately constant.
conf["boundary_condition_start"] = conduction_1d.BOUNDARY_CONVECTIVE
conf["boundary_condition_end"] = conduction_1d.BOUNDARY_INSULATED
conf["heat_trans_coeff_start"] = 1
conf["simulation_time_seconds"] = 500
conf["starting_temperature"] = 20
solver_config = conduction_1d.get_solver_config(conf)
times, temperatures = conduction_1d.Solver.solve(solver_config, conf)
average_temp_end = np.mean(temperatures[-1])
start_temp = temperatures[0][0]
df = pd.DataFrame(temperatures)
df['times'] = times
assert start_temp == 20
assert average_temp_end - start_temp == pytest.approx(1.0773369387960905)
# m * cp * deltaT
assert (average_temp_end -
start_temp) * 0.009 * 4200 * 1000 == pytest.approx(40723.33628649221)
# h * A * (T_inf - T_avg) * delta_time
# i.e 1 * 500 * (100 - 20) == approx(40000) | tests/test_1d_conduction.py | import pandas as pd
import pytest
import numpy as np
from designs import conduction_1d
from designs.conduction_1d import Config
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_INSULATED
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[
conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_INSULATED,
conduction_1d.BOUNDARY_CONSTANT
])
def boundary_conditions_start(request):
return request.param
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_INSULATED
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[
conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_INSULATED,
conduction_1d.BOUNDARY_CONSTANT
])
def boundary_conditions_end(request):
return request.param
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_CONSTANT])
def boundaries_start_requiring_temperature(request):
return request.param
@pytest.fixture(params=[{
"value": conduction_1d.BOUNDARY_CONVECTIVE
}, {
"value": conduction_1d.BOUNDARY_CONSTANT
}],
ids=[conduction_1d.BOUNDARY_CONVECTIVE, conduction_1d.BOUNDARY_CONSTANT])
def boundaries_end_requiring_temperature(request):
return request.param
@pytest.fixture
def form_data_defaults() -> dict:
return {
"thermal_conductivity": 0.6,
"heat_capacity": 4200,
"density": 1000,
"grid_spacing_meters": 0.001,
"number_of_grid_nodes": 10,
"simulation_time_seconds": 1000,
"boundary_temperature_start": 100,
"boundary_temperature_end": 100,
"starting_temperature": 20,
"heat_trans_coeff_start": 5,
"heat_trans_coeff_end": 5,
"boundary_condition_start": {
"value": conduction_1d.BOUNDARY_CONSTANT
},
"boundary_condition_end": {
"value": conduction_1d.BOUNDARY_CONSTANT
}
}
@pytest.fixture()
def conf(form_data_defaults):
return conduction_1d._package_form_data(form_data_defaults)
def test_returns_correct_response_with_different_boundary_conditions(
form_data_defaults, boundary_conditions_start, boundary_conditions_end):
form_data_defaults["boundary_condition_start"] = boundary_conditions_start
form_data_defaults["boundary_condition_end"] = boundary_conditions_end
result = conduction_1d.main(form_data_defaults)
assert list(result) == ['plot_results', 'output_table']
for value in result.values():
assert value['action']
assert value['value']
@pytest.mark.parametrize("boundary_key",
["boundary_condition_start", "boundary_condition_end"])
def test_missing_convective_heat_trans_coeff_raises_warning(boundary_key, form_data_defaults):
form_data_defaults[boundary_key] = {"value": conduction_1d.BOUNDARY_CONVECTIVE}
form_data_defaults["heat_trans_coeff_start"] = None
form_data_defaults["heat_trans_coeff_end"] = None
with pytest.raises(Warning) as error_info:
conduction_1d.main(form_data_defaults)
assert str(
error_info.value
) == "Convective heat transfer coefficient must be supplied for this boundary condition."
def test_heat_transfer_coeff_end_can_be_none(form_data_defaults):
form_data_defaults["boundary_condition_start"] = {"value": conduction_1d.BOUNDARY_CONVECTIVE}
form_data_defaults["boundary_condition_end"] = {"value": conduction_1d.BOUNDARY_INSULATED}
form_data_defaults["heat_trans_coeff_start"] = 5
form_data_defaults["heat_trans_coeff_end"] = None
conduction_1d.main(form_data_defaults)
def test_missing_boundary_temperature_start_raises_warning(
form_data_defaults, boundaries_start_requiring_temperature, boundary_conditions_end):
form_data_defaults["boundary_condition_start"] = boundaries_start_requiring_temperature
form_data_defaults["boundary_condition_end"] = boundary_conditions_end
del form_data_defaults["boundary_temperature_start"]
with pytest.raises(Warning) as error_info:
conduction_1d.main(form_data_defaults)
assert str(error_info.value).startswith(
"Missing boundary temperature start for boundary condition")
def test_missing_boundary_temperature_end_raises_warning(form_data_defaults,
boundary_conditions_start,
boundaries_end_requiring_temperature):
form_data_defaults["boundary_condition_start"] = boundary_conditions_start
form_data_defaults["boundary_condition_end"] = boundaries_end_requiring_temperature
del form_data_defaults["boundary_temperature_end"]
with pytest.raises(Warning) as error_info:
conduction_1d.main(form_data_defaults)
assert str(
error_info.value).startswith("Missing boundary temperature end for boundary condition")
def test_can_ignore_boundary_temperature_end_for_insulated_end_boundary(
form_data_defaults, boundary_conditions_start):
form_data_defaults["boundary_condition_end"] = {"value": conduction_1d.BOUNDARY_INSULATED}
form_data_defaults["boundary_conditions_start"] = boundary_conditions_start
del form_data_defaults["boundary_temperature_end"]
result = conduction_1d.main(form_data_defaults)
assert list(result) == ['plot_results', 'output_table']
for value in result.values():
assert value['action']
assert value['value']
def test_can_ignore_boundary_temperature_start_for_insulated_start_boundary(
form_data_defaults, boundary_conditions_end):
form_data_defaults["boundary_condition_start"] = {
"value": conduction_1d.BOUNDARY_INSULATED
}
form_data_defaults["boundary_condition_end"] = boundary_conditions_end
del form_data_defaults["boundary_temperature_start"]
result = conduction_1d.main(form_data_defaults)
assert list(result) == ['plot_results', 'output_table']
for value in result.values():
assert value['action']
assert value['value']
def test_convective_energy_equal_to_accumulation(conf: Config):
# Performing energy balance check where temperature change is approximately constant.
conf["boundary_condition_start"] = conduction_1d.BOUNDARY_CONVECTIVE
conf["boundary_condition_end"] = conduction_1d.BOUNDARY_INSULATED
conf["heat_trans_coeff_start"] = 1
conf["simulation_time_seconds"] = 500
conf["starting_temperature"] = 20
solver_config = conduction_1d.get_solver_config(conf)
times, temperatures = conduction_1d.Solver.solve(solver_config, conf)
average_temp_end = np.mean(temperatures[-1])
start_temp = temperatures[0][0]
df = pd.DataFrame(temperatures)
df['times'] = times
assert start_temp == 20
assert average_temp_end - start_temp == pytest.approx(1.0773369387960905)
# m * cp * deltaT
assert (average_temp_end -
start_temp) * 0.009 * 4200 * 1000 == pytest.approx(40723.33628649221)
# h * A * (T_inf - T_avg) * delta_time
# i.e 1 * 500 * (100 - 20) == approx(40000) | 0.733452 | 0.429669 |
import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
from classifier_control.classifier.utils.subnetworks import ConvEncoder, ConvDecoder
from classifier_control.classifier.utils.layers import Linear
class VAE(torch.nn.Module):
def __init__(self, hp):
super().__init__()
self._hp = hp
self.encoder = ConvEncoder(self._hp)
out_size = self.encoder.get_output_size()
out_flat_size = out_size[0] * out_size[1] * out_size[2]
self.linear1 = Linear(in_dim=out_flat_size, out_dim=128, builder=self._hp.builder)
self.linear2 = Linear(in_dim=128, out_dim=self._hp.hidden_size * 2, builder=self._hp.builder)
self.linear3 = Linear(in_dim=self._hp.hidden_size, out_dim=128, builder=self._hp.builder)
self.linear4 = Linear(in_dim=128, out_dim=out_flat_size, builder=self._hp.builder)
self.decoder = ConvDecoder(self._hp)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def encode(self, image):
embeddings = self.encoder(image).reshape(image.size(0), -1)
e = F.relu(self.linear1(embeddings))
z = self.linear2(e)
mu, logvar = z[:, :self._hp.hidden_size], z[:, self._hp.hidden_size:]
return mu, logvar
def decode(self, z):
e = F.relu(self.linear3(z))
e = F.relu(self.linear4(e))
e = e.view(*([e.size(0)] + list(self.encoder.get_output_size())))
im = F.sigmoid(self.decoder(e))
return im
def forward(self, image):
mu, logvar = self.encode(image)
z = self.reparameterize(mu, logvar)
im = self.decode(z)
return mu, logvar, z, im
class Dynamics(torch.nn.Module):
def __init__(self, hp):
super().__init__()
self._hp = hp
self.linear1 = Linear(in_dim=self._hp.hidden_size, out_dim=self._hp.hidden_size, builder=self._hp.builder)
def forward(self, z):
return self.linear1(z) | classifier_control/classifier/utils/vae.py | import torch
from torch.autograd import Variable
import torch.nn.functional as F
from torch.nn.parameter import Parameter
import numpy as np
from classifier_control.classifier.utils.subnetworks import ConvEncoder, ConvDecoder
from classifier_control.classifier.utils.layers import Linear
class VAE(torch.nn.Module):
def __init__(self, hp):
super().__init__()
self._hp = hp
self.encoder = ConvEncoder(self._hp)
out_size = self.encoder.get_output_size()
out_flat_size = out_size[0] * out_size[1] * out_size[2]
self.linear1 = Linear(in_dim=out_flat_size, out_dim=128, builder=self._hp.builder)
self.linear2 = Linear(in_dim=128, out_dim=self._hp.hidden_size * 2, builder=self._hp.builder)
self.linear3 = Linear(in_dim=self._hp.hidden_size, out_dim=128, builder=self._hp.builder)
self.linear4 = Linear(in_dim=128, out_dim=out_flat_size, builder=self._hp.builder)
self.decoder = ConvDecoder(self._hp)
def reparameterize(self, mu, logvar):
std = torch.exp(0.5*logvar)
eps = torch.randn_like(std)
return mu + eps*std
def encode(self, image):
embeddings = self.encoder(image).reshape(image.size(0), -1)
e = F.relu(self.linear1(embeddings))
z = self.linear2(e)
mu, logvar = z[:, :self._hp.hidden_size], z[:, self._hp.hidden_size:]
return mu, logvar
def decode(self, z):
e = F.relu(self.linear3(z))
e = F.relu(self.linear4(e))
e = e.view(*([e.size(0)] + list(self.encoder.get_output_size())))
im = F.sigmoid(self.decoder(e))
return im
def forward(self, image):
mu, logvar = self.encode(image)
z = self.reparameterize(mu, logvar)
im = self.decode(z)
return mu, logvar, z, im
class Dynamics(torch.nn.Module):
def __init__(self, hp):
super().__init__()
self._hp = hp
self.linear1 = Linear(in_dim=self._hp.hidden_size, out_dim=self._hp.hidden_size, builder=self._hp.builder)
def forward(self, z):
return self.linear1(z) | 0.928789 | 0.342407 |
import numpy as np
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR
def StepLR_scheduler(optimizer, step_size=6, gamma=0.1):
return StepLR(optimizer, step_size=step_size, gamma=gamma)
def LR_on_pleateau_scheduler(optimizer, patience=10, threshold=0.0001, threshold_mode='rel'):
return ReduceLROnPlateau(optimizer, mode='min', factor=0.1,
patience=patience, threshold=threshold, threshold_mode=threshold_mode,
cooldown=0, min_lr=0, eps=1e-08, verbose=False)
def OneCylePolicy(optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None,
pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85,
max_momentum=0.95, div_factor=25., final_div_factor=1e4, last_epoch=-1):
return OneCycleLR(optimizer, max_lr, total_steps, epochs, steps_per_epoch,
pct_start, anneal_strategy, cycle_momentum, base_momentum,
max_momentum, div_factor, final_div_factor, last_epoch)
def plot_lr(lr_min, lr_max, total_iterations, step_size):
iterations = np.arange(0, total_iterations, 1)
cycle = np.floor(1 + iterations/(2*step_size))
xt = np.abs(iterations/step_size - 2*cycle + 1)
lrt = lr_min + (lr_max - lr_min)*(1-xt)
cycle_width = 2
if max(cycle) > 1:
cycle_width = cycle_width*max(cycle)
figsize = (cycle_width, 3)
fig, ax = plt.subplots(figsize = figsize)
ax.plot(iterations, lrt)
ax.axhline(y = lr_min, xmax = total_iterations, color='red')
ax.axhline(y = lr_max, xmax = total_iterations, color='red')
ax.set_xlabel("Iterations", fontsize = 12)
ax.set_ylabel("Learning Rate", fontsize = 12)
ax.set_title("Variation of Learning Rate with iterations", fontsize = 14)
fig.tight_layout()
ax.text(lr_min - lr_max/10, "min_lr")
ax.text(lr_max + lr_max/10, "max_lr")
ax.margins(x=0.1, y=0.3)
plt.show() | Week11/main_engine/schedulers.py | import numpy as np
import matplotlib.pyplot as plt
from torch.optim.lr_scheduler import StepLR, ReduceLROnPlateau, OneCycleLR
def StepLR_scheduler(optimizer, step_size=6, gamma=0.1):
return StepLR(optimizer, step_size=step_size, gamma=gamma)
def LR_on_pleateau_scheduler(optimizer, patience=10, threshold=0.0001, threshold_mode='rel'):
return ReduceLROnPlateau(optimizer, mode='min', factor=0.1,
patience=patience, threshold=threshold, threshold_mode=threshold_mode,
cooldown=0, min_lr=0, eps=1e-08, verbose=False)
def OneCylePolicy(optimizer, max_lr, total_steps=None, epochs=None, steps_per_epoch=None,
pct_start=0.3, anneal_strategy='cos', cycle_momentum=True, base_momentum=0.85,
max_momentum=0.95, div_factor=25., final_div_factor=1e4, last_epoch=-1):
return OneCycleLR(optimizer, max_lr, total_steps, epochs, steps_per_epoch,
pct_start, anneal_strategy, cycle_momentum, base_momentum,
max_momentum, div_factor, final_div_factor, last_epoch)
def plot_lr(lr_min, lr_max, total_iterations, step_size):
iterations = np.arange(0, total_iterations, 1)
cycle = np.floor(1 + iterations/(2*step_size))
xt = np.abs(iterations/step_size - 2*cycle + 1)
lrt = lr_min + (lr_max - lr_min)*(1-xt)
cycle_width = 2
if max(cycle) > 1:
cycle_width = cycle_width*max(cycle)
figsize = (cycle_width, 3)
fig, ax = plt.subplots(figsize = figsize)
ax.plot(iterations, lrt)
ax.axhline(y = lr_min, xmax = total_iterations, color='red')
ax.axhline(y = lr_max, xmax = total_iterations, color='red')
ax.set_xlabel("Iterations", fontsize = 12)
ax.set_ylabel("Learning Rate", fontsize = 12)
ax.set_title("Variation of Learning Rate with iterations", fontsize = 14)
fig.tight_layout()
ax.text(lr_min - lr_max/10, "min_lr")
ax.text(lr_max + lr_max/10, "max_lr")
ax.margins(x=0.1, y=0.3)
plt.show() | 0.769254 | 0.421016 |
from collections import defaultdict
from logs.services import get_order_log_time_by_order_num
from order.constant import OrderStatus
from order.models import Order, OrderAddress, OrderDetail
from user.constant import USER_OUTPUT_CONSTANT
def get_shop_order_by_num(shop_id: int, num: str):
    """
    Fetch an order (with details, address and customer info) by shop id and order number.

    :param shop_id: shop the order belongs to
    :param num: order number
    :return: (True, order) on success, (False, error message) otherwise
    """
    order = Order.objects.filter(shop_id=shop_id, order_num=num).first()
    if order is None:
        return False, "订单不存在"
    order.order_details = list_order_details_by_order_ids([order.id])
    address = get_order_address_by_order_id(order.id)
    if address:
        order.address = address
    # Copy the customer's user fields onto the customer object for output.
    for field in USER_OUTPUT_CONSTANT:
        setattr(order.customer, field, getattr(order.customer.user, field))
    return True, order
def get_order_by_shop_id_and_id(shop_id: int, order_id: int):
    """
    Fetch a single order by shop id and order id.

    :param shop_id: shop the order belongs to
    :param order_id: primary key of the order
    :return: the matching Order, or None if not found
    """
    return Order.objects.filter(id=order_id, shop_id=shop_id).first()
def get_shop_order_by_shop_id_and_id(shop_id: int, order_id: int):
    """
    Fetch an order (with details and address) by shop id and order id.

    :param shop_id: shop the order belongs to
    :param order_id: primary key of the order
    :return: (True, order) on success, (False, error message) otherwise
    """
    order = Order.objects.filter(shop_id=shop_id, id=order_id).first()
    if order is None:
        return False, "订单不存在"
    order.order_details = list_order_details_by_order_ids([order.id])
    address = get_order_address_by_order_id(order.id)
    if address:
        order.address = address
    return True, order
def get_order_address_by_order_id(order_id: int):
    """
    Fetch the delivery address of an order.

    :param order_id: primary key of the order
    :return: OrderAddress with an extra ``address`` attribute set, or None
    """
    address = OrderAddress.objects.filter(order_id=order_id).first()
    if address is not None:
        # Expose the combined full address under ``address`` for output.
        address.address = address.full_address
    return address
def get_shop_order_by_num_without_details(shop_id: int, order_num: str):
    """
    Fetch an order by shop id and order number, without loading details/address.

    :param shop_id: shop the order belongs to
    :param order_num: order number
    :return: the matching Order, or None if not found
    """
    # NOTE(review): the parameter was annotated ``int``; order numbers are
    # passed as strings everywhere else in this module (see
    # get_shop_order_by_num / list_shop_orders), so the annotation is fixed
    # to ``str``. Runtime behavior is unchanged.
    order = Order.objects.filter(shop_id=shop_id, order_num=order_num).first()
    return order
def list_order_details_by_order_ids(order_ids: list):
    """
    Fetch the details of the given orders, decorated with product info.

    :param order_ids: list of order primary keys
    :return: OrderDetail rows, each with product_name / product_id /
        product_cover_picture attributes set from the related product
    """
    details = OrderDetail.objects.filter(order_id__in=order_ids).all()
    for detail in details:
        product = detail.product
        detail.product_name = product.name
        detail.product_id = product.id
        detail.product_cover_picture = product.cover_image_url
    return details
def list_order_with_order_details_by_product_id(shop_id: int, product_id: int):
    """
    List a product's sales records (orders that contain the product).

    :param shop_id: shop the orders belong to
    :param product_id: product whose sales records are wanted
    :return: orders with price_net / quantity_net / amount_net copied from the
        matching order detail, and customer user fields copied onto customer
    """
    order_with_order_details_query = Order.objects.filter(
        shop_id=shop_id, order_detail__product_id=product_id
    ).order_by("id")
    order_with_order_details = order_with_order_details_query.all()
    for order in order_with_order_details:
        # BUG FIX: previously ``order_detail`` was only assigned inside the
        # inner loop, so an order without a matching detail would raise
        # UnboundLocalError (first order) or silently reuse a stale detail
        # from a previous order. Keep the original "last match wins" rule.
        matched_detail = None
        for od in order.order_detail.all():
            if od.product_id == product_id:
                matched_detail = od
        if matched_detail is not None:
            order.price_net = matched_detail.price_net
            order.quantity_net = matched_detail.quantity_net
            order.amount_net = matched_detail.amount_net
        # Copy the customer's user fields onto the customer object for output.
        for key in USER_OUTPUT_CONSTANT:
            setattr(order.customer, key, getattr(order.customer.user, key))
    return order_with_order_details
def get_order_by_num_for_update(num: str):
    """
    Fetch an order by order number with a row-level lock (SELECT ... FOR UPDATE).

    Must be called inside a database transaction.

    :param num: order number
    :return: the locked Order, or None if not found
    """
    # BUG FIX: the filter previously used ``num=num``, but the model field is
    # ``order_num`` (as used by every other query in this module), which would
    # raise FieldError at runtime.
    result = Order.objects.select_for_update().filter(order_num=num).first()
    return result
def get_order_detail_by_id_only_msg_notify(order_id: int):
    """
    Fetch an order with its details and address, for WeChat order-message
    notification only; do not call this from anywhere else.

    :param order_id: primary key of the order
    :return: (True, order) on success, (False, error message) otherwise
    """
    order = Order.objects.filter(id=order_id).first()
    if order is None:
        return False, "订单不存在"
    order.order_details = list_order_details_by_order_ids([order.id])
    order.address = get_order_address_by_order_id(order.id)
    return True, order
def get_customer_order_by_id(customer_ids: list, order_id: int):
    """Fetch one order owned by any of the given customer ids.

    :param customer_ids: candidate customer ids (one user may map to several)
    :param order_id: id of the order
    :return: the matching Order, or None
    """
    return Order.objects.filter(id=order_id, customer_id__in=customer_ids).first()
def get_customer_order_with_detail_by_id(customer_ids: list, order_id: int):
    """Fetch one of a customer's orders with details, address and delivery time.

    :param customer_ids: candidate customer ids
    :param order_id: id of the order
    :return: (True, order) on success, (False, error message) otherwise
    """
    order = get_customer_order_by_id(customer_ids, order_id)
    if order is None:
        return False, "订单不存在"
    # Attach the product line items and the shipping address.
    order.order_details = list_order_details_by_order_ids([order.id])
    order.address = get_order_address_by_order_id(order.id)
    # For confirmed/finished orders, the latest log timestamp serves as the
    # dispatch or arrival time.
    if order.order_status in (OrderStatus.CONFIRMED, OrderStatus.FINISHED):
        order.delivery_time = get_order_log_time_by_order_num(order.order_num)
    return True, order
def get_order_by_customer_id_and_groupon_attend_id(customer_id: int, groupon_attend_id: int):
    """Fetch a customer's order for a given groupon attendance.

    :param customer_id: id of the customer
    :param groupon_attend_id: id of the groupon attendance
    :return: the matching Order, or None
    """
    relevant_statuses = [
        OrderStatus.UNPAID,
        OrderStatus.PAID,
        OrderStatus.CONFIRMED,
        OrderStatus.REFUNDED,
        OrderStatus.FINISHED,
        OrderStatus.WAITTING,
    ]
    return (
        Order.objects
        .filter(
            customer_id=customer_id,
            groupon_attend_id=groupon_attend_id,
            order_status__in=relevant_statuses,
        )
        .first()
    )
def list_shop_orders(
    shop_id: int,
    order_types: list,
    order_pay_types: list,
    order_delivery_methods: list,
    order_status: list,
    num: str = None,
):
    """List a shop's orders with their details and customer info attached.

    When ``num`` is given, the other filters are ignored and only
    paid/confirmed/finished/refunded orders with that number are returned.

    :param shop_id: id of the shop
    :param order_types: order types to include
    :param order_pay_types: pay types to include
    :param order_delivery_methods: delivery methods to include
    :param order_status: order statuses to include
    :param num: optional exact order number
    :return: queryset of annotated orders
    """
    base = Order.objects.filter(shop_id=shop_id)
    if num:
        order_list = base.filter(order_num=num).filter(
            order_status__in=[
                OrderStatus.PAID,
                OrderStatus.CONFIRMED,
                OrderStatus.FINISHED,
                OrderStatus.REFUNDED,
            ]
        ).all()
    else:
        order_list = (
            base.filter(
                order_type__in=order_types,
                pay_type__in=order_pay_types,
                delivery_method__in=order_delivery_methods,
                order_status__in=order_status,
            )
            .order_by("order_status", "delivery_method", "delivery_period", "-id")
            .all()
        )
    # Group the detail rows by order id, then attach them.
    details_by_order = defaultdict(list)
    for detail in list_order_details_by_order_ids([o.id for o in order_list]):
        details_by_order[detail.order_id].append(detail)
    for order in order_list:
        order.order_details = details_by_order.get(order.id)
        # Copy the user's public fields onto the customer for output.
        for field in USER_OUTPUT_CONSTANT:
            setattr(order.customer, field, getattr(order.customer.user, field))
    return order_list
def list_customer_orders(
    shop_id: int,
    customer_id: int,
    order_types: list,
    order_pay_types: list,
    order_delivery_methods: list,
    order_status: list,
):
    """List one customer's order history in a shop, details attached.

    Empty filter lists mean "no restriction" for that dimension.

    :param shop_id: id of the shop
    :param customer_id: id of the customer
    :param order_types: order types to include (empty = all)
    :param order_pay_types: pay types to include (empty = all)
    :param order_delivery_methods: delivery methods to include (empty = all)
    :param order_status: order statuses to include (empty = all)
    :return: queryset of orders with ``order_details`` attached
    """
    order_list_query = Order.objects.filter(shop_id=shop_id, customer_id=customer_id)
    if order_types:
        # BUG FIX: the model field is ``order_type`` (cf. list_shop_orders);
        # ``order_types__in`` raised FieldError at query time.
        order_list_query = order_list_query.filter(order_type__in=order_types)
    if order_pay_types:
        # BUG FIX: likewise the field is ``pay_type``, not ``order_pay_types``.
        order_list_query = order_list_query.filter(pay_type__in=order_pay_types)
    if order_delivery_methods:
        order_list_query = order_list_query.filter(delivery_method__in=order_delivery_methods)
    if order_status:
        order_list_query = order_list_query.filter(order_status__in=order_status)
    order_list = order_list_query.order_by(
        "order_status", "delivery_method", "delivery_period", "-id"
    ).all()
    # Group the detail rows by order id, then attach them.
    order_ids = [order.id for order in order_list]
    map_order_lines = defaultdict(list)
    for order_detail in list_order_details_by_order_ids(order_ids):
        map_order_lines[order_detail.order_id].append(order_detail)
    for order in order_list:
        order.order_details = map_order_lines.get(order.id)
    return order_list
def list_customer_order_by_customer_ids(customer_ids: list, order_status: list):
    """List every order belonging to any of the given customer ids.

    One user may map to several customers (one per shop), hence the id
    list.  Orders that have a delivery record also get ``delivery_type``.

    :param customer_ids: customer ids to search
    :param order_status: optional statuses to restrict to (empty = all)
    :return: queryset of orders, newest first, with details attached
    """
    query = Order.objects.filter(customer_id__in=customer_ids).order_by("-create_time")
    if order_status:
        query = query.filter(order_status__in=order_status)
    order_list = query.all()
    # Group the detail rows by order id, then attach them.
    details_by_order = defaultdict(list)
    for detail in list_order_details_by_order_ids([o.id for o in order_list]):
        details_by_order[detail.order_id].append(detail)
    for order in order_list:
        order.order_details = details_by_order.get(order.id)
        if order.delivery:
            order.delivery_type = order.delivery.delivery_type
    return order_list
def list_order_details_by_order_id(order_id: int):
    """Fetch the detail (line-item) rows of a single order.

    :param order_id: id of the parent order
    :return: queryset of OrderDetail rows
    """
    return OrderDetail.objects.filter(order_id=order_id).all()
def list_shop_abnormal_orders(
    shop_id: int,
    order_types: list,
    order_pay_types: list,
    order_delivery_methods: list,
    order_status: list,
    num: str = None,
):
    """List a shop's abnormal (refund-failed) orders, details attached.

    When ``num`` is given, only the refund-failed order with that exact
    number is considered and the other filters are ignored.

    :param shop_id: id of the shop
    :param order_types: order types to include
    :param order_pay_types: pay types to include
    :param order_delivery_methods: delivery methods to include
    :param order_status: order statuses to include
    :param num: optional exact order number
    :return: queryset of annotated orders
    """
    if num:
        orders = (
            Order.objects.filter(
                shop_id=shop_id, order_num=num, order_status=OrderStatus.REFUND_FAIL
            )
        )
    else:
        # BUG FIX: this branch did not filter on shop_id, so it leaked
        # other shops' abnormal orders into the result.
        orders = (
            Order.objects.filter(
                shop_id=shop_id,
                order_type__in=order_types,
                pay_type__in=order_pay_types,
                delivery_method__in=order_delivery_methods,
                order_status__in=order_status,
            )
        )
    order_list = orders.order_by("delivery_method", "delivery_period", "-id").all()
    # Group the detail rows by order id, then attach them.
    order_ids = [order.id for order in order_list]
    order_details = list_order_details_by_order_ids(order_ids)
    map_order_lines = defaultdict(list)
    for order_detail in order_details:
        map_order_lines[order_detail.order_id].append(order_detail)
    for order in order_list:
        order.order_details = map_order_lines.get(order.id)
    return order_list
def count_abnormal_order(shop_id: int):
    """Count a shop's abnormal (refund-failed) orders.

    :param shop_id: id of the shop
    :return: number of orders in REFUND_FAIL status
    """
    return Order.objects.filter(
        shop_id=shop_id, order_status=OrderStatus.REFUND_FAIL
    ).count()
def list_order_by_groupon_attend_id(shop_id: int, groupona_attend_id: int):
    """List every order of one groupon attendance in a shop, details attached.

    :param shop_id: id of the shop
    :param groupona_attend_id: id of the groupon attendance
    :return: orders ordered by id, each with ``order_details`` attached
    """
    order_list = (
        Order.objects.filter(
            shop_id=shop_id,
            # BUG FIX: the filter used the misspelled field name
            # ``groupona_attend_id``; the model field is ``groupon_attend_id``
            # (cf. the other groupon selectors in this module).  The
            # misspelled *parameter* name is kept for keyword callers.
            groupon_attend_id=groupona_attend_id,
            order_status__in=[
                OrderStatus.UNPAID,
                OrderStatus.PAID,
                OrderStatus.CONFIRMED,
                OrderStatus.FINISHED,
                OrderStatus.REFUNDED,
                OrderStatus.WAITTING,
                OrderStatus.REFUND_FAIL,
            ],
        )
        .order_by('id')
        .all()
    )
    # Group the detail rows by order id, then attach them.
    order_ids = [order.id for order in order_list]
    order_details = list_order_details_by_order_ids(order_ids)
    map_order_lines = defaultdict(list)
    for order_detail in order_details:
        map_order_lines[order_detail.order_id].append(order_detail)
    for order in order_list:
        order.order_details = map_order_lines.get(order.id)
    # BUG FIX: the result was built but never returned (function yielded None).
    return order_list
def list_waitting_order_by_groupon_attend_id(groupon_attend_id: int):
    """List the orders still waiting in one groupon attendance.

    :param groupon_attend_id: id of the groupon attendance
    :return: queryset of WAITTING orders
    """
    return Order.objects.filter(
        order_status=OrderStatus.WAITTING, groupon_attend_id=groupon_attend_id
    ).all()
def list_unpaid_order_by_groupon_attend_id(groupon_attend_id: int):
    """List the unpaid orders of one groupon attendance.

    :param groupon_attend_id: id of the groupon attendance
    :return: queryset of UNPAID orders
    """
    return Order.objects.filter(
        order_status=OrderStatus.UNPAID, groupon_attend_id=groupon_attend_id
    ).all()
def list_unpay_order_by_groupon_attend_ids(groupon_attend_ids: list):
    """List the unpaid orders of several groupon attendances at once.

    :param groupon_attend_ids: ids of the groupon attendances
    :return: queryset of UNPAID orders
    """
    return Order.objects.filter(
        order_status=OrderStatus.UNPAID, groupon_attend_id__in=groupon_attend_ids
    ).all()
from logs.services import get_order_log_time_by_order_num
from order.constant import OrderStatus
from order.models import Order, OrderAddress, OrderDetail
from user.constant import USER_OUTPUT_CONSTANT
def get_shop_order_by_num(shop_id: int, num: str):
"""
通过店铺id和订单号获取订单及详情
:param shop_id:
:param num:
:return:
"""
order = Order.objects.filter(shop_id=shop_id, order_num=num).first()
if not order:
return False, "订单不存在"
order.order_details = list_order_details_by_order_ids([order.id])
order_address = get_order_address_by_order_id(order.id)
if order_address:
order.address = order_address
# 设置顾客信息
for key in USER_OUTPUT_CONSTANT:
setattr(order.customer, key, getattr(order.customer.user, key))
return True, order
def get_order_by_shop_id_and_id(shop_id: int, order_id: int):
"""
通过商铺ID和订单ID获取订单
:param shop_id:
:param order_id:
:return:
"""
order = Order.objects.filter(id=order_id, shop_id=shop_id).first()
return order
def get_shop_order_by_shop_id_and_id(shop_id: int, order_id: int):
"""
通过商铺id和订单id获取订单及详情
:param shop_id:
:param order_id:
:return:
"""
order = Order.objects.filter(shop_id=shop_id, id=order_id).first()
if not order:
return False, "订单不存在"
order.order_details = list_order_details_by_order_ids([order.id])
order_address = get_order_address_by_order_id(order.id)
if order_address:
order.address = order_address
return True, order
def get_order_address_by_order_id(order_id: int):
"""
通过订单id获取订单地址
:param order_id:
:return:
"""
order_address = OrderAddress.objects.filter(order_id=order_id).first()
if order_address:
order_address.address = order_address.full_address
return order_address
def get_shop_order_by_num_without_details(shop_id: int, order_num: int):
"""
通过订单号获取一个订单
:param shop_id:
:param order_num:
:return:
"""
order = Order.objects.filter(shop_id=shop_id, order_num=order_num).first()
return order
def list_order_details_by_order_ids(order_ids: list):
"""
通过订单id列表获取订单详情
:param order_ids:
:return:
"""
order_details = OrderDetail.objects.filter(order_id__in=order_ids).all()
for order_detail in order_details:
order_detail.product_name = order_detail.product.name
order_detail.product_id = order_detail.product.id
order_detail.product_cover_picture = order_detail.product.cover_image_url
return order_details
def list_order_with_order_details_by_product_id(shop_id: int, product_id: int):
"""
通过货品ID查询出其对应的销售记录(订单记录)
:param shop_id:
:param product_id:
:return:
"""
order_with_order_details_query = Order.objects.filter(
shop_id=shop_id, order_detail__product_id=product_id
).order_by("id")
order_with_order_details = order_with_order_details_query.all()
for order in order_with_order_details:
for od in order.order_detail.all():
if not od.product_id == product_id:
continue
else:
order_detail = od
order.price_net = order_detail.price_net
order.quantity_net = order_detail.quantity_net
order.amount_net = order_detail.amount_net
for key in USER_OUTPUT_CONSTANT:
setattr(order.customer, key, getattr(order.customer.user, key))
return order_with_order_details
def get_order_by_num_for_update(num: str):
"""
通过订单获取订单-加锁
:param num:
:return:
"""
result = Order.objects.select_for_update().filter(num=num).first()
return result
def get_order_detail_by_id_only_msg_notify(order_id: int):
"""
通过订单ID获取订单及详情,专供订单微信消息通知使用,其他地方不要调用
:param order_id:
:return:
"""
order = Order.objects.filter(id=order_id).first()
if not order:
return False, "订单不存在"
order.order_details = list_order_details_by_order_ids([order.id])
order.address = get_order_address_by_order_id(order.id)
return True, order
def get_customer_order_by_id(customer_ids: list, order_id: int):
"""
通过客户ids和订单id查找一个客户的订单
:param customer_ids:
:param order_id:
:return:
"""
order = Order.objects.filter(customer_id__in=customer_ids, id=order_id).first()
return order
def get_customer_order_with_detail_by_id(customer_ids: list, order_id: int):
"""
通过客户ids和订单id查找一个客户的订单详情
:param customer_ids:
:param order_id:
:return:
"""
order = get_customer_order_by_id(customer_ids, order_id)
if not order:
return False, "订单不存在"
# 查找订单地址与订单商品详情
order.order_details = list_order_details_by_order_ids([order.id])
order.address = get_order_address_by_order_id(order.id)
# 查找配送记录
if order.order_status in [OrderStatus.CONFIRMED, OrderStatus.FINISHED]:
# 查找最新的操作时间,作为订单开始或送达时间
order.delivery_time = get_order_log_time_by_order_num(order.order_num)
return True, order
def get_order_by_customer_id_and_groupon_attend_id(customer_id: int, groupon_attend_id: int):
"""
通过客户id和拼团参与id获取订单
:param customer_id:
:param groupon_attend_id:
:return:
"""
order = (
Order.objects.filter(
customer_id=customer_id,
groupon_attend_id=groupon_attend_id,
order_status__in=[
OrderStatus.UNPAID,
OrderStatus.PAID,
OrderStatus.CONFIRMED,
OrderStatus.REFUNDED,
OrderStatus.FINISHED,
OrderStatus.WAITTING,
]
).first()
)
return order
def list_shop_orders(
shop_id: int,
order_types: list,
order_pay_types: list,
order_delivery_methods: list,
order_status: list,
num: str = None,
):
"""
获取店铺订单列表
:param shop_id:
:param order_types:
:param order_pay_types:
:param order_delivery_methods:
:param order_status:
:param num:
:return:
"""
if num:
order_list = (
Order.objects.filter(shop_id=shop_id, order_num=num)
.filter(
order_status__in=[
OrderStatus.PAID,
OrderStatus.CONFIRMED,
OrderStatus.FINISHED,
OrderStatus.REFUNDED,
]
)
.all()
)
else:
order_list =(
Order.objects.filter(shop_id=shop_id)
.filter(
order_type__in=order_types,
pay_type__in=order_pay_types,
delivery_method__in=order_delivery_methods,
order_status__in=order_status
)
.order_by(
"order_status", "delivery_method", "delivery_period", "-id"
)
.all()
)
# 订单详情
order_ids = [order.id for order in order_list]
order_details = list_order_details_by_order_ids(order_ids)
map_order_lines = defaultdict(list)
for order_detail in order_details:
map_order_lines[order_detail.order_id].append(order_detail)
# 拼数据
for order in order_list:
order.order_details = map_order_lines.get(order.id)
# 拼装顾客数据
for _ in USER_OUTPUT_CONSTANT:
setattr(order.customer, _ , getattr(order.customer.user, _))
return order_list
def list_customer_orders(
    shop_id: int,
    customer_id: int,
    order_types: list,
    order_pay_types: list,
    order_delivery_methods: list,
    order_status: list,
):
    """List one customer's order history in a shop, details attached.

    Empty filter lists mean "no restriction" for that dimension.

    :param shop_id: id of the shop
    :param customer_id: id of the customer
    :param order_types: order types to include (empty = all)
    :param order_pay_types: pay types to include (empty = all)
    :param order_delivery_methods: delivery methods to include (empty = all)
    :param order_status: order statuses to include (empty = all)
    :return: queryset of orders with ``order_details`` attached
    """
    order_list_query = Order.objects.filter(shop_id=shop_id, customer_id=customer_id)
    if order_types:
        # BUG FIX: the model field is ``order_type`` (cf. list_shop_orders);
        # ``order_types__in`` raised FieldError at query time.
        order_list_query = order_list_query.filter(order_type__in=order_types)
    if order_pay_types:
        # BUG FIX: likewise the field is ``pay_type``, not ``order_pay_types``.
        order_list_query = order_list_query.filter(pay_type__in=order_pay_types)
    if order_delivery_methods:
        order_list_query = order_list_query.filter(delivery_method__in=order_delivery_methods)
    if order_status:
        order_list_query = order_list_query.filter(order_status__in=order_status)
    order_list = order_list_query.order_by(
        "order_status", "delivery_method", "delivery_period", "-id"
    ).all()
    # Group the detail rows by order id, then attach them.
    order_ids = [order.id for order in order_list]
    map_order_lines = defaultdict(list)
    for order_detail in list_order_details_by_order_ids(order_ids):
        map_order_lines[order_detail.order_id].append(order_detail)
    for order in order_list:
        order.order_details = map_order_lines.get(order.id)
    return order_list
def list_customer_order_by_customer_ids(customer_ids: list, order_status: list):
"""
通过用户ID查出一个用户(对应多个客户)的所有订单
:param customer_ids:
:param order_status:
:return:
"""
order_list_query = (
Order.objects.filter(customer_id__in=customer_ids)
.order_by("-create_time")
)
if order_status:
order_list_query = order_list_query.filter(order_status__in=order_status)
order_list = order_list_query.all()
# 订单详情
order_ids = [order.id for order in order_list]
order_details = list_order_details_by_order_ids(order_ids)
map_order_lines = defaultdict(list)
for order_detail in order_details:
map_order_lines[order_detail.order_id].append(order_detail)
# 拼数据
for order in order_list:
order.order_details = map_order_lines.get(order.id)
if order.delivery:
order.delivery_type = order.delivery.delivery_type
return order_list
def list_order_details_by_order_id(order_id: int):
"""
通过订单ID获取子订单列表
:param order_id:
:return:
"""
order_detail_list = OrderDetail.objects.filter(order_id=order_id).all()
return order_detail_list
def list_shop_abnormal_orders(
    shop_id: int,
    order_types: list,
    order_pay_types: list,
    order_delivery_methods: list,
    order_status: list,
    num: str = None,
):
    """List a shop's abnormal (refund-failed) orders, details attached.

    When ``num`` is given, only the refund-failed order with that exact
    number is considered and the other filters are ignored.

    :param shop_id: id of the shop
    :param order_types: order types to include
    :param order_pay_types: pay types to include
    :param order_delivery_methods: delivery methods to include
    :param order_status: order statuses to include
    :param num: optional exact order number
    :return: queryset of annotated orders
    """
    if num:
        orders = (
            Order.objects.filter(
                shop_id=shop_id, order_num=num, order_status=OrderStatus.REFUND_FAIL
            )
        )
    else:
        # BUG FIX: this branch did not filter on shop_id, so it leaked
        # other shops' abnormal orders into the result.
        orders = (
            Order.objects.filter(
                shop_id=shop_id,
                order_type__in=order_types,
                pay_type__in=order_pay_types,
                delivery_method__in=order_delivery_methods,
                order_status__in=order_status,
            )
        )
    order_list = orders.order_by("delivery_method", "delivery_period", "-id").all()
    # Group the detail rows by order id, then attach them.
    order_ids = [order.id for order in order_list]
    order_details = list_order_details_by_order_ids(order_ids)
    map_order_lines = defaultdict(list)
    for order_detail in order_details:
        map_order_lines[order_detail.order_id].append(order_detail)
    for order in order_list:
        order.order_details = map_order_lines.get(order.id)
    return order_list
def count_abnormal_order(shop_id: int):
"""
获取一个店铺异常(退款失败)的订单数
:param shop_id:
:return:
"""
count = (
Order.objects.filter(
shop_id=shop_id,
order_status=OrderStatus.REFUND_FAIL,
)
.count()
)
return count
def list_order_by_groupon_attend_id(shop_id: int, groupona_attend_id: int):
    """List every order of one groupon attendance in a shop, details attached.

    :param shop_id: id of the shop
    :param groupona_attend_id: id of the groupon attendance
    :return: orders ordered by id, each with ``order_details`` attached
    """
    order_list = (
        Order.objects.filter(
            shop_id=shop_id,
            # BUG FIX: the filter used the misspelled field name
            # ``groupona_attend_id``; the model field is ``groupon_attend_id``
            # (cf. the other groupon selectors in this module).  The
            # misspelled *parameter* name is kept for keyword callers.
            groupon_attend_id=groupona_attend_id,
            order_status__in=[
                OrderStatus.UNPAID,
                OrderStatus.PAID,
                OrderStatus.CONFIRMED,
                OrderStatus.FINISHED,
                OrderStatus.REFUNDED,
                OrderStatus.WAITTING,
                OrderStatus.REFUND_FAIL,
            ],
        )
        .order_by('id')
        .all()
    )
    # Group the detail rows by order id, then attach them.
    order_ids = [order.id for order in order_list]
    order_details = list_order_details_by_order_ids(order_ids)
    map_order_lines = defaultdict(list)
    for order_detail in order_details:
        map_order_lines[order_detail.order_id].append(order_detail)
    for order in order_list:
        order.order_details = map_order_lines.get(order.id)
    # BUG FIX: the result was built but never returned (function yielded None).
    return order_list
def list_waitting_order_by_groupon_attend_id(groupon_attend_id: int):
"""
通过拼团参与id列出拼团中的订单
:param groupon_attend_id:
:return:
"""
orders = Order.objects.filter(
groupon_attend_id=groupon_attend_id, order_status=OrderStatus.WAITTING
).all()
return orders
def list_unpaid_order_by_groupon_attend_id(groupon_attend_id: int):
"""
通过拼团参与id列出未支付的订单
:param groupon_attend_id:
:return:
"""
orders = Order.objects.filter(
groupon_attend_id=groupon_attend_id, order_status=OrderStatus.UNPAID
).all()
return orders
def list_unpay_order_by_groupon_attend_ids(groupon_attend_ids: list):
"""
通过拼团参与id得到订单
:param groupon_attend_ids:
:return:
"""
orders = Order.objects.filter(
groupon_attend_id__in=groupon_attend_ids, order_status=OrderStatus.UNPAID
).all()
return orders | 0.483405 | 0.166032 |
def facility():
    """Return a selector that maps a component to its owning facility."""
    # Works for analysis components and for facilities themselves (a
    # facility's own ``facility`` attribute is returned).
    def _select(component):
        return component.facility
    return _select
def self():
    """Return the identity selector: it yields the component unchanged."""
    def _identity(component):
        return component
    return _identity
def step(name, product_index=0):
    """Return a selector for the process step with the given name.

    :param name: the name of the process step to select
    :param product_index: which product's steps to look in
    """
    def _select(facility):
        target_product = facility.products[product_index]
        return target_product.sequence.findStep(name)
    return _select
def product(product_index=0):
    """Return a selector for a particular product.

    :param product_index: the index of the desired product in the facility's list
    """
    def _select(facility):
        return facility.products[product_index]
    return _select
def output(output_name, product_index=0):
    """Return a selector for a specified output of a product.

    :param output_name: the name of the output to select
    :param product_index: which product's outputs to look in
    """
    def _select(facility):
        chosen = facility.products[product_index]
        return chosen.outputs[output_name]
    return _select
def content(component, item, collection="outputs", strip_units=False):
    """Return a selector function for a particular item of a component.

    :param component: a selector function for the component of interest
    :param item: the name of the item to be selected
    :param collection: the name of the collection to look in
    :param strip_units: whether to remove the units (if any) of the value found
    """
    # TODO assert that the resulting value is a Number (if stripping units)
    def get_item(facility):
        component_instance = component(facility)
        collection_instance = getattr(component_instance, collection)
        value = collection_instance[item]
        if hasattr(value, 'magnitude') and strip_units:
            value = value.magnitude
        # BUG FIX: the value was computed but never returned, so every
        # selector built by content() yielded None.
        return value
    return get_item
"""Return a selector function for the facility a component refers to."""
# This works both for analysis components, and facilities (in which case the
# facility itself is returned).
return lambda component: component.facility
def self():
"""Return a selector function that selects the component itself."""
return lambda component: component
def step(name, product_index=0):
"""Return a selector function for the process step with the given name.
:param name: the name of the process step to select
:param product_index: which product's steps to look in
"""
return lambda facility: facility.products[product_index].sequence.findStep(name)
def product(product_index=0):
"""Return a selector function for a particular product.
:param product_index: the index of the desired product in the facility's list
"""
return lambda facility: facility.products[product_index]
def output(output_name, product_index=0):
"""Return a selector function for a specified output of a product.
:param output_name: the name of the output to select
:param product_index: which product's outputs to look in
"""
return lambda facility: facility.products[product_index].outputs[output_name]
def content(component, item, collection="outputs", strip_units=False):
    """Return a selector function for a particular item of a component.

    :param component: a selector function for the component of interest
    :param item: the name of the item to be selected
    :param collection: the name of the collection to look in
    :param strip_units: whether to remove the units (if any) of the value found
    """
    # TODO assert that the resulting value is a Number (if stripping units)
    def get_item(facility):
        component_instance = component(facility)
        collection_instance = getattr(component_instance, collection)
        value = collection_instance[item]
        if hasattr(value, 'magnitude') and strip_units:
            value = value.magnitude
        # BUG FIX: the value was computed but never returned, so every
        # selector built by content() yielded None.
        return value
    return get_item
import json
import xml
from collections import defaultdict
import xmltodict
import yaml
from chibi.snippet.dict import hate_ordered_dict, remove_xml_notatation
class Atlas:
    # Factory façade: ``Atlas(item)`` never creates an Atlas instance —
    # __new__ hands back the wrapped form of ``item`` (Chibi_atlas for plain
    # dicts, the lazy list wrapper for plain lists, the item itself otherwise).
    def __new__( cls, item, *args, **kw ):
        return _wrap( item )
def loads( string ):
    """Parse *string* as JSON, then XML, then YAML, returning a Chibi_atlas.

    The formats are tried in that order; the first parser that succeeds
    determines the result.
    """
    try:
        return Chibi_atlas( json.loads( string ) )
    except ( json.JSONDecodeError, TypeError ):
        try:
            result = xmltodict.parse( string )
            # Normalise xmltodict output to plain dicts; presumably also
            # strips xmltodict's attribute notation (see chibi.snippet.dict).
            result = hate_ordered_dict( result )
            result = remove_xml_notatation( result )
            return Chibi_atlas( result )
        except xml.parsers.expat.ExpatError:
            return Chibi_atlas( yaml.safe_load( string ) )
class Chibi_atlas( dict ):
    """
    Dict whose keys can also be read and written as instance attributes
    (``d.foo`` is equivalent to ``d['foo']``).  Values are wrapped on
    write so nested dicts/lists behave the same way.
    """
    def __init__( self, *args, **kw ):
        # Route every initial item through __setitem__ so nested values get
        # wrapped; a plain dict.__init__ would bypass the wrapping.
        for arg in args:
            if isinstance( arg, dict ):
                for k, v in arg.items():
                    self[ k ] = v
        for k, v in kw.items():
            self[ k ] = v
        # super().__init__( *args, **kw )
    def __getattr__( self, name ):
        # Real attributes win; otherwise fall back to a dict lookup,
        # re-raising the original AttributeError when the key is missing too.
        try:
            return super().__getattribute__( name )
        except AttributeError as e:
            try:
                return self[ name ]
            except KeyError:
                raise e
    def __setattr__( self, name, value ):
        # Names defined on the class itself (methods, descriptors) are set as
        # normal attributes; anything else is stored as a wrapped dict item.
        try:
            if getattr( type( self ), name, False ):
                super().__setattr__( name, value )
            else:
                self[ name ] = _wrap( value )
        except TypeError:
            # e.g. a non-string name that getattr() rejects — store it anyway.
            self[ name ] = _wrap( value )
    def __delattr__( self, name ):
        del self[ name ]
    def __setitem__( self, name, value ):
        # Wrap on write so nested structures are attribute-accessible too.
        super().__setitem__( name, _wrap( value ) )
    def __dir__( self ):
        # Expose the keys for autocompletion / introspection.
        return list( self.keys() )
class Chibi_atlas_ignore_case( Chibi_atlas ):
    """
    Case-insensitive Chibi_atlas: keys are lower-cased on write and on
    every lookup, whether used as items or as attributes.
    """
    def __init__( self, *args, **kw ):
        # NOTE(review): positional args without an ``items`` method are
        # silently dropped here — confirm that is intentional.
        args_clean = []
        for a in args:
            if isinstance( a, dict ) or hasattr( a, 'items' ):
                args_clean.append( { k.lower(): v for k, v in a.items() } )
        kw = { k.lower(): v for k, v in kw.items() }
        super().__init__( *args_clean, **kw )
    def __getattr__( self, name ):
        name = name.lower()
        return super().__getattr__( name )
    def __getitem__( self, key ):
        key = key.lower()
        return super().__getitem__( key )
    def __setattr__( self, name, value ):
        name = name.lower()
        return super().__setattr__( name, value )
    def __setitem__( self, key, value ):
        key = key.lower()
        return super().__setitem__( key, value )
def _default_factory():
    # Default factory for Chibi_atlas_default: missing keys grow nested
    # default atlases, so deep paths can be created on first access.
    return Chibi_atlas_default()
class Chibi_atlas_default( defaultdict, Chibi_atlas ):
    """
    Chibi_atlas that emulates :py:class:`collections.defaultdict`;
    without an explicit factory, missing keys default to nested
    Chibi_atlas_default instances.
    """
    def __init__( self, default_factory=None, *args, **kw ):
        if default_factory is None:
            default_factory = _default_factory
        super().__init__( default_factory, *args, **kw )
class __Chibi_atlas_list( list ):
    """List that wraps its elements lazily: on access (indexing or
    iteration) the element is wrapped and the wrapped value is cached
    back into the same slot."""
    def __getitem__( self, index ):
        value = super().__getitem__( index, )
        value = _wrap( value )
        self[ index ] = value
        return value
    def __iter__( self ):
        # Writing back during iteration is safe: assignment never resizes.
        for i, v in enumerate( super().__iter__() ):
            value = _wrap( v )
            self[ i ] = value
            yield value
def _wrap( val, klass=None ):
    """Wrap *val* for attribute-style access.

    Plain dicts become Chibi_atlas (or *klass* when given), plain lists
    become the lazy list wrapper (or *klass*); everything else — including
    subclasses, which are considered already wrapped — is returned as-is.
    """
    kind = type( val )
    if kind is dict:
        return ( klass or Chibi_atlas )( val )
    if kind is list:
        return ( klass or __Chibi_atlas_list )( val )
    return val
# Register a representer so PyYAML serialises Chibi_atlas as a plain
# dict node instead of a python/object tag.
yaml.add_representer(
    Chibi_atlas, yaml.representer.SafeRepresenter.represent_dict )
yaml.add_representer(
__Chibi_atlas_list, yaml.representer.SafeRepresenter.represent_list ) | chibi/atlas/__init__.py | import json
import xml
from collections import defaultdict
import xmltodict
import yaml
from chibi.snippet.dict import hate_ordered_dict, remove_xml_notatation
class Atlas:
def __new__( cls, item, *args, **kw ):
return _wrap( item )
def loads( string ):
try:
return Chibi_atlas( json.loads( string ) )
except ( json.JSONDecodeError, TypeError ):
try:
result = xmltodict.parse( string )
result = hate_ordered_dict( result )
result = remove_xml_notatation( result )
return Chibi_atlas( result )
except xml.parsers.expat.ExpatError:
return Chibi_atlas( yaml.safe_load( string ) )
class Chibi_atlas( dict ):
"""
Clase para crear dicionarios para que sus keys sean leibles como
atributos de classes
"""
def __init__( self, *args, **kw ):
for arg in args:
if isinstance( arg, dict ):
for k, v in arg.items():
self[ k ] = v
for k, v in kw.items():
self[ k ] = v
# super().__init__( *args, **kw )
def __getattr__( self, name ):
try:
return super().__getattribute__( name )
except AttributeError as e:
try:
return self[ name ]
except KeyError:
raise e
def __setattr__( self, name, value ):
try:
if getattr( type( self ), name, False ):
super().__setattr__( name, value )
else:
self[ name ] = _wrap( value )
except TypeError:
self[ name ] = _wrap( value )
def __delattr__( self, name ):
del self[ name ]
def __setitem__( self, name, value ):
super().__setitem__( name, _wrap( value ) )
def __dir__( self ):
return list( self.keys() )
class Chibi_atlas_ignore_case( Chibi_atlas ):
"""
clase que crea chibi atlas que son case insensitive
"""
def __init__( self, *args, **kw ):
args_clean = []
for a in args:
if isinstance( a, dict ) or hasattr( a, 'items' ):
args_clean.append( { k.lower(): v for k, v in a.items() } )
kw = { k.lower(): v for k, v in kw.items() }
super().__init__( *args_clean, **kw )
def __getattr__( self, name ):
name = name.lower()
return super().__getattr__( name )
def __getitem__( self, key ):
key = key.lower()
return super().__getitem__( key )
def __setattr__( self, name, value ):
name = name.lower()
return super().__setattr__( name, value )
def __setitem__( self, key, value ):
key = key.lower()
return super().__setitem__( key, value )
def _default_factory():
return Chibi_atlas_default()
class Chibi_atlas_default( defaultdict, Chibi_atlas ):
"""
chibi atlas que emula `py:class:collections.defaultdict`
"""
def __init__( self, default_factory=None, *args, **kw ):
if default_factory is None:
default_factory = _default_factory
super().__init__( default_factory, *args, **kw )
class __Chibi_atlas_list( list ):
def __getitem__( self, index ):
value = super().__getitem__( index, )
value = _wrap( value )
self[ index ] = value
return value
def __iter__( self ):
for i, v in enumerate( super().__iter__() ):
value = _wrap( v )
self[ i ] = value
yield value
def _wrap( val, klass=None ):
if type( val ) == dict:
if klass is None:
return Chibi_atlas( val )
else:
return klass( val )
elif type( val ) == list:
if klass is None:
return __Chibi_atlas_list( val )
else:
return klass( val )
return val
yaml.add_representer(
Chibi_atlas, yaml.representer.SafeRepresenter.represent_dict )
yaml.add_representer(
__Chibi_atlas_list, yaml.representer.SafeRepresenter.represent_list ) | 0.430985 | 0.075007 |
from django import forms
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from .models import Parking_place, MyUser
choice = (('Manager', 'Manager'), ('Employee', 'Employee'))
class RegisterForm(UserCreationForm):
    """Sign-up form: name, username, email, role and double password entry.

    Every widget carries Bootstrap's ``form-control`` class; the password
    widgets are additionally wired for a show/hide toggle via
    ``data-toggle``/``id``.
    """

    # Whether the new account is a manager or a plain employee.
    role = forms.ChoiceField(required=True, choices=choice)
    first_name = forms.CharField(
        required=True,
        max_length=100,
        widget=forms.TextInput(
            attrs={'placeholder': 'First Name', 'class': 'form-control'}
        ),
    )
    last_name = forms.CharField(
        required=True,
        max_length=100,
        widget=forms.TextInput(
            attrs={'placeholder': 'Last Name', 'class': 'form-control'}
        ),
    )
    username = forms.CharField(
        required=True,
        max_length=100,
        widget=forms.TextInput(
            attrs={'placeholder': 'Username', 'class': 'form-control'}
        ),
    )
    email = forms.EmailField(
        required=True,
        widget=forms.TextInput(
            attrs={'placeholder': 'Email', 'class': 'form-control'}
        ),
    )
    password1 = forms.CharField(
        required=True,
        max_length=50,
        widget=forms.PasswordInput(
            attrs={
                'placeholder': 'Password',
                'class': 'form-control',
                'data-toggle': 'password',
                'id': 'password',
            }
        ),
    )
    password2 = forms.CharField(
        required=True,
        max_length=50,
        widget=forms.PasswordInput(
            attrs={
                'placeholder': 'Confirm Password',
                'class': 'form-control',
                'data-toggle': 'password',
                'id': 'password',
            }
        ),
    )

    class Meta:
        model = MyUser
        fields = ['first_name', 'last_name',
                  'username', 'email', 'role',
                  'password1', 'password2']
class LoginForm(AuthenticationForm):
    """Login form with Bootstrap styling and a "remember me" checkbox."""

    username = forms.CharField(
        required=True,
        max_length=100,
        widget=forms.TextInput(
            attrs={'placeholder': 'Username', 'class': 'form-control'}
        ),
    )
    password = forms.CharField(
        required=True,
        max_length=50,
        widget=forms.PasswordInput(
            attrs={
                'placeholder': 'Password',
                'class': 'form-control',
                'data-toggle': 'password',
                'id': 'password',
                'name': 'password',
            }
        ),
    )
    # Optional: keep the session alive beyond browser close.
    remember_me = forms.BooleanField(required=False)

    class Meta:
        model = MyUser
        fields = ['username', 'password', 'remember_me']
class UpdateUserForm(forms.ModelForm):
    """Profile-edit form: lets a user change username and email."""

    username = forms.CharField(
        required=True,
        max_length=100,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
    )
    email = forms.EmailField(
        required=True,
        widget=forms.TextInput(attrs={'class': 'form-control'}),
    )

    class Meta:
        model = MyUser
        fields = ['username', 'email']
class CreateParkForm(forms.ModelForm):
    """Create a parking place; the time fields render/parse as HH:MM."""

    class Meta:
        model = Parking_place
        fields = ('owner', 'from_time', 'to_time', 'description')
        widgets = {
            'from_time': forms.TimeInput(format='%H:%M'),
            'to_time': forms.TimeInput(format='%H:%M'),
        }
class ChangeParkForm(forms.ModelForm):
    """Edit an existing parking place (default widgets)."""

    class Meta:
        model = Parking_place
        fields = ('owner', 'from_time', 'to_time', 'description')
from django.contrib.auth.forms import AuthenticationForm, UserCreationForm
from .models import Parking_place, MyUser
choice = (('Manager', 'Manager'), ('Employee', 'Employee'))
class RegisterForm(UserCreationForm):
role = forms.ChoiceField(
required=True,
choices=choice
)
first_name = forms.CharField(
max_length=100,
required=True,
widget=forms.TextInput(attrs={'placeholder': 'First Name',
'class': 'form-control',
}))
last_name = forms.CharField(
max_length=100,
required=True,
widget=forms.TextInput(attrs={'placeholder': 'Last Name',
'class': 'form-control',
}))
username = forms.CharField(
max_length=100,
required=True,
widget=forms.TextInput(attrs={'placeholder': 'Username',
'class': 'form-control',
}))
email = forms.EmailField(
required=True,
widget=forms.TextInput(attrs={'placeholder': 'Email',
'class': 'form-control',
}))
password1 = forms.CharField(
max_length=50,
required=True,
widget=forms.PasswordInput(attrs={'placeholder': 'Password',
'class': 'form-control',
'data-toggle': 'password',
'id': 'password',
}))
password2 = forms.CharField(
max_length=50,
required=True,
widget=forms.PasswordInput(attrs={'placeholder': 'Confirm Password',
'class': 'form-control',
'data-toggle': 'password',
'id': 'password',
}))
class Meta:
model = MyUser
fields = ['first_name', 'last_name',
'username', 'email', 'role',
'password1', 'password2']
class LoginForm(AuthenticationForm):
username = forms.CharField(
max_length=100,
required=True,
widget=forms.TextInput(attrs={'placeholder': 'Username',
'class': 'form-control',
}))
password = forms.CharField(
max_length=50,
required=True,
widget=forms.PasswordInput(attrs={'placeholder': 'Password',
'class': 'form-control',
'data-toggle': 'password',
'id': 'password',
'name': 'password',
}))
remember_me = forms.BooleanField(required=False)
class Meta:
model = MyUser
fields = ['username', 'password', 'remember_me']
class UpdateUserForm(forms.ModelForm):
username = forms.CharField(
max_length=100,
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'}))
email = forms.EmailField(
required=True,
widget=forms.TextInput(attrs={'class': 'form-control'}))
class Meta:
model = MyUser
fields = ['username', 'email']
class CreateParkForm(forms.ModelForm):
class Meta:
model = Parking_place
fields = ('owner', 'from_time', 'to_time', 'description')
widgets = {
'from_time': forms.TimeInput(format='%H:%M'),
'to_time': forms.TimeInput(format='%H:%M'),
}
class ChangeParkForm(forms.ModelForm):
class Meta:
model = Parking_place
fields = ('owner', 'from_time', 'to_time', 'description') | 0.432543 | 0.071267 |
from django.urls import path, re_path
from .views import (
StaffMemberPaymentsView, OtherStaffMemberPaymentsView, FinancesByMonthView,
FinancesByDateView, FinancesByEventView, AllExpensesViewCSV, AllRevenuesViewCSV,
FinancialDetailView, ExpenseReportingView, RevenueReportingView,
CompensationRuleUpdateView, CompensationRuleResetView, ExpenseRuleGenerationView
)
from .ajax import updateEventRegistrations
from .autocomplete_light_registry import (
PaymentMethodAutoComplete, TransactionPartyAutoComplete,
ApprovalStatusAutoComplete
)
urlpatterns = [
path('staff-payments/', StaffMemberPaymentsView.as_view(), name='staffMemberPayments'),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/$',
StaffMemberPaymentsView.as_view(), name='staffMemberPayments'
),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/$',
OtherStaffMemberPaymentsView.as_view(), name='staffMemberPayments'
),
path(
'staff-payments/csv/',
StaffMemberPaymentsView.as_view(as_csv=True),
name='staffMemberPaymentsCSV'
),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/csv/$',
StaffMemberPaymentsView.as_view(as_csv=True),
name='staffMemberPaymentsCSV'
),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/csv/$',
OtherStaffMemberPaymentsView.as_view(as_csv=True),
name='staffMemberPaymentsCSV'
),
path('submit-expenses/', ExpenseReportingView.as_view(), name='submitExpenses'),
path('submit-revenues/', RevenueReportingView.as_view(), name='submitRevenues'),
path('finances/generate-items/', ExpenseRuleGenerationView.as_view(), name='generateFinancialItems'),
# These URLs are for Ajax/autocomplete functionality
path(
'submit-revenues/eventfilter/', updateEventRegistrations,
name='ajaxhandler_updateEventRegistrations'
),
path(
'autocomplete/paymentmethod/', PaymentMethodAutoComplete.as_view(),
name='paymentMethod-list-autocomplete'
),
path(
'autocomplete/approved/', ApprovalStatusAutoComplete.as_view(),
name='approved-list-autocomplete'
),
path(
'autocomplete/transactionparty/',
TransactionPartyAutoComplete.as_view(create_field='name'),
name='transactionParty-list-autocomplete'
),
# These URLs are for the financial views
re_path(
r'^finances/detail/(?P<year>[\w\+]+)/(?P<month>[\w\+]+)/(?P<day>[\w\+]+)/$',
FinancialDetailView.as_view(), name='financialDateDetailView'
),
re_path(
r'^finances/detail/(?P<year>[\w\+]+)/(?P<month>[\w\+]+)/$',
FinancialDetailView.as_view(), name='financialMonthDetailView'
),
re_path(
r'^finances/detail/(?P<year>[\w\+]+)/$',
FinancialDetailView.as_view(), name='financialYearDetailView'
),
path('finances/detail/', FinancialDetailView.as_view(), name='financialDetailView'),
path(
'finances/event/<slug:event>/',
FinancialDetailView.as_view(), name='financialEventDetailView'
),
path(
'finances/daily/csv/', FinancesByDateView.as_view(as_csv=True),
name='financesByDateCSV'
),
path(
'finances/daily/<slug:year>/', FinancesByDateView.as_view(),
name='financesByDate'
),
path(
'finances/daily/<slug:year>/csv/',
FinancesByDateView.as_view(as_csv=True), name='financesByDateCSV'
),
path('finances/daily/', FinancesByDateView.as_view(), name='financesByDate'),
path('finances/csv/', FinancesByMonthView.as_view(as_csv=True), name='financesByMonthCSV'),
path('finances/<slug:year>/', FinancesByMonthView.as_view(), name='financesByMonth'),
path(
'finances/<slug:year>/csv/',
FinancesByMonthView.as_view(as_csv=True), name='financesByMonthCSV'
),
path('finances/', FinancesByMonthView.as_view(), name='financesByMonth'),
path(
'finances-byevent/csv/', FinancesByEventView.as_view(as_csv=True),
name='financesByEventCSV'
),
path(
'finances-byevent/<slug:year>/csv/',
FinancesByEventView.as_view(as_csv=True), name='financesByEventCSV'
),
path('finances-byevent/', FinancesByEventView.as_view(), name='financesByEvent'),
path('finances/expenses/<slug:year>/csv/', AllExpensesViewCSV.as_view(), name='allexpensesCSV'),
path('finances/revenues/<slug:year>/csv/', AllRevenuesViewCSV.as_view(), name='allrevenuesCSV'),
path('compensation/update/', CompensationRuleUpdateView.as_view(), name='updateCompensationRules'),
path('compensation/reset/', CompensationRuleResetView.as_view(), name='resetCompensationRules'),
] | danceschool/financial/urls.py | from django.urls import path, re_path
from .views import (
StaffMemberPaymentsView, OtherStaffMemberPaymentsView, FinancesByMonthView,
FinancesByDateView, FinancesByEventView, AllExpensesViewCSV, AllRevenuesViewCSV,
FinancialDetailView, ExpenseReportingView, RevenueReportingView,
CompensationRuleUpdateView, CompensationRuleResetView, ExpenseRuleGenerationView
)
from .ajax import updateEventRegistrations
from .autocomplete_light_registry import (
PaymentMethodAutoComplete, TransactionPartyAutoComplete,
ApprovalStatusAutoComplete
)
urlpatterns = [
path('staff-payments/', StaffMemberPaymentsView.as_view(), name='staffMemberPayments'),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/$',
StaffMemberPaymentsView.as_view(), name='staffMemberPayments'
),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/$',
OtherStaffMemberPaymentsView.as_view(), name='staffMemberPayments'
),
path(
'staff-payments/csv/',
StaffMemberPaymentsView.as_view(as_csv=True),
name='staffMemberPaymentsCSV'
),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/csv/$',
StaffMemberPaymentsView.as_view(as_csv=True),
name='staffMemberPaymentsCSV'
),
re_path(
r'^staff-payments/(?P<year>[\w\+]+)/(?P<first_name>[\w\+\.]+)-(?P<last_name>[\w\+\.]+)/csv/$',
OtherStaffMemberPaymentsView.as_view(as_csv=True),
name='staffMemberPaymentsCSV'
),
path('submit-expenses/', ExpenseReportingView.as_view(), name='submitExpenses'),
path('submit-revenues/', RevenueReportingView.as_view(), name='submitRevenues'),
path('finances/generate-items/', ExpenseRuleGenerationView.as_view(), name='generateFinancialItems'),
# These URLs are for Ajax/autocomplete functionality
path(
'submit-revenues/eventfilter/', updateEventRegistrations,
name='ajaxhandler_updateEventRegistrations'
),
path(
'autocomplete/paymentmethod/', PaymentMethodAutoComplete.as_view(),
name='paymentMethod-list-autocomplete'
),
path(
'autocomplete/approved/', ApprovalStatusAutoComplete.as_view(),
name='approved-list-autocomplete'
),
path(
'autocomplete/transactionparty/',
TransactionPartyAutoComplete.as_view(create_field='name'),
name='transactionParty-list-autocomplete'
),
# These URLs are for the financial views
re_path(
r'^finances/detail/(?P<year>[\w\+]+)/(?P<month>[\w\+]+)/(?P<day>[\w\+]+)/$',
FinancialDetailView.as_view(), name='financialDateDetailView'
),
re_path(
r'^finances/detail/(?P<year>[\w\+]+)/(?P<month>[\w\+]+)/$',
FinancialDetailView.as_view(), name='financialMonthDetailView'
),
re_path(
r'^finances/detail/(?P<year>[\w\+]+)/$',
FinancialDetailView.as_view(), name='financialYearDetailView'
),
path('finances/detail/', FinancialDetailView.as_view(), name='financialDetailView'),
path(
'finances/event/<slug:event>/',
FinancialDetailView.as_view(), name='financialEventDetailView'
),
path(
'finances/daily/csv/', FinancesByDateView.as_view(as_csv=True),
name='financesByDateCSV'
),
path(
'finances/daily/<slug:year>/', FinancesByDateView.as_view(),
name='financesByDate'
),
path(
'finances/daily/<slug:year>/csv/',
FinancesByDateView.as_view(as_csv=True), name='financesByDateCSV'
),
path('finances/daily/', FinancesByDateView.as_view(), name='financesByDate'),
path('finances/csv/', FinancesByMonthView.as_view(as_csv=True), name='financesByMonthCSV'),
path('finances/<slug:year>/', FinancesByMonthView.as_view(), name='financesByMonth'),
path(
'finances/<slug:year>/csv/',
FinancesByMonthView.as_view(as_csv=True), name='financesByMonthCSV'
),
path('finances/', FinancesByMonthView.as_view(), name='financesByMonth'),
path(
'finances-byevent/csv/', FinancesByEventView.as_view(as_csv=True),
name='financesByEventCSV'
),
path(
'finances-byevent/<slug:year>/csv/',
FinancesByEventView.as_view(as_csv=True), name='financesByEventCSV'
),
path('finances-byevent/', FinancesByEventView.as_view(), name='financesByEvent'),
path('finances/expenses/<slug:year>/csv/', AllExpensesViewCSV.as_view(), name='allexpensesCSV'),
path('finances/revenues/<slug:year>/csv/', AllRevenuesViewCSV.as_view(), name='allrevenuesCSV'),
path('compensation/update/', CompensationRuleUpdateView.as_view(), name='updateCompensationRules'),
path('compensation/reset/', CompensationRuleResetView.as_view(), name='resetCompensationRules'),
] | 0.358802 | 0.095645 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from racket_compiler.racket_parser import Parser
from solver_input_generator import Solver_Input
from third_party.demo2program.karel_env.dsl import get_KarelDSL
from third_party.demo2program.karel_env.dsl import dsl_enum_program as karel_enum
from third_party.demo2program.vizdoom_env.dsl import dsl_enum_program as vizdoom_enum
import numpy as np
import tensorflow as tf
import h5py
import pandas as pd
from rosette_query_generator import *
from model.model_ours import Model
import time
WARMUP=10
class Solver():
def __init__(self, config, dataset, dsl=None):
self.config = config
self.dataset = dataset
self.dsl = dsl
self.batch_size = config.batch_size
session_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
device_count={'GPU': 1},
)
self.session = tf.Session(config=session_config)
self.parsing_class = Parser(config.dataset_type)
self.lexer = self.parsing_class.racket_lexer
self.parser = self.parsing_class.racket_parser
if config.dataset_type == 'karel':
if config.ablation_loop_heuristic:
self.synthax_files = ["karel_synthax/0_if_0_while",
"karel_synthax/0_if_1_while_length_20",
"karel_synthax/1_if_0_while",
"karel_synthax/0_if_2_while"]
if config.ablation_while_heuristic:
raise Exception("Both ablations are uncompatible")
elif config.ablation_while_heuristic:
self.synthax_files = ["karel_synthax/0_if_0_while",
"karel_synthax/1_if_0_while",
"karel_synthax/0_if_1_while_length_0",
"karel_synthax/0_if_1_while_length_1",
"karel_synthax/0_if_1_while_length_2",
"karel_synthax/0_if_1_while_length_3",
"karel_synthax/0_if_1_while_length_4",
"karel_synthax/0_if_1_while_length_5",
"karel_synthax/0_if_1_while_length_6",
"karel_synthax/0_if_1_while_length_7",
"karel_synthax/0_if_1_while_length_20",
"karel_synthax/2_if_0_while"]
else:
self.synthax_files = ["karel_synthax/0_if_0_while",
"karel_synthax/0_if_1_while_length_0",
"karel_synthax/0_if_1_while_length_1",
"karel_synthax/0_if_1_while_length_2",
"karel_synthax/0_if_1_while_length_3",
"karel_synthax/0_if_1_while_length_4",
"karel_synthax/0_if_1_while_length_5",
"karel_synthax/0_if_1_while_length_6",
"karel_synthax/0_if_1_while_length_7",
"karel_synthax/0_if_1_while_length_20",
"karel_synthax/1_if_0_while",
"karel_synthax/0_if_2_while"]
self.enum = karel_enum
self.action_threshold = 0
self.perception_thresholds = [0]
else:
self.synthax_files = ["vizdoom_synthax/0_if_0_while",
"vizdoom_synthax/1_if_0_while",
"vizdoom_synthax/0_if_1_while",
"vizdoom_synthax/2_if_0_while"]
if self.config.filtering == "none":
print("No filtering")
self.action_threshold = 0.
self.perception_thresholds = [0.]
elif self.config.filtering == "static":
print("Static filtering")
self.action_threshold = 0.98
self.perception_thresholds = [0.9]
else:
print("Dynamic filtering")
self.action_threshold = 0.98
self.perception_thresholds = [1.,0.95,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1]
self.enum = vizdoom_enum
self.model = Model(config, is_train=False)
self.checkpoint = config.checkpoint
self.checkpoint_name = os.path.basename(self.checkpoint)
self.saver = tf.train.Saver(max_to_keep=100)
self.saver.restore(self.session, self.checkpoint)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(self.session,
coord=coord, start=True)
exp_name = self.config.checkpoint.split("/")[-2]
k_str = str(self.config.num_k)
filter = self.config.filtering
self.exp_id = exp_name + "_k_"+ k_str + "_filter_"+ filter
self.query_generator = QueryGenerator(config,self.exp_id)
def check_correct_program(self, result, gt_program):
output_program = self.parser.parse(result,lexer=self.lexer)
if output_program is None:
return False
print("Program")
print(gt_program)
print(output_program)
p_prog, _ = self.enum.parse(output_program)
gt_prog, _ = self.enum.parse(gt_program)
if p_prog is None or gt_prog is None:
print("Parsing error")
return False,False
return gt_program == output_program, p_prog == gt_prog
def check_correct_execution(self, second_result):
n_false = 0
for l in second_result:
if l == "#f":
n_false += 1
if n_false == 0:
correct_execution = True
else:
correct_execution = False
print("FAILURE - Test Set", n_false)
return correct_execution
def log_results(self,count,count_correct_programs,count_correct_sequence,
count_incorrect_specification,mean_time_inference,max_time_inference,
mean_time_solver,max_time_solver):
denominator = float(len(self.dataset.ids))
count /= denominator
count_correct_programs /= denominator
count_correct_sequence /= denominator
count_incorrect_specification /= denominator
mean_time_inference /= denominator-WARMUP
mean_time_solver /= denominator
data = { "Execution accuracy": count,
"Program accuracy": count_correct_programs,
"Sequence accuracy": count_correct_sequence,
"Mean time inference": mean_time_inference,
"Max time inference": max_time_inference,
"Mean time solver": mean_time_solver,
"Max time solver": max_time_solver }
df = pd.DataFrame.from_dict(data,columns=["Value"],orient="index")
# Save results
log_file = "solver_logs/"+ self.exp_id+".txt"
with open(log_file,"w") as f:
df.to_csv(log_file,float_format="%.5f")
def solve(self):
count = 0
count_correct_programs = 0
count_correct_sequence = 0
count_test_set_failure = 0
count_unsat_failure = 0
count_incorrect_specification = 0
mean_time_inference = 0.
max_time_inference = 0.
mean_time_solver = 0.
max_time_solver = 0.
for i in range(len(self.dataset.ids)):
time_1 = time.time()
solver_input = Solver_Input(self.config, self.dataset, self.session, self.model, self.dsl, i)
time_2 = time.time()
time_inference = time_2 - time_1
if i > WARMUP:
mean_time_inference += time_inference
if time_inference > max_time_inference:
max_time_inference = time_inference
max_time = 0
if solver_input.sanity_check():
wrong_actions, wrong_perceptions = solver_input.truncate_input()
if wrong_actions > 0 or wrong_perceptions > 0:
count_incorrect_specification += 1
solver_input.filter_action_threshold(self.action_threshold)
for perception_threshold in self.perception_thresholds:
if self.config.filtering == "dynamic":
action_tokens, action_tokens_gt, perception_tokens, perception_tokens_gt = \
solver_input.filter_perception_proportion(perception_threshold)
else:
action_tokens, action_tokens_gt, perception_tokens, perception_tokens_gt = \
solver_input.filter_perception_threshold(perception_threshold)
is_break = False
for synthax_file in self.synthax_files:
print(synthax_file)
try:
time_3 = time.time()
result = self.query_generator.attempt(i,
solver_input.program_code,
action_tokens,
perception_tokens,
synthax_file)
time_4 = time.time()
solver_time = time_4 - time_3
if solver_time > max_time:
max_time = solver_time
is_break = True
break
except:
result = None
time_4 = time.time()
solver_time = time_4 - time_3
if solver_time > max_time:
max_time = solver_time
if is_break:
break
if result is not None:
try:
second_result = self.query_generator.test(i,
solver_input.program_code,
solver_input.test_action_tokens,
solver_input.test_perception_tokens,
synthax_file,
result)
correct_execution = self.check_correct_execution(second_result)
correct_sequence, correct_program = self.check_correct_program(result,solver_input.program_code)
except:
print("FAILURE - Test Set Exception")
correct_execution = False
correct_sequence = False
correct_program = False
if correct_execution:
count += 1
if correct_program:
count_correct_programs += 1
if correct_sequence:
count_correct_sequence += 1
print("SUCCESS - ALL")
else:
print("SUCCESS - PROGRAM")
else:
print("SUCCESS - EXECUTION")
else:
count_test_set_failure += 1
else:
print("FAILURE - Unsat")
count_unsat_failure += 1
else:
print("Failure - Input Problem")
mean_time_solver += max_time
if max_time_solver < max_time:
max_time_solver = max_time
denominator = float(i + 1)
print("EA", count / denominator,
"PA", count_correct_programs / denominator,
"SA", count_correct_sequence / denominator)
print("Unsat", count_unsat_failure / denominator,
"Test set", count_test_set_failure / denominator)
print("Time inference", time_inference,
"Time solver",max_time)
self.log_results(count,count_correct_programs,count_correct_sequence,
count_incorrect_specification,mean_time_inference,max_time_inference,
mean_time_solver,max_time_solver)
def generate_config(parser):
config = parser.parse_args()
if config.dataset_type == 'karel':
# Get dsl
f = h5py.File(os.path.join(config.dataset_path, 'data.hdf5'), 'r')
dsl_type = f['data_info']['dsl_type'].value
dsl = get_KarelDSL(dsl_type=dsl_type)
f.close()
import third_party.modified.karel_env.dataset_karel as dataset
dataset_train, dataset_test, dataset_val \
= dataset.create_default_splits(config.dataset_path, num_k=config.num_k)
elif config.dataset_type == 'vizdoom':
import third_party.modified.vizdoom_env.dataset_vizdoom as dataset
dataset_train, dataset_test, dataset_val \
= dataset.create_default_splits(config.dataset_path, num_k=config.num_k)
from third_party.demo2program.vizdoom_env.dsl.vocab import VizDoomDSLVocab
dsl = VizDoomDSLVocab(
perception_type=dataset_test.perception_type,
level=dataset_test.level)
else:
raise ValueError(config.dataset)
config.batch_size = 1
# Set data dimension in configuration
data_tuple = dataset_train.get_data(dataset_train.ids[0])
program, _, s_h, test_s_h, a_h, _, _, _, program_len, demo_len, test_demo_len, \
per, test_per = data_tuple[:13]
config.dim_program_token = np.asarray(program.shape)[0]
config.max_program_len = np.asarray(program.shape)[1]
config.k = np.asarray(s_h.shape)[0]
config.test_k = np.asarray(test_s_h.shape)[0]
config.max_demo_len = np.asarray(s_h.shape)[1]
config.h = np.asarray(s_h.shape)[2]
config.w = np.asarray(s_h.shape)[3]
config.depth = np.asarray(s_h.shape)[4]
config.action_space = np.asarray(a_h.shape)[2]
config.per_dim = np.asarray(per.shape)[2]
if config.dataset_type == 'karel':
config.dsl_type = dataset_train.dsl_type
config.env_type = dataset_train.env_type
config.vizdoom_pos_keys = []
config.vizdoom_max_init_pos_len = -1
config.perception_type = ''
config.level = None
elif config.dataset_type == 'vizdoom':
config.dsl_type = 'vizdoom_default' # vizdoom has 1 dsl type for now
config.env_type = 'vizdoom_default' # vizdoom has 1 env type
config.vizdoom_pos_keys = dataset_train.vizdoom_pos_keys
config.vizdoom_max_init_pos_len = dataset_train.vizdoom_max_init_pos_len
config.perception_type = dataset_train.perception_type
config.level = dataset_train.level
return config, dataset_test, dsl
def main():
import argparse
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dataset_type', type=str, default='karel',
choices=['karel', 'vizdoom'])
parser.add_argument('--filtering', type=str, default='none',
choices=['none', 'static','dynamic'])
parser.add_argument('--dataset_path', type=str,
default='datasets/karel_dataset',
help='the path to your dataset')
parser.add_argument('--num_k', type=int, default=10,
help='the number of seen demonstrations')
parser.add_argument('--num_lstm_cell_units', type=int, default=512)
parser.add_argument('--checkpoint', type=str, default='',help='the path to a trained checkpoint')
# Ablation of solver
parser.add_argument('--ablation_loop_heuristic', action='store_true', default=False,
help='set to True to ablate loop heuristic on Karel')
parser.add_argument('--ablation_while_heuristic', action='store_true', default=False,
help='set to True to ablate while heuristic on Karel')
config, dataset_test, dsl = generate_config(parser)
solver = Solver(config, dataset_test, dsl=dsl)
solver.solve()
if __name__ == '__main__':
main() | solver.py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from racket_compiler.racket_parser import Parser
from solver_input_generator import Solver_Input
from third_party.demo2program.karel_env.dsl import get_KarelDSL
from third_party.demo2program.karel_env.dsl import dsl_enum_program as karel_enum
from third_party.demo2program.vizdoom_env.dsl import dsl_enum_program as vizdoom_enum
import numpy as np
import tensorflow as tf
import h5py
import pandas as pd
from rosette_query_generator import *
from model.model_ours import Model
import time
WARMUP=10
class Solver():
def __init__(self, config, dataset, dsl=None):
self.config = config
self.dataset = dataset
self.dsl = dsl
self.batch_size = config.batch_size
session_config = tf.ConfigProto(
allow_soft_placement=True,
gpu_options=tf.GPUOptions(allow_growth=True),
device_count={'GPU': 1},
)
self.session = tf.Session(config=session_config)
self.parsing_class = Parser(config.dataset_type)
self.lexer = self.parsing_class.racket_lexer
self.parser = self.parsing_class.racket_parser
if config.dataset_type == 'karel':
if config.ablation_loop_heuristic:
self.synthax_files = ["karel_synthax/0_if_0_while",
"karel_synthax/0_if_1_while_length_20",
"karel_synthax/1_if_0_while",
"karel_synthax/0_if_2_while"]
if config.ablation_while_heuristic:
raise Exception("Both ablations are uncompatible")
elif config.ablation_while_heuristic:
self.synthax_files = ["karel_synthax/0_if_0_while",
"karel_synthax/1_if_0_while",
"karel_synthax/0_if_1_while_length_0",
"karel_synthax/0_if_1_while_length_1",
"karel_synthax/0_if_1_while_length_2",
"karel_synthax/0_if_1_while_length_3",
"karel_synthax/0_if_1_while_length_4",
"karel_synthax/0_if_1_while_length_5",
"karel_synthax/0_if_1_while_length_6",
"karel_synthax/0_if_1_while_length_7",
"karel_synthax/0_if_1_while_length_20",
"karel_synthax/2_if_0_while"]
else:
self.synthax_files = ["karel_synthax/0_if_0_while",
"karel_synthax/0_if_1_while_length_0",
"karel_synthax/0_if_1_while_length_1",
"karel_synthax/0_if_1_while_length_2",
"karel_synthax/0_if_1_while_length_3",
"karel_synthax/0_if_1_while_length_4",
"karel_synthax/0_if_1_while_length_5",
"karel_synthax/0_if_1_while_length_6",
"karel_synthax/0_if_1_while_length_7",
"karel_synthax/0_if_1_while_length_20",
"karel_synthax/1_if_0_while",
"karel_synthax/0_if_2_while"]
self.enum = karel_enum
self.action_threshold = 0
self.perception_thresholds = [0]
else:
self.synthax_files = ["vizdoom_synthax/0_if_0_while",
"vizdoom_synthax/1_if_0_while",
"vizdoom_synthax/0_if_1_while",
"vizdoom_synthax/2_if_0_while"]
if self.config.filtering == "none":
print("No filtering")
self.action_threshold = 0.
self.perception_thresholds = [0.]
elif self.config.filtering == "static":
print("Static filtering")
self.action_threshold = 0.98
self.perception_thresholds = [0.9]
else:
print("Dynamic filtering")
self.action_threshold = 0.98
self.perception_thresholds = [1.,0.95,0.9,0.8,0.7,0.6,0.5,0.4,0.3,0.2,0.1]
self.enum = vizdoom_enum
self.model = Model(config, is_train=False)
self.checkpoint = config.checkpoint
self.checkpoint_name = os.path.basename(self.checkpoint)
self.saver = tf.train.Saver(max_to_keep=100)
self.saver.restore(self.session, self.checkpoint)
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(self.session,
coord=coord, start=True)
exp_name = self.config.checkpoint.split("/")[-2]
k_str = str(self.config.num_k)
filter = self.config.filtering
self.exp_id = exp_name + "_k_"+ k_str + "_filter_"+ filter
self.query_generator = QueryGenerator(config,self.exp_id)
def check_correct_program(self, result, gt_program):
output_program = self.parser.parse(result,lexer=self.lexer)
if output_program is None:
return False
print("Program")
print(gt_program)
print(output_program)
p_prog, _ = self.enum.parse(output_program)
gt_prog, _ = self.enum.parse(gt_program)
if p_prog is None or gt_prog is None:
print("Parsing error")
return False,False
return gt_program == output_program, p_prog == gt_prog
def check_correct_execution(self, second_result):
n_false = 0
for l in second_result:
if l == "#f":
n_false += 1
if n_false == 0:
correct_execution = True
else:
correct_execution = False
print("FAILURE - Test Set", n_false)
return correct_execution
def log_results(self,count,count_correct_programs,count_correct_sequence,
count_incorrect_specification,mean_time_inference,max_time_inference,
mean_time_solver,max_time_solver):
denominator = float(len(self.dataset.ids))
count /= denominator
count_correct_programs /= denominator
count_correct_sequence /= denominator
count_incorrect_specification /= denominator
mean_time_inference /= denominator-WARMUP
mean_time_solver /= denominator
data = { "Execution accuracy": count,
"Program accuracy": count_correct_programs,
"Sequence accuracy": count_correct_sequence,
"Mean time inference": mean_time_inference,
"Max time inference": max_time_inference,
"Mean time solver": mean_time_solver,
"Max time solver": max_time_solver }
df = pd.DataFrame.from_dict(data,columns=["Value"],orient="index")
# Save results
log_file = "solver_logs/"+ self.exp_id+".txt"
with open(log_file,"w") as f:
df.to_csv(log_file,float_format="%.5f")
def solve(self):
count = 0
count_correct_programs = 0
count_correct_sequence = 0
count_test_set_failure = 0
count_unsat_failure = 0
count_incorrect_specification = 0
mean_time_inference = 0.
max_time_inference = 0.
mean_time_solver = 0.
max_time_solver = 0.
for i in range(len(self.dataset.ids)):
time_1 = time.time()
solver_input = Solver_Input(self.config, self.dataset, self.session, self.model, self.dsl, i)
time_2 = time.time()
time_inference = time_2 - time_1
if i > WARMUP:
mean_time_inference += time_inference
if time_inference > max_time_inference:
max_time_inference = time_inference
max_time = 0
if solver_input.sanity_check():
wrong_actions, wrong_perceptions = solver_input.truncate_input()
if wrong_actions > 0 or wrong_perceptions > 0:
count_incorrect_specification += 1
solver_input.filter_action_threshold(self.action_threshold)
for perception_threshold in self.perception_thresholds:
if self.config.filtering == "dynamic":
action_tokens, action_tokens_gt, perception_tokens, perception_tokens_gt = \
solver_input.filter_perception_proportion(perception_threshold)
else:
action_tokens, action_tokens_gt, perception_tokens, perception_tokens_gt = \
solver_input.filter_perception_threshold(perception_threshold)
is_break = False
for synthax_file in self.synthax_files:
print(synthax_file)
try:
time_3 = time.time()
result = self.query_generator.attempt(i,
solver_input.program_code,
action_tokens,
perception_tokens,
synthax_file)
time_4 = time.time()
solver_time = time_4 - time_3
if solver_time > max_time:
max_time = solver_time
is_break = True
break
except:
result = None
time_4 = time.time()
solver_time = time_4 - time_3
if solver_time > max_time:
max_time = solver_time
if is_break:
break
if result is not None:
try:
second_result = self.query_generator.test(i,
solver_input.program_code,
solver_input.test_action_tokens,
solver_input.test_perception_tokens,
synthax_file,
result)
correct_execution = self.check_correct_execution(second_result)
correct_sequence, correct_program = self.check_correct_program(result,solver_input.program_code)
except:
print("FAILURE - Test Set Exception")
correct_execution = False
correct_sequence = False
correct_program = False
if correct_execution:
count += 1
if correct_program:
count_correct_programs += 1
if correct_sequence:
count_correct_sequence += 1
print("SUCCESS - ALL")
else:
print("SUCCESS - PROGRAM")
else:
print("SUCCESS - EXECUTION")
else:
count_test_set_failure += 1
else:
print("FAILURE - Unsat")
count_unsat_failure += 1
else:
print("Failure - Input Problem")
mean_time_solver += max_time
if max_time_solver < max_time:
max_time_solver = max_time
denominator = float(i + 1)
print("EA", count / denominator,
"PA", count_correct_programs / denominator,
"SA", count_correct_sequence / denominator)
print("Unsat", count_unsat_failure / denominator,
"Test set", count_test_set_failure / denominator)
print("Time inference", time_inference,
"Time solver",max_time)
self.log_results(count,count_correct_programs,count_correct_sequence,
count_incorrect_specification,mean_time_inference,max_time_inference,
mean_time_solver,max_time_solver)
def generate_config(parser):
    """Parse CLI options and derive model dimensions from one data sample.

    Loads the requested dataset (karel or vizdoom), reads a single training
    example, and records its tensor dimensions on the config namespace.

    Args:
        parser: an ``argparse.ArgumentParser`` with all options registered.

    Returns:
        (config, dataset_test, dsl): the filled-in config namespace, the
        test split, and the DSL/vocab object for the chosen dataset.

    Raises:
        ValueError: if ``config.dataset_type`` is not supported.
    """
    config = parser.parse_args()
    if config.dataset_type == 'karel':
        # Get the DSL type recorded inside the dataset file.
        f = h5py.File(os.path.join(config.dataset_path, 'data.hdf5'), 'r')
        # ``Dataset.value`` was removed in h5py 3.x; ``[()]`` reads the scalar
        # and works on both old and new h5py versions.
        dsl_type = f['data_info']['dsl_type'][()]
        dsl = get_KarelDSL(dsl_type=dsl_type)
        f.close()
        import third_party.modified.karel_env.dataset_karel as dataset
        dataset_train, dataset_test, dataset_val \
            = dataset.create_default_splits(config.dataset_path, num_k=config.num_k)
    elif config.dataset_type == 'vizdoom':
        import third_party.modified.vizdoom_env.dataset_vizdoom as dataset
        dataset_train, dataset_test, dataset_val \
            = dataset.create_default_splits(config.dataset_path, num_k=config.num_k)
        from third_party.demo2program.vizdoom_env.dsl.vocab import VizDoomDSLVocab
        dsl = VizDoomDSLVocab(
            perception_type=dataset_test.perception_type,
            level=dataset_test.level)
    else:
        raise ValueError(config.dataset)
    config.batch_size = 1
    # Set data dimensions in the configuration from a single sample.
    data_tuple = dataset_train.get_data(dataset_train.ids[0])
    program, _, s_h, test_s_h, a_h, _, _, _, program_len, demo_len, test_demo_len, \
        per, test_per = data_tuple[:13]
    config.dim_program_token = np.asarray(program.shape)[0]
    config.max_program_len = np.asarray(program.shape)[1]
    config.k = np.asarray(s_h.shape)[0]            # number of seen demos
    config.test_k = np.asarray(test_s_h.shape)[0]  # number of held-out demos
    config.max_demo_len = np.asarray(s_h.shape)[1]
    config.h = np.asarray(s_h.shape)[2]
    config.w = np.asarray(s_h.shape)[3]
    config.depth = np.asarray(s_h.shape)[4]
    config.action_space = np.asarray(a_h.shape)[2]
    config.per_dim = np.asarray(per.shape)[2]
    if config.dataset_type == 'karel':
        config.dsl_type = dataset_train.dsl_type
        config.env_type = dataset_train.env_type
        # Vizdoom-only fields get neutral defaults for karel.
        config.vizdoom_pos_keys = []
        config.vizdoom_max_init_pos_len = -1
        config.perception_type = ''
        config.level = None
    elif config.dataset_type == 'vizdoom':
        config.dsl_type = 'vizdoom_default'  # vizdoom has 1 dsl type for now
        config.env_type = 'vizdoom_default'  # vizdoom has 1 env type
        config.vizdoom_pos_keys = dataset_train.vizdoom_pos_keys
        config.vizdoom_max_init_pos_len = dataset_train.vizdoom_max_init_pos_len
        config.perception_type = dataset_train.perception_type
        config.level = dataset_train.level
    return config, dataset_test, dsl
def main():
    """Command-line entry point: parse options, build the solver, run it."""
    import argparse

    arg_parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    arg_parser.add_argument('--dataset_type', type=str, default='karel',
                            choices=['karel', 'vizdoom'])
    arg_parser.add_argument('--filtering', type=str, default='none',
                            choices=['none', 'static', 'dynamic'])
    arg_parser.add_argument('--dataset_path', type=str,
                            default='datasets/karel_dataset',
                            help='the path to your dataset')
    arg_parser.add_argument('--num_k', type=int, default=10,
                            help='the number of seen demonstrations')
    arg_parser.add_argument('--num_lstm_cell_units', type=int, default=512)
    arg_parser.add_argument('--checkpoint', type=str, default='',
                            help='the path to a trained checkpoint')
    # Solver ablation switches (Karel only).
    arg_parser.add_argument('--ablation_loop_heuristic', action='store_true',
                            default=False,
                            help='set to True to ablate loop heuristic on Karel')
    arg_parser.add_argument('--ablation_while_heuristic', action='store_true',
                            default=False,
                            help='set to True to ablate while heuristic on Karel')

    config, dataset_test, dsl = generate_config(arg_parser)
    Solver(config, dataset_test, dsl=dsl).solve()
if __name__ == '__main__':
main() | 0.560734 | 0.106784 |
from os import getenv, path
import numpy as np
import torch
import torchvision
class ImageGenerationDataset(object):
    """Base class exposing images as autoregressive pixel sequences.

    ``_transform`` flattens an image into a 1-D pixel sequence and builds an
    (input, target) pair: the input is the raw integer sequence without its
    last element, the target the [-1, 1]-normalised sequence without its
    first element (i.e. next-pixel prediction).
    """

    def _transform(self, x):
        seq = np.array(x, dtype=np.int64).ravel()
        scaled = (seq.astype(np.float32) / 255) * 2 - 1
        return torch.from_numpy(seq[:-1]), torch.from_numpy(scaled[1:])

    @property
    def sequence_length(self):
        """Length of the model input produced for one sample."""
        first_input, _ = self[0]
        return len(first_input)
class Imagenet64(ImageGenerationDataset):
    """Imagenet 64x64 stored as memory-mapped ``.npy`` arrays on disk."""

    def __init__(self, root, train):
        split_file = "train_data.npy" if train else "val_data.npy"
        # mmap keeps the (large) array on disk and pages rows in on demand.
        self._data = np.load(
            path.join(root, "imagenet-64x64", split_file),
            mmap_mode="r"
        )

    def __getitem__(self, i):
        return self._transform(self._data[i])

    def __len__(self):
        return len(self._data)
class TorchvisionDataset(ImageGenerationDataset):
    """Adapt a ``torchvision`` dataset class to the pixel-sequence interface."""

    def __init__(self, dset, root, train):
        # ``dset`` is a torchvision dataset class, e.g. datasets.MNIST.
        self._dataset = dset(root, download=True, train=train)

    def __getitem__(self, i):
        image = self._dataset[i][0]
        return self._transform(image)

    def __len__(self):
        return len(self._dataset)
def add_dataset_arguments(parser):
    """Register the dataset-selection CLI options on ``parser``."""
    dataset_choices = ["mnist", "cifar10", "imagenet64"]
    parser.add_argument("--dataset", choices=dataset_choices,
                        default="mnist", help="Choose the dataset")
    # Default root comes from the environment when available.
    default_dir = getenv("DATASET_DIRECTORY", "./data")
    parser.add_argument("--dataset_directory", default=default_dir,
                        help="Where to find or place the datasets")
def get_dataset(args):
    """Return a (train, test) dataset pair selected by ``args.dataset``.

    Raises:
        RuntimeError: if ``args.dataset`` names an unknown dataset.
    """
    root = args.dataset_directory
    if args.dataset == "imagenet64":
        return (
            Imagenet64(root, True),
            Imagenet64(root, False)
        )
    torchvision_classes = {
        "mnist": torchvision.datasets.MNIST,
        "cifar10": torchvision.datasets.CIFAR10
    }
    if args.dataset not in torchvision_classes:
        raise RuntimeError("Dataset {} not available".format(args.dataset))
    chosen = torchvision_classes[args.dataset]
    return (
        TorchvisionDataset(chosen, root, True),
        TorchvisionDataset(chosen, root, False)
    )
from os import getenv, path
import numpy as np
import torch
import torchvision
class ImageGenerationDataset(object):
def _transform(self, x):
x = np.array(x, dtype=np.int64).ravel()
y = (x.astype(np.float32) / 255) * 2 - 1
return torch.from_numpy(x[:-1]), torch.from_numpy(y[1:])
@property
def sequence_length(self):
return len(self[0][0])
class Imagenet64(ImageGenerationDataset):
def __init__(self, root, train):
if train:
self._data = np.load(
path.join(root, "imagenet-64x64", "train_data.npy"),
mmap_mode="r"
)
else:
self._data = np.load(
path.join(root, "imagenet-64x64", "val_data.npy"),
mmap_mode="r"
)
def __getitem__(self, i):
return self._transform(self._data[i])
def __len__(self):
return len(self._data)
class TorchvisionDataset(ImageGenerationDataset):
def __init__(self, dset, root, train):
self._dataset = dset(root, download=True, train=train)
def __getitem__(self, i):
return self._transform(self._dataset[i][0])
def __len__(self):
return len(self._dataset)
def add_dataset_arguments(parser):
parser.add_argument(
"--dataset",
choices=["mnist", "cifar10", "imagenet64"],
default="mnist",
help="Choose the dataset"
)
parser.add_argument(
"--dataset_directory",
default=getenv("DATASET_DIRECTORY", "./data"),
help="Where to find or place the datasets"
)
def get_dataset(args):
root = args.dataset_directory
dsets = {
"mnist": torchvision.datasets.MNIST,
"cifar10": torchvision.datasets.CIFAR10
}
if args.dataset == "imagenet64":
return (
Imagenet64(root, True),
Imagenet64(root, False)
)
if args.dataset in dsets:
return (
TorchvisionDataset(dsets[args.dataset], root, True),
TorchvisionDataset(dsets[args.dataset], root, False)
)
else:
raise RuntimeError("Dataset {} not available".format(args.dataset)) | 0.757615 | 0.323955 |
import sys
import os
import tempfile
import unittest
import sd3.cfa.graph
import sd3.cfa.shortestpath
class TestGraph(unittest.TestCase):
    """Unit tests for ``sd3.cfa.graph`` and ``sd3.cfa.shortestpath``."""

    def test_edge(self):
        """Edge creation and attribute add/query."""
        node_src_id = 1
        node_dest_id = 2
        node_src = sd3.cfa.graph.Node(node_src_id)
        node_dest = sd3.cfa.graph.Node(node_dest_id)
        # Test creation
        edge = sd3.cfa.graph.Edge(node_src, node_dest)
        self.assertIs(edge.get_src(), node_src)
        self.assertIs(edge.get_dest(), node_dest)
        # Test attributes
        first_attr = "a"
        second_attr = "b"
        self.assertFalse(edge.has_attr(first_attr))
        self.assertFalse(edge.has_attr(second_attr))
        edge.add_attr(first_attr)
        self.assertTrue(edge.has_attr(first_attr))
        self.assertFalse(edge.has_attr(second_attr))
        edge.add_attr(second_attr)
        self.assertTrue(edge.has_attr(first_attr))
        self.assertTrue(edge.has_attr(second_attr))

    def test_node(self):
        """Node data, successor/predecessor bookkeeping."""
        node_src_id = 1
        node_dest_id = 2
        node_data = "Fake data"
        node_src = sd3.cfa.graph.Node(node_src_id)
        # Test initial state
        self.assertEqual(node_src.get_id(), node_src_id)
        self.assertIsNone(node_src.get_data())
        self.assertListEqual(node_src.get_successors(), [])
        self.assertListEqual(node_src.get_predecessors(), [])
        # Test data
        node_src.set_data(node_data)
        self.assertEqual(node_src.get_data(), node_data)
        # Test successor add
        node_dest = sd3.cfa.graph.Node(node_dest_id)
        self.assertFalse(node_src.has_successor(node_dest))
        node_src.add_successor(node_dest)
        self.assertTrue(node_src.has_successor(node_dest))
        self.assertFalse(node_dest.has_successor(node_src))
        successors = node_src.get_successors()
        self.assertEqual(len(successors), 1)
        self.assertIs(successors[0].get_src(), node_src)
        self.assertIs(successors[0].get_dest(), node_dest)
        predecessors = node_dest.get_predecessors()
        self.assertEqual(len(predecessors), 1)
        self.assertIs(predecessors[0].get_src(), node_src)
        self.assertIs(predecessors[0].get_dest(), node_dest)

    def test_node_loop(self):
        """A self-loop appears both as successor and predecessor."""
        node = sd3.cfa.graph.Node(1)
        node.add_successor(node)
        successors = node.get_successors()
        predecessors = node.get_predecessors()
        self.assertEqual(len(successors), 1)
        self.assertEqual(len(predecessors), 1)
        self.assertIs(successors[0].get_src(), node)
        self.assertIs(successors[0].get_dest(), node)
        self.assertIs(predecessors[0].get_src(), node)
        self.assertIs(predecessors[0].get_dest(), node)

    def test_graph(self):
        """Graph node management, entry/exit, iteration, edges, drawing."""
        node_id = 1
        second_node_id = 2
        # Fixed: was 2, which aliased second_node and made the edge check
        # below exercise a self-loop instead of a genuine third node.
        third_node_id = 3
        # New graph is empty
        graph = sd3.cfa.graph.Graph()
        self.assertIsNone(graph.get_entry())
        self.assertEqual(len(graph.get_node_list()), 0)
        # Forbid entry set on an unknown node
        node = sd3.cfa.graph.Node(node_id)
        self.assertRaises(KeyError, graph.set_entry, node)
        self.assertRaises(KeyError, graph.set_exit, node)
        # Test node add
        (node, is_new) = graph.add_node(node_id)
        self.assertEqual(node.get_id(), node_id)
        self.assertTrue(is_new)
        self.assertTrue(graph.has_node(node_id))
        # Test node add with the same id
        (existing_node, is_new) = graph.add_node(node_id)
        self.assertEqual(existing_node.get_id(), node_id)
        self.assertFalse(is_new)
        # Test that node can be fetched
        self.assertIs(graph.get_node(node_id), node)
        # Test set entry with an invalid node
        fake_node = sd3.cfa.graph.Node(node_id)
        self.assertRaises(KeyError, graph.set_entry, fake_node)
        self.assertRaises(KeyError, graph.set_exit, fake_node)
        # Test valid entry set
        graph.set_entry(node)
        self.assertIs(graph.get_entry(), node)
        # Test valid exit set
        graph.set_exit(node)
        self.assertIs(graph.get_exit(), node)
        # Test node list
        (second_node, _) = graph.add_node(second_node_id)
        self.assertListEqual(graph.get_node_list(), [node, second_node])
        # Test node iterator
        nodes_id = set()
        nodes_it = set()
        for it_node_id, it_node in graph.get_node_it():
            nodes_id.add(it_node_id)
            nodes_it.add(it_node)
        self.assertSetEqual(nodes_id, {node_id, second_node_id})
        self.assertSetEqual(nodes_it, {node, second_node})
        # Test edges
        (third_node, _) = graph.add_node(third_node_id)
        node.add_successor(second_node)
        second_node.add_successor(third_node)
        edge_set = set()
        for edge in list(graph.get_edges_it()):
            edge_set.add((edge.get_src(), edge.get_dest()))
        expected_edge_set = set()
        expected_edge_set.add((node, second_node))
        expected_edge_set.add((second_node, third_node))
        self.assertSetEqual(edge_set, expected_edge_set)
        # Run draw to test that the function doesn't crash
        node_id_str = lambda n: "%s" % n.get_id()
        (fd, path) = tempfile.mkstemp(suffix=".png")
        graph.draw(path, node_id_str=node_id_str)
        os.close(fd)
        os.remove(path)

    def test_shortest_path(self):
        """Shortest hop counts, including an unreachable pair."""
        graph = sd3.cfa.graph.Graph()
        # Create nodes
        (node1, _) = graph.add_node(1)
        (node2, _) = graph.add_node(2)
        (node3, _) = graph.add_node(3)
        (node4, _) = graph.add_node(4)
        (node5, _) = graph.add_node(5)
        (node6, _) = graph.add_node(6)
        (node7, _) = graph.add_node(7)
        # Create edges
        node1.add_successor(node2)
        node1.add_successor(node5)
        node1.add_successor(node7)
        node2.add_successor(node3)
        node2.add_successor(node4)
        node3.add_successor(node4)
        node3.add_successor(node7)
        node5.add_successor(node3)
        node5.add_successor(node5)
        node5.add_successor(node6)
        node7.add_successor(node6)
        # Check some paths
        self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node3), 2)
        self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node4), 2)
        self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node6), 2)
        self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node7), 1)
        self.assertEqual(sd3.cfa.shortestpath.get(graph, node2, node4), 1)
        self.assertEqual(
            sd3.cfa.shortestpath.get(graph, node2, node1),
            sd3.cfa.shortestpath.INFINITE)
        self.assertEqual(sd3.cfa.shortestpath.get(graph, node5, node6), 1)
import os
import tempfile
import unittest
import sd3.cfa.graph
import sd3.cfa.shortestpath
class TestGraph(unittest.TestCase):
def test_edge(self):
node_src_id = 1
node_dest_id = 2
node_src = sd3.cfa.graph.Node(node_src_id)
node_dest = sd3.cfa.graph.Node(node_dest_id)
# Test creation
edge = sd3.cfa.graph.Edge(node_src, node_dest)
self.assertIs(edge.get_src(), node_src)
self.assertIs(edge.get_dest(), node_dest)
# Test attributes
first_attr = "a"
second_attr = "b"
self.assertFalse(edge.has_attr(first_attr))
self.assertFalse(edge.has_attr(second_attr))
edge.add_attr(first_attr)
self.assertTrue(edge.has_attr(first_attr))
self.assertFalse(edge.has_attr(second_attr))
edge.add_attr(second_attr)
self.assertTrue(edge.has_attr(first_attr))
self.assertTrue(edge.has_attr(second_attr))
def test_node(self):
node_src_id = 1
node_dest_id = 2
node_data = "Fake data"
node_src = sd3.cfa.graph.Node(node_src_id)
# Test initial state
self.assertEqual(node_src.get_id(), node_src_id)
self.assertIsNone(node_src.get_data())
self.assertListEqual(node_src.get_successors(), [])
self.assertListEqual(node_src.get_predecessors(), [])
# Test data
node_src.set_data(node_data)
self.assertEqual(node_src.get_data(), node_data)
# Test successor add
node_dest = sd3.cfa.graph.Node(node_dest_id)
self.assertFalse(node_src.has_successor(node_dest))
node_src.add_successor(node_dest)
self.assertTrue(node_src.has_successor(node_dest))
self.assertFalse(node_dest.has_successor(node_src))
successors = node_src.get_successors()
self.assertEqual(len(successors), 1)
self.assertIs(successors[0].get_src(), node_src)
self.assertIs(successors[0].get_dest(), node_dest)
predecessors = node_dest.get_predecessors()
self.assertEqual(len(predecessors), 1)
self.assertIs(predecessors[0].get_src(), node_src)
self.assertIs(predecessors[0].get_dest(), node_dest)
def test_node_loop(self):
node = sd3.cfa.graph.Node(1)
node.add_successor(node)
successors = node.get_successors()
predecessors = node.get_predecessors()
self.assertEqual(len(successors), 1)
self.assertEqual(len(predecessors), 1)
self.assertIs(successors[0].get_src(), node)
self.assertIs(successors[0].get_dest(), node)
self.assertIs(predecessors[0].get_src(), node)
self.assertIs(predecessors[0].get_dest(), node)
def test_graph(self):
node_id = 1
second_node_id = 2
third_node_id = 2
# New graph is empty
graph = sd3.cfa.graph.Graph()
self.assertIsNone(graph.get_entry())
self.assertEqual(len(graph.get_node_list()), 0)
# Forbid entry set on an unknown node
node = sd3.cfa.graph.Node(node_id)
self.assertRaises(KeyError, graph.set_entry, node)
self.assertRaises(KeyError, graph.set_exit, node)
# Test node add
(node, is_new) = graph.add_node(node_id)
self.assertEqual(node.get_id(), node_id)
self.assertTrue(is_new)
self.assertTrue(graph.has_node(node_id))
# Test node add with the same id
(existing_node, is_new) = graph.add_node(node_id)
self.assertEqual(existing_node.get_id(), node_id)
self.assertFalse(is_new)
# Test that node can be fetched
self.assertIs(graph.get_node(node_id), node)
# Test set entry with an invalid node
fake_node = sd3.cfa.graph.Node(node_id)
self.assertRaises(KeyError, graph.set_entry, fake_node)
self.assertRaises(KeyError, graph.set_exit, fake_node)
# Test valid entry set
graph.set_entry(node)
self.assertIs(graph.get_entry(), node)
# Test valid exit set
graph.set_exit(node)
self.assertIs(graph.get_exit(), node)
# Test node list
(second_node, _) = graph.add_node(second_node_id)
self.assertListEqual(graph.get_node_list(), [node, second_node])
# Test node iterator
nodes_id = set()
nodes_it = set()
for it_node_id, it_node in graph.get_node_it():
nodes_id.add(it_node_id)
nodes_it.add(it_node)
self.assertSetEqual(nodes_id, {node_id, second_node_id})
self.assertSetEqual(nodes_it, {node, second_node})
# Test edges
(third_node, _) = graph.add_node(third_node_id)
node.add_successor(second_node)
second_node.add_successor(third_node)
edge_set = set()
for edge in list(graph.get_edges_it()):
edge_set.add((edge.get_src(), edge.get_dest()))
expected_edge_set = set()
expected_edge_set.add((node, second_node))
expected_edge_set.add((second_node, third_node))
self.assertSetEqual(edge_set, expected_edge_set)
# Run draw to test that the function doesn't crash
node_id_str = lambda n: "%s" % n.get_id()
(fd, path) = tempfile.mkstemp(suffix=".png")
graph.draw(path, node_id_str=node_id_str)
os.close(fd)
os.remove(path)
def test_shortest_path(self):
graph = sd3.cfa.graph.Graph()
# Create nodes
(node1, _) = graph.add_node(1)
(node2, _) = graph.add_node(2)
(node3, _) = graph.add_node(3)
(node4, _) = graph.add_node(4)
(node5, _) = graph.add_node(5)
(node6, _) = graph.add_node(6)
(node7, _) = graph.add_node(7)
# Create edges
node1.add_successor(node2)
node1.add_successor(node5)
node1.add_successor(node7)
node2.add_successor(node3)
node2.add_successor(node4)
node3.add_successor(node4)
node3.add_successor(node7)
node5.add_successor(node3)
node5.add_successor(node5)
node5.add_successor(node6)
node7.add_successor(node6)
# Check some paths
self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node3), 2)
self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node4), 2)
self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node6), 2)
self.assertEqual(sd3.cfa.shortestpath.get(graph, node1, node7), 1)
self.assertEqual(sd3.cfa.shortestpath.get(graph, node2, node4), 1)
self.assertEqual(
sd3.cfa.shortestpath.get(graph, node2, node1),
sd3.cfa.shortestpath.INFINITE)
self.assertEqual(sd3.cfa.shortestpath.get(graph, node5, node6), 1) | 0.449393 | 0.529811 |
import os
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import defaultdict
from config import config
class AvgrageMeter(object):
    """Running-average tracker (original spelling kept for compatibility)."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all accumulated statistics."""
        self.avg = 0
        self.sum = 0
        self.cnt = 0

    def update(self, val, n=1):
        """Record ``n`` observations whose mean value is ``val``."""
        self.sum = self.sum + val * n
        self.cnt = self.cnt + n
        self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
    """Compute top-k classification accuracy percentages.

    Args:
        output: (batch, num_classes) score tensor.
        target: (batch,) tensor of ground-truth class indices.
        topk: iterable of k values to evaluate.

    Returns:
        List of 0-dim tensors, the accuracy (in %) for each requested k.
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape(-1): ``correct[:k]`` is a non-contiguous slice of a
        # transposed tensor, so ``view(-1)`` raises in modern PyTorch.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
class Cutout(object):
    """Zero out one randomly-placed square patch of a CHW tensor, in place."""

    def __init__(self, length):
        # Side length of the square patch to erase.
        self.length = length

    def __call__(self, img):
        height, width = img.size(1), img.size(2)
        patch = np.ones((height, width), np.float32)
        # Same RNG call order as before: y first, then x.
        center_y = np.random.randint(height)
        center_x = np.random.randint(width)
        half = self.length // 2
        top = np.clip(center_y - half, 0, height)
        bottom = np.clip(center_y + half, 0, height)
        left = np.clip(center_x - half, 0, width)
        right = np.clip(center_x + half, 0, width)
        patch[top:bottom, left:right] = 0.
        mask = torch.from_numpy(patch).expand_as(img)
        img *= mask
        return img
def _data_transforms_cifar10(args):
    """Build the (train, valid) torchvision transform pipelines for CIFAR-10."""
    mean = [0.49139968, 0.48215827, 0.44653124]
    std = [0.24703233, 0.24348505, 0.26158768]

    train_transform = transforms.Compose([
        transforms.RandomCrop(32, padding=4),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    if args.cutout:
        # Cutout is applied after normalisation, appended to the pipeline.
        train_transform.transforms.append(Cutout(args.cutout_length))

    valid_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize(mean, std),
    ])
    return train_transform, valid_transform
def count_parameters_in_MB(model):
    """Return the parameter count of ``model`` in millions (MB of params).

    Parameters whose name contains ``"auxiliary"`` (auxiliary heads) are
    excluded. Uses the builtin ``sum``: passing a generator to ``np.sum``
    is deprecated and unsupported in recent NumPy versions.
    """
    return sum(np.prod(v.size()) for name, v in model.named_parameters()
               if "auxiliary" not in name) / 1e6
def save_checkpoint(state, is_best, save):
    """Write ``state`` to ``save``/checkpoint.pth.tar; copy it to
    model_best.pth.tar when ``is_best`` is true."""
    checkpoint_path = os.path.join(save, 'checkpoint.pth.tar')
    torch.save(state, checkpoint_path)
    if not is_best:
        return
    best_path = os.path.join(save, 'model_best.pth.tar')
    shutil.copyfile(checkpoint_path, best_path)
def save(model, model_path):
    """Serialize only ``model``'s weights (its state dict) to ``model_path``."""
    torch.save(model.state_dict(), model_path)
def load(model, model_path):
    """Restore weights saved by :func:`save` into ``model`` in place.

    NOTE(review): ``torch.load`` is called without ``map_location``, so a
    GPU-saved checkpoint requires CUDA to restore -- confirm intended.
    """
    model.load_state_dict(torch.load(model_path))
def drop_path(x, drop_prob):
    """Randomly zero whole samples of ``x`` (DropPath / stochastic depth).

    Each of the ``x.size(0)`` samples is kept with probability
    ``1 - drop_prob`` and rescaled by ``1 / (1 - drop_prob)`` so the
    expected activation is unchanged. Modifies ``x`` in place and returns it.
    """
    if drop_prob > 0.:
        keep_prob = 1. - drop_prob
        # Allocate the mask on x's device instead of torch.cuda.FloatTensor,
        # so the op also works for CPU tensors (and needs no Variable wrapper).
        mask = torch.empty(x.size(0), 1, 1, 1, device=x.device).bernoulli_(keep_prob)
        x.div_(keep_prob)
        x.mul_(mask)
    return x
def create_exp_dir(path, scripts_to_save=None):
    """Create an experiment directory and optionally snapshot scripts into it."""
    if not os.path.exists(path):
        os.makedirs(path, exist_ok=True)
    print('Experiment dir : {}'.format(path))

    if scripts_to_save is None:
        return
    script_dir = os.path.join(path, 'scripts')
    os.makedirs(script_dir, exist_ok=True)
    for src in scripts_to_save:
        # Copy each script under <path>/scripts keeping its basename.
        shutil.copyfile(src, os.path.join(script_dir, os.path.basename(src)))
def get_location(s, key):
    """Return every index at which ``key`` occurs in sequence ``s`` ([] if absent)."""
    positions = defaultdict(list)
    for index, value in enumerate(s):
        positions[value].append(index)
    return positions[key]
def list_substract(list1, list2):
    """Return the elements of ``list1`` not present in ``list2``, preserving order.

    The exclusion set is built once; the original built ``set(list2)``
    inside the comprehension condition, i.e. once per element (O(n*m)).
    """
    exclude = set(list2)
    return [item for item in list1 if item not in exclude]
def check_cand(cand, operations):
    """Repair an architecture candidate so per-node 'none' edge counts are valid.

    For each of the 4 intermediate nodes of a DARTS-style cell, node ``j``
    has ``j + 2`` incoming edges, of which exactly ``j`` must carry op id 0
    ('none'). If a node has too few zeros, randomly chosen non-zero edges
    are zeroed; if too many, a random subset of the zeroed edges is
    re-assigned a random non-'none' op from ``operations``.

    NOTE(review): only the first cell row (``cand[0]``) is repaired and
    returned -- confirm callers expect a single-cell tuple.

    Args:
        cand: flat op-id sequence, reshaped to (-1, config.edges).
        operations: per-edge lists of allowed op ids (index 0 assumed 'none').

    Returns:
        The repaired first cell as a tuple of op ids.
    """
    cand = np.reshape(cand, [-1, config.edges])
    offset, cell_cand = 0, cand[0]
    for j in range(4):
        # Node j owns the next j + 2 entries of the flat edge list.
        edges = cell_cand[offset:offset+j+2]
        edges_ops = operations[offset:offset+j+2]
        none_idxs = get_location(edges, 0)
        if len(none_idxs) < j:
            # Too few 'none' edges: zero out randomly chosen non-zero edges.
            general_idxs = list_substract(range(j+2), none_idxs)
            num = min(j-len(none_idxs), len(general_idxs))
            general_idxs = np.random.choice(general_idxs, size=num, replace=False, p=None)
            for k in general_idxs:
                edges[k] = 0
        elif len(none_idxs) > j:
            # Too many 'none' edges: revive a random subset with random ops
            # (index 0 of edges_ops[k] is skipped, presumably 'none').
            none_idxs = np.random.choice(none_idxs, size=len(none_idxs)-j, replace=False, p=None)
            for k in none_idxs:
                if len(edges_ops[k]) > 1:
                    l = np.random.randint(len(edges_ops[k])-1)
                    edges[k] = edges_ops[k][l+1]
        offset += len(edges)
    return tuple(cell_cand)
import numpy as np
import torch
import shutil
import torchvision.transforms as transforms
from torch.autograd import Variable
from collections import defaultdict
from config import config
class AvgrageMeter(object):
def __init__(self):
self.reset()
def reset(self):
self.avg = 0
self.sum = 0
self.cnt = 0
def update(self, val, n=1):
self.sum += val * n
self.cnt += n
self.avg = self.sum / self.cnt
def accuracy(output, target, topk=(1,)):
maxk = max(topk)
batch_size = target.size(0)
_, pred = output.topk(maxk, 1, True, True)
pred = pred.t()
correct = pred.eq(target.view(1, -1).expand_as(pred))
res = []
for k in topk:
correct_k = correct[:k].view(-1).float().sum(0)
res.append(correct_k.mul_(100.0/batch_size))
return res
class Cutout(object):
def __init__(self, length):
self.length = length
def __call__(self, img):
h, w = img.size(1), img.size(2)
mask = np.ones((h, w), np.float32)
y = np.random.randint(h)
x = np.random.randint(w)
y1 = np.clip(y - self.length // 2, 0, h)
y2 = np.clip(y + self.length // 2, 0, h)
x1 = np.clip(x - self.length // 2, 0, w)
x2 = np.clip(x + self.length // 2, 0, w)
mask[y1: y2, x1: x2] = 0.
mask = torch.from_numpy(mask)
mask = mask.expand_as(img)
img *= mask
return img
def _data_transforms_cifar10(args):
CIFAR_MEAN = [0.49139968, 0.48215827, 0.44653124]
CIFAR_STD = [0.24703233, 0.24348505, 0.26158768]
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
if args.cutout:
train_transform.transforms.append(Cutout(args.cutout_length))
valid_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize(CIFAR_MEAN, CIFAR_STD),
])
return train_transform, valid_transform
def count_parameters_in_MB(model):
return np.sum(np.prod(v.size()) for name, v in model.named_parameters() if "auxiliary" not in name)/1e6
def save_checkpoint(state, is_best, save):
filename = os.path.join(save, 'checkpoint.pth.tar')
torch.save(state, filename)
if is_best:
best_filename = os.path.join(save, 'model_best.pth.tar')
shutil.copyfile(filename, best_filename)
def save(model, model_path):
torch.save(model.state_dict(), model_path)
def load(model, model_path):
model.load_state_dict(torch.load(model_path))
def drop_path(x, drop_prob):
if drop_prob > 0.:
keep_prob = 1.-drop_prob
mask = Variable(torch.cuda.FloatTensor(x.size(0), 1, 1, 1).bernoulli_(keep_prob))
x.div_(keep_prob)
x.mul_(mask)
return x
def create_exp_dir(path, scripts_to_save=None):
if not os.path.exists(path):
os.makedirs(path, exist_ok=True)
print('Experiment dir : {}'.format(path))
if scripts_to_save is not None:
os.makedirs(os.path.join(path, 'scripts'), exist_ok=True)
for script in scripts_to_save:
dst_file = os.path.join(path, 'scripts', os.path.basename(script))
shutil.copyfile(script, dst_file)
def get_location(s, key):
d = defaultdict(list)
for k,va in [(v,i) for i,v in enumerate(s)]:
d[k].append(va)
return d[key]
def list_substract(list1, list2):
list1 = [item for item in list1 if item not in set(list2)]
return list1
def check_cand(cand, operations):
cand = np.reshape(cand, [-1, config.edges])
offset, cell_cand = 0, cand[0]
for j in range(4):
edges = cell_cand[offset:offset+j+2]
edges_ops = operations[offset:offset+j+2]
none_idxs = get_location(edges, 0)
if len(none_idxs) < j:
general_idxs = list_substract(range(j+2), none_idxs)
num = min(j-len(none_idxs), len(general_idxs))
general_idxs = np.random.choice(general_idxs, size=num, replace=False, p=None)
for k in general_idxs:
edges[k] = 0
elif len(none_idxs) > j:
none_idxs = np.random.choice(none_idxs, size=len(none_idxs)-j, replace=False, p=None)
for k in none_idxs:
if len(edges_ops[k]) > 1:
l = np.random.randint(len(edges_ops[k])-1)
edges[k] = edges_ops[k][l+1]
offset += len(edges)
return tuple(cell_cand) | 0.599485 | 0.37051 |
def objective(trial):
    """Optuna objective: sample CVAE hyperparameters, train, return the loss.

    The sampled values are also published through module-level globals so
    other parts of the script can read the chosen configuration.

    NOTE(review): ``third_hidden_dim`` is only assigned when
    ``layer_depth == 3``; for other depths the previous global value is
    reused -- confirm this is intended.

    Args:
        trial: Optuna ``Trial`` used to sample hyperparameters.

    Returns:
        The CVAE training loss to be minimised.
    """
    global layer_depth
    global first_hidden_dim
    global second_hidden_dim
    global third_hidden_dim
    global latent_dim
    global beta
    global kappa
    global batch_size
    global learning_rate
    # Used when the activation is leaky_relu.
    global leaky_alpha
    # Used when dropout is enabled.
    global dr_rate
    # NOTE(review): the study key 'lealy_alpha' is misspelled but kept for
    # compatibility with existing study databases.
    leaky_alpha = trial.suggest_loguniform('lealy_alpha',1e-5,1e-2)
    dr_rate = trial.suggest_loguniform('dr_rate',1e-5,1e-2)
    #layer_depth = trial.suggest_int("layer_num",1,3)
    layer_depth = trial.suggest_int("layer_num",3,3)  # fixed to 3 layers
    # Create hyperparameter variables for the node counts per layer depth.
    if layer_depth == 1:
        first_hidden_dim = 0
        second_hidden_dim = 0
    elif layer_depth == 2:
        first_hidden_dim = int(trial.suggest_int("first_layer_dim",100,2000))
        second_hidden_dim = 0
    elif layer_depth == 3:
        first_hidden_dim = int(trial.suggest_int("first_layer_dim",100,2000))
        second_hidden_dim = int(trial.suggest_int("second_layer_dim",100,first_hidden_dim))
        third_hidden_dim = int(trial.suggest_int("third_layer_dim",100,second_hidden_dim))
    latent_dim = trial.suggest_int("latent_dim",25,250)
    # Re-sample so the latent dimension never exceeds the last hidden layer.
    if layer_depth ==2:
        if latent_dim > first_hidden_dim:
            latent_dim = trial.suggest_int("latent_dim",25,first_hidden_dim)
        else:
            pass
    elif layer_depth == 3:
        if latent_dim > second_hidden_dim:
            latent_dim = trial.suggest_int("latent_dim",25,second_hidden_dim)
        else:
            pass
    else:
        pass
    beta = trial.suggest_discrete_uniform("beta",0.0,0.5,0.01)
    beta = K.variable(beta)
    kappa = trial.suggest_discrete_uniform("kappa",0.0,0.5,0.01)
    batch_size=trial.suggest_categorical('batch_size', [64,128,256, 512, 1024])
    learning_rate = trial.suggest_loguniform('learning_rate',1e-5,1e-2)
    # Build, compile and train the CVAE with the sampled configuration.
    cvae_model = CVAE(original_dim=original_dim,label_dim=label_dim,
                      first_hidden_dim = first_hidden_dim,
                      second_hidden_dim = second_hidden_dim,
                      third_hidden_dim = third_hidden_dim,
                      latent_dim = latent_dim,
                      batch_size = batch_size,
                      epochs = epoch,
                      learning_rate = learning_rate,
                      kappa = kappa,
                      beta = beta,
                      layer_depth = layer_depth,
                      leaky_alpha = leaky_alpha,
                      dr_rate = dr_rate)
    cvae_model.build_encoder_layer()
    cvae_model.build_decoder_layer()
    cvae_model.compile_cvae()
    cvae_model.train_cvae()
    loss=cvae_model.get_cvae_loss()
    return loss
global layer_depth
global first_hidden_dim
global second_hidden_dim
global third_hidden_dim
global latent_dim
global beta
global kappa
global batch_size
global learning_rate
#leaky_reluの時に使用する
global leaky_alpha
#ドロップアウトを用いる場合
global dr_rate
leaky_alpha = trial.suggest_loguniform('lealy_alpha',1e-5,1e-2)
dr_rate = trial.suggest_loguniform('dr_rate',1e-5,1e-2)
#layer_depth = trial.suggest_int("layer_num",1,3)
layer_depth = trial.suggest_int("layer_num",3,3)#3層に固定
#レイヤー数に応じたノードの数をとるハイパーパラメータ変数を作る
if layer_depth == 1:
first_hidden_dim = 0
second_hidden_dim = 0
elif layer_depth == 2:
first_hidden_dim = int(trial.suggest_int("first_layer_dim",100,2000))
second_hidden_dim = 0
elif layer_depth == 3:
first_hidden_dim = int(trial.suggest_int("first_layer_dim",100,2000))
second_hidden_dim = int(trial.suggest_int("second_layer_dim",100,first_hidden_dim))
third_hidden_dim = int(trial.suggest_int("third_layer_dim",100,second_hidden_dim))
latent_dim = trial.suggest_int("latent_dim",25,250)
#潜在空間の次元が隠れそうより大きくならないように条件分岐
if layer_depth ==2:
if latent_dim > first_hidden_dim:
latent_dim = trial.suggest_int("latent_dim",25,first_hidden_dim)
else:
pass
elif layer_depth == 3:
if latent_dim > second_hidden_dim:
latent_dim = trial.suggest_int("latent_dim",25,second_hidden_dim)
else:
pass
else:
pass
beta = trial.suggest_discrete_uniform("beta",0.0,0.5,0.01)
beta = K.variable(beta)
kappa = trial.suggest_discrete_uniform("kappa",0.0,0.5,0.01)
batch_size=trial.suggest_categorical('batch_size', [64,128,256, 512, 1024])
learning_rate = trial.suggest_loguniform('learning_rate',1e-5,1e-2)
cvae_model = CVAE(original_dim=original_dim,label_dim=label_dim,
first_hidden_dim = first_hidden_dim,
second_hidden_dim = second_hidden_dim,
third_hidden_dim = third_hidden_dim,
latent_dim = latent_dim,
batch_size = batch_size,
epochs = epoch,
learning_rate = learning_rate,
kappa = kappa,
beta = beta,
layer_depth = layer_depth,
leaky_alpha = leaky_alpha,
dr_rate = dr_rate)
cvae_model.build_encoder_layer()
cvae_model.build_decoder_layer()
cvae_model.compile_cvae()
cvae_model.train_cvae()
loss=cvae_model.get_cvae_loss()
return loss | 0.423696 | 0.437944 |
import numpy as np
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from winnow.feature_extraction import SimilarityModel
import cv2
import yaml
def create_directory(directories, root_dir, alias):
    """Create each ``root_dir/alias/<name>`` directory, printing any failure."""
    for name in directories:
        target = os.path.abspath(os.path.join(root_dir, alias, name))
        try:
            os.makedirs(target)
        except Exception as err:
            # Best-effort: an already-existing directory is reported, not fatal.
            print(err)
def scan_videos(path, wildcard):
    """List files under ``path`` matching the glob pattern ``wildcard``."""
    pattern = os.path.join(path, wildcard)
    return glob(pattern)
def get_original_fn_from_artifact(fp_list, sep):
    """Recover original video filenames from artifact file paths.

    Training artifacts are named ``<original>.<ext><sep>`` (e.g.
    ``video.mp4_vgg_features.npy``); splitting the basename on ``sep``
    recovers the original name.

    Arguments:
        fp_list {List} -- filepaths to training artifacts
        sep {String} -- artifact suffix separator (e.g. '_vgg_features.npy')

    Returns:
        [List] -- the original video filenames
    """
    basenames = (os.path.basename(fp) for fp in fp_list)
    return [name.split(sep)[0] for name in basenames]
def create_video_list(videos_to_be_processed, fp):
    """Write one video path per line to ``fp``; return its absolute path."""
    with open(fp, 'w', encoding="utf-8") as handle:
        handle.writelines("%s\n" % video for video in videos_to_be_processed)
    return os.path.abspath(fp)
def filter_results(thr, distances, indices):
    """Keep only neighbours whose distance is strictly below ``thr``.

    Returns per-row arrays of surviving indices and their distances.
    """
    keep = distances < thr
    results = [indices[row, row_mask] for row, row_mask in enumerate(keep)]
    results_distances = [distances[row, row_mask] for row, row_mask in enumerate(keep)]
    return results, results_distances
def uniq(row):
    """Canonical key for an unordered (query, match) pair of a result row."""
    pair = sorted([row['query'], row['match']])
    return ''.join(str(member) for member in pair)
def extract_additional_info(x):
    """Compute summary statistics for a saved feature/frame artifact pair.

    Args:
        x: path to a ``*_vgg_features.npy`` file; the matching
           ``*_vgg_frames.npy`` path is derived from it.

    Returns:
        (n_frames, mean_act, std_sum, max_dif, grays_avg, grays_std,
        grays_max): frame count and activation statistics from the feature
        array, plus statistics of the per-frame mean grayscale intensity.
    """
    v = np.load(x)
    frames = np.load(x.replace('_vgg_features','_vgg_frames'))
    # Per-frame mean grayscale intensity (note: loop variable shadows
    # the parameter ``x``; harmless because ``x`` is not used afterwards).
    grays = np.array([cv2.cvtColor(x,cv2.COLOR_BGR2GRAY) for x in frames])
    grays = np.array([np.mean(x) for x in grays])
    grays_avg = np.mean(grays,axis=0)
    grays_std = np.std(grays,axis=0)
    try:
        grays_max = np.max(grays)
    except:  # NOTE(review): bare except -- presumably guards empty input; confirm
        grays_max = 0
    shape = v.shape
    # Total activation per frame.
    intra_sum = np.sum(v,axis=1)
    mean_act = np.mean(intra_sum)
    try:
        max_dif = np.max(intra_sum) - np.min(intra_sum)
    except:  # NOTE(review): same empty-input guard as above
        max_dif = 0
    std_sum = np.std(intra_sum)
    return shape[0],mean_act,std_sum,max_dif,grays_avg,grays_std,grays_max
from glob import glob
import matplotlib.pyplot as plt
import numpy as np
import os
import pandas as pd
from sklearn.neighbors import NearestNeighbors
from winnow.feature_extraction import SimilarityModel
import cv2
import yaml
def create_directory(directories,root_dir,alias):
for r in directories:
try:
os.makedirs(os.path.abspath(os.path.join(root_dir,alias,r)))
except Exception as e:
print(e)
def scan_videos(path,wildcard):
return glob(os.path.join(path,wildcard))
def get_original_fn_from_artifact(fp_list, sep):
    """Get original video filename using our encoding convention for generating
    additional training artifacts.

    Arguments:
        fp_list {List} -- List of filepaths to training artifacts
            (such as [path/filename.mp4_vgg_features.npy])
        sep {String} -- Artifact separator (eg. 'vgg_features.npy')

    Returns:
        [List] -- List of the original video filenames
    """
    names = []
    for fp in fp_list:
        names.append(os.path.basename(fp).split(sep)[0])
    return names
def create_video_list(videos_to_be_processed,fp):
with open(fp, 'w', encoding="utf-8") as f:
for item in videos_to_be_processed:
f.write("%s\n" % item)
return os.path.abspath(fp)
def filter_results(thr,distances,indices):
results = []
results_distances = []
msk = distances < thr
for i,r in enumerate(msk):
results.append(indices[i,r])
results_distances.append(distances[i,r])
return results,results_distances
def uniq(row):
return ''.join([str(x) for x in sorted([row['query'],row['match']])])
def extract_additional_info(x):
v = np.load(x)
frames = np.load(x.replace('_vgg_features','_vgg_frames'))
grays = np.array([cv2.cvtColor(x,cv2.COLOR_BGR2GRAY) for x in frames])
grays = np.array([np.mean(x) for x in grays])
grays_avg = np.mean(grays,axis=0)
grays_std = np.std(grays,axis=0)
try:
grays_max = np.max(grays)
except:
grays_max = 0
shape = v.shape
intra_sum = np.sum(v,axis=1)
mean_act = np.mean(intra_sum)
try:
max_dif = np.max(intra_sum) - np.min(intra_sum)
except:
max_dif = 0
std_sum = np.std(intra_sum)
return shape[0],mean_act,std_sum,max_dif,grays_avg,grays_std,grays_max | 0.41941 | 0.344829 |
import os
import logging
from keyring.keyring.util import properties
from keyring.keyring.backend import KeyringBackend
from keyring.keyring.errors import (InitError, PasswordDeleteError,
ExceptionRaisedContext)
try:
import secretstorage.exceptions
except ImportError:
pass
log = logging.getLogger(__name__)
class Keyring(KeyringBackend):
    """Secret Service Keyring.

    Backend that stores secrets via the freedesktop.org Secret Service
    D-Bus API (through the `secretstorage` package).
    """

    @properties.ClassProperty
    @classmethod
    def priority(cls):
        """Viability/priority probe: raise RuntimeError when the Secret
        Service is unusable, otherwise return priority 5."""
        with ExceptionRaisedContext() as exc:
            secretstorage.__name__
        if exc:
            raise RuntimeError("SecretService required")
        try:
            bus = secretstorage.dbus_init()
            secretstorage.Collection(bus)
        except secretstorage.exceptions.SecretServiceNotAvailableException:
            # Message was previously garbled ("Unable to get initialize ...").
            raise RuntimeError("Unable to initialize SecretService")
        if 'DISPLAY' not in os.environ:
            raise RuntimeError("SecretService cannot run without a DISPLAY "
                               "environment variable")
        return 5

    def get_default_collection(self):
        """Return the default secret collection, unlocking it if needed.

        Raises:
            InitError: when the collection could not be unlocked.
        """
        bus = secretstorage.dbus_init()
        if hasattr(secretstorage, 'get_default_collection'):
            collection = secretstorage.get_default_collection(bus)
        else:
            collection = secretstorage.Collection(bus)
        if collection.is_locked():
            # NOTE(review): assumes unlock() returns a truthy value on
            # failure/dismissal (older secretstorage semantics) -- confirm
            # against the pinned secretstorage version.
            if collection.unlock():
                raise InitError("Failed to unlock the collection!")
        return collection

    def get_password(self, service, username):
        """Get password of the username for the service.

        Returns the first matching secret as text, or None when no item
        matches.
        """
        collection = self.get_default_collection()
        items = collection.search_items(
            {"username": username, "service": service})
        for item in items:
            return item.get_secret().decode('utf-8')

    def set_password(self, service, username, password):
        """Set password for the username of the service (replacing any
        existing item with the same attributes)."""
        collection = self.get_default_collection()
        attributes = {
            "application": "python-keyring",
            "service": service,
            "username": username
        }
        label = "Password for '%s' on '%s'" % (username, service)
        collection.create_item(label, attributes, password, replace=True)

    def delete_password(self, service, username):
        """Delete the stored password (only the first one).

        Raises:
            PasswordDeleteError: when no matching item exists.
        """
        collection = self.get_default_collection()
        items = collection.search_items(
            {"username": username, "service": service})
        for item in items:
            return item.delete()
        raise PasswordDeleteError("No such password!")
import logging
from keyring.keyring.util import properties
from keyring.keyring.backend import KeyringBackend
from keyring.keyring.errors import (InitError, PasswordDeleteError,
ExceptionRaisedContext)
try:
import secretstorage.exceptions
except ImportError:
pass
log = logging.getLogger(__name__)
class Keyring(KeyringBackend):
"""Secret Service Keyring"""
@properties.ClassProperty
@classmethod
def priority(cls):
with ExceptionRaisedContext() as exc:
secretstorage.__name__
if exc:
raise RuntimeError("SecretService required")
try:
bus = secretstorage.dbus_init()
secretstorage.Collection(bus)
except secretstorage.exceptions.SecretServiceNotAvailableException:
raise RuntimeError("Unable to get initialize SecretService")
if 'DISPLAY' not in os.environ:
raise RuntimeError("SecretService cannot run without a DISPLAY "
"environment variable")
return 5
def get_default_collection(self):
bus = secretstorage.dbus_init()
if hasattr(secretstorage, 'get_default_collection'):
collection = secretstorage.get_default_collection(bus)
else:
collection = secretstorage.Collection(bus)
if collection.is_locked():
if collection.unlock():
raise InitError("Failed to unlock the collection!")
return collection
def get_password(self, service, username):
"""Get password of the username for the service
"""
collection = self.get_default_collection()
items = collection.search_items(
{"username": username, "service": service})
for item in items:
return item.get_secret().decode('utf-8')
def set_password(self, service, username, password):
"""Set password for the username of the service
"""
collection = self.get_default_collection()
attributes = {
"application": "python-keyring",
"service": service,
"username": username
}
label = "Password for '%s' on '%s'" % (username, service)
collection.create_item(label, attributes, password, replace=True)
def delete_password(self, service, username):
"""Delete the stored password (only the first one)
"""
collection = self.get_default_collection()
items = collection.search_items(
{"username": username, "service": service})
for item in items:
return item.delete()
raise PasswordDeleteError("No such password!") | 0.464416 | 0.094678 |
import numpy as np
import math
def calculateSurface(mesh, vertGroups=None, faceMask=None):
    """
    Calculate surface area of a mesh. Specify vertGroups or faceMask to
    calculate area of a subset of the mesh and filter out other faces.
    """
    # Select the faces to measure (all faces by default).
    if vertGroups is not None:
        fvert = mesh.fvert[mesh.getFacesForGroups(vertGroups)]
    elif faceMask is not None:
        fvert = mesh.fvert[np.argwhere(faceMask)[..., 0]]
    else:
        fvert = mesh.fvert
    if mesh.vertsPerPrimitive == 3:
        lengths = _sideLengthsFromTris(mesh.coord[fvert])
    elif mesh.vertsPerPrimitive == 4:
        # Split each quad into two triangles (assumes clockwise vertex order).
        firstHalf = mesh.coord[fvert[:, [0, 1, 2]]]
        secondHalf = mesh.coord[fvert[:, [2, 3, 0]]]
        lengths = np.vstack([_sideLengthsFromTris(firstHalf),
                             _sideLengthsFromTris(secondHalf)])
    else:
        raise RuntimeError("Only supports meshes with triangle or quad primitives.")
    return _surfaceOfTris(lengths)
def calculateVolume(mesh, vertGroups=None, faceMask=None):
    """
    Calculate the volume of a mesh.
    Mesh is expected to be closed.
    """
    # Select the faces to measure (all faces by default).
    if vertGroups is not None:
        fvert = mesh.fvert[mesh.getFacesForGroups(vertGroups)]
    elif faceMask is not None:
        fvert = mesh.fvert[np.argwhere(faceMask)[..., 0]]
    else:
        fvert = mesh.fvert
    if mesh.vertsPerPrimitive == 3:
        tris = mesh.coord[fvert]
    elif mesh.vertsPerPrimitive == 4:
        # Split each quad into two triangles (assumes clockwise vertex order).
        tris = np.vstack([mesh.coord[fvert[:, [0, 1, 2]]],
                          mesh.coord[fvert[:, [2, 3, 0]]]])
    else:
        raise RuntimeError("Only supports meshes with triangle or quad primitives.")
    return _signedVolumeFromTris(tris)
def _sideLengthsFromTris(triVects):
"""
Calculate lengths of the sides of triangles specified by their vectors
in clockwise fashion.
triVects = [ [T1V1, T1V2, T1V3], [T2V1, T2V2, T2V3], ... ]
with Ti a triangle, Vi a triange vector, defined in clockwise fashion
and each vector (TiVi) an array [x, y, z] with vector coordinates
Returns a list [ [T1L1, T1L2, T1L3], [T2L1, T2L2, T2L3], ...]
with Ti a triangle (in the same order as in the input), and Li the length of
side i (a float)
"""
v = triVects
s = np.zeros(v.shape, dtype=np.float32)
# Get side vectors
s[:,0] = v[:,1] - v[:,0]
s[:,1] = v[:,2] - v[:,1]
s[:,2] = v[:,0] - v[:,2]
# Calculate lengths of sides
l = s[:,:,0]*s[:,:,0] + s[:,:,1]*s[:,:,1] + s[:,:,2]*s[:,:,2]
l = np.sqrt(l)
return l
def _surfaceOfTris(triSideLengths):
"""
Calculate total surface area of triangles with sides of specified lengths
triSideLengths should be an array of layout
[ [T1L1, T1L2, T1L3], [T2L1, T2L2, T2L3], ... ]
with Ti a triangle, and Li the length of the ith side of the triangle
TiLi should be a float.
Returns a float representing the total surface area.
"""
l = triSideLengths
# Heron's formula
o = ( l[:,0] +l[:,1] +l[:,2]) * \
( l[:,0] +l[:,1] -l[:,2]) * \
(-l[:,0] +l[:,1] +l[:,2]) * \
( l[:,0] -l[:,1] +l[:,2])
o = np.sqrt(o)/4
return np.sum(o)
def _signedVolumeFromTris(triVects):
"""
Calculate volume of a set of triangles by summing signed volumes of
tetrahedrons between those triangles and the origin.
"""
v = triVects
v321 = v[:,2,0] * v[:,1,1] * v[:,0,2]
v231 = v[:,1,0] * v[:,2,1] * v[:,0,2]
v312 = v[:,2,0] * v[:,0,1] * v[:,1,2]
v132 = v[:,0,0] * v[:,2,1] * v[:,1,2]
v213 = v[:,1,0] * v[:,0,1] * v[:,2,2]
v123 = v[:,0,0] * v[:,1,1] * v[:,2,2]
signedVolume = -v321 + v231 + v312 - v132 - v213 + v123
signedVolume /= 6.0
vol = np.sum(signedVolume)
return math.fabs(vol)
def findVertIndex(mesh, vert):
    """
    Find the index of specified vertex (as an [x, y, z] array) within mesh.
    """
    # Comparing coord (n, 3) against vert broadcasts per component; a row is
    # a real match only when all three components are equal, i.e. its index
    # appears three times in the per-component hit list.
    componentHits = list(np.where(mesh.coord == vert)[0])
    return [row for row in set(componentHits) if componentHits.count(row) > 2]
import math
def calculateSurface(mesh, vertGroups=None, faceMask=None):
"""
Calculate surface area of a mesh. Specify vertGroups or faceMask to
calculate area of a subset of the mesh and filter out other faces.
"""
if vertGroups is not None:
f_idx = mesh.getFacesForGroups(vertGroups)
fvert = mesh.fvert[f_idx]
elif faceMask is not None:
f_idx = np.argwhere(faceMask)[...,0]
fvert = mesh.fvert[f_idx]
else:
fvert = mesh.fvert
if mesh.vertsPerPrimitive == 4:
# Split quads in triangles (assumes clockwise ordering of verts)
t1 = fvert[:,[0,1,2]]
t2 = fvert[:,[2,3,0]]
v1 = mesh.coord[t1]
v2 = mesh.coord[t2]
l1 = _sideLengthsFromTris(v1)
l2 = _sideLengthsFromTris(v2)
l = np.vstack([l1,l2])
return _surfaceOfTris(l)
elif mesh.vertsPerPrimitive == 3:
v = mesh.coord[fvert]
l = _sideLengthsFromTris(v)
return _surfaceOfTris(l)
else:
raise RuntimeError("Only supports meshes with triangle or quad primitives.")
def calculateVolume(mesh, vertGroups=None, faceMask=None):
"""
Calculate the volume of a mesh.
Mesh is expected to be closed.
"""
if vertGroups is not None:
f_idx = mesh.getFacesForGroups(vertGroups)
fvert = mesh.fvert[f_idx]
elif faceMask is not None:
f_idx = np.argwhere(faceMask)[...,0]
fvert = mesh.fvert[f_idx]
else:
fvert = mesh.fvert
if mesh.vertsPerPrimitive == 4:
# Split quads in triangles (assumes clockwise ordering of verts)
t1 = fvert[:,[0,1,2]]
t2 = fvert[:,[2,3,0]]
v1 = mesh.coord[t1]
v2 = mesh.coord[t2]
v = np.vstack([v1,v2])
return _signedVolumeFromTris(v)
elif mesh.vertsPerPrimitive == 3:
v = mesh.coord[fvert]
return _signedVolumeFromTris(v)
else:
raise RuntimeError("Only supports meshes with triangle or quad primitives.")
def _sideLengthsFromTris(triVects):
"""
Calculate lengths of the sides of triangles specified by their vectors
in clockwise fashion.
triVects = [ [T1V1, T1V2, T1V3], [T2V1, T2V2, T2V3], ... ]
with Ti a triangle, Vi a triange vector, defined in clockwise fashion
and each vector (TiVi) an array [x, y, z] with vector coordinates
Returns a list [ [T1L1, T1L2, T1L3], [T2L1, T2L2, T2L3], ...]
with Ti a triangle (in the same order as in the input), and Li the length of
side i (a float)
"""
v = triVects
s = np.zeros(v.shape, dtype=np.float32)
# Get side vectors
s[:,0] = v[:,1] - v[:,0]
s[:,1] = v[:,2] - v[:,1]
s[:,2] = v[:,0] - v[:,2]
# Calculate lengths of sides
l = s[:,:,0]*s[:,:,0] + s[:,:,1]*s[:,:,1] + s[:,:,2]*s[:,:,2]
l = np.sqrt(l)
return l
def _surfaceOfTris(triSideLengths):
"""
Calculate total surface area of triangles with sides of specified lengths
triSideLengths should be an array of layout
[ [T1L1, T1L2, T1L3], [T2L1, T2L2, T2L3], ... ]
with Ti a triangle, and Li the length of the ith side of the triangle
TiLi should be a float.
Returns a float representing the total surface area.
"""
l = triSideLengths
# Heron's formula
o = ( l[:,0] +l[:,1] +l[:,2]) * \
( l[:,0] +l[:,1] -l[:,2]) * \
(-l[:,0] +l[:,1] +l[:,2]) * \
( l[:,0] -l[:,1] +l[:,2])
o = np.sqrt(o)/4
return np.sum(o)
def _signedVolumeFromTris(triVects):
"""
Calculate volume of a set of triangles by summing signed volumes of
tetrahedrons between those triangles and the origin.
"""
v = triVects
v321 = v[:,2,0] * v[:,1,1] * v[:,0,2]
v231 = v[:,1,0] * v[:,2,1] * v[:,0,2]
v312 = v[:,2,0] * v[:,0,1] * v[:,1,2]
v132 = v[:,0,0] * v[:,2,1] * v[:,1,2]
v213 = v[:,1,0] * v[:,0,1] * v[:,2,2]
v123 = v[:,0,0] * v[:,1,1] * v[:,2,2]
signedVolume = -v321 + v231 + v312 - v132 - v213 + v123
signedVolume /= 6.0
vol = np.sum(signedVolume)
return math.fabs(vol)
def findVertIndex(mesh, vert):
"""
Find the index of specified vertex (as an [x, y, z] array) within mesh.
"""
matches = list(np.where(mesh.coord == vert)[0])
return [idx for idx in set(matches) if matches.count(idx) > 2] | 0.793026 | 0.753263 |
import htmlvis
import pytest
from mock import Mock, mock_open
@pytest.fixture(autouse=True)
def patch_seqdiag_draw(mocker):
    """Stub out diagram rendering so every test can inspect draw's call args."""
    mocker.patch('htmlvis.seqdiag.draw')
@pytest.fixture(autouse=True)
def patch_open(mocker):
    """Replace the builtin open() so no test touches the filesystem."""
    try:
        mocker.patch('builtins.open', mock_open())
    except ImportError:
        # Python 2 exposes builtins under a different module name.
        mocker.patch('__builtin__.open', mock_open())
@pytest.fixture
def successful_transaction():
    """A PUT request answered with 200 OK."""
    request = htmlvis.Request(
        body='request body',
        elapsed=0.1,
        headers={},
        method='PUT',
        url_path='/kindness')
    response = htmlvis.Response(
        body='response body', elapsed=0.2, headers={}, status='200 OK')
    return htmlvis.Transaction(
        client_name='The Client',
        server_name='The Server',
        request=request,
        response=response)
@pytest.fixture
def error_transaction():
    """A GET request answered with 404 Not Found."""
    request = htmlvis.Request(
        body='request body',
        elapsed=1.1,
        headers={},
        method='GET',
        url_path='/rudeness')
    response = htmlvis.Response(
        body='response body',
        elapsed=1.2,
        headers={},
        status='404 Not Found')
    return htmlvis.Transaction(
        client_name='The Client',
        server_name='The Server',
        request=request,
        response=response)
class TestRequestProcessingInSaveSeqDiag(object):
    """How save_seq_diag maps a transaction's *request* onto the first
    sequence-diagram message handed to the (patched) seqdiag.draw."""

    def test_creates_a_request_message_from_the_transaction_request(
            self, mocker, successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        # seqdiag.draw is patched by the autouse fixture, so call_args
        # captures the messages that would have been drawn; index 0 is
        # the request message.
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.category == htmlvis.seqdiag_model.Category.request

    def test_the_client_is_the_message_source(self, mocker,
                                              successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.src == successful_transaction.client_name

    def test_the_server_is_the_message_destination(self, mocker,
                                                   successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.dst == successful_transaction.server_name

    def test_the_url_is_passed_as_additional_data(self, mocker,
                                                  successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.data[
            'url'] == successful_transaction.request.url_path

    def test_the_body_is_passed_as_note(self, mocker, successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.note == successful_transaction.request.body

    # NOTE: the fixture *functions* are parametrized directly, so each case
    # calls them as plain callables (transaction()) below.
    @pytest.mark.parametrize(
        'transaction, expected_method',
        [(successful_transaction, 'PUT'), (error_transaction, 'GET')])
    def test_includes_the_http_method_as_message_data(self, transaction,
                                                      expected_method):
        sniffer = Mock()
        sniffer.transactions = [transaction()]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.data['method'] == expected_method

    @pytest.mark.parametrize('transaction, expected_text',
                             [(successful_transaction, 'PUT /kindness'),
                              (error_transaction, 'GET /rudeness')])
    def test_combines_http_method_and_url_path_as_message_text(
            self, transaction, expected_text):
        sniffer = Mock()
        sniffer.transactions = [transaction()]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
        assert request_msg.text == expected_text
class TestResponseProcessingInSaveSeqDiag(object):
    """How save_seq_diag maps a transaction's *response* onto the second
    sequence-diagram message handed to the (patched) seqdiag.draw."""

    def test_creates_a_response_message_from_the_transaction_response(
            self, mocker, successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        # Index 1 is the response message (index 0 is the request).
        response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
        assert response_msg.category == htmlvis.seqdiag_model.Category.response

    def test_the_server_is_the_message_source(self, mocker,
                                              successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
        assert response_msg.src == successful_transaction.server_name

    def test_the_client_is_the_message_destination(self, mocker,
                                                   successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
        assert response_msg.dst == successful_transaction.client_name

    def test_the_status_is_passed_as_additional_data(self, mocker,
                                                     successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
        assert response_msg.data[
            'status'] == successful_transaction.response.status

    @pytest.mark.parametrize('transaction, expected_text',
                             [(successful_transaction, '200 OK'),
                              (error_transaction, '404 Not Found')])
    def test_uses_the_status_as_message_text(self, transaction, expected_text):
        sniffer = Mock()
        sniffer.transactions = [transaction()]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
        assert response_msg.text == expected_text

    def test_the_body_is_passed_as_note(self, mocker, successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
        assert response_msg.note == successful_transaction.response.body
class TestTransactionProcessingInSaveSeqDiag(object):
    """Whole-transaction behaviour of save_seq_diag: message counts,
    ordering by elapsed time, multi-sniffer merging, and file output."""

    def test_passes_both_request_and_response_to_the_sequence_diagram_generator(
            self, successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        messages = htmlvis.seqdiag.draw.call_args[1]['messages']
        # One transaction -> one request message + one response message.
        assert len(messages) == 2

    def test_passes_multiple_transactions_to_the_sequence_diagram_generator(
            self, successful_transaction, error_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction, error_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        messages = htmlvis.seqdiag.draw.call_args[1]['messages']
        assert len(messages) == 4

    def test_converts_transactions_to_messages_ordered_by_elapsed_time(
            self, successful_transaction, error_transaction):
        # Interleave the elapsed times so ordering by time differs from
        # ordering by transaction.
        successful_transaction.request.elapsed = 0.01
        successful_transaction.response.elapsed = 0.05
        error_transaction.request.elapsed = 0.02
        error_transaction.response.elapsed = 0.03
        sniffer = Mock()
        sniffer.transactions = [
            successful_transaction,
            error_transaction,
        ]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        messages = htmlvis.seqdiag.draw.call_args[1]['messages']
        msg_time = [msg.when for msg in messages]
        assert msg_time == [0.01, 0.02, 0.03, 0.05]

    def test_gets_transactions_from_all_sniffers(self, successful_transaction,
                                                 error_transaction):
        sniffer_a = Mock()
        sniffer_a.transactions = [successful_transaction]
        sniffer_b = Mock()
        sniffer_b.transactions = [error_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer_a, sniffer_b])
        messages = htmlvis.seqdiag.draw.call_args[1]['messages']
        assert len(messages) == 4

    def test_opens_the_output_file_path_for_writing(self, mocker,
                                                    successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        # open is replaced by the autouse patch_open fixture.
        open.assert_called_once_with('/fake/path', 'w')

    def test_writes_the_html_sequence_diagram_to_the_output_file(
            self, mocker, successful_transaction):
        sniffer = Mock()
        sniffer.transactions = [successful_transaction]
        htmlvis.save_seq_diag('/fake/path', [sniffer])
        open.return_value.write.assert_called_once_with(
htmlvis.seqdiag.draw.return_value) | tests/test_htmlvis.py | import htmlvis
import pytest
from mock import Mock, mock_open
@pytest.fixture(autouse=True)
def patch_seqdiag_draw(mocker):
mocker.patch('htmlvis.seqdiag.draw')
@pytest.fixture(autouse=True)
def patch_open(mocker):
try:
mocker.patch('builtins.open', mock_open())
except ImportError:
mocker.patch('__builtin__.open', mock_open())
@pytest.fixture
def successful_transaction():
return htmlvis.Transaction(
client_name='The Client',
server_name='The Server',
request=htmlvis.Request(
body='request body',
elapsed=0.1,
headers={},
method='PUT',
url_path='/kindness'),
response=htmlvis.Response(
body='response body', elapsed=0.2, headers={}, status='200 OK'))
@pytest.fixture
def error_transaction():
return htmlvis.Transaction(
client_name='The Client',
server_name='The Server',
request=htmlvis.Request(
body='request body',
elapsed=1.1,
headers={},
method='GET',
url_path='/rudeness'),
response=htmlvis.Response(
body='response body',
elapsed=1.2,
headers={},
status='404 Not Found'))
class TestRequestProcessingInSaveSeqDiag(object):
def test_creates_a_request_message_from_the_transaction_request(
self, mocker, successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.category == htmlvis.seqdiag_model.Category.request
def test_the_client_is_the_message_source(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.src == successful_transaction.client_name
def test_the_server_is_the_message_destination(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.dst == successful_transaction.server_name
def test_the_url_is_passed_as_additional_data(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.data[
'url'] == successful_transaction.request.url_path
def test_the_body_is_passed_as_note(self, mocker, successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.note == successful_transaction.request.body
@pytest.mark.parametrize(
'transaction, expected_method',
[(successful_transaction, 'PUT'), (error_transaction, 'GET')])
def test_includes_the_http_method_as_message_data(self, transaction,
expected_method):
sniffer = Mock()
sniffer.transactions = [transaction()]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.data['method'] == expected_method
@pytest.mark.parametrize('transaction, expected_text',
[(successful_transaction, 'PUT /kindness'),
(error_transaction, 'GET /rudeness')])
def test_combines_http_method_and_url_path_as_message_text(
self, transaction, expected_text):
sniffer = Mock()
sniffer.transactions = [transaction()]
htmlvis.save_seq_diag('/fake/path', [sniffer])
request_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][0]
assert request_msg.text == expected_text
class TestResponseProcessingInSaveSeqDiag(object):
def test_creates_a_response_message_from_the_transaction_response(
self, mocker, successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
assert response_msg.category == htmlvis.seqdiag_model.Category.response
def test_the_server_is_the_message_source(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
assert response_msg.src == successful_transaction.server_name
def test_the_client_is_the_message_destination(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
assert response_msg.dst == successful_transaction.client_name
def test_the_status_is_passed_as_additional_data(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
assert response_msg.data[
'status'] == successful_transaction.response.status
@pytest.mark.parametrize('transaction, expected_text',
[(successful_transaction, '200 OK'),
(error_transaction, '404 Not Found')])
def test_uses_the_status_as_message_text(self, transaction, expected_text):
sniffer = Mock()
sniffer.transactions = [transaction()]
htmlvis.save_seq_diag('/fake/path', [sniffer])
response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
assert response_msg.text == expected_text
def test_the_body_is_passed_as_note(self, mocker, successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
response_msg = htmlvis.seqdiag.draw.call_args[1]['messages'][1]
assert response_msg.note == successful_transaction.response.body
class TestTransactionProcessingInSaveSeqDiag(object):
def test_passes_both_request_and_response_to_the_sequence_diagram_generator(
self, successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
messages = htmlvis.seqdiag.draw.call_args[1]['messages']
assert len(messages) == 2
def test_passes_multiple_transactions_to_the_sequence_diagram_generator(
self, successful_transaction, error_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction, error_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
messages = htmlvis.seqdiag.draw.call_args[1]['messages']
assert len(messages) == 4
def test_converts_transactions_to_messages_ordered_by_elapsed_time(
self, successful_transaction, error_transaction):
successful_transaction.request.elapsed = 0.01
successful_transaction.response.elapsed = 0.05
error_transaction.request.elapsed = 0.02
error_transaction.response.elapsed = 0.03
sniffer = Mock()
sniffer.transactions = [
successful_transaction,
error_transaction,
]
htmlvis.save_seq_diag('/fake/path', [sniffer])
messages = htmlvis.seqdiag.draw.call_args[1]['messages']
msg_time = [msg.when for msg in messages]
assert msg_time == [0.01, 0.02, 0.03, 0.05]
def test_gets_transactions_from_all_sniffers(self, successful_transaction,
error_transaction):
sniffer_a = Mock()
sniffer_a.transactions = [successful_transaction]
sniffer_b = Mock()
sniffer_b.transactions = [error_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer_a, sniffer_b])
messages = htmlvis.seqdiag.draw.call_args[1]['messages']
assert len(messages) == 4
def test_opens_the_output_file_path_for_writing(self, mocker,
successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
open.assert_called_once_with('/fake/path', 'w')
def test_writes_the_html_sequence_diagram_to_the_output_file(
self, mocker, successful_transaction):
sniffer = Mock()
sniffer.transactions = [successful_transaction]
htmlvis.save_seq_diag('/fake/path', [sniffer])
open.return_value.write.assert_called_once_with(
htmlvis.seqdiag.draw.return_value) | 0.453988 | 0.215433 |
from aws_adfs import account_aliases_fetcher
def _aws_account(account_alias, account_no):
return u'<div class="saml-account-name">Account: {} ({})</div>'.format(account_alias, account_no)
def _account_page_response(accounts):
response = type('', (), {})()
response.request = type('', (), {})()
response.request.headers = {}
response.status_code = 'irrelevant'
response.headers = {}
response.text = u'''
<html>
<body>
<div>
<form>
<fieldset>
{}
</fieldset>
</form>
</div>
</body>
</html>
'''.format('\n'.join([account for account in accounts]))
return response
class TestAccountAliasesFetcher:
    """Tests for account_aliases_fetcher.account_aliases: the fetcher should
    return a {account_number: account_alias} dict parsed from the AWS SAML
    account-selection page (faked by _account_page_response)."""

    def test_returns_empty_account_dictionary_when_no_account_are_named(self):
        # given user with no aws accounts
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([])
        # when gets account aliases via fetcher
        accounts = account_aliases_fetcher.account_aliases(self.authenticated_session,
                                                           self.irrelevant_username,
                                                           self.irrelevant_password,
                                                           self.irrelevant_auth_method,
                                                           self.authenticated_saml_response,
                                                           self.irrelevant_config)
        # then returns no accounts
        assert accounts == {}

    def test_returns_one_account_when_one_account_is_listed(self):
        # given user with one aws account on the page
        account_no = '123'
        account_alias = 'single'
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([_aws_account(account_alias, account_no)])
        # when gets account aliases via fetcher
        accounts = account_aliases_fetcher.account_aliases(self.authenticated_session,
                                                           self.irrelevant_username,
                                                           self.irrelevant_password,
                                                           self.irrelevant_auth_method,
                                                           self.authenticated_saml_response,
                                                           self.irrelevant_config)
        # then returns that single account keyed by its number
        assert accounts == {account_no: account_alias}

    def test_returns_two_accounts_when_two_accounts_are_listed(self):
        # given user with two aws accounts on the page
        account_no = '1'
        account_alias = 'single'
        second_account_no = '2'
        second_account_alias = 'bingle'
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([
            _aws_account(account_alias, account_no),
            _aws_account(second_account_alias, second_account_no),
        ])
        # when gets account aliases via fetcher
        accounts = account_aliases_fetcher.account_aliases(self.authenticated_session,
                                                           self.irrelevant_username,
                                                           self.irrelevant_password,
                                                           self.irrelevant_auth_method,
                                                           self.authenticated_saml_response,
                                                           self.irrelevant_config)
        # then returns both accounts keyed by their numbers
        assert accounts == {account_no: account_alias, second_account_no: second_account_alias}

    def setup_method(self, method):
        # Bare anonymous objects stand in for the session/config; only the
        # attributes the fetcher touches are populated.
        self.authenticated_session = type('', (), {})()
        self.irrelevant_auth_method = {}
        self.irrelevant_username = 'irrelevant username'
        self.irrelevant_password = '<PASSWORD>'
        self.authenticated_saml_response = 'irrelevant saml response'
        self.irrelevant_config = type('', (), {})()
self.irrelevant_config.ssl_verification = True | test/test_account_aliases_fetcher.py | from aws_adfs import account_aliases_fetcher
def _aws_account(account_alias, account_no):
    """Render one saml-account-name <div> the way the AWS console page does."""
    template = u'<div class="saml-account-name">Account: {} ({})</div>'
    return template.format(account_alias, account_no)
def _account_page_response(accounts):
    """Build a minimal fake HTTP response whose HTML body embeds *accounts*.

    The anonymous object mimics just the attributes of a requests.Response
    that the fetcher under test reads (request.headers, status_code,
    headers, text).
    """
    response = type('', (), {})()
    response.request = type('', (), {})()
    response.request.headers = {}
    response.status_code = 'irrelevant'
    response.headers = {}
    # NOTE(review): indentation inside this template was lost in extraction;
    # reconstructed plausibly — confirm against the upstream test file.
    response.text = u'''
    <html>
        <body>
            <div>
                <form>
                    <fieldset>
                        {}
                    </fieldset>
                </form>
            </div>
        </body>
    </html>
    '''.format('\n'.join([account for account in accounts]))
    return response
class TestAccountAliasesFetcher:
    """Behaviour of account_aliases_fetcher.account_aliases for 0, 1 and 2 accounts."""

    def setup_method(self, method):
        # Bare anonymous objects stand in for a requests session / config object.
        self.authenticated_session = type('', (), {})()
        self.irrelevant_auth_method = {}
        self.irrelevant_username = 'irrelevant username'
        self.irrelevant_password = '<PASSWORD>'
        self.authenticated_saml_response = 'irrelevant saml response'
        self.irrelevant_config = type('', (), {})()
        self.irrelevant_config.ssl_verification = True

    def _fetch_aliases(self):
        # Single place for the six-argument fetcher call the tests share.
        return account_aliases_fetcher.account_aliases(
            self.authenticated_session,
            self.irrelevant_username,
            self.irrelevant_password,
            self.irrelevant_auth_method,
            self.authenticated_saml_response,
            self.irrelevant_config,
        )

    def test_returns_empty_account_dictionary_when_no_account_are_named(self):
        # given a user with no aws accounts
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([])
        # when / then: fetching yields an empty mapping
        assert self._fetch_aliases() == {}

    def test_returns_one_account_when_one_account_is_listed(self):
        # given a user with exactly one aws account
        account_no = '123'
        account_alias = 'single'
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response(
            [_aws_account(account_alias, account_no)])
        # when / then: that account comes back keyed by its number
        assert self._fetch_aliases() == {account_no: account_alias}

    def test_returns_two_accounts_when_two_accounts_are_listed(self):
        # given a user with two aws accounts
        first_no, first_alias = '1', 'single'
        second_no, second_alias = '2', 'bingle'
        self.authenticated_session.post = lambda *args, **kwargs: _account_page_response([
            _aws_account(first_alias, first_no),
            _aws_account(second_alias, second_no),
        ])
        # when / then: both accounts come back keyed by their numbers
        assert self._fetch_aliases() == {first_no: first_alias, second_no: second_alias}
from . import sys, librosa, np
def voice_frequency(
    audio_path: str,
    sr: int = 1000,
    sr_scalar: int = 22,
    deno: int = 30,
    freqdiff: int = 13,
    freqmin: int = 65,
) -> float:
    """Estimate a dominant voice frequency from an audio file.

    Loads the file at a low sample rate, thresholds its STFT magnitude
    spectrogram, groups surviving frequency bins into bands separated by at
    least ``freqdiff`` bins, and returns the decibel-weighted mean frequency
    of the first band inside (freqmin, sr / 2).

    Args:
        audio_path: path to an audio file readable by librosa.
        sr: initial sample rate for loading.
        sr_scalar: multiplier for the fallback high-sample-rate pass and for
            rescaling bin indices when that pass is taken.
        deno: divisor for the magnitude threshold (peak / deno).
        freqdiff: minimum bin gap that starts a new frequency band.
        freqmin: exclusive lower bound for an acceptable band frequency.

    Returns:
        Weighted mean frequency of the first acceptable band; 0 when nothing
        survives thresholding; sr / 2 when no band is in range.
        NOTE(review): bin indices are used directly as frequencies without a
        sr / n_fft conversion — confirm the intended units.
    """
    y, _ = librosa.load(audio_path, sr=sr)
    y_fourier = librosa.stft(y, n_fft=1024)
    data = librosa.amplitude_to_db(abs(y_fourier))
    more_sr = False  # set when the high-sample-rate fallback below is taken
    # Threshold: keep only magnitudes above 1/deno of the strongest column peak.
    minval = max(np.amax(data, axis=0)) / deno
    data = np.clip(data, minval, None)
    data -= minval
    # `.any() == 0` means "no nonzero element survived" — retry at a higher
    # sample rate with a much laxer (shift-up) threshold.
    if np.cumsum(data).any() == 0:
        y, sr = librosa.load(audio_path, sr=sr * sr_scalar)
        y_fourier = librosa.stft(y, n_fft=1024)
        data = librosa.amplitude_to_db(abs(y_fourier))
        more_sr = True
        minval = max(np.amax(data, axis=0)) * 2
        data += minval
        data = np.clip(data, 0, None)
    # Collapse each run of positive values in every column into one averaged
    # sample at the run's midpoint, zeroing the rest of the run.
    for i in range(data.shape[1]):
        col = data[:, i]
        # NOTE(review): this inner loop rebinds `i`, shadowing the column
        # index (the outer range iterator is unaffected, but it is fragile),
        # and `col[i + j]` can run past the end of `col` when a positive run
        # reaches the final bin — TODO confirm upstream inputs prevent this.
        for i in range(len(col) - 1):
            if col[i] > 0:
                cumsum = 0
                for j in range(len(col) - 1):
                    if col[i + j] <= 0:
                        col[i + j // 2] = int(cumsum / j)
                        break
                    cumsum += col[i + j]
                    col[i + j] = 0
    data = data.tolist()
    # Total energy per frequency bin; keep only bins with positive totals.
    decibels = []
    freqs = []
    for i in range(len(data)):
        if sum(data[i]) > 0:
            decibels.append(sum(data[i]))
            if more_sr:
                freqs.append(i * sr_scalar)  # rescale index for the boosted rate
            else:
                freqs.append(i)
    if len(decibels) == 0:
        return 0
    # Partition surviving bins into bands: a gap >= freqdiff starts a new band.
    counter = 0
    freq_db_over_zero = [[(freqs[0], decibels[0])]]
    for i in range(1, len(freqs)):
        if abs(freqs[i] - freq_db_over_zero[counter][-1][0]) >= freqdiff:
            counter += 1
            freq_db_over_zero.append([])
        freq_db_over_zero[counter].append((freqs[i], decibels[i]))
    # Decibel-weighted mean frequency of each band.
    wght_avges = []
    for fds in freq_db_over_zero:
        avgs = [fd[0] for fd in fds]
        wghs = [fd[1] for fd in fds]
        wght_avge = 0
        for i in range(len(avgs)):
            wght_avge += avgs[i] * wghs[i]
        wght_avge /= sum(wghs)
        wght_avges.append(wght_avge)
    # First band inside the accepted range wins; otherwise fall back to sr / 2.
    for i in range(len(wght_avges)):
        if sr / 2 > wght_avges[i] > freqmin:
            return wght_avges[i]
    return sr / 2
if __name__ == "__main__":
voice_frequency(*sys.argv[1:]) | backend/utils/voice_frequency2.py | from . import sys, librosa, np
def voice_frequency(
    audio_path: str,
    sr: int = 1000,
    sr_scalar: int = 22,
    deno: int = 30,
    freqdiff: int = 13,
    freqmin: int = 65,
) -> float:
    """Estimate a dominant voice frequency from an audio file.

    Loads the file at a low sample rate, thresholds its STFT magnitude
    spectrogram, groups surviving frequency bins into bands separated by at
    least ``freqdiff`` bins, and returns the decibel-weighted mean frequency
    of the first band inside (freqmin, sr / 2).

    Args:
        audio_path: path to an audio file readable by librosa.
        sr: initial sample rate for loading.
        sr_scalar: multiplier for the fallback high-sample-rate pass and for
            rescaling bin indices when that pass is taken.
        deno: divisor for the magnitude threshold (peak / deno).
        freqdiff: minimum bin gap that starts a new frequency band.
        freqmin: exclusive lower bound for an acceptable band frequency.

    Returns:
        Weighted mean frequency of the first acceptable band; 0 when nothing
        survives thresholding; sr / 2 when no band is in range.
        NOTE(review): bin indices are used directly as frequencies without a
        sr / n_fft conversion — confirm the intended units.
    """
    y, _ = librosa.load(audio_path, sr=sr)
    y_fourier = librosa.stft(y, n_fft=1024)
    data = librosa.amplitude_to_db(abs(y_fourier))
    more_sr = False  # set when the high-sample-rate fallback below is taken
    # Threshold: keep only magnitudes above 1/deno of the strongest column peak.
    minval = max(np.amax(data, axis=0)) / deno
    data = np.clip(data, minval, None)
    data -= minval
    # `.any() == 0` means "no nonzero element survived" — retry at a higher
    # sample rate with a much laxer (shift-up) threshold.
    if np.cumsum(data).any() == 0:
        y, sr = librosa.load(audio_path, sr=sr * sr_scalar)
        y_fourier = librosa.stft(y, n_fft=1024)
        data = librosa.amplitude_to_db(abs(y_fourier))
        more_sr = True
        minval = max(np.amax(data, axis=0)) * 2
        data += minval
        data = np.clip(data, 0, None)
    # Collapse each run of positive values in every column into one averaged
    # sample at the run's midpoint, zeroing the rest of the run.
    for i in range(data.shape[1]):
        col = data[:, i]
        # NOTE(review): this inner loop rebinds `i`, shadowing the column
        # index (the outer range iterator is unaffected, but it is fragile),
        # and `col[i + j]` can run past the end of `col` when a positive run
        # reaches the final bin — TODO confirm upstream inputs prevent this.
        for i in range(len(col) - 1):
            if col[i] > 0:
                cumsum = 0
                for j in range(len(col) - 1):
                    if col[i + j] <= 0:
                        col[i + j // 2] = int(cumsum / j)
                        break
                    cumsum += col[i + j]
                    col[i + j] = 0
    data = data.tolist()
    # Total energy per frequency bin; keep only bins with positive totals.
    decibels = []
    freqs = []
    for i in range(len(data)):
        if sum(data[i]) > 0:
            decibels.append(sum(data[i]))
            if more_sr:
                freqs.append(i * sr_scalar)  # rescale index for the boosted rate
            else:
                freqs.append(i)
    if len(decibels) == 0:
        return 0
    # Partition surviving bins into bands: a gap >= freqdiff starts a new band.
    counter = 0
    freq_db_over_zero = [[(freqs[0], decibels[0])]]
    for i in range(1, len(freqs)):
        if abs(freqs[i] - freq_db_over_zero[counter][-1][0]) >= freqdiff:
            counter += 1
            freq_db_over_zero.append([])
        freq_db_over_zero[counter].append((freqs[i], decibels[i]))
    # Decibel-weighted mean frequency of each band.
    wght_avges = []
    for fds in freq_db_over_zero:
        avgs = [fd[0] for fd in fds]
        wghs = [fd[1] for fd in fds]
        wght_avge = 0
        for i in range(len(avgs)):
            wght_avge += avgs[i] * wghs[i]
        wght_avge /= sum(wghs)
        wght_avges.append(wght_avge)
    # First band inside the accepted range wins; otherwise fall back to sr / 2.
    for i in range(len(wght_avges)):
        if sr / 2 > wght_avges[i] > freqmin:
            return wght_avges[i]
    return sr / 2
if __name__ == "__main__":
voice_frequency(*sys.argv[1:]) | 0.242026 | 0.383237 |
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the api/api_proto/features.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>aQ6qvKIVnTWnVqr+vhOpVlpOWUBZ+nyy+rIU3P99XiPaosstUqN'
'xpM8Wc8nUv96vJZU7qYjRkPMqXq71hLrX1EJzrnacDa4DtJP4kEtqC33d/66GtkTlEU8oFQTPq'
'6u41cQ0jjZW4w3JVveURaWcbE/jihWVkWb2lLZIBSJFYXMX1apDjbPJtzfNVTW18yojTU/poyX'
'ldopbVZqpValXiMxO0otumlFX778bEdHFkFYizPQbVrOtjSBwSdkKEBqUefJn1YD05XmNnSjKa'
'/jDarIettZrZRJjXgxDFShnE8ry59Xs8v/R0OlpxrgcJxdtWapUK207TAG/aYqaW9vlxqPSdd4'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'skrtMWOwuhb3T0xyx/bJ5XJ5T4/XXeYI1lgnztMnHLmzz5h7t467Zgu7ddx7qrFbxy5TDs1ljw'
'Hfz6X7JMPP5SmzBuAypxK+Edsa9co9OV/IHeiS6qK9qQaeGD+t/J612jHK5/Zug50DMOAXlPJG'
'SmvEK/TEWJsb3TvRhSqq3o7xxjro6097DLu5Q13TXcxPqeTuscc67Cu29zCWyz8tiwveVNluw4'
'<KEY>nX6WrP5GuIcj9zfC<KEY>bTQIWNsqs7fHtU74piF<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'wXj0CP00rKCKnlVKmZGAFeoJjBjwOxgJwPceyJ1QoUjAhPy95uuqR4WRgKTeSFYogO3NXRIqCN'
'SLv8DFIGOfucpJBlKRYaGgWN/IFaGgWN/Ln+JikNRvPuIkBOmPHBYK047MCAXF+hebXAyIpMsN'
'9Uq63IJQLOlyC2JOl1vICg64xUJQbMAtFoJiA26xEBQbcIuFraBl3uSkMBSzIgNCQTErdUwoKG'
'ZdmOJiESuYMu9xUgSKpSJDQkGx1PBFoaBY6voKF4tawbQ5zUlRKJaOJIWCYmnriFBQLH3uZS4W'
's4KD5m1OikGxwUhKKCg2OHhKKCg2eLnAxeJWMGPe4aQ4FMtEMkJBsUz2vFBQLHO1yMWUFRwya5'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'xzNZHrmDlIypqk7Jh55ihBm8R2jIUwie1YIikUsB1LpYlt0ApdDFzaR1ns1hdjR4htENmOs7JB'
'Unac+QSJ6zgrGySu46xskLiOs7Ig6ycDz+3DFb3CJ2OnqUDYCl0PvLhPAfQH12NnSMwwivkCix'
'kmMV9gMcMk5gssZpjEfIHFDJOYL7CYESs0GZjahyu6k0moEywQtUI3A4V9CqAjucnWjKKYsyxm'
'lMScZTGjJOYsixklMWdZzCiJOctixqzQ7cDCPk0H/dBtbjox5DpvHqCmE6OmM2/e1k0nRmznzR'
'6hoNx8b1YoYDs/Mkps41ZoKbCyj7Lox5ZY2TiyXTazxDZObJfNJfLYSIYxMSYUlFuOp4QCtsuZ'
'IWKrrNC9wOvd2F7WbNEP3ovliK1Ctq+xtorYvmbeI6+FZAQTe4SCcq+xtorYvgbaahRI+hT7Fi'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'BE2SoB+Y79OcAckwJsaEgnIfxHuFAkE/SOKEC1FgnPxFA8afPkqDyg0D+YHAQo/A5JiQlDueFD'
'KIJIxBGgoGv88a5rDqp0SYHEaA/EVjkHPDtA/TBQs5f9aIp4UMIjmUZSyg/pZhHuHEYIhIJWQE'
'yYQICQMbkKmDQlLZw3lGgqKf85BCmhQkHBw+ZyRSQhpIpgUJx9XPeUgg/982zAzrFwb9gPycId'
'BhnR4R0kAyOiBkEMn0oDoG1T1gRX7JCHyha31DC0lAfQ8AxC8ZsQNUxrIiXzICX+5a5pIuY0GZ'
'LxmxQySzBY0k9KuwXiCZLWwlESC/ZBwmqSxsJ5geE9JAEloKk0Ekoakg/5QV+ZoR+MZ+MqcA4m'
'<KEY>'
'<KEY>'
'<KEY>
'FN0zpPsPReIM6f5D0T1Duv9QdM+Q7j8U3TOo+4eie4Z0/9BDQt0/FN0zpPuHonuGdP9Q+lAGfc'
'SPpD1myEcA+aEh0NjHfiTtIUM+4kfSHjPkI34k7XHIivyeEfiH+9lxCCB+z4C5C5bJWpE/NAL/'
'aD87ZqHMHxqxYyRzFu34RwaMoihzltowkH9oaMNmyZLwoUdIA8nerJBBJEdGGQsSPxIfmUVLRo'
'D8I+MA50ZbfiSmzZItPzISaSGDSLKPzKItfyK2zJItgfzIGObcaMufiC2zZMufiC2zZMufiC2H'
'rchPjcA/3s+WwwDxUyN2nMrkrMifGoE/368/5qDMn4o/yqEtfyYy58iWQP4p+6Mc+YOficw5su'
'XPROYc2fJnBg9dOVTozwwzx1hoSyB/ZlicG8euP/OwDMoeHxQyiGR2mHQZsSI/N3BXq/v0BHUZ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'oO2Jky/jiZMvNG21vg5K86l4bzFKNJj2mOqDhgYJUIsVMGmDT8564WvB/ZgvqygfUVtDKorBNF'
'<KEY>'
'StwpVdvORK18d+sx1vNDJKWeibCSKvho6zEzwJ/QNpQX1WCNqPh26Z1VCVhAu8bgA4YoNBESoy'
'9abHBN5B8qtVQCVV9tO43HWC9v4w9fkye6i3YASclcRZrARu2v/yY06iA2al8DaJ7+iqHibm+w'
'Eio6v7C6fG9xJhmwelV8Zn7ltiYNqwea1vyypkyklpaLmgpi1pWlGSZDSE5PLM9oMozk5MLCnC'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'gfBhEH44cKSb8M+T8GEUYjjch2zDJHzOpA2esJYvpzeWw/poJtfTyxkhacRMcpJBVEIoyDjS18'
'8ZgRg1+zkJi43qLYuwPn4Z7RXWIPgBNyPuzB5wM+KBywE3Y8gKHnRZ42bqQZc1HrEcdFmDmQ65'
'GXET9ZCbEQ9VDrkZaQNdMuK+p+1mxGMU280YtYKHXRlxv/OwKyMenBx2ZYxZwbyZ5iTcosy7xf'
'CoJM+7IjABOtF9c/yKtyV/AtrjimzJnzIzuVl7eWF64eR6Y629Sf1chpfzly98YvzUVXu6XjvR'
'wk5i0+zELkw3sedIX9FfsV9iE6PN/dAp84S7nx9GNjHf7v6p+IBvd/8UNDF9RABJp3kbz6CGet'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>sTdfc1GQ4TUXBSv+GpsASr0cmN7HBOg1Xub6C6EJ'
'<KEY>ZIIJ82VdfyEywQSbIEQmmIgPCgVsJ7LDjILHJSx8iEwwaU7kOCeaYNJFQRNMcncKkQ'
'kmQfjrhAImmDJH8+ftGzAINpwNp+HU1nFgA2VgzlCq2hTM1hyznXOb5+y18xfHL13mXhwim02Z'
'kxmGRptNuWxRwqn4kFDAdop3omFZOBuY28dm6EBnY/3eKVSBOyydQoUK5uyAHDWFMTHmO4YqxJ'
'O+Y6gCdNgXCQWSbpm5/EXb2QaWYzh5qK8119sNmGdUK285dh5H+dq5c+dedt4pbe/oOU2e9Q2T'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'n29umZtaeH2+udVxvrnFvk2fb26xe4+j8BVufHESvmJuZTgnCl9xUVD4Cje+OAlf4cYXx8b3gE'
'0Qp8b3wKxYnBO77gMXBRk+YBPEqfE9GBCNgHjLPMpJMFUASgkFIG8lRAUcLd+yDgmF5fJH3C2q'
'DxPqme4MPeP9o8OdaTq4ufPy0S8ZKioxjOdUnPYQfGHCe+wzxCiPhD//H7xKkP/IUAnfZQjrmN'
'w00rL0747F1qkoRqNUe4u3I+g3qlIql1mVrlsmMcqDqpxQ/fi7vNqqbMOSDsYGErC32Eefl+Wr'
'<KEY>'
'<KEY>'
'<KEY>'
'IuyybQy7GsOSSLFzRk1oz6lmPZWK9vOZZlE9BybNhFQUMOm1lZ1KEhhzuWY8NxQUFDDrsouNR3'
'F3VoyJw5LCg4wPOuAC/HckoWdWi7nLuoC+KuwAHfcmzEzMmiDg050rEcG4m7aViQQy0MXO6Puh'
'<KEY>mComC<KEY>DLAcTQ64rvq/1tT+7tXnpw/qXnVe9sLOP2qUdqi7aV/9FD+e'
'/7apYhO8k4+3VPSRgeelrV0HA+TbNuT44RNyNuA0/Pd59vCHPZKPruZccDf49fFD1heQzcLwaY'
'Bs/Q9CCae1yt49WgwDtVADIAU/WuyNw928cVxn4ps4O1ulpr6JE92t4yImkY47/CvfUvGJbadW'
'3sZo7M5DFWP3ocoZZdWcR6v1xmrZqbZKq3p/XA9i/ZCy0JjG77SLjjvhdUDSefSIFoMPlJj/Oz'
'DIT7RapfUt4ouHMC7l7Xz3eB8LZSuHh0FVxzdwujQOLs3Ku5pPqEi/8ewB9zYRkY6CeNzkb7Tp'
'<KEY>'
<KEY>'
'<KEY>/'
'nO8tyGW/Rlg16b8NpNM6t238Hz2l3Rn9G6otxDP+o9ia4eIiH5eALkm4qR6Xv0BMj3Ga0/pKJg'
'/eZOaTvbS6aPVJpLQGG1wJyD6yXbp6sFvuh6wTrH5I1qaTPbr+/9An0DyPz3DKVIKt3l/rcdnH'
'vGZfrPuJ5+ItfpYkLP4GL+rVJhfR3t47Xw7jPkcbroDI7UJ5OvnbhHr3T7mU9hO+bsXVu7N2c/'
'Dc153X/vdo/ckfV1uc5Jx8I6e3T3dU45Sy7Gq/yrab2g+jpuGzahSwSfct2w13/dsGnNqME1sN'
'NbMBGHRui7SxrvepfU4gILtYJ7qXRSpehrpbbpB1FdQQYku4fxisqWS7XNKmL4ZCKgoa5Ag1Jm'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'woqZf3GLOa0i5EOb2czuMuRlp9Hj6Bz5r8SUojZKMzBoTf4Qk8T4qCxNZRINPrEBPUO3U5mFdr'
'jEru+meC7xokqwS1yFxT5fW99zykBucaJchu7TJ0UadA2SQhP2nlPrUvq2JDQR8nget/BT3WMC'
'MwvTl6Hpu2WZbeSpxfukOHN/TvV5Dp3Yd3fqPa5TR94vqgFfSWYe61q43y3s6t3n+hvNOf4Uj9'
'MjHof1HvCVZd5PzId8xfvd4sz9Cnu75up61Sk1wFcGu0wqtK9rTmE2a4KHEc/zk+Q9XV12cs3v'
'9VH2myqzG4IV6O2KkupAYRWgAjoGDpKkrytG/5pv0EBBplW6szyL0b/PwMoQbjX2+0cf7GDJru'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'A3DDjTx+oUT9ZwqqUWStxuYsamkqA1feg+xhE1emI1ZvPGRFMHo/mW9m40mfI23XuiJ2TTvTd6'
'RDbdBwLZbnvFz3mb7gMcsUCb7t4WdUBvUVu+TXeLD7P0pruVSPs23XGL2t10T5nHZCc9hJQUwz'
'tsqYTl23NPpWzfnnvqyFFvzz3NN+H0nnvaTAkmbhWnzaRvzz09MOTbc0/nRrw990F35x63igfN'
'9Khvz33QPUVAyw2qpG/PfdDduQ/hTXMxC24VZ8xB2bkPRTBRUHCrOKPELLhVnHHNEsaL54ISBp'
'QhMzPMOcMRTBQrYUjEkGtcvJo/pPf/MfRuNHDs6XVKoXej4ZQXenegI/TuQEfo3QF/6N1BM8VJ'
'<KEY>'
<KEY>'
's4FPGLn/aeguzzt96Irqtepju9mutKgmsNNzdClFlYLHcXfzMPoCI0+gC9t3wWNQhMZ6u9Ggp4'
'LA1W04DbvZarTXWxSr4m0DsjvjoFP0gRx5WmrWa3Zprd5uif+gEGnxfKXttcpmu95mL/JImG6V'
'HoKvdHe0SertehOwt0q1TQcFfEr0CbaOs7EB9UBOmC6Y2dwbbJjSY2SMeqMk6L9L4PIq1dZZcM'
'DAZr3dbNW3tbAUmkN+sfIQytQVSGnLutGnT0c84QXzrBwMYZDAhY54wgtuJCA6pQuZIfXbhpxg'
'XTLt3FeNDjFLds15pF2uNjEOK4/Ag2sN6uKPxUXnJ5rNyiaMO/kxhbJXWh4SrK3XnbNNZ6fUID'
'9PMTegPpvUhViqvOucnbPP0r9LeVc3HZx3QY6rdHCe/1jtUlxeQqDgvIOH1Kwcq10xh3LXfPUp'
'<KEY>'
'<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'mjcl1hPb3GxHTOos89UxqbNcHxSTWuiISS2YsznOafhCFHVMaiHuj0kt+GNSb7mymDre0B+Tes'
'tFMX3xhjom9ZYrS9CLNwxSC3PjDYPUwl5xI1uDvnjDILWwV1xZoIXN8bF8kFrYnPmKyILzmzme'
'3wSphc0piePFFjbHx/JBFPq2i4It7LY5l+WcYUoUFGxht10UbGG3XRR6TGOQUSL6pQ1BwRY279'
'<KEY>T<KEY>sAUXBVvYQlw4YAtb4BCBIEbbL3IwFxAhpOTl'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'mCgo+FbF3aiEpfcByl0rLaHgrwdK+0QzYU95nUc7CgV/g+f+OhT8DfN1LXyIFlZvcP3pUPA3Em'
'<KEY>'
'gR5eV6MSUI7OZpV9RgiJ+65G6Hnum6sSHx6MYKJohJ7nvqsRep77oNExig8PO4HPGt2c+PhzXo'
'S4E+v1IsQ32HHqCPEN09G1pCPENzoixDfcQG005YYbqI1hqG6gtqGfa5FAbTTlZke492ZcArXR'
'lJtsBAr33nKj1U0dv5rhnKYvflWHe2+50epovS12VmEkKjw0h8mUFXNLpEZTVviSSphMWekRDm'
'<KEY>LAvfjVMa9S33Feg'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'eZKTwB0DJcV6AaSZkBaM3riZyQsFIM1jJxikDx/5GWJR+vQLQILZRy8AyXUJ9MYt97oEeuMW+/'
'Sw2W8F27xVBASgtM2WNI7+CCZKy8eHgNpK+k8/oLSHRxglaQUfmjajJAHlodke5ZzJMCYKCj4O'
'9FBJ008CysMDhxhlwAo+clEGAOWR+VAMOBDGREEZAJRHLsoAoDxyUSwr+I6ZZxQLUN4xHwmKFc'
'ZEQbEA5R0lclqA8s6hw4ySsoKPzTFGSQHKY/MdqYdUGBMFJQUoj5V0oRSgPD4l78KlreC75ilG'
'SQPKu+ZjaZ3pMCYKShpQ3lXS79OA8u7xk4wyaAU/Dc1FowwCyqfNd6W3DYYxUVAGAeXTSuQcBJ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'2nVMb9kCZ9dm9TP+K9rHV8tNiubNgUjXluttSko6iTJ/TZ3YlT52x7kWJu9dZGqVqtP9IbY4r2'
'r2pOE7ddeGdO77njTlIFZrN2fq3+jlPO8+465afZ7067sVNvOueUXajZt5YW5sfsUqfgeCKxg4'
'cStZa+pl6ym5VtupxO2WCCjO+S0O2pCNjh84ZcYcJ3Sb5g8LydLlABefCokEEkT+hKxCtUoV82'
'YADXWPjGCpBfME7JpaoIpceFpOwqJWQQycwQY4HP+BV5RUjfpPoVaRB0kwrIhNzWQsa/Im960V'
'0qIPkVoQhSXzR4tIjQ5OSLHhK+DvZFDwlfB/uikTosJJU9eoyRQvhalnmaE0OaFCS89PYlIzEk'
'JD2tlT0mZBDJk6cYSb+kdZYTcaX5qx4SzFKAdJHC9M5W9qSQ9M7WmTFGgrxflnebIrTa/LKHFK'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'Bu3cF0BsiTpxmpzwr9hgHzTZ2IE5rf8JD6Iki6SDCjATItLROmNECeOctI/VboN9Gla28Ak5oI'
'kL9hCDRMazBdoGFeA2QiJ2QQyQOHGCuJ77SBfhoLpjYRIH/TkBpKRihdvFSSnnU7KBrC9AZI0P'
'AYXa6MfNsI/M7TXk9M6OuVoW8bsUHij/crQ79twAQN+dMFywiQ3zZ0F6VnWzE9KaSB5ICbGkQy'
'N8JY9FYb9w59zfI7YoYo+cjviD+ie5ZApo4KGUSSe0cUfeR3DZ4eRclHftdDQs/wXQ8J2X7XSJ'
'0QMojk6TOMBNT3PJnQR37PQ0If+T3pZ1Hykd+THhslH/k9TyYo+n1PppAmBQl95Pc9JPSR35ce'
'GyUf+X1PJvCRPzDMY5yIPvIHHhL6yB9IS4ySj/yB9Ngo+cgfGEeOUo3HrMiPjcA/2O8tRHREPz'
'<KEY>
'<KEY>'
'<KEY>'
'wB95WCa9lcfvidKlVSBTabKLsiJ//LQ36biO0bH/sRFLEn98JDj0J/K+Hr0SHAHyj/lNOHonGN'
'OVkAaS/L4evRQMJL+vh08Fh34qb9XRW8ERIP+E39ej14IxPSYkZee36ui9YCD5fb0EvZX3Z/u9'
'L5fQb+VpXRL8Vp6eeyW8t/K0LonOt/IS8lZeSkh6Kw/mXsAfulLkLwwMgnmqhwyjCn9hhIl/GE'
'9QQz9nW4XpGBPJsJAmvYUX57z0uJ2b19Ck5AUjAunmBeovDTPBiVj0L/nZVCQpNa44L2jxT/lt'
'xTDdnEMyKqSJpEpwXnAnf2WY/ZyInuivPJHwNPCvjN4+zgu2+2f8Nh9SBpFxIU0ke3o5L7SZf8'
'4jAVIGkSJ+xESyr9+9efeTvNrnMp3VvyuyEHflp+ttqA59xaTjVTSDb4zk80rdqNZLrT3ymL48'
'<KEY>'
'PDeSbfU6n1+vbuuM3J3rtsflqnLRqvnd6stLbaaxRFtVmvlmqbXlXt4DW2pltj/80wvmsGby5O'
'/tg8eFPjLko86F2nWn2lVn9Uw2tuzVsfHcQ555FA21B/3kehZkcC1vjPevTycL1etSfbGErRtM'
'/aGupE0y6XWiVYhbachj4At/XDYKojPu3Cc1wAVoPrsN7cOyzt6dFiOyzE2TUtxHmFf4mtXMHV'
'<KEY>'
'PL3vtkG3VcHuM6e71eK1ewUBMLAY7Tugoi4X+ndwnWpOW5L9Buu91sgeb0ohpFsqzVH2ISW0zZ'
'tXqrsu7wSZIb8ODjWCvvEgf4rVdLlW18Fa2LEHgC79lChAAdy+11x5NDeYJ8LDmURAaW6+ttPA'
'orSSWdx4djIKVhQ0txGpVStemZmioIEpXtl95Vat6pUEl/IIy/bdXqXhrZvdJqKnq+jqDqDTdG'
'hWI4WnXbqZXhK8UsghDbGKOibQKtk6+30cNzSgImN1qPsJlwC7KbO846tiAoVcGG1cC2U9OtqN'
'<KEY>'
'<KEY>'
'<KEY>'
'y4WplbmJor24UlxcWJqxUbPpwtLU3ETh9sz0OeAPPO2ZOzPzy/bS7MTcXKeiysZw2CJK71fTnp'
'wBKScm52aQFek5XSjOTC2jQt6vKTAeCDg3puylxZmpAvwCe8yAOhPFe2MMuoTBvqDVxJw9PXF7'
'<KEY>pZXJ<KEY>CQ6Y'
'nlCWINGGAuSIffkytLBTJcYX55plhcWVwuLMyfglq+C5YBKSeg7DRZeGEetcW2MrNQvIewaAeq'
'gTH77uwMfC+iUclaE2iGJbDa1LI/GzAEI4JKnp72/MzNucLNmfmpGUxeQJi7haWZU1BhhSXMUC'
'DG0AaA6QppjRUFcin929d0x6g+7cINe2L6TgEl59zQApYK3FzIbFOzbPNzavw/G7YMYbRFCD0F'
'PCzGpp2s1WtneWvxlE1D1zmMZcZIYyTIIUNP3WhX9W6ks73mlMvoaVyQpjia+<KEY>nHH'
'<KEY>'
'<KEY>'
'<KEY>'
'vm/qdx83diljrQ01hU90Sjz0eCylbImHvmymcilC1Uxcm+HkXwdJXzbHJRoYD6AvdzzEeTnu/5'
'tJlwcs5eho0quBF4zcvb312cDZ5/7qeJPULtoYHHJ3SOI0r5tWziJQYtGhjA6uvC5heDq48nrH'
'6yDXO14HuQ5rLkdH/00GZroqU8EZ8P7KeBNlTxl3g13iBydZGYofnHaVIRYdyuiYwmlz0h9TOM'
'3K6JjC6ZhE7aEy06DMpo4ruxW43bWltZ9Rm5V91cGz/Fvc0ig0bc5tae0n9dHxanPmLTcmLYwl'
'/PFqc3EJnEF95rilYawQvibXvXIujT9T5fDio0tLw5iCIldOSP/dIl/lXBrvUCbEf8uo6Ia5UC'
'xRVCiMJeLK0SEwy1w5sJZ8LfDGUyvnWbRZ2VcdjG54jSuHQk5e76icXfroOJTXzdfcWJMwlvDH'
'obwel1NE1Od1qJy6fupvLeAYufW99VmD5dz+2riLPk+X+60Gkuju72/ApJVcozwbuBYbUAfl2c'
'CyOZAbIHxk1qGVfg6wbK65T/6FMX9EKIAqR3uEAq3K/UmqpagVehDY7lpLuhfsr5dvpdqlC2Hk'
'xwOuJXrGr+rWEp8x+vXRb/tVzQf+t/2qHW/7VTve9qtyF4pZoQb+aaMuXYiChZ+hmtw1dRdtcL'
'hscBeK6T+OJF2IWHQoE+M/mNTwv/XX4i6k3/prcRfSb/21vCeL/hdqVccN')))
# For each .proto file embedded above: its FileDescriptorProto plus a
# name-keyed map of the services it declares.
_INDEX = {
    file_proto.name: {
        'descriptor': file_proto,
        'services': {svc.name: svc for svc in file_proto.service},
    }
    for file_proto in FILE_DESCRIPTOR_SET.file
}
# Bundle consumed by the pRPC registration code: the complete descriptor set
# together with direct references to features.proto and its Features service.
FeaturesServiceDescription = {
    'file_descriptor_set': FILE_DESCRIPTOR_SET,
    'file_descriptor': _INDEX[u'api/api_proto/features.proto']['descriptor'],
    'service_descriptor': _INDEX[u'api/api_proto/features.proto']['services'][u'Features'],
} | appengine/monorail/api/api_proto/features_prpc_pb2.py |
import base64
import zlib
from google.protobuf import descriptor_pb2
# Includes description of the api/api_proto/features.proto and all of its transitive
# dependencies. Includes source code info.
FILE_DESCRIPTOR_SET = descriptor_pb2.FileDescriptorSet()
FILE_DESCRIPTOR_SET.ParseFromString(zlib.decompress(base64.b64decode(
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>aQ6qvKIVnTWnVqr+vhOpVlpOWUBZ+nyy+rIU3P99XiPaosstUqN'
'xpM8Wc8nUv96vJZU7qYjRkPMqXq71hLrX1EJzrnacDa4DtJP4kEtqC33d/66GtkTlEU8oFQTPq'
'6u41cQ0jjZW4w3JVveURaWcbE/jihWVkWb2lLZIBSJFYXMX1apDjbPJtzfNVTW18yojTU/poyX'
'ldopbVZqpValXiMxO0otumlFX778bEdHFkFYizPQbVrOtjSBwSdkKEBqUefJn1YD05XmNnSjKa'
'/jDarIettZrZRJjXgxDFShnE8ry59Xs8v/R0OlpxrgcJxdtWapUK207TAG/aYqaW9vlxqPSdd4'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'skrtMWOwuhb3T0xyx/bJ5XJ5T4/XXeYI1lgnztMnHLmzz5h7t467Zgu7ddx7qrFbxy5TDs1ljw'
'Hfz6X7JMPP5SmzBuAypxK+Edsa9co9OV/IHeiS6qK9qQaeGD+t/J612jHK5/Zug50DMOAXlPJG'
'SmvEK/TEWJsb3TvRhSqq3o7xxjro6097DLu5Q13TXcxPqeTuscc67Cu29zCWyz8tiwveVNluw4'
'<KEY>nX6WrP5GuIcj9zfC<KEY>bTQIWNsqs7fHtU74piF<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'wXj0CP00rKCKnlVKmZGAFeoJjBjwOxgJwPceyJ1QoUjAhPy95uuqR4WRgKTeSFYogO3NXRIqCN'
'SLv8DFIGOfucpJBlKRYaGgWN/IFaGgWN/Ln+JikNRvPuIkBOmPHBYK047MCAXF+hebXAyIpMsN'
'9Uq63IJQLOlyC2JOl1vICg64xUJQbMAtFoJiA26xEBQbcIuFraBl3uSkMBSzIgNCQTErdUwoKG'
'ZdmOJiESuYMu9xUgSKpSJDQkGx1PBFoaBY6voKF4tawbQ5zUlRKJaOJIWCYmnriFBQLH3uZS4W'
's4KD5m1OikGxwUhKKCg2OHhKKCg2eLnAxeJWMGPe4aQ4FMtEMkJBsUz2vFBQLHO1yMWUFRwya5'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'xzNZHrmDlIypqk7Jh55ihBm8R2jIUwie1YIikUsB1LpYlt0ApdDFzaR1ns1hdjR4htENmOs7JB'
'Unac+QSJ6zgrGySu46xskLiOs7Ig6ycDz+3DFb3CJ2OnqUDYCl0PvLhPAfQH12NnSMwwivkCix'
'kmMV9gMcMk5gssZpjEfIHFDJOYL7CYESs0GZjahyu6k0moEywQtUI3A4V9CqAjucnWjKKYsyxm'
'lMScZTGjJOYsixklMWdZzCiJOctixqzQ7cDCPk0H/dBtbjox5DpvHqCmE6OmM2/e1k0nRmznzR'
'6hoNx8b1YoYDs/Mkps41ZoKbCyj7Lox5ZY2TiyXTazxDZObJfNJfLYSIYxMSYUlFuOp4QCtsuZ'
'IWKrrNC9wOvd2F7WbNEP3ovliK1Ctq+xtorYvmbeI6+FZAQTe4SCcq+xtorYvgbaahRI+hT7Fi'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'BE2SoB+Y79OcAckwJsaEgnIfxHuFAkE/SOKEC1FgnPxFA8afPkqDyg0D+YHAQo/A5JiQlDueFD'
'KIJIxBGgoGv88a5rDqp0SYHEaA/EVjkHPDtA/TBQs5f9aIp4UMIjmUZSyg/pZhHuHEYIhIJWQE'
'yYQICQMbkKmDQlLZw3lGgqKf85BCmhQkHBw+ZyRSQhpIpgUJx9XPeUgg/982zAzrFwb9gPycId'
'BhnR4R0kAyOiBkEMn0oDoG1T1gRX7JCHyha31DC0lAfQ8AxC8ZsQNUxrIiXzICX+5a5pIuY0GZ'
'LxmxQySzBY0k9KuwXiCZLWwlESC/ZBwmqSxsJ5geE9JAEloKk0Ekoakg/5QV+ZoR+MZ+MqcA4m'
'<KEY>'
'<KEY>'
'<KEY>
'FN0zpPsPReIM6f5D0T1Duv9QdM+Q7j8U3TOo+4eie4Z0/9BDQt0/FN0zpPuHonuGdP9Q+lAGfc'
'SPpD1myEcA+aEh0NjHfiTtIUM+4kfSHjPkI34k7XHIivyeEfiH+9lxCCB+z4C5C5bJWpE/NAL/'
'aD87ZqHMHxqxYyRzFu34RwaMoihzltowkH9oaMNmyZLwoUdIA8nerJBBJEdGGQsSPxIfmUVLRo'
'D8I+MA50ZbfiSmzZItPzISaSGDSLKPzKItfyK2zJItgfzIGObcaMufiC2zZMufiC2zZMufiC2H'
'rchPjcA/3s+WwwDxUyN2nMrkrMifGoE/368/5qDMn4o/yqEtfyYy58iWQP4p+6Mc+YOficw5su'
'XPROYc2fJnBg9dOVTozwwzx1hoSyB/ZlicG8euP/OwDMoeHxQyiGR2mHQZsSI/N3BXq/v0BHUZ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'oO2Jky/jiZMvNG21vg5K86l4bzFKNJj2mOqDhgYJUIsVMGmDT8564WvB/ZgvqygfUVtDKorBNF'
'<KEY>'
'StwpVdvORK18d+sx1vNDJKWeibCSKvho6zEzwJ/QNpQX1WCNqPh26Z1VCVhAu8bgA4YoNBESoy'
'9abHBN5B8qtVQCVV9tO43HWC9v4w9fkye6i3YASclcRZrARu2v/yY06iA2al8DaJ7+iqHibm+w'
'Eio6v7C6fG9xJhmwelV8Zn7ltiYNqwea1vyypkyklpaLmgpi1pWlGSZDSE5PLM9oMozk5MLCnC'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'gfBhEH44cKSb8M+T8GEUYjjch2zDJHzOpA2esJYvpzeWw/poJtfTyxkhacRMcpJBVEIoyDjS18'
'8ZgRg1+zkJi43qLYuwPn4Z7RXWIPgBNyPuzB5wM+KBywE3Y8gKHnRZ42bqQZc1HrEcdFmDmQ65'
'GXET9ZCbEQ9VDrkZaQNdMuK+p+1mxGMU280YtYKHXRlxv/OwKyMenBx2ZYxZwbyZ5iTcosy7xf'
'CoJM+7IjABOtF9c/yKtyV/AtrjimzJnzIzuVl7eWF64eR6Y629Sf1chpfzly98YvzUVXu6XjvR'
'wk5i0+zELkw3sedIX9FfsV9iE6PN/dAp84S7nx9GNjHf7v6p+IBvd/8UNDF9RABJp3kbz6CGet'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>sTdfc1GQ4TUXBSv+GpsASr0cmN7HBOg1Xub6C6EJ'
'<KEY>ZIIJ82VdfyEywQSbIEQmmIgPCgVsJ7LDjILHJSx8iEwwaU7kOCeaYNJFQRNMcncKkQ'
'kmQfjrhAImmDJH8+ftGzAINpwNp+HU1nFgA2VgzlCq2hTM1hyznXOb5+y18xfHL13mXhwim02Z'
'kxmGRptNuWxRwqn4kFDAdop3omFZOBuY28dm6EBnY/3eKVSBOyydQoUK5uyAHDWFMTHmO4YqxJ'
'O+Y6gCdNgXCQWSbpm5/EXb2QaWYzh5qK8119sNmGdUK285dh5H+dq5c+dedt4pbe/oOU2e9Q2T'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'n29umZtaeH2+udVxvrnFvk2fb26xe4+j8BVufHESvmJuZTgnCl9xUVD4Cje+OAlf4cYXx8b3gE'
'0Qp8b3wKxYnBO77gMXBRk+YBPEqfE9GBCNgHjLPMpJMFUASgkFIG8lRAUcLd+yDgmF5fJH3C2q'
'DxPqme4MPeP9o8OdaTq4ufPy0S8ZKioxjOdUnPYQfGHCe+wzxCiPhD//H7xKkP/IUAnfZQjrmN'
'w00rL0747F1qkoRqNUe4u3I+g3qlIql1mVrlsmMcqDqpxQ/fi7vNqqbMOSDsYGErC32Eefl+Wr'
'<KEY>'
'<KEY>'
'<KEY>'
'IuyybQy7GsOSSLFzRk1oz6lmPZWK9vOZZlE9BybNhFQUMOm1lZ1KEhhzuWY8NxQUFDDrsouNR3'
'F3VoyJw5LCg4wPOuAC/HckoWdWi7nLuoC+KuwAHfcmzEzMmiDg050rEcG4m7aViQQy0MXO6Puh'
'<KEY>mComC<KEY>DLAcTQ64rvq/1tT+7tXnpw/qXnVe9sLOP2qUdqi7aV/9FD+e'
'/7apYhO8k4+3VPSRgeelrV0HA+TbNuT44RNyNuA0/Pd59vCHPZKPruZccDf49fFD1heQzcLwaY'
'Bs/Q9CCae1yt49WgwDtVADIAU/WuyNw928cVxn4ps4O1ulpr6JE92t4yImkY47/CvfUvGJbadW'
'3sZo7M5DFWP3ocoZZdWcR6v1xmrZqbZKq3p/XA9i/ZCy0JjG77SLjjvhdUDSefSIFoMPlJj/Oz'
'DIT7RapfUt4ouHMC7l7Xz3eB8LZSuHh0FVxzdwujQOLs3Ku5pPqEi/8ewB9zYRkY6CeNzkb7Tp'
'<KEY>'
<KEY>'
'<KEY>/'
'nO8tyGW/Rlg16b8NpNM6t238Hz2l3Rn9G6otxDP+o9ia4eIiH5eALkm4qR6Xv0BMj3Ga0/pKJg'
'/eZOaTvbS6aPVJpLQGG1wJyD6yXbp6sFvuh6wTrH5I1qaTPbr+/9An0DyPz3DKVIKt3l/rcdnH'
'vGZfrPuJ5+ItfpYkLP4GL+rVJhfR3t47Xw7jPkcbroDI7UJ5OvnbhHr3T7mU9hO+bsXVu7N2c/'
'Dc153X/vdo/ckfV1uc5Jx8I6e3T3dU45Sy7Gq/yrab2g+jpuGzahSwSfct2w13/dsGnNqME1sN'
'NbMBGHRui7SxrvepfU4gILtYJ7qXRSpehrpbbpB1FdQQYku4fxisqWS7XNKmL4ZCKgoa5Ag1Jm'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'woqZf3GLOa0i5EOb2czuMuRlp9Hj6Bz5r8SUojZKMzBoTf4Qk8T4qCxNZRINPrEBPUO3U5mFdr'
'jEru+meC7xokqwS1yFxT5fW99zykBucaJchu7TJ0UadA2SQhP2nlPrUvq2JDQR8nget/BT3WMC'
'MwvTl6Hpu2WZbeSpxfukOHN/TvV5Dp3Yd3fqPa5TR94vqgFfSWYe61q43y3s6t3n+hvNOf4Uj9'
'MjHof1HvCVZd5PzId8xfvd4sz9Cnu75up61Sk1wFcGu0wqtK9rTmE2a4KHEc/zk+Q9XV12cs3v'
'9VH2myqzG4IV6O2KkupAYRWgAjoGDpKkrytG/5pv0EBBplW6szyL0b/PwMoQbjX2+0cf7GDJru'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'A3DDjTx+oUT9ZwqqUWStxuYsamkqA1feg+xhE1emI1ZvPGRFMHo/mW9m40mfI23XuiJ2TTvTd6'
'RDbdBwLZbnvFz3mb7gMcsUCb7t4WdUBvUVu+TXeLD7P0pruVSPs23XGL2t10T5nHZCc9hJQUwz'
'tsqYTl23NPpWzfnnvqyFFvzz3NN+H0nnvaTAkmbhWnzaRvzz09MOTbc0/nRrw990F35x63igfN'
'9Khvz33QPUVAyw2qpG/PfdDduQ/hTXMxC24VZ8xB2bkPRTBRUHCrOKPELLhVnHHNEsaL54ISBp'
'QhMzPMOcMRTBQrYUjEkGtcvJo/pPf/MfRuNHDs6XVKoXej4ZQXenegI/TuQEfo3QF/6N1BM8VJ'
'<KEY>'
<KEY>'
's4FPGLn/aeguzzt96Irqtepju9mutKgmsNNzdClFlYLHcXfzMPoCI0+gC9t3wWNQhMZ6u9Ggp4'
'LA1W04DbvZarTXWxSr4m0DsjvjoFP0gRx5WmrWa3Zprd5uif+gEGnxfKXttcpmu95mL/JImG6V'
'HoKvdHe0SertehOwt0q1TQcFfEr0CbaOs7EB9UBOmC6Y2dwbbJjSY2SMeqMk6L9L4PIq1dZZcM'
'DAZr3dbNW3tbAUmkN+sfIQytQVSGnLutGnT0c84QXzrBwMYZDAhY54wgtuJCA6pQuZIfXbhpxg'
'XTLt3FeNDjFLds15pF2uNjEOK4/Ag2sN6uKPxUXnJ5rNyiaMO/kxhbJXWh4SrK3XnbNNZ6fUID'
'9PMTegPpvUhViqvOucnbPP0r9LeVc3HZx3QY6rdHCe/1jtUlxeQqDgvIOH1Kwcq10xh3LXfPUp'
'<KEY>'
'<KEY>
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'mjcl1hPb3GxHTOos89UxqbNcHxSTWuiISS2YsznOafhCFHVMaiHuj0kt+GNSb7mymDre0B+Tes'
'tFMX3xhjom9ZYrS9CLNwxSC3PjDYPUwl5xI1uDvnjDILWwV1xZoIXN8bF8kFrYnPmKyILzmzme'
'3wSphc0piePFFjbHx/JBFPq2i4It7LY5l+WcYUoUFGxht10UbGG3XRR6TGOQUSL6pQ1BwRY279'
'<KEY>T<KEY>sAUXBVvYQlw4YAtb4BCBIEbbL3IwFxAhpOTl'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'mCgo+FbF3aiEpfcByl0rLaHgrwdK+0QzYU95nUc7CgV/g+f+OhT8DfN1LXyIFlZvcP3pUPA3Em'
'<KEY>'
'gR5eV6MSUI7OZpV9RgiJ+65G6Hnum6sSHx6MYKJohJ7nvqsRep77oNExig8PO4HPGt2c+PhzXo'
'S4E+v1IsQ32HHqCPEN09G1pCPENzoixDfcQG005YYbqI1hqG6gtqGfa5FAbTTlZke492ZcArXR'
'lJtsBAr33nKj1U0dv5rhnKYvflWHe2+50epovS12VmEkKjw0h8mUFXNLpEZTVviSSphMWekRDm'
'<KEY>LAvfjVMa9S33Feg'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'eZKTwB0DJcV6AaSZkBaM3riZyQsFIM1jJxikDx/5GWJR+vQLQILZRy8AyXUJ9MYt97oEeuMW+/'
'Sw2W8F27xVBASgtM2WNI7+CCZKy8eHgNpK+k8/oLSHRxglaQUfmjajJAHlodke5ZzJMCYKCj4O'
'9FBJ008CysMDhxhlwAo+clEGAOWR+VAMOBDGREEZAJRHLsoAoDxyUSwr+I6ZZxQLUN4xHwmKFc'
'ZEQbEA5R0lclqA8s6hw4ySsoKPzTFGSQHKY/MdqYdUGBMFJQUoj5V0oRSgPD4l78KlreC75ilG'
'SQPKu+ZjaZ3pMCYKShpQ3lXS79OA8u7xk4wyaAU/Dc1FowwCyqfNd6W3DYYxUVAGAeXTSuQcBJ'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'2nVMb9kCZ9dm9TP+K9rHV8tNiubNgUjXluttSko6iTJ/TZ3YlT52x7kWJu9dZGqVqtP9IbY4r2'
'r2pOE7ddeGdO77njTlIFZrN2fq3+jlPO8+465afZ7067sVNvOueUXajZt5YW5sfsUqfgeCKxg4'
'cStZa+pl6ym5VtupxO2WCCjO+S0O2pCNjh84ZcYcJ3Sb5g8LydLlABefCokEEkT+hKxCtUoV82'
'YADXWPjGCpBfME7JpaoIpceFpOwqJWQQycwQY4HP+BV5RUjfpPoVaRB0kwrIhNzWQsa/Im960V'
'0qIPkVoQhSXzR4tIjQ5OSLHhK+DvZFDwlfB/uikTosJJU9eoyRQvhalnmaE0OaFCS89PYlIzEk'
'JD2tlT0mZBDJk6cYSb+kdZYTcaX5qx4SzFKAdJHC9M5W9qSQ9M7WmTFGgrxflnebIrTa/LKHFK'
'<KEY>'
'<KEY>'
'<KEY>'
'<KEY>'
'Bu3cF0BsiTpxmpzwr9hgHzTZ2IE5rf8JD6Iki6SDCjATItLROmNECeOctI/VboN9Gla28Ak5oI'
'kL9hCDRMazBdoGFeA2QiJ2QQyQOHGCuJ77SBfhoLpjYRIH/TkBpKRihdvFSSnnU7KBrC9AZI0P'
'AYXa6MfNsI/M7TXk9M6OuVoW8bsUHij/crQ79twAQN+dMFywiQ3zZ0F6VnWzE9KaSB5ICbGkQy'
'N8JY9FYb9w59zfI7YoYo+cjviD+ie5ZApo4KGUSSe0cUfeR3DZ4eRclHftdDQs/wXQ8J2X7XSJ'
'0QMojk6TOMBNT3PJnQR37PQ0If+T3pZ1Hykd+THhslH/k9TyYo+n1PppAmBQl95Pc9JPSR35ce'
'GyUf+X1PJvCRPzDMY5yIPvIHHhL6yB9IS4ySj/yB9Ngo+cgfGEeOUo3HrMiPjcA/2O8tRHREPz'
'<KEY>
'<KEY>'
'<KEY>'
'wB95WCa9lcfvidKlVSBTabKLsiJ//LQ36biO0bH/sRFLEn98JDj0J/K+Hr0SHAHyj/lNOHonGN'
'OVkAaS/L4evRQMJL+vh08Fh34qb9XRW8ERIP+E39ej14IxPSYkZee36ui9YCD5fb0EvZX3Z/u9'
'L5fQb+VpXRL8Vp6eeyW8t/K0LonOt/IS8lZeSkh6Kw/mXsAfulLkLwwMgnmqhwyjCn9hhIl/GE'
'9QQz9nW4XpGBPJsJAmvYUX57z0uJ2b19Ck5AUjAunmBeovDTPBiVj0L/nZVCQpNa44L2jxT/lt'
'xTDdnEMyKqSJpEpwXnAnf2WY/ZyInuivPJHwNPCvjN4+zgu2+2f8Nh9SBpFxIU0ke3o5L7SZf8'
'4jAVIGkSJ+xESyr9+9efeTvNrnMp3VvyuyEHflp+ttqA59xaTjVTSDb4zk80rdqNZLrT3ymL48'
'<KEY>'
'PDeSbfU6n1+vbuuM3J3rtsflqnLRqvnd6stLbaaxRFtVmvlmqbXlXt4DW2pltj/80wvmsGby5O'
'/tg8eFPjLko86F2nWn2lVn9Uw2tuzVsfHcQ555FA21B/3kehZkcC1vjPevTycL1etSfbGErRtM'
'/aGupE0y6XWiVYhbachj4At/XDYKojPu3Cc1wAVoPrsN7cOyzt6dFiOyzE2TUtxHmFf4mtXMHV'
'<KEY>'
'PL3vtkG3VcHuM6e71eK1ewUBMLAY7Tugoi4X+ndwnWpOW5L9Buu91sgeb0ohpFsqzVH2ISW0zZ'
'tXqrsu7wSZIb8ODjWCvvEgf4rVdLlW18Fa2LEHgC79lChAAdy+11x5NDeYJ8LDmURAaW6+ttPA'
'orSSWdx4djIKVhQ0txGpVStemZmioIEpXtl95Vat6pUEl/IIy/bdXqXhrZvdJqKnq+jqDqDTdG'
'hWI4WnXbqZXhK8UsghDbGKOibQKtk6+30cNzSgImN1qPsJlwC7KbO846tiAoVcGG1cC2U9OtqN'
'<KEY>'
'<KEY>'
'<KEY>'
'y4WplbmJor24UlxcWJqxUbPpwtLU3ETh9sz0OeAPPO2ZOzPzy/bS7MTcXKeiysZw2CJK71fTnp'
'wBKScm52aQFek5XSjOTC2jQt6vKTAeCDg3puylxZmpAvwCe8yAOhPFe2MMuoTBvqDVxJw9PXF7'
'<KEY>pZXJ<KEY>CQ6Y'
'nlCWINGGAuSIffkytLBTJcYX55plhcWVwuLMyfglq+C5YBKSeg7DRZeGEetcW2MrNQvIewaAeq'
'gTH77uwMfC+iUclaE2iGJbDa1LI/GzAEI4JKnp72/MzNucLNmfmpGUxeQJi7haWZU1BhhSXMUC'
'DG0AaA6QppjRUFcin929d0x6g+7cINe2L6TgEl59zQApYK3FzIbFOzbPNzavw/G7YMYbRFCD0F'
'PCzGpp2s1WtneWvxlE1D1zmMZcZIYyTIIUNP3WhX9W6ks73mlMvoaVyQpjia+<KEY>nHH'
'<KEY>'
'<KEY>'
'<KEY>'
'vm/qdx83diljrQ01hU90Sjz0eCylbImHvmymcilC1Uxcm+HkXwdJXzbHJRoYD6AvdzzEeTnu/5'
'tJlwcs5eho0quBF4zcvb312cDZ5/7qeJPULtoYHHJ3SOI0r5tWziJQYtGhjA6uvC5heDq48nrH'
'6yDXO14HuQ5rLkdH/00GZroqU8EZ8P7KeBNlTxl3g13iBydZGYofnHaVIRYdyuiYwmlz0h9TOM'
'3K6JjC6ZhE7aEy06DMpo4ruxW43bWltZ9Rm5V91cGz/Fvc0ig0bc5tae0n9dHxanPmLTcmLYwl'
'/PFqc3EJnEF95rilYawQvibXvXIujT9T5fDio0tLw5iCIldOSP/dIl/lXBrvUCbEf8uo6Ia5UC'
'xRVCiMJeLK0SEwy1w5sJZ8LfDGUyvnWbRZ2VcdjG54jSuHQk5e76icXfroOJTXzdfcWJMwlvDH'
'obwel1NE1Od1qJy6fupvLeAYufW99VmD5dz+2riLPk+X+60Gkuju72/ApJVcozwbuBYbUAfl2c'
'CyOZAbIHxk1qGVfg6wbK65T/6FMX9EKIAqR3uEAq3K/UmqpagVehDY7lpLuhfsr5dvpdqlC2Hk'
'xwOuJXrGr+rWEp8x+vXRb/tVzQf+t/2qHW/7VTve9qtyF4pZoQb+aaMuXYiChZ+hmtw1dRdtcL'
'hscBeK6T+OJF2IWHQoE+M/mNTwv/XX4i6k3/prcRfSb/21vCeL/hdqVccN')))
# For each .proto file embedded above: its FileDescriptorProto plus a
# name-keyed map of the services it declares.
_INDEX = {
    file_proto.name: {
        'descriptor': file_proto,
        'services': {svc.name: svc for svc in file_proto.service},
    }
    for file_proto in FILE_DESCRIPTOR_SET.file
}
# Bundle consumed by the pRPC registration code: the complete descriptor set
# together with direct references to features.proto and its Features service.
FeaturesServiceDescription = {
    'file_descriptor_set': FILE_DESCRIPTOR_SET,
    'file_descriptor': _INDEX[u'api/api_proto/features.proto']['descriptor'],
    'service_descriptor': _INDEX[u'api/api_proto/features.proto']['services'][u'Features'],
}
from pycograph.config import settings
from pycograph.schemas.parse_result import PackageWithContext
def test_package_production_code():
    """A non-test package is labelled "package" and carries no test metadata."""
    pkg = PackageWithContext(name="example", full_name="example", dir_path="")
    assert pkg.is_test_object is False
    assert pkg.label() == "package"
    expected = {
        "name": "example",
        "full_name": "example",
        "is_test_object": False,
    }
    assert pkg.node_properties() == expected
def test_package_unit_test_no_determine_test_types():
    """Without determine_test_types, a test package gets no test_type property."""
    # Make the precondition explicit instead of relying on test ordering:
    # sibling tests in this module flip this global setting to True, so a
    # reordered run would otherwise leak state into this test.
    settings.determine_test_types = False
    package = PackageWithContext(
        name="tests.unit.cli",
        full_name="tests.unit.cli",
        dir_path="",
    )
    assert package.is_test_object is True
    assert package.test_type == ""
    assert package.label() == "test_package"
    assert package.node_properties() == {
        "name": "tests.unit.cli",
        "full_name": "tests.unit.cli",
        "is_test_object": True,
    }
def test_package_unit():
    """A tests.unit.* package is typed "unit" when determine_test_types is on."""
    settings.determine_test_types = True
    pkg = PackageWithContext(name="tests.unit.cli", full_name="tests.unit.cli", dir_path="")
    assert pkg.is_test_object is True
    assert pkg.test_type == "unit"
    assert pkg.label() == "test_package"
    expected = {
        "name": "tests.unit.cli",
        "full_name": "tests.unit.cli",
        "is_test_object": True,
        "test_type": "unit",
    }
    assert pkg.node_properties() == expected
def test_main_unit_test_package():
    """The tests.unit package itself is typed "unit" when detection is enabled."""
    settings.determine_test_types = True
    pkg = PackageWithContext(name="tests.unit", full_name="tests.unit", dir_path="")
    assert pkg.is_test_object is True
    assert pkg.test_type == "unit"
    assert pkg.label() == "test_package"
    expected = {
        "name": "tests.unit",
        "full_name": "tests.unit",
        "is_test_object": True,
        "test_type": "unit",
    }
    assert pkg.node_properties() == expected
def test_main_test_package():
    """The top-level "tests" package is a test package with an empty test_type."""
    settings.determine_test_types = True
    package = PackageWithContext(
        name="tests",
        full_name="tests",
        dir_path="",
    )
    assert package.is_test_object is True
    # "tests" alone names no category (unit/integration/...), so type stays empty.
    assert package.test_type == ""
    assert package.label() == "test_package"
    assert package.node_properties() == {
        "name": "tests",
        "full_name": "tests",
        "is_test_object": True,
        "test_type": "",
} | tests/unit/schemas/parse_result/test_package.py | from pycograph.config import settings
from pycograph.schemas.parse_result import PackageWithContext
def test_package_production_code():
    """A non-test package is labelled "package" and carries no test metadata."""
    pkg = PackageWithContext(name="example", full_name="example", dir_path="")
    assert pkg.is_test_object is False
    assert pkg.label() == "package"
    expected = {
        "name": "example",
        "full_name": "example",
        "is_test_object": False,
    }
    assert pkg.node_properties() == expected
def test_package_unit_test_no_determine_test_types():
    """Without determine_test_types, a test package gets no test_type property."""
    # Make the precondition explicit instead of relying on test ordering:
    # sibling tests in this module flip this global setting to True, so a
    # reordered run would otherwise leak state into this test.
    settings.determine_test_types = False
    package = PackageWithContext(
        name="tests.unit.cli",
        full_name="tests.unit.cli",
        dir_path="",
    )
    assert package.is_test_object is True
    assert package.test_type == ""
    assert package.label() == "test_package"
    assert package.node_properties() == {
        "name": "tests.unit.cli",
        "full_name": "tests.unit.cli",
        "is_test_object": True,
    }
def test_package_unit():
settings.determine_test_types = True
package = PackageWithContext(
name="tests.unit.cli",
full_name="tests.unit.cli",
dir_path="",
)
assert package.is_test_object is True
assert package.test_type == "unit"
assert package.label() == "test_package"
assert package.node_properties() == {
"name": "tests.unit.cli",
"full_name": "tests.unit.cli",
"is_test_object": True,
"test_type": "unit",
}
def test_main_unit_test_package():
settings.determine_test_types = True
package = PackageWithContext(
name="tests.unit",
full_name="tests.unit",
dir_path="",
)
assert package.is_test_object is True
assert package.test_type == "unit"
assert package.label() == "test_package"
assert package.node_properties() == {
"name": "tests.unit",
"full_name": "tests.unit",
"is_test_object": True,
"test_type": "unit",
}
def test_main_test_package():
settings.determine_test_types = True
package = PackageWithContext(
name="tests",
full_name="tests",
dir_path="",
)
assert package.is_test_object is True
assert package.test_type == ""
assert package.label() == "test_package"
assert package.node_properties() == {
"name": "tests",
"full_name": "tests",
"is_test_object": True,
"test_type": "",
} | 0.667581 | 0.535038 |
from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.io import export_png, show
from bokeh.palettes import Category20
from sklearn.decomposition import PCA
import holoviews as hv
from holoviews.operation import gridmatrix
import numpy as np
import pandas as pd
import os
if not os.path.exists("./simulated_pca_data.csv"):
print("Generating and saving new data")
N = 15
Profiles = np.random.uniform(0, 2, size=(5, N))
U = np.random.choice([0, 1, 2, 3, 4], size=200, replace=True)
d = np.zeros((200, N + 1))
for i, x in enumerate(U):
d[i, :-1] = np.random.normal(Profiles[x, :], 0.3)
d[:, N] = U.astype(int)
np.savetxt("./simulated_pca_data.csv", d, delimiter=",")
d = d[:, :-1]
pd.DataFrame(d,columns=[str(x) for x in range(N)]).to_csv('../data/simulated_pca_data.csv')
pd.DataFrame(U).to_csv('../data/simulated_pca_data_labels.csv')
else:
F = np.loadtxt("./simulated_pca_data.csv", delimiter=",")
d = F[:, :-1]
U = F[:, -1].astype(int)
N = d.shape[1]
print("Loaded array with {} features and {} samples".format(d.shape[0], d.shape[1]))
colors = ["blue", "red", "black", "orange", "green"]
P = PCA(n_components=N).fit(d)
S = P.components_
D = P.transform(d)
pc_plot = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Scatter plot of two most significant principal components",
toolbar_location=None,
)
pc_plot.scatter(x=D[:, 0], y=D[:, 1])
export_png(pc_plot, filename="../img/pcadimred.png")
pc_plot_colored = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Scatter plot of two most significant principal components (colored by underlying group)",
toolbar_location=None,
)
pc_plot_colored.scatter(x=D[:, 0], y=D[:, 1], color=[Category20[10][i] for i in U])
export_png(pc_plot_colored, filename="../img/pcadimred_colors.png")
eigenvalue_plot = figure(
title="Eigenvalues of the covariance matrix", toolbar_location=None
)
eigenvalue_plot.line(x=range(1, N + 1), y=P.explained_variance_)
eigenvalue_plot.circle(x=range(1, N + 1), y=P.explained_variance_)
export_png(eigenvalue_plot, filename="../img/eigenvalues.png")
feature_plot = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Scatter plot of two of the original features",
toolbar_location=None,
)
feature_plot.scatter(x=d[:, 0], y=d[:, 7])
export_png(feature_plot, filename="../img/features.png")
ds = hv.Dataset(pd.DataFrame(d, columns=[str(x) for x in range(N)]))
hv.extension("bokeh")
density_grid = gridmatrix(ds, chart_type=hv.Points).opts(
height=1000, width=1000, toolbar=None
)
hv.save(density_grid, "../img/density.png")
with open("./simulated_pca_data_table.html", "w") as f:
pd.DataFrame(
d,
columns=["f-{}".format(x) for x in range(15)],
index=["s-{}".format(x) for x in range(200)],
).to_html(float_format=lambda x: "{:.2f}".format(x), max_rows=5, buf=f)
loading_plot = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Projection of feature axes (loadings) in PC space",
toolbar_location=None,
)
loading_plot.scatter(x=D[:, 0], y=D[:, 1])
for i in range(15):
loading_plot.line(
x=[-100 * S[0, i], 100 * S[0, i]],
y=[-100 * S[1, i], 100 * S[1, i]],
color=Category20[20][i],
line_width=1,
legend_label=str(i),
)
loading_plot.legend.location = "top_left"
loading_plot.legend.click_policy = "hide"
export_png(loading_plot, filename="../img/loading.png")
show(column(pc_plot, pc_plot_colored, feature_plot, eigenvalue_plot, loading_plot)) | PCA/src/DimReduction.py | from bokeh.plotting import figure
from bokeh.layouts import column
from bokeh.io import export_png, show
from bokeh.palettes import Category20
from sklearn.decomposition import PCA
import holoviews as hv
from holoviews.operation import gridmatrix
import numpy as np
import pandas as pd
import os
if not os.path.exists("./simulated_pca_data.csv"):
print("Generating and saving new data")
N = 15
Profiles = np.random.uniform(0, 2, size=(5, N))
U = np.random.choice([0, 1, 2, 3, 4], size=200, replace=True)
d = np.zeros((200, N + 1))
for i, x in enumerate(U):
d[i, :-1] = np.random.normal(Profiles[x, :], 0.3)
d[:, N] = U.astype(int)
np.savetxt("./simulated_pca_data.csv", d, delimiter=",")
d = d[:, :-1]
pd.DataFrame(d,columns=[str(x) for x in range(N)]).to_csv('../data/simulated_pca_data.csv')
pd.DataFrame(U).to_csv('../data/simulated_pca_data_labels.csv')
else:
F = np.loadtxt("./simulated_pca_data.csv", delimiter=",")
d = F[:, :-1]
U = F[:, -1].astype(int)
N = d.shape[1]
print("Loaded array with {} features and {} samples".format(d.shape[0], d.shape[1]))
colors = ["blue", "red", "black", "orange", "green"]
P = PCA(n_components=N).fit(d)
S = P.components_
D = P.transform(d)
pc_plot = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Scatter plot of two most significant principal components",
toolbar_location=None,
)
pc_plot.scatter(x=D[:, 0], y=D[:, 1])
export_png(pc_plot, filename="../img/pcadimred.png")
pc_plot_colored = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Scatter plot of two most significant principal components (colored by underlying group)",
toolbar_location=None,
)
pc_plot_colored.scatter(x=D[:, 0], y=D[:, 1], color=[Category20[10][i] for i in U])
export_png(pc_plot_colored, filename="../img/pcadimred_colors.png")
eigenvalue_plot = figure(
title="Eigenvalues of the covariance matrix", toolbar_location=None
)
eigenvalue_plot.line(x=range(1, N + 1), y=P.explained_variance_)
eigenvalue_plot.circle(x=range(1, N + 1), y=P.explained_variance_)
export_png(eigenvalue_plot, filename="../img/eigenvalues.png")
feature_plot = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Scatter plot of two of the original features",
toolbar_location=None,
)
feature_plot.scatter(x=d[:, 0], y=d[:, 7])
export_png(feature_plot, filename="../img/features.png")
ds = hv.Dataset(pd.DataFrame(d, columns=[str(x) for x in range(N)]))
hv.extension("bokeh")
density_grid = gridmatrix(ds, chart_type=hv.Points).opts(
height=1000, width=1000, toolbar=None
)
hv.save(density_grid, "../img/density.png")
with open("./simulated_pca_data_table.html", "w") as f:
pd.DataFrame(
d,
columns=["f-{}".format(x) for x in range(15)],
index=["s-{}".format(x) for x in range(200)],
).to_html(float_format=lambda x: "{:.2f}".format(x), max_rows=5, buf=f)
loading_plot = figure(
x_range=(-4, 4),
y_range=(-4, 4),
title="Projection of feature axes (loadings) in PC space",
toolbar_location=None,
)
loading_plot.scatter(x=D[:, 0], y=D[:, 1])
for i in range(15):
loading_plot.line(
x=[-100 * S[0, i], 100 * S[0, i]],
y=[-100 * S[1, i], 100 * S[1, i]],
color=Category20[20][i],
line_width=1,
legend_label=str(i),
)
loading_plot.legend.location = "top_left"
loading_plot.legend.click_policy = "hide"
export_png(loading_plot, filename="../img/loading.png")
show(column(pc_plot, pc_plot_colored, feature_plot, eigenvalue_plot, loading_plot)) | 0.595257 | 0.683471 |
from collections import defaultdict
import math
import sys
book = sys.argv[1]
def get_chapter(sid):
def single(s):
return int(s.split('-')[-1])
p = sid.split(':')
if len(p) == 2:
return [single(p[0])]
else:
return [single(p[0]), single(p[1])]
total_sents = 0
total_words = 0
rel = 0
head = 0
pos = 0
check_sent = 0
check_word = 0
by_pos = defaultdict(lambda: [0,0,0,0])
chapters = defaultdict(lambda: [0,0])
with open(f'{book}.parsed.conllu') as fin:
for line in fin:
if '# sent_id' in line:
total_sents += 1
for c in get_chapter(line):
chapters[c][0] += 1
elif '\t' in line:
ls = line.split('\t')
if '-' in ls[0]:
continue
total_words += 1
by_pos[ls[4]][0] += 1
if ls[3] != '_':
pos += 1
else:
by_pos[ls[4]][1] += 1
if ls[6] != '_':
head += 1
else:
by_pos[ls[4]][2] += 1
if ls[7] != '_':
rel += 1
else:
by_pos[ls[4]][3] += 1
checkers = defaultdict(lambda: 0)
def check(fname):
global check_sent, checkers, check_word, chapters
with open(fname) as fin:
for line in fin:
if '# sent_id' in line:
check_sent += 1
for c in get_chapter(line):
chapters[c][1] += 1
elif '# checker =' in line:
for n in line.split('=')[1].split(','):
checkers[n.strip()] += 1
elif '\t' in line:
if '-' in line.split()[0]:
continue
check_word += 1
check(f'{book}.checked.conllu')
check(f'{book}.manual.conllu')
def table(headers, rows):
actual_headers = headers[:2]
for h in headers[2:]:
actual_headers.append(h)
actual_headers.append('%')
lines = [''] * (len(rows)+1)
for i in range(len(actual_headers)):
col = [actual_headers[i]]
for r in rows:
if i < 2:
col.append(r[i])
elif i % 2 == 1:
idx = (i - 3) // 2 + 2
col.append(round(100.0 * r[idx] / r[1], 2))
else:
col.append(r[(i-2)//2 + 2])
wd = max(len(str(s)) for s in col)
for j, ent in enumerate(col):
add = str(ent)
mv = ' '*(wd - len(add))
if isinstance(ent, str):
add += mv
else:
add = mv + add
if i > 0:
lines[j] += ' | '
lines[j] += add
return lines[0] + '\n' + '-'*len(lines[0]) + '\n' + '\n'.join(lines[1:])
print('')
print(table(['', 'Total', 'Count'],
[
['Sentences Checked', total_sents, check_sent],
['Words Checked', total_words, check_word],
['UPOS', total_words, pos],
['Have Head', total_words, head],
['Have Relation', total_words, rel]
]))
print('')
print(table(['POS Statistics', 'Count', 'Missing UPOS', 'Missing Head', 'Missing Rel'],
[ [k] + by_pos[k] for k in sorted(by_pos.keys()) ]))
print('\nAnnotators:')
for name, count in checkers.items():
print(f' {name}: {count} sentences')
chapter_dist = []
for i in range(11):
chapter_dist.append([])
for ch in sorted(chapters.keys()):
tot, ct = chapters[ch]
if tot == ct:
chapter_dist[10].append(ch)
else:
n = math.floor(10.0 * ct / tot)
#chapter_dist[n].append(ch)
chapter_dist[n].append(f'{ch}[{tot-ct}]')
print('\nChapters')
print(' Complete:', ' '.join(map(str, chapter_dist[10])))
for n in reversed(range(10)):
print(f' >={n}0%:', ' '.join(map(str, chapter_dist[n]))) | report.py |
from collections import defaultdict
import math
import sys
book = sys.argv[1]
def get_chapter(sid):
def single(s):
return int(s.split('-')[-1])
p = sid.split(':')
if len(p) == 2:
return [single(p[0])]
else:
return [single(p[0]), single(p[1])]
total_sents = 0
total_words = 0
rel = 0
head = 0
pos = 0
check_sent = 0
check_word = 0
by_pos = defaultdict(lambda: [0,0,0,0])
chapters = defaultdict(lambda: [0,0])
with open(f'{book}.parsed.conllu') as fin:
for line in fin:
if '# sent_id' in line:
total_sents += 1
for c in get_chapter(line):
chapters[c][0] += 1
elif '\t' in line:
ls = line.split('\t')
if '-' in ls[0]:
continue
total_words += 1
by_pos[ls[4]][0] += 1
if ls[3] != '_':
pos += 1
else:
by_pos[ls[4]][1] += 1
if ls[6] != '_':
head += 1
else:
by_pos[ls[4]][2] += 1
if ls[7] != '_':
rel += 1
else:
by_pos[ls[4]][3] += 1
checkers = defaultdict(lambda: 0)
def check(fname):
global check_sent, checkers, check_word, chapters
with open(fname) as fin:
for line in fin:
if '# sent_id' in line:
check_sent += 1
for c in get_chapter(line):
chapters[c][1] += 1
elif '# checker =' in line:
for n in line.split('=')[1].split(','):
checkers[n.strip()] += 1
elif '\t' in line:
if '-' in line.split()[0]:
continue
check_word += 1
check(f'{book}.checked.conllu')
check(f'{book}.manual.conllu')
def table(headers, rows):
actual_headers = headers[:2]
for h in headers[2:]:
actual_headers.append(h)
actual_headers.append('%')
lines = [''] * (len(rows)+1)
for i in range(len(actual_headers)):
col = [actual_headers[i]]
for r in rows:
if i < 2:
col.append(r[i])
elif i % 2 == 1:
idx = (i - 3) // 2 + 2
col.append(round(100.0 * r[idx] / r[1], 2))
else:
col.append(r[(i-2)//2 + 2])
wd = max(len(str(s)) for s in col)
for j, ent in enumerate(col):
add = str(ent)
mv = ' '*(wd - len(add))
if isinstance(ent, str):
add += mv
else:
add = mv + add
if i > 0:
lines[j] += ' | '
lines[j] += add
return lines[0] + '\n' + '-'*len(lines[0]) + '\n' + '\n'.join(lines[1:])
print('')
print(table(['', 'Total', 'Count'],
[
['Sentences Checked', total_sents, check_sent],
['Words Checked', total_words, check_word],
['UPOS', total_words, pos],
['Have Head', total_words, head],
['Have Relation', total_words, rel]
]))
print('')
print(table(['POS Statistics', 'Count', 'Missing UPOS', 'Missing Head', 'Missing Rel'],
[ [k] + by_pos[k] for k in sorted(by_pos.keys()) ]))
print('\nAnnotators:')
for name, count in checkers.items():
print(f' {name}: {count} sentences')
chapter_dist = []
for i in range(11):
chapter_dist.append([])
for ch in sorted(chapters.keys()):
tot, ct = chapters[ch]
if tot == ct:
chapter_dist[10].append(ch)
else:
n = math.floor(10.0 * ct / tot)
#chapter_dist[n].append(ch)
chapter_dist[n].append(f'{ch}[{tot-ct}]')
print('\nChapters')
print(' Complete:', ' '.join(map(str, chapter_dist[10])))
for n in reversed(range(10)):
print(f' >={n}0%:', ' '.join(map(str, chapter_dist[n]))) | 0.181372 | 0.284647 |
"""The Virtual File System (VFS) file-like object interface."""
import abc
import os
# Since this class implements the file-like object interface
# the names of the interface functions are in lower case as an exception
# to the normal naming convention.
class FileIO(object):
"""Class that implements the VFS file-like object interface."""
def __init__(self, resolver_context):
"""Initializes the file-like object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
"""
super(FileIO, self).__init__()
self._is_cached = False
self._is_open = False
self._resolver_context = resolver_context
@abc.abstractmethod
def _Close(self):
"""Closes the file-like object.
Raises:
IOError: if the close failed.
"""
@abc.abstractmethod
def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec: optional path specification (instance of path.PathSpec).
The default is None.
mode: optional file access mode. The default is 'rb' read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
# Note: that the following functions do not follow the style guide
# because they are part of the file-like object interface.
def open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec: optional path specification (instance of path.PathSpec).
The default is None.
mode: optional file access mode. The default is 'rb' read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object was already opened or the open failed.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification or mode is invalid.
"""
if self._is_open and not self._is_cached:
raise IOError(u'Already open.')
if mode != 'rb':
raise ValueError(u'Unsupport mode: {0:s}.'.format(mode))
if not self._is_open:
self._Open(path_spec=path_spec, mode=mode)
self._is_open = True
if path_spec and not self._resolver_context.GetFileObject(path_spec):
self._resolver_context.CacheFileObject(path_spec, self)
self._is_cached = True
if self._is_cached:
self._resolver_context.GrabFileObject(path_spec)
def close(self):
"""Closes the file-like object.
Raises:
IOError: if the file-like object was not opened or the close failed.
"""
if not self._is_open:
raise IOError(u'Not opened.')
if not self._is_cached:
close_file_object = True
elif self._resolver_context.ReleaseFileObject(self):
self._is_cached = False
close_file_object = True
else:
close_file_object = False
if close_file_object:
self._Close()
self._is_open = False
@abc.abstractmethod
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size: Optional integer value containing the number of bytes to read.
Default is all remaining data (None).
Returns:
A byte string containing the data read.
Raises:
IOError: if the read failed.
"""
@abc.abstractmethod
def seek(self, offset, whence=os.SEEK_SET):
"""Seeks an offset within the file-like object.
Args:
offset: The offset to seek.
whence: Optional value that indicates whether offset is an absolute
or relative position within the file. Default is SEEK_SET.
Raises:
IOError: if the seek failed.
"""
# get_offset() is preferred above tell() by the libbfio layer used in libyal.
@abc.abstractmethod
def get_offset(self):
"""Returns the current offset into the file-like object.
Raises:
IOError: if the file-like object has not been opened.
"""
# Pythonesque alias for get_offset().
def tell(self):
"""Returns the current offset into the file-like object."""
return self.get_offset()
@abc.abstractmethod
def get_size(self):
"""Returns the size of the file-like object.
Raises:
IOError: if the file-like object has not been opened.
""" | dfvfs/file_io/file_io.py | """The Virtual File System (VFS) file-like object interface."""
import abc
import os
# Since this class implements the file-like object interface
# the names of the interface functions are in lower case as an exception
# to the normal naming convention.
class FileIO(object):
"""Class that implements the VFS file-like object interface."""
def __init__(self, resolver_context):
"""Initializes the file-like object.
Args:
resolver_context: the resolver context (instance of resolver.Context).
"""
super(FileIO, self).__init__()
self._is_cached = False
self._is_open = False
self._resolver_context = resolver_context
@abc.abstractmethod
def _Close(self):
"""Closes the file-like object.
Raises:
IOError: if the close failed.
"""
@abc.abstractmethod
def _Open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec: optional path specification (instance of path.PathSpec).
The default is None.
mode: optional file access mode. The default is 'rb' read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object could not be opened.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification is invalid.
"""
# Note: that the following functions do not follow the style guide
# because they are part of the file-like object interface.
def open(self, path_spec=None, mode='rb'):
"""Opens the file-like object defined by path specification.
Args:
path_spec: optional path specification (instance of path.PathSpec).
The default is None.
mode: optional file access mode. The default is 'rb' read-only binary.
Raises:
AccessError: if the access to open the file was denied.
IOError: if the file-like object was already opened or the open failed.
PathSpecError: if the path specification is incorrect.
ValueError: if the path specification or mode is invalid.
"""
if self._is_open and not self._is_cached:
raise IOError(u'Already open.')
if mode != 'rb':
raise ValueError(u'Unsupport mode: {0:s}.'.format(mode))
if not self._is_open:
self._Open(path_spec=path_spec, mode=mode)
self._is_open = True
if path_spec and not self._resolver_context.GetFileObject(path_spec):
self._resolver_context.CacheFileObject(path_spec, self)
self._is_cached = True
if self._is_cached:
self._resolver_context.GrabFileObject(path_spec)
def close(self):
"""Closes the file-like object.
Raises:
IOError: if the file-like object was not opened or the close failed.
"""
if not self._is_open:
raise IOError(u'Not opened.')
if not self._is_cached:
close_file_object = True
elif self._resolver_context.ReleaseFileObject(self):
self._is_cached = False
close_file_object = True
else:
close_file_object = False
if close_file_object:
self._Close()
self._is_open = False
@abc.abstractmethod
def read(self, size=None):
"""Reads a byte string from the file-like object at the current offset.
The function will read a byte string of the specified size or
all of the remaining data if no size was specified.
Args:
size: Optional integer value containing the number of bytes to read.
Default is all remaining data (None).
Returns:
A byte string containing the data read.
Raises:
IOError: if the read failed.
"""
@abc.abstractmethod
def seek(self, offset, whence=os.SEEK_SET):
"""Seeks an offset within the file-like object.
Args:
offset: The offset to seek.
whence: Optional value that indicates whether offset is an absolute
or relative position within the file. Default is SEEK_SET.
Raises:
IOError: if the seek failed.
"""
# get_offset() is preferred above tell() by the libbfio layer used in libyal.
@abc.abstractmethod
def get_offset(self):
"""Returns the current offset into the file-like object.
Raises:
IOError: if the file-like object has not been opened.
"""
# Pythonesque alias for get_offset().
def tell(self):
"""Returns the current offset into the file-like object."""
return self.get_offset()
@abc.abstractmethod
def get_size(self):
"""Returns the size of the file-like object.
Raises:
IOError: if the file-like object has not been opened.
""" | 0.931774 | 0.34017 |
import sys
import time
import os
import math
import json
import geopy
import pandas as pd
import numpy as np
import shapefile as shp
import math
import multiprocessing as mp
from pyproj import Proj, transform
from geopy.distance import VincentyDistance
from shapely.geometry import box, mapping, shape
lon_km = 0.004491576420629531
lat_km = 0.0045218473851471735
import multiprocessing as mp
class ParallelBucket:
def __init__(self, cpu_limit=True):
self.jobs = []
if cpu_limit:
self.ncpus = mp.cpu_count()
else:
self.ncpus = float("inf")
def add_job(self, func, args=()):
t = mp.Process(target=func, args=args)
t.start()
self.jobs.append(t)
if len(self.jobs) >= self.ncpus:
self.joinall()
def joinall(self):
for job in self.jobs:
job.join()
self.jobs = []
def getShape(coords):
minx, miny, maxx, maxy = coords
return json.dumps(mapping(box(minx, miny, maxx, maxy)))
def convert_proj(coord, in_proj='epsg:3857', out_proj='epsg:4326'):
inProj = Proj(init=in_proj)
outProj = Proj(init=out_proj)
x1, y1 = coord
x2, y2 = transform(inProj, outProj, x1, y1)
return [x2, y2]
def get_polygon(i, j, BB):
maxx,maxy = BB[0]
minx,miny = BB[1]
dx = 1333.3333333333
dy = 1333.3333333333
vertices = []
vertices.append(convert_proj([min(minx+dx*j,maxx), max(maxy-dy*i,miny)]))
vertices.append(convert_proj([min(minx+dx*(j+1),maxx), max(maxy-dy*i,miny)]))
vertices.append(convert_proj([min(minx+dx*(j+1),maxx), max(maxy-dy*(i+1),miny)]))
vertices.append(convert_proj([min(minx+dx*j,maxx), max(maxy-dy*(i+1),miny)]))
pol = np.array(vertices)
bb = [pol[:,:1].min(), pol[:,1:].min(), pol[:,:1].max(), pol[:,1:].max()]
return bb
def compute_subset(subset, BB, POLYGONS):
for i, j in subset:
bb = get_polygon(i, j, BB)
POLYGONS.append(bb)
def make_grid(BB, cell_size):
BB = [(BB[0], BB[1]), (BB[2], BB[3])]
BB = [convert_proj(coord, in_proj='epsg:4326', out_proj='epsg:3857') for coord in BB]
# minx,maxx,miny,maxy = 448262.080078, 450360.750122, 6262492.020081, 6262938.950073
maxx,maxy = BB[0]
minx,miny = BB[1]
dx = 1333.3333333333
dy = 1333.3333333333
nx = int(math.ceil(abs(maxx - minx)/dx))
ny = int(math.ceil(abs(maxy - miny)/dy))
POLYGONS = mp.Manager().list()
bucket = ParallelBucket()
processed = 0
count = 0
total = ny*nx
limit = total/mp.cpu_count()
subset = []
ts = time.time()
for i in range(ny):
for j in range(nx):
subset.append((i, j))
if processed == limit:
# compute_subset(subset, BB, POLYGONS)
print 'Computing -> ', count, ' of ', total
bucket.add_job(compute_subset, args=(subset, BB, POLYGONS))
subset = []
processed = 0
count += 1
processed += 1
bucket.joinall()
print 'Computing Final ', len(subset)
POLYGONS = list(POLYGONS)
compute_subset(subset, BB, POLYGONS)
bucket.joinall()
POLYGONS = map(getShape, list(POLYGONS))
CENTROIDS = []
i = 1
for pol in POLYGONS:
geojson = json.loads(pol)
poly = shape(geojson)
CENTROIDS.append([i, poly.centroid.x, poly.centroid.y])
i += 1
return pd.DataFrame(CENTROIDS, columns=['ID', 'lon', 'lat']) | create_grid.py | import sys
import time
import os
import math
import json
import geopy
import pandas as pd
import numpy as np
import shapefile as shp
import math
import multiprocessing as mp
from pyproj import Proj, transform
from geopy.distance import VincentyDistance
from shapely.geometry import box, mapping, shape
lon_km = 0.004491576420629531
lat_km = 0.0045218473851471735
import multiprocessing as mp
class ParallelBucket:
def __init__(self, cpu_limit=True):
self.jobs = []
if cpu_limit:
self.ncpus = mp.cpu_count()
else:
self.ncpus = float("inf")
def add_job(self, func, args=()):
t = mp.Process(target=func, args=args)
t.start()
self.jobs.append(t)
if len(self.jobs) >= self.ncpus:
self.joinall()
def joinall(self):
for job in self.jobs:
job.join()
self.jobs = []
def getShape(coords):
minx, miny, maxx, maxy = coords
return json.dumps(mapping(box(minx, miny, maxx, maxy)))
def convert_proj(coord, in_proj='epsg:3857', out_proj='epsg:4326'):
inProj = Proj(init=in_proj)
outProj = Proj(init=out_proj)
x1, y1 = coord
x2, y2 = transform(inProj, outProj, x1, y1)
return [x2, y2]
def get_polygon(i, j, BB):
maxx,maxy = BB[0]
minx,miny = BB[1]
dx = 1333.3333333333
dy = 1333.3333333333
vertices = []
vertices.append(convert_proj([min(minx+dx*j,maxx), max(maxy-dy*i,miny)]))
vertices.append(convert_proj([min(minx+dx*(j+1),maxx), max(maxy-dy*i,miny)]))
vertices.append(convert_proj([min(minx+dx*(j+1),maxx), max(maxy-dy*(i+1),miny)]))
vertices.append(convert_proj([min(minx+dx*j,maxx), max(maxy-dy*(i+1),miny)]))
pol = np.array(vertices)
bb = [pol[:,:1].min(), pol[:,1:].min(), pol[:,:1].max(), pol[:,1:].max()]
return bb
def compute_subset(subset, BB, POLYGONS):
for i, j in subset:
bb = get_polygon(i, j, BB)
POLYGONS.append(bb)
def make_grid(BB, cell_size):
BB = [(BB[0], BB[1]), (BB[2], BB[3])]
BB = [convert_proj(coord, in_proj='epsg:4326', out_proj='epsg:3857') for coord in BB]
# minx,maxx,miny,maxy = 448262.080078, 450360.750122, 6262492.020081, 6262938.950073
maxx,maxy = BB[0]
minx,miny = BB[1]
dx = 1333.3333333333
dy = 1333.3333333333
nx = int(math.ceil(abs(maxx - minx)/dx))
ny = int(math.ceil(abs(maxy - miny)/dy))
POLYGONS = mp.Manager().list()
bucket = ParallelBucket()
processed = 0
count = 0
total = ny*nx
limit = total/mp.cpu_count()
subset = []
ts = time.time()
for i in range(ny):
for j in range(nx):
subset.append((i, j))
if processed == limit:
# compute_subset(subset, BB, POLYGONS)
print 'Computing -> ', count, ' of ', total
bucket.add_job(compute_subset, args=(subset, BB, POLYGONS))
subset = []
processed = 0
count += 1
processed += 1
bucket.joinall()
print 'Computing Final ', len(subset)
POLYGONS = list(POLYGONS)
compute_subset(subset, BB, POLYGONS)
bucket.joinall()
POLYGONS = map(getShape, list(POLYGONS))
CENTROIDS = []
i = 1
for pol in POLYGONS:
geojson = json.loads(pol)
poly = shape(geojson)
CENTROIDS.append([i, poly.centroid.x, poly.centroid.y])
i += 1
return pd.DataFrame(CENTROIDS, columns=['ID', 'lon', 'lat']) | 0.135804 | 0.212824 |
import copy
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
from testrail.helper import TestRailError
from testrail.section import Section
from testrail.suite import Suite
class TestSuite(unittest.TestCase):
    def setUp(self):
        """Build canned TestRail API payloads and the Section under test.

        The dicts mirror the JSON returned by TestRail's get_suites,
        get_sections and get_projects endpoints.  ``self.section`` wraps
        the child section (id 2) from ``mock_section_data``.
        """
        # Two suites belonging to project 1: id 1 is completed, id 2 is open.
        self.mock_suite_data = [
            {
                "description": "suite description",
                "id": 1,
                "name": "Setup & Installation",
                "project_id": 1,
                "url": "http://<server>/index.php?/suites/view/1",
                "is_baseline": False,
                "is_completed": True,
                "is_master": True,
                "completed_on": 1453504099
            },
            {
                "description": "suite description 2",
                "id": 2,
                "name": "Setup & Installation",
                "project_id": 1,
                "url": "http://<server>/index.php?/suites/view/1",
                "is_baseline": False,
                "is_completed": False,
                "is_master": True,
                "completed_on": None
            },
        ]
        # A root section (id 1) and its child (id 2), both in suite 1.
        self.mock_section_data = [
            {
                "depth": 0,
                "description": 'Some description',
                "display_order": 1,
                "id": 1,
                "name": "Prerequisites",
                "parent_id": None,
                "suite_id": 1
            },
            {
                "depth": 1,
                "description": 'some words',
                "display_order": 1,
                "id": 2,
                "name": "Prerequisites2",
                "parent_id": 1,
                "suite_id": 1
            }
        ]
        # One active project (id 1) and one completed project (id 2).
        self.mock_project_data = [
            {
                "announcement": "..",
                "completed_on": 1653504099,
                "id": 1,
                "is_completed": False,
                "name": "Project1",
                "show_announcement": True,
                "url": "http://<server>/index.php?/projects/overview/1",
                "suite_mode": 3
            },
            {
                "announcement": "..",
                "completed_on": 1453504099,
                "id": 2,
                "is_completed": True,
                "name": "Project2",
                "show_announcement": True,
                "url": "http://<server>/index.php?/projects/overview/1",
                "suite_mode": 3
            }
        ]
        # Object under test: the child section (depth 1, parent_id 1).
        self.section = Section(self.mock_section_data[1])
def test_get_id_type(self):
self.assertEqual(type(self.section.id), int)
def test_get_id(self):
self.assertEqual(self.section.id, 2)
def test_get_depth_type(self):
self.assertEqual(type(self.section.depth), int)
def test_get_depth(self):
self.assertEqual(self.section.depth, 1)
def test_get_display_order_type(self):
self.assertEqual(type(self.section.display_order), int)
def test_get_display_order(self):
self.assertEqual(self.section.display_order, 1)
def test_get_description_type(self):
self.assertEqual(type(self.section.description), str)
def test_get_description(self):
self.assertEqual(self.section.description, 'some words')
def test_set_description(self):
description = 'new description'
self.section.description = description
self.assertEqual(self.section.description, description)
self.assertEqual(self.section._content['description'], description)
def test_set_description_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.description = 394
self.assertEqual(str(e.exception), 'input must be a string')
def test_get_name_type(self):
self.assertEqual(type(self.section.name), str)
def test_get_name(self):
self.assertEqual(self.section.name, 'Prerequisites2')
def test_set_name(self):
name = 'my new suite'
self.section.name = name
self.assertEqual(self.section.name, name)
self.assertEqual(self.section._content['name'], name)
def test_set_name_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.name = 394
self.assertEqual(str(e.exception), 'input must be a string')
def test_raw_data(self):
self.assertEqual(self.section.raw_data(), self.mock_section_data[1])
def test_raw_data_type(self):
self.assertEqual(type(self.section.raw_data()), dict)
@mock.patch('testrail.api.requests.get')
def test_get_suite_type(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(type(self.section.suite), Suite)
@mock.patch('testrail.api.requests.get')
def test_get_suite(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.suite.id, 1)
@mock.patch('testrail.api.requests.get')
def test_get_suite_invalid_id(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.section._content['suite_id'] = 200
with self.assertRaises(TestRailError) as e:
self.section.suite
self.assertEqual(str(e.exception), "Suite ID '200' was not found")
@mock.patch('testrail.api.requests.get')
def test_set_suite(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.suite.id, 1)
self.section.suite = Suite(self.mock_suite_data[1])
self.assertEqual(self.section._content['suite_id'], 2)
self.assertEqual(self.section.suite.id, 2)
def test_set_suite_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.suite = 2
self.assertEqual(str(e.exception), 'input must be a Suite')
@mock.patch('testrail.api.requests.get')
def test_set_suite_invalid_suite(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
suite = Suite()
suite._content['id'] = 5
with self.assertRaises(TestRailError) as e:
self.section.suite = suite
self.assertEqual(str(e.exception),
"Suite ID '5' was not found")
def test_set_suite_empty_suite(self):
s = Section({})
self.assertEqual(s.suite.id, None)
self.assertEqual(type(s.suite), Suite)
@mock.patch('testrail.api.requests.get')
def test_get_parent_type(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(type(self.section.parent), Section)
@mock.patch('testrail.api.requests.get')
def test_get_parent(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.parent.id, 1)
@mock.patch('testrail.api.requests.get')
def test_get_parent_invalid_id(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.section._content['parent_id'] = 200
with self.assertRaises(TestRailError) as e:
self.section.parent
self.assertEqual(str(e.exception), "Section ID '200' was not found")
@mock.patch('testrail.api.requests.get')
def test_set_parent(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.parent.id, 1)
self.section.parent = Section(self.mock_section_data[1])
self.assertEqual(self.section._content['parent_id'], 2)
self.assertEqual(self.section.parent.id, 2)
def test_set_parent_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.parent = 2
self.assertEqual(str(e.exception), 'input must be a Section')
@mock.patch('testrail.api.requests.get')
def test_set_parent_invalid_section(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
section = Section({})
section._content['id'] = 5
with self.assertRaises(TestRailError) as e:
self.section.parent = section
self.assertEqual(str(e.exception),
"Section ID '5' was not found")
@mock.patch('testrail.api.requests.get')
def test_set_parent_empty_section(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.section.parent = Section({})
self.assertEqual(str(e.exception),
"Section ID 'None' was not found") | tests/test_section.py | import copy
import mock
try:
import unittest2 as unittest
except ImportError:
import unittest
from testrail.helper import TestRailError
from testrail.section import Section
from testrail.suite import Suite
class TestSuite(unittest.TestCase):
def setUp(self):
self.mock_suite_data = [
{
"description": "suite description",
"id": 1,
"name": "Setup & Installation",
"project_id": 1,
"url": "http://<server>/index.php?/suites/view/1",
"is_baseline": False,
"is_completed": True,
"is_master": True,
"completed_on": 1453504099
},
{
"description": "suite description 2",
"id": 2,
"name": "Setup & Installation",
"project_id": 1,
"url": "http://<server>/index.php?/suites/view/1",
"is_baseline": False,
"is_completed": False,
"is_master": True,
"completed_on": None
},
]
self.mock_section_data = [
{
"depth": 0,
"description": 'Some description',
"display_order": 1,
"id": 1,
"name": "Prerequisites",
"parent_id": None,
"suite_id": 1
},
{
"depth": 1,
"description": 'some words',
"display_order": 1,
"id": 2,
"name": "Prerequisites2",
"parent_id": 1,
"suite_id": 1
}
]
self.mock_project_data = [
{
"announcement": "..",
"completed_on": 1653504099,
"id": 1,
"is_completed": False,
"name": "Project1",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1",
"suite_mode": 3
},
{
"announcement": "..",
"completed_on": 1453504099,
"id": 2,
"is_completed": True,
"name": "Project2",
"show_announcement": True,
"url": "http://<server>/index.php?/projects/overview/1",
"suite_mode": 3
}
]
self.section = Section(self.mock_section_data[1])
def test_get_id_type(self):
self.assertEqual(type(self.section.id), int)
def test_get_id(self):
self.assertEqual(self.section.id, 2)
def test_get_depth_type(self):
self.assertEqual(type(self.section.depth), int)
def test_get_depth(self):
self.assertEqual(self.section.depth, 1)
def test_get_display_order_type(self):
self.assertEqual(type(self.section.display_order), int)
def test_get_display_order(self):
self.assertEqual(self.section.display_order, 1)
def test_get_description_type(self):
self.assertEqual(type(self.section.description), str)
def test_get_description(self):
self.assertEqual(self.section.description, 'some words')
def test_set_description(self):
description = 'new description'
self.section.description = description
self.assertEqual(self.section.description, description)
self.assertEqual(self.section._content['description'], description)
def test_set_description_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.description = 394
self.assertEqual(str(e.exception), 'input must be a string')
def test_get_name_type(self):
self.assertEqual(type(self.section.name), str)
def test_get_name(self):
self.assertEqual(self.section.name, 'Prerequisites2')
def test_set_name(self):
name = 'my new suite'
self.section.name = name
self.assertEqual(self.section.name, name)
self.assertEqual(self.section._content['name'], name)
def test_set_name_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.name = 394
self.assertEqual(str(e.exception), 'input must be a string')
def test_raw_data(self):
self.assertEqual(self.section.raw_data(), self.mock_section_data[1])
def test_raw_data_type(self):
self.assertEqual(type(self.section.raw_data()), dict)
@mock.patch('testrail.api.requests.get')
def test_get_suite_type(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(type(self.section.suite), Suite)
@mock.patch('testrail.api.requests.get')
def test_get_suite(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.suite.id, 1)
@mock.patch('testrail.api.requests.get')
def test_get_suite_invalid_id(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.section._content['suite_id'] = 200
with self.assertRaises(TestRailError) as e:
self.section.suite
self.assertEqual(str(e.exception), "Suite ID '200' was not found")
@mock.patch('testrail.api.requests.get')
def test_set_suite(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.suite.id, 1)
self.section.suite = Suite(self.mock_suite_data[1])
self.assertEqual(self.section._content['suite_id'], 2)
self.assertEqual(self.section.suite.id, 2)
def test_set_suite_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.suite = 2
self.assertEqual(str(e.exception), 'input must be a Suite')
@mock.patch('testrail.api.requests.get')
def test_set_suite_invalid_suite(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_suite_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
suite = Suite()
suite._content['id'] = 5
with self.assertRaises(TestRailError) as e:
self.section.suite = suite
self.assertEqual(str(e.exception),
"Suite ID '5' was not found")
def test_set_suite_empty_suite(self):
s = Section({})
self.assertEqual(s.suite.id, None)
self.assertEqual(type(s.suite), Suite)
@mock.patch('testrail.api.requests.get')
def test_get_parent_type(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(type(self.section.parent), Section)
@mock.patch('testrail.api.requests.get')
def test_get_parent(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.parent.id, 1)
@mock.patch('testrail.api.requests.get')
def test_get_parent_invalid_id(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.section._content['parent_id'] = 200
with self.assertRaises(TestRailError) as e:
self.section.parent
self.assertEqual(str(e.exception), "Section ID '200' was not found")
@mock.patch('testrail.api.requests.get')
def test_set_parent(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
self.assertEqual(self.section.parent.id, 1)
self.section.parent = Section(self.mock_section_data[1])
self.assertEqual(self.section._content['parent_id'], 2)
self.assertEqual(self.section.parent.id, 2)
def test_set_parent_invalid_type(self):
with self.assertRaises(TestRailError) as e:
self.section.parent = 2
self.assertEqual(str(e.exception), 'input must be a Section')
@mock.patch('testrail.api.requests.get')
def test_set_parent_invalid_section(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
section = Section({})
section._content['id'] = 5
with self.assertRaises(TestRailError) as e:
self.section.parent = section
self.assertEqual(str(e.exception),
"Section ID '5' was not found")
@mock.patch('testrail.api.requests.get')
def test_set_parent_empty_section(self, mock_get):
mock_response = mock.Mock()
mock_response.json.return_value = copy.deepcopy(self.mock_section_data)
mock_response.status_code = 200
mock_get.return_value = mock_response
with self.assertRaises(TestRailError) as e:
self.section.parent = Section({})
self.assertEqual(str(e.exception),
"Section ID 'None' was not found") | 0.512937 | 0.371222 |
import os
import sys
import numpy as np
from time import time
import argparse
from py_data_getter import data_getter
from py_db import db
db = db('nba_shots')
def initiate(start_year, end_year):
start_time = time()
print "-------------------------"
print "percentiles.py"
print '\n\ncalculating percentile baselines'
for year in range(start_year,end_year+1):
season_start = year
season_id = str(season_start)+str(season_start%100+1).zfill(2)[-2:]
process_percentiles(season_id)
print '\n\nupdating yearly percentiles'
update_yearly_percentiles()
print '\n\nupdating career percentiles'
update_career_percentiles()
end_time = time()
elapsed_time = float(end_time - start_time)
print "time elapsed (in seconds): " + str(elapsed_time)
print "time elapsed (in minutes): " + str(elapsed_time/60.0)
print "percentiles.py"
print "-------------------------"
def process_percentiles(season_id):
print season_id
for season_type in ('Reg', 'Post', 'Pre'):
for _type in ('Player', 'Team'):
qry = """SELECT %s_id, season_id,
efg_plus,
ROUND(attempts/games,1) AS volume,
ShotSkillPlus,
paa_per_game,
par_per_game
FROM shots_%s_relative_year
JOIN(
SELECT
%s_id, season_id, ROUND(sum_efg_plus/attempts,2) AS ShotSkillPlus
FROM(
SELECT %s_id, season_id, SUM(attempts*zone_efg_plus) AS sum_efg_plus
FROM shots_%s_Relative_Year r
WHERE season_type = '%s'
AND shot_zone_area = 'all'
AND shot_zone_basic != 'all'
AND season_id = %s
GROUP BY %s_id, season_id
) a
JOIN(
SELECT %s_id, season_id, attempts
FROM shots_%s_Relative_Year r
WHERE season_type = '%s'
AND shot_zone_area = 'all'
AND shot_zone_basic = 'all'
AND season_id = %s
GROUP BY %s_id, season_id
) b USING (%s_id, season_id)
) ShotSkill USING (%s_id, season_id)
WHERE season_type = '%s'
AND season_id = %s
AND shot_zone_basic = 'All'
AND games > 2
AND attempts > 25;"""
query = qry % (_type, _type, _type, _type, _type, season_type, season_id, _type, _type, _type, season_type, season_id, _type, _type, _type, season_type, season_id)
# raw_input(query)
res = db.query(query)
if res == ():
continue
EFGplus_list = []
AttemptsPerGame_list = []
shotSkillPlus_list = []
PAAperGame_list = []
PARperGame_list = []
for row in res:
foo, foo, EFGplus, AttemptsPerGame, shotSkillPlus, PAAperGame, PARperGame = row
EFGplus_list.append(float(EFGplus))
AttemptsPerGame_list.append(float(AttemptsPerGame))
shotSkillPlus_list.append(float(shotSkillPlus))
PAAperGame_list.append(float(PAAperGame))
PARperGame_list.append(float(PARperGame))
for cat in ('EFG', 'AttemptsPerGame', 'shotSkill', 'PAAperGame', 'PARperGame'):
entries = []
# print '\t', '('+str(len(res))+')', season_type, _type, cat
if cat == 'EFG':
arry = np.array(EFGplus_list)
elif cat == 'AttemptsPerGame':
arry = np.array(AttemptsPerGame_list)
elif cat == 'shotSkill':
arry = np.array(shotSkillPlus_list)
elif cat == 'PAAperGame':
arry = np.array(PAAperGame_list)
elif cat == 'PARperGame':
arry = np.array(PARperGame_list)
for i in range(0,101):
pv = np.percentile(arry, i)
percentile_value = np.percentile(arry, i)
# print _type, cat, i, percentile_value
entry = {'season_id':season_id, 'season_type':season_type, 'player_team':_type, 'category':cat, 'percentile':i, 'floor_value': percentile_value}
# print entry
entries.append(entry)
table = "percentile_baselines"
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
def update_yearly_percentiles():
for _type in ('Player', 'Team'):
print '\t', _type
entries = []
relative_qry = """SELECT
%s_id, season_id, season_type,
r.attempts, r.games,
r.attempts/r.games, efg_plus, paa/r.games, par/r.games, ShotSkillPlus
FROM shots_%s_relative_Year r
JOIN shot_skill_plus_%s_Year ss USING (%s_id, season_id, season_type)
WHERE shot_zone_basic = 'all'
AND season_type != 'AS';"""
relative_query = relative_qry % (_type, _type, _type, _type)
relative_res = db.query(relative_query)
for i, row in enumerate(relative_res):
entry = {}
_id, season_id, season_type, attempts, games, att_per_game, efg_plus, paa, par, ShotSkillPlus = row
id_key = _type+'_id'
entry[id_key] = _id
entry['season_id'] = season_id
entry['season_type'] = season_type
entry['games'] = games
entry['attempts'] = attempts
category_dict = {
'AttemptsPerGame': att_per_game,
'EFG': efg_plus,
'PAAperGame': paa,
'PARperGame': par,
'shotSkill': ShotSkillPlus,
}
for category, category_value in category_dict.items():
qry = """SELECT IFNULL(MIN(percentile),100)
FROM percentile_baselines
WHERE season_type = '%s'
AND player_team = '%s'
AND season_id = %s
AND category = '%s'
AND IFNULL(%s,0) <= floor_value;"""
query = qry % (season_type, _type, season_id, category, category_value)
# print query
category_percentile = db.query(query)[0][0]
category_key = category+'_percentile'
entry[category_key] = category_percentile
entries.append(entry)
table = "percentiles_%s_Year" % (_type)
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
# Career percentiles are WEIGHTED AVERAGE percentiles over the players career
def update_career_percentiles():
for _type in ('Player', 'Team'):
print '\t', _type
entries = []
qry = """SELECT
%s_id, ssp.season_id, season_type, SUM(attempts), SUM(games),
ROUND(SUM(AttemptsPerGame_percentile*games)/SUM(games),1),
ROUND(SUM(EFG_Percentile*attempts)/SUM(attempts),1),
ROUND(SUM(PAAperGame_percentile*attempts)/SUM(attempts),1),
ROUND(SUM(PARperGame_percentile*attempts)/SUM(attempts),1),
ROUND(SUM(shotSkill_Percentile*attempts)/SUM(attempts),1)
FROM percentiles_%s_Year
JOIN (SELECT %s_id, season_id, season_type FROM shot_skill_plus_%s_Career) ssp USING (%s_id, season_type)
GROUP By %s_id, season_type;"""
query = qry % (_type, _type, _type, _type, _type, _type)
res = db.query(query)
for i, row in enumerate(res):
entry = {}
_id, season_id, season_type, attempts, games, AttemptsPerGame_percentile, EFG_Percentile, PAAperGame_percentile, PARperGame_percentile, shotSkill_Percentile = row
id_key = _type+'_id'
entry[id_key] = _id
entry['season_id'] = season_id
entry['season_type'] = season_type
entry['games'] = games
entry['attempts'] = attempts
entry['AttemptsPerGame_percentile'] = AttemptsPerGame_percentile
entry['EFG_Percentile'] = EFG_Percentile
entry['PAAperGame_percentile'] = PAAperGame_percentile
entry['PARperGame_percentile'] = PAAperGame_percentile
entry['shotSkill_Percentile'] = shotSkill_Percentile
entries.append(entry)
table = "percentiles_%s_Career" % (_type)
db.query("TRUNCATE TABLE %s;" % (table))
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--start_year',type=int,default=1996)
parser.add_argument('--end_year',type=int,default=2018)
args = parser.parse_args()
initiate(args.start_year, args.end_year) | processing/percentiles.py | import os
import sys
import numpy as np
from time import time
import argparse
from py_data_getter import data_getter
from py_db import db
db = db('nba_shots')
def initiate(start_year, end_year):
start_time = time()
print "-------------------------"
print "percentiles.py"
print '\n\ncalculating percentile baselines'
for year in range(start_year,end_year+1):
season_start = year
season_id = str(season_start)+str(season_start%100+1).zfill(2)[-2:]
process_percentiles(season_id)
print '\n\nupdating yearly percentiles'
update_yearly_percentiles()
print '\n\nupdating career percentiles'
update_career_percentiles()
end_time = time()
elapsed_time = float(end_time - start_time)
print "time elapsed (in seconds): " + str(elapsed_time)
print "time elapsed (in minutes): " + str(elapsed_time/60.0)
print "percentiles.py"
print "-------------------------"
def process_percentiles(season_id):
print season_id
for season_type in ('Reg', 'Post', 'Pre'):
for _type in ('Player', 'Team'):
qry = """SELECT %s_id, season_id,
efg_plus,
ROUND(attempts/games,1) AS volume,
ShotSkillPlus,
paa_per_game,
par_per_game
FROM shots_%s_relative_year
JOIN(
SELECT
%s_id, season_id, ROUND(sum_efg_plus/attempts,2) AS ShotSkillPlus
FROM(
SELECT %s_id, season_id, SUM(attempts*zone_efg_plus) AS sum_efg_plus
FROM shots_%s_Relative_Year r
WHERE season_type = '%s'
AND shot_zone_area = 'all'
AND shot_zone_basic != 'all'
AND season_id = %s
GROUP BY %s_id, season_id
) a
JOIN(
SELECT %s_id, season_id, attempts
FROM shots_%s_Relative_Year r
WHERE season_type = '%s'
AND shot_zone_area = 'all'
AND shot_zone_basic = 'all'
AND season_id = %s
GROUP BY %s_id, season_id
) b USING (%s_id, season_id)
) ShotSkill USING (%s_id, season_id)
WHERE season_type = '%s'
AND season_id = %s
AND shot_zone_basic = 'All'
AND games > 2
AND attempts > 25;"""
query = qry % (_type, _type, _type, _type, _type, season_type, season_id, _type, _type, _type, season_type, season_id, _type, _type, _type, season_type, season_id)
# raw_input(query)
res = db.query(query)
if res == ():
continue
EFGplus_list = []
AttemptsPerGame_list = []
shotSkillPlus_list = []
PAAperGame_list = []
PARperGame_list = []
for row in res:
foo, foo, EFGplus, AttemptsPerGame, shotSkillPlus, PAAperGame, PARperGame = row
EFGplus_list.append(float(EFGplus))
AttemptsPerGame_list.append(float(AttemptsPerGame))
shotSkillPlus_list.append(float(shotSkillPlus))
PAAperGame_list.append(float(PAAperGame))
PARperGame_list.append(float(PARperGame))
for cat in ('EFG', 'AttemptsPerGame', 'shotSkill', 'PAAperGame', 'PARperGame'):
entries = []
# print '\t', '('+str(len(res))+')', season_type, _type, cat
if cat == 'EFG':
arry = np.array(EFGplus_list)
elif cat == 'AttemptsPerGame':
arry = np.array(AttemptsPerGame_list)
elif cat == 'shotSkill':
arry = np.array(shotSkillPlus_list)
elif cat == 'PAAperGame':
arry = np.array(PAAperGame_list)
elif cat == 'PARperGame':
arry = np.array(PARperGame_list)
for i in range(0,101):
pv = np.percentile(arry, i)
percentile_value = np.percentile(arry, i)
# print _type, cat, i, percentile_value
entry = {'season_id':season_id, 'season_type':season_type, 'player_team':_type, 'category':cat, 'percentile':i, 'floor_value': percentile_value}
# print entry
entries.append(entry)
table = "percentile_baselines"
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
def update_yearly_percentiles():
for _type in ('Player', 'Team'):
print '\t', _type
entries = []
relative_qry = """SELECT
%s_id, season_id, season_type,
r.attempts, r.games,
r.attempts/r.games, efg_plus, paa/r.games, par/r.games, ShotSkillPlus
FROM shots_%s_relative_Year r
JOIN shot_skill_plus_%s_Year ss USING (%s_id, season_id, season_type)
WHERE shot_zone_basic = 'all'
AND season_type != 'AS';"""
relative_query = relative_qry % (_type, _type, _type, _type)
relative_res = db.query(relative_query)
for i, row in enumerate(relative_res):
entry = {}
_id, season_id, season_type, attempts, games, att_per_game, efg_plus, paa, par, ShotSkillPlus = row
id_key = _type+'_id'
entry[id_key] = _id
entry['season_id'] = season_id
entry['season_type'] = season_type
entry['games'] = games
entry['attempts'] = attempts
category_dict = {
'AttemptsPerGame': att_per_game,
'EFG': efg_plus,
'PAAperGame': paa,
'PARperGame': par,
'shotSkill': ShotSkillPlus,
}
for category, category_value in category_dict.items():
qry = """SELECT IFNULL(MIN(percentile),100)
FROM percentile_baselines
WHERE season_type = '%s'
AND player_team = '%s'
AND season_id = %s
AND category = '%s'
AND IFNULL(%s,0) <= floor_value;"""
query = qry % (season_type, _type, season_id, category, category_value)
# print query
category_percentile = db.query(query)[0][0]
category_key = category+'_percentile'
entry[category_key] = category_percentile
entries.append(entry)
table = "percentiles_%s_Year" % (_type)
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
# Career percentiles are WEIGHTED AVERAGE percentiles over the players career
def update_career_percentiles():
for _type in ('Player', 'Team'):
print '\t', _type
entries = []
qry = """SELECT
%s_id, ssp.season_id, season_type, SUM(attempts), SUM(games),
ROUND(SUM(AttemptsPerGame_percentile*games)/SUM(games),1),
ROUND(SUM(EFG_Percentile*attempts)/SUM(attempts),1),
ROUND(SUM(PAAperGame_percentile*attempts)/SUM(attempts),1),
ROUND(SUM(PARperGame_percentile*attempts)/SUM(attempts),1),
ROUND(SUM(shotSkill_Percentile*attempts)/SUM(attempts),1)
FROM percentiles_%s_Year
JOIN (SELECT %s_id, season_id, season_type FROM shot_skill_plus_%s_Career) ssp USING (%s_id, season_type)
GROUP By %s_id, season_type;"""
query = qry % (_type, _type, _type, _type, _type, _type)
res = db.query(query)
for i, row in enumerate(res):
entry = {}
_id, season_id, season_type, attempts, games, AttemptsPerGame_percentile, EFG_Percentile, PAAperGame_percentile, PARperGame_percentile, shotSkill_Percentile = row
id_key = _type+'_id'
entry[id_key] = _id
entry['season_id'] = season_id
entry['season_type'] = season_type
entry['games'] = games
entry['attempts'] = attempts
entry['AttemptsPerGame_percentile'] = AttemptsPerGame_percentile
entry['EFG_Percentile'] = EFG_Percentile
entry['PAAperGame_percentile'] = PAAperGame_percentile
entry['PARperGame_percentile'] = PAAperGame_percentile
entry['shotSkill_Percentile'] = shotSkill_Percentile
entries.append(entry)
table = "percentiles_%s_Career" % (_type)
db.query("TRUNCATE TABLE %s;" % (table))
if entries != []:
for i in range(0, len(entries), 1000):
db.insertRowDict(entries[i: i + 1000], table, insertMany=True, replace=True, rid=0,debug=1)
db.conn.commit()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--start_year',type=int,default=1996)
parser.add_argument('--end_year',type=int,default=2018)
args = parser.parse_args()
initiate(args.start_year, args.end_year) | 0.087692 | 0.139075 |
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import SimpleRNN, Dense, Activation
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(path="ibdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
print("Type: ", type(X_train))
print("Type: ", type(Y_train))
print("X train shape: ", X_train.shape)
print("Y train shape: ", Y_train.shape)
# %% EDA
print("Y train values: ", np.unique(Y_train))
print("Y test values: ", np.unique(Y_test))
unique, counts = np.unique(Y_train, return_counts=True)
print("Y train distribution: ", dict(zip(unique, counts)))
unique, counts = np.unique(Y_test, return_counts=True)
print("Y testdistribution: ", dict(zip(unique, counts)))
plt.figure()
sns.countplot(Y_train)
plt.xlabel("Classes")
plt.ylabel("Freq")
plt.title("Y train")
plt.figure()
sns.countplot(Y_test)
plt.xlabel("Classes")
plt.ylabel("Freq")
plt.title("Y test")
d = X_train[0]
print(d)
print(len(d))
review_len_train = []
review_len_test = []
for i, ii in zip(X_train, X_test):
review_len_train.append(len(i))
review_len_test.append(len(ii))
sns.distplot(review_len_train, hist_kws={"alpha": 0.3})
sns.distplot(review_len_test, hist_kws={"alpha": 0.3})
print("Train mean:", np.mean(review_len_train))
print("Train median:", np.median(review_len_train))
print("Train mode:", stats.mode(review_len_train))
# number of words
word_index = imdb.get_word_index()
print(type(word_index))
print(len(word_index))
for keys, values in word_index.items():
if values == 22:
print(keys)
def whatItSay(index=24):
reverse_index = dict([(value, key) for (key, value) in word_index.items()])
decode_review = " ".join([reverse_index.get(i - 3, "!") for i in X_train[index]])
print(decode_review)
print(Y_train[index])
return decode_review
decoded_review = whatItSay(36)
#Preprocess
num_words = 15000
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)
print(X_train[5])
for i in X_train[0:10]:
print(len(i))
decoded_review = whatItSay(5) | preprocessing.py | import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from keras.datasets import imdb
from keras.preprocessing.sequence import pad_sequences
from keras.models import Sequential
from keras.layers.embeddings import Embedding
from keras.layers import SimpleRNN, Dense, Activation
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(path="ibdb.npz",
num_words=None,
skip_top=0,
maxlen=None,
seed=113,
start_char=1,
oov_char=2,
index_from=3)
print("Type: ", type(X_train))
print("Type: ", type(Y_train))
print("X train shape: ", X_train.shape)
print("Y train shape: ", Y_train.shape)
# %% EDA
print("Y train values: ", np.unique(Y_train))
print("Y test values: ", np.unique(Y_test))
unique, counts = np.unique(Y_train, return_counts=True)
print("Y train distribution: ", dict(zip(unique, counts)))
unique, counts = np.unique(Y_test, return_counts=True)
print("Y testdistribution: ", dict(zip(unique, counts)))
plt.figure()
sns.countplot(Y_train)
plt.xlabel("Classes")
plt.ylabel("Freq")
plt.title("Y train")
plt.figure()
sns.countplot(Y_test)
plt.xlabel("Classes")
plt.ylabel("Freq")
plt.title("Y test")
d = X_train[0]
print(d)
print(len(d))
review_len_train = []
review_len_test = []
for i, ii in zip(X_train, X_test):
review_len_train.append(len(i))
review_len_test.append(len(ii))
sns.distplot(review_len_train, hist_kws={"alpha": 0.3})
sns.distplot(review_len_test, hist_kws={"alpha": 0.3})
print("Train mean:", np.mean(review_len_train))
print("Train median:", np.median(review_len_train))
print("Train mode:", stats.mode(review_len_train))
# number of words
word_index = imdb.get_word_index()
print(type(word_index))
print(len(word_index))
for keys, values in word_index.items():
if values == 22:
print(keys)
def whatItSay(index=24):
reverse_index = dict([(value, key) for (key, value) in word_index.items()])
decode_review = " ".join([reverse_index.get(i - 3, "!") for i in X_train[index]])
print(decode_review)
print(Y_train[index])
return decode_review
decoded_review = whatItSay(36)
#Preprocess
num_words = 15000
(X_train, Y_train), (X_test, Y_test) = imdb.load_data(num_words=num_words)
maxlen = 130
X_train = pad_sequences(X_train, maxlen=maxlen)
X_test = pad_sequences(X_test, maxlen=maxlen)
print(X_train[5])
for i in X_train[0:10]:
print(len(i))
decoded_review = whatItSay(5) | 0.387227 | 0.307657 |
import os
import time
from abc import abstractmethod
from . import helpers
from .config import client
class ClientTrader:
    """Base class for GUI-automation stock trading clients.

    Subclasses implement the abstract login/query/trade members for a
    concrete broker; ``broker_type`` selects the matching configuration.
    """

    def __init__(self):
        # Load the broker-specific configuration for this client type.
        self._config = client.create(self.broker_type)

    def prepare(self, config_path=None, user=None, password=None, exe_path=None, comm_password=None,
                **kwargs):
        """
        Log in to the trading client.

        :param config_path: path to a login config file; alternative to
            passing user/password directly (file values take precedence)
        :param user: account name
        :param password: login password
        :param exe_path: client executable path, e.g. r'C:\\htzqzyb2\\xiadan.exe';
            falls back to the config's DEFAULT_EXE_PATH when not given
        :param comm_password: communication password, if the broker uses one
        :return:
        """
        if config_path is not None:
            account = helpers.file2dict(config_path)
            user = account['user']
            password = account['password']
        # NOTE(review): indentation was lost in this copy; upstream easytrader
        # performs the login outside the config-file branch, as restored here.
        self.login(user, password, exe_path or self._config.DEFAULT_EXE_PATH, comm_password, **kwargs)

    @abstractmethod
    def login(self, user, password, exe_path, comm_password=None, **kwargs):
        # Broker-specific login flow; expected to set self._app.
        pass

    @property
    @abstractmethod
    def broker_type(self):
        # Identifier used to select the client configuration.
        pass

    @property
    @abstractmethod
    def balance(self):
        # Account funds summary.
        pass

    @property
    @abstractmethod
    def position(self):
        # Current holdings.
        pass

    @property
    @abstractmethod
    def cancel_entrusts(self):
        # Orders that can still be cancelled.
        pass

    @property
    @abstractmethod
    def today_entrusts(self):
        # Orders submitted today.
        pass

    @property
    @abstractmethod
    def today_trades(self):
        # Trades executed today.
        pass

    @abstractmethod
    def cancel_entrust(self, entrust_no):
        # Cancel the order identified by entrust_no.
        pass

    @abstractmethod
    def buy(self, security, price, amount, **kwargs):
        # Place a buy order.
        pass

    @abstractmethod
    def sell(self, security, price, amount, **kwargs):
        # Place a sell order.
        pass

    def auto_ipo(self):
        # Optional IPO auto-subscription; not supported by every broker.
        raise NotImplementedError

    def _run_exe_path(self, exe_path):
        # Always launch 'xiadan.exe' from the directory of the given path,
        # ignoring the basename the caller supplied.
        return os.path.join(
            os.path.dirname(exe_path), 'xiadan.exe'
        )

    def _wait(self, seconds):
        # Thin wrapper so waits can be stubbed in tests.
        time.sleep(seconds)

    def exit(self):
        # Kill the automated client application (self._app is set by login).
        self._app.kill()

    def _close_prompt_windows(self):
        """Close every pop-up dialog (#32770) except the main client window."""
        self._wait(1)
        for w in self._app.windows(class_name='#32770'):
            if w.window_text() != self._config.TITLE:
                w.close()
self._wait(1) | Stock/Trade/Broker/YhNew/clienttrader.py |
import os
import time
from abc import abstractmethod
from . import helpers
from .config import client
class ClientTrader:
def __init__(self):
self._config = client.create(self.broker_type)
def prepare(self, config_path=None, user=None, password=None, exe_path=None, comm_password=None,
**kwargs):
"""
登陆客户端
:param config_path: 登陆配置文件,跟参数登陆方式二选一
:param user: 账号
:param password: <PASSWORD>
:param exe_path: 客户端路径类似 r'C:\\htzqzyb2\\xiadan.exe', 默认 r'C:\\htzqzyb2\\xiadan.exe'
:param comm_password: <PASSWORD>
:return:
"""
if config_path is not None:
account = helpers.file2dict(config_path)
user = account['user']
password = account['password']
self.login(user, password, exe_path or self._config.DEFAULT_EXE_PATH, comm_password, **kwargs)
@abstractmethod
def login(self, user, password, exe_path, comm_password=None, **kwargs):
pass
@property
@abstractmethod
def broker_type(self):
pass
@property
@abstractmethod
def balance(self):
pass
@property
@abstractmethod
def position(self):
pass
@property
@abstractmethod
def cancel_entrusts(self):
pass
@property
@abstractmethod
def today_entrusts(self):
pass
@property
@abstractmethod
def today_trades(self):
pass
@abstractmethod
def cancel_entrust(self, entrust_no):
pass
@abstractmethod
def buy(self, security, price, amount, **kwargs):
pass
@abstractmethod
def sell(self, security, price, amount, **kwargs):
pass
def auto_ipo(self):
raise NotImplementedError
def _run_exe_path(self, exe_path):
return os.path.join(
os.path.dirname(exe_path), 'xiadan.exe'
)
def _wait(self, seconds):
time.sleep(seconds)
def exit(self):
self._app.kill()
def _close_prompt_windows(self):
self._wait(1)
for w in self._app.windows(class_name='#32770'):
if w.window_text() != self._config.TITLE:
w.close()
self._wait(1) | 0.44746 | 0.089614 |
import functools
import os
import pathlib
import sys
import pytest
import typer
from _pytest.logging import LogCaptureFixture
from loguru import logger
@pytest.fixture(scope="session")
def tmp_cwd(tmp_path_factory):
    """Run the whole session from a fresh temp directory.

    Changes the working directory to a session-scoped temp dir and restores
    the original cwd at teardown. The restore is wrapped in try/finally so
    the cwd is put back even if teardown is reached via an exception thrown
    into the fixture generator.
    """
    cwd = pathlib.Path.cwd()
    tmp_wd = tmp_path_factory.mktemp("bucky_integration")
    os.chdir(tmp_wd)
    try:
        yield
    finally:
        # Guarantee the original cwd is restored for later sessions/plugins.
        os.chdir(cwd)
@pytest.fixture
def cli():
    """Yield a typer CliRunner whose invoke can echo captured output.

    Pass ``echo=True`` to ``invoke`` to re-emit the captured stdout/stderr,
    so pytest shows the CLI output in its logs on test failures.

    Fix: the original patched ``CliRunner.invoke`` on the CLASS every time
    the fixture ran, stacking a new wrapper per test and leaking the patch
    globally. We wrap the bound method on this one instance instead.
    """
    cli_runner = typer.testing.CliRunner(mix_stderr=False)
    original_invoke = cli_runner.invoke

    @functools.wraps(original_invoke)
    def invoke(*args, **kwargs):
        # Pop our private flag so CliRunner.invoke never sees it.
        echo = kwargs.pop("echo", False)
        result = original_invoke(*args, **kwargs)
        if echo is True:
            sys.stdout.write(result.stdout)
            sys.stderr.write(result.stderr)
        return result

    cli_runner.invoke = invoke
    yield cli_runner
# Have pytest caplog capture loguru output
@pytest.fixture
def caplog(caplog: LogCaptureFixture):
    """Route loguru records into pytest's caplog handler for the test's duration."""
    sink_id = logger.add(caplog.handler, format="{message}")
    yield caplog
    logger.remove(sink_id)
# Add integration test marker that only runs with --integration cli arg
def pytest_addoption(parser):
    """Register the --integration flag that opts in to integration tests."""
    parser.addoption(
        "--integration",
        action="store_true",
        default=False,
        help="run integration tests",
    )
def pytest_configure(config):
    """Declare the custom 'integration' marker so pytest does not warn about it."""
    marker_spec = "integration: mark integrations tests to run"
    config.addinivalue_line("markers", marker_spec)
def pytest_collection_modifyitems(config, items):
if config.getoption("--integration"):
# --runslow given in cli: do not skip slow tests
return
skip_integ = pytest.mark.skip(reason="need --integration option to run")
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integ) | tests/conftest.py | import functools
import os
import pathlib
import sys
import pytest
import typer
from _pytest.logging import LogCaptureFixture
from loguru import logger
@pytest.fixture(scope="session")
def tmp_cwd(tmp_path_factory):
cwd = pathlib.Path.cwd()
tmp_wd = tmp_path_factory.mktemp("bucky_integration")
os.chdir(tmp_wd)
yield
os.chdir(cwd)
@pytest.fixture
def cli():
"""Yield a click.testing.CliRunner to invoke the CLI."""
class_ = typer.testing.CliRunner
def invoke_wrapper(f):
"""Augment CliRunner.invoke to emit its output to stdout.
This enables pytest to show the output in its logs on test
failures.
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
echo = kwargs.pop("echo", False)
result = f(*args, **kwargs)
if echo is True:
sys.stdout.write(result.stdout)
sys.stderr.write(result.stderr)
return result
return wrapper
class_.invoke = invoke_wrapper(class_.invoke)
cli_runner = class_(mix_stderr=False)
yield cli_runner
# Have pytest caplog capture loguru output
@pytest.fixture
def caplog(caplog: LogCaptureFixture):
handler_id = logger.add(caplog.handler, format="{message}")
yield caplog
logger.remove(handler_id)
# Add integration test marker that only runs with --integration cli arg
def pytest_addoption(parser):
parser.addoption("--integration", action="store_true", default=False, help="run integration tests")
def pytest_configure(config):
config.addinivalue_line("markers", "integration: mark integrations tests to run")
def pytest_collection_modifyitems(config, items):
if config.getoption("--integration"):
# --runslow given in cli: do not skip slow tests
return
skip_integ = pytest.mark.skip(reason="need --integration option to run")
for item in items:
if "integration" in item.keywords:
item.add_marker(skip_integ) | 0.379608 | 0.181807 |
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__date__ = "08 Mar 2016"
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.raycing.materials as rm
import BalderBL
showIn3D = False
BalderBL.showIn3D = showIn3D  # keep the helper module's flag in sync
stripe = 'Si'
# Central energy (eV) and half-width of the traced energy band.
E0 = 9000
dE = 4
# Si(111)/Si(311) crystal pairs at two cryo temperatures (tK in kelvin,
# converted from -171 C and -140 C) for the first/second DCM crystal.
si111_1 = rm.CrystalSi(hkl=(1, 1, 1), tK=-171+273.15)
si111_2 = rm.CrystalSi(hkl=(1, 1, 1), tK=-140+273.15)
si311_1 = rm.CrystalSi(hkl=(3, 1, 1), tK=-171+273.15)
si311_2 = rm.CrystalSi(hkl=(3, 1, 1), tK=-140+273.15)
def define_plots():
    """Build the DCM footprint plot and clamp its color axis around E0."""
    dcm_plot = xrtp.XYCPlot(
        'beamFSMDCM', (1,),
        xaxis=xrtp.XYCAxis(r'$x$', 'mm'), yaxis=xrtp.XYCAxis(r'$z$', 'mm'),
        caxis=xrtp.XYCAxis('energy', 'eV'), title='DCM')
    dcm_plot.xaxis.limits = [-7., 7.]
    dcm_plot.yaxis.limits = [38.1-7., 38.1+7.]
    dcm_plot.fluxFormatStr = '%.1p'
    # Text panel used by plot_generator to annotate crystal and angle.
    dcm_plot.textPanel = dcm_plot.fig.text(
        0.88, 0.8, '', transform=dcm_plot.fig.transFigure, size=14,
        color='r', ha='center')
    plots = [dcm_plot]
    for p in plots:
        p.caxis.limits = [E0 - dE, E0 + dE]
        p.caxis.offset = E0
    return plots
def plot_generator(plots, beamLine):
    """Yield once per (crystal, energy) combination after realigning the DCM.

    Each trailing comma below deliberately builds a 1-tuple — the beamline
    attributes hold sequences (one entry per surface); confirm against the
    xrt optical-element API before changing.
    """
    energies = np.linspace(E0 - dE*0.66, E0 + dE*0.66, 7)
    # crystals = 'Si111', 'Si311'
    crystals = 'Si111',
    for crystal in crystals:
        if crystal == 'Si111':
            beamLine.dcm.surface = crystal,
            beamLine.dcm.material = si111_1,
            beamLine.dcm.material2 = si111_2,
        elif crystal == 'Si311':
            beamLine.dcm.surface = crystal,
            beamLine.dcm.material = si311_1,
            beamLine.dcm.material2 = si311_2,
        for energy in energies:
            BalderBL.align_beamline(beamLine, energy=energy)
            thetaDeg = np.degrees(
                beamLine.dcm.bragg - 2*beamLine.vcm.pitch)
            # NOTE(review): baseName already ends in '.png' and '.png' is
            # appended again below, giving 'x.png.png' — confirm intent.
            baseName = '{0}_{1:.0f}.png'.format(crystal, thetaDeg*1e4)
            for plot in plots:
                plot.saveName = baseName + '.png'
                # plot.persistentName = baseName + '.pickle'
                if hasattr(plot, 'textPanel'):
                    plot.textPanel.set_text(
                        '{0}\n$\\theta$ = {1:.3f}$^o$'.format(
                            crystal, thetaDeg))
            if showIn3D:
                beamLine.glowFrameName = baseName + '.jpg'
            yield
def main():
    """Trace the Balder DCM either as a 3D glow view or as ray-tracing plots."""
    beam_line = BalderBL.build_beamline(
        stripe=stripe, eMinRays=E0 - dE, eMaxRays=E0 + dE)
    if showIn3D:
        beam_line.glow(centerAt='VFM', startFrom=7,
                       generator=plot_generator, generatorArgs=[[], beam_line])
        return
    xrtr.run_ray_tracing(define_plots(), repeats=16, generator=plot_generator,
                         beamLine=beam_line, globalNorm=True, processes='half')
if __name__ == '__main__':
main() | examples/withRaycing/02_Balder_BL/traceDCMBalderBL.py | # -*- coding: utf-8 -*-
__author__ = "<NAME>"
__date__ = "08 Mar 2016"
import os, sys; sys.path.append(os.path.join('..', '..', '..')) # analysis:ignore
import numpy as np
import xrt.plotter as xrtp
import xrt.runner as xrtr
import xrt.backends.raycing.materials as rm
import BalderBL
showIn3D = False
BalderBL.showIn3D = showIn3D
stripe = 'Si'
E0 = 9000
dE = 4
si111_1 = rm.CrystalSi(hkl=(1, 1, 1), tK=-171+273.15)
si111_2 = rm.CrystalSi(hkl=(1, 1, 1), tK=-140+273.15)
si311_1 = rm.CrystalSi(hkl=(3, 1, 1), tK=-171+273.15)
si311_2 = rm.CrystalSi(hkl=(3, 1, 1), tK=-140+273.15)
def define_plots():
plots = []
plot = xrtp.XYCPlot(
'beamFSMDCM', (1,),
xaxis=xrtp.XYCAxis(r'$x$', 'mm'), yaxis=xrtp.XYCAxis(r'$z$', 'mm'),
caxis=xrtp.XYCAxis('energy', 'eV'), title='DCM')
plot.xaxis.limits = [-7., 7.]
plot.yaxis.limits = [38.1-7., 38.1+7.]
plot.fluxFormatStr = '%.1p'
plot.textPanel = plot.fig.text(0.88, 0.8, '',
transform=plot.fig.transFigure, size=14,
color='r', ha='center')
plots.append(plot)
for plot in plots:
plot.caxis.limits = [E0 - dE, E0 + dE]
plot.caxis.offset = E0
return plots
def plot_generator(plots, beamLine):
energies = np.linspace(E0 - dE*0.66, E0 + dE*0.66, 7)
# crystals = 'Si111', 'Si311'
crystals = 'Si111',
for crystal in crystals:
if crystal == 'Si111':
beamLine.dcm.surface = crystal,
beamLine.dcm.material = si111_1,
beamLine.dcm.material2 = si111_2,
elif crystal == 'Si311':
beamLine.dcm.surface = crystal,
beamLine.dcm.material = si311_1,
beamLine.dcm.material2 = si311_2,
for energy in energies:
BalderBL.align_beamline(beamLine, energy=energy)
thetaDeg = np.degrees(
beamLine.dcm.bragg - 2*beamLine.vcm.pitch)
baseName = '{0}_{1:.0f}.png'.format(crystal, thetaDeg*1e4)
for plot in plots:
plot.saveName = baseName + '.png'
# plot.persistentName = baseName + '.pickle'
if hasattr(plot, 'textPanel'):
plot.textPanel.set_text(
'{0}\n$\\theta$ = {1:.3f}$^o$'.format(
crystal, thetaDeg))
if showIn3D:
beamLine.glowFrameName = baseName + '.jpg'
yield
def main():
myBalder = BalderBL.build_beamline(
stripe=stripe, eMinRays=E0-dE, eMaxRays=E0+dE)
if showIn3D:
myBalder.glow(centerAt='VFM', startFrom=7,
generator=plot_generator, generatorArgs=[[], myBalder])
return
plots = define_plots()
xrtr.run_ray_tracing(plots, repeats=16, generator=plot_generator,
beamLine=myBalder, globalNorm=True, processes='half')
if __name__ == '__main__':
main() | 0.38445 | 0.267378 |
import sys
import argparse
from argparse import ArgumentParser
import logging
import easywrk
from easywrk.commands import help_command, request_command, run_command, view_config_command
from easywrk.commands import list_command, init_command
from easywrk.commands import register_cmd_help
def setup_config_argparse(parser: ArgumentParser):
    """Attach the shared --config/--file options to *parser*."""
    option_specs = (
        (("-c", "--config"), "config_file", "./easywrk.toml",
         "Location of the config file, default is easywrk.toml in current working directory."),
        (("-f", "--file"), "env_file", "./.env",
         "Location of the .env file, defaults to .env file in current working directory."),
    )
    for flags, dest, default, help_text in option_specs:
        parser.add_argument(*flags, dest=dest, default=default, help=help_text)
def setup_argparse():
    """Build the top-level easywrk argument parser with all subcommands.

    Each subcommand stores its handler via ``set_defaults(handle=...)``,
    which ``cli()`` later dispatches on, and registers itself with
    ``register_cmd_help`` so the ``help`` subcommand can print its usage.

    :return: the fully configured :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(prog='easywrk')
    parser.add_argument(
        "-V", "--version",
        dest="show_version",
        action="store_true",
        help="show version number and quit"
    )
    subparsers = parser.add_subparsers(
        title='These are common easywrk commands used in various situations',
        metavar='command')

    # help command
    name = "help"
    help_parser = subparsers.add_parser(
        name,
        help="print command help"
    )
    help_parser.set_defaults(handle=help_command)
    register_cmd_help(name, help_parser)
    help_parser.add_argument(
        "name", nargs=1,
        help="command name"
    )

    # init command
    name = "init"
    init_parser = subparsers.add_parser(
        name,
        help="create config file in current directory"
    )
    init_parser.set_defaults(handle=init_command)
    register_cmd_help(name, init_parser)

    # run command
    name = "run"
    run_parser = subparsers.add_parser(
        name,
        help="run single benchmark api"
    )
    run_parser.set_defaults(handle=run_command)
    register_cmd_help(name, run_parser)
    run_parser.add_argument(
        "name", nargs=1,
        help="api name"
    )
    setup_config_argparse(run_parser)
    run_parser.add_argument(
        "--dry-run",
        dest="dry_run",
        action="store_true",
        default=False,
        help="use request mock to response data and do not call wrk tool"
    )
    # NOTE(review): for `run`, body printing defaults to False (benchmark
    # noise), while `request` below defaults to True — presumably
    # intentional; confirm before unifying.
    run_parser.add_argument(
        "--no-print-response-body",
        dest="print_response_body",
        action="store_false",
        default=False,
        help="do not print response body"
    )
    run_parser.add_argument(
        "--print-response-body",
        dest="print_response_body",
        action="store_true",
        help="print response body"
    )
    run_parser.add_argument(
        "--no-print-request-body",
        dest="print_request_body",
        action="store_false",
        default=False,
        help="do not print request body"
    )
    run_parser.add_argument(
        "--print-request-body",
        dest="print_request_body",
        action="store_true",
        help="print request body"
    )

    # request command
    name = "request"
    request_parser = subparsers.add_parser(
        name,
        help="request single api"
    )
    request_parser.set_defaults(handle=request_command)
    register_cmd_help(name, request_parser)
    request_parser.add_argument(
        "name", nargs=1,
        help="api name"
    )
    setup_config_argparse(request_parser)
    request_parser.add_argument(
        "--no-print-response-body",
        dest="print_response_body",
        action="store_false",
        default=True,
        help="do not print response body"
    )
    request_parser.add_argument(
        "--print-response-body",
        dest="print_response_body",
        action="store_true",
        help="print response body"
    )
    request_parser.add_argument(
        "--no-print-request-body",
        dest="print_request_body",
        action="store_false",
        default=True,
        help="do not print request body"
    )
    request_parser.add_argument(
        "--print-request-body",
        dest="print_request_body",
        action="store_true",
        help="print request body"
    )
    request_parser.add_argument(
        "--dry-run",
        dest="dry_run",
        action="store_true",
        default=False,
        help="use request mock to response data"
    )

    # view-config command
    name = "view-config"
    view_config_parser = subparsers.add_parser(
        name,
        help="view config file"
    )
    view_config_parser.set_defaults(handle=view_config_command)
    register_cmd_help(name, view_config_parser)
    setup_config_argparse(view_config_parser)

    # list command
    name = "list"
    list_parser = subparsers.add_parser(
        name,
        help="list all api name and desc"
    )
    list_parser.set_defaults(handle=list_command)
    register_cmd_help(name, list_parser)
    setup_config_argparse(list_parser)

    return parser
def cli(argv, other_argv):
    """Parse *argv* and dispatch to the matching subcommand handler."""
    logging.basicConfig(level=logging.INFO, format="%(message)s")
    parser = setup_argparse()
    args = parser.parse_args(argv)
    if args.show_version:
        print("easywrk %s" % easywrk.__version__)
        return
    handler = getattr(args, 'handle', None)
    if handler is None:
        # No subcommand chosen: show usage instead of failing.
        parser.print_help()
    else:
        handler(args, other_argv)
def main():
    """Split sys.argv at the first '--' and forward both halves to cli()."""
    argv = sys.argv[1:]
    other_argv = None
    try:
        split_at = argv.index('--')
    except ValueError:
        split_at = None
    if split_at is not None:
        # Everything after the first '--' is passed through untouched.
        other_argv = argv[split_at + 1:]
        argv = argv[:split_at]
    cli(argv, other_argv)
if __name__ == '__main__':
main() | easywrk/cli.py |
import sys
import argparse
from argparse import ArgumentParser
import logging
import easywrk
from easywrk.commands import help_command, request_command, run_command, view_config_command
from easywrk.commands import list_command, init_command
from easywrk.commands import register_cmd_help
def setup_config_argparse(parser:ArgumentParser):
parser.add_argument(
"-c", "--config",
dest="config_file",
default="./easywrk.toml",
help="Location of the config file, default is easywrk.toml in current working directory."
)
parser.add_argument(
"-f", "--file",
dest="env_file",
default="./.env",
help="Location of the .env file, defaults to .env file in current working directory."
)
def setup_argparse():
parser = argparse.ArgumentParser(prog='easywrk')
parser.add_argument(
"-V", "--version",
dest="show_version",
action="store_true",
help="show version number and quit"
)
subparsers = parser.add_subparsers(
title='These are common easywrk commands used in various situations',
metavar='command')
# help command
name = "help"
help_parser = subparsers.add_parser(
name,
help="print command help"
)
help_parser.set_defaults(handle=help_command)
register_cmd_help(name, help_parser)
help_parser.add_argument(
"name", nargs=1,
help="command name"
)
# init command
name = "init"
init_parser = subparsers.add_parser(
name,
help="create config file in current directory"
)
init_parser.set_defaults(handle=init_command)
register_cmd_help(name, init_parser)
# run command
name = "run"
run_parser = subparsers.add_parser(
name,
help="run single benchmark api"
)
run_parser.set_defaults(handle=run_command)
register_cmd_help(name, run_parser)
run_parser.add_argument(
"name", nargs=1,
help="api name"
)
setup_config_argparse(run_parser)
run_parser.add_argument(
"--dry-run",
dest="dry_run",
action="store_true",
default=False,
help="use request mock to response data and do not call wrk tool"
)
run_parser.add_argument(
"--no-print-response-body",
dest="print_response_body",
action="store_false",
default=False,
help="do not print response body"
)
run_parser.add_argument(
"--print-response-body",
dest="print_response_body",
action="store_true",
help="print response body"
)
run_parser.add_argument(
"--no-print-request-body",
dest="print_request_body",
action="store_false",
default=False,
help="do not print request body"
)
run_parser.add_argument(
"--print-request-body",
dest="print_request_body",
action="store_true",
help="print request body"
)
# request command
name = "request"
request_parser = subparsers.add_parser(
name,
help="request single api"
)
request_parser.set_defaults(handle=request_command)
register_cmd_help(name, request_parser)
request_parser.add_argument(
"name", nargs=1,
help="api name"
)
setup_config_argparse(request_parser)
request_parser.add_argument(
"--no-print-response-body",
dest="print_response_body",
action="store_false",
default=True,
help="do not print response body"
)
request_parser.add_argument(
"--print-response-body",
dest="print_response_body",
action="store_true",
help="print response body"
)
request_parser.add_argument(
"--no-print-request-body",
dest="print_request_body",
action="store_false",
default=True,
help="do not print request body"
)
request_parser.add_argument(
"--print-request-body",
dest="print_request_body",
action="store_true",
help="print request body"
)
request_parser.add_argument(
"--dry-run",
dest="dry_run",
action="store_true",
default=False,
help="use request mock to response data"
)
# view-config command
name = "view-config"
view_config_parser = subparsers.add_parser(
name,
help="view config file"
)
view_config_parser.set_defaults(handle=view_config_command)
register_cmd_help(name, view_config_parser)
setup_config_argparse(view_config_parser)
# list command
name = "list"
list_parser = subparsers.add_parser(
name,
help="list all api name and desc"
)
list_parser.set_defaults(handle = list_command)
register_cmd_help(name, list_parser)
setup_config_argparse(list_parser)
return parser
def cli(argv, other_argv):
logging.basicConfig(level=logging.INFO, format="%(message)s")
parser = setup_argparse()
args = parser.parse_args(argv)
if args.show_version:
print("easywrk %s" % easywrk.__version__)
return
if hasattr(args, 'handle'):
args.handle(args, other_argv)
else:
parser.print_help()
def main():
argv = sys.argv[1:]
other_argv = None
index = None
for i, arg in enumerate(argv):
if arg == '--':
index = i
break
if index is not None:
other_argv = argv[index+1:]
argv = argv[:index]
cli(argv, other_argv)
if __name__ == '__main__':
main() | 0.299822 | 0.07333 |
import itertools
from collections import UserDict
from operator import itemgetter
from typing import Dict, Iterator, Set, Tuple, TypeVar, Union, cast
from pyproj import Proj, Transformer
from shapely.geometry import LineString, MultiLineString, MultiPolygon, Polygon
from shapely.geometry.base import BaseGeometry
from shapely.ops import linemerge, transform, unary_union
from ....utils.cache import cached_property
from ....utils.geometry import reorient
from .. import Overpass
from ..core import Relation
class RelationsDict(UserDict):
    """Mapping of member role -> list of geometry chunks.

    Missing roles auto-create an empty list (defaultdict(list) behaviour),
    and multi-part line geometries are flattened on insertion.
    """

    def __missing__(self, key):
        # Create, store and return an empty list for unseen roles.
        value = self[key] = list()
        return value

    def include(self, chunk: BaseGeometry, role: str):
        """Append *chunk* under *role*, flattening MultiLineStrings.

        Uses ``.geoms`` rather than iterating the MultiLineString directly:
        direct iteration over multi-part geometries was removed in
        Shapely 2.0 (``.geoms`` also works on 1.x).
        """
        if isinstance(chunk, MultiLineString):
            for part in chunk.geoms:
                self[role].append(part)
        else:
            self[role].append(chunk)
T = TypeVar("T", bound="Boundary")
class Boundary(Relation):
    """A class to parse boundary=* relations.

    boundary=* relations are used for grouping boundaries and marking
    enclaves/exclaves.
    Reference: https://wiki.openstreetmap.org/wiki/Relation:boundary

    Tags:
    - type (boundary)
    - boundary (administrative)
    - land_area (administrative)
    - name
    - admin_level

    Relation members:
    - outer 1+
    - inner 0+
    - admin_center 0-1
    - label 0-1
    - subarea 0+
    """

    def __init__(self, json):
        super().__init__(json)
        # Accumulates line chunks per member role ("outer"/"inner"/...).
        self.known_chunks = RelationsDict()
        self.parent: Overpass = json["_parent"]
        # Member ref id -> member record (role, geometry, ...) for this relation.
        self.parsed_keys = cast(
            Dict[int, Dict[str, Union[int, str]]],
            dict(
                (elt["ref"], elt)
                for elt in self.parent.all_members[self.json["id_"]]
            ),
        )
        # Collect member geometries, then assemble the final (Multi)Polygon.
        self._build_geometry_parts()
        self._make_geometry(self.known_chunks)

    def simplify(
        self: T,
        resolution: float,
        bounds: Union[None, Tuple[float, float, float, float]] = None,
    ) -> T:
        """Simplify the boundary in place at *resolution* (projected units).

        Each chunk is projected to an equal-area projection fitted to
        *bounds* (defaults to the parent's bounds), simplified, projected
        back to lon/lat, and the geometry is rebuilt. Returns self.
        """
        if bounds is None:
            bounds = self.parent.bounds
        # Presumably (lon_min, lat_min, lon_max, lat_max) — confirm against
        # the parent Overpass.bounds definition.
        b0, b1, b2, b3 = bounds

        def simplify_shape(shape_: BaseGeometry) -> BaseGeometry:
            # Albers equal-area projection centred on the bounding box.
            proj = Proj(
                proj="aea",  # equivalent projection
                lat_1=b1,
                lat_2=b3,
                lat_0=(b1 + b3) / 2,
                lon_0=(b0 + b2) / 2,
            )
            forward = Transformer.from_proj(
                Proj("EPSG:4326"), proj, always_xy=True
            )
            backward = Transformer.from_proj(
                proj, Proj("EPSG:4326"), always_xy=True
            )
            # Simplify in projected space, then return to lon/lat.
            return transform(
                backward.transform,
                transform(forward.transform, shape_).simplify(resolution),
            )

        rel_dict = RelationsDict()
        for role, shapes in self.known_chunks.items():
            for elt in shapes:
                rel_dict.include(simplify_shape(elt), role)
        self._make_geometry(rel_dict)
        return self

    def _make_geometry(self, parts: RelationsDict) -> None:
        """Assemble outer/inner line chunks into self.shape and the json geometry."""
        outer = linemerge(parts["outer"])
        inner = linemerge(parts["inner"])
        if isinstance(outer, MultiLineString):
            # NOTE(review): direct iteration over a MultiLineString relies on
            # Shapely < 2.0; 2.0+ requires .geoms — confirm the pinned version.
            if isinstance(inner, MultiLineString):
                # Several outer rings: attach each hole to its containing ring.
                list_ = [
                    Polygon(
                        o,
                        holes=list(i for i in inner if Polygon(o).contains(i)),
                    )
                    for o in outer
                    if len(o.coords) > 2
                ]
                shape = MultiPolygon(list_)
            else:
                # A single merged hole (or none): attach it where contained.
                list_ = [
                    Polygon(
                        o,
                        holes=[inner] if Polygon(o).contains(inner) else None,
                    )
                    for o in outer
                    if len(o.coords) > 2
                ]
                shape = MultiPolygon(list_)
        else:
            if isinstance(inner, LineString):
                shape = Polygon(outer, [inner])
            else:
                shape = Polygon(outer, inner)
        # Normalize ring orientation before storing.
        self.shape = reorient(shape)
        self.json["geometry"] = self.shape

    def intersections(self) -> Iterator[Tuple[int, Set[int]]]:
        """Yield (relation id, shared member refs) for relations overlapping this one."""
        all_sets: Dict[int, Set[int]] = dict(
            (key, set(e["ref"] for e in list_))
            for key, list_ in self.parent.all_members.items()
        )
        key_i = self.json["id_"]
        set_i = all_sets[key_i]
        for key_j, set_j in all_sets.items():
            if key_i == key_j:
                continue
            intersection = set_i.intersection(set_j)
            if len(intersection) > 0:
                yield key_j, intersection

    @cached_property
    def neighbours(self) -> Set[int]:
        """Ids of relations sharing at least one member with this one."""
        return set(key for key, _value in self.intersections())

    def include(self, elements: Set[int]) -> None:
        """Merge the geometries of *elements* (member refs) into known_chunks by role."""
        # NOTE(review): groupby only groups consecutive records — members of
        # one role split by another role are merged in several passes;
        # confirm the member ordering makes this acceptable.
        for role, it in itertools.groupby(
            (self.parsed_keys[key] for key in elements), key=itemgetter("role")
        ):
            elts = list(elt["geometry"] for elt in it)
            chunk = unary_union(elts)
            self.known_chunks.include(chunk, role)

    def _build_geometry_parts(self) -> None:
        """Feed all member geometries into known_chunks, shared members first."""
        all_: Set[int] = set(self.parsed_keys.keys())
        for _id, intersection in self.intersections():
            self.include(intersection)
            all_ -= intersection
        self.include(all_)
# Forcing the inheritance for proper registration
class Land_Area(Boundary):
pass | cartes/osm/overpass/relations/boundary.py | import itertools
from collections import UserDict
from operator import itemgetter
from typing import Dict, Iterator, Set, Tuple, TypeVar, Union, cast
from pyproj import Proj, Transformer
from shapely.geometry import LineString, MultiLineString, MultiPolygon, Polygon
from shapely.geometry.base import BaseGeometry
from shapely.ops import linemerge, transform, unary_union
from ....utils.cache import cached_property
from ....utils.geometry import reorient
from .. import Overpass
from ..core import Relation
class RelationsDict(UserDict):
def __missing__(self, key):
value = self[key] = list()
return value
def include(self, chunk: BaseGeometry, role: str):
if isinstance(chunk, MultiLineString):
for c in chunk:
self[role].append(c)
else:
self[role].append(chunk)
T = TypeVar("T", bound="Boundary")
class Boundary(Relation):
"""A class to parse boundary=* relations.
boundary=* relations are used for grouping boundaries and marking
enclaves/exclaves.
Reference: https://wiki.openstreetmap.org/wiki/Relation:boundary
Tags:
- type (boundary)
- boundary (administrative)
- land_area (administrative)
- name
- admin_level
Relation members:
- outer 1+
- inner 0+
- admin_center 0-1
- label 0-1
- subarea 0+
"""
def __init__(self, json):
super().__init__(json)
self.known_chunks = RelationsDict()
self.parent: Overpass = json["_parent"]
self.parsed_keys = cast(
Dict[int, Dict[str, Union[int, str]]],
dict(
(elt["ref"], elt)
for elt in self.parent.all_members[self.json["id_"]]
),
)
self._build_geometry_parts()
self._make_geometry(self.known_chunks)
def simplify(
self: T,
resolution: float,
bounds: Union[None, Tuple[float, float, float, float]] = None,
) -> T:
if bounds is None:
bounds = self.parent.bounds
b0, b1, b2, b3 = bounds
def simplify_shape(shape_: BaseGeometry) -> BaseGeometry:
proj = Proj(
proj="aea", # equivalent projection
lat_1=b1,
lat_2=b3,
lat_0=(b1 + b3) / 2,
lon_0=(b0 + b2) / 2,
)
forward = Transformer.from_proj(
Proj("EPSG:4326"), proj, always_xy=True
)
backward = Transformer.from_proj(
proj, Proj("EPSG:4326"), always_xy=True
)
return transform(
backward.transform,
transform(forward.transform, shape_).simplify(resolution),
)
rel_dict = RelationsDict()
for role, shapes in self.known_chunks.items():
for elt in shapes:
rel_dict.include(simplify_shape(elt), role)
self._make_geometry(rel_dict)
return self
def _make_geometry(self, parts: RelationsDict):
outer = linemerge(parts["outer"])
inner = linemerge(parts["inner"])
if isinstance(outer, MultiLineString):
if isinstance(inner, MultiLineString):
list_ = [
Polygon(
o,
holes=list(i for i in inner if Polygon(o).contains(i)),
)
for o in outer
if len(o.coords) > 2
]
shape = MultiPolygon(list_)
else:
list_ = [
Polygon(
o,
holes=[inner] if Polygon(o).contains(inner) else None,
)
for o in outer
if len(o.coords) > 2
]
shape = MultiPolygon(list_)
else:
if isinstance(inner, LineString):
shape = Polygon(outer, [inner])
else:
shape = Polygon(outer, inner)
self.shape = reorient(shape)
self.json["geometry"] = self.shape
def intersections(self) -> Iterator[Tuple[int, Set[int]]]:
all_sets: Dict[int, Set[int]] = dict(
(key, set(e["ref"] for e in list_))
for key, list_ in self.parent.all_members.items()
)
key_i = self.json["id_"]
set_i = all_sets[key_i]
for key_j, set_j in all_sets.items():
if key_i == key_j:
continue
intersection = set_i.intersection(set_j)
if len(intersection) > 0:
yield key_j, intersection
@cached_property
def neighbours(self) -> Set[int]:
return set(key for key, _value in self.intersections())
def include(self, elements: Set[int]) -> None:
for role, it in itertools.groupby(
(self.parsed_keys[key] for key in elements), key=itemgetter("role")
):
elts = list(elt["geometry"] for elt in it)
chunk = unary_union(elts)
self.known_chunks.include(chunk, role)
def _build_geometry_parts(self) -> BaseGeometry:
all_: Set[int] = set(self.parsed_keys.keys())
for _id, intersection in self.intersections():
self.include(intersection)
all_ -= intersection
self.include(all_)
# Forcing the inheritance for proper registration
class Land_Area(Boundary):
pass | 0.814164 | 0.308047 |
if __name__ == "__main__":
    # Make the webapi package importable from the project root.
    from sys import path
    path.append('webapi')

    # Load .flaskenv
    from os.path import isfile
    if isfile('.flaskenv'):
        from libs.basics.system import load_export_file
        load_export_file('.flaskenv')

    # Pre-load config
    from libs.config import Config
    config = Config()

    # Install python packages (only when the internet is reachable).
    from libs.basics.network import is_up
    if is_up("8.8.8.8") or is_up("1.1.1.1"):
        from sys import executable
        from subprocess import check_call
        from os import listdir
        from os.path import isfile, join
        check_call([executable, '-m', 'pip', 'install', '--upgrade', 'pip', 'wheel'])
        check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', 'requirements.txt'])
        # Install per-plugin requirements, when a plugin ships one.
        blueprint_path = config.get('webapi', 'plugin_path')
        for blueprint in listdir(blueprint_path):
            requirements_path = join(blueprint_path, blueprint, 'requirements.txt')
            if isfile(requirements_path):
                check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', requirements_path])
        # Same for macros.
        macro_path = config.get('webapi', 'macro_path')
        for macro in listdir(macro_path):
            requirements_path = join(macro_path, macro, 'requirements.txt')
            if isfile(requirements_path):
                check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', requirements_path])
    else:
        print("Internet not reachable")

    # Create application
    from webapi import create_app
    webapi = create_app()

    # Start application: Flask dev server in development, waitress otherwise.
    from os import getenv
    if getenv('FLASK_ENV') == 'development':
        webapi.run(
            host=getenv('FLASK_RUN_HOST', '0.0.0.0'),
            port=int(getenv('FLASK_RUN_PORT', '5000')),
            # NOTE(review): bool() of any non-empty string is True, so
            # FLASK_DEBUG=0 or FLASK_DEBUG=False still enables debug — confirm.
            debug=bool(getenv('FLASK_DEBUG', True))
        )
    else:
        from waitress import serve
        serve(
            webapi,
            host=getenv('FLASK_RUN_HOST', '0.0.0.0'),
            port=int(getenv('FLASK_RUN_PORT', '5000'))
) | src/webapi.py | if __name__ == "__main__":
from sys import path
path.append('webapi')
# Load .flaskenv
from os.path import isfile
if isfile('.flaskenv'):
from libs.basics.system import load_export_file
load_export_file('.flaskenv')
# Pre-load config
from libs.config import Config
config = Config()
# Install python packages
from libs.basics.network import is_up
if is_up("8.8.8.8") or is_up("1.1.1.1"):
from sys import executable
from subprocess import check_call
from os import listdir
from os.path import isfile, join
check_call([executable, '-m', 'pip', 'install', '--upgrade', 'pip', 'wheel'])
check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', 'requirements.txt'])
blueprint_path = config.get('webapi', 'plugin_path')
for blueprint in listdir(blueprint_path):
requirements_path = join(blueprint_path, blueprint, 'requirements.txt')
if isfile(requirements_path):
check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', requirements_path])
macro_path = config.get('webapi', 'macro_path')
for macro in listdir(macro_path):
requirements_path = join(macro_path, macro, 'requirements.txt')
if isfile(requirements_path):
check_call([executable, '-m', 'pip', 'install', '--upgrade', '-r', requirements_path])
else:
print("Internet not reachable")
# Create application
from webapi import create_app
webapi = create_app()
# Start application
from os import getenv
if getenv('FLASK_ENV') == 'development':
webapi.run(
host=getenv('FLASK_RUN_HOST', '0.0.0.0'),
port=int(getenv('FLASK_RUN_PORT', '5000')),
debug=bool(getenv('FLASK_DEBUG', True))
)
else:
from waitress import serve
serve(
webapi,
host=getenv('FLASK_RUN_HOST', '0.0.0.0'),
port=int(getenv('FLASK_RUN_PORT', '5000'))
) | 0.224225 | 0.04736 |
# COMMAND ----------
# DBTITLE 1,Create Database
# MAGIC %sql
# MAGIC CREATE DATABASE IF NOT EXISTS dvd_objects
# COMMAND ----------
import pyspark
from pyspark.sql.functions import to_date, col

# LOAD CSV FILE - INCLUDES 5M ROWS
# `spark` and `display` are Databricks notebook globals (SparkSession / renderer).
actor = (spark.read.format('csv')
         .option('header', 'True')        # first row is the header
         .option("inferSchema", "true")   # sample the file to infer column types
         .load('/mnt/dvd/actor.csv'))
display(actor)
# COMMAND ----------
# DBTITLE 1,Create Actors Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.actor
# MAGIC (
# MAGIC actor_id INT,
# MAGIC first_name STRING,
# MAGIC last_name STRING,
# MAGIC last_update TIMESTAMP
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/actor.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Address Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.address
# MAGIC (
# MAGIC address_id INT,
# MAGIC address STRING,
# MAGIC address2 STRING,
# MAGIC district STRING,
# MAGIC city_id INT,
# MAGIC postal_code INT,
# MAGIC phone INT,
# MAGIC last_update TIMESTAMP
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/address.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Category Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.category
# MAGIC (
# MAGIC category_id INT
# MAGIC , name STRING
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/category.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create City Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.city
# MAGIC (
# MAGIC city_id INT
# MAGIC , city STRING
# MAGIC , country_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/city.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create County Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.country
# MAGIC (
# MAGIC country_id INT
# MAGIC , country STRING
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/country.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Customer Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.customer
# MAGIC (
# MAGIC customer_id INT
# MAGIC , store_id INT
# MAGIC , first_name STRING
# MAGIC , last_name STRING
# MAGIC , email STRING
# MAGIC , address_id INT
# MAGIC , activebool STRING
# MAGIC , create_date STRING
# MAGIC , last_update STRING
# MAGIC , active INT
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/customer.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Film Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.film
# MAGIC (
# MAGIC film_id INT
# MAGIC , title STRING
# MAGIC , description STRING
# MAGIC , release_year INT
# MAGIC , language_id INT
# MAGIC , rental_duration INT
# MAGIC , rental_rate FLOAT
# MAGIC , length INT
# MAGIC , replacement_cost FLOAT
# MAGIC , rating STRING
# MAGIC , last_update STRING
# MAGIC , special_features STRING
# MAGIC , fulltext STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/film.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Film_actor Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.film_actor
# MAGIC (
# MAGIC actor_id INT
# MAGIC , film_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/film_actor.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Film_category Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.film_category
# MAGIC (
# MAGIC film_id INT
# MAGIC , category_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/film_category.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Inventory Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.inventory
# MAGIC (
# MAGIC inventory_id INT
# MAGIC , film_id INT
# MAGIC , store_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/inventory.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Language Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.language
# MAGIC (
# MAGIC language_id INT
# MAGIC , name STRING
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/language.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Payment Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.payment
# MAGIC (
# MAGIC payment_id INT
# MAGIC , customer_id INT
# MAGIC , staff_id INT
# MAGIC , rental_id INT
# MAGIC , amount FLOAT
# MAGIC , payment_date STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/payment.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Rental Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.rental
# MAGIC (
# MAGIC rental_id INT
# MAGIC , rental_date STRING
# MAGIC , inventory_id INT
# MAGIC , customer_id INT
# MAGIC , return_date STRING
# MAGIC , staff_id INT
# MAGIC , last_update STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/rental.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Staff Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.staff
# MAGIC (
# MAGIC staff_id INT
# MAGIC , first_name STRING
# MAGIC , last_name STRING
# MAGIC , address_id INT
# MAGIC , email STRING
# MAGIC , store_id INT
# MAGIC , active STRING
# MAGIC , username STRING
# MAGIC , password STRING
# MAGIC , last_update STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/staff.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Store Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.store
# MAGIC (
# MAGIC store_id INT
# MAGIC , manager_staff_id INT
# MAGIC , address_id INT
# MAGIC , last_update STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/store.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ---------- | src/Utils/Create DVD Database.py |
# COMMAND ----------
# DBTITLE 1,Create Database
# MAGIC %sql
# MAGIC CREATE DATABASE IF NOT EXISTS dvd_objects
# COMMAND ----------
import pyspark
from pyspark.sql.functions import to_date, col
#LOAD CSV FILE - INCLUDES 5M ROWS
actor = (spark.read.format('csv')
.option('header', 'True')
.option("inferSchema", "true")
.load('/mnt/dvd/actor.csv'))
display(actor)
# COMMAND ----------
# DBTITLE 1,Create Actors Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.actor
# MAGIC (
# MAGIC actor_id INT,
# MAGIC first_name STRING,
# MAGIC last_name STRING,
# MAGIC last_update TIMESTAMP
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/actor.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Address Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.address
# MAGIC (
# MAGIC address_id INT,
# MAGIC address STRING,
# MAGIC address2 STRING,
# MAGIC district STRING,
# MAGIC city_id INT,
# MAGIC postal_code INT,
# MAGIC phone INT,
# MAGIC last_update TIMESTAMP
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/address.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Category Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.category
# MAGIC (
# MAGIC category_id INT
# MAGIC , name STRING
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/category.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create City Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.city
# MAGIC (
# MAGIC city_id INT
# MAGIC , city STRING
# MAGIC , country_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/city.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create County Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.country
# MAGIC (
# MAGIC country_id INT
# MAGIC , country STRING
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/country.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Customer Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.customer
# MAGIC (
# MAGIC customer_id INT
# MAGIC , store_id INT
# MAGIC , first_name STRING
# MAGIC , last_name STRING
# MAGIC , email STRING
# MAGIC , address_id INT
# MAGIC , activebool STRING
# MAGIC , create_date STRING
# MAGIC , last_update STRING
# MAGIC , active INT
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/customer.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Film Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.film
# MAGIC (
# MAGIC film_id INT
# MAGIC , title STRING
# MAGIC , description STRING
# MAGIC , release_year INT
# MAGIC , language_id INT
# MAGIC , rental_duration INT
# MAGIC , rental_rate FLOAT
# MAGIC , length INT
# MAGIC , replacement_cost FLOAT
# MAGIC , rating STRING
# MAGIC , last_update STRING
# MAGIC , special_features STRING
# MAGIC , fulltext STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/film.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Film_actor Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.film_actor
# MAGIC (
# MAGIC actor_id INT
# MAGIC , film_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/film_actor.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Film_category Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.film_category
# MAGIC (
# MAGIC film_id INT
# MAGIC , category_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/film_category.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Inventory Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.inventory
# MAGIC (
# MAGIC inventory_id INT
# MAGIC , film_id INT
# MAGIC , store_id INT
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/inventory.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Language Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.language
# MAGIC (
# MAGIC language_id INT
# MAGIC , name STRING
# MAGIC , last_update STRING
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/language.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Payment Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.payment
# MAGIC (
# MAGIC payment_id INT
# MAGIC , customer_id INT
# MAGIC , staff_id INT
# MAGIC , rental_id INT
# MAGIC , amount FLOAT
# MAGIC , payment_date STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/payment.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Rental Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.rental
# MAGIC (
# MAGIC rental_id INT
# MAGIC , rental_date STRING
# MAGIC , inventory_id INT
# MAGIC , customer_id INT
# MAGIC , return_date STRING
# MAGIC , staff_id INT
# MAGIC , last_update STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/rental.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Staff Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.staff
# MAGIC (
# MAGIC staff_id INT
# MAGIC , first_name STRING
# MAGIC , last_name STRING
# MAGIC , address_id INT
# MAGIC , email STRING
# MAGIC , store_id INT
# MAGIC , active STRING
# MAGIC , username STRING
# MAGIC , password STRING
# MAGIC , last_update STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/staff.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ----------
# DBTITLE 1,Create Store Table
# MAGIC %sql
# MAGIC create table if not exists
# MAGIC dvd_objects.store
# MAGIC (
# MAGIC store_id INT
# MAGIC , manager_staff_id INT
# MAGIC , address_id INT
# MAGIC , last_update STRING
# MAGIC
# MAGIC )
# MAGIC using csv
# MAGIC options (
# MAGIC path '/mnt/dvd/store.csv',
# MAGIC sep ',',
# MAGIC header true
# MAGIC )
# COMMAND ---------- | 0.35031 | 0.082254 |
from string import ascii_letters
import numpy as np
from dataclasses import dataclass, field
from crossover import single_point
from mutate import exchange2
from selection import roulette_wheel
CHAR_VOCAB = np.array(list(ascii_letters))
TARGET = "Hello World"
TARGET_LIST = list(TARGET)
TARGET_INDICES = np.array([TARGET_LIST.index(i) for i in list(TARGET)])
class evaluate_genome:
def evaluate(self, indices: list[int]) -> np.float32:
diff = np.sum(np.abs(TARGET_INDICES - indices), dtype=np.float32)
return diff
def __indices_to_string(indices: list[int]) -> str:
return "".join(CHAR_VOCAB[indices])
@dataclass
class island:
population: np.ndarray = field(init=False, repr=False)
fitness: np.ndarray = field(init=False, repr=False)
population_size: int
genome_size: int
prob_mutation: float = field(default=0.25)
prob_crossover: float = field(default=0.75)
random_state: np.random.RandomState = field(default=np.random.RandomState(0))
def __post_init__(self):
self.population = self.random_state.randint(
low=0,
high=CHAR_VOCAB.shape[0],
size=(self.population_size, self.genome_size),
)
self.fitness = np.apply_along_axis(self.evaluate, 1, self.population)
def step(self, init: bool = False):
"""Performs an iteration; producing a new population."""
parent1 = self.population[self.select()]
parent2 = self.population[self.select()]
child1, child2 = self.crossover((parent1, parent2))
if self.random_state.rand() < self.prob_mutation:
child1 = self.mutate(child1)
if self.random_state.rand() < self.prob_mutation:
child2 = self.mutate(child2)
fitness1 = self.evaluate(child1)
fitness2 = self.evaluate(child2)
return self
def make_island(*args, **kwargs) -> island:
name = "Island"
base_class = island
ops = (roulette_wheel, single_point, exchange2, evaluate_genome)
cls = type(name, (base_class, *ops), {})
return cls(**kwargs)
if __name__ == "__main__":
# configuration = (roulette_wheel, single_point, exchange2, evalute_genome)
island0 = make_island(population_size=10, genome_size=TARGET_INDICES.shape[0])
island0.step() | src/pythogen/main.py | from string import ascii_letters
import numpy as np
from dataclasses import dataclass, field
from crossover import single_point
from mutate import exchange2
from selection import roulette_wheel
CHAR_VOCAB = np.array(list(ascii_letters))
TARGET = "Hello World"
TARGET_LIST = list(TARGET)
TARGET_INDICES = np.array([TARGET_LIST.index(i) for i in list(TARGET)])
class evaluate_genome:
def evaluate(self, indices: list[int]) -> np.float32:
diff = np.sum(np.abs(TARGET_INDICES - indices), dtype=np.float32)
return diff
def __indices_to_string(indices: list[int]) -> str:
return "".join(CHAR_VOCAB[indices])
@dataclass
class island:
population: np.ndarray = field(init=False, repr=False)
fitness: np.ndarray = field(init=False, repr=False)
population_size: int
genome_size: int
prob_mutation: float = field(default=0.25)
prob_crossover: float = field(default=0.75)
random_state: np.random.RandomState = field(default=np.random.RandomState(0))
def __post_init__(self):
self.population = self.random_state.randint(
low=0,
high=CHAR_VOCAB.shape[0],
size=(self.population_size, self.genome_size),
)
self.fitness = np.apply_along_axis(self.evaluate, 1, self.population)
def step(self, init: bool = False):
"""Performs an iteration; producing a new population."""
parent1 = self.population[self.select()]
parent2 = self.population[self.select()]
child1, child2 = self.crossover((parent1, parent2))
if self.random_state.rand() < self.prob_mutation:
child1 = self.mutate(child1)
if self.random_state.rand() < self.prob_mutation:
child2 = self.mutate(child2)
fitness1 = self.evaluate(child1)
fitness2 = self.evaluate(child2)
return self
def make_island(*args, **kwargs) -> island:
name = "Island"
base_class = island
ops = (roulette_wheel, single_point, exchange2, evaluate_genome)
cls = type(name, (base_class, *ops), {})
return cls(**kwargs)
if __name__ == "__main__":
# configuration = (roulette_wheel, single_point, exchange2, evalute_genome)
island0 = make_island(population_size=10, genome_size=TARGET_INDICES.shape[0])
island0.step() | 0.738009 | 0.4133 |
from __future__ import division
from typing import Dict, List, Set, Any
import os
import shutil
import subprocess
from collections import defaultdict
import flye.utils.fasta_parser as fp
from bioclass.sam import GeneralSamFile
class RgNode(object):
    """Junction point of the repeat graph; stores the incident edges."""
    __slots__ = ("in_edges", "out_edges")

    def __init__(self):
        # incident edges, kept in insertion order
        self.in_edges = []
        self.out_edges = []

    def is_bifurcation(self):
        """True unless the node is a simple pass-through
        (exactly one incoming and exactly one outgoing edge)."""
        return not (len(self.in_edges) == 1 and len(self.out_edges) == 1)
class EdgeSequence(object):
    """One sequence underlying a repeat-graph edge, plus the barcodes
    observed on it.

    Bug fix: the original ``__init__`` contained a no-op local annotation
    statement (``barcodes: Dict[str, List[str]]``) that looked like an
    attribute declaration but had no effect; it has been removed.
    """
    __slots__ = ("edge_seq_name", "edge_seq_len", "orig_seq_id", "orig_seq_len",
                 "orig_seq_start", "orig_seq_end", "barcodes")

    def __init__(self, edge_seq_name=None, edge_seq_len=0, orig_seq_id="*",
                 orig_seq_len=0, orig_seq_start=0, orig_seq_end=0):
        self.edge_seq_name = edge_seq_name
        self.edge_seq_len = edge_seq_len
        self.orig_seq_id = orig_seq_id
        self.orig_seq_len = orig_seq_len
        self.orig_seq_start = orig_seq_start
        self.orig_seq_end = orig_seq_end
        # key = barcode string, value = list of positions where it was seen
        self.barcodes = {}

    def addbarcode(self, barcode, position):
        """Record one observation of *barcode* at *position*.

        A barcode seen several times accumulates all of its positions.
        """
        self.barcodes.setdefault(barcode, []).append(position)
class RgEdge(object):
    """Directed repeat-graph edge between two RgNodes.

    An edge and its reverse complement carry ids of equal magnitude and
    opposite sign (see RepeatGraph.complement_edge).
    """
    __slots__ = ("node_left", "node_right", "edge_id", "repetitive",
                 "self_complement", "resolved", "mean_coverage",
                 "alt_group", "edge_sequences")

    def __init__(self, node_left=None, node_right=None, edge_id=None):
        self.node_left = node_left      # RgNode, or None while detached
        self.node_right = node_right    # RgNode, or None while detached
        self.edge_id = edge_id          # signed integer id
        self.repetitive = False
        self.self_complement = False
        self.resolved = False
        self.mean_coverage = 0
        self.edge_sequences = []        # list of EdgeSequence
        self.alt_group = -1

    @property
    def barcodes(self):
        """Merge the barcode dicts of all underlying sequences.

        Returns a dict mapping barcode -> list of per-sequence position
        lists.  (Bug fix note: the previous annotation ``Dict[str, int]``
        and its docstring, which claimed equal barcode counts were summed,
        did not match the actual behavior - position lists are collected,
        nothing is counted.)
        """
        merged = {}
        for seq in self.edge_sequences:
            for barcode, positions in seq.barcodes.items():
                merged.setdefault(barcode, []).append(positions)
        return merged

    def length(self):
        """Mean length of the underlying sequences (0 when there are none)."""
        if not self.edge_sequences:
            return 0
        return sum([s.edge_seq_len
                    for s in self.edge_sequences]) // len(self.edge_sequences)

    def __repr__(self):
        return "(id={0}, len={1}, cov={2} rep={3})" \
            .format(self.edge_id, self.length(),
                    self.mean_coverage, self.repetitive)

    def adjacentEdges(self):
        """Return the set of edges sharing an endpoint with this edge,
        excluding this edge itself.

        NOTE(review): raises KeyError if this edge is not registered in
        its own endpoint lists (same as the original behavior).
        """
        edges = set()
        for e in self.node_left.in_edges:
            edges.add(e)
        for e in self.node_left.out_edges:
            edges.add(e)
        for e in self.node_right.in_edges:
            edges.add(e)
        for e in self.node_right.out_edges:
            edges.add(e)
        edges.remove(self)
        return edges
class RepeatGraph(object):
    """In-memory assembly repeat graph.

    ``edges`` maps a signed integer edge id to an RgEdge; an edge and its
    reverse complement carry ids of equal magnitude and opposite sign.
    ``edges_fasta`` maps sequence ids to their fasta strings.
    """
    __slots__ = ("nodes", "edges", "edges_fasta")

    def __init__(self, edges_fasta, edges=None, nodes=None):
        # Bug fix: the original signature used mutable default arguments
        # (edges={}, nodes=[]), so every graph constructed with the
        # defaults silently shared one dict/list.
        self.nodes = [] if nodes is None else nodes
        self.edges = {} if edges is None else edges  # key = signed edge id
        # key = edge/disjointig sequence id, value = fasta string
        self.edges_fasta = edges_fasta

    def add_node(self):
        """Create a new unconnected node, register it and return it."""
        self.nodes.append(RgNode())
        return self.nodes[-1]

    def add_edge(self, edge):
        """Register *edge* and attach it to its endpoint nodes."""
        self.edges[edge.edge_id] = edge
        edge.node_left.out_edges.append(edge)
        edge.node_right.in_edges.append(edge)

    def remove_edge(self, edge):
        """Detach *edge* from its endpoints and drop it from the graph."""
        _remove_from_list(edge.node_left.out_edges, edge)
        _remove_from_list(edge.node_right.in_edges, edge)
        del self.edges[edge.edge_id]

    def complement_edge(self, edge):
        """Return the reverse-complement edge; a self-complement edge is
        its own complement."""
        if edge.self_complement:
            return edge
        return self.edges[-edge.edge_id]

    def complement_node(self, node):
        """Return the complement of *node*, located through any incident
        edge's complement, or None for an isolated node."""
        if node.out_edges != []:
            return self.complement_edge(node.out_edges[0]).node_right
        elif node.in_edges != []:
            return self.complement_edge(node.in_edges[0]).node_left
        return None

    def get_unbranching_paths(self):
        """Collect maximal unbranching edge paths.

        Each unvisited edge is extended left and then right while the
        adjoining node is a simple pass-through (one in-, one out-edge).
        NOTE(review): seed edges are never added to visited_edges -
        confirm a seed cannot also be swept up by a later extension.
        """
        unbranching_paths = []
        visited_edges = set()
        for edge in self.edges.values():
            if edge in visited_edges:
                continue
            traversed = [edge]
            if not edge.self_complement:
                # extend to the left of the seed edge
                cur_node = edge.node_left
                while (not cur_node.is_bifurcation() and
                       len(cur_node.in_edges) > 0 and
                       cur_node.in_edges[0] not in visited_edges and
                       not cur_node.in_edges[0].self_complement):
                    traversed.append(cur_node.in_edges[0])
                    visited_edges.add(cur_node.in_edges[0])
                    cur_node = cur_node.in_edges[0].node_left
                # left extension was collected in reverse; restore order
                traversed = traversed[::-1]
                # extend to the right of the seed edge
                cur_node = edge.node_right
                while (not cur_node.is_bifurcation() and
                       len(cur_node.out_edges) > 0 and
                       cur_node.out_edges[0] not in visited_edges and
                       not cur_node.out_edges[0].self_complement):
                    traversed.append(cur_node.out_edges[0])
                    visited_edges.add(cur_node.out_edges[0])
                    cur_node = cur_node.out_edges[0].node_right
            unbranching_paths.append(traversed)
        return unbranching_paths

    def load_from_file(self, filename):
        """Parse a repeat-graph dump (see dump_to_file) into this graph."""
        id_to_node = {}
        cur_edge = None
        with open(filename, "r") as f:
            for line in f:
                tokens = line.strip().split()
                if tokens[0] == "Edge":
                    (edge_id, left_node, right_node, repetitive,
                     self_complement, resolved, mean_coverage, alt_group) = tokens[1:]
                    if left_node not in id_to_node:
                        id_to_node[left_node] = self.add_node()
                    if right_node not in id_to_node:
                        id_to_node[right_node] = self.add_node()
                    cur_edge = RgEdge(id_to_node[left_node],
                                      id_to_node[right_node],
                                      _to_signed_id(int(edge_id)))
                    cur_edge.repetitive = bool(int(repetitive))
                    cur_edge.self_complement = bool(int(self_complement))
                    cur_edge.resolved = bool(int(resolved))
                    cur_edge.mean_coverage = int(mean_coverage)
                    cur_edge.alt_group = int(alt_group)
                    self.add_edge(cur_edge)
                elif tokens[0] == "Sequence":
                    (edge_seq_name, edge_seq_len, orig_seq_id,
                     orig_seq_len, orig_seq_start, orig_seq_end) = tokens[1:]
                    # NOTE(review): only edge_seq_len is converted to int;
                    # the orig_* fields stay strings, matching the original.
                    edge_seq = EdgeSequence(edge_seq_name, int(edge_seq_len),
                                            orig_seq_id, orig_seq_len,
                                            orig_seq_start, orig_seq_end)
                    cur_edge.edge_sequences.append(edge_seq)
                else:
                    raise Exception("Error parsing " + filename)

    def dump_to_file(self, filename):
        """Serialize the graph in the text format read by load_from_file."""
        next_node_id = 0
        node_ids = {}
        for node in self.nodes:
            node_ids[node] = next_node_id
            next_node_id += 1
        with open(filename, "w") as f:
            for edge in self.edges.values():
                f.write("Edge\t{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n"
                        .format(_to_unsigned_id(edge.edge_id), node_ids[edge.node_left],
                                node_ids[edge.node_right], int(edge.repetitive),
                                int(edge.self_complement), int(edge.resolved),
                                int(edge.mean_coverage), int(edge.alt_group)))
                for seq in edge.edge_sequences:
                    f.write("\tSequence\t{0} {1} {2} {3} {4} {5}\n"
                            .format(seq.edge_seq_name, seq.edge_seq_len,
                                    seq.orig_seq_id, seq.orig_seq_len,
                                    seq.orig_seq_start, seq.orig_seq_end))

    def output_dot(self, filename):
        """Write the graph in graphviz dot format (repetitive edges red)."""
        next_node_id = 0
        node_ids = {}
        for node in self.nodes:
            node_ids[node] = next_node_id
            next_node_id += 1
        with open(filename, "w") as f:
            f.write("digraph {\nnodesep = 0.5;\n"
                    "node [shape = circle, label = \"\", height = 0.3];\n")
            for edge in self.edges.values():
                f.write("{0} -> {1} [label = \"id {2}\\l{3} {4}x\", color = \"{5}\"]\n"
                        .format(node_ids[edge.node_left], node_ids[edge.node_right],
                                edge.edge_id, edge.length(), edge.mean_coverage,
                                "red" if edge.repetitive else "black"))
            f.write("}")

    def output_svg(self, prefixname):
        """Render <prefixname>.gv and <prefixname>.svg via graphviz 'dot'.

        NOTE(review): the dot exit status is not checked (as before).
        """
        dot_file = prefixname + '.gv'
        svg_file = prefixname + '.svg'
        self.output_dot(filename=dot_file)
        cmd = ['dot', '-Tsvg', dot_file, '-o', svg_file]
        proc = subprocess.Popen(cmd)
        proc.wait()

    def output_edgefasta(self, filename):
        """Write all edge sequences to *filename* in fasta format."""
        fp.write_fasta_dict(filename=filename, fasta_dict=self.edges_fasta)

    def output_all(self, outdir):
        """Dump the graph (text dump, fasta, svg) into *outdir*.

        Uses makedirs(exist_ok=True) instead of the old try/mkdir so
        nested output paths are created as well.
        """
        os.makedirs(outdir, exist_ok=True)
        self.dump_to_file(filename=os.path.join(outdir, 'repeat_graph_dump'))
        self.output_edgefasta(filename=os.path.join(outdir, 'repeat_graph_edges.fasta'))
        self.output_svg(prefixname=os.path.join(outdir, 'repeat_graph'))

    def compute_dict(self):
        """Group barcodes by the set of edges they occur on.

        Returns a pair ``(barcode_path_d, path_barcode_d)``:
        ``barcode_path_d`` maps barcode -> {edge_id: positions}, keeping
        only barcodes seen on more than one edge; ``path_barcode_d`` maps
        the string form of such an edge-id list -> the barcodes supporting
        it.  (Bug fix note: the previous return annotation
        ``Dict[str, Set[int]]`` did not match this two-value return.)
        """
        barcode_path_d = {}  # key = barcode, value = edges supported by it
        for edge in self.edges.values():
            for barcode in edge.barcodes:
                if barcode in barcode_path_d:
                    barcode_path_d[barcode].update({edge.edge_id: edge.barcodes[barcode]})
                else:
                    barcode_path_d[barcode] = {edge.edge_id: edge.barcodes[barcode]}
        # keep only barcodes that link at least two edges
        barcode_path_d = {k: v for k, v in barcode_path_d.items() if len(v) > 1}
        path_barcode_d = {}
        for i in barcode_path_d:
            key = str(list(barcode_path_d[i].keys()))
            path_barcode_d.setdefault(key, list()).append(i)
        return barcode_path_d, path_barcode_d

    def separate_path(self, graph_path, new_seq_id, new_seq_seq):
        """
        Separates the path (and its complement) on the graph.
        First and last edges in the path are disconnected from the graph,
        and then connected by a new edge. For example,
        a path (A -> X -> Y -> ... -> Z -> B) will be transformed
        into a new unbranching path A -> N -> B,
        where N represents a new edge with the given sequence.
        The intermediate path edges remain in the graph (their mean coverage
        is modified accordingly) and they acquire the attribute 'resolved'.

        :param graph_path: list of signed edge ids forming the path
        :param new_seq_id: id under which the new sequence is stored
        :param new_seq_seq: the new edge's sequence string
        """
        def separate_one(edges_path, new_edge_id, new_edge_seq):
            # one-direction separation; called once per strand
            left_node = self.add_node()
            _remove_from_list(edges_path[0].node_right.in_edges, edges_path[0])
            edges_path[0].node_right = left_node
            left_node.in_edges.append(edges_path[0])
            path_coverage = (edges_path[0].mean_coverage +
                             edges_path[-1].mean_coverage) // 2
            for mid_edge in edges_path[1:-1]:
                mid_edge.resolved = True
                mid_edge.mean_coverage -= path_coverage
            right_node = left_node
            if len(edges_path) > 2:
                right_node = self.add_node()
                new_edge = RgEdge(left_node, right_node, new_edge_id)
                self.add_edge(new_edge)
                new_edge.mean_coverage = path_coverage
                new_edge.edge_sequences.append(new_edge_seq)
            _remove_from_list(edges_path[-1].node_left.out_edges, edges_path[-1])
            edges_path[-1].node_left = right_node
            right_node.out_edges.append(edges_path[-1])

        if len(graph_path) < 2:
            raise Exception("Path is too short")
        fwd_edges = []
        rev_edges = []
        for e in graph_path:
            if e not in self.edges:
                raise Exception("Nonexistent edge")
            fwd_edges.append(self.edges[e])
            rev_edges.append(self.complement_edge(self.edges[e]))
        rev_edges = rev_edges[::-1]
        new_edge_seq = EdgeSequence("+" + new_seq_id, len(new_seq_seq))
        compl_edge_seq = EdgeSequence("-" + new_seq_id, len(new_seq_seq))
        self.edges_fasta[new_seq_id] = new_seq_seq
        new_edge_id = max(self.edges.keys()) + 1
        separate_one(fwd_edges, new_edge_id, new_edge_seq)
        separate_one(rev_edges, -new_edge_id, compl_edge_seq)

    def find_edgeid_string(self, id):
        """Return the fasta strings of all sequences on edges whose
        edge_id equals *id*.

        NOTE(review): the parameter name shadows the builtin ``id``;
        kept unchanged for keyword-argument compatibility.
        """
        ans = []
        for edge in self.edges.values():
            if edge.edge_id == id:
                for seq in edge.edge_sequences:
                    # sequence names carry a '+'/'-' strand prefix
                    ans.append(self.edges_fasta[seq.edge_seq_name[1:]])
        return ans

    def updatebarcodes(self, samfile):
        """Attach barcodes parsed from *samfile* to edge sequences.

        Read names are expected to embed the barcode after '#'; reference
        names to look like '<prefix>_<edge_id>_<seq_index>' - TODO confirm
        against the aligner that produced the SAM.  Both the forward and
        the complement edge (if present) receive the barcode.
        """
        for sam in GeneralSamFile(path=samfile):
            barcode = sam.query_name.split('#')[1]
            edge_id = int(sam.ref_name.split('_')[1])
            edge_seq_id = int(sam.ref_name.split('_')[2])
            if edge_id in self.edges:
                self.edges[edge_id].edge_sequences[edge_seq_id].\
                    addbarcode(barcode=barcode, position=sam.position)
            if -edge_id in self.edges:
                self.edges[-edge_id].edge_sequences[edge_seq_id].\
                    addbarcode(barcode=barcode, position=sam.position)
def _remove_from_list(lst, elem):
lst = [x for x in lst if x != elem]
def _to_signed_id(unsigned_id):
return -(unsigned_id + 1) // 2 if unsigned_id % 2 else unsigned_id // 2 + 1
def _to_unsigned_id(signed_id):
unsigned_id = abs(signed_id) * 2 - 2
return unsigned_id + int(signed_id < 0) | flye/repeat_graph/repeat_graph.py | from __future__ import division
from typing import Dict, List, Set, Any
import os
import shutil
import subprocess
from collections import defaultdict
import flye.utils.fasta_parser as fp
from bioclass.sam import GeneralSamFile
class RgNode(object):
    """A repeat-graph junction; tracks edges entering and leaving it."""
    __slots__ = ("in_edges", "out_edges")

    def __init__(self):
        self.in_edges = []
        self.out_edges = []

    def is_bifurcation(self):
        """A node is a bifurcation unless it has exactly one incoming and
        one outgoing edge."""
        simple = len(self.in_edges) == 1 and len(self.out_edges) == 1
        return not simple
class EdgeSequence(object):
    """A sequence underlying a repeat-graph edge with its observed barcodes.

    Bug fix: removed the no-op local annotation statement
    (``barcodes: Dict[str, List[str]]``) from ``__init__`` - it looked
    like an attribute declaration but had no runtime effect.
    """
    __slots__ = ("edge_seq_name", "edge_seq_len", "orig_seq_id", "orig_seq_len",
                 "orig_seq_start", "orig_seq_end", "barcodes")

    def __init__(self, edge_seq_name=None, edge_seq_len=0, orig_seq_id="*",
                 orig_seq_len=0, orig_seq_start=0, orig_seq_end=0):
        self.edge_seq_name = edge_seq_name
        self.edge_seq_len = edge_seq_len
        self.orig_seq_id = orig_seq_id
        self.orig_seq_len = orig_seq_len
        self.orig_seq_start = orig_seq_start
        self.orig_seq_end = orig_seq_end
        # key = barcode string, value = list of positions where it was seen
        self.barcodes = {}

    def addbarcode(self, barcode, position):
        """Record one observation of *barcode* at *position*; repeated
        barcodes accumulate every position."""
        self.barcodes.setdefault(barcode, []).append(position)
class RgEdge(object):
    """Directed repeat-graph edge; complements share |edge_id| with
    opposite sign."""
    __slots__ = ("node_left", "node_right", "edge_id", "repetitive",
                 "self_complement", "resolved", "mean_coverage",
                 "alt_group", "edge_sequences")

    def __init__(self, node_left=None, node_right=None, edge_id=None):
        self.node_left = node_left      # RgNode, or None while detached
        self.node_right = node_right    # RgNode, or None while detached
        self.edge_id = edge_id          # signed integer id
        self.repetitive = False
        self.self_complement = False
        self.resolved = False
        self.mean_coverage = 0
        self.edge_sequences = []        # list of EdgeSequence
        self.alt_group = -1

    @property
    def barcodes(self):
        """Merge barcode dicts of all underlying sequences into
        barcode -> list of per-sequence position lists.

        (Bug fix note: the old ``Dict[str, int]`` annotation and the
        docstring claiming counts were summed were both wrong.)
        """
        merged = {}
        for seq in self.edge_sequences:
            for barcode, positions in seq.barcodes.items():
                merged.setdefault(barcode, []).append(positions)
        return merged

    def length(self):
        """Mean length of the underlying sequences (0 if none)."""
        if not self.edge_sequences:
            return 0
        return sum([s.edge_seq_len
                    for s in self.edge_sequences]) // len(self.edge_sequences)

    def __repr__(self):
        return "(id={0}, len={1}, cov={2} rep={3})" \
            .format(self.edge_id, self.length(),
                    self.mean_coverage, self.repetitive)

    def adjacentEdges(self):
        """Return all edges sharing an endpoint with this edge, minus
        this edge itself (KeyError if the edge is not registered in its
        own endpoints, as before)."""
        edges = set()
        for e in self.node_left.in_edges:
            edges.add(e)
        for e in self.node_left.out_edges:
            edges.add(e)
        for e in self.node_right.in_edges:
            edges.add(e)
        for e in self.node_right.out_edges:
            edges.add(e)
        edges.remove(self)
        return edges
class RepeatGraph(object):
    """Assembly repeat graph: nodes joined by signed, directed edges.

    Complement edges carry negated ids; `edges_fasta` maps edge sequence
    names (without the +/- sign prefix) to their FASTA strings.
    """
    __slots__ = ("nodes", "edges", "edges_fasta")

    def __init__(self, edges_fasta: Dict[str, str],
                 edges: Dict[int, RgEdge] = None,
                 nodes: List[RgNode] = None):
        # BUG FIX: the previous version used mutable default arguments
        # ({} and []), so every graph created with the defaults shared
        # the same dict and list across instances.
        self.nodes = nodes if nodes is not None else []
        self.edges = edges if edges is not None else {}  # key = signed edge id
        # key = edge/disjointig name, value = FASTA sequence string
        self.edges_fasta = edges_fasta

    def add_node(self) -> RgNode:
        """Create a fresh node, append it to the graph and return it."""
        self.nodes.append(RgNode())
        return self.nodes[-1]

    def add_edge(self, edge):
        """Register `edge` and hook it into its endpoint nodes."""
        self.edges[edge.edge_id] = edge
        edge.node_left.out_edges.append(edge)
        edge.node_right.in_edges.append(edge)

    def remove_edge(self, edge):
        """Detach `edge` from its endpoint nodes and drop it from the graph."""
        _remove_from_list(edge.node_left.out_edges, edge)
        _remove_from_list(edge.node_right.in_edges, edge)
        del self.edges[edge.edge_id]

    def complement_edge(self, edge: RgEdge) -> RgEdge:
        """Reverse-complement edge (negated id); self-complement edges map to themselves."""
        if edge.self_complement:
            return edge
        return self.edges[-edge.edge_id]

    def complement_node(self, node):
        """Return the complement node, or None for an isolated node."""
        if node.out_edges != []:
            return self.complement_edge(node.out_edges[0]).node_right
        elif node.in_edges != []:
            return self.complement_edge(node.in_edges[0]).node_left
        return None

    def get_unbranching_paths(self):
        """Collect maximal unbranching paths as lists of edges.

        Starting from each unvisited edge, the path is extended left and
        right while the intermediate nodes are simple (not bifurcations)
        and the next edge is neither visited nor self-complement.
        """
        unbranching_paths = []
        visited_edges = set()
        for edge in self.edges.values():
            if edge in visited_edges:
                continue
            traversed = [edge]
            if not edge.self_complement:
                # extend to the left
                cur_node = edge.node_left
                while (not cur_node.is_bifurcation() and
                       len(cur_node.in_edges) > 0 and
                       cur_node.in_edges[0] not in visited_edges and
                       not cur_node.in_edges[0].self_complement):
                    traversed.append(cur_node.in_edges[0])
                    visited_edges.add(cur_node.in_edges[0])
                    cur_node = cur_node.in_edges[0].node_left
                traversed = traversed[::-1]
                # extend to the right
                cur_node = edge.node_right
                while (not cur_node.is_bifurcation() and
                       len(cur_node.out_edges) > 0 and
                       cur_node.out_edges[0] not in visited_edges and
                       not cur_node.out_edges[0].self_complement):
                    traversed.append(cur_node.out_edges[0])
                    visited_edges.add(cur_node.out_edges[0])
                    cur_node = cur_node.out_edges[0].node_right
            unbranching_paths.append(traversed)
        return unbranching_paths

    def load_from_file(self, filename):
        """Parse a repeat-graph dump file (format written by dump_to_file).

        Raises Exception on an unrecognized record type.
        """
        id_to_node = {}
        cur_edge = None
        with open(filename, "r") as f:
            for line in f:
                tokens = line.strip().split()
                if tokens[0] == "Edge":
                    (edge_id, left_node, right_node, repetitive,
                     self_complement, resolved, mean_coverage, alt_group) = tokens[1:]
                    if left_node not in id_to_node:
                        id_to_node[left_node] = self.add_node()
                    if right_node not in id_to_node:
                        id_to_node[right_node] = self.add_node()
                    cur_edge = RgEdge(id_to_node[left_node],
                                      id_to_node[right_node],
                                      _to_signed_id(int(edge_id)))
                    cur_edge.repetitive = bool(int(repetitive))
                    cur_edge.self_complement = bool(int(self_complement))
                    cur_edge.resolved = bool(int(resolved))
                    cur_edge.mean_coverage = int(mean_coverage)
                    cur_edge.alt_group = int(alt_group)
                    self.add_edge(cur_edge)
                elif tokens[0] == "Sequence":
                    (edge_seq_name, edge_seq_len, orig_seq_id,
                     orig_seq_len, orig_seq_start, orig_seq_end) = tokens[1:]
                    # NOTE: only edge_seq_len is converted to int; the
                    # orig_* fields are passed through as strings (as before).
                    edge_seq = EdgeSequence(edge_seq_name, int(edge_seq_len),
                                            orig_seq_id, orig_seq_len,
                                            orig_seq_start, orig_seq_end)
                    cur_edge.edge_sequences.append(edge_seq)
                else:
                    raise Exception("Error parsing " + filename)

    def dump_to_file(self, filename):
        """Serialize the graph to the text dump format read by load_from_file."""
        node_ids = {node: node_id for node_id, node in enumerate(self.nodes)}
        with open(filename, "w") as f:
            for edge in self.edges.values():
                f.write("Edge\t{0}\t{1}\t{2}\t{3}\t{4}\t{5}\t{6}\t{7}\n"
                        .format(_to_unsigned_id(edge.edge_id), node_ids[edge.node_left],
                                node_ids[edge.node_right], int(edge.repetitive),
                                int(edge.self_complement), int(edge.resolved),
                                int(edge.mean_coverage), int(edge.alt_group)))
                for seq in edge.edge_sequences:
                    f.write("\tSequence\t{0} {1} {2} {3} {4} {5}\n"
                            .format(seq.edge_seq_name, seq.edge_seq_len,
                                    seq.orig_seq_id, seq.orig_seq_len,
                                    seq.orig_seq_start, seq.orig_seq_end))

    def output_dot(self, filename):
        """Write a graphviz .dot rendering (repetitive edges in red)."""
        node_ids = {node: node_id for node_id, node in enumerate(self.nodes)}
        with open(filename, "w") as f:
            f.write("digraph {\nnodesep = 0.5;\n"
                    "node [shape = circle, label = \"\", height = 0.3];\n")
            for edge in self.edges.values():
                f.write("{0} -> {1} [label = \"id {2}\\l{3} {4}x\", color = \"{5}\"]\n"
                        .format(node_ids[edge.node_left], node_ids[edge.node_right],
                                edge.edge_id, edge.length(), edge.mean_coverage,
                                "red" if edge.repetitive else "black"))
            f.write("}")

    def output_svg(self, prefixname: str):
        """Render the graph to SVG via the graphviz 'dot' tool (must be on PATH)."""
        dot_file = prefixname + '.gv'
        svg_file = prefixname + '.svg'
        self.output_dot(filename=dot_file)
        cmd = ['dot', '-Tsvg', dot_file, '-o', svg_file]
        a = subprocess.Popen(cmd)
        a.wait()

    def output_edgefasta(self, filename):
        """Write all edge sequences to a FASTA file."""
        fp.write_fasta_dict(filename=filename, fasta_dict=self.edges_fasta)

    def output_all(self, outdir):
        """Dump the graph, its FASTA and an SVG rendering into `outdir`."""
        try:
            os.mkdir(outdir)
        except FileExistsError:
            pass
        self.dump_to_file(filename=os.path.join(outdir, 'repeat_graph_dump'))
        self.output_edgefasta(filename=os.path.join(outdir, 'repeat_graph_edges.fasta'))
        self.output_svg(prefixname=os.path.join(outdir, 'repeat_graph'))

    def compute_dict(self):
        """Map shared barcodes to the edges that carry them.

        Returns a pair (barcode_path_d, path_barcode_d):
          * barcode_path_d: barcode -> {edge_id: per-edge barcode payload},
            restricted to barcodes seen on more than one edge;
          * path_barcode_d: str(list of edge ids) -> list of barcodes whose
            edge set is exactly that list.
        (The previous annotation claimed Dict[str, Set[int]]; the function
        has always returned this 2-tuple.)
        """
        barcode_path_d = {}  # barcode -> {edge_id: payload}
        for edge in self.edges.values():
            edge_barcodes = edge.barcodes  # property: compute once per edge
            for barcode, payload in edge_barcodes.items():
                barcode_path_d.setdefault(barcode, {})[edge.edge_id] = payload
        # keep only barcodes shared by at least two edges
        barcode_path_d = {k: v for k, v in barcode_path_d.items() if len(v) > 1}
        path_barcode_d = {}
        for barcode in barcode_path_d:
            key = str(list(barcode_path_d[barcode].keys()))
            path_barcode_d.setdefault(key, list()).append(barcode)
        return barcode_path_d, path_barcode_d

    def separate_path(self, graph_path: List[int], new_seq_id: str, new_seq_seq: str):
        """Separate the path (and its complement) on the graph.

        First and last edges in the path are disconnected from the graph,
        and then connected by a new edge: a path (A -> X -> ... -> Z -> B)
        becomes the unbranching path A -> N -> B, where N is a new edge
        carrying the given sequence.  Intermediate edges remain in the
        graph with reduced mean coverage and are marked `resolved`.

        :param graph_path: list of signed edge ids forming the path
        :raises Exception: if the path is shorter than 2 edges or
            references a nonexistent edge
        """
        def separate_one(edges_path, new_edge_id, new_edge_seq):
            # Detach the right end of the first edge onto a fresh node.
            left_node = self.add_node()
            _remove_from_list(edges_path[0].node_right.in_edges, edges_path[0])
            edges_path[0].node_right = left_node
            left_node.in_edges.append(edges_path[0])
            path_coverage = (edges_path[0].mean_coverage +
                             edges_path[-1].mean_coverage) // 2
            for mid_edge in edges_path[1:-1]:
                mid_edge.resolved = True
                mid_edge.mean_coverage -= path_coverage
            # For a 2-edge path the new edge is a loop on one node.
            right_node = left_node
            if len(edges_path) > 2:
                right_node = self.add_node()
            new_edge = RgEdge(left_node, right_node, new_edge_id)
            self.add_edge(new_edge)
            new_edge.mean_coverage = path_coverage
            new_edge.edge_sequences.append(new_edge_seq)
            # Re-attach the left end of the last edge to the new node.
            _remove_from_list(edges_path[-1].node_left.out_edges, edges_path[-1])
            edges_path[-1].node_left = right_node
            right_node.out_edges.append(edges_path[-1])

        if len(graph_path) < 2:
            raise Exception("Path is too short")
        fwd_edges = []
        rev_edges = []
        for e in graph_path:
            if e not in self.edges:
                raise Exception("Nonexistent edge")
            fwd_edges.append(self.edges[e])
            rev_edges.append(self.complement_edge(self.edges[e]))
        rev_edges = rev_edges[::-1]
        new_edge_seq = EdgeSequence("+" + new_seq_id, len(new_seq_seq))
        compl_edge_seq = EdgeSequence("-" + new_seq_id, len(new_seq_seq))
        self.edges_fasta[new_seq_id] = new_seq_seq
        new_edge_id = max(self.edges.keys()) + 1
        separate_one(fwd_edges, new_edge_id, new_edge_seq)
        separate_one(rev_edges, -new_edge_id, compl_edge_seq)

    def find_edgeid_string(self, id: int) -> List[str]:
        """Return the FASTA strings of all sequences on the edge with this id.

        (The parameter name `id` shadows the builtin; kept unchanged for
        backward compatibility with keyword callers.)
        """
        ans = []
        for edge in self.edges.values():
            if edge.edge_id == id:
                for seq in edge.edge_sequences:
                    # strip the leading '+'/'-' sign from the sequence name
                    ans.append(self.edges_fasta[seq.edge_seq_name[1:]])
        return ans

    def updatebarcodes(self, samfile: str):
        """Add barcode observations from a SAM file to the edge sequences.

        Read names are expected as '<name>#<barcode>' and reference names
        as '<x>_<edge_id>_<seq_index>'; both the edge and its complement
        (if present) receive the barcode.
        """
        for sam in GeneralSamFile(path=samfile):
            barcode = sam.query_name.split('#')[1]
            edge_id = int(sam.ref_name.split('_')[1])
            edge_seq_id = int(sam.ref_name.split('_')[2])
            if edge_id in self.edges:
                self.edges[edge_id].edge_sequences[edge_seq_id].addbarcode(
                    barcode=barcode, position=sam.position)
            if -edge_id in self.edges:
                self.edges[-edge_id].edge_sequences[edge_seq_id].addbarcode(
                    barcode=barcode, position=sam.position)
def _remove_from_list(lst, elem):
    """Remove all occurrences of `elem` from `lst` in place.

    BUG FIX: the previous version rebound the local name `lst` to a new
    list, leaving the caller's list unchanged (a no-op); slice assignment
    mutates the caller's list as intended.
    """
    lst[:] = [x for x in lst if x != elem]
def _to_signed_id(unsigned_id):
    """Unpack an even/odd-encoded unsigned id into its signed form.

    Even ids map to positive values (0 -> 1, 2 -> 2, ...), odd ids map to
    negative values (1 -> -1, 3 -> -2, ...).
    """
    if unsigned_id % 2:
        return -(unsigned_id + 1) // 2
    return unsigned_id // 2 + 1
def _to_unsigned_id(signed_id):
    """Pack a signed id into the even/odd unsigned encoding (inverse of
    _to_signed_id): positive -> even, negative -> odd.

    (Also removes the table-delimiter garbage that had been fused onto
    the return line, which made it a syntax error.)
    """
    unsigned_id = abs(signed_id) * 2 - 2
    return unsigned_id + int(signed_id < 0)
# Modules
import os
import argparse
from collections import defaultdict
from Bio import SeqIO
import numpy as np
from hivwholeseq.datasets import MiSeq_runs
from hivwholeseq.sequencing.filenames import get_consensus_filename, get_merged_consensus_filename, \
get_consensi_alignment_dataset_filename
from hivwholeseq.sequencing.samples import samples
from hivwholeseq.utils.mapping import align_muscle
# Functions
def align_consensi_dataset(dataset, adaIDs, fragments, VERBOSE=0):
    '''Align consensi from different samples in a dataset

    Returns a dict mapping fragment name -> MUSCLE alignment of the
    consensus sequences found for that fragment across the given adapters.
    NOTE(review): legacy Python 2 code (print statements, dict.iteritems).
    '''
    data_folder = dataset['folder']
    # Collect consensi
    if VERBOSE >= 1:
        print 'Collecting consensi...',
    consensi = defaultdict(dict)
    for adaID in adaIDs:
        samplename = dataset['samples'][dataset['adapters'].index(adaID)]
        fragments_sample = samples[samplename]['fragments']
        for frag in fragments_sample:
            frag_gen = frag[:2]  # generic fragment name, e.g. 'F1'
            if frag_gen not in fragments:
                continue
            # Only collect consensi whose files actually exist on disk
            con_fn = get_consensus_filename(data_folder, adaID, frag_gen)
            if os.path.isfile(con_fn):
                con = SeqIO.read(con_fn, 'fasta')
                consensi[frag_gen][adaID] = con
        if 'genomewide' in fragments:
            frag_gens = [frag[:2] for frag in fragments_sample]
            con_gw_fn = get_merged_consensus_filename(data_folder, adaID, frag_gens)
            if os.path.isfile(con_gw_fn):
                con = SeqIO.read(con_gw_fn, 'fasta')
                consensi['genomewide'][adaID] = con
    if VERBOSE >= 1:
        print 'done.'
        print 'Aligning...',
    # Align
    alis = {}
    for (frag, con_dict) in consensi.iteritems():
        if VERBOSE >= 2:
            print frag,
        ali_frag = align_muscle(*(con_dict.values()))
        alis[frag] = ali_frag
    if VERBOSE >= 1:
        print 'done.'
    return alis
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Align consensi from a dataset')
parser.add_argument('--run', required=True,
help='Seq run to analyze (e.g. Tue28)')
parser.add_argument('--adaIDs', nargs='*',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--fragments', nargs='*',
help='Fragment to map (e.g. F1 F6)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-3]')
args = parser.parse_args()
seq_run = args.run
adaIDs = args.adaIDs
fragments = args.fragments
VERBOSE = args.verbose
# Specify the dataset
dataset = MiSeq_runs[seq_run]
data_folder = dataset['folder']
# If the script is called with no adaID, iterate over all
if not adaIDs:
adaIDs = dataset['adapters']
if VERBOSE >= 3:
print 'adaIDs', adaIDs
if fragments is None:
fragments = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'genomewide']
# Iterate over all requested samples
alis = align_consensi_dataset(dataset, adaIDs, fragments, VERBOSE=VERBOSE)
for (frag, ali) in alis.iteritems():
SeqIO.write(ali, get_consensi_alignment_dataset_filename(data_folder, frag),
'fasta') | hivwholeseq/sequencing/align_consensi_dataset.py | # Modules
import os
import argparse
from collections import defaultdict
from Bio import SeqIO
import numpy as np
from hivwholeseq.datasets import MiSeq_runs
from hivwholeseq.sequencing.filenames import get_consensus_filename, get_merged_consensus_filename, \
get_consensi_alignment_dataset_filename
from hivwholeseq.sequencing.samples import samples
from hivwholeseq.utils.mapping import align_muscle
# Functions
def align_consensi_dataset(dataset, adaIDs, fragments, VERBOSE=0):
    '''Align consensi from different samples in a dataset

    Returns a dict mapping fragment name -> MUSCLE alignment of the
    consensus sequences found for that fragment across the given adapters.
    NOTE(review): legacy Python 2 code (print statements, dict.iteritems).
    '''
    data_folder = dataset['folder']
    # Collect consensi
    if VERBOSE >= 1:
        print 'Collecting consensi...',
    consensi = defaultdict(dict)
    for adaID in adaIDs:
        samplename = dataset['samples'][dataset['adapters'].index(adaID)]
        fragments_sample = samples[samplename]['fragments']
        for frag in fragments_sample:
            frag_gen = frag[:2]  # generic fragment name, e.g. 'F1'
            if frag_gen not in fragments:
                continue
            # Only collect consensi whose files actually exist on disk
            con_fn = get_consensus_filename(data_folder, adaID, frag_gen)
            if os.path.isfile(con_fn):
                con = SeqIO.read(con_fn, 'fasta')
                consensi[frag_gen][adaID] = con
        if 'genomewide' in fragments:
            frag_gens = [frag[:2] for frag in fragments_sample]
            con_gw_fn = get_merged_consensus_filename(data_folder, adaID, frag_gens)
            if os.path.isfile(con_gw_fn):
                con = SeqIO.read(con_gw_fn, 'fasta')
                consensi['genomewide'][adaID] = con
    if VERBOSE >= 1:
        print 'done.'
        print 'Aligning...',
    # Align
    alis = {}
    for (frag, con_dict) in consensi.iteritems():
        if VERBOSE >= 2:
            print frag,
        ali_frag = align_muscle(*(con_dict.values()))
        alis[frag] = ali_frag
    if VERBOSE >= 1:
        print 'done.'
    return alis
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Align consensi from a dataset')
parser.add_argument('--run', required=True,
help='Seq run to analyze (e.g. Tue28)')
parser.add_argument('--adaIDs', nargs='*',
help='Adapter IDs to analyze (e.g. TS2)')
parser.add_argument('--fragments', nargs='*',
help='Fragment to map (e.g. F1 F6)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-3]')
args = parser.parse_args()
seq_run = args.run
adaIDs = args.adaIDs
fragments = args.fragments
VERBOSE = args.verbose
# Specify the dataset
dataset = MiSeq_runs[seq_run]
data_folder = dataset['folder']
# If the script is called with no adaID, iterate over all
if not adaIDs:
adaIDs = dataset['adapters']
if VERBOSE >= 3:
print 'adaIDs', adaIDs
if fragments is None:
fragments = ['F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'genomewide']
# Iterate over all requested samples
alis = align_consensi_dataset(dataset, adaIDs, fragments, VERBOSE=VERBOSE)
for (frag, ali) in alis.iteritems():
SeqIO.write(ali, get_consensi_alignment_dataset_filename(data_folder, frag),
'fasta') | 0.294519 | 0.206954 |
from pysignfe.xml_sped import *
from pysignfe.nfe.manual_500 import ESQUEMA_ATUAL
from .evento_base import DetEvento, InfEventoEnviado, Evento, EnvEvento, InfEventoRecebido, RetEvento, RetEnvEvento, ProcEventoNFe
import os
# Directory of this module: used below to locate the XSD schema files.
DIRNAME = os.path.dirname(__file__)
# Mandatory (unaccented) legal usage text that accompanies every CCe event.
CONDICAO_USO = u'A Carta de Correcao e disciplinada pelo paragrafo 1o-A do art. 7o do Convenio S/N, de 15 de dezembro de 1970 e pode ser utilizada para regularizacao de erro ocorrido na emissao de documento fiscal, desde que o erro nao esteja relacionado com: I - as variaveis que determinam o valor do imposto tais como: base de calculo, aliquota, diferenca de preco, quantidade, valor da operacao ou da prestacao; II - a correcao de dados cadastrais que implique mudanca do remetente ou do destinatario; III - a data de emissao ou de saida.'
class DetEventoCCe(DetEvento):
    """detEvento payload for the NF-e CCe (correction letter) event."""
    def __init__(self):
        super(DetEventoCCe, self).__init__()
        self.versao = TagDecimal(nome=u'detEvento' , codigo=u'HP18', propriedade=u'versao', valor=u'1.00', raiz=u'/')
        self.descEvento = TagCaracter(nome=u'descEvento', codigo=u'HP19', tamanho=[ 5, 60, 5], raiz=u'//infEvento/detEvento',valor=u'Carta de Correcao')
        self.xCorrecao = TagCaracter(nome=u'xCorrecao', codigo=u'HP20', tamanho=[15, 1000, 15], raiz=u'//infEvento/detEvento')
        self.xCondUso = TagCaracter(nome=u'xCondUso', codigo=u'HP20a', raiz=u'//detEvento', valor=CONDICAO_USO)

    def get_xml(self):
        # Serialize child tags in schema order, then close the element.
        xml = XMLNFe.get_xml(self)
        xml += self.versao.xml
        xml += self.descEvento.xml
        xml += self.xCorrecao.xml
        xml += self.xCondUso.xml
        xml += '</detEvento>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.descEvento.xml = arquivo
            self.xCorrecao.xml = arquivo
            self.xCondUso.xml = arquivo

    xml = property(get_xml, set_xml)


class InfEventoEnviadoCCe(InfEventoEnviado):
    """infEvento specialized with the CCe payload and event type 110110."""
    def __init__(self):
        super(InfEventoEnviadoCCe, self).__init__()
        self.detEvento = DetEventoCCe()
        self.tpEvento.valor = '110110'
class EventoCCe(Evento):
    """Single CCe (correction letter) event with its XSD schema location."""
    def __init__(self):
        super(EventoCCe, self).__init__()
        # BUG FIX: previously instantiated the generic InfEventoEnviado,
        # which lacks the CCe-specific detEvento payload and the tpEvento
        # code 110110 that InfEventoEnviadoCCe (otherwise unused) sets.
        self.infEvento = InfEventoEnviadoCCe()
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'CCe_v1.00.xsd'
class EnvEventoCCe(EnvEvento):
    """Batch envelope (envEvento) carrying one or more CCe events."""
    def __init__(self):
        super(EnvEventoCCe, self).__init__()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'envCCe_v1.00.xsd'

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        xml += self.idLote.xml
        # Inner events are embedded without their own XML declaration
        for ev in self.evento:
            xml += tira_abertura(ev.xml)
        xml += u'</envEvento>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.idLote.xml = arquivo
            self.evento = self.le_grupo('//envEvento/evento', EventoCCe)

    xml = property(get_xml, set_xml)


class InfEventoRecebidoCCe(InfEventoRecebido):
    """CCe specialization of the received infEvento (no extra fields)."""
    def __init__(self):
        super(InfEventoRecebidoCCe, self).__init__()


class RetEventoCCe(RetEvento):
    """CCe specialization of a single event response (no extra fields)."""
    def __init__(self):
        super(RetEventoCCe, self).__init__()


class RetEnvEventoCCe(RetEnvEvento):
    """Batch response (retEnvEvento) for a CCe envelope."""
    def __init__(self):
        super(RetEnvEventoCCe, self).__init__()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'retEnvCCe_v1.00.xsd'

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        xml += self.idLote.xml
        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.cOrgao.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        for r in self.retEvento:
            xml += tira_abertura(r.xml)
        xml += u'</retEnvEvento>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.idLote.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.cOrgao.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo
            self.retEvento = self.le_grupo('//retEnvEvento/retEvento', RetEventoCCe)
            # Build the per-NFe-key lookup of responses
            for ret in self.retEvento:
                self.dic_retEvento[ret.infEvento.chNFe.valor] = ret

    xml = property(get_xml, set_xml)
class ProcEventoNFeCCe(ProcEventoNFe):
    """Event + response pair (procEventoNFe) for a CCe.

    (FIX: removed dataset-table delimiter garbage fused onto the final
    line; code otherwise unchanged.)
    """
    def __init__(self):
        super(ProcEventoNFeCCe, self).__init__()
        self.evento = EventoCCe()
        self.retEvento = RetEventoCCe()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'procCCeNFe_v1.00.xsd'
from pysignfe.xml_sped import *
from pysignfe.nfe.manual_500 import ESQUEMA_ATUAL
from .evento_base import DetEvento, InfEventoEnviado, Evento, EnvEvento, InfEventoRecebido, RetEvento, RetEnvEvento, ProcEventoNFe
import os
# Directory of this module: used below to locate the XSD schema files.
DIRNAME = os.path.dirname(__file__)
# Mandatory (unaccented) legal usage text that accompanies every CCe event.
CONDICAO_USO = u'A Carta de Correcao e disciplinada pelo paragrafo 1o-A do art. 7o do Convenio S/N, de 15 de dezembro de 1970 e pode ser utilizada para regularizacao de erro ocorrido na emissao de documento fiscal, desde que o erro nao esteja relacionado com: I - as variaveis que determinam o valor do imposto tais como: base de calculo, aliquota, diferenca de preco, quantidade, valor da operacao ou da prestacao; II - a correcao de dados cadastrais que implique mudanca do remetente ou do destinatario; III - a data de emissao ou de saida.'
class DetEventoCCe(DetEvento):
    """detEvento payload for the NF-e CCe (correction letter) event."""
    def __init__(self):
        super(DetEventoCCe, self).__init__()
        self.versao = TagDecimal(nome=u'detEvento' , codigo=u'HP18', propriedade=u'versao', valor=u'1.00', raiz=u'/')
        self.descEvento = TagCaracter(nome=u'descEvento', codigo=u'HP19', tamanho=[ 5, 60, 5], raiz=u'//infEvento/detEvento',valor=u'Carta de Correcao')
        self.xCorrecao = TagCaracter(nome=u'xCorrecao', codigo=u'HP20', tamanho=[15, 1000, 15], raiz=u'//infEvento/detEvento')
        self.xCondUso = TagCaracter(nome=u'xCondUso', codigo=u'HP20a', raiz=u'//detEvento', valor=CONDICAO_USO)

    def get_xml(self):
        # Serialize child tags in schema order, then close the element.
        xml = XMLNFe.get_xml(self)
        xml += self.versao.xml
        xml += self.descEvento.xml
        xml += self.xCorrecao.xml
        xml += self.xCondUso.xml
        xml += '</detEvento>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.descEvento.xml = arquivo
            self.xCorrecao.xml = arquivo
            self.xCondUso.xml = arquivo

    xml = property(get_xml, set_xml)


class InfEventoEnviadoCCe(InfEventoEnviado):
    """infEvento specialized with the CCe payload and event type 110110."""
    def __init__(self):
        super(InfEventoEnviadoCCe, self).__init__()
        self.detEvento = DetEventoCCe()
        self.tpEvento.valor = '110110'
class EventoCCe(Evento):
    """Single CCe (correction letter) event with its XSD schema location."""
    def __init__(self):
        super(EventoCCe, self).__init__()
        # BUG FIX: previously instantiated the generic InfEventoEnviado,
        # which lacks the CCe-specific detEvento payload and the tpEvento
        # code 110110 that InfEventoEnviadoCCe (otherwise unused) sets.
        self.infEvento = InfEventoEnviadoCCe()
        self.caminho_esquema = os.path.join(DIRNAME, 'schema/', ESQUEMA_ATUAL + '/')
        self.arquivo_esquema = 'CCe_v1.00.xsd'
class EnvEventoCCe(EnvEvento):
    """Batch envelope (envEvento) carrying one or more CCe events."""
    def __init__(self):
        super(EnvEventoCCe, self).__init__()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'envCCe_v1.00.xsd'

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        xml += self.idLote.xml
        # Inner events are embedded without their own XML declaration
        for ev in self.evento:
            xml += tira_abertura(ev.xml)
        xml += u'</envEvento>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.idLote.xml = arquivo
            self.evento = self.le_grupo('//envEvento/evento', EventoCCe)

    xml = property(get_xml, set_xml)


class InfEventoRecebidoCCe(InfEventoRecebido):
    """CCe specialization of the received infEvento (no extra fields)."""
    def __init__(self):
        super(InfEventoRecebidoCCe, self).__init__()


class RetEventoCCe(RetEvento):
    """CCe specialization of a single event response (no extra fields)."""
    def __init__(self):
        super(RetEventoCCe, self).__init__()


class RetEnvEventoCCe(RetEnvEvento):
    """Batch response (retEnvEvento) for a CCe envelope."""
    def __init__(self):
        super(RetEnvEventoCCe, self).__init__()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema/', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'retEnvCCe_v1.00.xsd'

    def get_xml(self):
        xml = XMLNFe.get_xml(self)
        xml += ABERTURA
        xml += self.versao.xml
        xml += self.idLote.xml
        xml += self.tpAmb.xml
        xml += self.verAplic.xml
        xml += self.cOrgao.xml
        xml += self.cStat.xml
        xml += self.xMotivo.xml
        for r in self.retEvento:
            xml += tira_abertura(r.xml)
        xml += u'</retEnvEvento>'
        return xml

    def set_xml(self, arquivo):
        if self._le_xml(arquivo):
            self.versao.xml = arquivo
            self.idLote.xml = arquivo
            self.tpAmb.xml = arquivo
            self.verAplic.xml = arquivo
            self.cOrgao.xml = arquivo
            self.cStat.xml = arquivo
            self.xMotivo.xml = arquivo
            self.retEvento = self.le_grupo('//retEnvEvento/retEvento', RetEventoCCe)
            # Build the per-NFe-key lookup of responses
            for ret in self.retEvento:
                self.dic_retEvento[ret.infEvento.chNFe.valor] = ret

    xml = property(get_xml, set_xml)
class ProcEventoNFeCCe(ProcEventoNFe):
    """Event + response pair (procEventoNFe) for a CCe.

    (FIX: removed dataset-table delimiter garbage fused onto the final
    line; code otherwise unchanged.)
    """
    def __init__(self):
        super(ProcEventoNFeCCe, self).__init__()
        self.evento = EventoCCe()
        self.retEvento = RetEventoCCe()
        self.caminho_esquema = os.path.join(DIRNAME, u'schema', ESQUEMA_ATUAL + u'/')
        self.arquivo_esquema = u'procCCeNFe_v1.00.xsd'
import pandas as pd
from pgmpy.estimators import BayesianEstimator
from pgmpy.models import BayesianModel
from pomegranate.BayesianNetwork import BayesianNetwork
from pomegranate.base import State
from pomegranate.distributions.ConditionalProbabilityTable import ConditionalProbabilityTable
from pomegranate.distributions.DiscreteDistribution import DiscreteDistribution
# Node-name constants shared by the models below.  The 'LEN_*' vs 'RON_*'
# prefixes distinguish two kinds of nodes -- presumably leaf/observation
# vs root/hypothesis nodes of the peepo framework; verify against callers.
MOBILE = 'LEN_mobile'
LEFT_ARM = 'LEN_motor_left_arm'
RIGHT_ARM = 'LEN_motor_right_arm'
LEFT_FOOT = 'LEN_motor_left_foot'
RIGHT_FOOT = 'LEN_motor_right_foot'
DESIRE = 'LEN_desire'
BOREDOM = 'RON_BOREDOM'
MOTOR_HYPO = 'RON_MOVEMENT'
def baby_model():
    """Hand-built pomegranate BayesianNetwork for the baby-in-crib toy task.

    Two root variables (boredom, movement) each with P(1)=0.4, and three
    dependent variables: desire (on boredom), left arm (on movement) and
    mobile (on both).  Structure and probabilities are identical to the
    original construction.
    """
    boredom = DiscreteDistribution({'0': 0.6, '1': 0.4})
    movement = DiscreteDistribution({'0': 0.6, '1': 0.4})

    # Shared "inverting" table: child is 1 with p=0.1 when parent is 1,
    # and with p=0.9 when parent is 0.  Rows are copied per table.
    invert_rows = [['1', '1', 0.1],
                   ['1', '0', 0.9],
                   ['0', '1', 0.9],
                   ['0', '0', 0.1]]
    desire = ConditionalProbabilityTable(
        [row[:] for row in invert_rows], [boredom])
    left_arm = ConditionalProbabilityTable(
        [row[:] for row in invert_rows], [movement])
    mobile = ConditionalProbabilityTable(
        [['1', '1', '1', 0.1],
         ['1', '1', '0', 0.9],
         ['1', '0', '1', 0.1],
         ['1', '0', '0', 0.9],
         ['0', '1', '1', 0.9],
         ['0', '1', '0', 0.1],
         ['0', '0', '1', 0.9],
         ['0', '0', '0', 0.1]], [boredom, movement])

    states = [State(boredom, name=BOREDOM),
              State(movement, name=MOTOR_HYPO),
              State(desire, name=DESIRE),
              State(mobile, name=MOBILE),
              State(left_arm, name=LEFT_ARM)]

    net = BayesianNetwork()
    net.add_states(*states)
    net.add_edge(states[0], states[2])  # boredom -> desire
    net.add_edge(states[0], states[3])  # boredom -> mobile
    net.add_edge(states[1], states[3])  # movement -> mobile
    net.add_edge(states[1], states[4])  # movement -> left arm
    net.bake()
    return net
# Ten toy observations used to fit fully_connected_model: nine all-ones
# rows and one all-zeros row for every variable.
TRAINING_DATA = pd.DataFrame(data={
    BOREDOM: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    DESIRE: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    MOBILE: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    MOTOR_HYPO: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    LEFT_ARM: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]
})
def fully_connected_model(nodes=None):
    """Build and fit a pgmpy BayesianModel over the given node names.

    Edges go from every node whose name contains 'hypo' to every node
    whose name contains 'obs' or 'motor'; the model is then fitted on
    TRAINING_DATA with a BDeu prior.

    NOTE(review): none of the default node names contain 'hypo' or 'obs'
    (they use RON_/LEN_ prefixes), so with the defaults no edges are
    added -- confirm whether that is intended.

    (FIX: removed dataset-table delimiter garbage fused onto the final
    line; code otherwise unchanged.)
    """
    if not nodes:
        nodes = [BOREDOM, DESIRE, MOBILE, MOTOR_HYPO, LEFT_ARM]
    network = BayesianModel()
    network.add_nodes_from(nodes)
    for hypo in nodes:
        if 'hypo' in hypo:
            for obs in nodes:
                if 'obs' in obs or 'motor' in obs:
                    network.add_edge(u=hypo, v=obs)
    network.fit(TRAINING_DATA, estimator=BayesianEstimator, prior_type="BDeu")
    return network
from pgmpy.estimators import BayesianEstimator
from pgmpy.models import BayesianModel
from pomegranate.BayesianNetwork import BayesianNetwork
from pomegranate.base import State
from pomegranate.distributions.ConditionalProbabilityTable import ConditionalProbabilityTable
from pomegranate.distributions.DiscreteDistribution import DiscreteDistribution
# Node-name constants shared by the models below.  The 'LEN_*' vs 'RON_*'
# prefixes distinguish two kinds of nodes -- presumably leaf/observation
# vs root/hypothesis nodes of the peepo framework; verify against callers.
MOBILE = 'LEN_mobile'
LEFT_ARM = 'LEN_motor_left_arm'
RIGHT_ARM = 'LEN_motor_right_arm'
LEFT_FOOT = 'LEN_motor_left_foot'
RIGHT_FOOT = 'LEN_motor_right_foot'
DESIRE = 'LEN_desire'
BOREDOM = 'RON_BOREDOM'
MOTOR_HYPO = 'RON_MOVEMENT'
def baby_model():
    """Hand-built pomegranate BayesianNetwork for the baby-in-crib toy task.

    Two root variables (boredom, movement) each with P(1)=0.4, and three
    dependent variables: desire (on boredom), left arm (on movement) and
    mobile (on both).  Structure and probabilities are identical to the
    original construction.
    """
    boredom = DiscreteDistribution({'0': 0.6, '1': 0.4})
    movement = DiscreteDistribution({'0': 0.6, '1': 0.4})

    # Shared "inverting" table: child is 1 with p=0.1 when parent is 1,
    # and with p=0.9 when parent is 0.  Rows are copied per table.
    invert_rows = [['1', '1', 0.1],
                   ['1', '0', 0.9],
                   ['0', '1', 0.9],
                   ['0', '0', 0.1]]
    desire = ConditionalProbabilityTable(
        [row[:] for row in invert_rows], [boredom])
    left_arm = ConditionalProbabilityTable(
        [row[:] for row in invert_rows], [movement])
    mobile = ConditionalProbabilityTable(
        [['1', '1', '1', 0.1],
         ['1', '1', '0', 0.9],
         ['1', '0', '1', 0.1],
         ['1', '0', '0', 0.9],
         ['0', '1', '1', 0.9],
         ['0', '1', '0', 0.1],
         ['0', '0', '1', 0.9],
         ['0', '0', '0', 0.1]], [boredom, movement])

    states = [State(boredom, name=BOREDOM),
              State(movement, name=MOTOR_HYPO),
              State(desire, name=DESIRE),
              State(mobile, name=MOBILE),
              State(left_arm, name=LEFT_ARM)]

    net = BayesianNetwork()
    net.add_states(*states)
    net.add_edge(states[0], states[2])  # boredom -> desire
    net.add_edge(states[0], states[3])  # boredom -> mobile
    net.add_edge(states[1], states[3])  # movement -> mobile
    net.add_edge(states[1], states[4])  # movement -> left arm
    net.bake()
    return net
# Ten toy observations used to fit fully_connected_model: nine all-ones
# rows and one all-zeros row for every variable.
TRAINING_DATA = pd.DataFrame(data={
    BOREDOM: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    DESIRE: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    MOBILE: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    MOTOR_HYPO: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0],
    LEFT_ARM: [1, 1, 1, 1, 1, 1, 1, 1, 1, 0]
})
def fully_connected_model(nodes=None):
    """Build and fit a pgmpy BayesianModel over the given node names.

    Edges go from every node whose name contains 'hypo' to every node
    whose name contains 'obs' or 'motor'; the model is then fitted on
    TRAINING_DATA with a BDeu prior.

    NOTE(review): none of the default node names contain 'hypo' or 'obs'
    (they use RON_/LEN_ prefixes), so with the defaults no edges are
    added -- confirm whether that is intended.

    (FIX: removed dataset-table delimiter garbage fused onto the final
    line; code otherwise unchanged.)
    """
    if not nodes:
        nodes = [BOREDOM, DESIRE, MOBILE, MOTOR_HYPO, LEFT_ARM]
    network = BayesianModel()
    network.add_nodes_from(nodes)
    for hypo in nodes:
        if 'hypo' in hypo:
            for obs in nodes:
                if 'obs' in obs or 'motor' in obs:
                    network.add_edge(u=hypo, v=obs)
    network.fit(TRAINING_DATA, estimator=BayesianEstimator, prior_type="BDeu")
    return network
import argparse
import json
import logging
import os
import shlex
import shutil
import sys
# Hardcoded configuration values
JAVA_LAST_KIOSK_LOG = "/tmp/java-last-kiosk.log"

# (FIX: removed dataset-table delimiter garbage fused onto the final
# print line; code otherwise unchanged.)

# Parse CLI arguments
parser = argparse.ArgumentParser(description='JavaFX Last Kiosk Launcher', allow_abbrev=False)
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug logging')
parser.add_argument('-n', '--dry-run', action='store_true', help='Only output command, do not execute')
args = parser.parse_args()

# Ensure we are running as root (parser.error exits the process)
if os.geteuid() != 0:
    parser.error("Unable to execute 'java-kiosk' without running as root")

# Initialize formatter for unified debug logs
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')

# Initialize stream handler for logging, only visible when debug argument was given
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
streamHandler.setLevel(logging.DEBUG if args.debug else logging.INFO)

# Initialize file handler for logging, always active
fileHandler = logging.FileHandler(JAVA_LAST_KIOSK_LOG, mode='w')
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.DEBUG)

# Initialize logging
logger = logging.getLogger('java-kiosk')
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
logger.setLevel(logging.DEBUG)

# Search for absolute path of java-kiosk
java_kiosk_path = shutil.which('java-kiosk')
if java_kiosk_path is None:
    parser.error("Unable to find 'java-kiosk' binary in current PATH")
logger.debug("Found path to 'java-kiosk' helper script: %s", java_kiosk_path)

# Determine path to persistence file
persistence_path = os.path.join(os.path.expanduser('~'), '.java-last-kiosk')
logger.debug('Determined path to last-java-kiosk persistence file: %s', persistence_path)

# Parse persistence file as JSON to determine arguments
try:
    with open(persistence_path, 'r') as persistence_file:
        java_kiosk_data = json.load(persistence_file)
except Exception as exc:
    logger.error("Unable to open persistence file: %s", exc)
    logger.error("Please ensure that 'java-kiosk' was executed successfully before")
    parser.error("Unable to continue, arguments for previous 'java-kiosk' invocation are missing")

# Log previously used java-kiosk arguments
logger.debug("Determined previous 'java-kiosk' working directory: %s", java_kiosk_data['cwd'])
logger.debug("Determined previous 'java-kiosk' arguments: %s", java_kiosk_data['args'])

# Either execute java-kiosk or output final command
if not args.dry_run:
    # Adjust working directory
    logger.debug('Switching work directory to previous location: %s', java_kiosk_data['cwd'])
    os.chdir(java_kiosk_data['cwd'])

    # Modify environment for java-kiosk to skip persistence update and optionally enable debug
    java_kiosk_env = os.environ.copy()
    java_kiosk_env['JAVA_KIOSK_VOLATILE'] = 'true'
    java_kiosk_env['JAVA_KIOSK_DEBUG'] = 'true' if args.debug else ''
    logger.debug("Patched environment for 'java-kiosk': %s", java_kiosk_env)

    # Use execve() to replace current process with java-kiosk
    exec_binary, exec_argv = java_kiosk_data['args'][0], java_kiosk_data['args']
    logger.debug("Executing into [%s] using previous argv: %s", exec_binary, exec_argv)
    os.execve(exec_binary, exec_argv, java_kiosk_env)
else:
    print(' '.join(map(lambda arg: shlex.quote(arg), java_kiosk_data['args'])))
import json
import logging
import os
import shlex
import shutil
import sys
# Hardcoded configuration values
JAVA_LAST_KIOSK_LOG = "/tmp/java-last-kiosk.log"

# (FIX: removed dataset-table delimiter garbage fused onto the final
# print line; code otherwise unchanged.)

# Parse CLI arguments
parser = argparse.ArgumentParser(description='JavaFX Last Kiosk Launcher', allow_abbrev=False)
parser.add_argument('-d', '--debug', action='store_true', help='Enable debug logging')
parser.add_argument('-n', '--dry-run', action='store_true', help='Only output command, do not execute')
args = parser.parse_args()

# Ensure we are running as root (parser.error exits the process)
if os.geteuid() != 0:
    parser.error("Unable to execute 'java-kiosk' without running as root")

# Initialize formatter for unified debug logs
formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')

# Initialize stream handler for logging, only visible when debug argument was given
streamHandler = logging.StreamHandler()
streamHandler.setFormatter(formatter)
streamHandler.setLevel(logging.DEBUG if args.debug else logging.INFO)

# Initialize file handler for logging, always active
fileHandler = logging.FileHandler(JAVA_LAST_KIOSK_LOG, mode='w')
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.DEBUG)

# Initialize logging
logger = logging.getLogger('java-kiosk')
logger.addHandler(streamHandler)
logger.addHandler(fileHandler)
logger.setLevel(logging.DEBUG)

# Search for absolute path of java-kiosk
java_kiosk_path = shutil.which('java-kiosk')
if java_kiosk_path is None:
    parser.error("Unable to find 'java-kiosk' binary in current PATH")
logger.debug("Found path to 'java-kiosk' helper script: %s", java_kiosk_path)

# Determine path to persistence file
persistence_path = os.path.join(os.path.expanduser('~'), '.java-last-kiosk')
logger.debug('Determined path to last-java-kiosk persistence file: %s', persistence_path)

# Parse persistence file as JSON to determine arguments
try:
    with open(persistence_path, 'r') as persistence_file:
        java_kiosk_data = json.load(persistence_file)
except Exception as exc:
    logger.error("Unable to open persistence file: %s", exc)
    logger.error("Please ensure that 'java-kiosk' was executed successfully before")
    parser.error("Unable to continue, arguments for previous 'java-kiosk' invocation are missing")

# Log previously used java-kiosk arguments
logger.debug("Determined previous 'java-kiosk' working directory: %s", java_kiosk_data['cwd'])
logger.debug("Determined previous 'java-kiosk' arguments: %s", java_kiosk_data['args'])

# Either execute java-kiosk or output final command
if not args.dry_run:
    # Adjust working directory
    logger.debug('Switching work directory to previous location: %s', java_kiosk_data['cwd'])
    os.chdir(java_kiosk_data['cwd'])

    # Modify environment for java-kiosk to skip persistence update and optionally enable debug
    java_kiosk_env = os.environ.copy()
    java_kiosk_env['JAVA_KIOSK_VOLATILE'] = 'true'
    java_kiosk_env['JAVA_KIOSK_DEBUG'] = 'true' if args.debug else ''
    logger.debug("Patched environment for 'java-kiosk': %s", java_kiosk_env)

    # Use execve() to replace current process with java-kiosk
    exec_binary, exec_argv = java_kiosk_data['args'][0], java_kiosk_data['args']
    logger.debug("Executing into [%s] using previous argv: %s", exec_binary, exec_argv)
    os.execve(exec_binary, exec_argv, java_kiosk_env)
else:
    print(' '.join(map(lambda arg: shlex.quote(arg), java_kiosk_data['args'])))
from torch.autograd import Variable
import torch
from .module import Module
from .container import Sequential
from .activation import LogSoftmax
from .. import functional as F
def _assert_no_grad(variable):
    """Fail fast if a loss target requires gradient.

    The criterions in this module never differentiate w.r.t. their targets,
    so a target that requires grad is almost certainly a caller mistake.
    """
    assert not variable.requires_grad, \
        "nn criterions don't compute the gradient w.r.t. targets - please " \
        "mark these variables as volatile or not requiring gradients"
class _Loss(Module):
    """Base class for all criterions; records the ``size_average`` flag."""

    def __init__(self, size_average=True):
        super(_Loss, self).__init__()
        # True (default): average the loss over observations per minibatch;
        # False: sum it instead.
        self.size_average = size_average
class _WeightedLoss(_Loss):
    """Base class for criterions that accept per-class rescaling weights."""

    def __init__(self, weight=None, size_average=True):
        super(_WeightedLoss, self).__init__(size_average)
        # A buffer (not a Parameter): moves with the module (e.g. .cuda())
        # but is never updated by the optimizer.
        self.register_buffer('weight', weight)
class L1Loss(_Loss):
    r"""Mean absolute error criterion.

    Measures :math:`{loss}(x, y) = 1/n \sum |x_i - y_i|` between input `x`
    and target `y` of arbitrary matching shapes with `n` total elements.
    The sum runs over all elements and is divided by `n`; pass the
    constructor argument ``size_average=False`` to skip the division.

    Args:
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> criterion = nn.L1Loss()
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> y = autograd.Variable(torch.randn(3, 5))
        >>> criterion(x, y).backward()
    """

    def forward(self, input, target):
        # Targets never carry gradient; the math lives in functional.
        _assert_no_grad(target)
        loss = F.l1_loss(input, target, size_average=self.size_average)
        return loss
class NLLLoss(_WeightedLoss):
    r"""Negative log likelihood loss for classification with `n` classes.

    The forward input must contain log-probabilities of each class, i.e. a
    2D Tensor of size `(minibatch, n)` -- typically produced by a trailing
    `LogSoftmax` layer (use `CrossEntropyLoss` if you prefer not to add that
    extra layer). The target is a class index in `(0 to N-1, where
    N = number of classes)`.

    Per sample::

        loss(x, class) = -x[class]                    # unweighted
        loss(x, class) = -weights[class] * x[class]   # with `weights`
        loss(x, class) = class != ignoreIndex ? -weights[class] * x[class] : 0

    Args:
        weight (Tensor, optional): per-class rescaling weights; a Tensor of
            size "nclasses". Particularly useful for unbalanced training sets.
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.
        ignore_index (int, optional): target value that contributes neither
            to the loss nor to the input gradient. With size_average=True
            the average runs over non-ignored targets only.

    Shape:
        - Input: :math:`(N, C)` where `C = number of classes`
        - Target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`

    Examples::

        >>> m = nn.LogSoftmax()
        >>> criterion = nn.NLLLoss()
        >>> # input is of size nBatch x nClasses = 3 x 5
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> # each element in target has to have 0 <= value < nclasses
        >>> y = autograd.Variable(torch.LongTensor([1, 0, 4]))
        >>> criterion(m(x), y).backward()
    """

    def __init__(self, weight=None, size_average=True, ignore_index=-100):
        super(NLLLoss, self).__init__(weight, size_average)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        _assert_no_grad(target)
        nll = F.nll_loss(input, target, self.weight, self.size_average,
                         self.ignore_index)
        return nll
class NLLLoss2d(NLLLoss):
    r"""Per-pixel negative log likelihood loss for image inputs.

    Behaves like :class:`NLLLoss` evaluated at every spatial position; the
    whole implementation is inherited.

    Args:
        weight (Tensor, optional): 1D per-class rescaling weights with as
            many elements as there are classes.
        size_average: average (True, default) or sum the per-minibatch
            losses.

    Shape:
        - Input: :math:`(N, C, H, W)` where `C = number of classes`
        - Target: :math:`(N, H, W)` where each value is
          `0 <= targets[i] <= C-1`

    Examples::

        >>> m = nn.Conv2d(16, 32, (3, 3)).float()
        >>> criterion = nn.NLLLoss2d()
        >>> # input is of size nBatch x nClasses x height x width
        >>> x = autograd.Variable(torch.randn(3, 16, 10, 10))
        >>> # each element in target has to have 0 <= value < nclasses
        >>> y = autograd.Variable(torch.LongTensor(3, 8, 8).random_(0, 4))
        >>> criterion(m(x), y).backward()
    """
    pass
class PoissonNLLLoss(_Loss):
    r"""Negative log likelihood loss with Poisson distribution of target.

    ::

        target ~ Pois(input)
        loss(input, target) = input - target * log(input) + log(target!)

    The last term can be omitted or approximated with Stirling's formula;
    the approximation is used for target values greater than 1, and zeros
    are added to the loss for targets less than or equal to 1.

    Args:
        log_input (bool, optional): if True the loss is computed as
            `exp(input) - target * input`, if False the loss is
            `input - target * log(input)`.
        full (bool, optional): whether to compute the full loss, i.e. add
            the Stirling approximation term
            `target * log(target) - target + 0.5 * log(2 * pi * target)`.
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.

    Examples::

        >>> loss = nn.PoissonNLLLoss()
        >>> log_input = autograd.Variable(torch.randn(5, 2), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(5, 2))
        >>> output = loss(log_input, target)
        >>> output.backward()
    """

    def __init__(self, log_input=True, full=False, size_average=True):
        # Fix: hand size_average to the _Loss base class instead of calling
        # super() without it and re-assigning the attribute by hand -- this
        # keeps the criterion consistent with its siblings in this module.
        super(PoissonNLLLoss, self).__init__(size_average)
        self.log_input = log_input
        self.full = full

    def forward(self, log_input, target):
        _assert_no_grad(target)
        return F.poisson_nll_loss(log_input, target, self.log_input,
                                  self.full, self.size_average)
class KLDivLoss(_Loss):
    r"""The `Kullback-Leibler divergence`_ Loss.

    KL divergence is a useful distance measure for continuous distributions
    and is often useful when performing direct regression over the space of
    (discretely sampled) continuous output distributions.

    As with `NLLLoss`, the `input` must contain *log-probabilities*; unlike
    `ClassNLLLoss`, however, `input` is not restricted to a 2D Tensor since
    the criterion is applied element-wise. The `target` Tensor must have the
    same size as the `input` Tensor.

    The loss can be described as:

    .. math:: loss(x, target) = 1/n \sum(target_i * (log(target_i) - x_i))

    By default the losses are averaged for each minibatch over observations
    **as well as** over dimensions; set ``size_average=False`` to sum them
    instead.

    .. _Kullback-Leibler divergence:
        https://en.wikipedia.org/wiki/Kullback-Leibler_divergence
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        divergence = F.kl_div(input, target, size_average=self.size_average)
        return divergence
class MSELoss(_Loss):
    r"""Mean squared error criterion.

    Measures :math:`{loss}(x, y) = 1/n \sum |x_i - y_i|^2` between `n`
    elements of the input `x` and target `y`, which may have arbitrary
    matching shapes. The sum runs over all elements and is divided by `n`;
    set the internal variable ``size_average`` to `False` to skip the
    division.

    Args:
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> criterion = nn.MSELoss()
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> y = autograd.Variable(torch.randn(3, 5))
        >>> criterion(x, y).backward()
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        loss = F.mse_loss(input, target, size_average=self.size_average)
        return loss
class BCELoss(_WeightedLoss):
    r"""Binary Cross Entropy between the target and the output.

    .. math:: loss(o, t) = - 1/n \sum_i (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i]))

    or, with the weights argument specified:

    .. math:: loss(o, t) = - 1/n \sum_i weights[i] * (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i]))

    Used for measuring the error of a reconstruction, for example in an
    auto-encoder; the targets `t[i]` should be numbers between 0 and 1.
    By default the losses are averaged for each minibatch over observations
    *as well as* over dimensions; set ``size_average=False`` to sum instead.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        return F.binary_cross_entropy(input, target,
                                      weight=self.weight,
                                      size_average=self.size_average)
class BCEWithLogitsLoss(Module):
    r"""`Sigmoid` layer and `BCELoss` fused into a single criterion.

    Numerically more stable than a plain `Sigmoid` followed by a `BCELoss`:
    combining the operations into one layer allows the log-sum-exp trick.
    For logits `o` (no sigmoid applied) and targets `t` between 0 and 1:

    .. math:: loss(o, t) = - 1/n \sum_i (t[i] * log(sigmoid(o[i])) + (1 - t[i]) * log(1 - sigmoid(o[i])))

    or, with the weights argument specified:

    .. math:: loss(o, t) = - 1/n \sum_i weights[i] * (t[i] * log(sigmoid(o[i])) + (1 - t[i]) * log(1 - sigmoid(o[i])))

    Used for measuring the error of a reconstruction, for example in an
    auto-encoder. By default the losses are averaged for each minibatch over
    observations *as well as* over dimensions; set ``size_average=False`` to
    sum instead.
    """

    def __init__(self, weight=None, size_average=True):
        super(BCEWithLogitsLoss, self).__init__()
        self.size_average = size_average
        self.register_buffer('weight', weight)

    def forward(self, input, target):
        # Unweighted fast path first; otherwise wrap the buffer so autograd
        # sees a Variable.
        if self.weight is None:
            return F.binary_cross_entropy_with_logits(
                input, target, size_average=self.size_average)
        return F.binary_cross_entropy_with_logits(
            input, target, Variable(self.weight), self.size_average)
class HingeEmbeddingLoss(_Loss):
    r"""Hinge loss over a 2D mini-batch `x` with labels `y` in {1, -1}.

    Usually used for measuring whether two inputs are similar or dissimilar,
    e.g. using the L1 pairwise distance, and typically for learning
    nonlinear embeddings or semi-supervised learning::

                         { x_i,                  if y_i ==  1
        loss(x, y) = 1/n {
                         { max(0, margin - x_i), if y_i == -1

    `x` and `y` can have arbitrary shapes with `n` total elements each; the
    sum runs over all elements and is divided by `n`. The division can be
    avoided by setting ``size_average=False``.

    Args:
        margin (float, optional): margin applied to dissimilar pairs.
            Default: 1
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.
    """

    def __init__(self, margin=1.0, size_average=True):
        # Fix: hand size_average to the _Loss base class instead of calling
        # super() without it and re-assigning the attribute by hand -- this
        # keeps the criterion consistent with its siblings in this module.
        super(HingeEmbeddingLoss, self).__init__(size_average)
        self.margin = margin

    def forward(self, input, target):
        return F.hinge_embedding_loss(input, target, self.margin,
                                      self.size_average)
class MultiLabelMarginLoss(_Loss):
    r"""Multi-class multi-classification hinge (margin-based) loss.

    Operates on a 2D mini-batch input `x` and a 2D Tensor `y` of target
    class indices of the same size as `x`. Per sample::

        loss(x, y) = sum_ij(max(0, 1 - (x[y[j]] - x[i]))) / x.size(0)

    where `i == 0` to `x.size(0)`, `j == 0` to `y.size(0)`, `y[j] != 0`,
    and `i != y[j]` for all `i` and `j`.

    Only the first non-zero `y[j]` targets are considered, which allows
    different samples to carry variable amounts of target classes.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        result = F.multilabel_margin_loss(input, target,
                                          size_average=self.size_average)
        return result
class SmoothL1Loss(_Loss):
    r"""Huber-style loss: squared below an absolute error of 1, L1 above.

    Less sensitive to outliers than `MSELoss` and in some cases prevents
    exploding gradients (e.g. see the "Fast R-CNN" paper)::

                              { 0.5 * (x_i - y_i)^2, if |x_i - y_i| < 1
        loss(x, y) = 1/n \sum {
                              { |x_i - y_i| - 0.5,   otherwise

    `x` and `y` can have arbitrary shapes with `n` total elements each; the
    sum runs over all elements and is divided by `n`, unless the internal
    variable ``size_average`` is set to `False`.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        result = F.smooth_l1_loss(input, target,
                                  size_average=self.size_average)
        return result
class SoftMarginLoss(_Loss):
    r"""Two-class classification logistic loss.

    Operates on a 2D mini-batch input `x` and a target `y` containing
    either `1` or `-1`::

        loss(x, y) = sum_i (log(1 + exp(-y[i]*x[i]))) / x.nelement()

    Set ``self.size_average`` to `False` to disable the normalization by
    the number of elements in the input.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        result = F.soft_margin_loss(input, target,
                                    size_average=self.size_average)
        return result
class CrossEntropyLoss(_WeightedLoss):
    r"""`LogSoftMax` and `NLLLoss` combined in one single criterion.

    Useful when training a classification problem with `n` classes. The
    `input` holds raw (unnormalized) scores for each class and must be a 2D
    `Tensor` of size `batch x n`; the `target` is a 1D tensor of size `n`
    holding a class index (0 to nClasses-1) per value. Per sample::

        loss(x, class) = -log(exp(x[class]) / (\sum_j exp(x[j])))
                       = -x[class] + log(\sum_j exp(x[j]))

    or, with the optional `weights` argument (useful for unbalanced
    training sets)::

        loss(x, class) = weights[class] * (-x[class] + log(\sum_j exp(x[j])))

    The losses are averaged across observations for each minibatch.

    Args:
        weight (Tensor, optional): per-class rescaling weights; a Tensor of
            size "nclasses".
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.
        ignore_index (int, optional): target value that contributes neither
            to the loss nor to the input gradient; with size_average=True
            the average runs over non-ignored targets.

    Shape:
        - Input: :math:`(N, C)` where `C = number of classes`
        - Target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`

    Examples::

        >>> criterion = nn.CrossEntropyLoss()
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> y = autograd.Variable(torch.LongTensor(3).random_(5))
        >>> criterion(x, y).backward()
    """

    def __init__(self, weight=None, size_average=True, ignore_index=-100):
        super(CrossEntropyLoss, self).__init__(weight, size_average)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        _assert_no_grad(target)
        ce = F.cross_entropy(input, target, self.weight, self.size_average,
                             self.ignore_index)
        return ce
class MultiLabelSoftMarginLoss(_WeightedLoss):
    r"""Multi-label one-versus-all loss based on max-entropy.

    Operates on a 2D mini-batch input `x` and a binary 2D target `y` of
    the same size. Per sample::

        loss(x, y) = - sum_i (y[i] * log( 1 / (1 + exp(-x[i])) )
                         + ( (1-y[i]) * log(exp(-x[i]) / (1 + exp(-x[i])) ) )

    where `i == 0` to `x.nElement()-1` and `y[i] in {0,1}`.
    """

    def forward(self, input, target):
        out = F.multilabel_soft_margin_loss(input, target, self.weight,
                                            self.size_average)
        return out
class CosineEmbeddingLoss(Module):
    r"""Cosine-distance criterion over input pairs with labels 1 / -1.

    Measures whether two inputs `x1`, `x2` are similar or dissimilar using
    the cosine distance; typically used for learning nonlinear embeddings
    or for semi-supervised learning. Per sample::

                     { 1 - cos(x1, x2),              if y ==  1
        loss(x, y) = {
                     { max(0, cos(x1, x2) - margin), if y == -1

    `margin` should be a number from `-1` to `1` (`0` to `0.5` is
    suggested) and defaults to `0`. With ``size_average=True`` (the
    default) the loss is averaged over the batch samples, otherwise it is
    summed.
    """

    def __init__(self, margin=0, size_average=True):
        super(CosineEmbeddingLoss, self).__init__()
        self.margin = margin
        self.size_average = size_average

    def forward(self, input1, input2, target):
        return F.cosine_embedding_loss(input1, input2, target, self.margin,
                                       self.size_average)
class MarginRankingLoss(Module):
    r"""Ranking criterion over two 1D mini-batch inputs with labels 1 / -1.

    Given inputs `x1`, `x2` and a label tensor `y` with values `1` or `-1`:
    `y == 1` means the first input should be ranked higher (larger value)
    than the second, and vice-versa for `y == -1`. Per sample::

        loss(x, y) = max(0, -y * (x1 - x2) + margin)

    With ``size_average=True`` (the default) the loss is averaged over the
    batch samples, otherwise it is summed.
    """

    def __init__(self, margin=0, size_average=True):
        super(MarginRankingLoss, self).__init__()
        self.margin = margin
        self.size_average = size_average

    def forward(self, input1, input2, target):
        return F.margin_ranking_loss(input1, input2, target, self.margin,
                                     self.size_average)
class MultiMarginLoss(Module):
    r"""Multi-class classification hinge (margin-based) loss.

    Operates on a 2D mini-batch input `x` and a 1D tensor `y` of target
    class indices (`0` <= `y` <= `x.size(1)`). Per sample::

        loss(x, y) = sum_i(max(0, (margin - x[y] + x[i]))^p) / x.size(0)

    where `i == 0` to `x.size(0)` and `i != y`. Optionally, non-equal class
    weighting can be given via a 1D `weight` tensor, turning the loss into::

        loss(x, y) = sum_i(max(0, w[y] * (margin - x[y] - x[i]))^p) / x.size(0)

    By default the losses are averaged over observations for each
    minibatch; set ``size_average=False`` to sum them instead.

    Args:
        p (int, optional): exponent; only 1 and 2 are supported. Default: 1
        margin (float, optional): margin value. Default: 1
        weight (Tensor, optional): 1D per-class rescaling weights.
        size_average (bool, optional): average (True, default) or sum.

    Raises:
        ValueError: if `p` is neither 1 nor 2, or if `weight` is not 1D.
    """

    def __init__(self, p=1, margin=1, weight=None, size_average=True):
        super(MultiMarginLoss, self).__init__()
        if p != 1 and p != 2:
            raise ValueError("only p == 1 and p == 2 supported")
        # Fix: validate with a real exception instead of `assert`, which is
        # silently stripped when Python runs with -O.
        if weight is not None and weight.dim() != 1:
            raise ValueError("weight must be a 1D Tensor")
        self.p = p
        self.margin = margin
        self.size_average = size_average
        self.weight = weight

    def forward(self, input, target):
        return F.multi_margin_loss(input, target, self.p, self.margin,
                                   self.weight, self.size_average)
class TripletMarginLoss(Module):
    r"""Triplet loss over (anchor, positive, negative) input batches.

    Measures a relative similarity between samples: a triplet is composed
    of an anchor `a`, a positive example `p` and a negative example `n`,
    each of shape :math:`(N, D)`. The distance swap is described in detail
    in `Learning shallow convolutional feature descriptors with triplet
    losses`_.

    .. math::
        L(a, p, n) = \frac{1}{N} \left( \sum_{i=1}^N \max \{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\} \right)

    where :math:`d(x_i, y_i) = \| {\bf x}_i - {\bf y}_i \|_2^2`.

    Args:
        margin (float, optional): margin value. Default: 1.0
        p (int, optional): the norm degree. Default: 2
        eps (float, optional): numerical stability term. Default: 1e-6
        swap (bool, optional): use the distance-swap variant. Default: False

    Shape:
        - Input: :math:`(N, D)` where `D = vector dimension`
        - Output: :math:`(N, 1)`

    >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
    >>> input1 = autograd.Variable(torch.randn(100, 128))
    >>> input2 = autograd.Variable(torch.randn(100, 128))
    >>> input3 = autograd.Variable(torch.randn(100, 128))
    >>> output = triplet_loss(input1, input2, input3)
    >>> output.backward()

    .. _Learning shallow convolutional feature descriptors with triplet losses:
        http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf
    """

    def __init__(self, margin=1.0, p=2, eps=1e-6, swap=False):
        super(TripletMarginLoss, self).__init__()
        self.margin = margin
        self.p = p
        self.eps = eps
        self.swap = swap

    def forward(self, anchor, positive, negative):
        return F.triplet_margin_loss(anchor, positive, negative,
                                     self.margin, self.p, self.eps,
                                     self.swap)
# TODO: L1HingeEmbeddingCriterion
# TODO: MSECriterion weight
# TODO: ClassSimplexCriterion
import torch
from .module import Module
from .container import Sequential
from .activation import LogSoftmax
from .. import functional as F
def _assert_no_grad(variable):
    """Fail fast if a loss target requires gradient.

    The criterions in this module never differentiate w.r.t. their targets,
    so a target that requires grad is almost certainly a caller mistake.
    """
    assert not variable.requires_grad, \
        "nn criterions don't compute the gradient w.r.t. targets - please " \
        "mark these variables as volatile or not requiring gradients"
class _Loss(Module):
    """Base class for all criterions; records the ``size_average`` flag."""

    def __init__(self, size_average=True):
        super(_Loss, self).__init__()
        # True (default): average the loss over observations per minibatch;
        # False: sum it instead.
        self.size_average = size_average
class _WeightedLoss(_Loss):
    """Base class for criterions that accept per-class rescaling weights."""

    def __init__(self, weight=None, size_average=True):
        super(_WeightedLoss, self).__init__(size_average)
        # A buffer (not a Parameter): moves with the module (e.g. .cuda())
        # but is never updated by the optimizer.
        self.register_buffer('weight', weight)
class L1Loss(_Loss):
    r"""Mean absolute error criterion.

    Measures :math:`{loss}(x, y) = 1/n \sum |x_i - y_i|` between input `x`
    and target `y` of arbitrary matching shapes with `n` total elements.
    The sum runs over all elements and is divided by `n`; pass the
    constructor argument ``size_average=False`` to skip the division.

    Args:
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> criterion = nn.L1Loss()
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> y = autograd.Variable(torch.randn(3, 5))
        >>> criterion(x, y).backward()
    """

    def forward(self, input, target):
        # Targets never carry gradient; the math lives in functional.
        _assert_no_grad(target)
        loss = F.l1_loss(input, target, size_average=self.size_average)
        return loss
class NLLLoss(_WeightedLoss):
    r"""Negative log likelihood loss for classification with `n` classes.

    The forward input must contain log-probabilities of each class, i.e. a
    2D Tensor of size `(minibatch, n)` -- typically produced by a trailing
    `LogSoftmax` layer (use `CrossEntropyLoss` if you prefer not to add that
    extra layer). The target is a class index in `(0 to N-1, where
    N = number of classes)`.

    Per sample::

        loss(x, class) = -x[class]                    # unweighted
        loss(x, class) = -weights[class] * x[class]   # with `weights`
        loss(x, class) = class != ignoreIndex ? -weights[class] * x[class] : 0

    Args:
        weight (Tensor, optional): per-class rescaling weights; a Tensor of
            size "nclasses". Particularly useful for unbalanced training sets.
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.
        ignore_index (int, optional): target value that contributes neither
            to the loss nor to the input gradient. With size_average=True
            the average runs over non-ignored targets only.

    Shape:
        - Input: :math:`(N, C)` where `C = number of classes`
        - Target: :math:`(N)` where each value is `0 <= targets[i] <= C-1`

    Examples::

        >>> m = nn.LogSoftmax()
        >>> criterion = nn.NLLLoss()
        >>> # input is of size nBatch x nClasses = 3 x 5
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> # each element in target has to have 0 <= value < nclasses
        >>> y = autograd.Variable(torch.LongTensor([1, 0, 4]))
        >>> criterion(m(x), y).backward()
    """

    def __init__(self, weight=None, size_average=True, ignore_index=-100):
        super(NLLLoss, self).__init__(weight, size_average)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        _assert_no_grad(target)
        nll = F.nll_loss(input, target, self.weight, self.size_average,
                         self.ignore_index)
        return nll
class NLLLoss2d(NLLLoss):
    r"""Per-pixel negative log likelihood loss for image inputs.

    Behaves like :class:`NLLLoss` evaluated at every spatial position; the
    whole implementation is inherited.

    Args:
        weight (Tensor, optional): 1D per-class rescaling weights with as
            many elements as there are classes.
        size_average: average (True, default) or sum the per-minibatch
            losses.

    Shape:
        - Input: :math:`(N, C, H, W)` where `C = number of classes`
        - Target: :math:`(N, H, W)` where each value is
          `0 <= targets[i] <= C-1`

    Examples::

        >>> m = nn.Conv2d(16, 32, (3, 3)).float()
        >>> criterion = nn.NLLLoss2d()
        >>> # input is of size nBatch x nClasses x height x width
        >>> x = autograd.Variable(torch.randn(3, 16, 10, 10))
        >>> # each element in target has to have 0 <= value < nclasses
        >>> y = autograd.Variable(torch.LongTensor(3, 8, 8).random_(0, 4))
        >>> criterion(m(x), y).backward()
    """
    pass
class PoissonNLLLoss(_Loss):
    r"""Negative log likelihood loss with Poisson distribution of target.

    ::

        target ~ Pois(input)
        loss(input, target) = input - target * log(input) + log(target!)

    The last term can be omitted or approximated with Stirling's formula;
    the approximation is used for target values greater than 1, and zeros
    are added to the loss for targets less than or equal to 1.

    Args:
        log_input (bool, optional): if True the loss is computed as
            `exp(input) - target * input`, if False the loss is
            `input - target * log(input)`.
        full (bool, optional): whether to compute the full loss, i.e. add
            the Stirling approximation term
            `target * log(target) - target + 0.5 * log(2 * pi * target)`.
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.

    Examples::

        >>> loss = nn.PoissonNLLLoss()
        >>> log_input = autograd.Variable(torch.randn(5, 2), requires_grad=True)
        >>> target = autograd.Variable(torch.randn(5, 2))
        >>> output = loss(log_input, target)
        >>> output.backward()
    """

    def __init__(self, log_input=True, full=False, size_average=True):
        # Fix: hand size_average to the _Loss base class instead of calling
        # super() without it and re-assigning the attribute by hand -- this
        # keeps the criterion consistent with its siblings in this module.
        super(PoissonNLLLoss, self).__init__(size_average)
        self.log_input = log_input
        self.full = full

    def forward(self, log_input, target):
        _assert_no_grad(target)
        return F.poisson_nll_loss(log_input, target, self.log_input,
                                  self.full, self.size_average)
class KLDivLoss(_Loss):
    r"""The `Kullback-Leibler divergence`_ Loss.

    KL divergence is a useful distance measure for continuous distributions
    and is often useful when performing direct regression over the space of
    (discretely sampled) continuous output distributions.

    As with `NLLLoss`, the `input` must contain *log-probabilities*; unlike
    `ClassNLLLoss`, however, `input` is not restricted to a 2D Tensor since
    the criterion is applied element-wise. The `target` Tensor must have the
    same size as the `input` Tensor.

    The loss can be described as:

    .. math:: loss(x, target) = 1/n \sum(target_i * (log(target_i) - x_i))

    By default the losses are averaged for each minibatch over observations
    **as well as** over dimensions; set ``size_average=False`` to sum them
    instead.

    .. _Kullback-Leibler divergence:
        https://en.wikipedia.org/wiki/Kullback-Leibler_divergence
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        divergence = F.kl_div(input, target, size_average=self.size_average)
        return divergence
class MSELoss(_Loss):
    r"""Mean squared error criterion.

    Measures :math:`{loss}(x, y) = 1/n \sum |x_i - y_i|^2` between `n`
    elements of the input `x` and target `y`, which may have arbitrary
    matching shapes. The sum runs over all elements and is divided by `n`;
    set the internal variable ``size_average`` to `False` to skip the
    division.

    Args:
        size_average (bool, optional): average (True, default) or sum the
            per-minibatch losses.

    Shape:
        - Input: :math:`(N, *)` where `*` means any number of additional
          dimensions
        - Target: :math:`(N, *)`, same shape as the input

    Examples::

        >>> criterion = nn.MSELoss()
        >>> x = autograd.Variable(torch.randn(3, 5), requires_grad=True)
        >>> y = autograd.Variable(torch.randn(3, 5))
        >>> criterion(x, y).backward()
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        loss = F.mse_loss(input, target, size_average=self.size_average)
        return loss
class BCELoss(_WeightedLoss):
    r"""Binary Cross Entropy between the target and the output.

    .. math:: loss(o, t) = - 1/n \sum_i (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i]))

    or, with the weights argument specified:

    .. math:: loss(o, t) = - 1/n \sum_i weights[i] * (t[i] * log(o[i]) + (1 - t[i]) * log(1 - o[i]))

    Used for measuring the error of a reconstruction, for example in an
    auto-encoder; the targets `t[i]` should be numbers between 0 and 1.
    By default the losses are averaged for each minibatch over observations
    *as well as* over dimensions; set ``size_average=False`` to sum instead.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        return F.binary_cross_entropy(input, target,
                                      weight=self.weight,
                                      size_average=self.size_average)
class BCEWithLogitsLoss(Module):
    r"""A `Sigmoid` layer and `BCELoss` fused into one numerically stable op.

    Because the sigmoid and the binary cross entropy are combined, the
    implementation can use the log-sum-exp trick, which is more stable than
    a separate ``Sigmoid`` followed by ``BCELoss``.  ``input`` holds raw
    logits (no sigmoid applied); targets should be numbers in ``[0, 1]``.
    Losses are averaged over the minibatch and dimensions unless
    ``size_average`` is ``False``, in which case they are summed.
    """

    def __init__(self, weight=None, size_average=True):
        super(BCEWithLogitsLoss, self).__init__()
        self.size_average = size_average
        # Registered as a buffer so it follows the module across devices.
        self.register_buffer('weight', weight)

    def forward(self, input, target):
        if self.weight is None:
            return F.binary_cross_entropy_with_logits(
                input, target, size_average=self.size_average)
        # Buffers are plain tensors; wrap in Variable for the functional call.
        return F.binary_cross_entropy_with_logits(
            input, target, Variable(self.weight), self.size_average)
class HingeEmbeddingLoss(_Loss):
    r"""Hinge loss for a 2D input `x` and labels `y` in ``{1, -1}``.

    ``loss_i = x_i`` when ``y_i == 1`` and ``max(0, margin - x_i)`` when
    ``y_i == -1``; the result is averaged over all `n` elements (or summed
    when ``size_average`` is ``False``).  Typically used with a pairwise
    distance (e.g. L1) to learn nonlinear embeddings or for
    semi-supervised similarity learning.  ``margin`` defaults to 1.
    """

    def __init__(self, margin=1.0, size_average=True):
        super(HingeEmbeddingLoss, self).__init__()
        self.size_average = size_average
        self.margin = margin

    def forward(self, input, target):
        loss = F.hinge_embedding_loss(input, target, self.margin,
                                      self.size_average)
        return loss
class MultiLabelMarginLoss(_Loss):
    r"""Multi-class multi-classification hinge (margin-based) loss.

    For each minibatch sample with 2D input `x` and 2D class-index target
    `y` (same size): ``sum_ij(max(0, 1 - (x[y[j]] - x[i]))) / x.size(0)``
    over all ``i != y[j]`` with ``y[j] != 0``.  Only the leading non-zero
    entries of each target row are considered, which lets different samples
    carry a variable number of target classes.
    """

    def forward(self, input, target):
        # Targets are ground truth: no gradient may flow into them.
        _assert_no_grad(target)
        loss = F.multilabel_margin_loss(input, target,
                                        size_average=self.size_average)
        return loss
class SmoothL1Loss(_Loss):
    r"""Huber loss: squared term for small errors, L1 term otherwise.

    ``loss_i = 0.5 * (x_i - y_i)^2`` when ``|x_i - y_i| < 1`` else
    ``|x_i - y_i| - 0.5``, averaged over all `n` elements (summed when
    ``size_average`` is ``False``).  Less sensitive to outliers than
    `MSELoss` and can prevent exploding gradients (see the "Fast R-CNN"
    paper).  `x` and `y` may have any matching shape.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        loss = F.smooth_l1_loss(input, target,
                                size_average=self.size_average)
        return loss
class SoftMarginLoss(_Loss):
    r"""Two-class logistic loss for targets in ``{1, -1}``.

    ``loss(x, y) = sum_i log(1 + exp(-y[i] * x[i])) / x.nelement()``
    for a 2D minibatch input `x`.  Set ``size_average`` to ``False`` to
    disable the normalization by the element count.
    """

    def forward(self, input, target):
        _assert_no_grad(target)
        loss = F.soft_margin_loss(input, target,
                                  size_average=self.size_average)
        return loss
class CrossEntropyLoss(_WeightedLoss):
    r"""`LogSoftMax` + `NLLLoss` fused into a single criterion.

    Expects raw class scores of size ``(N, C)`` and integer class-index
    targets of size ``(N,)`` with values in ``[0, C-1]``::

        loss(x, class) = weights[class] * (-x[class] + log(\sum_j exp(x[j])))

    Losses are averaged across the minibatch by default.

    Args:
        weight (Tensor, optional): per-class rescaling weight of size
            ``nclasses``; useful for unbalanced training sets.
        size_average (bool, optional): average (default) or sum the
            per-minibatch losses.
        ignore_index (int, optional): target value that contributes neither
            to the loss nor to the input gradient; when averaging, only
            non-ignored targets are counted.

    Example::
        >>> loss = nn.CrossEntropyLoss()
        >>> output = loss(input, target)
        >>> output.backward()
    """

    def __init__(self, weight=None, size_average=True, ignore_index=-100):
        super(CrossEntropyLoss, self).__init__(weight, size_average)
        self.ignore_index = ignore_index

    def forward(self, input, target):
        _assert_no_grad(target)
        loss = F.cross_entropy(input, target, self.weight,
                               self.size_average, self.ignore_index)
        return loss
class MultiLabelSoftMarginLoss(_WeightedLoss):
    r"""Multi-label one-versus-all loss based on max-entropy.

    For each minibatch sample with 2D input `x` and binary 2D target `y`
    (same size, ``y[i] in {0, 1}``)::

        loss(x, y) = -sum_i (y[i] * log(sigmoid(x[i]))
                             + (1 - y[i]) * log(1 - sigmoid(x[i])))
    """

    def forward(self, input, target):
        loss = F.multilabel_soft_margin_loss(input, target, self.weight,
                                             self.size_average)
        return loss
class CosineEmbeddingLoss(Module):
    r"""Cosine-distance embedding loss for labels in ``{1, -1}``.

    Per sample: ``1 - cos(x1, x2)`` when ``y == 1`` and
    ``max(0, cos(x1, x2) - margin)`` when ``y == -1``.  Used to judge
    whether two inputs are similar or dissimilar, typically for learning
    nonlinear embeddings or semi-supervised tasks.  ``margin`` should lie
    in ``[-1, 1]`` (0 to 0.5 suggested; default 0).  Losses are averaged
    over the batch unless ``size_average`` is ``False``, then summed.
    """

    def __init__(self, margin=0, size_average=True):
        super(CosineEmbeddingLoss, self).__init__()
        self.size_average = size_average
        self.margin = margin

    def forward(self, input1, input2, target):
        loss = F.cosine_embedding_loss(input1, input2, target, self.margin,
                                       self.size_average)
        return loss
class MarginRankingLoss(Module):
    r"""Ranking loss for 1D minibatch inputs `x1`, `x2` and labels in ``{1, -1}``.

    ``loss(x, y) = max(0, -y * (x1 - x2) + margin)`` per sample:
    ``y == 1`` asserts `x1` should rank higher (larger) than `x2`, and
    vice-versa for ``y == -1``.  Losses are averaged over the batch unless
    ``size_average`` is ``False``, in which case they are summed.
    """

    def __init__(self, margin=0, size_average=True):
        super(MarginRankingLoss, self).__init__()
        self.size_average = size_average
        self.margin = margin

    def forward(self, input1, input2, target):
        loss = F.margin_ranking_loss(input1, input2, target, self.margin,
                                     self.size_average)
        return loss
class MultiMarginLoss(Module):
    r"""Multi-class classification hinge (margin-based) loss.

    For each minibatch sample with 2D input `x` and class-index target
    ``0 <= y <= x.size(1)``::

        loss(x, y) = sum_i(max(0, w[y] * (margin - x[y] + x[i]))^p) / x.size(0)

    over all ``i != y``, with ``p`` restricted to ``{1, 2}`` and optional
    per-class weights ``w``.  Losses are averaged over the minibatch unless
    ``size_average`` is ``False``, in which case they are summed.
    """

    def __init__(self, p=1, margin=1, weight=None, size_average=True):
        super(MultiMarginLoss, self).__init__()
        if p not in (1, 2):
            raise ValueError("only p == 1 and p == 2 supported")
        # Weights, when given, must be a 1D per-class tensor.
        assert weight is None or weight.dim() == 1
        self.p = p
        self.margin = margin
        self.weight = weight
        self.size_average = size_average

    def forward(self, input, target):
        loss = F.multi_margin_loss(input, target, self.p, self.margin,
                                   self.weight, self.size_average)
        return loss
class TripletMarginLoss(Module):
    r"""Triplet loss over (anchor, positive, negative) batches of shape ``(N, D)``.

    .. math::
        L(a, p, n) = \frac{1}{N} \sum_{i=1}^N
            \max\{d(a_i, p_i) - d(a_i, n_i) + {\rm margin}, 0\}

    where :math:`d` is the ``p``-norm distance.  Measures relative
    similarity: the anchor should sit closer to the positive example than
    to the negative one by at least ``margin`` (> 0).  ``swap`` enables the
    distance-swap variant described in "Learning shallow convolutional
    feature descriptors with triplet losses"
    (http://www.iis.ee.ic.ac.uk/%7Evbalnt/shallow_descr/TFeat_paper.pdf).

    Example::
        >>> triplet_loss = nn.TripletMarginLoss(margin=1.0, p=2)
        >>> output = triplet_loss(input1, input2, input3)
        >>> output.backward()
    """

    def __init__(self, margin=1.0, p=2, eps=1e-6, swap=False):
        super(TripletMarginLoss, self).__init__()
        self.swap = swap
        self.eps = eps
        self.p = p
        self.margin = margin

    def forward(self, anchor, positive, negative):
        loss = F.triplet_margin_loss(anchor, positive, negative, self.margin,
                                     self.p, self.eps, self.swap)
        return loss
# TODO: L1HingeEmbeddingCriterion
# TODO: MSECriterion weight
# TODO: ClassSimplexCriterion
from itertools import combinations
from string import ascii_letters
import editdistance
from gensim.models import KeyedVectors
from tqdm import tqdm
class CodeMaster:
    """A robo code master in the Codenames game.

    Precomputes similarities between every pair of board words (via a
    gensim word-vector model) and uses them to propose hint words.
    """

    def __init__(self, red_words, blue_words, bad_words, model=None, word_pairs=None):
        """Store the board words and prepare the pair-similarity table.

        Args:
            red_words: list of the red team's board words.
            blue_words: list of the blue team's board words.
            bad_words: list of neutral/assassin words to avoid.
            model: optional pre-loaded gensim KeyedVectors; when omitted the
                default GoogleNews binary is loaded from disk (slow).
            word_pairs: optional precomputed pair table, as produced by
                _init_word_pair_similarities, to skip recomputation.
        """
        self._red_words = red_words
        self._blue_words = blue_words
        self._bad_words = bad_words
        self._guessed_words = []
        if model is None:
            # Load from the default model path
            model_path = './models/GoogleNews-vectors-negative300.bin'
            # Fixed typo in the log message ("maodel" -> "model").
            print(f"Loading model from: {model_path}\n...")
            self._model = KeyedVectors.load_word2vec_format(model_path, binary=True)
            print("Model loaded.")
        else:
            print("Using model passed in to __init__")
            self._model = model
        # _word_pairs: list of (word1, word2, similarity, [(hint, score), ...])
        # tuples, sorted by similarity descending.
        if word_pairs is None:
            print("Computing all word pair similarities...")
            self._word_pairs = self._init_word_pair_similarities()
            print("Word similarities computed!")
        else:
            self._word_pairs = word_pairs

    def _init_word_pair_similarities(self):
        """Compute similarity and hint candidates for every pair of board words."""
        words = self._red_words + self._blue_words + self._bad_words
        word_pairs = []
        for w1, w2 in tqdm([*self._compute_word_pairs(words)]):
            # TODO: support more than 2 words here by averaging all pairwise
            # similarities (keep the std dev of similarities for reference).
            sim = round(self._model.similarity(w1, w2), 3)
            suggestions = self._most_similar(positive=[w1, w2], topn=5)
            word_pairs.append((w1, w2, sim, suggestions))
        # Highest-similarity pairs first so give_hint can scan in order.
        return sorted(word_pairs, key=lambda v: v[2], reverse=True)

    def _compute_word_pairs(self, words):
        """Return all unordered word pairs (words sorted first for stable order)."""
        return combinations(sorted(words), r=2)

    def _most_similar(self, *args, **kwargs):
        """Wrap gensim's most_similar, filtering near-duplicate hint words.

        Use like::

            most_similar(positive=["belt", "stone"],
                         negative=["buck", "nurse"], topn=10)

        Candidates containing non-ASCII-letter characters, sharing a
        substring with an input word, or within edit distance 3 of an input
        word are dropped.
        """
        topn = kwargs.get("topn", 10)
        # Query for extras since the filtering below discards candidates.
        kwargs["topn"] = topn + 20
        raw = self._model.most_similar(*args, **kwargs)
        input_words = kwargs["positive"]
        words = [
            (w.lower(), round(n, 3))
            for w, n in raw
            if not (
                any(c not in ascii_letters for c in w.lower()) or
                any(w.lower() in i_w for i_w in input_words) or
                any(i_w in w.lower() for i_w in input_words) or
                any(editdistance.eval(w.lower(), i_w) <= 3 for i_w in input_words)
            )
        ]
        # Bug fix: honor the caller's requested topn — the +20 padding is an
        # internal detail and previously leaked into the result length.
        return words[:topn]

    def give_hint(self, player, clue_size=2):
        """Return a hint tuple for `player` ('red' or 'blue').

        Returns (hint_word, clue_size, pair_score, hint_score, words).
        Raises NotImplementedError for clue_size > 2 and ValueError for an
        unknown player.
        """
        if clue_size > 2:
            raise NotImplementedError("Clue size must be 1 or 2")
        if player.lower() == "red":
            good_words = self._red_words
            bad_words = self._blue_words + self._bad_words
        elif player.lower() == "blue":
            good_words = self._blue_words
            bad_words = self._red_words + self._bad_words
        else:
            raise ValueError("Player must be one of: ['red', 'blue']")
        # Words already guessed are out of play for both sides.
        good_words = [w for w in good_words if w not in self._guessed_words]
        bad_words = [w for w in bad_words if w not in self._guessed_words]
        # Bug fix: forward the caller's clue_size instead of hard-coding 2.
        return self._give_hint(good_words, bad_words, clue_size=clue_size)

    def _give_hint(self, good_words, bad_words, clue_size=2):
        """Pick the best (hint, pair) using the precomputed pair rankings."""
        if len(good_words) == 1:
            # Only one word left: hint at it directly with clue size 1.
            word_hint_list = self._most_similar(positive=good_words, topn=5)
            word_hint, sim = word_hint_list[0]
            return word_hint, 1, 0, sim, (good_words[0],)
        pairs = [*self._compute_word_pairs(good_words)]
        # Scan pairs from most- to least-similar; take the first hint word
        # for which no other pair would be a better fit.
        hint_word = None
        do_break = False
        for w1, w2, wp_score, hint_words in self._word_pairs:
            if (w1, w2) in pairs:
                for hint_word, hint_score in hint_words:
                    if not self._no_alt_for_hint_word(hint_word, hint_score):
                        do_break = True
                        break
            if do_break:
                break
        # Bug fix: identity comparison with None (was `== None`); the old
        # message was also a pointless f-string with no placeholders.
        if hint_word is None:
            raise ValueError("No Hint word found!")
        return hint_word, clue_size, wp_score, hint_score, (w1, w2)

    def _no_alt_for_hint_word(self, hint_word, score):
        """Return True if another pair would be a strictly better clue for hint_word."""
        for other_w1, other_w2, _, other_hws in self._word_pairs:
            for other_hw, o_score in other_hws:
                if hint_word == other_hw and score < o_score:
                    return True
        return False

    def _get_highest_ranked_hint(self, w1, w2):
        """Return the single best (hint, similarity) for the pair (w1, w2)."""
        word_hint_list = self._most_similar(positive=[w1, w2], topn=5)
        word_hint, sim = word_hint_list[0]
        return word_hint, sim

    def set_word_to_guessed(self, word):
        """Mark `word` as already guessed so future hints ignore it."""
        self._guessed_words.append(word)
from string import ascii_letters
import editdistance
from gensim.models import KeyedVectors
from tqdm import tqdm
class CodeMaster:
    """A robo code master in the Codenames game.

    Precomputes similarities between every pair of board words (via a
    gensim word-vector model) and uses them to propose hint words.
    """

    def __init__(self, red_words, blue_words, bad_words, model=None, word_pairs=None):
        """Store the board words and prepare the pair-similarity table.

        Args:
            red_words: list of the red team's board words.
            blue_words: list of the blue team's board words.
            bad_words: list of neutral/assassin words to avoid.
            model: optional pre-loaded gensim KeyedVectors; when omitted the
                default GoogleNews binary is loaded from disk (slow).
            word_pairs: optional precomputed pair table, as produced by
                _init_word_pair_similarities, to skip recomputation.
        """
        self._red_words = red_words
        self._blue_words = blue_words
        self._bad_words = bad_words
        self._guessed_words = []
        if model is None:
            # Load from the default model path
            model_path = './models/GoogleNews-vectors-negative300.bin'
            # Fixed typo in the log message ("maodel" -> "model").
            print(f"Loading model from: {model_path}\n...")
            self._model = KeyedVectors.load_word2vec_format(model_path, binary=True)
            print("Model loaded.")
        else:
            print("Using model passed in to __init__")
            self._model = model
        # _word_pairs: list of (word1, word2, similarity, [(hint, score), ...])
        # tuples, sorted by similarity descending.
        if word_pairs is None:
            print("Computing all word pair similarities...")
            self._word_pairs = self._init_word_pair_similarities()
            print("Word similarities computed!")
        else:
            self._word_pairs = word_pairs

    def _init_word_pair_similarities(self):
        """Compute similarity and hint candidates for every pair of board words."""
        words = self._red_words + self._blue_words + self._bad_words
        word_pairs = []
        for w1, w2 in tqdm([*self._compute_word_pairs(words)]):
            # TODO: support more than 2 words here by averaging all pairwise
            # similarities (keep the std dev of similarities for reference).
            sim = round(self._model.similarity(w1, w2), 3)
            suggestions = self._most_similar(positive=[w1, w2], topn=5)
            word_pairs.append((w1, w2, sim, suggestions))
        # Highest-similarity pairs first so give_hint can scan in order.
        return sorted(word_pairs, key=lambda v: v[2], reverse=True)

    def _compute_word_pairs(self, words):
        """Return all unordered word pairs (words sorted first for stable order)."""
        return combinations(sorted(words), r=2)

    def _most_similar(self, *args, **kwargs):
        """Wrap gensim's most_similar, filtering near-duplicate hint words.

        Use like::

            most_similar(positive=["belt", "stone"],
                         negative=["buck", "nurse"], topn=10)

        Candidates containing non-ASCII-letter characters, sharing a
        substring with an input word, or within edit distance 3 of an input
        word are dropped.
        """
        topn = kwargs.get("topn", 10)
        # Query for extras since the filtering below discards candidates.
        kwargs["topn"] = topn + 20
        raw = self._model.most_similar(*args, **kwargs)
        input_words = kwargs["positive"]
        words = [
            (w.lower(), round(n, 3))
            for w, n in raw
            if not (
                any(c not in ascii_letters for c in w.lower()) or
                any(w.lower() in i_w for i_w in input_words) or
                any(i_w in w.lower() for i_w in input_words) or
                any(editdistance.eval(w.lower(), i_w) <= 3 for i_w in input_words)
            )
        ]
        # Bug fix: honor the caller's requested topn — the +20 padding is an
        # internal detail and previously leaked into the result length.
        return words[:topn]

    def give_hint(self, player, clue_size=2):
        """Return a hint tuple for `player` ('red' or 'blue').

        Returns (hint_word, clue_size, pair_score, hint_score, words).
        Raises NotImplementedError for clue_size > 2 and ValueError for an
        unknown player.
        """
        if clue_size > 2:
            raise NotImplementedError("Clue size must be 1 or 2")
        if player.lower() == "red":
            good_words = self._red_words
            bad_words = self._blue_words + self._bad_words
        elif player.lower() == "blue":
            good_words = self._blue_words
            bad_words = self._red_words + self._bad_words
        else:
            raise ValueError("Player must be one of: ['red', 'blue']")
        # Words already guessed are out of play for both sides.
        good_words = [w for w in good_words if w not in self._guessed_words]
        bad_words = [w for w in bad_words if w not in self._guessed_words]
        # Bug fix: forward the caller's clue_size instead of hard-coding 2.
        return self._give_hint(good_words, bad_words, clue_size=clue_size)

    def _give_hint(self, good_words, bad_words, clue_size=2):
        """Pick the best (hint, pair) using the precomputed pair rankings."""
        if len(good_words) == 1:
            # Only one word left: hint at it directly with clue size 1.
            word_hint_list = self._most_similar(positive=good_words, topn=5)
            word_hint, sim = word_hint_list[0]
            return word_hint, 1, 0, sim, (good_words[0],)
        pairs = [*self._compute_word_pairs(good_words)]
        # Scan pairs from most- to least-similar; take the first hint word
        # for which no other pair would be a better fit.
        hint_word = None
        do_break = False
        for w1, w2, wp_score, hint_words in self._word_pairs:
            if (w1, w2) in pairs:
                for hint_word, hint_score in hint_words:
                    if not self._no_alt_for_hint_word(hint_word, hint_score):
                        do_break = True
                        break
            if do_break:
                break
        # Bug fix: identity comparison with None (was `== None`); the old
        # message was also a pointless f-string with no placeholders.
        if hint_word is None:
            raise ValueError("No Hint word found!")
        return hint_word, clue_size, wp_score, hint_score, (w1, w2)

    def _no_alt_for_hint_word(self, hint_word, score):
        """Return True if another pair would be a strictly better clue for hint_word."""
        for other_w1, other_w2, _, other_hws in self._word_pairs:
            for other_hw, o_score in other_hws:
                if hint_word == other_hw and score < o_score:
                    return True
        return False

    def _get_highest_ranked_hint(self, w1, w2):
        """Return the single best (hint, similarity) for the pair (w1, w2)."""
        word_hint_list = self._most_similar(positive=[w1, w2], topn=5)
        word_hint, sim = word_hint_list[0]
        return word_hint, sim

    def set_word_to_guessed(self, word):
        """Mark `word` as already guessed so future hints ignore it."""
        self._guessed_words.append(word)
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QWidget, QApplication, QFrame,
QVBoxLayout, QSplitter, QDesktopWidget)
from .params import Params
from .introduction import Introduction
from .type_of_task import TypeOfTask
from .set_file import SetFile
from .data_check import DataCheck
from .overfitting import Overfitting
from .analysis import Analysis
from .menuview import MenuView
from .results import Results, Results2
from .bias_variance import BiasVariance
from .learning_curve import LearningCurve, LearningCurve2
from .feature_selection import FeatureSelection
from .prediction import Prediction
from .error import Error
class App(QWidget):
    """Top-level MALSS interactive window: a menu pane plus a content pane."""

    def __init__(self, lang='en'):
        super().__init__()
        self.params = Params(lang)
        self.initUI()

    def initUI(self):
        """Build the static chrome and the initial menu/content split view."""
        # Maps a menu label (Japanese or English) to the view class it opens.
        self.txt2func = {
            'はじめに': Introduction, 'Introduction': Introduction,
            '分析タスク': TypeOfTask, 'Task': TypeOfTask,
            '入力データ': SetFile, 'Input data': SetFile,
            'データの確認': DataCheck, 'Data check': DataCheck,
            '過学習': Overfitting, 'Overfitting': Overfitting,
            '分析の実行': Analysis, 'Analysis': Analysis,
            '結果の確認': Results, 'Results': Results,
            'バイアスとバリアンス': BiasVariance, 'Bias and Variance': BiasVariance,
            '学習曲線': LearningCurve, 'Learning curve': LearningCurve,
            '特徴量選択': FeatureSelection, 'Feature selection': FeatureSelection,
            '結果の確認2': Results2, 'Results 2': Results2,
            '学習曲線2': LearningCurve2, 'Learning curve 2': LearningCurve2,
            '予測': Prediction, 'Prediction': Prediction,
            'Error': Error}
        self.setMinimumSize(1280, 960)
        self.setStyleSheet('background-color: rgb(242, 242, 242)')
        # Fixed-height white header bar above the split view.
        header = QFrame(self)
        header.setFrameShape(QFrame.StyledPanel)
        header.setFixedHeight(50)
        header.setStyleSheet('background-color: white')
        # Horizontal splitter: menu on the left, content on the right.
        self.splitter = QSplitter(Qt.Horizontal, self)
        self.splitter.setHandleWidth(0)
        self.menuview = MenuView(self.splitter, self.update_content,
                                 self.params)
        self.menuview.setWidgetResizable(True)
        self.contentview = Introduction(self.splitter,
                                        self.menuview.edit_button, self.params)
        self.contentview.setWidgetResizable(True)
        self.splitter.addWidget(self.menuview)
        self.splitter.addWidget(self.contentview)
        layout = QVBoxLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(header)
        layout.addWidget(self.splitter)
        self.setLayout(layout)
        self.center()
        # self.showMaximized()
        self.setWindowTitle('MALSS interactive')
        self.show()

    def center(self):
        """Move the window so it is centered on the available screen area."""
        frame_rect = self.frameGeometry()
        screen_center = QDesktopWidget().availableGeometry().center()
        frame_rect.moveCenter(screen_center)
        self.move(frame_rect.topLeft())

    def update_content(self, text):
        """Swap the right-hand content view for the one mapped to `text`."""
        current = self.splitter.widget(1)
        if current is None:
            return
        if text not in self.txt2func:
            return
        # Dispose of the old view before attaching its replacement.
        current.hide()
        current.deleteLater()
        view_cls = self.txt2func[text]
        self.contentview = view_cls(self.splitter,
                                    self.menuview.edit_button,
                                    self.params)
        self.contentview.setWidgetResizable(True)
        self.splitter.addWidget(self.contentview)
if __name__ == '__main__':
    qt_app = QApplication(sys.argv)
    main_window = App()  # keep a reference so the window is not collected
    sys.exit(qt_app.exec_())
import sys
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (QWidget, QApplication, QFrame,
QVBoxLayout, QSplitter, QDesktopWidget)
from .params import Params
from .introduction import Introduction
from .type_of_task import TypeOfTask
from .set_file import SetFile
from .data_check import DataCheck
from .overfitting import Overfitting
from .analysis import Analysis
from .menuview import MenuView
from .results import Results, Results2
from .bias_variance import BiasVariance
from .learning_curve import LearningCurve, LearningCurve2
from .feature_selection import FeatureSelection
from .prediction import Prediction
from .error import Error
class App(QWidget):
    """Top-level MALSS interactive window: a menu pane plus a content pane."""

    def __init__(self, lang='en'):
        super().__init__()
        self.params = Params(lang)
        self.initUI()

    def initUI(self):
        """Build the static chrome and the initial menu/content split view."""
        # Maps a menu label (Japanese or English) to the view class it opens.
        self.txt2func = {
            'はじめに': Introduction, 'Introduction': Introduction,
            '分析タスク': TypeOfTask, 'Task': TypeOfTask,
            '入力データ': SetFile, 'Input data': SetFile,
            'データの確認': DataCheck, 'Data check': DataCheck,
            '過学習': Overfitting, 'Overfitting': Overfitting,
            '分析の実行': Analysis, 'Analysis': Analysis,
            '結果の確認': Results, 'Results': Results,
            'バイアスとバリアンス': BiasVariance, 'Bias and Variance': BiasVariance,
            '学習曲線': LearningCurve, 'Learning curve': LearningCurve,
            '特徴量選択': FeatureSelection, 'Feature selection': FeatureSelection,
            '結果の確認2': Results2, 'Results 2': Results2,
            '学習曲線2': LearningCurve2, 'Learning curve 2': LearningCurve2,
            '予測': Prediction, 'Prediction': Prediction,
            'Error': Error}
        self.setMinimumSize(1280, 960)
        self.setStyleSheet('background-color: rgb(242, 242, 242)')
        # Fixed-height white header bar above the split view.
        header = QFrame(self)
        header.setFrameShape(QFrame.StyledPanel)
        header.setFixedHeight(50)
        header.setStyleSheet('background-color: white')
        # Horizontal splitter: menu on the left, content on the right.
        self.splitter = QSplitter(Qt.Horizontal, self)
        self.splitter.setHandleWidth(0)
        self.menuview = MenuView(self.splitter, self.update_content,
                                 self.params)
        self.menuview.setWidgetResizable(True)
        self.contentview = Introduction(self.splitter,
                                        self.menuview.edit_button, self.params)
        self.contentview.setWidgetResizable(True)
        self.splitter.addWidget(self.menuview)
        self.splitter.addWidget(self.contentview)
        layout = QVBoxLayout(self)
        layout.setSpacing(0)
        layout.setContentsMargins(0, 0, 0, 0)
        layout.addWidget(header)
        layout.addWidget(self.splitter)
        self.setLayout(layout)
        self.center()
        # self.showMaximized()
        self.setWindowTitle('MALSS interactive')
        self.show()

    def center(self):
        """Move the window so it is centered on the available screen area."""
        frame_rect = self.frameGeometry()
        screen_center = QDesktopWidget().availableGeometry().center()
        frame_rect.moveCenter(screen_center)
        self.move(frame_rect.topLeft())

    def update_content(self, text):
        """Swap the right-hand content view for the one mapped to `text`."""
        current = self.splitter.widget(1)
        if current is None:
            return
        if text not in self.txt2func:
            return
        # Dispose of the old view before attaching its replacement.
        current.hide()
        current.deleteLater()
        view_cls = self.txt2func[text]
        self.contentview = view_cls(self.splitter,
                                    self.menuview.edit_button,
                                    self.params)
        self.contentview.setWidgetResizable(True)
        self.splitter.addWidget(self.contentview)
if __name__ == '__main__':
    qt_app = QApplication(sys.argv)
    main_window = App()  # keep a reference so the window is not collected
    sys.exit(qt_app.exec_())
import os
import sys
"""
Test 72 transform, if transformed data by old transformer is the same in tf2
and tf1
Conclusion: It is the same
"""
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(PROJECT_PATH)
import matplotlib;
matplotlib.use('agg')
import pandas as pd
import tensorflow as tf
import numpy as np
import transformations
def test_transformed_data_tf1_tf2(transformer,
                                  normal_data_name='tf2_normal.pkl',
                                  tf2_transformed_data_name='tf2_old_transformed.pkl'):
    """Check in-process (tf1-style) transforms against saved tf2 output.

    Loads the normal splits, transforms each one with `transformer`, and
    compares the result element-wise with the pickled tf2-transformed data,
    printing the matching fraction for every array (1.0 everywhere means
    the two pipelines agree).

    Returns:
        Two tuples of (x_train, y_train, x_val, y_val, x_test, y_test):
        the freshly transformed data, then the data loaded from disk.
    """
    save_dir = os.path.join(PROJECT_PATH, 'tests', 'aux_data')
    (x_train, y_train), (x_val, y_val), (
        x_test, y_test) = pd.read_pickle(os.path.join(save_dir, normal_data_name))

    def _transform_split(x):
        # Apply every transformation to every sample: labels cycle through
        # all transform indices while samples are repeated to match.
        y_t = np.tile(np.arange(transformer.n_transforms), len(x))
        x_t = transformer.transform_batch(
            np.repeat(x, transformer.n_transforms, axis=0), y_t)
        return x_t, y_t

    # Previously the same three lines were copy-pasted per split.
    x_train_transform_tf1, y_train_transform_tf1 = _transform_split(x_train)
    x_val_transform_tf1, y_val_transform_tf1 = _transform_split(x_val)
    x_test_transform_tf1, y_test_transform_tf1 = _transform_split(x_test)

    (x_train_transform_tf2, y_train_transform_tf2), (
        x_val_transform_tf2, y_val_transform_tf2), (
        x_test_transform_tf2, y_test_transform_tf2) = pd.read_pickle(
        os.path.join(save_dir, tf2_transformed_data_name))

    print(np.mean(x_train_transform_tf1 == x_train_transform_tf2))
    print(np.mean(y_train_transform_tf1 == y_train_transform_tf2))
    print(np.mean(x_test_transform_tf1 == x_test_transform_tf2))
    print(np.mean(y_test_transform_tf1 == y_test_transform_tf2))
    print(np.mean(x_val_transform_tf1 == x_val_transform_tf2))
    print(np.mean(y_val_transform_tf1 == y_val_transform_tf2))
    return (x_train_transform_tf1, y_train_transform_tf1, x_val_transform_tf1,
            y_val_transform_tf1, x_test_transform_tf1, y_test_transform_tf1), (
        x_train_transform_tf2, y_train_transform_tf2, x_val_transform_tf2,
        y_val_transform_tf2, x_test_transform_tf2, y_test_transform_tf2)
if __name__ == '__main__':
    results_path = os.path.join(PROJECT_PATH, 'results', 'replication')
    # Let GPU memory grow on demand instead of reserving it all up front.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    # Compare the old transformer's output between the tf1 and tf2 paths.
    transformer_old = transformations.Transformer()
    tf1_data, tf2_data = test_transformed_data_tf1_tf2(transformer_old)
    for tf1_item, tf2_item in zip(tf1_data, tf2_data):
        print(np.mean(tf1_item == tf2_item))
import sys
"""
Test 72 transform, if transformed data by old transformer is the same in tf2
and tf1
Conclusion: It is the same
"""
PROJECT_PATH = os.path.abspath(
os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(PROJECT_PATH)
import matplotlib;
matplotlib.use('agg')
import pandas as pd
import tensorflow as tf
import numpy as np
import transformations
def test_transformed_data_tf1_tf2(transformer,
                                  normal_data_name='tf2_normal.pkl',
                                  tf2_transformed_data_name='tf2_old_transformed.pkl'):
    """Check that data transformed here (tf1 path) matches the saved tf2 output.

    Loads the raw (train/val/test) splits, applies every transformation of
    *transformer* to each split, then compares against the pickled arrays the
    tf2 implementation produced, printing the element-wise match ratio for
    each pair (1.0 == identical).

    Returns:
        Two tuples — the tf1-computed arrays and the tf2-loaded arrays — each
        ordered (x_train, y_train, x_val, y_val, x_test, y_test).
    """
    save_dir = os.path.join(PROJECT_PATH, 'tests', 'aux_data')
    (x_train, y_train), (x_val, y_val), (
        x_test, y_test) = pd.read_pickle(os.path.join(save_dir, normal_data_name))

    def _apply_all_transforms(split):
        # Each sample is repeated n_transforms times and paired with the full
        # cycle of transform indices 0..n_transforms-1.
        labels = np.tile(np.arange(transformer.n_transforms), len(split))
        transformed = transformer.transform_batch(
            np.repeat(split, transformer.n_transforms, axis=0), labels)
        return transformed, labels

    x_train_tf1, y_train_tf1 = _apply_all_transforms(x_train)
    x_val_tf1, y_val_tf1 = _apply_all_transforms(x_val)
    x_test_tf1, y_test_tf1 = _apply_all_transforms(x_test)

    (x_train_tf2, y_train_tf2), (x_val_tf2, y_val_tf2), (
        x_test_tf2, y_test_tf2) = pd.read_pickle(
        os.path.join(save_dir, tf2_transformed_data_name))

    # Match ratios, printed in the original order: train, test, val.
    for tf1_arr, tf2_arr in ((x_train_tf1, x_train_tf2),
                             (y_train_tf1, y_train_tf2),
                             (x_test_tf1, x_test_tf2),
                             (y_test_tf1, y_test_tf2),
                             (x_val_tf1, x_val_tf2),
                             (y_val_tf1, y_val_tf2)):
        print(np.mean(tf1_arr == tf2_arr))

    return (x_train_tf1, y_train_tf1, x_val_tf1,
            y_val_tf1, x_test_tf1, y_test_tf1), (
        x_train_tf2, y_train_tf2, x_val_tf2,
        y_val_tf2, x_test_tf2, y_test_tf2)
if __name__ == '__main__':
    # Duplicate listing of the entry point above (the source dump repeats the
    # file verbatim). Compares tf1-transformed data against saved tf2 output.
    results_path = os.path.join(PROJECT_PATH, 'results', 'replication')  # NOTE(review): unused in this block
    # Not use all gpu: enable memory growth so TF does not reserve all GPU RAM.
    gpus = tf.config.experimental.list_physical_devices('GPU')
    for gpu in gpus:
        tf.config.experimental.set_memory_growth(gpu, True)
    # # Transformer
    transformer_old = transformations.Transformer()
    tf1_data, tf2_data = test_transformed_data_tf1_tf2(transformer_old)
    for i in range(len(tf1_data)):
        print(np.mean(tf1_data[i] == tf2_data[i]))
from __future__ import annotations
from typing import Any, Generic, List, Optional, TypeVar
import jax
import jax.numpy as jnp
from jax.nn import softplus
from jax.scipy import special as jss
from tjax import Generator, RealArray, Shape
from tjax.dataclasses import dataclass
from ..exp_to_nat import ExpToNat
from ..natural_parametrization import EP, NaturalParametrization
from ..parameter import VectorSupport, distribution_parameter
from ..samplable import Samplable
__all__: List[str] = []
# https://github.com/python/mypy/issues/10140
# EP = TypeVar('EP', bound='DirichletCommonEP[Any]')
@dataclass
class DirichletCommonNP(NaturalParametrization[EP, RealArray], Samplable, Generic[EP]):
    """Natural parametrization shared by the Dirichlet-family classes.

    The single natural parameter stores alpha - 1 per component (sampling
    uses concentration 1 + alpha_minus_one).
    """
    alpha_minus_one: RealArray = distribution_parameter(VectorSupport())

    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        """Batch shape: every axis except the trailing component axis."""
        batch_shape = self.alpha_minus_one.shape[:-1]
        return batch_shape

    def log_normalizer(self) -> RealArray:
        """Log-partition function."""
        a = self.alpha_minus_one
        total = jnp.sum(a, axis=-1) + a.shape[-1]
        return jnp.sum(jss.gammaln(a + 1.0), axis=-1) - jss.gammaln(total)

    def sample(self, rng: Generator, shape: Optional[Shape] = None) -> RealArray:
        """Draw Dirichlet variates, dropping the last (redundant) coordinate."""
        target_shape = None if shape is None else shape + self.shape
        draws = jax.random.dirichlet(rng.key, 1.0 + self.alpha_minus_one, target_shape)
        return draws[..., :-1]

    # New methods ----------------------------------------------------------------------------------
    def dimensions(self) -> int:
        """Number of components along the trailing axis."""
        return self.alpha_minus_one.shape[-1]

    # Private methods ------------------------------------------------------------------------------
    def _exp_helper(self) -> RealArray:
        """Digamma difference used to compute expectation parameters."""
        a = self.alpha_minus_one
        digamma_total = jss.digamma(jnp.sum(a, axis=-1, keepdims=True) + a.shape[-1])
        return jss.digamma(a + 1.0) - digamma_total
# Type variable for the paired natural parametrization.
NP = TypeVar('NP', bound=DirichletCommonNP[Any])

@dataclass
class DirichletCommonEP(ExpToNat[NP, RealArray], Generic[NP]):
    """Expectation parametrization paired with DirichletCommonNP.

    Conversion back to natural parameters is done numerically through the
    ExpToNat search machinery.
    """
    # Expectation parameter; the name indicates the per-component mean of the
    # log-probability (value semantics inherited from the base classes).
    mean_log_probability: RealArray = distribution_parameter(VectorSupport())
    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        # Batch shape: everything but the trailing component axis.
        return self.mean_log_probability.shape[:-1]
    def expected_carrier_measure(self) -> RealArray:
        # No carrier-measure contribution: always zeros.
        return jnp.zeros(self.shape)
    def initial_search_parameters(self) -> RealArray:
        # Start the numerical search at the origin.
        return jnp.zeros(self.mean_log_probability.shape)
    def search_gradient(self, search_parameters: RealArray) -> RealArray:
        return self._natural_gradient(self.search_to_natural(search_parameters)).alpha_minus_one
    # New methods ----------------------------------------------------------------------------------
    def dimensions(self) -> int:
        # Number of components along the trailing axis.
        return self.mean_log_probability.shape[-1]
    # Private methods ------------------------------------------------------------------------------
    @classmethod
    def _transform_nat_helper(cls, search_parameters: RealArray) -> RealArray:
        # Run Newton's method on the whole real hyperspace.
        # softplus maps R -> (0, inf), keeping alpha positive (alpha - 1 > -1).
        return softplus(search_parameters) - 1.0
from typing import Any, Generic, List, Optional, TypeVar
import jax
import jax.numpy as jnp
from jax.nn import softplus
from jax.scipy import special as jss
from tjax import Generator, RealArray, Shape
from tjax.dataclasses import dataclass
from ..exp_to_nat import ExpToNat
from ..natural_parametrization import EP, NaturalParametrization
from ..parameter import VectorSupport, distribution_parameter
from ..samplable import Samplable
__all__: List[str] = []
# https://github.com/python/mypy/issues/10140
# EP = TypeVar('EP', bound='DirichletCommonEP[Any]')
# ---- Duplicate listing (the source dump repeats this file verbatim) ----
@dataclass
class DirichletCommonNP(NaturalParametrization[EP, RealArray], Samplable, Generic[EP]):
    """Natural parametrization shared by the Dirichlet-family classes."""
    # Natural parameter: alpha - 1 per component (sampling uses 1 + this).
    alpha_minus_one: RealArray = distribution_parameter(VectorSupport())
    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        # Batch shape: everything but the trailing component axis.
        return self.alpha_minus_one.shape[:-1]
    def log_normalizer(self) -> RealArray:
        # Log-partition: sum of per-component log-gammas minus the total's log-gamma.
        q = self.alpha_minus_one
        return (jnp.sum(jss.gammaln(q + 1.0), axis=-1)
                - jss.gammaln(jnp.sum(q, axis=-1) + q.shape[-1]))
    def sample(self, rng: Generator, shape: Optional[Shape] = None) -> RealArray:
        # The last coordinate is dropped: it is determined by the others.
        if shape is not None:
            shape += self.shape
        return jax.random.dirichlet(rng.key, 1.0 + self.alpha_minus_one, shape)[..., :-1]
    # New methods ----------------------------------------------------------------------------------
    def dimensions(self) -> int:
        # Number of components along the trailing axis.
        return self.alpha_minus_one.shape[-1]
    # Private methods ------------------------------------------------------------------------------
    def _exp_helper(self) -> RealArray:
        # Digamma difference used to compute expectation parameters.
        q = self.alpha_minus_one
        return jss.digamma(q + 1.0) - jss.digamma(jnp.sum(q, axis=-1, keepdims=True) + q.shape[-1])
# ---- Duplicate listing (the source dump repeats this file verbatim) ----
NP = TypeVar('NP', bound=DirichletCommonNP[Any])

@dataclass
class DirichletCommonEP(ExpToNat[NP, RealArray], Generic[NP]):
    """Expectation parametrization paired with DirichletCommonNP."""
    mean_log_probability: RealArray = distribution_parameter(VectorSupport())
    # Implemented methods --------------------------------------------------------------------------
    @property
    def shape(self) -> Shape:
        return self.mean_log_probability.shape[:-1]
    def expected_carrier_measure(self) -> RealArray:
        # No carrier-measure contribution: always zeros.
        return jnp.zeros(self.shape)
    def initial_search_parameters(self) -> RealArray:
        return jnp.zeros(self.mean_log_probability.shape)
    def search_gradient(self, search_parameters: RealArray) -> RealArray:
        return self._natural_gradient(self.search_to_natural(search_parameters)).alpha_minus_one
    # New methods ----------------------------------------------------------------------------------
    def dimensions(self) -> int:
        return self.mean_log_probability.shape[-1]
    # Private methods ------------------------------------------------------------------------------
    @classmethod
    def _transform_nat_helper(cls, search_parameters: RealArray) -> RealArray:
        # Run Newton's method on the whole real hyperspace.
        # softplus maps R -> (0, inf), keeping alpha - 1 > -1.
        return softplus(search_parameters) - 1.0
import PySimpleGUI as sg
import asyncio
import os
import sys
import time
from collections import deque
import driver
# Module-level state shared with the GUI event loop.
delay_start = None  # wall-clock time when a stabilization wait began, or None
gra_ids = deque()   # figure ids of the static graph grid lines/labels (scrolled separately)

async def connect(tc: driver.Thermocycler):
    """Open the serial connection to the thermocycler."""
    await tc.connect(port='/dev/ttyACM0')

async def open_lid(tc: driver.Thermocycler):
    """Open the thermocycler lid."""
    await tc.open()

async def close_lid(tc: driver.Thermocycler):
    """Close the thermocycler lid."""
    await tc.close()

async def deactivate_all(tc: driver.Thermocycler):
    """Turn off both the block and lid heaters."""
    await tc.deactivate_all()
def _parse_clamped(text, lo, hi):
    """Parse *text* as a float clamped to [lo, hi]; return None if unparseable."""
    try:
        return min(hi, max(lo, float(text)))
    except (TypeError, ValueError):
        # TypeError covers None (an unset GUI field passed straight through by
        # prep_job); ValueError covers non-numeric strings. Both mean "no value".
        return None

def prep_job_func(job, hold_time_str, temp_str, lid_temp_str):
    """Append one schedule entry [begin_at, hold_time, temp, lid_temp] to *job*.

    begin_at is one second from now for the first entry, otherwise the end of
    the previous entry (its start plus its hold time, when set). Fields that
    cannot be parsed become None; the entry is only appended when at least one
    of block/lid temperature is given.
    """
    begin_at = time.time() + 1.0
    if len(job) > 0:
        begin_at = job[-1][0]
        if job[-1][1] is not None:
            begin_at = begin_at + job[-1][1]
    sys.stderr.write('prep_job_func: begin_at=%f\n' % begin_at)
    # BUG FIX: the original only caught ValueError, so a None field value
    # (float(None) -> TypeError) crashed the scheduler.
    hold_time = _parse_clamped(hold_time_str, 1, 3600)
    temp = _parse_clamped(temp_str, 4, 100)
    lid_temp = _parse_clamped(lid_temp_str, 4, 110)
    if temp is not None or lid_temp is not None:
        job.append([begin_at, hold_time, temp, lid_temp])
def add_waiting(tc: driver.Thermocycler, job):
    """Return a new schedule with ramp-up delay entries inserted.

    For each step whose block or lid temperature jumps by more than 10 degrees
    (relative to the previous step, or to the current hardware readings for the
    first step), a delay proportional to the square of the jump is inserted
    before the step itself. Steps with jumps of 10 degrees or less are not
    carried over (matching the original behaviour; the caller is commented out).
    """
    expanded = []
    prev_block = tc._current_temp
    prev_lid = tc._lid_temp
    for entry in job:
        begin_at, hold_time, block_temp, lid_temp = entry
        block_jump = abs(block_temp - prev_block)
        lid_jump = abs(lid_temp - prev_lid)
        if expanded:
            # Chain onto the end of the last emitted entry.
            begin_at = expanded[-1][0] + expanded[-1][1]
        if block_jump > 10 or lid_jump > 10:
            steps = max(block_jump, lid_jump) / 10
            delay = steps * steps * 5
            expanded.append([begin_at, delay, block_temp, lid_temp])
            expanded.append([begin_at + delay, hold_time, block_temp, lid_temp])
        prev_block = block_temp
        prev_lid = lid_temp
    return expanded
def debug_print_job(job):
    """Write one human-readable line per schedule entry to stderr.

    Fields that cannot be formatted (None values, out-of-range timestamps)
    are shown as '-'.
    """
    for begin_at, hold_time, block_temp, lid_temp in job:
        fields = ['-', '-', '-', '-']
        try:
            fields[0] = time.asctime(time.localtime(begin_at))
        except ValueError:
            pass
        try:
            fields[1] = '%d' % hold_time
        except TypeError:
            pass
        try:
            fields[2] = '%g' % block_temp
        except TypeError:
            pass
        try:
            fields[3] = '%g' % lid_temp
        except TypeError:
            pass
        sys.stderr.write('%s %s %s %s\n' % tuple(fields))
def prep_job(window, tc: driver.Thermocycler, save_value, job):
    """Build the job schedule from the GUI fields.

    For each incubation section a (keys 'c1'..'c5') whose cycle-count field is
    non-empty, the section's five time/temperature rows are appended to *job*
    v0 times (once per cycle) via prep_job_func. Field values are also recorded
    into *save_value* so they can be persisted by save_job.
    """
    for a in range(5):
        k1 = 'c%d' % (a+1)
        if window[k1].Get() != '':
            s0 = window[k1].Get()
            v0 = int(s0)  # NOTE(review): non-integer input raises ValueError here
            save_value[k1] = s0
            for c in range(v0):  # repeat the whole section once per cycle
                for b in range(5):
                    k2 = 'c%d%d' % (a+1, b+1)
                    k21 = '%s_time' % k2
                    k22 = '%s_btemp' % k2
                    k23 = '%s_ltemp' % k2
                    v1 = window[k21].Get()
                    v2 = window[k22].Get()
                    v3 = window[k23].Get()
                    if v1 != None:
                        save_value[k21] = v1
                    if v2 != None:
                        save_value[k22] = v2
                    if v3 != None:
                        save_value[k23] = v3
                    # NOTE(review): None values are passed through here; see
                    # prep_job_func for how unset fields are handled.
                    prep_job_func(job, v1, v2, v3)
    # job = add_waiting(tc, job)
    debug_print_job(job)
def mean(list):
    """Return the arithmetic mean of *list*, or 0 for an empty sequence.

    NOTE: the parameter name shadows the builtin `list`; it is kept so the
    signature stays identical for existing callers.
    """
    # Builtin sum() replaces the original manual accumulation loop (which also
    # shadowed the builtin `sum`).
    if not list:
        return 0
    return sum(list) / len(list)
def is_block_stabilized(temp):
    """Return True when the block temperature has settled near *temp*.

    Reads the module-global `log` (created in the __main__ section): the mean
    of the last 30 block-temperature samples must be within 1 degree of *temp*.
    Requires at least 30 samples; returns False for temp of None.
    """
    if temp == None:
        return False
    e = len(log)
    if e < 30:
        return False
    s = max(0, e - 30)
    d = 1
    v = []
    for i in range(s, e):
        v.append(log[i][2])  # log rows are [timestamp, lid_temp, block_temp]
    d = abs(mean(v) - temp)
    if d < 1:
        return True
    return False
def is_lid_stabilized(temp):
    """Return True when the lid temperature is considered stable for *temp*.

    Reads the module-global `log`. For targets of 90 degrees or more, the mean
    of the last 30 lid samples must be within 10 degrees; below 90 degrees the
    function returns True unconditionally.
    """
    if temp == None:
        return False
    e = len(log)
    if e < 30:
        return False
    s = max(0, e - 30)
    d = 30
    v = []
    for i in range(s, e):
        v.append(log[i][3])  # NOTE(review): index 3 is out of range for the
                             # 3-element rows appended in __main__ -- confirm;
                             # lid temperature is stored at index 1 there.
    d = abs(mean(v) - temp)
    if temp >= 90 and d < 10:
        return True
    if temp < 90:
        # NOTE(review): `d` is ignored on this path -- likely intentional
        # (low lid targets need no settling), but worth confirming.
        return True
    return False
def delay_schedule(job, delay):
    """Shift every entry's start time in *job* forward by *delay* seconds (in place)."""
    for entry in job:
        entry[0] = entry[0] + delay
def load_job(window, job_file='pcr_gui.value'):
    """Populate the GUI fields from a saved tab-separated key/value file.

    Each line of *job_file* is '<widget key>\\t<value>'. Missing files are
    reported to stderr and silently skipped.
    """
    jv = None
    try:
        jv = open(job_file, 'r')
    except IOError:
        sys.stderr.write('load_job: \'%s\' is not found.\n' % job_file)
        return
    for row in jv.readlines():
        key, value = row[:-1].split('\t')  # strip the trailing newline
        try:
            window[key].Update(value)
        except ValueError:
            # NOTE(review): an unknown widget key may raise something other
            # than ValueError in PySimpleGUI -- confirm the exception type.
            sys.stderr.write('load_job: \'%s\' widget is not found.\n' % key)
    jv.close()
def save_job(job_value):
    """Persist GUI field values to 'pcr_gui.value' as tab-separated key/value lines."""
    # Context manager guarantees the file is closed even if a write fails
    # (the original left the handle open on error).
    with open('pcr_gui.value', 'w') as jv:
        for key in job_value.keys():
            jv.write('%s\t%s\n' % (key, job_value[key]))
def run(window, tc: driver.Thermocycler, job):
    """Advance the schedule by one tick; return False when the job is finished.

    Called from the GUI event loop while run_flag is set. On the first call
    (empty *job*) the schedule is built from the GUI fields and saved. While
    the block has not yet stabilized at the current target, the start of the
    wait is remembered in the global delay_start; once stable, the remaining
    schedule is shifted by the time spent waiting. An entry whose start time
    has passed is issued to the hardware and popped.
    """
    global delay_start
    ct = time.time()
    if len(job) == 0:
        # First tick: build the schedule from the GUI and persist the values.
        job_value = {}
        prep_job(window, tc, job_value, job)
        save_job(job_value)
    elif tc._target_temp != None and not is_block_stabilized(tc._target_temp):
        # Block still ramping: remember when the wait began.
        if delay_start == None:
            delay_start = ct
    else:
        if delay_start != None:
            # Ramp finished: push the rest of the schedule back by the wait time.
            delay = ct - delay_start
            delay_schedule(job, delay)
            delay_start = None
        begin_at, hold_time, temp, lid_temp = job[0]
        sys.stderr.write('time=%f begin_at=%f\n' % (time.time(), begin_at))
        if time.time() > begin_at:
            # Issue this entry to the hardware: a None temperature means the
            # corresponding heater is deactivated instead.
            temp_str = '-'
            hold_time_str = '-'
            if temp != None:
                temp_str = '%g' % temp
                if hold_time != None:
                    hold_time_str = '%d' % hold_time
                asyncio.run(tc.set_temperature(temp, hold_time))
            else:
                asyncio.run(tc.deactivate_block())
            lid_temp_str = '-'
            if lid_temp != None:
                lid_temp_str = '%g' % lid_temp
                asyncio.run(tc.set_lid_temperature(lid_temp))
            else:
                asyncio.run(tc.deactivate_lid())
            sys.stderr.write('run: %s %s %s %s\n' % (time.asctime(time.localtime(begin_at)), hold_time_str, temp_str, lid_temp_str))
            job.popleft()
    if len(job) == 0:
        return False
    # Show the estimated completion time (start of the last scheduled entry).
    etc = time.asctime(time.localtime(job[-1][0]))
    window['ETC'].Update(value=etc)
    return True
def update_graph(window, tc: driver.Thermocycler, log, plot_span):
    """Append one averaged sample to the strip chart and redraw/scroll it.

    Averages the lid/block samples recorded in the last second of *log*,
    appends the pair to *plot_span*, and draws the corresponding chart
    segment. When the chart is full (900 points) it scrolls left by one,
    moving the static grid (ids in the global gra_ids) back into place.
    """
    global gra_ids
    temp = deque()
    lid_temp = deque()
    i = len(log) - 1
    # Collect all samples from the last second of the log (newest first).
    while i >= 0 and log[-1][0] - log[i][0] <= 1:
        lid_temp.append(log[i][1])
        temp.append(log[i][2])
        i = i - 1
    # sys.stderr.write('update_graph: i=%d\n' % i)
    if len(lid_temp) > 5 and len(temp) > 5:
        plot_span.append([mean(lid_temp), mean(temp)])
    # sys.stderr.write('update_graph: len(plot_span)=%d\n' % len(plot_span))
    x = len(plot_span)
    if x == 0:
        # First call: draw the horizontal 10-degree grid lines and labels once.
        # NOTE: the loop variable shadows the `temp` deque above (harmless here,
        # since the deque is no longer used).
        for temp in range(10, 120, 10):
            gra_ids.append(window['graph'].DrawLine(point_from=(0, temp), point_to=(900, temp), color='#888888'))
            gra_ids.append(window['graph'].DrawText('%2d' % temp, location=(20, temp), color='#555555'))
    elif x == 1:
        # Single data point so far: draw dots (yellow = lid, magenta = block).
        y1 = plot_span[-1][0]
        y2 = plot_span[-1][1]
        window['graph'].DrawPoint(point=(x, y1), size=5, color='#ffff00')
        window['graph'].DrawPoint(point=(x, y2), size=5, color='#ff00ff')
    elif x > 1:
        # Connect the last two points with line segments.
        y11 = plot_span[-1][0]
        y12 = plot_span[-2][0]
        y21 = plot_span[-1][1]
        y22 = plot_span[-2][1]
        window['graph'].DrawLine(point_from=(x, y11), point_to=(x-1, y12), width=2, color='#ffff00')
        window['graph'].DrawLine(point_from=(x, y21), point_to=(x-1, y22), width=2, color='#ff00ff')
    if x == 900:
        # Chart full: scroll everything left and move the static grid back.
        plot_span.popleft()
        window['graph'].Move(-1, 0)
        for id in gra_ids:
            window['graph'].MoveFigure(id, 1, 0)
    window['CT'].Update(value=time.asctime())
def save_log(log):
    """Write the temperature log to a timestamped 'tc-<time>.log' file.

    Each row of *log* is [timestamp, lid_temp, block_temp]; each output line
    is '<YYYY/MM/DD HH:MM:SS.cc>\\t<lid_temp>\\t<block_temp>'.
    """
    t = time.time()
    ct = time.localtime(t)
    millisec = t - int(t)
    time_str = '%04d%02d%02d_%02d%02d%02d.%02d' % (ct.tm_year, ct.tm_mon, ct.tm_mday, ct.tm_hour, ct.tm_min, ct.tm_sec, round(millisec*100))
    log_fname = 'tc-%s.log' % time_str
    # Context manager ensures the file is closed even on a write error.
    with open(log_fname, 'w') as log_file:
        for time_time, lid_temp, block_temp in log:
            # BUG FIX: the fractional part must be isolated before scaling.
            # The original computed round(time_time - int(time_time) * 100),
            # which yields a huge negative "centiseconds" value.
            time_msec = round((time_time - int(time_time)) * 100)
            row_time = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time_time)) + '.%02d' % time_msec
            log_file.write('%s\t%g\t%g\n' % (row_time, lid_temp, block_temp))
async def interrupt_callback(res):
    """Driver interrupt handler: echo the message to stderr."""
    sys.stderr.write(res)
if __name__=='__main__':
    # --- GUI definition: 5 incubation sections x 5 steps each, control
    # buttons, 8 preset slots (F1..F8), and a 900-point strip chart. ---
    sg.theme('DarkAmber')
    layout = [ [sg.Text('Incubation #1 cycles'), sg.InputText(size=(3,1), key='c1')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c11_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c11_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c11_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c12_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c12_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c12_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c13_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c13_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c13_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c14_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c14_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c14_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c15_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c15_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c15_ltemp')],
        [sg.Text('Incubation #2 cycles'), sg.InputText(size=(3,1), key='c2')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c21_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c21_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c21_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c22_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c22_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c22_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c23_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c23_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c23_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c24_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c24_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c24_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c25_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c25_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c25_ltemp')],
        [sg.Text('Incubation #3 cycles'), sg.InputText(size=(3,1), key='c3')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c31_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c31_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c31_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c32_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c32_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c32_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c33_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c33_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c33_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c34_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c34_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c34_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c35_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c35_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c35_ltemp')],
        [sg.Text('Incubation #4 cycles'), sg.InputText(size=(3,1), key='c4')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c41_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c41_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c41_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c42_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c42_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c42_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c43_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c43_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c43_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c44_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c44_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c44_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c45_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c45_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c45_ltemp')],
        [sg.Text('Incubation #5 cycles'), sg.InputText(size=(3,1), key='c5')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c51_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c51_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c51_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c52_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c52_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c52_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c53_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c53_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c53_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c54_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c54_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c54_ltemp')],
        [sg.Text('Time [sec]'), sg.InputText(size=(5,1), key='c55_time'), sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4,1), key='c55_btemp'), sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4,1), key='c55_ltemp')],
        [sg.Button('Open'), sg.Button('Close'), sg.Button('Run'), sg.Button('Stop'), sg.Button('Deactivate'), sg.Button('Save Log')],
        [sg.Button('F1'), sg.Button('F2'), sg.Button('F3'), sg.Button('F4'), sg.Button('F5'), sg.Button('F6'), sg.Button('F7'), sg.Button('F8')],
        [sg.Text('Estimated Time of Completion'), sg.InputText(size=(23,1), key='ETC'), sg.Text('Current Time'), sg.InputText(size=(23,1), key='CT')],
        [sg.Graph(canvas_size=(640, 480), graph_bottom_left=(0,0), graph_top_right=(900,120), key='graph', background_color='#000000')] ]
    window = sg.Window('Opentrons Thermocycler', layout)
    tc = driver.Thermocycler(interrupt_callback)
    asyncio.run(connect(tc))
    run_flag = False
    log = deque()        # [timestamp, lid_temp, block_temp] samples; also read by is_*_stabilized
    plot_span = deque()  # averaged [lid, block] points currently on the chart
    ten_count = 0
    job = deque()
    # --- Event loop: poll every 100 ms ---
    while True:
        event, values = window.read(timeout=100)
        if event == 'Open':
            run_flag = False
            asyncio.run(deactivate_all(tc))
            asyncio.run(open_lid(tc))
        if event == 'Close':
            run_flag = False
            asyncio.run(deactivate_all(tc))
            asyncio.run(close_lid(tc))
        if event == 'Stop':
            run_flag = False
            asyncio.run(deactivate_all(tc))
        if event == 'Deactivate' or event == None:
            # Explicit deactivate or window closed: shut down and exit the loop.
            run_flag = False
            asyncio.run(deactivate_all(tc))
            save_log(log)
            break
        if event == 'Save Log':
            save_log(log)
            pass
        if event == 'Run':
            # Start a fresh run; the schedule is (re)built on the next tick.
            run_flag = True
            job = deque()
        # Preset slots: load a saved parameter file into the GUI fields.
        if event == 'F1':
            if os.path.exists('F1.value'):
                load_job(window, job_file='F1.value')
        if event == 'F2':
            if os.path.exists('F2.value'):
                load_job(window, job_file='F2.value')
        if event == 'F3':
            if os.path.exists('F3.value'):
                load_job(window, job_file='F3.value')
        if event == 'F4':
            if os.path.exists('F4.value'):
                load_job(window, job_file='F4.value')
        if event == 'F5':
            if os.path.exists('F5.value'):
                load_job(window, job_file='F5.value')
        if event == 'F6':
            if os.path.exists('F6.value'):
                load_job(window, job_file='F6.value')
        if event == 'F7':
            if os.path.exists('F7.value'):
                load_job(window, job_file='F7.value')
        if event == 'F8':
            if os.path.exists('F8.value'):
                load_job(window, job_file='F8.value')
        if run_flag:
            run_flag = run(window, tc, job)
        if ten_count == 0:
            # First iteration: restore the last-used field values, prime the chart.
            load_job(window)
            update_graph(window, tc, log, plot_span)
        if ten_count == 10:
            # Redraw roughly once per second (10 x 100 ms polls).
            update_graph(window, tc, log, plot_span)
            ten_count = 1
        ten_count = ten_count + 1
        log.append([time.time(), tc._lid_temp, tc._current_temp])
        # sys.stderr.write('ten_count=%d\n' % ten_count)
    if tc.is_connected():
        tc.disconnect()
    window.close()
import PySimpleGUI as sg
import asyncio
import os
import sys
import time
from collections import deque
import driver
# ---- Duplicate listing (the source dump repeats pcr_gui.py verbatim) ----
delay_start = None  # wall-clock time when a stabilization wait began, or None
gra_ids = deque()   # figure ids of the static graph grid (scrolled separately)

async def connect(tc: driver.Thermocycler):
    """Open the serial connection to the thermocycler."""
    await tc.connect(port='/dev/ttyACM0')

async def open_lid(tc: driver.Thermocycler):
    """Open the thermocycler lid."""
    await tc.open()

async def close_lid(tc: driver.Thermocycler):
    """Close the thermocycler lid."""
    await tc.close()

async def deactivate_all(tc: driver.Thermocycler):
    """Turn off both the block and lid heaters."""
    await tc.deactivate_all()

def prep_job_func(job, hold_time_str, temp_str, lid_temp_str):
    """Append one [begin_at, hold_time, temp, lid_temp] entry parsed from strings."""
    init_time = time.time() + 1.0
    begin_at = init_time
    if len(job) > 0:
        begin_at = job[-1][0]
        if job[-1][1] != None:
            begin_at = begin_at + job[-1][1]
    sys.stderr.write('prep_job_func: begin_at=%f\n' % begin_at)
    hold_time = None
    temp = None
    lid_temp = None
    try:
        hold_time = min(3600, max(1, float(hold_time_str)))
    except ValueError:
        pass
    try:
        temp = min(100, max(4, float(temp_str)))
    except ValueError:
        pass
    try:
        lid_temp = min(110, max(4, float(lid_temp_str)))
    except ValueError:
        pass
    if temp != None or lid_temp != None:
        job.append([begin_at, hold_time, temp, lid_temp])
def add_waiting(tc: driver.Thermocycler, job):
    """Insert quadratic ramp-up delays before steps with >10-degree jumps.

    (Duplicate listing; currently unused -- the call site is commented out.)
    """
    job_new = []
    for i in range(len(job)):
        d1 = 0
        d2 = 0
        begin_at = job[i][0]
        if i == 0:
            d1 = abs(job[0][2] - tc._current_temp)
            d2 = abs(job[0][3] - tc._lid_temp)
        else:
            d1 = abs(job[i][2] - job[i-1][2])
            d2 = abs(job[i][3] - job[i-1][3])
        if len(job_new) > 0:
            begin_at = job_new[-1][0] + job_new[-1][1]
        if d1 > 10 or d2 > 10:
            d = max(d1, d2) / 10
            delay = d * d * 5
            job_new.append([begin_at, delay, job[i][2], job[i][3]])
            job_new.append([begin_at + delay, job[i][1], job[i][2], job[i][3]])
    return job_new

def debug_print_job(job):
    """Dump the schedule to stderr; unformattable fields print as '-'."""
    for j in job:
        s0 = '-'
        s1 = '-'
        s2 = '-'
        s3 = '-'
        try:
            s0 = time.asctime(time.localtime(j[0]))
        except ValueError:
            pass
        try:
            s1 = '%d' % j[1]
        except TypeError:
            pass
        try:
            s2 = '%g' % j[2]
        except TypeError:
            pass
        try:
            s3 = '%g' % j[3]
        except TypeError:
            pass
        sys.stderr.write('%s %s %s %s\n' % (s0, s1, s2, s3))

def prep_job(window, tc: driver.Thermocycler, save_value, job):
    """Build the job schedule from the GUI fields, recording values into save_value."""
    for a in range(5):
        k1 = 'c%d' % (a+1)
        if window[k1].Get() != '':
            s0 = window[k1].Get()
            v0 = int(s0)
            save_value[k1] = s0
            for c in range(v0):  # repeat the whole section once per cycle
                for b in range(5):
                    k2 = 'c%d%d' % (a+1, b+1)
                    k21 = '%s_time' % k2
                    k22 = '%s_btemp' % k2
                    k23 = '%s_ltemp' % k2
                    v1 = window[k21].Get()
                    v2 = window[k22].Get()
                    v3 = window[k23].Get()
                    if v1 != None:
                        save_value[k21] = v1
                    if v2 != None:
                        save_value[k22] = v2
                    if v3 != None:
                        save_value[k23] = v3
                    prep_job_func(job, v1, v2, v3)
    # job = add_waiting(tc, job)
    debug_print_job(job)
def mean(list):
    """Arithmetic mean of *list*; 0 for an empty sequence. (Shadows builtins.)"""
    if len(list) == 0:
        return 0
    sum = 0
    for i in list:
        sum = sum + i
    return sum / len(list)

def is_block_stabilized(temp):
    """True when the mean of the last 30 block samples (global log) is within 1 degree of temp."""
    if temp == None:
        return False
    e = len(log)
    if e < 30:
        return False
    s = max(0, e - 30)
    d = 1
    v = []
    for i in range(s, e):
        v.append(log[i][2])
    d = abs(mean(v) - temp)
    if d < 1:
        return True
    return False

def is_lid_stabilized(temp):
    """Lid stability check; targets below 90 degrees pass unconditionally."""
    if temp == None:
        return False
    e = len(log)
    if e < 30:
        return False
    s = max(0, e - 30)
    d = 30
    v = []
    for i in range(s, e):
        v.append(log[i][3])
    d = abs(mean(v) - temp)
    if temp >= 90 and d < 10:
        return True
    if temp < 90:
        return True
    return False

def delay_schedule(job, delay):
    """Shift every entry's start time in *job* forward by *delay* seconds (in place)."""
    for i in range(len(job)):
        job[i][0] = job[i][0] + delay

def load_job(window, job_file='pcr_gui.value'):
    """Populate the GUI fields from a saved tab-separated key/value file."""
    jv = None
    try:
        jv = open(job_file, 'r')
    except IOError:
        sys.stderr.write('load_job: \'%s\' is not found.\n' % job_file)
        return
    for row in jv.readlines():
        key, value = row[:-1].split('\t')
        try:
            window[key].Update(value)
        except ValueError:
            sys.stderr.write('load_job: \'%s\' widget is not found.\n' % key)
    jv.close()

def save_job(job_value):
    """Persist GUI field values to 'pcr_gui.value' as tab-separated lines."""
    jv = open('pcr_gui.value', 'w')
    for key in job_value.keys():
        jv.write('%s\t%s\n' % (key, job_value[key]))
    jv.close()
def run(window, tc: driver.Thermocycler, job):
global delay_start
ct = time.time()
if len(job) == 0:
job_value = {}
prep_job(window, tc, job_value, job)
save_job(job_value)
elif tc._target_temp != None and not is_block_stabilized(tc._target_temp):
if delay_start == None:
delay_start = ct
else:
if delay_start != None:
delay = ct - delay_start
delay_schedule(job, delay)
delay_start = None
begin_at, hold_time, temp, lid_temp = job[0]
sys.stderr.write('time=%f begin_at=%f\n' % (time.time(), begin_at))
if time.time() > begin_at:
temp_str = '-'
hold_time_str = '-'
if temp != None:
temp_str = '%g' % temp
if hold_time != None:
hold_time_str = '%d' % hold_time
asyncio.run(tc.set_temperature(temp, hold_time))
else:
asyncio.run(tc.deactivate_block())
lid_temp_str = '-'
if lid_temp != None:
lid_temp_str = '%g' % lid_temp
asyncio.run(tc.set_lid_temperature(lid_temp))
else:
asyncio.run(tc.deactivate_lid())
sys.stderr.write('run: %s %s %s %s\n' % (time.asctime(time.localtime(begin_at)), hold_time_str, temp_str, lid_temp_str))
job.popleft()
if len(job) == 0:
return False
etc = time.asctime(time.localtime(job[-1][0]))
window['ETC'].Update(value=etc)
return True
def update_graph(window, tc: driver.Thermocycler, log, plot_span):
global gra_ids
temp = deque()
lid_temp = deque()
i = len(log) - 1
while i >= 0 and log[-1][0] - log[i][0] <= 1:
lid_temp.append(log[i][1])
temp.append(log[i][2])
i = i - 1
# sys.stderr.write('update_graph: i=%d\n' % i)
if len(lid_temp) > 5 and len(temp) > 5:
plot_span.append([mean(lid_temp), mean(temp)])
# sys.stderr.write('update_graph: len(plot_span)=%d\n' % len(plot_span))
x = len(plot_span)
if x == 0:
for temp in range(10, 120, 10):
gra_ids.append(window['graph'].DrawLine(point_from=(0, temp), point_to=(900, temp), color='#888888'))
gra_ids.append(window['graph'].DrawText('%2d' % temp, location=(20, temp), color='#555555'))
elif x == 1:
y1 = plot_span[-1][0]
y2 = plot_span[-1][1]
window['graph'].DrawPoint(point=(x, y1), size=5, color='#ffff00')
window['graph'].DrawPoint(point=(x, y2), size=5, color='#ff00ff')
elif x > 1:
y11 = plot_span[-1][0]
y12 = plot_span[-2][0]
y21 = plot_span[-1][1]
y22 = plot_span[-2][1]
window['graph'].DrawLine(point_from=(x, y11), point_to=(x-1, y12), width=2, color='#ffff00')
window['graph'].DrawLine(point_from=(x, y21), point_to=(x-1, y22), width=2, color='#ff00ff')
if x == 900:
plot_span.popleft()
window['graph'].Move(-1, 0)
for id in gra_ids:
window['graph'].MoveFigure(id, 1, 0)
window['CT'].Update(value=time.asctime())
def save_log(log):
t = time.time()
ct = time.localtime(t)
millisec = t - int(t)
time_str = '%04d%02d%02d_%02d%02d%02d.%02d' % (ct.tm_year, ct.tm_mon, ct.tm_mday, ct.tm_hour, ct.tm_min, ct.tm_sec, round(millisec*100))
log_fname = 'tc-%s.log' % time_str
log_file = open(log_fname, 'w')
for i in log:
time_time, lid_temp, block_temp = i
time_msec = round(time_time - int(time_time) * 100)
time_str = time.strftime('%Y/%m/%d %H:%M:%S', time.localtime(time_time)) + '.%02d' % time_msec
log_file.write('%s\t%g\t%g\n' % (time_str, lid_temp, block_temp))
log_file.close()
async def interrupt_callback(res):
    # Passed to driver.Thermocycler as the unsolicited-message handler
    # (see the __main__ block); just echoes the raw payload to stderr.
    sys.stderr.write(res)
if __name__=='__main__':
    sg.theme('DarkAmber')

    def _incubation_rows(i):
        # One "Incubation #i cycles" header row plus five step rows.
        # Keys match the originals exactly: 'c<i>' and 'c<i><j>_{time,btemp,ltemp}'.
        rows = [[sg.Text('Incubation #%d cycles' % i), sg.InputText(size=(3, 1), key='c%d' % i)]]
        for j in range(1, 6):
            rows.append([sg.Text('Time [sec]'), sg.InputText(size=(5, 1), key='c%d%d_time' % (i, j)),
                         sg.Text('Block Temp. [Celsius]'), sg.InputText(size=(4, 1), key='c%d%d_btemp' % (i, j)),
                         sg.Text('Lid Temp. [Celsius]'), sg.InputText(size=(4, 1), key='c%d%d_ltemp' % (i, j))])
        return rows

    # Build the 5 incubation sections programmatically instead of 30
    # copy-pasted rows (keys and widget parameters are unchanged).
    layout = []
    for i in range(1, 6):
        layout.extend(_incubation_rows(i))
    layout.append([sg.Button('Open'), sg.Button('Close'), sg.Button('Run'), sg.Button('Stop'), sg.Button('Deactivate'), sg.Button('Save Log')])
    layout.append([sg.Button('F%d' % n) for n in range(1, 9)])
    layout.append([sg.Text('Estimated Time of Completion'), sg.InputText(size=(23, 1), key='ETC'), sg.Text('Current Time'), sg.InputText(size=(23, 1), key='CT')])
    layout.append([sg.Graph(canvas_size=(640, 480), graph_bottom_left=(0, 0), graph_top_right=(900, 120), key='graph', background_color='#000000')])

    window = sg.Window('Opentrons Thermocycler', layout)
    tc = driver.Thermocycler(interrupt_callback)
    asyncio.run(connect(tc))

    run_flag = False          # True while a job is being executed
    log = deque()             # raw samples: [time, lid_temp, block_temp] per poll
    plot_span = deque()       # averaged points currently shown on the graph
    ten_count = 0             # throttles graph redraws to every 10th poll
    job = deque()
    while True:
        event, values = window.read(timeout=100)
        # Events are mutually exclusive, so an elif chain is equivalent to
        # the original run of independent ifs.
        if event == 'Open':
            run_flag = False
            asyncio.run(deactivate_all(tc))
            asyncio.run(open_lid(tc))
        elif event == 'Close':
            run_flag = False
            asyncio.run(deactivate_all(tc))
            asyncio.run(close_lid(tc))
        elif event == 'Stop':
            run_flag = False
            asyncio.run(deactivate_all(tc))
        elif event == 'Deactivate' or event is None:
            # Window closed (None) behaves like Deactivate: shut down, dump
            # the log, and leave the event loop.
            run_flag = False
            asyncio.run(deactivate_all(tc))
            save_log(log)
            break
        elif event == 'Save Log':
            save_log(log)
        elif event == 'Run':
            run_flag = True
            job = deque()
        elif event in ('F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8'):
            # Collapsed eight identical handlers: each F-key loads its
            # preset file '<key>.value' if present.
            preset = event + '.value'
            if os.path.exists(preset):
                load_job(window, job_file=preset)
        if run_flag:
            run_flag = run(window, tc, job)
        if ten_count == 0:
            # First pass after startup: populate fields and draw once.
            load_job(window)
            update_graph(window, tc, log, plot_span)
        if ten_count == 10:
            update_graph(window, tc, log, plot_span)
            ten_count = 1
        ten_count = ten_count + 1
        log.append([time.time(), tc._lid_temp, tc._current_temp])
    if tc.is_connected():
        tc.disconnect()
    window.close()
import functools
import re
from typing import (
List,
Union
)
from net_models.models.BaseModels.SharedModels import VRFAddressFamily, VRFModel
from net_parser.config import BaseConfigLine
class IosConfigLine(BaseConfigLine):
    """Common base for Cisco IOS config-line parsers."""

    def __init__(self, number: int, text: str, config, verbosity: int, name: str = "IosConfigLine"):
        # BUG FIX: the caller-supplied ``name`` was ignored and hard-coded
        # to "IosAaaLine", mislabeling every subclass in logs; pass it on.
        super().__init__(number=number, text=text, config=config, verbosity=verbosity, name=name)
class IosAaaParser(IosConfigLine):
    """Parser for IOS ``aaa ...`` configuration lines."""

    def __init__(self, number: int, text: str, config, verbosity: int):
        # Fixed display name tags AAA lines distinctly in parser logs.
        super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosAaaLine")
class IosVrfDefinitionParser(IosConfigLine):
    """Parser for IOS ``[ip] vrf definition <name>`` sections.

    Extracts the VRF name, description, RD and address families (with
    their route targets) and exposes the result as a ``VRFModel`` via
    the :attr:`model` property.
    """

    _name_regex = re.compile(pattern=r"^(?:ip )?vrf definition (?P<name>\S+)", flags=re.MULTILINE)
    _description_regex = re.compile(pattern=r"^ description (?P<description>.*?)\Z", flags=re.MULTILINE)
    _rd_regex = re.compile(pattern=r"^ rd (?P<rd>\S+)\Z", flags=re.MULTILINE)
    _address_family_regex = re.compile(pattern=r"^ address-family (?P<afi>\S+)(?: (?P<safi>\S+))?\Z")
    _route_target_regex = re.compile(pattern=r"^ route-target (?P<action>import|export) (?P<rt>\S+)(?: (?P<rt_type>\S+))?", flags=re.MULTILINE)

    def __init__(self, number: int, text: str, config, verbosity: int):
        super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosVrfDefinitionLine")

    @property
    def get_type(self):
        """Parent type tags plus ``'vrf'``."""
        types = super().get_type
        types.append('vrf')
        return types

    @property
    def name(self) -> Union[str, None]:
        """VRF name from the section header, or None when not matched."""
        return self.re_match(regex=self._name_regex, group=1)

    @property
    def description(self) -> Union[str, None]:
        """VRF ``description`` child line text, or None."""
        candidates = self.re_search_children(regex=self._description_regex, group=1)
        return self.first_candidate_or_none(candidates=candidates)

    @property
    def rd(self) -> Union[str, None]:
        # FIX: was annotated ``-> bool`` but returns the RD string (or None).
        candidates = self.re_search_children(regex=self._rd_regex, group=1)
        return self.first_candidate_or_none(candidates=candidates)

    @property
    def address_families(self) -> Union[List[VRFAddressFamily], None]:
        """Address families of this VRF as models, or None when there are none."""
        address_families = []
        af_lines = self.re_search_children(regex=self._address_family_regex)
        for af_line in af_lines:
            data = {}
            data.update(af_line.re_search(regex=self._address_family_regex, group="ALL"))
            # Route Targets (FIX: removed stray debug ``print`` left in the loop).
            rt_candidates = af_line.re_search_children(regex=self._route_target_regex, group="ALL")
            if rt_candidates:
                data['route_targets'] = rt_candidates
            # Skip address-family stanzas that matched nothing usable.
            if not any(data.values()):
                continue
            address_families.append(VRFAddressFamily(**data))
        if address_families:
            self.logger.debug(f"Found {len(address_families)} AFs for VRF {self.name}")
            return address_families
        else:
            self.logger.debug(f"Found no AFs for VRF {self.name}")
            return None

    @property
    @functools.lru_cache()
    def model(self):
        """``VRFModel`` built from the parsed fields (None values dropped).

        NOTE(review): ``lru_cache`` on a method keys on ``self`` and keeps
        the instance alive for the cache's lifetime (flake8-bugbear B019);
        left as-is to preserve the existing caching behavior.
        """
        data = {
            'name': self.name,
            'rd': self.rd,
            'description': self.description,
            'address_families': self.address_families,
        }
        return VRFModel(**{k: v for k, v in data.items() if v is not None})
class IosLoggingParser(IosConfigLine):
    """Parser for IOS ``logging ...`` configuration lines."""

    def __init__(self, number: int, text: str, config, verbosity: int):
        super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosLoggingLine")

    @property
    def get_type(self):
        """Parent type tags plus ``'logging'``."""
        types = super().get_type
        types.append('logging')
        # FIX: restored the ``return`` statement that was destroyed by a
        # CSV-export artifact on the original final line.
        return types
import re
from typing import (
List,
Union
)
from net_models.models.BaseModels.SharedModels import VRFAddressFamily, VRFModel
from net_parser.config import BaseConfigLine
class IosConfigLine(BaseConfigLine):
def __init__(self, number: int, text: str, config, verbosity: int, name: str = "IosConfigLine"):
super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosAaaLine")
class IosAaaParser(IosConfigLine):
def __init__(self, number: int, text: str, config, verbosity: int):
super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosAaaLine")
class IosVrfDefinitionParser(IosConfigLine):
_name_regex = re.compile(pattern=r"^(?:ip )?vrf definition (?P<name>\S+)", flags=re.MULTILINE)
_description_regex = re.compile(pattern=r"^ description (?P<description>.*?)\Z", flags=re.MULTILINE)
_rd_regex = re.compile(pattern=r"^ rd (?P<rd>\S+)\Z", flags=re.MULTILINE)
_address_family_regex = re.compile(pattern=r"^ address-family (?P<afi>\S+)(?: (?P<safi>\S+))?\Z")
_route_target_regex = re.compile(pattern=r"^ route-target (?P<action>import|export) (?P<rt>\S+)(?: (?P<rt_type>\S+))?", flags=re.MULTILINE)
def __init__(self, number: int, text: str, config, verbosity: int):
super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosVrfDefinitionLine")
@property
def get_type(self):
types = super().get_type
types.append('vrf')
return types
@property
def name(self) -> Union[str, None]:
return self.re_match(regex=self._name_regex, group=1)
@property
def description(self) -> Union[str, None]:
candidates = self.re_search_children(regex=self._description_regex, group=1)
return self.first_candidate_or_none(candidates=candidates)
@property
def rd(self) -> bool:
candidates = self.re_search_children(regex=self._rd_regex, group=1)
return self.first_candidate_or_none(candidates=candidates)
@property
def address_families(self) -> Union[List[VRFAddressFamily], None]:
address_families = []
af_lines = self.re_search_children(regex=self._address_family_regex)
for af_line in af_lines:
data = {}
data.update(af_line.re_search(regex=self._address_family_regex, group="ALL"))
# Route Targets
rt_candidates = af_line.re_search_children(regex=self._route_target_regex, group="ALL")
print(rt_candidates)
if len(rt_candidates):
data['route_targets'] = rt_candidates
if not any(data.values()):
continue
else:
model = VRFAddressFamily(**data)
address_families.append(model)
if len(address_families):
self.logger.debug(f"Found {len(address_families)} AFs for VRF {self.name}")
return address_families
else:
self.logger.debug(f"Found no AFs for VRF {self.name}")
return None
@property
@functools.lru_cache()
def model(self):
data = {
'name': self.name,
'rd': self.rd,
'description': self.description,
'address_families': self.address_families,
}
model = VRFModel(**{k:v for k, v in data.items() if v is not None})
return model
class IosLoggingParser(IosConfigLine):
def __init__(self, number: int, text: str, config, verbosity: int):
super().__init__(number=number, text=text, config=config, verbosity=verbosity, name="IosLoggingLine")
@property
def get_type(self):
types = super().get_type
types.append('logging')
return types | 0.616243 | 0.069827 |
import torch
from tqdm.auto import tqdm
import numpy as np
from .random import sample_best_distributed, sample_best_distributed_pointwise
from ..losses import Chamfer
class ShapeSampler:
def __init__(self, method = "random", N = 5):
if (method not in ["random", "kmeans++", "template", "firsttemplate"]) and (method.replace("template", "") == ""):
raise NotImplementedError
self.method = method
self.N = N
def __call__(self, dataset, aligner = None, output = "samples"):
    """Select self.N shapes from *dataset* using the configured method.

    output="samples" returns the dataset items; output="indices" returns
    the raw index array.  *aligner* is only used by the "kmeans++" method.
    """
    if "template" in self.method:
        if self.method in ["template", "firsttemplate"]:
            # Per-point labels present -> use the pointwise sampler;
            # otherwise sample from the per-shape labels.
            if hasattr(dataset.data, "point_y"):
                indices = sample_best_distributed_pointwise(
                    dataset, self.N, get_firsts = "first" in self.method
                )
            else:
                indices = sample_best_distributed(
                    dataset.data.y, self.N, get_firsts = "first" in self.method
                )
        else:
            # "<k>template": k random templates per unique class label.
            i = int(self.method.replace("template", ""))
            unique = np.unique(dataset.data.y)
            assert i*len(unique) == self.N, "Invalid number of desired templates"
            indices = []
            for u in unique:
                indices.append(np.random.choice(np.arange(len(dataset.data.y))[dataset.data.y == u], i, replace = False))
            indices = np.hstack(indices)
    elif self.method == "random":
        # Sampling with replacement only when the dataset is too small.
        indices = np.random.choice(
            len(dataset), self.N, replace = len(dataset) < self.N
        )
    elif self.method == "kmeans++":
        indices = self.sample_kmeanspp(dataset, aligner)
    else:
        raise NotImplementedError
    if output == "samples":
        return [dataset[int(i)] for i in indices]
    elif output == "indices":
        return indices
    else:
        raise ValueError
def sample_kmeanspp(self, dataset, aligner = None):
    """KMeans++-style farthest-point selection of self.N shape indices.

    The squared Chamfer distance to the closest already-selected shape is
    used as the sampling weight for the next pick.  *aligner* may be a
    path (str) to a saved model or an object exposing encoder/LSMs; when
    given, each reference shape is aligned before distances are computed.

    Requires CUDA (tensors are moved with .cuda()).
    """
    if aligner is not None:
        if isinstance(aligner, str):
            aligner = torch.load(aligner)
            aligner = {"encoder": aligner.encoder, "anet": aligner.LSMs[0].ANet, "aligner": aligner.LSMs[0].Aligner}
        assert "encoder" in aligner.keys()
        assert "anet" in aligner.keys()
        assert "aligner" in aligner.keys()
        aligner["encoder"].eval()
        aligner["anet"].eval()
        aligner["aligner"].eval()

    def do_batch(b, aligner = None):
        # Squared Chamfer distances between the candidate batch b[0] and
        # the latest selected shape b[1] (optionally aligned first).
        with torch.no_grad():
            b0 = torch.cat(b[0]).cuda().permute(0, 2, 1)
            b1 = torch.cat(b[1]).cuda().permute(0, 2, 1)
            if aligner is not None:
                b0_params = aligner["anet"](aligner["encoder"](b0)[0])
                b1 = aligner["aligner"](b1, b0_params)[0]
            d = criterion(b0, b1).detach().cpu().numpy()
        return d**2

    criterion = Chamfer()
    init = [np.random.randint(len(dataset))]
    distances = []
    for k in tqdm(range(self.N - 1), leave=False, desc="KMeans++ init"):
        distances.append([])
        batch = ([], [])
        for i in tqdm(range(len(dataset)), desc=f"Number {k}", leave=False):
            batch[0].append(dataset[i].pos.unsqueeze(0))
            batch[1].append(dataset[init[-1]].pos.unsqueeze(0))
            if len(batch[0]) >= 512:
                d = do_batch(batch, aligner)
                distances[-1] = np.concatenate([distances[-1], d])
                batch = ([], [])
        if len(batch[0]) >= 1:
            # BUG FIX: the remainder batch was processed WITHOUT the aligner
            # (do_batch(batch)), making the tail distances inconsistent with
            # the full batches above.
            d = do_batch(batch, aligner)
            distances[-1] = np.concatenate([distances[-1], d])
            batch = ([], [])
        # Distance of each shape to its closest selected shape so far.
        closer_init = np.min(distances, axis = 0)
        tqdm.write(f"Mean minimum Chamfer distance from {1+k} selected shapes: {1000*np.mean(closer_init**.5):.2f}")
        closer_init = closer_init / closer_init.sum()
        init.append(int(np.random.choice(len(dataset), 1, p = closer_init)[0]))
    del criterion
    if aligner is not None:
        del aligner
    torch.cuda.empty_cache()
    # FIX: restored ``return init`` that was mangled by the CSV-export artifact.
    return init
from tqdm.auto import tqdm
import numpy as np
from .random import sample_best_distributed, sample_best_distributed_pointwise
from ..losses import Chamfer
class ShapeSampler:
def __init__(self, method = "random", N = 5):
if (method not in ["random", "kmeans++", "template", "firsttemplate"]) and (method.replace("template", "") == ""):
raise NotImplementedError
self.method = method
self.N = N
def __call__(self, dataset, aligner = None, output = "samples"):
if "template" in self.method:
if self.method in ["template", "firsttemplate"]:
if hasattr(dataset.data, "point_y"):
indices = sample_best_distributed_pointwise(
dataset, self.N, get_firsts = "first" in self.method
)
else:
indices = sample_best_distributed(
dataset.data.y, self.N, get_firsts = "first" in self.method
)
else:
i = int(self.method.replace("template", ""))
unique = np.unique(dataset.data.y)
assert i*len(unique) == self.N, "Invalid number of desired templates"
indices = []
for u in unique:
indices.append(np.random.choice(np.arange(len(dataset.data.y))[dataset.data.y == u], i, replace = False))
indices = np.hstack(indices)
elif self.method == "random":
indices = np.random.choice(
len(dataset), self.N, replace = len(dataset) < self.N
)
elif self.method == "kmeans++":
indices = self.sample_kmeanspp(dataset, aligner)
else:
raise NotImplementedError
if output == "samples":
return [dataset[int(i)] for i in indices]
elif output == "indices":
return indices
else:
raise ValueError
def sample_kmeanspp(self, dataset, aligner = None):
if aligner is not None:
if type(aligner) is str:
aligner = torch.load(aligner)
aligner = {"encoder": aligner.encoder, "anet": aligner.LSMs[0].ANet, "aligner": aligner.LSMs[0].Aligner}
assert "encoder" in aligner.keys()
assert "anet" in aligner.keys()
assert "aligner" in aligner.keys()
aligner["encoder"].eval()
aligner["anet"].eval()
aligner["aligner"].eval()
def do_batch(b, aligner = None):
with torch.no_grad():
b0 = torch.cat(b[0]).cuda().permute(0, 2, 1)
b1 = torch.cat(b[1]).cuda().permute(0, 2, 1)
if aligner is not None:
b0_params = aligner["anet"](aligner["encoder"](b0)[0])
b1 = aligner["aligner"](b1, b0_params)[0]
d = criterion(b0, b1).detach().cpu().numpy()
return d**2
criterion = Chamfer()
init = [np.random.randint(len(dataset))]
distances = []
for k in tqdm(range(self.N - 1), leave=False, desc="KMeans++ init"):
distances.append([])
batch = ([], [])
for i in tqdm(range(len(dataset)), desc=f"Number {k}", leave=False):
batch[0].append(dataset[i].pos.unsqueeze(0))
batch[1].append(dataset[init[-1]].pos.unsqueeze(0))
if len(batch[0]) >= 512:
d = do_batch(batch, aligner)
distances[-1] = np.concatenate([distances[-1], d])
batch = ([], [])
if len(batch[0]) >= 1:
d = do_batch(batch)
distances[-1] = np.concatenate([distances[-1], d])
batch = ([], [])
closer_init = np.min(distances, axis = 0)
tqdm.write(f"Mean minimum Chamfer distance from {1+k} selected shapes: {1000*np.mean(closer_init**.5):.2f}")
closer_init = closer_init / closer_init.sum()
init.append(int(np.random.choice(len(dataset), 1, p = closer_init)[0]))
del criterion
if aligner is not None:
del aligner
torch.cuda.empty_cache()
return init | 0.531453 | 0.456834 |
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class AuthenticationLoginRequest:
    """Thrift-generated login request struct.

    Attributes:
     - user_name
     - password
     - client_id
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'user_name', None, None, ),  # 1
        (2, TType.STRING, 'password', None, None, ),  # 2
        (3, TType.STRING, 'client_id', None, None, ),  # 3
    )

    def __init__(self, user_name=None, password=None, client_id=None,):
        # BUG FIX: the password default had been mangled to the literal
        # ``<PASSWORD>`` (a redaction artifact), which is a syntax error;
        # the thrift-generated default is None.
        self.user_name = user_name
        self.password = password
        self.client_id = client_id

    def read(self, iprot):
        """Deserialize this struct from *iprot* (fast C path when available)."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.user_name = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 2:
                if ftype == TType.STRING:
                    self.password = iprot.readString()
                else:
                    iprot.skip(ftype)
            elif fid == 3:
                if ftype == TType.STRING:
                    self.client_id = iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot* (fast C path when available)."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('AuthenticationLoginRequest')
        if self.user_name is not None:
            oprot.writeFieldBegin('user_name', TType.STRING, 1)
            oprot.writeString(self.user_name)
            oprot.writeFieldEnd()
        if self.password is not None:
            oprot.writeFieldBegin('password', TType.STRING, 2)
            oprot.writeString(self.password)
            oprot.writeFieldEnd()
        if self.client_id is not None:
            oprot.writeFieldBegin('client_id', TType.STRING, 3)
            oprot.writeString(self.client_id)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        # FIX: dict.iteritems() is Python-2-only; items() works on both.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
class AuthenticationLoginReply:
    """Thrift-generated login reply struct.

    Attributes:
     - result
    """

    thrift_spec = (
        None,  # 0
        (1, TType.BOOL, 'result', None, None, ),  # 1
    )

    def __init__(self, result=None,):
        self.result = result

    def read(self, iprot):
        """Deserialize this struct from *iprot* (fast C path when available)."""
        if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
            fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.BOOL:
                    self.result = iprot.readBool()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        """Serialize this struct to *oprot* (fast C path when available)."""
        if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
            oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('AuthenticationLoginReply')
        if self.result is not None:
            oprot.writeFieldBegin('result', TType.BOOL, 1)
            oprot.writeBool(self.result)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        # FIX: dict.iteritems() is Python-2-only; items() works on both.
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        # FIX: restored the ``return`` that was mangled by the CSV-export artifact.
        return not (self == other)
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class AuthenticationLoginRequest:
"""
Attributes:
- user_name
- password
- client_id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'user_name', None, None, ), # 1
(2, TType.STRING, 'password', None, None, ), # 2
(3, TType.STRING, 'client_id', None, None, ), # 3
)
def __init__(self, user_name=None, password=<PASSWORD>, client_id=None,):
self.user_name = user_name
self.password = password
self.client_id = client_id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.user_name = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.client_id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthenticationLoginRequest')
if self.user_name is not None:
oprot.writeFieldBegin('user_name', TType.STRING, 1)
oprot.writeString(self.user_name)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password)
oprot.writeFieldEnd()
if self.client_id is not None:
oprot.writeFieldBegin('client_id', TType.STRING, 3)
oprot.writeString(self.client_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class AuthenticationLoginReply:
"""
Attributes:
- result
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'result', None, None, ), # 1
)
def __init__(self, result=None,):
self.result = result
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.result = iprot.readBool();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('AuthenticationLoginReply')
if self.result is not None:
oprot.writeFieldBegin('result', TType.BOOL, 1)
oprot.writeBool(self.result)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other) | 0.406273 | 0.08772 |
from collections import OrderedDict
import six
from pyangbind.lib.base import PybindBase
from pyangbind.lib.yangtypes import YANGDynClass
# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
import builtins as __builtin__
long = int
elif six.PY2:
import __builtin__
class supported_bandwidth(PybindBase):
"""
This class was auto-generated by the PythonClass plugin for PYANG
from YANG module node-topology - based on the path /node/port/available-transceiver/supported-bandwidth. Each member element of
the container is represented as a class variable - with a specific
YANG type.
"""
__slots__ = ('_path_helper', '_extmethods', '__max_bw', '__min_bw',)
_yang_name = 'supported-bandwidth'
_pybind_generated_by = 'container'
def __init__(self, *args, **kwargs):
    """Initialize the container leaves; optionally copy values from a
    single positional source object (standard pyangbind convention)."""
    self._path_helper = False
    self._extmethods = False
    # Both leaves start as empty YANG 'string' wrappers.
    self.__min_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self,
        path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
        namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
        is_config=True)
    self.__max_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self,
        path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
        namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
        is_config=True)
    load = kwargs.pop("load", None)
    if args:
        if len(args) > 1:
            raise TypeError("cannot create a YANG container with >1 argument")
        # The source object must expose every element of this container.
        all_attr = True
        for e in self._pyangbind_elements:
            if not hasattr(args[0], e):
                all_attr = False
                break
        if not all_attr:
            raise ValueError("Supplied object did not have the correct attributes")
        # Copy only elements the source has actually changed from default.
        for e in self._pyangbind_elements:
            nobj = getattr(args[0], e)
            if nobj._changed() is False:
                continue
            setmethod = getattr(self, "_set_%s" % e)
            if load is None:
                setmethod(getattr(args[0], e))
            else:
                setmethod(getattr(args[0], e), load=load)
def _path(self):
    # Absolute YANG path of this container: the parent's path plus our own
    # name when attached to a parent, else the static schema path.
    if hasattr(self, "_parent"):
        return self._parent._path() + [self._yang_name]
    else:
        return [u'node', u'port', u'available-transceiver', u'supported-bandwidth']
def _get_max_bw(self):
    """
    Getter method for max_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/max_bw (string)
    """
    # Name-mangled private storage populated by __init__ / _set_max_bw.
    return self.__max_bw
def _set_max_bw(self, v, load=False):
"""
Setter method for max_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/max_bw (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_max_bw is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_max_bw() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self,
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """max_bw must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:node-topology', defining_module='node-topology', yang_type='string', is_config=True)""",
})
self.__max_bw = t
if hasattr(self, '_set'):
self._set()
def _unset_max_bw(self):
self.__max_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self,
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
is_config=True)
def _get_min_bw(self):
"""
Getter method for min_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/min_bw (string)
"""
return self.__min_bw
def _set_min_bw(self, v, load=False):
"""
Setter method for min_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/min_bw (string)
If this variable is read-only (config: false) in the
source YANG file, then _set_min_bw is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_min_bw() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self,
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """min_bw must be of a type compatible with string""",
'defined-type': "string",
'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:node-topology', defining_module='node-topology', yang_type='string', is_config=True)""",
})
self.__min_bw = t
if hasattr(self, '_set'):
self._set()
def _unset_min_bw(self):
self.__min_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self,
path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
is_config=True)
max_bw = __builtin__.property(_get_max_bw, _set_max_bw)
min_bw = __builtin__.property(_get_min_bw, _set_min_bw)
_pyangbind_elements = OrderedDict([('max_bw', max_bw), ('min_bw', min_bw), ]) | grpc/rbindings/node/port/available_transceiver/supported_bandwidth/__init__.py | from collections import OrderedDict
import six
from pyangbind.lib.base import PybindBase
from pyangbind.lib.yangtypes import YANGDynClass

# PY3 support of some PY2 keywords (needs improved)
if six.PY3:
    import builtins as __builtin__
    long = int
elif six.PY2:
    import __builtin__


# NOTE(review): this is a duplicate (parsed) copy of the supported_bandwidth
# class above — dataset-dump artifact, kept byte-identical.
class supported_bandwidth(PybindBase):
    """
    This class was auto-generated by the PythonClass plugin for PYANG
    from YANG module node-topology - based on the path /node/port/available-transceiver/supported-bandwidth. Each member element of
    the container is represented as a class variable - with a specific
    YANG type.
    """
    # Restrict instances to pyangbind bookkeeping slots plus the two leaves.
    __slots__ = ('_path_helper', '_extmethods', '__max_bw', '__min_bw',)

    _yang_name = 'supported-bandwidth'
    _pybind_generated_by = 'container'

    def __init__(self, *args, **kwargs):
        # Path-helper and extension-method support are disabled by default.
        self._path_helper = False
        self._extmethods = False
        # Default (empty) YANGDynClass wrappers for the two string leaves.
        self.__min_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self,
                                     path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
                                     namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
                                     is_config=True)
        self.__max_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self,
                                     path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
                                     namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
                                     is_config=True)
        load = kwargs.pop("load", None)
        if args:
            if len(args) > 1:
                raise TypeError("cannot create a YANG container with >1 argument")
            # Copy-constructor path: the argument must expose every element.
            all_attr = True
            for e in self._pyangbind_elements:
                if not hasattr(args[0], e):
                    all_attr = False
                    break
            if not all_attr:
                raise ValueError("Supplied object did not have the correct attributes")
            for e in self._pyangbind_elements:
                nobj = getattr(args[0], e)
                if nobj._changed() is False:
                    continue  # only copy leaves that differ from their defaults
                setmethod = getattr(self, "_set_%s" % e)
                if load is None:
                    setmethod(getattr(args[0], e))
                else:
                    setmethod(getattr(args[0], e), load=load)

    def _path(self):
        # Absolute YANG path; falls back to the static path when unparented.
        if hasattr(self, "_parent"):
            return self._parent._path() + [self._yang_name]
        else:
            return [u'node', u'port', u'available-transceiver', u'supported-bandwidth']

    def _get_max_bw(self):
        """
        Getter method for max_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/max_bw (string)
        """
        return self.__max_bw

    def _set_max_bw(self, v, load=False):
        """
        Setter method for max_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/max_bw (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_max_bw is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_max_bw() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self,
                             path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
                             namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
                             is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """max_bw must be of a type compatible with string""",
                'defined-type': "string",
                'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:node-topology', defining_module='node-topology', yang_type='string', is_config=True)""",
            })
        self.__max_bw = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_max_bw(self):
        # Reset max_bw to a fresh default leaf instance.
        self.__max_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="max-bw", parent=self,
                                     path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
                                     namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
                                     is_config=True)

    def _get_min_bw(self):
        """
        Getter method for min_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/min_bw (string)
        """
        return self.__min_bw

    def _set_min_bw(self, v, load=False):
        """
        Setter method for min_bw, mapped from YANG variable /node/port/available_transceiver/supported_bandwidth/min_bw (string)
        If this variable is read-only (config: false) in the
        source YANG file, then _set_min_bw is considered as a private
        method. Backends looking to populate this variable should
        do so via calling thisObj._set_min_bw() directly.
        """
        if hasattr(v, "_utype"):
            v = v._utype(v)
        try:
            t = YANGDynClass(v, base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self,
                             path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
                             namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
                             is_config=True)
        except (TypeError, ValueError):
            raise ValueError({
                'error-string': """min_bw must be of a type compatible with string""",
                'defined-type': "string",
                'generated-type': """YANGDynClass(base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:node-topology', defining_module='node-topology', yang_type='string', is_config=True)""",
            })
        self.__min_bw = t
        if hasattr(self, '_set'):
            self._set()

    def _unset_min_bw(self):
        # Reset min_bw to a fresh default leaf instance.
        self.__min_bw = YANGDynClass(base=six.text_type, is_leaf=True, yang_name="min-bw", parent=self,
                                     path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True,
                                     namespace='urn:node-topology', defining_module='node-topology', yang_type='string',
                                     is_config=True)

    # Public attribute access goes through the generated properties.
    max_bw = __builtin__.property(_get_max_bw, _set_max_bw)
    min_bw = __builtin__.property(_get_min_bw, _set_min_bw)

    _pyangbind_elements = OrderedDict([('max_bw', max_bw), ('min_bw', min_bw), ])
import numpy as np
import cv2
import os
import pandas as pd
import torch
from torchvision import transforms
class CardImageDataset():
    """Dataset of card images with 8x flip/rotate augmentation.

    Index layout: indices [0, limit) are the raw images; each further block
    of ``limit`` indices applies one of the 8 hflip/vflip/rot90 combinations,
    so ``len(dataset) == 8 * limit``.
    """

    def __init__(self, root_dir='../data', header_file='gicsd_labels.csv', image_dir='images'):
        '''
        root_dir: location of the dataset dir
        header_file: location of the dataset header in the dataset directory
        image_dir: location of the images
        '''
        header_path = os.path.join(root_dir, header_file)
        # Columns are separated by ", " (comma + space); multi-char separators
        # require the python engine.
        self.data_header = pd.read_csv(header_path, sep=', ', engine='python')
        self.image_dir = os.path.join(root_dir, image_dir)
        self.header_info, self.image_files, self.classes = self.header_info_extractor()
        self.limit = len(self.image_files)
        self.length = len(self.image_files) * 8  # 8 augmentation variants per image

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        """Return {'image': (1, H, W) tensor, 'label': LongTensor} for idx."""
        hflip, vflip, rotate = self.data_augmentations(idx)
        idx = idx % self.limit
        gray_image = self.load_image(self.image_files[idx])
        label = torch.LongTensor([self.header_info[idx, -1]])
        if hflip:
            gray_image = torch.flip(gray_image, dims=[1])
        if vflip:
            gray_image = torch.flip(gray_image, dims=[2])
        if rotate:
            gray_image = torch.rot90(gray_image, 1, dims=[2, 1])
        return {'image': gray_image, 'label': label}

    def data_augmentations(self, idx):
        """Decode the augmentation bits (hflip, vflip, rotate) from idx.

        Bit 0 of ``idx // limit`` selects hflip, bit 1 vflip, bit 2 rotate.
        """
        places = idx // self.limit
        hflip = bool(places & 1)
        places = places >> 1
        vflip = bool(places & 1)
        places = places >> 1
        rotate = bool(places & 1)
        return hflip, vflip, rotate

    def load_image(self, image_file):
        '''
        image_file: file name of the image in dataset
        return: blue channel of the loaded image as a normalized (1, H, W) tensor
        '''
        file_path = os.path.join(self.image_dir, image_file)
        # OpenCV loads BGR, so channel 0 is the blue channel.
        frame = cv2.imread(file_path)[:, :, 0].astype(np.float32)
        frame = torch.from_numpy(frame)
        frame /= 255
        frame = torch.unsqueeze(frame, dim=0)
        frame = transforms.functional.normalize(frame,
                                                mean=[0.406],
                                                std=[0.225])
        return frame

    def header_info_extractor(self):
        """Parse the CSV header into (int info array, file names, sorted label names).

        File names look like ``GICSD_<a>_<b>_<c>.png``; the numeric middle
        fields plus the (numerically encoded) label form each info row.
        """
        image_files = list(self.data_header['IMAGE_FILENAME'].values)
        labels = self.data_header['LABEL'].values.astype(str)
        label_set = sorted(set(labels))
        new_data_block = []
        for file_name_str, label in zip(image_files, labels):
            parts = file_name_str.split('_')
            # Keep the numeric fields between the dataset prefix and extension.
            new_data_block.append(parts[1:-1] + [label])
        new_data_block = np.array(new_data_block)
        # changing labels to numbers can help data processing
        for i, x in enumerate(label_set):
            new_data_block[new_data_block[:, -1] == x, -1] = i
        # BUGFIX: np.int was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin int is the documented replacement.
        new_data_block = new_data_block.astype(int)
        return new_data_block, image_files, label_set

    def decode_label(self, label):
        """Map a numeric label back to its class name."""
        return self.classes[label]
def load_image_from_path(file_path='../data/images/GICSD_4_1_33.png'):
    """Load one image file and return its normalized blue channel as a (1, H, W) tensor."""
    # OpenCV loads BGR; channel 0 is blue.
    blue = cv2.imread(file_path)[:, :, 0]
    tensor = torch.from_numpy(blue.astype(np.float32)) / 255
    tensor = torch.unsqueeze(tensor, dim=0)
    return transforms.functional.normalize(tensor,
                                           mean=[0.406],
                                           std=[0.225])
if __name__ == '__main__':
    # Smoke test: preview one image under all 8 augmentation variants.
    dataset = CardImageDataset(root_dir='../data', header_file='gicsd_labels.csv', image_dir='images')
    print(len(dataset))
    limit = dataset.limit
    import matplotlib.pyplot as plt
    plt.rcParams["figure.figsize"] = 15, 7
    i = 1  # image index to preview
    d = 1  # subplot slot
    for k in range(8):
        idx = i + limit * k  # same image, k-th augmentation combination
        test_image = dataset[idx]['image']
        plt.subplot(2, 4, d); plt.imshow(test_image[0]); plt.axis('off')
        d += 1
        # NOTE(review): no plt.show() — the figure only appears with an
        # interactive matplotlib backend.

# NOTE(review): dataset-dump residue followed here; the import below opened
# the next (parsed) copy of this same module.
import numpy as np
import cv2
import os
import pandas as pd
import torch
from torchvision import transforms
# NOTE(review): duplicate (parsed) copy of CardImageDataset — dataset-dump
# artifact, kept byte-identical.
class CardImageDataset():
    """Dataset of card images with 8x flip/rotate augmentation (8 * limit items)."""

    def __init__(self, root_dir='../data', header_file='gicsd_labels.csv', image_dir='images'):
        '''
        root_dir: location of the dataset dir
        header_file: location of the dataset header in the dataset directory
        image_dir: location of the images
        '''
        header_path = os.path.join(root_dir,header_file)
        # ", " separator requires the python engine.
        self.data_header = pd.read_csv(header_path, sep=', ', engine='python')
        self.image_dir = os.path.join(root_dir,image_dir)
        self.header_info, self.image_files, self.classes = self.header_info_extractor()
        self.limit = len(self.image_files)
        self.length = len(self.image_files) * 8  # 8 augmentation variants

    def __len__(self):
        return self.length

    def __getitem__(self, idx):
        # Decode which flips/rotation to apply, then fetch the base image.
        hflip, vflip, rotate = self.data_augmentations(idx)
        idx = idx % self.limit
        gray_image = self.load_image(self.image_files[idx])
        label = torch.LongTensor([self.header_info[idx,-1]])
        if hflip:
            gray_image = torch.flip(gray_image, dims=[1])
        if vflip:
            gray_image = torch.flip(gray_image, dims=[2])
        if rotate:
            gray_image = torch.rot90(gray_image, 1, dims=[2,1])
        return {'image': gray_image, 'label': label}

    def data_augmentations(self, idx):
        # Bits of idx // limit select (hflip, vflip, rotate).
        places = idx // self.limit
        hflip = bool(places & 1)
        places = places >> 1
        vflip = bool(places & 1)
        places = places >> 1
        rotate = bool(places & 1)
        return hflip, vflip, rotate

    def load_image(self, image_file):
        '''
        image_file: file name of the image in dataset
        return: blue channel of the loaded image
        '''
        file_path = os.path.join(self.image_dir, image_file)
        # OpenCV loads BGR; channel 0 is blue.
        frame = cv2.imread(file_path)[:,:,0].astype(np.float32)
        frame = torch.from_numpy(frame)
        frame /= 255
        frame = torch.unsqueeze(frame, dim=0)
        frame = transforms.functional.normalize(frame,
                                                mean=[0.406],
                                                std=[0.225])
        return frame

    def header_info_extractor(self):
        # Parse the CSV into (int info array, file names, sorted label names).
        image_files = list(self.data_header['IMAGE_FILENAME'].values)
        labels = self.data_header['LABEL'].values.astype(str)
        label_set = sorted(list(set(labels)))
        new_data_block = []
        for row in zip(image_files, labels):
            file_name = row[0].split('_')
            new_data_block.append(file_name[1:-1] + [row[1]])
        new_data_block = np.array(new_data_block)
        # chaning labels to numbers can help data processing
        for i, x in enumerate(label_set):
            new_data_block[new_data_block[:,-1] == x,-1] = i
        # NOTE(review): np.int was removed in NumPy >= 1.24 — should be int.
        new_data_block = new_data_block.astype(np.int)
        return new_data_block, image_files, label_set

    def decode_label(self, label):
        # Map a numeric label back to its class name.
        return self.classes[label]


def load_image_from_path(file_path='../data/images/GICSD_4_1_33.png'):
    # Standalone variant of CardImageDataset.load_image for a single path.
    frame = cv2.imread(file_path)[:,:,0].astype(np.float32)
    frame = torch.from_numpy(frame)
    frame /= 255
    frame = torch.unsqueeze(frame, dim=0)
    frame = transforms.functional.normalize(frame,
                                            mean=[0.406],
                                            std=[0.225])
    return frame


if __name__ == '__main__':
    # Smoke test: preview one image under all 8 augmentation variants.
    dataset = CardImageDataset(root_dir='../data', header_file='gicsd_labels.csv', image_dir='images')
    print(len(dataset))
    limit = dataset.limit
    import matplotlib.pyplot as plt
    plt.rcParams["figure.figsize"] = 15, 7
    i = 1  # image index to preview
    d = 1  # subplot slot
    for k in range(8):
        idx = i + limit * k  # same image, k-th augmentation combination
        test_image = dataset[idx]['image']
        plt.subplot(2,4,d); plt.imshow(test_image[0]); plt.axis('off')
        d += 1
import demistomock as demisto # noqa
import ExpanseEnrichAttribution
# --- Test fixtures -----------------------------------------------------------
# Each CURRENT_* list holds existing sightings; ENRICH_* holds lookup rows
# keyed by a different field name; RESULT_* is the expected merge where only
# the requested enrich field is copied onto records whose keys match.
CURRENT_IP = [
    {"ip": "1.1.1.1", "attr1": "value1"},
    {"ip": "8.8.8.8", "attr1": "value2"},
]
ENRICH_IP = [
    {"ipaddress": "1.1.1.1", "provider": "Cloudflare", "ignored": "ignored-right"},
    {"ipaddress": "8.8.8.4", "provider": "Google"}
]
# Only 1.1.1.1 matches, and only "provider" (not "ignored") is merged.
RESULT_IP = [
    {"ip": "1.1.1.1", "attr1": "value1", "provider": "Cloudflare"},
    {"ip": "8.8.8.8", "attr1": "value2"},
]

CURRENT_DEVICE = [
    {"serial": "serialA", "attr1": "value1"},
    {"serial": "serialB", "attr1": "value2"},
]
ENRICH_DEVICE = [
    {"deviceSerial": "serialA", "location": "unknown", "owner": "lmori"},
    {"deviceSerial": "serialC", "location": "unknown"}
]
# Only serialA matches, and only "location" (not "owner") is merged.
RESULT_DEVICE = [
    {"serial": "serialA", "attr1": "value1", "location": "unknown"},
    {"serial": "serialB", "attr1": "value2"},
]

CURRENT_USER = [
    {"username": "fvigo", "attr1": "value1"},
    {"username": "lmori", "attr1": "value2"},
]
ENRICH_USER = [
    {"user": "fvigo", "team": "DevRel", "manager": "unknown"},
    {"user": "ibojer", "team": "DevRel"}
]
# Only fvigo matches, and only "manager" (not "team") is merged.
RESULT_USER = [
    {"username": "fvigo", "attr1": "value1", "manager": "unknown"},
    {"username": "lmori", "attr1": "value2"},
]
def test_enrich_command():
    """
    Given:
        - nonenriched lists of: ips, users, devices sightings
        - enrichment information for ips, users, devices
    When
        - enriching lists of ips, users, devices
    Then
        - data is enriched
        - enriched output is returned
    """
    # (type, current, enrich, enrich_key, enrich_fields,
    #  expected outputs, expected key field, expected prefix)
    scenarios = [
        ('IP', CURRENT_IP, ENRICH_IP, 'ipaddress', 'provider',
         RESULT_IP, 'ip', 'Expanse.AttributionIP'),
        ('Device', CURRENT_DEVICE, ENRICH_DEVICE, 'deviceSerial', 'location',
         RESULT_DEVICE, 'serial', 'Expanse.AttributionDevice'),
        ('User', CURRENT_USER, ENRICH_USER, 'user', 'manager',
         RESULT_USER, 'username', 'Expanse.AttributionUser'),
    ]
    for sighting_type, current, enrich, key, fields, expected, key_field, prefix in scenarios:
        result = ExpanseEnrichAttribution.enrich_command({
            'type': sighting_type,
            'current': current,
            'enrich': enrich,
            'enrich_key': key,
            'enrich_fields': fields
        })
        assert result.outputs == expected
        assert result.outputs_key_field == key_field
        assert result.outputs_prefix == prefix

# NOTE(review): dataset-dump residue followed here; the import below opened
# the next (parsed) copy of this same module.
import demistomock as demisto  # noqa
import ExpanseEnrichAttribution
# NOTE(review): duplicate (parsed) copy of the fixtures and test above —
# dataset-dump artifact, kept byte-identical.
# Fixtures: CURRENT_* = existing sightings, ENRICH_* = lookup rows keyed by a
# different field, RESULT_* = expected merge of the requested field only.
CURRENT_IP = [
    {"ip": "1.1.1.1", "attr1": "value1"},
    {"ip": "8.8.8.8", "attr1": "value2"},
]
ENRICH_IP = [
    {"ipaddress": "1.1.1.1", "provider": "Cloudflare", "ignored": "ignored-right"},
    {"ipaddress": "8.8.8.4", "provider": "Google"}
]
RESULT_IP = [
    {"ip": "1.1.1.1", "attr1": "value1", "provider": "Cloudflare"},
    {"ip": "8.8.8.8", "attr1": "value2"},
]
CURRENT_DEVICE = [
    {"serial": "serialA", "attr1": "value1"},
    {"serial": "serialB", "attr1": "value2"},
]
ENRICH_DEVICE = [
    {"deviceSerial": "serialA", "location": "unknown", "owner": "lmori"},
    {"deviceSerial": "serialC", "location": "unknown"}
]
RESULT_DEVICE = [
    {"serial": "serialA", "attr1": "value1", "location": "unknown"},
    {"serial": "serialB", "attr1": "value2"},
]
CURRENT_USER = [
    {"username": "fvigo", "attr1": "value1"},
    {"username": "lmori", "attr1": "value2"},
]
ENRICH_USER = [
    {"user": "fvigo", "team": "DevRel", "manager": "unknown"},
    {"user": "ibojer", "team": "DevRel"}
]
RESULT_USER = [
    {"username": "fvigo", "attr1": "value1", "manager": "unknown"},
    {"username": "lmori", "attr1": "value2"},
]


def test_enrich_command():
    """
    Given:
        - nonenriched lists of: ips, users, devices sightings
        - enrichment information for ips, users, devices
    When
        - enriching lists of ips, users, devices
    Then
        - data is enriched
        - enriched output is returned
    """
    ip_result = ExpanseEnrichAttribution.enrich_command({
        'type': 'IP',
        'current': CURRENT_IP,
        'enrich': ENRICH_IP,
        'enrich_key': 'ipaddress',
        'enrich_fields': 'provider'
    })
    assert ip_result.outputs == RESULT_IP
    assert ip_result.outputs_key_field == "ip"
    assert ip_result.outputs_prefix == "Expanse.AttributionIP"

    device_result = ExpanseEnrichAttribution.enrich_command({
        'type': 'Device',
        'current': CURRENT_DEVICE,
        'enrich': ENRICH_DEVICE,
        'enrich_key': 'deviceSerial',
        'enrich_fields': 'location'
    })
    assert device_result.outputs == RESULT_DEVICE
    assert device_result.outputs_key_field == "serial"
    assert device_result.outputs_prefix == "Expanse.AttributionDevice"

    user_result = ExpanseEnrichAttribution.enrich_command({
        'type': 'User',
        'current': CURRENT_USER,
        'enrich': ENRICH_USER,
        'enrich_key': 'user',
        'enrich_fields': 'manager'
    })
    assert user_result.outputs == RESULT_USER
    assert user_result.outputs_key_field == "username"
    assert user_result.outputs_prefix == "Expanse.AttributionUser"
import click, pkg_resources, os
def fileType(ctx, param, value):
    """click callback: record the requested config file type on ctx.obj.

    The type string ('yml' or 'json') is sliced out of str(param).
    NOTE(review): relying on the repr of a click Parameter is fragile —
    confirm against click's Parameter.__str__ before modifying.
    """
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        ctx.obj = dict()
    # str(param) is assumed to end with the option name plus one trailing
    # character, e.g. '...yml>' (ends in 'l') vs '...json>'.
    if str(param)[-2] == 'l':
        ctx.obj['filetype'] = str(param)[-4:-1]
    else:
        ctx.obj['filetype'] = str(param)[-5:-1]
def selectFile(ctx, param, value):
    """click callback: remember the chosen config file path on ctx.obj and
    infer its type from the extension; abort on unsupported extensions."""
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        ctx.obj = {}
    chosen = str(click.format_filename(value))
    ctx.obj['path'] = chosen
    if chosen.endswith('yml'):
        ctx.obj['filetype'] = 'yml'
    elif chosen.endswith('json'):
        ctx.obj['filetype'] = 'json'
    else:
        click.echo('[ERROR] Unsupported file type')
        ctx.exit()
def editFile(ctx, param, value):
    """click callback: open the existing config file in the user's editor.

    Prefers prog.json over prog.yml (both searched in the current working
    directory); aborts when neither exists.
    """
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        ctx.obj = dict()
    if os.path.exists('prog.json'):
        ctx.obj['path'] = 'prog.json'
        ctx.obj['filetype'] = 'json'
    elif os.path.exists('prog.yml'):
        ctx.obj['path'] = 'prog.yml'
        ctx.obj['filetype'] = 'yml'
    else:
        click.echo('[ERROR] config file not found')
        ctx.exit()
    # require_save=False: proceed even if the user closes without saving.
    click.edit(require_save=False, filename=ctx.obj['path'])
def genFile(ctx, param, value):
    """click callback: write a starter config file from the packaged template.

    Requires that a prior callback populated ctx.obj with 'path' and/or
    'filetype'; whichever is missing is derived from the other.
    """
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        click.echo('[ERROR] Must specify file type or file name')
        ctx.exit()
    obj = ctx.obj
    if 'path' not in obj.keys():
        obj['path'] = 'prog.' + obj['filetype']
    elif 'filetype' not in obj.keys():
        # Derive the type from the extension: 'yml' ends in 'l', 'json' does not.
        obj['filetype'] = obj['path'][-3:] if obj['path'][-1] == 'l' else obj['path'][-4:]
    template = pkg_resources.resource_string(
        __name__, 'assets/prog.' + obj['filetype']).decode('utf-8')
    with open(obj['path'], 'wt') as fh:
        fh.write(template)

# NOTE(review): dataset-dump residue followed here; the import below opened
# the next (parsed) copy of this same module.
import click, pkg_resources, os
# NOTE(review): duplicate (parsed) copy of the four click callbacks above —
# dataset-dump artifact, kept byte-identical.
def fileType(ctx, param, value):
    """click callback: record the requested file type, sliced from str(param)."""
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        ctx.obj = dict()
    # '...yml>' ends in 'l'; otherwise assume '...json>'.
    if str(param)[-2] == 'l':
        ctx.obj['filetype'] = str(param)[-4:-1]
    else:
        ctx.obj['filetype'] = str(param)[-5:-1]


def selectFile(ctx, param, value):
    """click callback: remember the chosen config file path and infer its type."""
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        ctx.obj = dict()
    ctx.obj['path'] = str(click.format_filename(value))
    if ctx.obj['path'][-3:] == 'yml':
        ctx.obj['filetype'] = 'yml'
    elif ctx.obj['path'][-4:] == 'json':
        ctx.obj['filetype'] = 'json'
    else:
        click.echo('[ERROR] Unsupported file type')
        ctx.exit()


def editFile(ctx, param, value):
    """click callback: open prog.json (preferred) or prog.yml in the editor."""
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        ctx.obj = dict()
    if os.path.exists('prog.json'):
        ctx.obj['path'] = 'prog.json'
        ctx.obj['filetype'] = 'json'
    elif os.path.exists('prog.yml'):
        ctx.obj['path'] = 'prog.yml'
        ctx.obj['filetype'] = 'yml'
    else:
        click.echo('[ERROR] config file not found')
        ctx.exit()
    click.edit(require_save=False, filename=ctx.obj['path'])


def genFile(ctx, param, value):
    """click callback: write a starter config from the packaged template."""
    if not value or ctx.resilient_parsing:
        return value
    if not isinstance(ctx.obj, dict):
        click.echo('[ERROR] Must specify file type or file name')
        ctx.exit()
    if 'path' not in ctx.obj.keys():
        ctx.obj['path'] = 'prog.' + ctx.obj['filetype']
    elif 'filetype' not in ctx.obj.keys():
        # 'yml' ends in 'l'; otherwise assume a 4-char extension ('json').
        if ctx.obj['path'][-1] == 'l':
            ctx.obj['filetype'] = ctx.obj['path'][-3:]
        else:
            ctx.obj['filetype'] = ctx.obj['path'][-4:]
    buffer = pkg_resources.resource_string(
        __name__, 'assets/prog.' + ctx.obj['filetype']).decode('utf-8')
    with open(ctx.obj['path'], 'wt') as f:
        f.write(buffer)
import calendar
import os
import signal
import subprocess
import sys
import time
import deimos.cleanup
import deimos.config
import deimos.containerizer
import deimos.containerizer.docker
from deimos.err import Err
import deimos.flock
from deimos.logger import log
import deimos.sig
import deimos.usage
def cli(argv=None):
    """Entry point: dispatch the Deimos subcommand named in argv[1].

    Python 2 code. Returns a process exit status: 0 ok, 1 usage error,
    4 known failure (Err / CalledProcessError), 8 unhandled failure.
    """
    # Install a no-op signal handler so signals interrupt blocking calls
    # instead of terminating the process outright.
    deimos.sig.install(lambda _: None)
    if argv is None:
        argv = sys.argv
    sub = argv[1] if len(argv) > 1 else None
    if sub in ["-h", "--help", "help"]:
        print format_help()
        return 0
    conf = deimos.config.load_configuration()
    if sub == "config":
        log.info("Final configuration:")
        # NOTE(review): the loop variable rebinds `conf`, shadowing the loaded
        # configuration — harmless only because we return immediately after.
        for _, conf in conf.items():
            print "%r" % conf
        return 0
    if sub == "locks":
        deimos.flock.lock_browser(os.path.join(conf.state.root, "mesos"))
        return 0
    if sub == "state":
        cleanup = deimos.cleanup.Cleanup(conf.state.root)
        t, rm = time.time(), False
        for arg in argv[2:]:
            if arg == "--rm":
                rm = True
                continue
            # Any other argument is parsed as a UTC cutoff timestamp.
            t = calendar.timegm(time.strptime(arg, "%Y-%m-%dT%H:%M:%SZ"))
        if rm:
            return cleanup.remove(t)
        else:
            for d in cleanup.dirs(t):
                sys.stdout.write(d + "\n")
            return 0
    if sub not in deimos.containerizer.methods():
        print >>sys.stderr, format_help()
        print >>sys.stderr, "** Please specify a subcommand **".center(79)
        log.error("Bad ARGV: %r" % argv[1:])
        return 1
    # NOTE(review): `deimos.docker` is not imported by this module (only
    # deimos.containerizer.docker is) — confirm this attribute access resolves.
    deimos.docker.options = conf.docker.argv()
    containerizer = deimos.containerizer.docker.Docker(
        container_settings=conf.containers,
        index_settings=conf.index,
        optimistic_unpack=conf.uris.unpack,
        hooks=conf.hooks,
        state_root=conf.state.root
    )
    deimos.usage.report()
    try:
        # Route stdin/stdout through the containerizer method named by `sub`.
        result = deimos.containerizer.stdio(containerizer, *argv[1:])
        deimos.usage.report()
        if result is not None:
            # Map the method's return value onto an exit status / stdout.
            if isinstance(result, bool):
                return 0 if result else 1
            if isinstance(result, int):
                return result
            if isinstance(result, str):
                sys.stdout.write(result)
            else:
                for item in result:
                    sys.stdout.write(str(item) + "\n")
    except Err as e:
        log.error("%s.%s: %s", type(e).__module__, type(e).__name__, str(e))
        return 4
    except subprocess.CalledProcessError as e:
        log.error(str(e))
        return 4
    except Exception:
        log.exception("Unhandled failure in %s", sub)
        return 8
    return 0
def format_help():
    """Return the CLI usage text, with leading/trailing newlines stripped."""
    return """
 USAGE: deimos launch  (--no-fork)?
        deimos usage
        deimos destroy
        deimos wait
        deimos observe <mesos-container-id>
        deimos locks
        deimos state

  Deimos provides Mesos integration for Docker, allowing Docker to be used as
  an external containerizer.

  deimos launch (--no-fork)?

    Launches a container and runs the executor or command specified in the
    TaskInfo, passed in on standard in.

    The launch subcommand always watches the launched container and logs changes
    in its lifecycle. By default, it forks off a child to do the watching, as
    part of the contract external containerizers have with Mesos. With
    --no-fork, launch will watch the container and log in the foreground. This
    can be helpful during debugging.

  deimos usage

    Generates a protobuf description of the resources used by the container.

  deimos destroy

    Shuts down the specified container.

  deimos wait

    Reads STDIN to find the container to watch.

  deimos observe <mesos-container-id>

    Observes the Mesos container ID, in a way that blocks all calls to `wait`.
    It is for internal use...probably don't want to play with this one.

  deimos locks

    List file locks taken by Deimos, associating each file with a PID, an inode,
    and a lock level. The same file may appear multiple times.

  deimos state (--rm)?

    List stale state directories (those with an exit file). With --rm, removes
    stale states.

  deimos config

    Load and display the configuration.
""".strip("\n")


if __name__ == "__main__":
    sys.exit(cli(sys.argv))

# NOTE(review): dataset-dump residue followed here; the import below opened
# the next (parsed) copy of this same module.
import calendar
import os
import signal
import subprocess
import sys
import time
import deimos.cleanup
import deimos.config
import deimos.containerizer
import deimos.containerizer.docker
from deimos.err import Err
import deimos.flock
from deimos.logger import log
import deimos.sig
import deimos.usage
# NOTE(review): duplicate (parsed) copy of the deimos CLI above — dataset-dump
# artifact, kept byte-identical.
def cli(argv=None):
    """Dispatch the Deimos subcommand named in argv[1]; return an exit status."""
    deimos.sig.install(lambda _: None)
    if argv is None:
        argv = sys.argv
    sub = argv[1] if len(argv) > 1 else None
    if sub in ["-h", "--help", "help"]:
        print format_help()
        return 0
    conf = deimos.config.load_configuration()
    if sub == "config":
        log.info("Final configuration:")
        # NOTE(review): loop rebinds `conf`; safe only because of the return.
        for _, conf in conf.items():
            print "%r" % conf
        return 0
    if sub == "locks":
        deimos.flock.lock_browser(os.path.join(conf.state.root, "mesos"))
        return 0
    if sub == "state":
        cleanup = deimos.cleanup.Cleanup(conf.state.root)
        t, rm = time.time(), False
        for arg in argv[2:]:
            if arg == "--rm":
                rm = True
                continue
            # Non-flag arguments are UTC cutoff timestamps.
            t = calendar.timegm(time.strptime(arg, "%Y-%m-%dT%H:%M:%SZ"))
        if rm:
            return cleanup.remove(t)
        else:
            for d in cleanup.dirs(t):
                sys.stdout.write(d + "\n")
            return 0
    if sub not in deimos.containerizer.methods():
        print >>sys.stderr, format_help()
        print >>sys.stderr, "** Please specify a subcommand **".center(79)
        log.error("Bad ARGV: %r" % argv[1:])
        return 1
    # NOTE(review): `deimos.docker` is not imported here — confirm it resolves.
    deimos.docker.options = conf.docker.argv()
    containerizer = deimos.containerizer.docker.Docker(
        container_settings=conf.containers,
        index_settings=conf.index,
        optimistic_unpack=conf.uris.unpack,
        hooks=conf.hooks,
        state_root=conf.state.root
    )
    deimos.usage.report()
    try:
        result = deimos.containerizer.stdio(containerizer, *argv[1:])
        deimos.usage.report()
        if result is not None:
            # Map the method's return value onto an exit status / stdout.
            if isinstance(result, bool):
                return 0 if result else 1
            if isinstance(result, int):
                return result
            if isinstance(result, str):
                sys.stdout.write(result)
            else:
                for item in result:
                    sys.stdout.write(str(item) + "\n")
    except Err as e:
        log.error("%s.%s: %s", type(e).__module__, type(e).__name__, str(e))
        return 4
    except subprocess.CalledProcessError as e:
        log.error(str(e))
        return 4
    except Exception:
        log.exception("Unhandled failure in %s", sub)
        return 8
    return 0


def format_help():
    """Return the CLI usage text, with leading/trailing newlines stripped."""
    return """
 USAGE: deimos launch  (--no-fork)?
        deimos usage
        deimos destroy
        deimos wait
        deimos observe <mesos-container-id>
        deimos locks
        deimos state

  Deimos provides Mesos integration for Docker, allowing Docker to be used as
  an external containerizer.

  deimos launch (--no-fork)?

    Launches a container and runs the executor or command specified in the
    TaskInfo, passed in on standard in.

    The launch subcommand always watches the launched container and logs changes
    in its lifecycle. By default, it forks off a child to do the watching, as
    part of the contract external containerizers have with Mesos. With
    --no-fork, launch will watch the container and log in the foreground. This
    can be helpful during debugging.

  deimos usage

    Generates a protobuf description of the resources used by the container.

  deimos destroy

    Shuts down the specified container.

  deimos wait

    Reads STDIN to find the container to watch.

  deimos observe <mesos-container-id>

    Observes the Mesos container ID, in a way that blocks all calls to `wait`.
    It is for internal use...probably don't want to play with this one.

  deimos locks

    List file locks taken by Deimos, associating each file with a PID, an inode,
    and a lock level. The same file may appear multiple times.

  deimos state (--rm)?

    List stale state directories (those with an exit file). With --rm, removes
    stale states.

  deimos config

    Load and display the configuration.
""".strip("\n")


if __name__ == "__main__":
    sys.exit(cli(sys.argv))
import os
import time
from modules.findlorf_main import findlorf_main
from modules.transfix_main import transfix_main
from modules.transfeat_main import transfeat_main
from lib.parsing.gtf_parsing_tools import filter_gtf_file, add_features_to_gtf
def file_exist(outfile, skip_message=True):
    """Return True (with a log line) when *outfile* already exists, else False.

    skip_message selects which follow-up line is printed: keeping the current
    file (True) or overwriting it (False). The function itself never deletes
    or writes anything.
    """
    if not os.path.exists(outfile):
        return False
    print(time.asctime(), f'File already exist: {outfile}')
    if skip_message:
        print(time.asctime(), f'Keeping current file')
    else:
        print(time.asctime(), f'Overwriting file')
    return True
def run_transuite(gtf, fasta, outpath, outname, iter_th=5, cds_th=30, pep_th=50, ptc_th=70, chimeric=None):
# 1) Remove CDS information from input annotation file
filtered_gtf = os.path.splitext(gtf)[0] + '_exons.gtf'
if not file_exist(filtered_gtf):
filtered_gtf = filter_gtf_file(gtf)
# 2) Run FindLORF
tfind_name = outname.replace(".gtf", "") + "_longorf"
tfind_folder = os.path.join(outpath, tfind_name)
transfind_gtf = os.path.join(tfind_folder, f"{tfind_name}.gtf")
orf_index_filename = os.path.splitext(os.path.basename(filtered_gtf))[0] + "_ORF_index.json"
orf_index_file = os.path.join(tfind_folder, orf_index_filename)
if not file_exist(transfind_gtf) or not file_exist(orf_index_file):
transfind_gtf, orf_index_file = findlorf_main(filtered_gtf, fasta, outpath, outname, cds_th=cds_th,
filter_gtf=False)
# 3) Run TransFix
tfix_name = outname.replace(".gtf", "") + "_transfix"
tfix_folder = os.path.join(outpath, tfix_name)
transfix_gtf = os.path.join(tfix_folder, f"{tfix_name}.gtf")
if not file_exist(transfix_gtf):
transfix_gtf = transfix_main(transfind_gtf, fasta, outpath, outname, iter_th=iter_th, chimeric=chimeric)
# 3.5) Add extra features to the annotation
transfix_gtf = add_features_to_gtf(transfix_gtf)
# 4) Run TransFeat
tfeat_name = outname.replace(".gtf", "") + "_transfeat"
tfeat_folder = os.path.join(outpath, tfeat_name)
transfeat_table = os.path.join(tfeat_folder, f"{tfeat_name}.csv")
if not file_exist(transfeat_table):
transfeat_table = transfeat_main(transfix_gtf, fasta, outpath, outname, pep_len=pep_th, ptc_len=ptc_th,
uorf_len=10, sj_dist=50, utr3_len=350, orf_index=orf_index_file)
print(time.asctime(), f'Annotation file with fixed start-codon coordinates: {transfix_gtf}')
print(time.asctime(), f'Table with coding characterization of transcripts: {transfeat_table}') | modules/auto_main.py | import os
import time
from modules.findlorf_main import findlorf_main
from modules.transfix_main import transfix_main
from modules.transfeat_main import transfeat_main
from lib.parsing.gtf_parsing_tools import filter_gtf_file, add_features_to_gtf
def file_exist(outfile, skip_message=True):
if os.path.exists(outfile):
print(time.asctime(), f'File already exist: {outfile}')
if skip_message:
print(time.asctime(), f'Keeping current file')
else:
print(time.asctime(), f'Overwriting file')
return True
else:
return False
def run_transuite(gtf, fasta, outpath, outname, iter_th=5, cds_th=30, pep_th=50, ptc_th=70, chimeric=None):
# 1) Remove CDS information from input annotation file
filtered_gtf = os.path.splitext(gtf)[0] + '_exons.gtf'
if not file_exist(filtered_gtf):
filtered_gtf = filter_gtf_file(gtf)
# 2) Run FindLORF
tfind_name = outname.replace(".gtf", "") + "_longorf"
tfind_folder = os.path.join(outpath, tfind_name)
transfind_gtf = os.path.join(tfind_folder, f"{tfind_name}.gtf")
orf_index_filename = os.path.splitext(os.path.basename(filtered_gtf))[0] + "_ORF_index.json"
orf_index_file = os.path.join(tfind_folder, orf_index_filename)
if not file_exist(transfind_gtf) or not file_exist(orf_index_file):
transfind_gtf, orf_index_file = findlorf_main(filtered_gtf, fasta, outpath, outname, cds_th=cds_th,
filter_gtf=False)
# 3) Run TransFix
tfix_name = outname.replace(".gtf", "") + "_transfix"
tfix_folder = os.path.join(outpath, tfix_name)
transfix_gtf = os.path.join(tfix_folder, f"{tfix_name}.gtf")
if not file_exist(transfix_gtf):
transfix_gtf = transfix_main(transfind_gtf, fasta, outpath, outname, iter_th=iter_th, chimeric=chimeric)
# 3.5) Add extra features to the annotation
transfix_gtf = add_features_to_gtf(transfix_gtf)
# 4) Run TransFeat
tfeat_name = outname.replace(".gtf", "") + "_transfeat"
tfeat_folder = os.path.join(outpath, tfeat_name)
transfeat_table = os.path.join(tfeat_folder, f"{tfeat_name}.csv")
if not file_exist(transfeat_table):
transfeat_table = transfeat_main(transfix_gtf, fasta, outpath, outname, pep_len=pep_th, ptc_len=ptc_th,
uorf_len=10, sj_dist=50, utr3_len=350, orf_index=orf_index_file)
print(time.asctime(), f'Annotation file with fixed start-codon coordinates: {transfix_gtf}')
print(time.asctime(), f'Table with coding characterization of transcripts: {transfeat_table}') | 0.274254 | 0.090213 |
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class EnergyManagementSystemSensor(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:Sensor`
Declares EMS variable as a sensor
a list of output variables and meters that can be reported are available after a run on
the report (.rdd) or meter dictionary file (.mdd) if the Output:VariableDictionary
has been requested.
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'output:variable or output:meter index key name',
{'name': u'Output:Variable or Output:Meter Index Key Name',
'pyname': u'outputvariable_or_outputmeter_index_key_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'output:variable or output:meter name',
{'name': u'Output:Variable or Output:Meter Name',
'pyname': u'outputvariable_or_outputmeter_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'external-list'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 3,
'name': u'EnergyManagementSystem:Sensor',
'pyname': u'EnergyManagementSystemSensor',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| This name becomes a variable for use in Erl programs
| no spaces allowed in name
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def outputvariable_or_outputmeter_index_key_name(self):
"""field `Output:Variable or Output:Meter Index Key Name`
Args:
value (str): value for IDD Field `Output:Variable or Output:Meter Index Key Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `outputvariable_or_outputmeter_index_key_name` or None if not set
"""
return self["Output:Variable or Output:Meter Index Key Name"]
@outputvariable_or_outputmeter_index_key_name.setter
def outputvariable_or_outputmeter_index_key_name(self, value=None):
""" Corresponds to IDD field `Output:Variable or Output:Meter Index Key Name`
"""
self["Output:Variable or Output:Meter Index Key Name"] = value
@property
def outputvariable_or_outputmeter_name(self):
"""field `Output:Variable or Output:Meter Name`
Args:
value (str): value for IDD Field `Output:Variable or Output:Meter Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `outputvariable_or_outputmeter_name` or None if not set
"""
return self["Output:Variable or Output:Meter Name"]
@outputvariable_or_outputmeter_name.setter
def outputvariable_or_outputmeter_name(self, value=None):
""" Corresponds to IDD field `Output:Variable or Output:Meter Name`
"""
self["Output:Variable or Output:Meter Name"] = value
class EnergyManagementSystemActuator(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:Actuator`
Hardware portion of EMS used to set up actuators in the model
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'actuated component unique name',
{'name': u'Actuated Component Unique Name',
'pyname': u'actuated_component_unique_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'actuated component type',
{'name': u'Actuated Component Type',
'pyname': u'actuated_component_type',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'actuated component control type',
{'name': u'Actuated Component Control Type',
'pyname': u'actuated_component_control_type',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 4,
'name': u'EnergyManagementSystem:Actuator',
'pyname': u'EnergyManagementSystemActuator',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| This name becomes a variable for use in Erl programs
| no spaces allowed in name
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def actuated_component_unique_name(self):
"""field `Actuated Component Unique Name`
Args:
value (str): value for IDD Field `Actuated Component Unique Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `actuated_component_unique_name` or None if not set
"""
return self["Actuated Component Unique Name"]
@actuated_component_unique_name.setter
def actuated_component_unique_name(self, value=None):
"""Corresponds to IDD field `Actuated Component Unique Name`"""
self["Actuated Component Unique Name"] = value
@property
def actuated_component_type(self):
"""field `Actuated Component Type`
Args:
value (str): value for IDD Field `Actuated Component Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `actuated_component_type` or None if not set
"""
return self["Actuated Component Type"]
@actuated_component_type.setter
def actuated_component_type(self, value=None):
"""Corresponds to IDD field `Actuated Component Type`"""
self["Actuated Component Type"] = value
@property
def actuated_component_control_type(self):
"""field `Actuated Component Control Type`
Args:
value (str): value for IDD Field `Actuated Component Control Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `actuated_component_control_type` or None if not set
"""
return self["Actuated Component Control Type"]
@actuated_component_control_type.setter
def actuated_component_control_type(self, value=None):
"""Corresponds to IDD field `Actuated Component Control Type`"""
self["Actuated Component Control Type"] = value
class EnergyManagementSystemProgramCallingManager(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:ProgramCallingManager`
Input EMS program. a program needs a name
a description of when it should be called
and then lines of program code for EMS Runtime language
"""
_schema = {'extensible-fields': OrderedDict([(u'program name 1',
{'name': u'Program Name 1',
'pyname': u'program_name_1',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'energyplus model calling point',
{'name': u'EnergyPlus Model Calling Point',
'pyname': u'energyplus_model_calling_point',
'required-field': False,
'autosizable': False,
'accepted-values': [u'BeginNewEnvironment',
u'AfterNewEnvironmentWarmUpIsComplete',
u'BeginTimestepBeforePredictor',
u'AfterPredictorBeforeHVACManagers',
u'AfterPredictorAfterHVACManagers',
u'InsideHVACSystemIterationLoop',
u'EndOfZoneTimestepBeforeZoneReporting',
u'EndOfZoneTimestepAfterZoneReporting',
u'EndOfSystemTimestepBeforeHVACReporting',
u'EndOfSystemTimestepAfterHVACReporting',
u'EndOfZoneSizing',
u'EndOfSystemSizing',
u'AfterComponentInputReadIn',
u'UserDefinedComponentModel',
u'UnitarySystemSizing'],
'autocalculatable': False,
'type': 'alpha'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 3,
'name': u'EnergyManagementSystem:ProgramCallingManager',
'pyname': u'EnergyManagementSystemProgramCallingManager',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| no spaces allowed in name
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def energyplus_model_calling_point(self):
"""field `EnergyPlus Model Calling Point`
Args:
value (str): value for IDD Field `EnergyPlus Model Calling Point`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `energyplus_model_calling_point` or None if not set
"""
return self["EnergyPlus Model Calling Point"]
@energyplus_model_calling_point.setter
def energyplus_model_calling_point(self, value=None):
"""Corresponds to IDD field `EnergyPlus Model Calling Point`"""
self["EnergyPlus Model Calling Point"] = value
def add_extensible(self,
program_name_1=None,
):
"""Add values for extensible fields.
Args:
program_name_1 (str): value for IDD Field `Program Name 1`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
"""
vals = []
program_name_1 = self.check_value("Program Name 1", program_name_1)
vals.append(program_name_1)
self._extdata.append(vals)
@property
def extensibles(self):
"""Get list of all extensibles."""
return self._extdata
@extensibles.setter
def extensibles(self, extensibles):
"""Replaces extensible fields with `extensibles`
Args:
extensibles (list): nested list of extensible values
"""
self._extdata = []
for ext in extensibles:
self.add_extensible(*ext)
class EnergyManagementSystemOutputVariable(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:OutputVariable`
This object sets up an EnergyPlus output variable from an Erl variable
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'ems variable name',
{'name': u'EMS Variable Name',
'pyname': u'ems_variable_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'type of data in variable',
{'name': u'Type of Data in Variable',
'pyname': u'type_of_data_in_variable',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Averaged',
u'Summed'],
'autocalculatable': False,
'type': 'alpha'}),
(u'update frequency',
{'name': u'Update Frequency',
'pyname': u'update_frequency',
'required-field': True,
'autosizable': False,
'accepted-values': [u'ZoneTimestep',
u'SystemTimestep'],
'autocalculatable': False,
'type': 'alpha'}),
(u'ems program or subroutine name',
{'name': u'EMS Program or Subroutine Name',
'pyname': u'ems_program_or_subroutine_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'units',
{'name': u'Units',
'pyname': u'units',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 4,
'name': u'EnergyManagementSystem:OutputVariable',
'pyname': u'EnergyManagementSystemOutputVariable',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def ems_variable_name(self):
"""field `EMS Variable Name`
| must be an acceptable EMS variable
Args:
value (str): value for IDD Field `EMS Variable Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `ems_variable_name` or None if not set
"""
return self["EMS Variable Name"]
@ems_variable_name.setter
def ems_variable_name(self, value=None):
"""Corresponds to IDD field `EMS Variable Name`"""
self["EMS Variable Name"] = value
@property
def type_of_data_in_variable(self):
"""field `Type of Data in Variable`
Args:
value (str): value for IDD Field `Type of Data in Variable`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `type_of_data_in_variable` or None if not set
"""
return self["Type of Data in Variable"]
@type_of_data_in_variable.setter
def type_of_data_in_variable(self, value=None):
"""Corresponds to IDD field `Type of Data in Variable`"""
self["Type of Data in Variable"] = value
@property
def update_frequency(self):
"""field `Update Frequency`
Args:
value (str): value for IDD Field `Update Frequency`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `update_frequency` or None if not set
"""
return self["Update Frequency"]
@update_frequency.setter
def update_frequency(self, value=None):
"""Corresponds to IDD field `Update Frequency`"""
self["Update Frequency"] = value
@property
def ems_program_or_subroutine_name(self):
"""field `EMS Program or Subroutine Name`
| optional for global scope variables, required for local scope variables
Args:
value (str): value for IDD Field `EMS Program or Subroutine Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `ems_program_or_subroutine_name` or None if not set
"""
return self["EMS Program or Subroutine Name"]
@ems_program_or_subroutine_name.setter
def ems_program_or_subroutine_name(self, value=None):
"""Corresponds to IDD field `EMS Program or Subroutine Name`"""
self["EMS Program or Subroutine Name"] = value
@property
def units(self):
"""field `Units`
| optional but will result in dimensionless units for blank
| EnergyPlus units are standard SI units
Args:
value (str): value for IDD Field `Units`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `units` or None if not set
"""
return self["Units"]
@units.setter
def units(self, value=None):
"""Corresponds to IDD field `Units`"""
self["Units"] = value
class EnergyManagementSystemMeteredOutputVariable(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:MeteredOutputVariable`
This object sets up an EnergyPlus output variable from an Erl variable
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'ems variable name',
{'name': u'EMS Variable Name',
'pyname': u'ems_variable_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'update frequency',
{'name': u'Update Frequency',
'pyname': u'update_frequency',
'required-field': True,
'autosizable': False,
'accepted-values': [u'ZoneTimestep',
u'SystemTimestep'],
'autocalculatable': False,
'type': 'alpha'}),
(u'ems program or subroutine name',
{'name': u'EMS Program or Subroutine Name',
'pyname': u'ems_program_or_subroutine_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'resource type',
{'name': u'Resource Type',
'pyname': u'resource_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Electricity',
u'NaturalGas',
u'Gasoline',
u'Diesel',
u'Coal',
u'FuelOil#1',
u'FuelOil#2',
u'Propane',
u'OtherFuel1',
u'OtherFuel2',
u'WaterUse',
u'OnSiteWaterProduced',
u'MainsWaterSupply',
u'RainWaterCollected',
u'WellWaterDrawn',
u'CondensateWaterCollected',
u'EnergyTransfer',
u'Steam',
u'DistrictCooling',
u'DistrictHeating',
u'ElectricityProducedOnSite',
u'SolarWaterHeating',
u'SolarAirHeating'],
'autocalculatable': False,
'type': 'alpha'}),
(u'group type',
{'name': u'Group Type',
'pyname': u'group_type',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Building',
u'HVAC',
u'Plant',
u'System'],
'autocalculatable': False,
'type': 'alpha'}),
(u'end-use category',
{'name': u'End-Use Category',
'pyname': u'enduse_category',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Heating',
u'Cooling',
u'InteriorLights',
u'ExteriorLights',
u'InteriorEquipment',
u'ExteriorEquipment',
u'Fans',
u'Pumps',
u'HeatRejection',
u'Humidifier',
u'HeatRecovery',
u'WaterSystems',
u'Refrigeration',
u'OnSiteGeneration',
u'HeatingCoils',
u'CoolingCoils',
u'Chillers',
u'Boilers',
u'Baseboard',
u'HeatRecoveryForCooling',
u'HeatRecoveryForHeating'],
'autocalculatable': False,
'type': 'alpha'}),
(u'end-use subcategory',
{'name': u'End-Use Subcategory',
'pyname': u'enduse_subcategory',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'units',
{'name': u'Units',
'pyname': u'units',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 7,
'name': u'EnergyManagementSystem:MeteredOutputVariable',
'pyname': u'EnergyManagementSystemMeteredOutputVariable',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def ems_variable_name(self):
"""field `EMS Variable Name`
| must be an acceptable EMS variable, no spaces
Args:
value (str): value for IDD Field `EMS Variable Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `ems_variable_name` or None if not set
"""
return self["EMS Variable Name"]
@ems_variable_name.setter
def ems_variable_name(self, value=None):
"""Corresponds to IDD field `EMS Variable Name`"""
self["EMS Variable Name"] = value
@property
def update_frequency(self):
"""field `Update Frequency`
Args:
value (str): value for IDD Field `Update Frequency`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `update_frequency` or None if not set
"""
return self["Update Frequency"]
@update_frequency.setter
def update_frequency(self, value=None):
"""Corresponds to IDD field `Update Frequency`"""
self["Update Frequency"] = value
@property
def ems_program_or_subroutine_name(self):
"""field `EMS Program or Subroutine Name`
| optional for global scope variables, required for local scope variables
Args:
value (str): value for IDD Field `EMS Program or Subroutine Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `ems_program_or_subroutine_name` or None if not set
"""
return self["EMS Program or Subroutine Name"]
@ems_program_or_subroutine_name.setter
def ems_program_or_subroutine_name(self, value=None):
"""Corresponds to IDD field `EMS Program or Subroutine Name`"""
self["EMS Program or Subroutine Name"] = value
@property
def resource_type(self):
"""field `Resource Type`
| choose the type of fuel, water, electricity, pollution or heat rate that should be metered.
Args:
value (str): value for IDD Field `Resource Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `resource_type` or None if not set
"""
return self["Resource Type"]
@resource_type.setter
def resource_type(self, value=None):
"""Corresponds to IDD field `Resource Type`"""
self["Resource Type"] = value
@property
def group_type(self):
"""field `Group Type`
| choose a general classification, building (internal services), HVAC (air systems), or plant (hydronic systems), or system
Args:
value (str): value for IDD Field `Group Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `group_type` or None if not set
"""
return self["Group Type"]
@group_type.setter
def group_type(self, value=None):
"""Corresponds to IDD field `Group Type`"""
self["Group Type"] = value
@property
def enduse_category(self):
"""field `End-Use Category`
| choose how the metered output should be classified for end-use category
Args:
value (str): value for IDD Field `End-Use Category`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `enduse_category` or None if not set
"""
return self["End-Use Category"]
@enduse_category.setter
def enduse_category(self, value=None):
""" Corresponds to IDD field `End-Use Category`
"""
self["End-Use Category"] = value
@property
def enduse_subcategory(self):
"""field `End-Use Subcategory`
| enter a user-defined subcategory for this metered output
Args:
value (str): value for IDD Field `End-Use Subcategory`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `enduse_subcategory` or None if not set
"""
return self["End-Use Subcategory"]
@enduse_subcategory.setter
def enduse_subcategory(self, value=None):
""" Corresponds to IDD field `End-Use Subcategory`
"""
self["End-Use Subcategory"] = value
@property
def units(self):
"""field `Units`
| optional but will result in dimensionless units for blank
| EnergyPlus units are standard SI units
Args:
value (str): value for IDD Field `Units`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `units` or None if not set
"""
return self["Units"]
@units.setter
def units(self, value=None):
"""Corresponds to IDD field `Units`"""
self["Units"] = value
class EnergyManagementSystemTrendVariable(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:TrendVariable`
This object sets up an EMS trend variable from an Erl variable
A trend variable logs values across timesteps
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'ems variable name',
{'name': u'EMS Variable Name',
'pyname': u'ems_variable_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'number of timesteps to be logged',
{'name': u'Number of Timesteps to be Logged',
'pyname': u'number_of_timesteps_to_be_logged',
'required-field': True,
'autosizable': False,
'minimum': 1,
'autocalculatable': False,
'type': u'integer'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 3,
'name': u'EnergyManagementSystem:TrendVariable',
'pyname': u'EnergyManagementSystemTrendVariable',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| no spaces allowed in name
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def ems_variable_name(self):
"""field `EMS Variable Name`
| must be a global scope EMS variable
Args:
value (str): value for IDD Field `EMS Variable Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `ems_variable_name` or None if not set
"""
return self["EMS Variable Name"]
@ems_variable_name.setter
def ems_variable_name(self, value=None):
"""Corresponds to IDD field `EMS Variable Name`"""
self["EMS Variable Name"] = value
@property
def number_of_timesteps_to_be_logged(self):
"""field `Number of Timesteps to be Logged`
| value >= 1
Args:
value (int): value for IDD Field `Number of Timesteps to be Logged`
Raises:
ValueError: if `value` is not a valid value
Returns:
int: the value of `number_of_timesteps_to_be_logged` or None if not set
"""
return self["Number of Timesteps to be Logged"]
@number_of_timesteps_to_be_logged.setter
def number_of_timesteps_to_be_logged(self, value=None):
"""Corresponds to IDD field `Number of Timesteps to be Logged`"""
self["Number of Timesteps to be Logged"] = value
class EnergyManagementSystemInternalVariable(DataObject):
""" Corresponds to IDD object `EnergyManagementSystem:InternalVariable`
Declares EMS variable as an internal data variable
"""
_schema = {'extensible-fields': OrderedDict(),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'internal data index key name',
{'name': u'Internal Data Index Key Name',
'pyname': u'internal_data_index_key_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'internal data type',
{'name': u'Internal Data Type',
'pyname': u'internal_data_type',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'})]),
'format': None,
'group': u'Energy Management System',
'min-fields': 3,
'name': u'EnergyManagementSystem:InternalVariable',
'pyname': u'EnergyManagementSystemInternalVariable',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
| This name becomes a variable for use in Erl programs
| no spaces allowed in name
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
@property
def internal_data_index_key_name(self):
"""field `Internal Data Index Key Name`
Args:
value (str): value for IDD Field `Internal Data Index Key Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `internal_data_index_key_name` or None if not set
"""
return self["Internal Data Index Key Name"]
@internal_data_index_key_name.setter
def internal_data_index_key_name(self, value=None):
"""Corresponds to IDD field `Internal Data Index Key Name`"""
self["Internal Data Index Key Name"] = value
@property
def internal_data_type(self):
"""field `Internal Data Type`
Args:
value (str): value for IDD Field `Internal Data Type`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `internal_data_type` or None if not set
"""
return self["Internal Data Type"]
@internal_data_type.setter
def internal_data_type(self, value=None):
"""Corresponds to IDD field `Internal Data Type`"""
self["Internal Data Type"] = value
class EnergyManagementSystemCurveOrTableIndexVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:CurveOrTableIndexVariable`
        Declares EMS variable that identifies a curve or table
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'curve or table object name',
                                       {'name': u'Curve or Table Object Name',
                                        'pyname': u'curve_or_table_object_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:CurveOrTableIndexVariable',
               'pyname': u'EnergyManagementSystemCurveOrTableIndexVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  This name becomes a variable for use in Erl programs
        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def curve_or_table_object_name(self):
        """field `Curve or Table Object Name`

        Args:
            value (str): value for IDD Field `Curve or Table Object Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `curve_or_table_object_name` or None if not set

        """
        return self["Curve or Table Object Name"]

    @curve_or_table_object_name.setter
    def curve_or_table_object_name(self, value=None):
        """Corresponds to IDD field `Curve or Table Object Name`"""
        self["Curve or Table Object Name"] = value
class EnergyManagementSystemConstructionIndexVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:ConstructionIndexVariable`
        Declares EMS variable that identifies a construction
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'construction object name',
                                       {'name': u'Construction Object Name',
                                        'pyname': u'construction_object_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:ConstructionIndexVariable',
               'pyname': u'EnergyManagementSystemConstructionIndexVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  This name becomes a variable for use in Erl programs
        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def construction_object_name(self):
        """field `Construction Object Name`

        Args:
            value (str): value for IDD Field `Construction Object Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `construction_object_name` or None if not set

        """
        return self["Construction Object Name"]

    @construction_object_name.setter
    def construction_object_name(self, value=None):
        """Corresponds to IDD field `Construction Object Name`"""
        self["Construction Object Name"] = value
class EnergyManagementSystemProgram(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:Program`
        This input defines an Erl program
        Each field after the name is a line of EMS Runtime Language
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Program lines are extensible:
    # the object accepts an arbitrary number of them. Auto-generated.
    _schema = {'extensible-fields': OrderedDict([(u'program line 1',
                                                  {'name': u'Program Line 1',
                                                   'pyname': u'program_line_1',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:Program',
               'pyname': u'EnergyManagementSystemProgram',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    def add_extensible(self,
                       program_line_1=None,
                       ):
        """Add values for extensible fields.

        Args:
            program_line_1 (str): value for IDD Field `Program Line 1`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value

        """
        vals = []
        # Validate the value against the IDD spec before storing it.
        program_line_1 = self.check_value("Program Line 1", program_line_1)
        vals.append(program_line_1)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values

        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class EnergyManagementSystemSubroutine(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:Subroutine`
        This input defines an Erl program subroutine
        Each field after the name is a line of EMS Runtime Language
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Program lines are extensible:
    # the object accepts an arbitrary number of them. Auto-generated.
    _schema = {'extensible-fields': OrderedDict([(u'program line',
                                                  {'name': u'Program Line',
                                                   'pyname': u'program_line',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:Subroutine',
               'pyname': u'EnergyManagementSystemSubroutine',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    def add_extensible(self,
                       program_line=None,
                       ):
        """Add values for extensible fields.

        Args:
            program_line (str): value for IDD Field `Program Line`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value

        """
        vals = []
        # Validate the value against the IDD spec before storing it.
        program_line = self.check_value("Program Line", program_line)
        vals.append(program_line)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values

        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class EnergyManagementSystemGlobalVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:GlobalVariable`
        Declares Erl variable as having global scope
        No spaces allowed in names used for Erl variables
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. This object has no fixed
    # fields: every variable name is an extensible entry. Auto-generated.
    _schema = {'extensible-fields': OrderedDict([(u'erl variable 1 name',
                                                  {'name': u'Erl Variable 1 Name',
                                                   'pyname': u'erl_variable_1_name',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict(),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 1,
               'name': u'EnergyManagementSystem:GlobalVariable',
               'pyname': u'EnergyManagementSystemGlobalVariable',
               'required-object': False,
               'unique-object': False}

    def add_extensible(self,
                       erl_variable_1_name=None,
                       ):
        """Add values for extensible fields.

        Args:
            erl_variable_1_name (str): value for IDD Field `Erl Variable 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value

        """
        vals = []
        # Validate the value against the IDD spec before storing it.
        erl_variable_1_name = self.check_value(
            "Erl Variable 1 Name",
            erl_variable_1_name)
        vals.append(erl_variable_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values

        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class EnergyManagementSystemSensor(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:Sensor`
        Declares EMS variable as a sensor
        a list of output variables and meters that can be reported are available after a run on
        the report (.rdd) or meter dictionary file (.mdd) if the Output:VariableDictionary
        has been requested.
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'output:variable or output:meter index key name',
                                       {'name': u'Output:Variable or Output:Meter Index Key Name',
                                        'pyname': u'outputvariable_or_outputmeter_index_key_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'output:variable or output:meter name',
                                       {'name': u'Output:Variable or Output:Meter Name',
                                        'pyname': u'outputvariable_or_outputmeter_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'external-list'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 3,
               'name': u'EnergyManagementSystem:Sensor',
               'pyname': u'EnergyManagementSystemSensor',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  This name becomes a variable for use in Erl programs
        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def outputvariable_or_outputmeter_index_key_name(self):
        """field `Output:Variable or Output:Meter Index Key Name`

        Args:
            value (str): value for IDD Field `Output:Variable or Output:Meter Index Key Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `outputvariable_or_outputmeter_index_key_name` or None if not set

        """
        return self["Output:Variable or Output:Meter Index Key Name"]

    @outputvariable_or_outputmeter_index_key_name.setter
    def outputvariable_or_outputmeter_index_key_name(self, value=None):
        """ Corresponds to IDD field `Output:Variable or Output:Meter Index Key Name`
        """
        self["Output:Variable or Output:Meter Index Key Name"] = value

    @property
    def outputvariable_or_outputmeter_name(self):
        """field `Output:Variable or Output:Meter Name`

        Args:
            value (str): value for IDD Field `Output:Variable or Output:Meter Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `outputvariable_or_outputmeter_name` or None if not set

        """
        return self["Output:Variable or Output:Meter Name"]

    @outputvariable_or_outputmeter_name.setter
    def outputvariable_or_outputmeter_name(self, value=None):
        """ Corresponds to IDD field `Output:Variable or Output:Meter Name`
        """
        self["Output:Variable or Output:Meter Name"] = value
class EnergyManagementSystemActuator(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:Actuator`
        Hardware portion of EMS used to set up actuators in the model
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'actuated component unique name',
                                       {'name': u'Actuated Component Unique Name',
                                        'pyname': u'actuated_component_unique_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'actuated component type',
                                       {'name': u'Actuated Component Type',
                                        'pyname': u'actuated_component_type',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'actuated component control type',
                                       {'name': u'Actuated Component Control Type',
                                        'pyname': u'actuated_component_control_type',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 4,
               'name': u'EnergyManagementSystem:Actuator',
               'pyname': u'EnergyManagementSystemActuator',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  This name becomes a variable for use in Erl programs
        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def actuated_component_unique_name(self):
        """field `Actuated Component Unique Name`

        Args:
            value (str): value for IDD Field `Actuated Component Unique Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `actuated_component_unique_name` or None if not set

        """
        return self["Actuated Component Unique Name"]

    @actuated_component_unique_name.setter
    def actuated_component_unique_name(self, value=None):
        """Corresponds to IDD field `Actuated Component Unique Name`"""
        self["Actuated Component Unique Name"] = value

    @property
    def actuated_component_type(self):
        """field `Actuated Component Type`

        Args:
            value (str): value for IDD Field `Actuated Component Type`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `actuated_component_type` or None if not set

        """
        return self["Actuated Component Type"]

    @actuated_component_type.setter
    def actuated_component_type(self, value=None):
        """Corresponds to IDD field `Actuated Component Type`"""
        self["Actuated Component Type"] = value

    @property
    def actuated_component_control_type(self):
        """field `Actuated Component Control Type`

        Args:
            value (str): value for IDD Field `Actuated Component Control Type`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `actuated_component_control_type` or None if not set

        """
        return self["Actuated Component Control Type"]

    @actuated_component_control_type.setter
    def actuated_component_control_type(self, value=None):
        """Corresponds to IDD field `Actuated Component Control Type`"""
        self["Actuated Component Control Type"] = value
class EnergyManagementSystemProgramCallingManager(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:ProgramCallingManager`
        Input EMS program. a program needs a name
        a description of when it should be called
        and then lines of program code for EMS Runtime language
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. The manager references one or
    # more programs through extensible `Program Name N` fields, and the
    # calling point is restricted to the accepted-values list below.
    _schema = {'extensible-fields': OrderedDict([(u'program name 1',
                                                  {'name': u'Program Name 1',
                                                   'pyname': u'program_name_1',
                                                   'required-field': True,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'object-list'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'energyplus model calling point',
                                       {'name': u'EnergyPlus Model Calling Point',
                                        'pyname': u'energyplus_model_calling_point',
                                        'required-field': False,
                                        'autosizable': False,
                                        'accepted-values': [u'BeginNewEnvironment',
                                                            u'AfterNewEnvironmentWarmUpIsComplete',
                                                            u'BeginTimestepBeforePredictor',
                                                            u'AfterPredictorBeforeHVACManagers',
                                                            u'AfterPredictorAfterHVACManagers',
                                                            u'InsideHVACSystemIterationLoop',
                                                            u'EndOfZoneTimestepBeforeZoneReporting',
                                                            u'EndOfZoneTimestepAfterZoneReporting',
                                                            u'EndOfSystemTimestepBeforeHVACReporting',
                                                            u'EndOfSystemTimestepAfterHVACReporting',
                                                            u'EndOfZoneSizing',
                                                            u'EndOfSystemSizing',
                                                            u'AfterComponentInputReadIn',
                                                            u'UserDefinedComponentModel',
                                                            u'UnitarySystemSizing'],
                                        'autocalculatable': False,
                                        'type': 'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 3,
               'name': u'EnergyManagementSystem:ProgramCallingManager',
               'pyname': u'EnergyManagementSystemProgramCallingManager',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def energyplus_model_calling_point(self):
        """field `EnergyPlus Model Calling Point`

        Args:
            value (str): value for IDD Field `EnergyPlus Model Calling Point`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `energyplus_model_calling_point` or None if not set

        """
        return self["EnergyPlus Model Calling Point"]

    @energyplus_model_calling_point.setter
    def energyplus_model_calling_point(self, value=None):
        """Corresponds to IDD field `EnergyPlus Model Calling Point`"""
        self["EnergyPlus Model Calling Point"] = value

    def add_extensible(self,
                       program_name_1=None,
                       ):
        """Add values for extensible fields.

        Args:
            program_name_1 (str): value for IDD Field `Program Name 1`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value

        """
        vals = []
        # Validate the value against the IDD spec before storing it.
        program_name_1 = self.check_value("Program Name 1", program_name_1)
        vals.append(program_name_1)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values

        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class EnergyManagementSystemOutputVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:OutputVariable`
        This object sets up an EnergyPlus output variable from an Erl variable
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'ems variable name',
                                       {'name': u'EMS Variable Name',
                                        'pyname': u'ems_variable_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'type of data in variable',
                                       {'name': u'Type of Data in Variable',
                                        'pyname': u'type_of_data_in_variable',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Averaged',
                                                            u'Summed'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'update frequency',
                                       {'name': u'Update Frequency',
                                        'pyname': u'update_frequency',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'ZoneTimestep',
                                                            u'SystemTimestep'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'ems program or subroutine name',
                                       {'name': u'EMS Program or Subroutine Name',
                                        'pyname': u'ems_program_or_subroutine_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'units',
                                       {'name': u'Units',
                                        'pyname': u'units',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 4,
               'name': u'EnergyManagementSystem:OutputVariable',
               'pyname': u'EnergyManagementSystemOutputVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def ems_variable_name(self):
        """field `EMS Variable Name`

        |  must be an acceptable EMS variable

        Args:
            value (str): value for IDD Field `EMS Variable Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `ems_variable_name` or None if not set

        """
        return self["EMS Variable Name"]

    @ems_variable_name.setter
    def ems_variable_name(self, value=None):
        """Corresponds to IDD field `EMS Variable Name`"""
        self["EMS Variable Name"] = value

    @property
    def type_of_data_in_variable(self):
        """field `Type of Data in Variable`

        Args:
            value (str): value for IDD Field `Type of Data in Variable`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `type_of_data_in_variable` or None if not set

        """
        return self["Type of Data in Variable"]

    @type_of_data_in_variable.setter
    def type_of_data_in_variable(self, value=None):
        """Corresponds to IDD field `Type of Data in Variable`"""
        self["Type of Data in Variable"] = value

    @property
    def update_frequency(self):
        """field `Update Frequency`

        Args:
            value (str): value for IDD Field `Update Frequency`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `update_frequency` or None if not set

        """
        return self["Update Frequency"]

    @update_frequency.setter
    def update_frequency(self, value=None):
        """Corresponds to IDD field `Update Frequency`"""
        self["Update Frequency"] = value

    @property
    def ems_program_or_subroutine_name(self):
        """field `EMS Program or Subroutine Name`

        |  optional for global scope variables, required for local scope variables

        Args:
            value (str): value for IDD Field `EMS Program or Subroutine Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `ems_program_or_subroutine_name` or None if not set

        """
        return self["EMS Program or Subroutine Name"]

    @ems_program_or_subroutine_name.setter
    def ems_program_or_subroutine_name(self, value=None):
        """Corresponds to IDD field `EMS Program or Subroutine Name`"""
        self["EMS Program or Subroutine Name"] = value

    @property
    def units(self):
        """field `Units`

        |  optional but will result in dimensionless units for blank
        |  EnergyPlus units are standard SI units

        Args:
            value (str): value for IDD Field `Units`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `units` or None if not set

        """
        return self["Units"]

    @units.setter
    def units(self, value=None):
        """Corresponds to IDD field `Units`"""
        self["Units"] = value
class EnergyManagementSystemMeteredOutputVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:MeteredOutputVariable`
        This object sets up an EnergyPlus output variable from an Erl variable
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Resource/group/end-use category
    # values are restricted to the accepted-values lists. Auto-generated.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'ems variable name',
                                       {'name': u'EMS Variable Name',
                                        'pyname': u'ems_variable_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'update frequency',
                                       {'name': u'Update Frequency',
                                        'pyname': u'update_frequency',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'ZoneTimestep',
                                                            u'SystemTimestep'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'ems program or subroutine name',
                                       {'name': u'EMS Program or Subroutine Name',
                                        'pyname': u'ems_program_or_subroutine_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'resource type',
                                       {'name': u'Resource Type',
                                        'pyname': u'resource_type',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Electricity',
                                                            u'NaturalGas',
                                                            u'Gasoline',
                                                            u'Diesel',
                                                            u'Coal',
                                                            u'FuelOil#1',
                                                            u'FuelOil#2',
                                                            u'Propane',
                                                            u'OtherFuel1',
                                                            u'OtherFuel2',
                                                            u'WaterUse',
                                                            u'OnSiteWaterProduced',
                                                            u'MainsWaterSupply',
                                                            u'RainWaterCollected',
                                                            u'WellWaterDrawn',
                                                            u'CondensateWaterCollected',
                                                            u'EnergyTransfer',
                                                            u'Steam',
                                                            u'DistrictCooling',
                                                            u'DistrictHeating',
                                                            u'ElectricityProducedOnSite',
                                                            u'SolarWaterHeating',
                                                            u'SolarAirHeating'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'group type',
                                       {'name': u'Group Type',
                                        'pyname': u'group_type',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Building',
                                                            u'HVAC',
                                                            u'Plant',
                                                            u'System'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'end-use category',
                                       {'name': u'End-Use Category',
                                        'pyname': u'enduse_category',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Heating',
                                                            u'Cooling',
                                                            u'InteriorLights',
                                                            u'ExteriorLights',
                                                            u'InteriorEquipment',
                                                            u'ExteriorEquipment',
                                                            u'Fans',
                                                            u'Pumps',
                                                            u'HeatRejection',
                                                            u'Humidifier',
                                                            u'HeatRecovery',
                                                            u'WaterSystems',
                                                            u'Refrigeration',
                                                            u'OnSiteGeneration',
                                                            u'HeatingCoils',
                                                            u'CoolingCoils',
                                                            u'Chillers',
                                                            u'Boilers',
                                                            u'Baseboard',
                                                            u'HeatRecoveryForCooling',
                                                            u'HeatRecoveryForHeating'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'end-use subcategory',
                                       {'name': u'End-Use Subcategory',
                                        'pyname': u'enduse_subcategory',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'units',
                                       {'name': u'Units',
                                        'pyname': u'units',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 7,
               'name': u'EnergyManagementSystem:MeteredOutputVariable',
               'pyname': u'EnergyManagementSystemMeteredOutputVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def ems_variable_name(self):
        """field `EMS Variable Name`

        |  must be an acceptable EMS variable, no spaces

        Args:
            value (str): value for IDD Field `EMS Variable Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `ems_variable_name` or None if not set

        """
        return self["EMS Variable Name"]

    @ems_variable_name.setter
    def ems_variable_name(self, value=None):
        """Corresponds to IDD field `EMS Variable Name`"""
        self["EMS Variable Name"] = value

    @property
    def update_frequency(self):
        """field `Update Frequency`

        Args:
            value (str): value for IDD Field `Update Frequency`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `update_frequency` or None if not set

        """
        return self["Update Frequency"]

    @update_frequency.setter
    def update_frequency(self, value=None):
        """Corresponds to IDD field `Update Frequency`"""
        self["Update Frequency"] = value

    @property
    def ems_program_or_subroutine_name(self):
        """field `EMS Program or Subroutine Name`

        |  optional for global scope variables, required for local scope variables

        Args:
            value (str): value for IDD Field `EMS Program or Subroutine Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `ems_program_or_subroutine_name` or None if not set

        """
        return self["EMS Program or Subroutine Name"]

    @ems_program_or_subroutine_name.setter
    def ems_program_or_subroutine_name(self, value=None):
        """Corresponds to IDD field `EMS Program or Subroutine Name`"""
        self["EMS Program or Subroutine Name"] = value

    @property
    def resource_type(self):
        """field `Resource Type`

        |  choose the type of fuel, water, electricity, pollution or heat rate that should be metered.

        Args:
            value (str): value for IDD Field `Resource Type`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `resource_type` or None if not set

        """
        return self["Resource Type"]

    @resource_type.setter
    def resource_type(self, value=None):
        """Corresponds to IDD field `Resource Type`"""
        self["Resource Type"] = value

    @property
    def group_type(self):
        """field `Group Type`

        |  choose a general classification, building (internal services), HVAC (air systems), or plant (hydronic systems), or system

        Args:
            value (str): value for IDD Field `Group Type`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `group_type` or None if not set

        """
        return self["Group Type"]

    @group_type.setter
    def group_type(self, value=None):
        """Corresponds to IDD field `Group Type`"""
        self["Group Type"] = value

    @property
    def enduse_category(self):
        """field `End-Use Category`

        |  choose how the metered output should be classified for end-use category

        Args:
            value (str): value for IDD Field `End-Use Category`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `enduse_category` or None if not set

        """
        return self["End-Use Category"]

    @enduse_category.setter
    def enduse_category(self, value=None):
        """ Corresponds to IDD field `End-Use Category`
        """
        self["End-Use Category"] = value

    @property
    def enduse_subcategory(self):
        """field `End-Use Subcategory`

        |  enter a user-defined subcategory for this metered output

        Args:
            value (str): value for IDD Field `End-Use Subcategory`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `enduse_subcategory` or None if not set

        """
        return self["End-Use Subcategory"]

    @enduse_subcategory.setter
    def enduse_subcategory(self, value=None):
        """ Corresponds to IDD field `End-Use Subcategory`
        """
        self["End-Use Subcategory"] = value

    @property
    def units(self):
        """field `Units`

        |  optional but will result in dimensionless units for blank
        |  EnergyPlus units are standard SI units

        Args:
            value (str): value for IDD Field `Units`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `units` or None if not set

        """
        return self["Units"]

    @units.setter
    def units(self, value=None):
        """Corresponds to IDD field `Units`"""
        self["Units"] = value
class EnergyManagementSystemTrendVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:TrendVariable`
        This object sets up an EMS trend variable from an Erl variable
        A trend variable logs values across timesteps
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Note the integer field with a
    # declared minimum of 1. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'ems variable name',
                                       {'name': u'EMS Variable Name',
                                        'pyname': u'ems_variable_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'number of timesteps to be logged',
                                       {'name': u'Number of Timesteps to be Logged',
                                        'pyname': u'number_of_timesteps_to_be_logged',
                                        'required-field': True,
                                        'autosizable': False,
                                        'minimum': 1,
                                        'autocalculatable': False,
                                        'type': u'integer'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 3,
               'name': u'EnergyManagementSystem:TrendVariable',
               'pyname': u'EnergyManagementSystemTrendVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def ems_variable_name(self):
        """field `EMS Variable Name`

        |  must be a global scope EMS variable

        Args:
            value (str): value for IDD Field `EMS Variable Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `ems_variable_name` or None if not set

        """
        return self["EMS Variable Name"]

    @ems_variable_name.setter
    def ems_variable_name(self, value=None):
        """Corresponds to IDD field `EMS Variable Name`"""
        self["EMS Variable Name"] = value

    @property
    def number_of_timesteps_to_be_logged(self):
        """field `Number of Timesteps to be Logged`

        |  value >= 1

        Args:
            value (int): value for IDD Field `Number of Timesteps to be Logged`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `number_of_timesteps_to_be_logged` or None if not set

        """
        return self["Number of Timesteps to be Logged"]

    @number_of_timesteps_to_be_logged.setter
    def number_of_timesteps_to_be_logged(self, value=None):
        """Corresponds to IDD field `Number of Timesteps to be Logged`"""
        self["Number of Timesteps to be Logged"] = value
class EnergyManagementSystemInternalVariable(DataObject):

    """ Corresponds to IDD object `EnergyManagementSystem:InternalVariable`
        Declares EMS variable as an internal data variable
    """
    # IDD schema metadata consumed by the DataObject base class for field
    # lookup, validation and serialization. Auto-generated — do not edit.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'internal data index key name',
                                       {'name': u'Internal Data Index Key Name',
                                        'pyname': u'internal_data_index_key_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'internal data type',
                                       {'name': u'Internal Data Type',
                                        'pyname': u'internal_data_type',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 3,
               'name': u'EnergyManagementSystem:InternalVariable',
               'pyname': u'EnergyManagementSystemInternalVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        |  This name becomes a variable for use in Erl programs
        |  no spaces allowed in name

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set

        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def internal_data_index_key_name(self):
        """field `Internal Data Index Key Name`

        Args:
            value (str): value for IDD Field `Internal Data Index Key Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `internal_data_index_key_name` or None if not set

        """
        return self["Internal Data Index Key Name"]

    @internal_data_index_key_name.setter
    def internal_data_index_key_name(self, value=None):
        """Corresponds to IDD field `Internal Data Index Key Name`"""
        self["Internal Data Index Key Name"] = value

    @property
    def internal_data_type(self):
        """field `Internal Data Type`

        Args:
            value (str): value for IDD Field `Internal Data Type`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `internal_data_type` or None if not set

        """
        return self["Internal Data Type"]

    @internal_data_type.setter
    def internal_data_type(self, value=None):
        """Corresponds to IDD field `Internal Data Type`"""
        self["Internal Data Type"] = value
class EnergyManagementSystemCurveOrTableIndexVariable(DataObject):
    """ Corresponds to IDD object `EnergyManagementSystem:CurveOrTableIndexVariable`
        Declares EMS variable that identifies a curve or table
    """
    # Auto-generated IDD schema consumed by the DataObject base class;
    # field names and their order are the contract — do not edit by hand.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'curve or table object name',
                                       {'name': u'Curve or Table Object Name',
                                        'pyname': u'curve_or_table_object_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:CurveOrTableIndexVariable',
               'pyname': u'EnergyManagementSystemCurveOrTableIndexVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`
        | This name becomes a variable for use in Erl programs
        | no spaces allowed in name
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def curve_or_table_object_name(self):
        """field `Curve or Table Object Name`
        Args:
            value (str): value for IDD Field `Curve or Table Object Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `curve_or_table_object_name` or None if not set
        """
        return self["Curve or Table Object Name"]

    @curve_or_table_object_name.setter
    def curve_or_table_object_name(self, value=None):
        """Corresponds to IDD field `Curve or Table Object Name`"""
        self["Curve or Table Object Name"] = value
class EnergyManagementSystemConstructionIndexVariable(DataObject):
    """ Corresponds to IDD object `EnergyManagementSystem:ConstructionIndexVariable`
        Declares EMS variable that identifies a construction
    """
    # Auto-generated IDD schema consumed by the DataObject base class;
    # field names and their order are the contract — do not edit by hand.
    _schema = {'extensible-fields': OrderedDict(),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'construction object name',
                                       {'name': u'Construction Object Name',
                                        'pyname': u'construction_object_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:ConstructionIndexVariable',
               'pyname': u'EnergyManagementSystemConstructionIndexVariable',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`
        | This name becomes a variable for use in Erl programs
        | no spaces allowed in name
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def construction_object_name(self):
        """field `Construction Object Name`
        Args:
            value (str): value for IDD Field `Construction Object Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `construction_object_name` or None if not set
        """
        return self["Construction Object Name"]

    @construction_object_name.setter
    def construction_object_name(self, value=None):
        """Corresponds to IDD field `Construction Object Name`"""
        self["Construction Object Name"] = value
class EnergyManagementSystemProgram(DataObject):
    """ Corresponds to IDD object `EnergyManagementSystem:Program`
        This input defines an Erl program
        Each field after the name is a line of EMS Runtime Language
    """
    # Auto-generated IDD schema; 'extensible-fields' describes the
    # repeating program-line field appended via add_extensible().
    _schema = {'extensible-fields': OrderedDict([(u'program line 1',
                                                  {'name': u'Program Line 1',
                                                   'pyname': u'program_line_1',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:Program',
               'pyname': u'EnergyManagementSystemProgram',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`
        | no spaces allowed in name
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    def add_extensible(self,
                       program_line_1=None,
                       ):
        """Add values for extensible fields.
        Args:
            program_line_1 (str): value for IDD Field `Program Line 1`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        # check_value validates against the schema entry for this field.
        program_line_1 = self.check_value("Program Line 1", program_line_1)
        vals.append(program_line_1)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class EnergyManagementSystemSubroutine(DataObject):
    """ Corresponds to IDD object `EnergyManagementSystem:Subroutine`
        This input defines an Erl program subroutine
        Each field after the name is a line of EMS Runtime Language
    """
    # Auto-generated IDD schema; mirrors EnergyManagementSystemProgram but
    # with field `Program Line` instead of `Program Line 1`.
    _schema = {'extensible-fields': OrderedDict([(u'program line',
                                                  {'name': u'Program Line',
                                                   'pyname': u'program_line',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'})]),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 2,
               'name': u'EnergyManagementSystem:Subroutine',
               'pyname': u'EnergyManagementSystemSubroutine',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`
        | no spaces allowed in name
        Args:
            value (str): value for IDD Field `Name`
        Raises:
            ValueError: if `value` is not a valid value
        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    def add_extensible(self,
                       program_line=None,
                       ):
        """Add values for extensible fields.
        Args:
            program_line (str): value for IDD Field `Program Line`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        program_line = self.check_value("Program Line", program_line)
        vals.append(program_line)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class EnergyManagementSystemGlobalVariable(DataObject):
    """ Corresponds to IDD object `EnergyManagementSystem:GlobalVariable`
        Declares Erl variable as having global scope
        No spaces allowed in names used for Erl variables
    """
    # Auto-generated IDD schema: this object has no fixed fields, only the
    # repeating Erl-variable-name extensible field.
    _schema = {'extensible-fields': OrderedDict([(u'erl variable 1 name',
                                                  {'name': u'Erl Variable 1 Name',
                                                   'pyname': u'erl_variable_1_name',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'alpha'})]),
               'fields': OrderedDict(),
               'format': None,
               'group': u'Energy Management System',
               'min-fields': 1,
               'name': u'EnergyManagementSystem:GlobalVariable',
               'pyname': u'EnergyManagementSystemGlobalVariable',
               'required-object': False,
               'unique-object': False}

    def add_extensible(self,
                       erl_variable_1_name=None,
                       ):
        """Add values for extensible fields.
        Args:
            erl_variable_1_name (str): value for IDD Field `Erl Variable 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        erl_variable_1_name = self.check_value(
            "Erl Variable 1 Name",
            erl_variable_1_name)
        vals.append(erl_variable_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`
        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
import sys
import weakref
import operator
from random import randint
from array import array
from .base import (
lib,
ffi,
NULL,
NoValue,
_check,
_build_range,
_get_select_op,
_get_bin_op,
)
from . import types, binaryop, monoid, unaryop
from .vector import Vector
from .scalar import Scalar
from .semiring import Semiring, current_semiring
from .binaryop import BinaryOp, current_accum, current_binop
from .unaryop import UnaryOp
from .monoid import Monoid, current_monoid
from . import descriptor
from .descriptor import Descriptor, Default, TransposeA, current_desc
__all__ = ['Matrix']
class Matrix:
"""GraphBLAS Sparse Matrix
This is a high-level wrapper around the GrB_Matrix type.
"""
__slots__ = ('matrix', 'type', '_funcs', '_keep_alives')
    def __init__(self, matrix, typ=None, **options):
        """Wrap an existing `GrB_Matrix*` cffi handle.

        Args:
            matrix: an ffi 'GrB_Matrix*' pointer; ownership transfers to
                this wrapper, which frees it in __del__.
            typ: the wrapper type object; when None it is looked up from
                the underlying GraphBLAS object via GxB_Matrix_type.
            **options: forwarded to options_set() (hyper / format).
        """
        if typ is None:
            new_type = ffi.new('GrB_Type*')
            _check(lib.GxB_Matrix_type(new_type, matrix[0]))
            typ = types.gb_type_to_type(new_type[0])
        self.matrix = matrix
        self.type = typ
        # Keeps Scalar thunks alive while the C side may still reference
        # them (see select()); entries disappear when the key dies.
        self._keep_alives = weakref.WeakKeyDictionary()
        if options:
            self.options_set(**options)

    def __del__(self):
        # Release the underlying GraphBLAS matrix with the wrapper.
        _check(lib.GrB_Matrix_free(self.matrix))
    @classmethod
    def sparse(cls, typ, nrows=0, ncols=0, **options):
        """Create an empty Matrix from the given type, number of rows, and
        number of columns.
        """
        new_mat = ffi.new('GrB_Matrix*')
        _check(lib.GrB_Matrix_new(new_mat, typ.gb_type, nrows, ncols))
        m = cls(new_mat, typ, **options)
        return m

    @classmethod
    def dense(cls, typ, nrows, ncols, fill=None, **options):
        """Create a matrix with every position populated.

        Args:
            fill: value stored in every entry; defaults to the type's
                zero element.
        """
        m = cls.sparse(typ, nrows, ncols, **options)
        if fill is None:
            fill = m.type.zero
        # Scalar assignment over the full slice fills all nrows*ncols
        # positions (see assign_scalar / __setitem__).
        m[:,:] = fill
        return m
@classmethod
def from_lists(cls, I, J, V, nrows=None, ncols=None, typ=None, **options):
"""Create a new matrix from the given lists of row indices, column
indices, and values. If nrows or ncols are not provided, they
are computed from the max values of the provides row and
column indices lists.
"""
assert len(I) == len(J) == len(V)
if not nrows:
nrows = max(I) + 1
if not ncols:
ncols = max(J) + 1
# TODO use ffi and GrB_Matrix_build
if typ is None:
typ = types._gb_from_type(type(V[0]))
m = cls.sparse(typ, nrows, ncols, **options)
for i, j, v in zip(I, J, V):
m[i, j] = v
return m
    @classmethod
    def from_mm(cls, mm_file, typ, **options):
        """Create a new matrix by reading a Matrix Market file.
        """
        m = ffi.new('GrB_Matrix*')
        # Wrap first so options are applied; LAGraph then fills the
        # handle in place.
        i = cls(m, typ, **options)
        _check(lib.LAGraph_mmread(m, mm_file))
        return i

    @classmethod
    def from_tsv(cls, tsv_file, typ, nrows, ncols, **options):
        """Create a new matrix by reading a tab separated value file.
        """
        m = ffi.new('GrB_Matrix*')
        i = cls(m, typ, **options)
        _check(lib.LAGraph_tsvread(m, tsv_file, typ.gb_type, nrows, ncols))
        return i

    @classmethod
    def from_binfile(cls, bin_file):
        """Create a new matrix by reading a SuiteSparse specific binary file.
        """
        m = ffi.new('GrB_Matrix*')
        _check(lib.LAGraph_binread(m, bin_file))
        # No typ given: the wrapper infers it from the loaded matrix.
        return cls(m)
    @classmethod
    def random(cls, typ, nrows, ncols, nvals,
               make_pattern=False, make_symmetric=False,
               make_skew_symmetric=False, make_hermitian=True,
               no_diagonal=False, seed=None, **options):
        """Create a new random Matrix of the given type, number of rows,
        columns and values. Other flags set additional properties the
        matrix will hold.
        """
        result = ffi.new('GrB_Matrix*')
        i = cls(result, typ, **options)
        fseed = ffi.new('uint64_t*')
        # Without an explicit seed, draw one so each call differs.
        if seed is None:
            seed = randint(0, sys.maxsize)
        fseed[0] = seed
        _check(lib.LAGraph_random(
            result,
            typ.gb_type,
            nrows,
            ncols,
            nvals,
            make_pattern,
            make_symmetric,
            make_skew_symmetric,
            make_hermitian,
            no_diagonal,
            fseed))
        return i

    @classmethod
    def identity(cls, typ, nrows, **options):
        """Create an nrows x nrows matrix with the type's one element on
        the diagonal.
        """
        result = cls.sparse(typ, nrows, nrows, **options)
        for i in range(nrows):
            result[i,i] = result.type.one
        return result
    @property
    def gb_type(self):
        """Return the GraphBLAS low-level type object of the Matrix.
        """
        new_type = ffi.new('GrB_Type*')
        _check(lib.GxB_Matrix_type(new_type, self.matrix[0]))
        return new_type[0]

    @property
    def nrows(self):
        """Return the number of Matrix rows.
        """
        n = ffi.new('GrB_Index*')
        _check(lib.GrB_Matrix_nrows(n, self.matrix[0]))
        return n[0]

    @property
    def ncols(self):
        """Return the number of Matrix columns.
        """
        n = ffi.new('GrB_Index*')
        _check(lib.GrB_Matrix_ncols(n, self.matrix[0]))
        return n[0]

    @property
    def shape(self):
        """Numpy-like description of matrix shape.
        """
        return (self.nrows, self.ncols)

    @property
    def square(self):
        """True when the matrix has as many rows as columns."""
        return self.nrows == self.ncols

    @property
    def nvals(self):
        """Return the number of Matrix values.
        """
        n = ffi.new('GrB_Index*')
        _check(lib.GrB_Matrix_nvals(n, self.matrix[0]))
        return n[0]

    @property
    def T(self):
        """Numpy-style transpose shorthand; returns a new matrix."""
        return self.transpose()
    def dup(self, **options):
        """Create a duplicate Matrix (deep copy of values and pattern).
        """
        new_mat = ffi.new('GrB_Matrix*')
        _check(lib.GrB_Matrix_dup(new_mat, self.matrix[0]))
        return self.__class__(new_mat, self.type, **options)
def options_set(self, hyper=None, format=None):
if hyper:
hyper = ffi.cast('double', hyper)
_check(lib.GxB_Matrix_Option_set(
self.matrix[0],
lib.GxB_HYPER,
hyper))
if format:
format = ffi.cast('GxB_Format_Value', format)
_check(lib.GxB_Matrix_Option_set(
self.matrix[0],
lib.GxB_FORMAT,
format))
    def options_get(self):
        """Return the current storage options as a tuple
        (hyper_ratio, format, is_hyper).
        """
        hyper = ffi.new('double*')
        _check(lib.GxB_Matrix_Option_get(
            self.matrix[0],
            lib.GxB_HYPER,
            hyper
            ))
        format = ffi.new('GxB_Format_Value*')
        _check(lib.GxB_Matrix_Option_get(
            self.matrix[0],
            lib.GxB_FORMAT,
            format
            ))
        is_hyper = ffi.new('bool*')
        _check(lib.GxB_Matrix_Option_get(
            self.matrix[0],
            lib.GxB_IS_HYPER,
            is_hyper
            ))
        return (hyper[0], format[0], is_hyper[0])
    def pattern(self, typ=types.BOOL):
        """Return the pattern of the matrix, this is a boolean Matrix where
        every present value in this matrix is set to True.
        """
        r = ffi.new('GrB_Matrix*')
        _check(lib.LAGraph_pattern(r, self.matrix[0], typ.gb_type))
        return Matrix(r, typ)

    def to_mm(self, fileobj):
        """Write this matrix to a file using the Matrix Market format.
        """
        _check(lib.LAGraph_mmwrite(self.matrix[0], fileobj))

    def to_binfile(self, filename, comments=NULL):
        """Write this matrix using custom SuiteSparse binary format.
        """
        _check(lib.LAGraph_binwrite(self.matrix, filename, comments))

    def to_lists(self):
        """Extract the rows, columns and values of the Matrix as 3 lists.
        """
        I = ffi.new('GrB_Index[%s]' % self.nvals)
        J = ffi.new('GrB_Index[%s]' % self.nvals)
        V = self.type.ffi.new(self.type.C + '[%s]' % self.nvals)
        n = ffi.new('GrB_Index*')
        n[0] = self.nvals
        _check(self.type.Matrix_extractTuples(
            I,
            J,
            V,
            n,
            self.matrix[0]
            ))
        # Values come back as C data; convert each to a Python value.
        return [list(I), list(J), list(map(self.type.to_value, V))]

    def clear(self):
        """Clear the matrix. This does not change the size but removes all
        values.
        """
        _check(lib.GrB_Matrix_clear(self.matrix[0]))

    def resize(self, nrows, ncols):
        """Resize the matrix. If the dimensions decrease, entries that fall
        outside the resized matrix are deleted.
        """
        _check(lib.GxB_Matrix_resize(
            self.matrix[0],
            nrows,
            ncols))
    def transpose(self, out=None, **kwargs):
        """ Transpose matrix. """
        if out is None:
            # With a TransposeA descriptor GrB_transpose is a no-op copy,
            # so the output keeps this matrix's own dimensions.
            new_dimensions = (self.nrows, self.ncols) if TransposeA in kwargs.get('desc', ()) \
                else (self.ncols, self.nrows)
            _out = ffi.new('GrB_Matrix*')
            _check(lib.GrB_Matrix_new(
                _out, self.type.gb_type, *new_dimensions))
            out = self.__class__(_out, self.type)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_transpose(
            out.matrix[0],
            mask,
            accum,
            self.matrix[0],
            desc
            ))
        return out
    def eadd(self, other, add_op=NULL, out=None, **kwargs):
        """Element-wise addition with other matrix.
        Element-wise addition applies a binary operator element-wise
        on two matrices A and B, for all entries that appear in the
        set intersection of the patterns of A and B. Other operators
        other than addition can be used.
        The pattern of the result of the element-wise addition is
        the set union of the pattern of A and B. Entries in neither in
        A nor in B do not appear in the result.
        The only difference between element-wise multiplication and
        addition is the pattern of the result, and what happens to
        entries outside the intersection. With multiplication the
        pattern of T is the intersection; with addition it is the set
        union. Entries outside the set intersection are dropped for
        multiplication, and kept for addition; in both cases the
        operator is only applied to those (and only those) entries in
        the intersection. Any binary operator can be used
        interchangeably for either operation.
        """
        # Resolve the operator: explicit arg > context var > PLUS default;
        # a string is looked up as a named binary op for this type.
        if add_op is NULL:
            add_op = current_binop.get(binaryop.PLUS)
        elif isinstance(add_op, str):
            add_op = _get_bin_op(add_op, self.type)
        add_op = add_op.get_binaryop(self, other)
        if out is None:
            _out = ffi.new('GrB_Matrix*')
            _check(lib.GrB_Matrix_new(
                _out, self.type.gb_type, self.nrows, self.ncols))
            out = Matrix(_out, self.type)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_eWiseAdd_Matrix_BinaryOp(
            out.matrix[0],
            mask,
            accum,
            add_op,
            self.matrix[0],
            other.matrix[0],
            desc))
        return out
    def emult(self, other, mult_op=NULL, out=None, **kwargs):
        """Element-wise multiplication with other matrix.
        Element-wise multiplication applies a binary operator
        element-wise on two matrices A and B, for all entries that
        appear in the set intersection of the patterns of A and B.
        Other operators other than addition can be used.
        The pattern of the result of the element-wise multiplication
        is exactly this set intersection. Entries in A but not B, or
        visa versa, do not appear in the result.
        """
        # Resolve the operator: explicit arg > context var > TIMES default.
        if mult_op is NULL:
            mult_op = current_binop.get(binaryop.TIMES)
        elif isinstance(mult_op, str):
            mult_op = _get_bin_op(mult_op, self.type)
        mult_op = mult_op.get_binaryop(self, other)
        if out is None:
            _out = ffi.new('GrB_Matrix*')
            _check(lib.GrB_Matrix_new(
                _out, self.type.gb_type, self.nrows, self.ncols))
            out = Matrix(_out, self.type)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_eWiseMult_Matrix_BinaryOp(
            out.matrix[0],
            mask,
            accum,
            mult_op,
            self.matrix[0],
            other.matrix[0],
            desc))
        return out
    def iseq(self, other):
        """Compare two matrices for equality.
        """
        result = ffi.new('_Bool*')
        eq_op = self.type.EQ.get_binaryop(self, other)
        _check(lib.LAGraph_isequal(
            result,
            self.matrix[0],
            other.matrix[0],
            eq_op))
        return result[0]

    def isne(self, other):
        """Compare two matrices for inequality.
        """
        return not self.iseq(other)

    # NOTE(review): these stubs make pickling silently produce an object
    # with no state rather than raising — presumably intentional to keep
    # copy/pickle from touching the C handle; confirm before relying on it.
    def __getstate__(self):
        pass

    def __setstate__(self, data):
        pass
    def __iter__(self):
        """Iterate over (row, col, value) triples of all stored entries."""
        nvals = self.nvals
        _nvals = ffi.new('GrB_Index[1]', [nvals])
        I = ffi.new('GrB_Index[%s]' % nvals)
        J = ffi.new('GrB_Index[%s]' % nvals)
        X = self.type.ffi.new('%s[%s]' % (self.type.C, nvals))
        _check(self.type.Matrix_extractTuples(
            I,
            J,
            X,
            _nvals,
            self.matrix[0]
            ))
        return zip(I, J, map(self.type.to_value, X))

    def to_arrays(self):
        """Extract entries as three array.array objects (I, J, values).

        Raises:
            TypeError: if the matrix type has no array typecode.
        """
        if self.type.typecode is None:
            raise TypeError('This matrix has no array typecode.')
        nvals = self.nvals
        _nvals = ffi.new('GrB_Index[1]', [nvals])
        I = ffi.new('GrB_Index[%s]' % nvals)
        J = ffi.new('GrB_Index[%s]' % nvals)
        X = self.type.ffi.new('%s[%s]' % (self.type.C, nvals))
        _check(self.type.Matrix_extractTuples(
            I,
            J,
            X,
            _nvals,
            self.matrix[0]
            ))
        # NOTE(review): typecode 'L' is platform unsigned long (32-bit on
        # some platforms) while GrB_Index is 64-bit — indices above 2**32
        # could overflow there; confirm target platforms.
        return array('L', I), array('L', J), array(self.type.typecode, X)
    @property
    def rows(self):
        """ An iterator of row indexes present in the matrix.
        """
        for i, j, v in self:
            yield i

    @property
    def cols(self):
        """ An iterator of column indexes present in the matrix.
        """
        for i, j, v in self:
            yield j

    @property
    def vals(self):
        """ An iterator of values present in the matrix.
        """
        for i, j, v in self:
            yield v

    def __len__(self):
        """len() is the number of stored values, not nrows*ncols."""
        return self.nvals
def __nonzero__(self):
return self.reduce_bool()
    # Arithmetic operators map onto element-wise add/mult; the in-place
    # variants reuse `self` as the output matrix.
    def __add__(self, other):
        return self.eadd(other)

    def __iadd__(self, other):
        return self.eadd(other, out=self)

    def __sub__(self, other):
        # Subtraction is addition of the additive inverse (see __neg__).
        return self + (-other)

    def __isub__(self, other):
        return self.eadd(-other, out=self)

    def __mul__(self, other):
        return self.emult(other)

    def __imul__(self, other):
        return self.emult(other, out=self)

    def __truediv__(self, other):
        return self.emult(other, mult_op=binaryop.DIV)

    def __itruediv__(self, other):
        return self.emult(other, mult_op=binaryop.DIV, out=self)

    def __invert__(self):
        # MINV: multiplicative inverse of each element.
        return self.apply(unaryop.MINV)

    def __neg__(self):
        # AINV: additive inverse of each element.
        return self.apply(unaryop.AINV)

    def __abs__(self):
        return self.apply(unaryop.ABS)
def __pow__(self, exponent):
if exponent == 0:
return self.__class__.identity(self.type, self.nrows)
if exponent == 1:
return self
result = self.dup()
for i in range(1, exponent):
result.mxm(self, out=result)
return result
    def reduce_bool(self, mon=NULL, **kwargs):
        """Reduce matrix to a boolean.
        """
        # Monoid resolution: explicit arg > context var > logical-OR.
        if mon is NULL:
            mon = current_monoid.get(types.BOOL.LOR_MONOID)
        mon = mon.get_monoid(self)
        result = ffi.new('_Bool*')
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_Matrix_reduce_BOOL(
            result,
            accum,
            mon,
            self.matrix[0],
            desc))
        return result[0]

    def reduce_int(self, mon=NULL, **kwargs):
        """Reduce matrix to an integer.
        """
        if mon is NULL:
            mon = current_monoid.get(types.INT64.PLUS_MONOID)
        mon = mon.get_monoid(self)
        result = ffi.new('int64_t*')
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_Matrix_reduce_INT64(
            result,
            accum,
            mon,
            self.matrix[0],
            desc))
        return result[0]

    def reduce_float(self, mon=NULL, **kwargs):
        """Reduce matrix to an float.
        """
        if mon is NULL:
            mon = current_monoid.get(self.type.PLUS_MONOID)
        mon = mon.get_monoid(self)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        result = ffi.new('double*')
        _check(lib.GrB_Matrix_reduce_FP64(
            result,
            accum,
            mon,
            self.matrix[0],
            desc))
        return result[0]

    def reduce_vector(self, mon=NULL, out=None, **kwargs):
        """Reduce matrix to a vector.
        """
        if mon is NULL:
            mon = current_monoid.get(getattr(self.type, 'PLUS_MONOID', NULL))
        mon = mon.get_monoid(self)
        if out is None:
            # One result entry per row.
            out = Vector.sparse(self.type, self.nrows)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_Matrix_reduce_Monoid(
            out.vector[0],
            mask,
            accum,
            mon,
            self.matrix[0],
            desc))
        return out
    def apply(self, op, out=None, **kwargs):
        """Apply Unary op to matrix elements.
        """
        if out is None:
            out = self.__class__.sparse(self.type, self.nrows, self.ncols)
        if isinstance(op, UnaryOp):
            op = op.get_unaryop(self)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_Matrix_apply(
            out.matrix[0],
            mask,
            accum,
            op,
            self.matrix[0],
            desc
            ))
        return out

    def select(self, op, thunk=NULL, out=NULL, **kwargs):
        """Select entries for which `op(entry, thunk)` holds.

        op may be a UnaryOp, a named select-op string, or a raw GxB
        select operator (see tril/triu/diag/offdiag/nonzero).
        """
        if out is NULL:
            out = self.__class__.sparse(self.type, self.nrows, self.ncols)
        if isinstance(op, UnaryOp):
            op = op.get_unaryop(self)
        elif isinstance(op, str):
            op = _get_select_op(op)
        if isinstance(thunk, (bool, int, float)):
            thunk = Scalar.from_value(thunk)
        if isinstance(thunk, Scalar):
            # Keep the Scalar wrapper alive as long as this matrix is:
            # the C call below only sees the raw GxB_Scalar handle.
            self._keep_alives[self.matrix] = thunk
            thunk = thunk.scalar[0]
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GxB_Matrix_select(
            out.matrix[0],
            mask,
            accum,
            op,
            self.matrix[0],
            thunk,
            desc
            ))
        return out
    # Convenience wrappers over select() using the built-in GxB select
    # operators; thunk shifts the diagonal where applicable.
    def tril(self, thunk=NULL):
        """Lower triangular part of the matrix."""
        return self.select(lib.GxB_TRIL, thunk=thunk)

    def triu(self, thunk=NULL):
        """Upper triangular part of the matrix."""
        return self.select(lib.GxB_TRIU, thunk=thunk)

    def diag(self, thunk=NULL):
        """Diagonal entries of the matrix."""
        return self.select(lib.GxB_DIAG, thunk=thunk)

    def offdiag(self, thunk=NULL):
        """All entries except the diagonal."""
        return self.select(lib.GxB_OFFDIAG, thunk=thunk)

    def nonzero(self):
        """Entries with nonzero value."""
        return self.select(lib.GxB_NONZERO)
    def full(self, identity=None):
        """Return a dense copy: missing positions are filled with
        `identity` (default: the type's one element), stored entries win.
        """
        B = self.__class__.sparse(self.type, self.nrows, self.ncols)
        if identity is None:
            identity = self.type.one
        _check(self.type.Matrix_assignScalar(
            B.matrix[0],
            NULL,
            NULL,
            identity,
            lib.GrB_ALL,
            0,
            lib.GrB_ALL,
            0,
            NULL))
        # FIRST keeps this matrix's value wherever both have an entry.
        return self.eadd(B, self.type.FIRST)

    def compare(self, other, op, strop):
        """Element-wise comparison producing a BOOL matrix.

        op is the Python operator (used only to test the scalar against
        0); strop is the named select/binary operator actually applied.
        """
        C = self.__class__.sparse(types.BOOL, self.nrows, self.ncols)
        if isinstance(other, (bool, int, float)):
            if op(other, 0):
                # Comparing against a value on the "wrong" side of zero:
                # densify with a constant matrix so missing entries
                # participate in the comparison.
                B = self.__class__.dup(self)
                B[:,:] = other
                self.emult(B, strop, out=C)
                return C
            else:
                # select keeps qualifying entries; ONE maps them to True.
                self.select(strop, other).apply(types.BOOL.ONE, out=C)
                return C
        elif isinstance(other, Matrix):
            A = self.full()
            B = other.full()
            A.emult(B, strop, out=C)
            return C
        else:
            raise NotImplementedError
    # Rich comparisons are element-wise and return a BOOL Matrix — note
    # that __eq__ therefore does NOT follow the usual equality/hash
    # convention; use iseq() for whole-matrix equality.
    def __gt__(self, other):
        return self.compare(other, operator.gt, '>')

    def __lt__(self, other):
        return self.compare(other, operator.lt, '<')

    def __ge__(self, other):
        return self.compare(other, operator.ge, '>=')

    def __le__(self, other):
        return self.compare(other, operator.le, '<=')

    def __eq__(self, other):
        return self.compare(other, operator.eq, '==')

    def __ne__(self, other):
        return self.compare(other, operator.ne, '!=')
    def _get_args(self,
                  mask=NULL, accum=NULL, semiring=NULL,
                  desc=Default):
        """Resolve the common GraphBLAS call arguments.

        Wrapper objects (Matrix/Vector mask, Semiring, BinaryOp accum,
        Descriptor) are unwrapped to their C handles; unset arguments
        fall back to the active context variables, then to defaults.
        """
        if isinstance(mask, Matrix):
            mask = mask.matrix[0]
        elif isinstance(mask, Vector):
            mask = mask.vector[0]
        if semiring is NULL:
            semiring = current_semiring.get(getattr(self.type, 'PLUS_TIMES', NULL))
        if isinstance(semiring, Semiring):
            semiring = semiring.get_semiring(self)
        if accum is NULL:
            accum = current_accum.get(NULL)
        if isinstance(accum, BinaryOp):
            accum = accum.get_binaryop(self)
        if desc is NULL:
            desc = current_desc.get(NULL)
        if isinstance(desc, Descriptor):
            desc = desc.desc[0]
        return mask, semiring, accum, desc
    def mxm(self, other, out=None, **kwargs):
        """Matrix-matrix multiply.
        """
        if out is None:
            out = self.__class__.sparse(self.type, self.nrows, other.ncols)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_mxm(
            out.matrix[0],
            mask,
            accum,
            semiring,
            self.matrix[0],
            other.matrix[0],
            desc))
        return out

    def mxv(self, other, out=None, **kwargs):
        """Matrix-vector multiply.
        """
        if out is None:
            # A transposed multiply yields a vector of length ncols.
            new_dimension = self.ncols if TransposeA in kwargs.get('desc', ()) \
                else self.nrows
            out = Vector.sparse(self.type, new_dimension)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GrB_mxv(
            out.vector[0],
            mask,
            accum,
            semiring,
            self.matrix[0],
            other.vector[0],
            desc))
        return out

    def __matmul__(self, other):
        """`A @ B` dispatches to mxm for matrices, mxv for vectors."""
        if isinstance(other, Matrix):
            return self.mxm(other)
        elif isinstance(other, Vector):
            return self.mxv(other)
        else:
            raise TypeError('Right argument to @ must be Matrix or Vector.')

    def __imatmul__(self, other):
        return self.mxm(other, out=self)
    def kron(self, other, op=NULL, out=None, **kwargs):
        """Kronecker product.
        """
        if out is None:
            # Result dimensions are the products of the operands'.
            out = self.__class__.sparse(
                self.type,
                self.nrows*other.nrows,
                self.ncols*other.ncols)
        if op is NULL:
            op = self.type.TIMES
        if isinstance(op, BinaryOp):
            op = op.get_binaryop(self, other)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        _check(lib.GxB_kron(
            out.matrix[0],
            mask,
            accum,
            op,
            self.matrix[0],
            other.matrix[0],
            desc))
        return out
    def extract_matrix(self, rindex=None, cindex=None, out=None, **kwargs):
        """Slice a submatrix.
        """
        # With a TransposeA descriptor the source is logically transposed,
        # so row/column bounds swap.
        ta = TransposeA in kwargs.get('desc', ())
        mask, semiring, accum, desc = self._get_args(**kwargs)
        result_nrows = self.ncols if ta else self.nrows
        result_ncols = self.nrows if ta else self.ncols
        I, ni, isize = _build_range(rindex, result_nrows - 1)
        J, nj, jsize = _build_range(cindex, result_ncols - 1)
        if isize is None:
            isize = result_nrows
        if jsize is None:
            jsize = result_ncols
        if out is None:
            out = self.__class__.sparse(self.type, isize, jsize)
        _check(lib.GrB_Matrix_extract(
            out.matrix[0],
            mask,
            accum,
            self.matrix[0],
            I,
            ni,
            J,
            nj,
            desc))
        return out

    def extract_col(self, col_index, row_slice=None, out=None, **kwargs):
        """Slice a column as subvector.
        Use `desc=TransposeA` to slice a row.
        """
        stop_val = self.ncols if TransposeA in kwargs.get('desc', ()) else self.nrows
        if out is None:
            out = Vector.sparse(self.type, stop_val)
        mask, semiring, accum, desc = self._get_args(**kwargs)
        I, ni, size = _build_range(row_slice, stop_val)
        _check(lib.GrB_Col_extract(
            out.vector[0],
            mask,
            accum,
            self.matrix[0],
            I,
            ni,
            col_index,
            desc
            ))
        return out

    def extract_row(self, row_index, col_slice=None, out=None, **kwargs):
        """Slice a row as subvector.
        """
        # A row of A is a column of A-transposed; combine TransposeA with
        # any caller-supplied descriptor.
        desc = TransposeA
        if 'desc' in kwargs:
            desc = desc | kwargs['desc']
        return self.extract_col(row_index, col_slice, out, desc=desc, **kwargs)
    def __getitem__(self, index):
        """Numpy-like read indexing: element, row/column slice, submatrix,
        or masked extraction with a Matrix index.
        """
        if isinstance(index, int):
            # a[3] extract single row
            return self.extract_row(index, None)
        if isinstance(index, slice):
            # a[3:] extract submatrix of rows
            return self.extract_matrix(index, None)
        if isinstance(index, Matrix):
            return self.extract_matrix(mask=index)
        if not isinstance(index, (tuple, list)):
            raise TypeError
        i0 = index[0]
        i1 = index[1]
        if isinstance(i0, int) and isinstance(i1, int):
            # a[3,3] extract single element
            result = self.type.ffi.new(self.type.ptr)
            _check(self.type.Matrix_extractElement(
                result,
                self.matrix[0],
                index[0],
                index[1]))
            return self.type.to_value(result[0])
        if isinstance(i0, int) and isinstance(i1, slice):
            # a[3,:] extract slice of row vector
            return self.extract_row(i0, i1)
        if isinstance(i0, slice) and isinstance(i1, int):
            # a[:,3] extract slice of col vector
            return self.extract_col(i1, i0)
        # a[:,:] or a[[0,1,2], [3,4,5]] extract submatrix with slice or row/col indices
        return self.extract_matrix(i0, i1)
def assign_col(self, col_index, value, row_slice=None, **kwargs):
"""Assign a vector to a column.
"""
stop_val = self.ncols if TransposeA in kwargs.get('desc', ()) else self.nrows
I, ni, size = _build_range(row_slice, stop_val)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Col_assign(
self.matrix[0],
mask,
accum,
value.vector[0],
I,
ni,
col_index,
desc
))
def assign_row(self, row_index, value, col_slice=None, **kwargs):
"""Assign a vector to a row.
"""
stop_val = self.nrows if TransposeA in kwargs.get('desc', ()) else self.ncols
I, ni, size = _build_range(col_slice, stop_val)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Row_assign(
self.matrix[0],
mask,
accum,
value.vector[0],
row_index,
I,
ni,
desc
))
def assign_matrix(self, value, rindex=None, cindex=None, **kwargs):
"""Assign a submatrix.
"""
I, ni, isize = _build_range(rindex, self.nrows - 1)
J, nj, jsize = _build_range(cindex, self.ncols - 1)
if isize is None:
isize = self.nrows
if jsize is None:
jsize = self.ncols
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Matrix_assign(
self.matrix[0],
mask,
accum,
value.matrix[0],
I,
ni,
J,
nj,
desc))
def assign_scalar(self, value, row_slice=None, col_slice=None, **kwargs):
    """Assign the scalar `value` to every position selected by
    `row_slice` x `col_slice`.

    Args:
        value: A bool/int/float written to every selected position.
        row_slice: Optional row selector; None means all rows.
        col_slice: Optional column selector; None means all columns.
        **kwargs: Optional mask/accum/desc arguments (see _get_args).
    """
    mask, semiring, accum, desc = self._get_args(**kwargs)
    # BUG FIX: the previous truthiness tests (`if row_slice:`) treated a
    # falsy selector such as 0 like "all rows/cols"; compare against None
    # explicitly so every non-None selector is honoured.
    if row_slice is not None:
        I, ni, _ = _build_range(row_slice, self.nrows - 1)
    else:
        I = lib.GrB_ALL
        ni = 0
    if col_slice is not None:
        J, nj, _ = _build_range(col_slice, self.ncols - 1)
    else:
        J = lib.GrB_ALL
        nj = 0
    # Dispatch on the Python type of the scalar to pick the typed
    # GrB_Matrix_assign_<T> binding.
    scalar_type = types._gb_from_type(type(value))
    _check(scalar_type.Matrix_assignScalar(
        self.matrix[0],
        mask,
        accum,
        value,
        I,
        ni,
        J,
        nj,
        desc
    ))
def __setitem__(self, index, value):
    """Numpy-like assignment into the matrix.

    Supported forms: A[i] = Vector, A[i:j] = Matrix/scalar,
    A[M] = scalar (masked), A[i, j] = scalar, A[i, :] / A[:, j] = Vector,
    A[:, :] = Matrix/scalar.

    Raises:
        TypeError: for unsupported index/value combinations.
        NotImplementedError: for masked matrix assignment A[M] = B.
    """
    if isinstance(index, int):
        # A[3] = v: assign a whole row from a vector.
        if isinstance(value, Vector):
            return self.assign_row(index, value)
        # BUG FIX: previously fell through to the tuple check and raised
        # a message-less TypeError; fail fast with a clear message.
        raise TypeError('Row assignment requires a Vector value.')
    if isinstance(index, slice):
        if isinstance(value, Matrix):
            # A[3:] = B: assign a submatrix to a range of rows.
            self.assign_matrix(value, index, None)
            return
        if isinstance(value, (bool, int, float)):
            # A[3:] = s: assign a scalar to a range of rows.
            # BUG FIX: the original fell through after a successful
            # assignment and raised a spurious TypeError; return here.
            self.assign_scalar(value, index, None)
            return
        raise TypeError('Slice assignment requires a Matrix or scalar value.')
    if isinstance(index, Matrix):
        if isinstance(value, Matrix):
            # A[M] = B masked matrix assignment
            raise NotImplementedError
        if not isinstance(value, (bool, int, float)):
            raise TypeError
        # A[M] = s masked scalar assignment
        self.assign_scalar(value, mask=index)
        return
    if not isinstance(index, (tuple, list)):
        raise TypeError
    i0 = index[0]
    i1 = index[1]
    if isinstance(i0, int) and isinstance(i1, int):
        # A[i, j] = v: set a single element.
        val = self.type.from_value(value)
        _check(self.type.Matrix_setElement(
            self.matrix[0],
            val,
            i0,
            i1))
        return
    if isinstance(i0, int) and isinstance(i1, slice):
        # A[i, :] = v: assign (part of) a row.
        self.assign_row(i0, value, i1)
        return
    if isinstance(i0, slice) and isinstance(i1, int):
        # A[:, j] = v: assign (part of) a column.
        self.assign_col(i1, value, i0)
        return
    if isinstance(i0, slice) and isinstance(i1, slice):
        if isinstance(value, (bool, int, float)):
            # A[:, :] = s: scalar broadcast over the selection.
            self.assign_scalar(value, i0, i1)
            return
        # A[:, :] = B: assign a submatrix.
        self.assign_matrix(value, i0, i1)
        return
    raise TypeError('Unknown index or value for matrix assignment.')
def __contains__(self, index):
    """Return True when `index` refers to a stored value."""
    try:
        self[index]
    except NoValue:
        return False
    return True
def get(self, i, j, default=None):
    """Like dict.get: return A[i, j], or `default` when no value is stored."""
    try:
        result = self[i, j]
    except NoValue:
        result = default
    return result
def to_string(self, format_string='{:>2}', empty_char=''):
    """Render the matrix as an ascii table.

    Each cell is formatted with `format_string`; missing entries render
    as `empty_char`. Column indices head and foot the table, row indices
    flank each row between '|' separators.
    """
    fmt = format_string.format
    header = fmt('') + ' ' + ''.join(fmt(col) for col in range(self.ncols))
    lines = [header]
    for row in range(self.nrows):
        cells = ''.join(
            fmt(self.get(row, col, empty_char)) for col in range(self.ncols))
        lines.append(fmt(row) + '|' + cells + '|' + fmt(row))
    lines.append(header)
    return '\n'.join(lines)
def __repr__(self):
return '<Matrix (%sx%s : %s:%s)>' % (
self.nrows,
self.ncols,
self.nvals,
self.type.__name__)

# ===== pygraphblas/matrix.py =====
import sys
import weakref
import operator
from random import randint
from array import array
from .base import (
lib,
ffi,
NULL,
NoValue,
_check,
_build_range,
_get_select_op,
_get_bin_op,
)
from . import types, binaryop, monoid, unaryop
from .vector import Vector
from .scalar import Scalar
from .semiring import Semiring, current_semiring
from .binaryop import BinaryOp, current_accum, current_binop
from .unaryop import UnaryOp
from .monoid import Monoid, current_monoid
from . import descriptor
from .descriptor import Descriptor, Default, TransposeA, current_desc
__all__ = ['Matrix']
class Matrix:
"""GraphBLAS Sparse Matrix
This is a high-level wrapper around the GrB_Matrix type.
"""
__slots__ = ('matrix', 'type', '_funcs', '_keep_alives')
def __init__(self, matrix, typ=None, **options):
if typ is None:
new_type = ffi.new('GrB_Type*')
_check(lib.GxB_Matrix_type(new_type, matrix[0]))
typ = types.gb_type_to_type(new_type[0])
self.matrix = matrix
self.type = typ
self._keep_alives = weakref.WeakKeyDictionary()
if options:
self.options_set(**options)
def __del__(self):
_check(lib.GrB_Matrix_free(self.matrix))
@classmethod
def sparse(cls, typ, nrows=0, ncols=0, **options):
"""Create an empty Matrix from the given type, number of rows, and
number of columns.
"""
new_mat = ffi.new('GrB_Matrix*')
_check(lib.GrB_Matrix_new(new_mat, typ.gb_type, nrows, ncols))
m = cls(new_mat, typ, **options)
return m
@classmethod
def dense(cls, typ, nrows, ncols, fill=None, **options):
m = cls.sparse(typ, nrows, ncols, **options)
if fill is None:
fill = m.type.zero
m[:,:] = fill
return m
@classmethod
def from_lists(cls, I, J, V, nrows=None, ncols=None, typ=None, **options):
"""Create a new matrix from the given lists of row indices, column
indices, and values. If nrows or ncols are not provided, they
are computed from the max values of the provides row and
column indices lists.
"""
assert len(I) == len(J) == len(V)
if not nrows:
nrows = max(I) + 1
if not ncols:
ncols = max(J) + 1
# TODO use ffi and GrB_Matrix_build
if typ is None:
typ = types._gb_from_type(type(V[0]))
m = cls.sparse(typ, nrows, ncols, **options)
for i, j, v in zip(I, J, V):
m[i, j] = v
return m
@classmethod
def from_mm(cls, mm_file, typ, **options):
"""Create a new matrix by reading a Matrix Market file.
"""
m = ffi.new('GrB_Matrix*')
i = cls(m, typ, **options)
_check(lib.LAGraph_mmread(m, mm_file))
return i
@classmethod
def from_tsv(cls, tsv_file, typ, nrows, ncols, **options):
"""Create a new matrix by reading a tab separated value file.
"""
m = ffi.new('GrB_Matrix*')
i = cls(m, typ, **options)
_check(lib.LAGraph_tsvread(m, tsv_file, typ.gb_type, nrows, ncols))
return i
@classmethod
def from_binfile(cls, bin_file):
"""Create a new matrix by reading a SuiteSparse specific binary file.
"""
m = ffi.new('GrB_Matrix*')
_check(lib.LAGraph_binread(m, bin_file))
return cls(m)
@classmethod
def random(cls, typ, nrows, ncols, nvals,
make_pattern=False, make_symmetric=False,
make_skew_symmetric=False, make_hermitian=True,
no_diagonal=False, seed=None, **options):
"""Create a new random Matrix of the given type, number of rows,
columns and values. Other flags set additional properties the
matrix will hold.
"""
result = ffi.new('GrB_Matrix*')
i = cls(result, typ, **options)
fseed = ffi.new('uint64_t*')
if seed is None:
seed = randint(0, sys.maxsize)
fseed[0] = seed
_check(lib.LAGraph_random(
result,
typ.gb_type,
nrows,
ncols,
nvals,
make_pattern,
make_symmetric,
make_skew_symmetric,
make_hermitian,
no_diagonal,
fseed))
return i
@classmethod
def identity(cls, typ, nrows, **options):
result = cls.sparse(typ, nrows, nrows, **options)
for i in range(nrows):
result[i,i] = result.type.one
return result
@property
def gb_type(self):
"""Return the GraphBLAS low-level type object of the Matrix.
"""
new_type = ffi.new('GrB_Type*')
_check(lib.GxB_Matrix_type(new_type, self.matrix[0]))
return new_type[0]
@property
def nrows(self):
"""Return the number of Matrix rows.
"""
n = ffi.new('GrB_Index*')
_check(lib.GrB_Matrix_nrows(n, self.matrix[0]))
return n[0]
@property
def ncols(self):
"""Return the number of Matrix columns.
"""
n = ffi.new('GrB_Index*')
_check(lib.GrB_Matrix_ncols(n, self.matrix[0]))
return n[0]
@property
def shape(self):
"""Numpy-like description of matrix shape.
"""
return (self.nrows, self.ncols)
@property
def square(self):
return self.nrows == self.ncols
@property
def nvals(self):
"""Return the number of Matrix values.
"""
n = ffi.new('GrB_Index*')
_check(lib.GrB_Matrix_nvals(n, self.matrix[0]))
return n[0]
@property
def T(self):
return self.transpose()
def dup(self, **options):
"""Create an duplicate Matrix.
"""
new_mat = ffi.new('GrB_Matrix*')
_check(lib.GrB_Matrix_dup(new_mat, self.matrix[0]))
return self.__class__(new_mat, self.type, **options)
def options_set(self, hyper=None, format=None):
    """Set SuiteSparse-specific storage options on the matrix.

    Args:
        hyper: hypersparsity ratio passed to GxB_HYPER.
        format: a GxB_Format_Value storage format enum.
    """
    # BUG FIX: compare against None instead of truthiness so legitimate
    # zero values (hyper=0.0, or a format enum whose value is 0) are not
    # silently ignored.
    if hyper is not None:
        hyper = ffi.cast('double', hyper)
        _check(lib.GxB_Matrix_Option_set(
            self.matrix[0],
            lib.GxB_HYPER,
            hyper))
    if format is not None:
        format = ffi.cast('GxB_Format_Value', format)
        _check(lib.GxB_Matrix_Option_set(
            self.matrix[0],
            lib.GxB_FORMAT,
            format))
def options_get(self):
hyper = ffi.new('double*')
_check(lib.GxB_Matrix_Option_get(
self.matrix[0],
lib.GxB_HYPER,
hyper
))
format = ffi.new('GxB_Format_Value*')
_check(lib.GxB_Matrix_Option_get(
self.matrix[0],
lib.GxB_FORMAT,
format
))
is_hyper = ffi.new('bool*')
_check(lib.GxB_Matrix_Option_get(
self.matrix[0],
lib.GxB_IS_HYPER,
is_hyper
))
return (hyper[0], format[0], is_hyper[0])
def pattern(self, typ=types.BOOL):
"""Return the pattern of the matrix, this is a boolean Matrix where
every present value in this matrix is set to True.
"""
r = ffi.new('GrB_Matrix*')
_check(lib.LAGraph_pattern(r, self.matrix[0], typ.gb_type))
return Matrix(r, typ)
def to_mm(self, fileobj):
"""Write this matrix to a file using the Matrix Market format.
"""
_check(lib.LAGraph_mmwrite(self.matrix[0], fileobj))
def to_binfile(self, filename, comments=NULL):
"""Write this matrix using custom SuiteSparse binary format.
"""
_check(lib.LAGraph_binwrite(self.matrix, filename, comments))
def to_lists(self):
"""Extract the rows, columns and values of the Matrix as 3 lists.
"""
I = ffi.new('GrB_Index[%s]' % self.nvals)
J = ffi.new('GrB_Index[%s]' % self.nvals)
V = self.type.ffi.new(self.type.C + '[%s]' % self.nvals)
n = ffi.new('GrB_Index*')
n[0] = self.nvals
_check(self.type.Matrix_extractTuples(
I,
J,
V,
n,
self.matrix[0]
))
return [list(I), list(J), list(map(self.type.to_value, V))]
def clear(self):
"""Clear the matrix. This does not change the size but removes all
values.
"""
_check(lib.GrB_Matrix_clear(self.matrix[0]))
def resize(self, nrows, ncols):
"""Resize the matrix. If the dimensions decrease, entries that fall
outside the resized matrix are deleted.
"""
_check(lib.GxB_Matrix_resize(
self.matrix[0],
nrows,
ncols))
def transpose(self, out=None, **kwargs):
""" Transpose matrix. """
if out is None:
new_dimensions = (self.nrows, self.ncols) if TransposeA in kwargs.get('desc', ()) \
else (self.ncols, self.nrows)
_out = ffi.new('GrB_Matrix*')
_check(lib.GrB_Matrix_new(
_out, self.type.gb_type, *new_dimensions))
out = self.__class__(_out, self.type)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_transpose(
out.matrix[0],
mask,
accum,
self.matrix[0],
desc
))
return out
def eadd(self, other, add_op=NULL, out=None, **kwargs):
"""Element-wise addition with other matrix.
Element-wise addition applies a binary operator element-wise
on two matrices A and B, for all entries that appear in the
set intersection of the patterns of A and B. Other operators
other than addition can be used.
The pattern of the result of the element-wise addition is
the set union of the pattern of A and B. Entries in neither in
A nor in B do not appear in the result.
The only difference between element-wise multiplication and
addition is the pattern of the result, and what happens to
entries outside the intersection. With multiplication the
pattern of T is the intersection; with addition it is the set
union. Entries outside the set intersection are dropped for
multiplication, and kept for addition; in both cases the
operator is only applied to those (and only those) entries in
the intersection. Any binary operator can be used
interchangeably for either operation.
"""
if add_op is NULL:
add_op = current_binop.get(binaryop.PLUS)
elif isinstance(add_op, str):
add_op = _get_bin_op(add_op, self.type)
add_op = add_op.get_binaryop(self, other)
if out is None:
_out = ffi.new('GrB_Matrix*')
_check(lib.GrB_Matrix_new(
_out, self.type.gb_type, self.nrows, self.ncols))
out = Matrix(_out, self.type)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_eWiseAdd_Matrix_BinaryOp(
out.matrix[0],
mask,
accum,
add_op,
self.matrix[0],
other.matrix[0],
desc))
return out
def emult(self, other, mult_op=NULL, out=None, **kwargs):
"""Element-wise multiplication with other matrix.
Element-wise multiplication applies a binary operator
element-wise on two matrices A and B, for all entries that
appear in the set intersection of the patterns of A and B.
Other operators other than addition can be used.
The pattern of the result of the element-wise multiplication
is exactly this set intersection. Entries in A but not B, or
visa versa, do not appear in the result.
"""
if mult_op is NULL:
mult_op = current_binop.get(binaryop.TIMES)
elif isinstance(mult_op, str):
mult_op = _get_bin_op(mult_op, self.type)
mult_op = mult_op.get_binaryop(self, other)
if out is None:
_out = ffi.new('GrB_Matrix*')
_check(lib.GrB_Matrix_new(
_out, self.type.gb_type, self.nrows, self.ncols))
out = Matrix(_out, self.type)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_eWiseMult_Matrix_BinaryOp(
out.matrix[0],
mask,
accum,
mult_op,
self.matrix[0],
other.matrix[0],
desc))
return out
def iseq(self, other):
"""Compare two matrices for equality.
"""
result = ffi.new('_Bool*')
eq_op = self.type.EQ.get_binaryop(self, other)
_check(lib.LAGraph_isequal(
result,
self.matrix[0],
other.matrix[0],
eq_op))
return result[0]
def isne(self, other):
"""Compare two matrices for inequality.
"""
return not self.iseq(other)
def __getstate__(self):
pass
def __setstate__(self, data):
pass
def __iter__(self):
nvals = self.nvals
_nvals = ffi.new('GrB_Index[1]', [nvals])
I = ffi.new('GrB_Index[%s]' % nvals)
J = ffi.new('GrB_Index[%s]' % nvals)
X = self.type.ffi.new('%s[%s]' % (self.type.C, nvals))
_check(self.type.Matrix_extractTuples(
I,
J,
X,
_nvals,
self.matrix[0]
))
return zip(I, J, map(self.type.to_value, X))
def to_arrays(self):
if self.type.typecode is None:
raise TypeError('This matrix has no array typecode.')
nvals = self.nvals
_nvals = ffi.new('GrB_Index[1]', [nvals])
I = ffi.new('GrB_Index[%s]' % nvals)
J = ffi.new('GrB_Index[%s]' % nvals)
X = self.type.ffi.new('%s[%s]' % (self.type.C, nvals))
_check(self.type.Matrix_extractTuples(
I,
J,
X,
_nvals,
self.matrix[0]
))
return array('L', I), array('L', J), array(self.type.typecode, X)
@property
def rows(self):
""" An iterator of row indexes present in the matrix.
"""
for i, j, v in self:
yield i
@property
def cols(self):
""" An iterator of column indexes present in the matrix.
"""
for i, j, v in self:
yield j
@property
def vals(self):
""" An iterator of values present in the matrix.
"""
for i, j, v in self:
yield v
def __len__(self):
return self.nvals
def __bool__(self):
    """Truthiness of the matrix via reduce_bool().

    BUG FIX: Python 3 calls __bool__, not the Python 2 name __nonzero__,
    so the original hook was never invoked. The old name is kept as an
    alias for any caller that invokes it explicitly.
    """
    return self.reduce_bool()

__nonzero__ = __bool__
def __add__(self, other):
return self.eadd(other)
def __iadd__(self, other):
return self.eadd(other, out=self)
def __sub__(self, other):
return self + (-other)
def __isub__(self, other):
return self.eadd(-other, out=self)
def __mul__(self, other):
return self.emult(other)
def __imul__(self, other):
return self.emult(other, out=self)
def __truediv__(self, other):
return self.emult(other, mult_op=binaryop.DIV)
def __itruediv__(self, other):
return self.emult(other, mult_op=binaryop.DIV, out=self)
def __invert__(self):
return self.apply(unaryop.MINV)
def __neg__(self):
return self.apply(unaryop.AINV)
def __abs__(self):
return self.apply(unaryop.ABS)
def __pow__(self, exponent):
    """Matrix power by repeated mxm.

    Args:
        exponent: non-negative integer power.

    Returns:
        A new Matrix; exponent 0 yields the identity.

    Raises:
        ValueError: when exponent is negative.
    """
    # BUG FIX: a negative exponent previously skipped the loop and
    # silently returned a copy of self; reject it explicitly.
    if exponent < 0:
        raise ValueError('exponent must be non-negative')
    if exponent == 0:
        return self.__class__.identity(self.type, self.nrows)
    # BUG FIX: exponent == 1 used to return self itself (aliased), unlike
    # every other exponent which returns a fresh matrix; always copy.
    result = self.dup()
    for _ in range(1, exponent):
        result.mxm(self, out=result)
    return result
def reduce_bool(self, mon=NULL, **kwargs):
"""Reduce matrix to a boolean.
"""
if mon is NULL:
mon = current_monoid.get(types.BOOL.LOR_MONOID)
mon = mon.get_monoid(self)
result = ffi.new('_Bool*')
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Matrix_reduce_BOOL(
result,
accum,
mon,
self.matrix[0],
desc))
return result[0]
def reduce_int(self, mon=NULL, **kwargs):
"""Reduce matrix to an integer.
"""
if mon is NULL:
mon = current_monoid.get(types.INT64.PLUS_MONOID)
mon = mon.get_monoid(self)
result = ffi.new('int64_t*')
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Matrix_reduce_INT64(
result,
accum,
mon,
self.matrix[0],
desc))
return result[0]
def reduce_float(self, mon=NULL, **kwargs):
"""Reduce matrix to an float.
"""
if mon is NULL:
mon = current_monoid.get(self.type.PLUS_MONOID)
mon = mon.get_monoid(self)
mask, semiring, accum, desc = self._get_args(**kwargs)
result = ffi.new('double*')
_check(lib.GrB_Matrix_reduce_FP64(
result,
accum,
mon,
self.matrix[0],
desc))
return result[0]
def reduce_vector(self, mon=NULL, out=None, **kwargs):
"""Reduce matrix to a vector.
"""
if mon is NULL:
mon = current_monoid.get(getattr(self.type, 'PLUS_MONOID', NULL))
mon = mon.get_monoid(self)
if out is None:
out = Vector.sparse(self.type, self.nrows)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Matrix_reduce_Monoid(
out.vector[0],
mask,
accum,
mon,
self.matrix[0],
desc))
return out
def apply(self, op, out=None, **kwargs):
"""Apply Unary op to matrix elements.
"""
if out is None:
out = self.__class__.sparse(self.type, self.nrows, self.ncols)
if isinstance(op, UnaryOp):
op = op.get_unaryop(self)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Matrix_apply(
out.matrix[0],
mask,
accum,
op,
self.matrix[0],
desc
))
return out
def select(self, op, thunk=NULL, out=NULL, **kwargs):
if out is NULL:
out = self.__class__.sparse(self.type, self.nrows, self.ncols)
if isinstance(op, UnaryOp):
op = op.get_unaryop(self)
elif isinstance(op, str):
op = _get_select_op(op)
if isinstance(thunk, (bool, int, float)):
thunk = Scalar.from_value(thunk)
if isinstance(thunk, Scalar):
self._keep_alives[self.matrix] = thunk
thunk = thunk.scalar[0]
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GxB_Matrix_select(
out.matrix[0],
mask,
accum,
op,
self.matrix[0],
thunk,
desc
))
return out
def tril(self, thunk=NULL):
return self.select(lib.GxB_TRIL, thunk=thunk)
def triu(self, thunk=NULL):
return self.select(lib.GxB_TRIU, thunk=thunk)
def diag(self, thunk=NULL):
return self.select(lib.GxB_DIAG, thunk=thunk)
def offdiag(self, thunk=NULL):
return self.select(lib.GxB_OFFDIAG, thunk=thunk)
def nonzero(self):
return self.select(lib.GxB_NONZERO)
def full(self, identity=None):
B = self.__class__.sparse(self.type, self.nrows, self.ncols)
if identity is None:
identity = self.type.one
_check(self.type.Matrix_assignScalar(
B.matrix[0],
NULL,
NULL,
identity,
lib.GrB_ALL,
0,
lib.GrB_ALL,
0,
NULL))
return self.eadd(B, self.type.FIRST)
def compare(self, other, op, strop):
    """Elementwise comparison helper backing <, <=, >, >=, ==, !=.

    Args:
        other: a scalar (bool/int/float) or another Matrix.
        op: Python comparison callable (operator.gt, ...), used only to
            choose the scalar strategy below.
        strop: the string form of the comparison ('>', '==', ...) passed
            on as a select operator / binary op name.

    Returns:
        A BOOL Matrix of comparison results.
    """
    C = self.__class__.sparse(types.BOOL, self.nrows, self.ncols)
    if isinstance(other, (bool, int, float)):
        # NOTE(review): op(other, 0) appears to decide whether entries
        # missing from the sparse pattern would satisfy the comparison,
        # forcing the dense emult path in that case — confirm.
        if op(other, 0):
            B = self.__class__.dup(self)
            B[:,:] = other
            self.emult(B, strop, out=C)
            return C
        else:
            # Select the qualifying entries, then map them all to True.
            self.select(strop, other).apply(types.BOOL.ONE, out=C)
            return C
    elif isinstance(other, Matrix):
        # Densify both operands so missing entries participate.
        A = self.full()
        B = other.full()
        A.emult(B, strop, out=C)
        return C
    else:
        raise NotImplementedError
def __gt__(self, other):
return self.compare(other, operator.gt, '>')
def __lt__(self, other):
return self.compare(other, operator.lt, '<')
def __ge__(self, other):
return self.compare(other, operator.ge, '>=')
def __le__(self, other):
return self.compare(other, operator.le, '<=')
def __eq__(self, other):
return self.compare(other, operator.eq, '==')
def __ne__(self, other):
return self.compare(other, operator.ne, '!=')
def _get_args(self,
              mask=NULL, accum=NULL, semiring=NULL,
              desc=Default):
    """Normalize the common GraphBLAS call arguments.

    Unwraps Matrix/Vector masks to their C handles, resolves the
    semiring/accum/descriptor from the ambient context-variable defaults
    when not supplied, and returns the 4-tuple
    (mask, semiring, accum, desc) ready for lib.* calls.
    """
    if isinstance(mask, Matrix):
        mask = mask.matrix[0]
    elif isinstance(mask, Vector):
        mask = mask.vector[0]
    if semiring is NULL:
        # Ambient default first, then the type's PLUS_TIMES if any.
        semiring = current_semiring.get(getattr(self.type, 'PLUS_TIMES', NULL))
    if isinstance(semiring, Semiring):
        semiring = semiring.get_semiring(self)
    if accum is NULL:
        accum = current_accum.get(NULL)
    if isinstance(accum, BinaryOp):
        accum = accum.get_binaryop(self)
    # NOTE(review): the parameter default is `Default`, not NULL, so this
    # branch only fires when a caller passes desc=NULL explicitly.
    if desc is NULL:
        desc = current_desc.get(NULL)
    if isinstance(desc, Descriptor):
        desc = desc.desc[0]
    return mask, semiring, accum, desc
def mxm(self, other, out=None, **kwargs):
"""Matrix-matrix multiply.
"""
if out is None:
out = self.__class__.sparse(self.type, self.nrows, other.ncols)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_mxm(
out.matrix[0],
mask,
accum,
semiring,
self.matrix[0],
other.matrix[0],
desc))
return out
def mxv(self, other, out=None, **kwargs):
"""Matrix-vector multiply.
"""
if out is None:
new_dimension = self.ncols if TransposeA in kwargs.get('desc', ()) \
else self.nrows
out = Vector.sparse(self.type, new_dimension)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_mxv(
out.vector[0],
mask,
accum,
semiring,
self.matrix[0],
other.vector[0],
desc))
return out
def __matmul__(self, other):
if isinstance(other, Matrix):
return self.mxm(other)
elif isinstance(other, Vector):
return self.mxv(other)
else:
raise TypeError('Right argument to @ must be Matrix or Vector.')
def __imatmul__(self, other):
return self.mxm(other, out=self)
def kron(self, other, op=NULL, out=None, **kwargs):
"""Kronecker product.
"""
if out is None:
out = self.__class__.sparse(
self.type,
self.nrows*other.nrows,
self.ncols*other.ncols)
if op is NULL:
op = self.type.TIMES
if isinstance(op, BinaryOp):
op = op.get_binaryop(self, other)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GxB_kron(
out.matrix[0],
mask,
accum,
op,
self.matrix[0],
other.matrix[0],
desc))
return out
def extract_matrix(self, rindex=None, cindex=None, out=None, **kwargs):
"""Slice a submatrix.
"""
ta = TransposeA in kwargs.get('desc', ())
mask, semiring, accum, desc = self._get_args(**kwargs)
result_nrows = self.ncols if ta else self.nrows
result_ncols = self.nrows if ta else self.ncols
I, ni, isize = _build_range(rindex, result_nrows - 1)
J, nj, jsize = _build_range(cindex, result_ncols - 1)
if isize is None:
isize = result_nrows
if jsize is None:
jsize = result_ncols
if out is None:
out = self.__class__.sparse(self.type, isize, jsize)
_check(lib.GrB_Matrix_extract(
out.matrix[0],
mask,
accum,
self.matrix[0],
I,
ni,
J,
nj,
desc))
return out
def extract_col(self, col_index, row_slice=None, out=None, **kwargs):
"""Slice a column as subvector.
Use `desc=TransposeA` to slice a row.
"""
stop_val = self.ncols if TransposeA in kwargs.get('desc', ()) else self.nrows
if out is None:
out = Vector.sparse(self.type, stop_val)
mask, semiring, accum, desc = self._get_args(**kwargs)
I, ni, size = _build_range(row_slice, stop_val)
_check(lib.GrB_Col_extract(
out.vector[0],
mask,
accum,
self.matrix[0],
I,
ni,
col_index,
desc
))
return out
def extract_row(self, row_index, col_slice=None, out=None, **kwargs):
"""Slice a row as subvector.
"""
desc = TransposeA
if 'desc' in kwargs:
desc = desc | kwargs['desc']
return self.extract_col(row_index, col_slice, out, desc=desc, **kwargs)
def __getitem__(self, index):
if isinstance(index, int):
# a[3] extract single row
return self.extract_row(index, None)
if isinstance(index, slice):
# a[3:] extract submatrix of rows
return self.extract_matrix(index, None)
if isinstance(index, Matrix):
return self.extract_matrix(mask=index)
if not isinstance(index, (tuple, list)):
raise TypeError
i0 = index[0]
i1 = index[1]
if isinstance(i0, int) and isinstance(i1, int):
# a[3,3] extract single element
result = self.type.ffi.new(self.type.ptr)
_check(self.type.Matrix_extractElement(
result,
self.matrix[0],
index[0],
index[1]))
return self.type.to_value(result[0])
if isinstance(i0, int) and isinstance(i1, slice):
# a[3,:] extract slice of row vector
return self.extract_row(i0, i1)
if isinstance(i0, slice) and isinstance(i1, int):
# a[:,3] extract slice of col vector
return self.extract_col(i1, i0)
# a[:,:] or a[[0,1,2], [3,4,5]] extract submatrix with slice or row/col indices
return self.extract_matrix(i0, i1)
def assign_col(self, col_index, value, row_slice=None, **kwargs):
"""Assign a vector to a column.
"""
stop_val = self.ncols if TransposeA in kwargs.get('desc', ()) else self.nrows
I, ni, size = _build_range(row_slice, stop_val)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Col_assign(
self.matrix[0],
mask,
accum,
value.vector[0],
I,
ni,
col_index,
desc
))
def assign_row(self, row_index, value, col_slice=None, **kwargs):
"""Assign a vector to a row.
"""
stop_val = self.nrows if TransposeA in kwargs.get('desc', ()) else self.ncols
I, ni, size = _build_range(col_slice, stop_val)
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Row_assign(
self.matrix[0],
mask,
accum,
value.vector[0],
row_index,
I,
ni,
desc
))
def assign_matrix(self, value, rindex=None, cindex=None, **kwargs):
"""Assign a submatrix.
"""
I, ni, isize = _build_range(rindex, self.nrows - 1)
J, nj, jsize = _build_range(cindex, self.ncols - 1)
if isize is None:
isize = self.nrows
if jsize is None:
jsize = self.ncols
mask, semiring, accum, desc = self._get_args(**kwargs)
_check(lib.GrB_Matrix_assign(
self.matrix[0],
mask,
accum,
value.matrix[0],
I,
ni,
J,
nj,
desc))
def assign_scalar(self, value, row_slice=None, col_slice=None, **kwargs):
mask, semiring, accum, desc = self._get_args(**kwargs)
if row_slice:
I, ni, isize = _build_range(row_slice, self.nrows - 1)
else:
I = lib.GrB_ALL
ni = 0
if col_slice:
J, nj, jsize = _build_range(col_slice, self.ncols - 1)
else:
J = lib.GrB_ALL
nj = 0
scalar_type = types._gb_from_type(type(value))
_check(scalar_type.Matrix_assignScalar(
self.matrix[0],
mask,
accum,
value,
I,
ni,
J,
nj,
desc
))
def __setitem__(self, index, value):
if isinstance(index, int):
# A[3] = assign single row vector
if isinstance(value, Vector):
return self.assign_row(index, value)
if isinstance(index, slice):
# A[3:] = assign submatrix to rows
if isinstance(value, Matrix):
self.assign_matrix(value, index, None)
return
if isinstance(value, (bool, int, float)):
self.assign_scalar(value, index, None)
if isinstance(index, Matrix):
if isinstance(value, Matrix):
# A[M] = B masked matrix assignment
raise NotImplementedError
if not isinstance(value, (bool, int, float)):
raise TypeError
# A[M] = s masked scalar assignment
self.assign_scalar(value, mask=index)
return
if not isinstance(index, (tuple, list)):
raise TypeError
i0 = index[0]
i1 = index[1]
if isinstance(i0, int) and isinstance(i1, int):
val = self.type.from_value(value)
_check(self.type.Matrix_setElement(
self.matrix[0],
val,
i0,
i1))
return
if isinstance(i0, int) and isinstance(i1, slice):
# a[3,:] assign slice of row vector or scalar
self.assign_row(i0, value, i1)
return
if isinstance(i0, slice) and isinstance(i1, int):
# a[:,3] extract slice of col vector or scalar
self.assign_col(i1, value, i0)
return
if isinstance(i0, slice) and isinstance(i1, slice):
if isinstance(value, (bool, int, float)):
self.assign_scalar(value, i0, i1)
return
# a[:,:] assign submatrix
self.assign_matrix(value, i0, i1)
return
raise TypeError('Unknown index or value for matrix assignment.')
def __contains__(self, index):
try:
v = self[index]
return True
except NoValue:
return False
def get(self, i, j, default=None):
try:
return self[i,j]
except NoValue:
return default
def to_string(self, format_string='{:>2}', empty_char=''):
header = format_string.format('') + ' ' + ''.join(format_string.format(i) for i in range(self.ncols))
result = header + '\n'
for row in range(self.nrows):
result += format_string.format(row) + '|'
for col in range(self.ncols):
value = self.get(row, col, empty_char)
result += format_string.format(value)
result += '|' + format_string.format(row) + '\n'
result += header
return result
def __repr__(self):
    """Terse description: dimensions, stored-value count and type name."""
    return '<Matrix (%sx%s : %s:%s)>' % (
        self.nrows, self.ncols, self.nvals, self.type.__name__)
from __future__ import absolute_import
import HLL
class Counter(object):
    """Base class for streaming aggregation operators.

    Keeps the operator spec string and exposes the running result via
    the read-only `value` property; subclasses implement `append`.
    """

    def __init__(self, op):
        self._value = None
        self.__op = op

    @property
    def op(self):
        """The operator specification string this counter was built from."""
        return self.__op

    @property
    def value(self):
        """The aggregate computed so far (None until data arrives)."""
        return self._value

    def append(self, raw):
        """Fold one raw observation into the aggregate."""
        raise NotImplementedError('Please implement this method.')
class CountOp(Counter):
    """Counts how many items were appended."""

    def __init__(self, *args, **kwargs):
        super(CountOp, self).__init__(*args, **kwargs)
        self._value = 0

    def append(self, raw):
        # The raw value itself is irrelevant; only its arrival counts.
        self._value = self._value + 1
class HLLOp(Counter):
    """Approximate distinct-count using a HyperLogLog sketch.

    NOTE(review): relies on the third-party `HLL` module; by HyperLogLog
    convention `k` is the register-count precision and `seed` the hash
    seed — confirm against the HLL package documentation.
    """

    def __init__(self, k, seed, *args, **kwargs):
        super(HLLOp, self).__init__(*args, **kwargs)
        self._value = HLL.HyperLogLog(int(k), int(seed))

    def append(self, raw):
        self._value.add(raw)

    # Override the inherited getter: expose the estimated cardinality as
    # an int rather than the sketch object stored in _value.
    @Counter.value.getter
    def value(self):
        return int(self._value.cardinality())
class FirstOp(Counter):
    """Remembers the first value appended."""

    def __init__(self, *args, **kwargs):
        super(FirstOp, self).__init__(*args, **kwargs)

    def append(self, raw):
        # Only the very first observation sticks; later ones are ignored.
        if self._value is None:
            self._value = raw
class LastOp(Counter):
    """Remembers the most recent value appended."""

    def __init__(self, *args, **kwargs):
        super(LastOp, self).__init__(*args, **kwargs)

    def append(self, raw):
        # Every observation overwrites the previous one.
        self._value = raw
class NumericCounter(Counter):
    """Base for counters that aggregate numeric observations.

    Raw inputs are coerced to int (preferred) or float; inputs that
    parse as neither are silently discarded.
    """

    def __init__(self, *args, **kwargs):
        super(NumericCounter, self).__init__(*args, **kwargs)

    def append(self, raw):
        number = self._raw2number(raw)
        if number is None:
            return
        self._append_number(number)

    def _raw2number(self, raw):
        # Try the stricter int conversion first so "42" stays integral;
        # fall back to float for "3.5", "1e3", etc.
        for convert in (int, float):
            try:
                return convert(raw)
            except ValueError:
                continue
        return None

    def _append_number(self, value):
        raise NotImplementedError('Please implement this method.')
class AvgOp(NumericCounter):
    """Arithmetic mean of all appended numeric values.

    BUG FIX: the previous implementation computed
    `(old + new) / 2.0` on every append, which is an exponentially
    weighted average that over-weights recent values (1, 2, 3 yielded
    2.25 instead of 2.0). This version keeps a running count and
    maintains the true incremental mean.
    """

    def __init__(self, *args, **kwargs):
        super(AvgOp, self).__init__(*args, **kwargs)
        self._count = 0  # number of values folded in so far

    def _append_number(self, value):
        self._count += 1
        if self._value is None:
            self._value = float(value)
        else:
            # Incremental mean: m_k = m_{k-1} + (x_k - m_{k-1}) / k
            self._value += (value - self._value) / self._count
class MinOp(NumericCounter):
    """Tracks the smallest numeric value seen."""

    def __init__(self, *args, **kwargs):
        super(MinOp, self).__init__(*args, **kwargs)

    def _append_number(self, value):
        current = self._value
        if current is None:
            self._value = value
        elif value < current:
            self._value = value
class MaxOp(NumericCounter):
    """Tracks the largest numeric value seen."""

    def __init__(self, *args, **kwargs):
        super(MaxOp, self).__init__(*args, **kwargs)

    def _append_number(self, value):
        current = self._value
        if current is None:
            self._value = value
        elif value > current:
            self._value = value
# Registry mapping an operator keyword (the first comma-separated token
# of an op spec, e.g. "hll" in "hll,12,313") to its Counter class.
_OPERATORS = {
    'count': CountOp,
    'hll': HLLOp,
    'avg': AvgOp,
    'min': MinOp,
    'max': MaxOp,
    'first': FirstOp,
    'last': LastOp,
}
def instance(op):
    """Build a Counter from a spec string like "hll,12,313".

    The first comma-separated token selects the class from _OPERATORS;
    remaining tokens are passed as positional constructor arguments.
    Unknown operators or bad constructor arguments yield None.
    """
    items = op.split(',')
    klass = _OPERATORS.get(items[0], None)
    if klass is not None:
        try:
            return klass(*items[1:], op=op)
        except Exception:
            # BUG FIX: was a bare `except:`, which also swallowed
            # SystemExit/KeyboardInterrupt. Constructor failures (wrong
            # arity, non-numeric args) still fall through to None.
            pass
return None

# ===== vcc/counters.py =====
from __future__ import absolute_import
import HLL
class Counter(object):
def __init__(self, op):
self.__op = op
self._value = None
@property
def op(self):
return self.__op
@property
def value(self):
return self._value
def append(self, raw):
raise NotImplementedError('Please implement this method.')
class CountOp(Counter):
def __init__(self, *args, **kwargs):
super(CountOp, self).__init__(*args, **kwargs)
self._value = 0
def append(self, raw):
self._value += 1
class HLLOp(Counter):
def __init__(self, k, seed, *args, **kwargs):
super(HLLOp, self).__init__(*args, **kwargs)
self._value = HLL.HyperLogLog(int(k), int(seed))
def append(self, raw):
self._value.add(raw)
@Counter.value.getter
def value(self):
return int(self._value.cardinality())
class FirstOp(Counter):
def __init__(self, *args, **kwargs):
super(FirstOp, self).__init__(*args, **kwargs)
def append(self, raw):
if self._value is None:
self._value = raw
class LastOp(Counter):
def __init__(self, *args, **kwargs):
super(LastOp, self).__init__(*args, **kwargs)
def append(self, raw):
self._value = raw
class NumericCounter(Counter):
def __init__(self, *args, **kwargs):
super(NumericCounter, self).__init__(*args, **kwargs)
def append(self, raw):
value = self._raw2number(raw)
if value is not None:
self._append_number(value)
def _raw2number(self, raw):
try:
return int(raw)
except ValueError:
try:
return float(raw)
except ValueError:
return None
def _append_number(self, value):
raise NotImplementedError('Please implement this method.')
class AvgOp(NumericCounter):
def __init__(self, *args, **kwargs):
super(AvgOp, self).__init__(*args, **kwargs)
def _append_number(self, value):
if self._value is not None:
self._value = (self._value + value) / 2.0
else:
self._value = value
class MinOp(NumericCounter):
def __init__(self, *args, **kwargs):
super(MinOp, self).__init__(*args, **kwargs)
def _append_number(self, value):
if self._value is None or value < self._value:
self._value = value
class MaxOp(NumericCounter):
def __init__(self, *args, **kwargs):
super(MaxOp, self).__init__(*args, **kwargs)
def _append_number(self, value):
if self._value is None or value > self._value:
self._value = value
_OPERATORS = {
'count': CountOp,
'hll': HLLOp,
'avg': AvgOp,
'min': MinOp,
'max': MaxOp,
'first': FirstOp,
'last': LastOp,
}
def instance(op):
items = op.split(',')
klass = _OPERATORS.get(items[0], None)
if klass is not None:
try:
return klass(*items[1:], op=op)
except:
pass
return None
# (dataset residue: 0.679604 | 0.160135)
import numpy
from discrete_fuzzy_operators.base.operators.binary_operators.suboperators.fuzzy_aggregation_operator import \
DiscreteFuzzyAggregationBinaryOperator
from typing import Callable, Dict
class Uninorm(DiscreteFuzzyAggregationBinaryOperator):
def __init__(self, n: int, e: int,
operator_matrix: numpy.ndarray = None,
operator_expression: Callable[[int, int, int], int] = None,
operator_components: Dict[str, numpy.ndarray] = None):
"""
Initializes the object that represents a uninorm U: L x L -> L over a finite chain
L={0, 1, ..., n} from its matrix or its analytical expression.
Args:
n: An integer, representing the dimension of the space where the uninorm is defined.
operator_matrix: A two-dimensional matrix of integers, representing the images of the operator; that is,
in the row x and column y, the entry (x,y) represents the value of F(x, y).
operator_expression: A callable method with three parameters (x, y, n), which returns an integer value.
operator_components: A dictionary containing the components of the uninorms, which a t-norm in [0, e], a
t-conorm in [e, n], and two mappings in the compensation space
[0,e)x(e,n]U(e,n]x[0,e).
"""
if operator_matrix is None and operator_expression is None and operator_components is None:
raise Exception("To initialise a uninorm it is necessary to provide its matrix expression, a callable "
"method or its components.")
if not(operator_matrix is None or operator_expression is None):
super(Uninorm, self).__init__(n, operator_matrix, operator_expression)
self.e = e
else:
super(Uninorm, self).__init__(n, Uninorm.__generate_uninorm_matrix_from_components(n, e, operator_components))
self.e = e
if not(self.is_associative() and self.is_commutative() and self.checks_boundary_condition(element=self.e)):
raise Exception("With the input arguments, the generated operator is not a uninorm since not verifies "
"the associativity, the commutativity or the neutral element.")
    @staticmethod
    def __generate_uninorm_matrix_from_components(n: int, e: int, components: Dict[str, numpy.ndarray]) -> numpy.ndarray:
        """
        Generates the uninorm matrix representation from the components that define a uninorm: a t-norm in [0,e], a
        t-conorm in [e,n] and two mappings in the compensation space.
        Args:
            n: An integer, representing the dimension of the space where the uninorm is defined.
            e: An integer, representing the neutral element.
            components: A dictionary, which contains strings as keys and numpy arrays as values. Each pair key-value
                        represents a component of the uninorm.
        Returns:
            A numpy array, representing the matrix representation of the uninorm.
        """
        uninorm_matrix = numpy.zeros(shape=(n+1, n+1), dtype=int)
        # All four components are mandatory; fail fast on a malformed dict.
        if not("TNORM" in components and "TCONORM" in components and "CE_LEFT" in components and "CE_RIGHT" in components):
            raise Exception("The dictionary with the components does not have a correct key structure. ")
        tnorm_matrix = numpy.array(components["TNORM"])
        tconorm_matrix = numpy.array(components["TCONORM"])
        compensation_mapping_left_matrix = numpy.array(components["CE_LEFT"])
        compensation_mapping_right_matrix = numpy.array(components["CE_RIGHT"])
        # Verification of the shapes
        if not(tnorm_matrix.shape == (e+1, e+1) and tconorm_matrix.shape == (n-e+1, n-e+1) and
               compensation_mapping_left_matrix.shape == (n-e, e) and
               compensation_mapping_right_matrix.shape == (e, n-e)):
            raise Exception("The dimensions of the components of the uninorms are not correct for the initialization. "
                            "The t-norm matrix must be of size (e, e), the t-conorm matrix must be (n-e, n-e), the "
                            "left compensation mapping must be (n-e, e) and the right compensation mapping must be "
                            "(e, n-e).")
        # T-norm block on [0,e] x [0,e].
        for x in range(0, e+1):
            for y in range(0, e+1):
                uninorm_matrix[y, x] = tnorm_matrix[y, x]
        # T-conorm block on [e,n] x [e,n].
        # NOTE(review): entry (e, e) is written by both blocks; both should
        # hold e when e is the neutral element of each component -- confirm.
        for x in range(e, n+1):
            for y in range(e, n+1):
                uninorm_matrix[y, x] = tconorm_matrix[y-e, x-e]
        # Left compensation block [0,e) x (e,n] (offsets map into a (n-e, e) matrix).
        for x in range(0, e):
            for y in range(e+1, n+1):
                uninorm_matrix[y, x] = compensation_mapping_left_matrix[y-e-1, x]
        # Right compensation block (e,n] x [0,e).
        for x in range(e+1, n+1):
            for y in range(0, e):
                uninorm_matrix[y, x] = compensation_mapping_right_matrix[y, x-e-1]
        return uninorm_matrix | discrete_fuzzy_operators/base/operators/binary_operators/suboperators/fuzzy_aggregation_suboperators/uninorm.py | import numpy
from discrete_fuzzy_operators.base.operators.binary_operators.suboperators.fuzzy_aggregation_operator import \
DiscreteFuzzyAggregationBinaryOperator
from typing import Callable, Dict
class Uninorm(DiscreteFuzzyAggregationBinaryOperator):
def __init__(self, n: int, e: int,
operator_matrix: numpy.ndarray = None,
operator_expression: Callable[[int, int, int], int] = None,
operator_components: Dict[str, numpy.ndarray] = None):
"""
Initializes the object that represents a uninorm U: L x L -> L over a finite chain
L={0, 1, ..., n} from its matrix or its analytical expression.
Args:
n: An integer, representing the dimension of the space where the uninorm is defined.
operator_matrix: A two-dimensional matrix of integers, representing the images of the operator; that is,
in the row x and column y, the entry (x,y) represents the value of F(x, y).
operator_expression: A callable method with three parameters (x, y, n), which returns an integer value.
operator_components: A dictionary containing the components of the uninorms, which a t-norm in [0, e], a
t-conorm in [e, n], and two mappings in the compensation space
[0,e)x(e,n]U(e,n]x[0,e).
"""
if operator_matrix is None and operator_expression is None and operator_components is None:
raise Exception("To initialise a uninorm it is necessary to provide its matrix expression, a callable "
"method or its components.")
if not(operator_matrix is None or operator_expression is None):
super(Uninorm, self).__init__(n, operator_matrix, operator_expression)
self.e = e
else:
super(Uninorm, self).__init__(n, Uninorm.__generate_uninorm_matrix_from_components(n, e, operator_components))
self.e = e
if not(self.is_associative() and self.is_commutative() and self.checks_boundary_condition(element=self.e)):
raise Exception("With the input arguments, the generated operator is not a uninorm since not verifies "
"the associativity, the commutativity or the neutral element.")
@staticmethod
def __generate_uninorm_matrix_from_components(n: int, e: int, components: Dict[str, numpy.ndarray]) -> numpy.ndarray:
"""
Generates the uninorm matrix representation from the components that define a uninorm: a t-norm in [0,e], a
t-conorm in [e,n] and two mappings in the compensation space.
Args:
n: An integer, representing the dimension of the space where the uninorm is defined.
e: An integer, representing the neutral element.
components: A dictionary, which contains strings as keys and numpy arrays as values. Each pair key-value
represents a component of the uninorm.
Returns:
A numpy array, representing the matrix representation of the uninorm.
"""
uninorm_matrix = numpy.zeros(shape=(n+1, n+1), dtype=int)
if not("TNORM" in components and "TCONORM" in components and "CE_LEFT" in components and "CE_RIGHT" in components):
raise Exception("The dictionary with the components does not have a correct key structure. ")
tnorm_matrix = numpy.array(components["TNORM"])
tconorm_matrix = numpy.array(components["TCONORM"])
compensation_mapping_left_matrix = numpy.array(components["CE_LEFT"])
compensation_mapping_right_matrix = numpy.array(components["CE_RIGHT"])
# Verification of the shapes
if not(tnorm_matrix.shape == (e+1, e+1) and tconorm_matrix.shape == (n-e+1, n-e+1) and
compensation_mapping_left_matrix.shape == (n-e, e) and
compensation_mapping_right_matrix.shape == (e, n-e)):
raise Exception("The dimensions of the components of the uninorms are not correct for the initialization. "
"The t-norm matrix must be of size (e, e), the t-conorm matrix must be (n-e, n-e), the "
"left compensation mapping must be (n-e, e) and the right compensation mapping must be "
"(e, n-e).")
for x in range(0, e+1):
for y in range(0, e+1):
uninorm_matrix[y, x] = tnorm_matrix[y, x]
for x in range(e, n+1):
for y in range(e, n+1):
uninorm_matrix[y, x] = tconorm_matrix[y-e, x-e]
for x in range(0, e):
for y in range(e+1, n+1):
uninorm_matrix[y, x] = compensation_mapping_left_matrix[y-e-1, x]
for x in range(e+1, n+1):
for y in range(0, e):
uninorm_matrix[y, x] = compensation_mapping_right_matrix[y, x-e-1]
return uninorm_matrix | 0.899539 | 0.728555 |
from dataclasses import dataclass, is_dataclass, asdict
from typing import Callable, List, Union
# noinspection PyUnreachableCode
if False:
    # Development-time only: this branch never executes.  It exists so IDEs
    # can resolve the host-environment globals (COMP, ext, iop, Editor)
    # from the stub modules while editing.
    # noinspection PyUnresolvedReferences
    from _stubs import *
    from ..EditorExt import Editor
    ext.editor = Editor(None)
    iop.hostedComp = COMP()
@dataclass
class ToolContext:
    """Runtime information handed to a tool's action callback."""

    # Name under which the tool was invoked.
    toolName: str
    # The component currently hosted in the editor.
    component: 'COMP'
    # Network editor pane obtained from the editor at invocation time.
    editorPane: 'NetworkEditor'
@dataclass
class _ToolDefinition:
    """Declarative description of an editor tool.

    ``action`` receives a ToolContext when the tool is executed.  ``label``
    and ``icon`` are optional presentation hints; callers fall back to
    ``name`` when ``label`` is unset.
    """

    name: str
    action: Callable[[ToolContext], None]
    label: str = None
    icon: str = None

    @classmethod
    def parse(cls, spec: Union['_ToolDefinition', list, tuple, dict]) -> '_ToolDefinition':
        """Coerce *spec* into a _ToolDefinition.

        Accepts an existing instance (returned unchanged), any other
        dataclass (treated as a bag of constructor kwargs), a mapping
        (keyword arguments), or a sequence (positional arguments).
        """
        if isinstance(spec, _ToolDefinition):
            return spec
        if is_dataclass(spec):
            spec = asdict(spec)
        if isinstance(spec, dict):
            return cls(**spec)
        return cls(*spec)
class EditorTools:
    def __init__(self, ownerComp: 'COMP'):
        """Bind to the owning component and build the initial tool table."""
        self.ownerComp = ownerComp
        self.builtInTools = []  # type: List[_ToolDefinition]
        self.customTools = []  # type: List[_ToolDefinition]
        # Script DAT holding the user-supplied custom tools module.
        self.customToolsScript = self.ownerComp.op('customTools')  # type: DAT
        # Table DAT rebuilt by updateToolTable(); consumed by the UI.
        self.toolTable = self.ownerComp.op('set_tool_table')  # type: DAT
        self.initializeBuiltInTools()
        self.updateToolTable()
    def initializeBuiltInTools(self):
        """Register the tools that ship with the editor."""
        self.builtInTools = [
            # chr(0xF193): presumably a glyph in the editor's icon font -- confirm.
            _ToolDefinition('saveComponent', lambda ctx: ext.editor.SaveComponent(), icon=chr(0xF193))
        ]
def updateToolTable(self):
self.toolTable.clear()
self.toolTable.appendRow(['name', 'label', 'icon', 'category'])
for tool in self.builtInTools:
self.toolTable.appendRow([
tool.name,
tool.label or tool.name,
tool.icon or '',
'builtIn'
])
for tool in self.customTools:
self.toolTable.appendRow([
tool.name,
tool.label or tool.name,
tool.icon or '',
'custom'
])
def ClearCustomTools(self):
self.customToolsScript.clear()
self.ownerComp.par.Customtoolscriptfile = ''
self.ownerComp.par.Customtoolscript = ''
self.updateToolTable()
    def LoadCustomTools(self):
        """Reload custom tools from the configured file and/or inline script."""
        self.customTools.clear()
        self.customToolsScript.clear()
        file = self.ownerComp.par.Customtoolscriptfile.eval()
        if file:
            # Point the script DAT at the file and force a reload before parsing.
            self.customToolsScript.par.file = file
            self.customToolsScript.par.loadonstartpulse.pulse()
            self.addCustomToolsFromScript(self.customToolsScript)
        # NOTE(review): addCustomToolsFromScript expects a DAT-like object
        # with .text/.module; confirm Customtoolscript evaluates to an OP
        # reference rather than a plain string.
        self.addCustomToolsFromScript(self.ownerComp.par.Customtoolscript.eval())
        self.updateToolTable()
def addCustomToolsFromScript(self, script: 'DAT'):
if not script or not script.text:
return
try:
m = script.module
except Exception as e:
print(self.ownerComp, f'ERROR loading custom tools script: {e}')
return
if not hasattr(m, 'getEditorTools'):
print(self.ownerComp, 'ERROR: Custom tools script does not have `getEditorTools` function')
return
try:
specs = m.getEditorTools()
except Exception as e:
print(self.ownerComp, f'ERROR loading custom tools script: {e}')
return
if not specs:
return
for spec in specs:
try:
tool = _ToolDefinition.parse(spec)
except Exception as e:
print(self.ownerComp, f'ERROR parsing custom tool spec: {spec!r}\n{e}')
continue
self.customTools.append(tool)
def findTool(self, name: str):
# custom tools take precedence over built-in tools
for tool in self.customTools:
if tool.name == name:
return tool
for tool in self.builtInTools:
if tool.name == name:
return tool
    def ExecuteTool(self, name: str):
        """Run the named tool with the current editing context.

        Raises:
            Exception: if no tool with that name is registered.
        """
        tool = self.findTool(name)
        if not tool:
            raise Exception(f'Editor tool not found: {name}')
        # Context carries the hosted component and the active network editor
        # pane, both obtained from the editor globals.
        context = ToolContext(
            name,
            iop.hostedComp,
            ext.editor.GetActiveNetworkEditor())
        tool.action(context)
    def OnWorkspaceLoad(self):
        # Workspace lifecycle hook: (re)load custom tools for the new workspace.
        self.LoadCustomTools()
def OnWorkspaceUnload(self):
self.ClearCustomTools() | rack/editor/components/EditorToolsExt.py | from dataclasses import dataclass, is_dataclass, asdict
from typing import Callable, List, Union
# noinspection PyUnreachableCode
if False:
# noinspection PyUnresolvedReferences
from _stubs import *
from ..EditorExt import Editor
ext.editor = Editor(None)
iop.hostedComp = COMP()
@dataclass
class ToolContext:
toolName: str
component: 'COMP'
editorPane: 'NetworkEditor'
@dataclass
class _ToolDefinition:
name: str
action: Callable[[ToolContext], None]
label: str = None
icon: str = None
@classmethod
def parse(cls, spec: Union['_ToolDefinition', list, tuple, dict]) -> '_ToolDefinition':
if isinstance(spec, _ToolDefinition):
return spec
if is_dataclass(spec):
spec = asdict(spec)
if isinstance(spec, dict):
return cls(**spec)
return cls(*spec)
class EditorTools:
def __init__(self, ownerComp: 'COMP'):
self.ownerComp = ownerComp
self.builtInTools = [] # type: List[_ToolDefinition]
self.customTools = [] # type: List[_ToolDefinition]
self.customToolsScript = self.ownerComp.op('customTools') # type: DAT
self.toolTable = self.ownerComp.op('set_tool_table') # type: DAT
self.initializeBuiltInTools()
self.updateToolTable()
def initializeBuiltInTools(self):
self.builtInTools = [
_ToolDefinition('saveComponent', lambda ctx: ext.editor.SaveComponent(), icon=chr(0xF193))
]
def updateToolTable(self):
self.toolTable.clear()
self.toolTable.appendRow(['name', 'label', 'icon', 'category'])
for tool in self.builtInTools:
self.toolTable.appendRow([
tool.name,
tool.label or tool.name,
tool.icon or '',
'builtIn'
])
for tool in self.customTools:
self.toolTable.appendRow([
tool.name,
tool.label or tool.name,
tool.icon or '',
'custom'
])
def ClearCustomTools(self):
self.customToolsScript.clear()
self.ownerComp.par.Customtoolscriptfile = ''
self.ownerComp.par.Customtoolscript = ''
self.updateToolTable()
def LoadCustomTools(self):
self.customTools.clear()
self.customToolsScript.clear()
file = self.ownerComp.par.Customtoolscriptfile.eval()
if file:
self.customToolsScript.par.file = file
self.customToolsScript.par.loadonstartpulse.pulse()
self.addCustomToolsFromScript(self.customToolsScript)
self.addCustomToolsFromScript(self.ownerComp.par.Customtoolscript.eval())
self.updateToolTable()
def addCustomToolsFromScript(self, script: 'DAT'):
if not script or not script.text:
return
try:
m = script.module
except Exception as e:
print(self.ownerComp, f'ERROR loading custom tools script: {e}')
return
if not hasattr(m, 'getEditorTools'):
print(self.ownerComp, 'ERROR: Custom tools script does not have `getEditorTools` function')
return
try:
specs = m.getEditorTools()
except Exception as e:
print(self.ownerComp, f'ERROR loading custom tools script: {e}')
return
if not specs:
return
for spec in specs:
try:
tool = _ToolDefinition.parse(spec)
except Exception as e:
print(self.ownerComp, f'ERROR parsing custom tool spec: {spec!r}\n{e}')
continue
self.customTools.append(tool)
def findTool(self, name: str):
# custom tools take precedence over built-in tools
for tool in self.customTools:
if tool.name == name:
return tool
for tool in self.builtInTools:
if tool.name == name:
return tool
def ExecuteTool(self, name: str):
tool = self.findTool(name)
if not tool:
raise Exception(f'Editor tool not found: {name}')
context = ToolContext(
name,
iop.hostedComp,
ext.editor.GetActiveNetworkEditor())
tool.action(context)
def OnWorkspaceLoad(self):
self.LoadCustomTools()
def OnWorkspaceUnload(self):
self.ClearCustomTools() | 0.668447 | 0.085175 |
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from oslo_log import log as logging
import neutronclient.common.exceptions as neutron_exp
LOG = logging.getLogger(__name__)
class ProviderNetRange(neutron.NeutronResource):
    """A resource for managing WR Neutron Provider Network Range.
    The WR Neutron Provider Network Range adds range capabilities to the
    WR Neutron Provider Network resource.
    """

    # Payload key used in request/response bodies exchanged with neutronclient.
    neutron_api_key = 'providernet_range'

    # Template property names; the string values double as the neutron API
    # field names sent in the create/update payloads.
    PROPERTIES = (
        PROVIDERNET_ID, NAME,
        MINIMUM, MAXIMUM,
        DESCRIPTION, SHARED,
        TENANT_ID, GROUP,
        TTL, PORT,
    ) = (
        'providernet_id', 'name',
        'minimum', 'maximum',
        'description', 'shared',
        'tenant_id', 'group',
        'ttl', 'port',
    )

    # Resolvable attributes; names deliberately overlap with the property
    # constants above (NAME, DESCRIPTION, SHARED, MINIMUM, MAXIMUM).
    ATTRIBUTES = (
        NAME, DESCRIPTION, SHARED, MINIMUM, MAXIMUM,
    ) = (
        'name', 'description', 'shared', 'minimum', 'maximum'
    )

    properties_schema = {
        PROVIDERNET_ID: properties.Schema(
            properties.Schema.STRING,
            _('ID of the existing provider network.'),
            required=True,
        ),
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the provider network range.'),
            required=True,
        ),
        MINIMUM: properties.Schema(
            properties.Schema.NUMBER,
            _('Minimum value for the range for this provider network range.'),
            required=True,
            update_allowed=True,
        ),
        MAXIMUM: properties.Schema(
            properties.Schema.NUMBER,
            _('Maximum value for the range for this provider network range.'),
            required=True,
            update_allowed=True,
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description for this provider network range.'),
            update_allowed=True,
        ),
        SHARED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether provider network range is SHARED for all tenants.'),
            default=False,
        ),
        TENANT_ID: properties.Schema(
            properties.Schema.STRING,
            _('Tenant ID to assign to this range. '
              'Note: Only applied if range is not SHARED.'),
            constraints=[
                constraints.CustomConstraint('keystone.project')
            ],
        ),
        GROUP: properties.Schema(
            properties.Schema.STRING,
            _('Multicast IP addresses for VXLAN endpoints. '
              'Note: Only applied if provider net is VXLAN.'),
            update_allowed=True,
        ),
        TTL: properties.Schema(
            properties.Schema.NUMBER,
            _('Time-to-live value for VXLAN provider networks. '
              'Note: Only applied if provider net is VXLAN.'),
            update_allowed=True,
        ),
        PORT: properties.Schema(
            properties.Schema.NUMBER,
            _('Destination UDP port value to use for VXLAN provider networks. '
              'Note: Only valid values are 4789 or 8472. '
              'Note: Only applied if provider net is VXLAN. Default: 4789.'),
            update_allowed=True,
            constraints=[
                constraints.AllowedValues([4789, 8472]),
            ],
        ),
    }

    # Base class already has "show"
    attributes_schema = {
        NAME: attributes.Schema(
            _("The name of the provider network range."),
            type=attributes.Schema.STRING
        ),
        DESCRIPTION: attributes.Schema(
            _("The description of the provider network range."),
            type=attributes.Schema.STRING
        ),
        MAXIMUM: attributes.Schema(
            _('Maximum value for the range for this provider network range.'),
            type=attributes.Schema.NUMBER
        ),
        MINIMUM: attributes.Schema(
            _('Minimum value for the range for this provider network range.'),
            type=attributes.Schema.NUMBER
        ),
        SHARED: attributes.Schema(
            _('Whether this provider network range is shared or not.'),
            type=attributes.Schema.BOOLEAN
        ),
    }

    def validate(self):
        # NOTE(review): this override adds nothing beyond the base
        # implementation; kept only as an extension point.
        super(ProviderNetRange, self).validate()

    def prepare_properties(self, properties, name):
        """Prepare the create payload, resolving tenant name to project ID."""
        props = super(ProviderNetRange, self).prepare_properties(properties,
                                                                 name)
        tenant = props.get(self.TENANT_ID)
        if tenant:
            # keystone project-list is the same as openstack tenant list
            tenant_id = self.client_plugin('keystone').get_project_id(tenant)
            props[self.TENANT_ID] = tenant_id
        return props

    def handle_create(self):
        """Create the range via neutron and record its ID as the resource ID."""
        props = self.prepare_properties(
            self.properties,
            self.physical_resource_name())
        neutron_object = self.client().create_providernet_range(
            {self.neutron_api_key: props})[self.neutron_api_key]
        self.resource_id_set(neutron_object['id'])

    def handle_update(self, json_snippet, tmpl_diff, prop_diff):
        """Push only the changed update_allowed properties to neutron."""
        if prop_diff:
            self.prepare_update_properties(prop_diff)
            self.client().update_providernet_range(
                self.resource_id,
                {self.neutron_api_key: prop_diff})

    def _show_resource(self):
        # Backs the base class "show" attribute.
        return self.client().show_providernet_range(
            self.resource_id)[self.neutron_api_key]

    def handle_delete(self):
        """Delete the range, treating an already-gone range as success."""
        if self.resource_id is None:
            return
        try:
            self.client().delete_providernet_range(self.resource_id)
        except neutron_exp.NeutronClientException as ex:
            # Not-found is fine (already deleted); anything else re-raises.
            self.client_plugin().ignore_not_found(ex)
        else:
            return True
def resource_mapping():
    """Map the heat template resource-type name to its plug-in class."""
    return {
        'WR::Neutron::ProviderNetRange': ProviderNetRange,
    } | heat/engine/resources/wr/neutron_provider_net_range.py |
from heat.common.i18n import _
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
from heat.engine.resources.openstack.neutron import neutron
from oslo_log import log as logging
import neutronclient.common.exceptions as neutron_exp
LOG = logging.getLogger(__name__)
class ProviderNetRange(neutron.NeutronResource):
"""A resource for managing WR Neutron Provider Network Range.
The WR Neutron Provider Network Range adds range capabilities to the
WR Neutron Provider Network resource.
"""
neutron_api_key = 'providernet_range'
PROPERTIES = (
PROVIDERNET_ID, NAME,
MINIMUM, MAXIMUM,
DESCRIPTION, SHARED,
TENANT_ID, GROUP,
TTL, PORT,
) = (
'providernet_id', 'name',
'minimum', 'maximum',
'description', 'shared',
'tenant_id', 'group',
'ttl', 'port',
)
ATTRIBUTES = (
NAME, DESCRIPTION, SHARED, MINIMUM, MAXIMUM,
) = (
'name', 'description', 'shared', 'minimum', 'maximum'
)
properties_schema = {
PROVIDERNET_ID: properties.Schema(
properties.Schema.STRING,
_('ID of the existing provider network.'),
required=True,
),
NAME: properties.Schema(
properties.Schema.STRING,
_('Name of the provider network range.'),
required=True,
),
MINIMUM: properties.Schema(
properties.Schema.NUMBER,
_('Minimum value for the range for this provider network range.'),
required=True,
update_allowed=True,
),
MAXIMUM: properties.Schema(
properties.Schema.NUMBER,
_('Maximum value for the range for this provider network range.'),
required=True,
update_allowed=True,
),
DESCRIPTION: properties.Schema(
properties.Schema.STRING,
_('Description for this provider network range.'),
update_allowed=True,
),
SHARED: properties.Schema(
properties.Schema.BOOLEAN,
_('Whether provider network range is SHARED for all tenants.'),
default=False,
),
TENANT_ID: properties.Schema(
properties.Schema.STRING,
_('Tenant ID to assign to this range. '
'Note: Only applied if range is not SHARED.'),
constraints=[
constraints.CustomConstraint('keystone.project')
],
),
GROUP: properties.Schema(
properties.Schema.STRING,
_('Multicast IP addresses for VXLAN endpoints. '
'Note: Only applied if provider net is VXLAN.'),
update_allowed=True,
),
TTL: properties.Schema(
properties.Schema.NUMBER,
_('Time-to-live value for VXLAN provider networks. '
'Note: Only applied if provider net is VXLAN.'),
update_allowed=True,
),
PORT: properties.Schema(
properties.Schema.NUMBER,
_('Destination UDP port value to use for VXLAN provider networks. '
'Note: Only valid values are 4789 or 8472. '
'Note: Only applied if provider net is VXLAN. Default: 4789.'),
update_allowed=True,
constraints=[
constraints.AllowedValues([4789, 8472]),
],
),
}
# Base class already has "show"
attributes_schema = {
NAME: attributes.Schema(
_("The name of the provider network range."),
type=attributes.Schema.STRING
),
DESCRIPTION: attributes.Schema(
_("The description of the provider network range."),
type=attributes.Schema.STRING
),
MAXIMUM: attributes.Schema(
_('Maximum value for the range for this provider network range.'),
type=attributes.Schema.NUMBER
),
MINIMUM: attributes.Schema(
_('Minimum value for the range for this provider network range.'),
type=attributes.Schema.NUMBER
),
SHARED: attributes.Schema(
_('Whether this provider network range is shared or not.'),
type=attributes.Schema.BOOLEAN
),
}
def validate(self):
super(ProviderNetRange, self).validate()
def prepare_properties(self, properties, name):
props = super(ProviderNetRange, self).prepare_properties(properties,
name)
tenant = props.get(self.TENANT_ID)
if tenant:
# keystone project-list is the same as openstack tenant list"
tenant_id = self.client_plugin('keystone').get_project_id(tenant)
props[self.TENANT_ID] = tenant_id
return props
def handle_create(self):
props = self.prepare_properties(
self.properties,
self.physical_resource_name())
neutron_object = self.client().create_providernet_range(
{self.neutron_api_key: props})[self.neutron_api_key]
self.resource_id_set(neutron_object['id'])
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
if prop_diff:
self.prepare_update_properties(prop_diff)
self.client().update_providernet_range(
self.resource_id,
{self.neutron_api_key: prop_diff})
def _show_resource(self):
return self.client().show_providernet_range(
self.resource_id)[self.neutron_api_key]
def handle_delete(self):
if self.resource_id is None:
return
try:
self.client().delete_providernet_range(self.resource_id)
except neutron_exp.NeutronClientException as ex:
self.client_plugin().ignore_not_found(ex)
else:
return True
def resource_mapping():
return {
'WR::Neutron::ProviderNetRange': ProviderNetRange,
} | 0.713132 | 0.192881 |
from flask_restful import Resource, marshal, fields, reqparse
from app import auth
from app.models import PatientMeal
# Marshalling schemas for flask_restful.marshal; fields.Url entries resolve
# named endpoints against the current request.
# Viewed from patient details
meal_by_patient_fields = {
    'id': fields.Integer,
    'label': fields.String,
    'quantity': fields.Integer,
    'uri': fields.Url('meal_list_by_patient'),
    'meal': fields.Url('meal'),
    'requirements': fields.Url('requirement_list_by_patient'),
    'patient_meal': fields.Url('patient_meal')
}
# Viewed from meal details
patient_by_meal_fields = {
    'patient_name': fields.String,
    'quantity': fields.Integer,
    'uri': fields.Url('patient_list_by_meal'),
    'patient': fields.Url('patient'),
    'patient_meal': fields.Url('patient_meal')
}
class MealListByPatient(Resource):
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('meal_id', type=int, required=True,
help='No meal provided.', location='json')
self.reqparse.add_argument('quantity', type=int, required=True,
help='Quantity must be no lower than 1', location='json')
super(MealListByPatient, self).__init__()
def get(self, id):
patient_meals = PatientMeal.query.filter_by(patient_id=id).all()
meal_list = [{
'id': patient_meal.id,
'patient_id': patient_meal.patient_id,
'meal_id': patient_meal.meal_id,
'label': patient_meal.meal.label,
'quantity': patient_meal.quantity
} for patient_meal in patient_meals]
return {'meals': marshal([meal for meal in meal_list], meal_by_patient_fields)}
class PatientListByMeal(Resource):
    """REST resource listing the patients assigned to one meal."""

    decorators = [auth.login_required]

    def get(self, id):
        """Return all patients linked to meal *id*, marshalled for the API."""
        patient_meals = PatientMeal.query.filter_by(meal_id=id).all()
        patient_list = [{
            'id': patient_meal.id,
            'patient_id': patient_meal.patient_id,
            'meal_id': patient_meal.meal_id,
            'patient_name': patient_meal.patient.first_name + ' ' + patient_meal.patient.last_name,
            'quantity': patient_meal.quantity
        } for patient_meal in patient_meals]
        # NOTE(review): `[meal for meal in patient_list]` is a redundant copy;
        # marshal(patient_list, ...) would suffice.
        return {'patients': marshal([meal for meal in patient_list], patient_by_meal_fields)} | app/resources/patient_meal.py | from flask_restful import Resource, marshal, fields, reqparse
from app import auth
from app.models import PatientMeal
# Viewed from patient details
meal_by_patient_fields = {
'id': fields.Integer,
'label': fields.String,
'quantity': fields.Integer,
'uri': fields.Url('meal_list_by_patient'),
'meal': fields.Url('meal'),
'requirements': fields.Url('requirement_list_by_patient'),
'patient_meal': fields.Url('patient_meal')
}
# Viewed from meal details
patient_by_meal_fields = {
'patient_name': fields.String,
'quantity': fields.Integer,
'uri': fields.Url('patient_list_by_meal'),
'patient': fields.Url('patient'),
'patient_meal': fields.Url('patient_meal')
}
class MealListByPatient(Resource):
decorators = [auth.login_required]
def __init__(self):
self.reqparse = reqparse.RequestParser()
self.reqparse.add_argument('meal_id', type=int, required=True,
help='No meal provided.', location='json')
self.reqparse.add_argument('quantity', type=int, required=True,
help='Quantity must be no lower than 1', location='json')
super(MealListByPatient, self).__init__()
def get(self, id):
patient_meals = PatientMeal.query.filter_by(patient_id=id).all()
meal_list = [{
'id': patient_meal.id,
'patient_id': patient_meal.patient_id,
'meal_id': patient_meal.meal_id,
'label': patient_meal.meal.label,
'quantity': patient_meal.quantity
} for patient_meal in patient_meals]
return {'meals': marshal([meal for meal in meal_list], meal_by_patient_fields)}
class PatientListByMeal(Resource):
decorators = [auth.login_required]
def get(self, id):
patient_meals = PatientMeal.query.filter_by(meal_id=id).all()
patient_list = [{
'id': patient_meal.id,
'patient_id': patient_meal.patient_id,
'meal_id': patient_meal.meal_id,
'patient_name': patient_meal.patient.first_name + ' ' + patient_meal.patient.last_name,
'quantity': patient_meal.quantity
} for patient_meal in patient_meals]
return {'patients': marshal([meal for meal in patient_list], patient_by_meal_fields)} | 0.589953 | 0.113629 |
import dectate
import morepath
from morepath.error import ConflictError
import pytest
from webtest import TestApp as Client
def test_settings_property():
    # app.settings must be an alias for the committed setting registry.
    class App(morepath.App):
        pass

    @App.setting("foo", "bar")
    def get_foo_setting():
        return "bar"

    dectate.commit(App)
    app = App()
    assert app.settings is app.config.setting_registry
def test_app_extends_settings():
    # A subclass app inherits the parent's settings and may add its own;
    # the parent must NOT see settings declared only on the subclass.
    class alpha(morepath.App):
        pass

    class beta(alpha):
        pass

    @alpha.setting("one", "foo")
    def get_foo_setting():
        return "FOO"

    @beta.setting("one", "bar")
    def get_bar_setting():
        return "BAR"

    dectate.commit(alpha, beta)

    alpha_inst = alpha()
    settings = alpha_inst.config.setting_registry
    assert settings.one.foo == "FOO"
    # The child's setting is invisible on the parent app.
    with pytest.raises(AttributeError):
        settings.one.bar

    beta_inst = beta()
    settings = beta_inst.config.setting_registry
    assert settings.one.foo == "FOO"
    assert settings.one.bar == "BAR"
def test_app_overrides_settings():
    # Re-declaring a setting in a subclass overrides it there only; the
    # parent keeps its original value.
    class alpha(morepath.App):
        pass

    class beta(alpha):
        pass

    @alpha.setting("one", "foo")
    def get_foo_setting():
        return "FOO"

    @beta.setting("one", "foo")
    def get_bar_setting():
        return "OVERRIDE"

    dectate.commit(alpha, beta)
    assert alpha().config.setting_registry.one.foo == "FOO"
    assert beta().config.setting_registry.one.foo == "OVERRIDE"
def test_app_overrides_settings_three():
    # An override declared on an intermediate class propagates to grandchildren.
    class alpha(morepath.App):
        pass

    class beta(alpha):
        pass

    class gamma(beta):
        pass

    @alpha.setting("one", "foo")
    def get_foo_setting():
        return "FOO"

    @beta.setting("one", "foo")
    def get_bar_setting():
        return "OVERRIDE"

    dectate.commit(alpha, beta, gamma)
    assert gamma().config.setting_registry.one.foo == "OVERRIDE"
def test_app_section_settings():
    # setting_section registers a whole dict of settings under one section.
    class app(morepath.App):
        pass

    @app.setting_section("one")
    def settings():
        return {"foo": "FOO", "bar": "BAR"}

    dectate.commit(app)
    app_inst = app()
    s = app_inst.config.setting_registry
    assert s.one.foo == "FOO"
    assert s.one.bar == "BAR"
def test_app_section_settings_conflict():
    # A single-setting declaration colliding with a section entry must
    # surface as a ConflictError at commit time.
    class app(morepath.App):
        pass

    @app.setting_section("one")
    def settings():
        return {"foo": "FOO", "bar": "BAR"}

    @app.setting("one", "foo")
    def get_foo():
        return "another"

    with pytest.raises(ConflictError):
        dectate.commit(app)
def test_settings_property_in_view():
    # Settings must be reachable through request.app inside a view.
    class app(morepath.App):
        pass

    @app.setting("section", "name")
    def setting():
        return "LAH"

    @app.path(path="")
    class Model(object):
        def __init__(self):
            pass

    @app.view(model=Model)
    def default(self, request):
        return request.app.settings.section.name

    c = Client(app())
    response = c.get("/")
    assert response.body == b"LAH" | morepath/tests/test_setting_directive.py | import dectate
import morepath
from morepath.error import ConflictError
import pytest
from webtest import TestApp as Client
def test_settings_property():
class App(morepath.App):
pass
@App.setting("foo", "bar")
def get_foo_setting():
return "bar"
dectate.commit(App)
app = App()
assert app.settings is app.config.setting_registry
def test_app_extends_settings():
class alpha(morepath.App):
pass
class beta(alpha):
pass
@alpha.setting("one", "foo")
def get_foo_setting():
return "FOO"
@beta.setting("one", "bar")
def get_bar_setting():
return "BAR"
dectate.commit(alpha, beta)
alpha_inst = alpha()
settings = alpha_inst.config.setting_registry
assert settings.one.foo == "FOO"
with pytest.raises(AttributeError):
settings.one.bar
beta_inst = beta()
settings = beta_inst.config.setting_registry
assert settings.one.foo == "FOO"
assert settings.one.bar == "BAR"
def test_app_overrides_settings():
    """A subclass app may override a setting defined by its base."""

    class alpha(morepath.App):
        pass

    class beta(alpha):
        pass

    @alpha.setting("one", "foo")
    def get_foo_setting():
        return "FOO"

    @beta.setting("one", "foo")
    def get_bar_setting():
        return "OVERRIDE"

    dectate.commit(alpha, beta)

    # Base keeps its own value; the subclass sees the override.
    for app_class, expected in [(alpha, "FOO"), (beta, "OVERRIDE")]:
        assert app_class().config.setting_registry.one.foo == expected
def test_app_overrides_settings_three():
    """An override is inherited further down a three-level app hierarchy."""

    class alpha(morepath.App):
        pass

    class beta(alpha):
        pass

    class gamma(beta):
        pass

    @alpha.setting("one", "foo")
    def get_foo_setting():
        return "FOO"

    @beta.setting("one", "foo")
    def get_bar_setting():
        return "OVERRIDE"

    dectate.commit(alpha, beta, gamma)

    # gamma registers nothing itself: it picks up beta's override.
    settings = gamma().config.setting_registry
    assert settings.one.foo == "OVERRIDE"
def test_app_section_settings():
    """All keys returned by a setting_section land in the registry."""

    class app(morepath.App):
        pass

    @app.setting_section("one")
    def settings():
        return {"foo": "FOO", "bar": "BAR"}

    dectate.commit(app)

    section = app().config.setting_registry.one
    assert (section.foo, section.bar) == ("FOO", "BAR")
def test_app_section_settings_conflict():
    """Registering "one.foo" via both a section and a single setting conflicts."""

    class app(morepath.App):
        pass

    @app.setting_section("one")
    def settings():
        # "foo" here collides with the single-setting directive below.
        return {"foo": "FOO", "bar": "BAR"}

    @app.setting("one", "foo")
    def get_foo():
        return "another"

    with pytest.raises(ConflictError):
        dectate.commit(app)
def test_settings_property_in_view():
    """A view can read committed settings via ``request.app.settings``.

    Fix: the closing assertion line carried fused dataset quality-score
    text ("| 0.68… | 0.31…") which broke the syntax; restored here.
    """

    class app(morepath.App):
        pass

    @app.setting("section", "name")
    def setting():
        return "LAH"

    @app.path(path="")
    class Model(object):
        def __init__(self):
            pass

    @app.view(model=Model)
    def default(self, request):
        return request.app.settings.section.name

    c = Client(app())
    response = c.get("/")
    assert response.body == b"LAH"
from __future__ import absolute_import, division, print_function
import os
import re
from collections import OrderedDict
from ..defs import (task_name_sep, task_state_to_int, task_int_to_state)
from ...util import option_list
from ...io import findfile
from .base import (BaseTask, task_classes)
from desiutil.log import get_logger
import numpy as np
# NOTE: only one class in this file should have a name that starts with "Task".
class TaskPreproc(BaseTask):
    """Class containing the properties of one preprocessed pixel file.

    Fixes relative to the original: the final line was corrupted by fused
    dataset-separator text; the two copy-pasted SQL/getready stanzas in
    ``postprocessing`` are deduplicated into a private helper; the redundant
    function-local import in ``_option_list`` is removed (``task_classes``
    is already imported at module level).
    """

    def __init__(self):
        # Initialize the generic task machinery first.
        super(TaskPreproc, self).__init__()
        # Then fill in the specifics of this task type.
        # _cols must have a state.
        self._type = "preproc"
        self._cols = [
            "night",
            "band",
            "spec",
            "expid",
            "flavor",
            "state"
        ]
        self._coltypes = [
            "integer",
            "text",
            "integer",
            "integer",
            "text",
            "integer"
        ]
        # _name_fields must also be in _cols.
        self._name_fields = ["night", "band", "spec", "expid"]
        self._name_formats = ["08d", "s", "d", "08d"]

    def _paths(self, name):
        """See BaseTask.paths."""
        props = self.name_split(name)
        camera = "{}{}".format(props["band"], props["spec"])
        return [findfile("preproc", night=props["night"], expid=props["expid"],
                         camera=camera, groupname=None, nside=None,
                         band=props["band"], spectrograph=props["spec"])]

    def _deps(self, name, db, inputs):
        """See BaseTask.deps."""
        props = self.name_split(name)
        return {
            "fibermap": task_classes["fibermap"].name_join(props),
            "rawdata": task_classes["rawdata"].name_join(props),
        }

    def _run_max_procs(self, procs_per_node):
        """See BaseTask.run_max_procs."""
        return 1

    def _run_max_mem(self):
        # Presumably maximum memory (GB) for one preproc task -- confirm
        # against the BaseTask contract.
        return 7.0

    def _run_time(self, name, procs_per_node, db=None):
        """See BaseTask.run_time."""
        return 5

    def _run_defaults(self):
        """See BaseTask.run_defaults."""
        return dict()

    def _option_list(self, name, opts):
        """Build the full list of options.

        This includes appending the filenames and incorporating runtime
        options.
        """
        dp = self.deps(name)
        options = OrderedDict()
        options.update(opts)
        props = self.name_split(name)
        options["infile"] = task_classes["rawdata"].paths(dp["rawdata"])[0]
        options["cameras"] = "{}{}".format(props["band"], props["spec"])
        options["outfile"] = self.paths(name)[0]
        return option_list(options)

    def _run_cli(self, name, opts, procs, db):
        """See BaseTask.run_cli."""
        entry = "desi_preproc"
        optlist = self._option_list(name, opts)
        return "{} {}".format(entry, " ".join(optlist))

    def _run(self, name, opts, comm, db):
        """See BaseTask.run."""
        from ...scripts import preproc
        optlist = self._option_list(name, opts)
        args = preproc.parse(optlist)
        preproc.main(args)
        return

    def _getready_dependents(self, db, cur, props, task_type):
        """Mark not-yet-started ``task_type`` tasks of this exposure as ready.

        NOTE: the query is assembled with str.format from values parsed out
        of the task name (internal data, not external user input).
        """
        log = get_logger()
        cmd = ("select name from {} where night={} and band='{}' "
               "and spec={} and expid={} and state=0").format(
                   task_type, props["night"], props["band"],
                   props["spec"], props["expid"])
        cur.execute(cmd)
        tasks = [x for (x,) in cur.fetchall()]
        log.debug("checking {}".format(tasks))
        for task in tasks:
            task_classes[task_type].getready(db=db, name=task, cur=cur)

    def postprocessing(self, db, name, cur):
        """For successful runs, postprocessing on DB."""
        # Run getready for all downstream task types sharing the same
        # night/band/spec/expid.
        props = self.name_split(name)
        for task_type in ("psf", "traceshift"):
            self._getready_dependents(db, cur, props, task_type)
from __future__ import absolute_import, division, print_function
import os
import re
from collections import OrderedDict
from ..defs import (task_name_sep, task_state_to_int, task_int_to_state)
from ...util import option_list
from ...io import findfile
from .base import (BaseTask, task_classes)
from desiutil.log import get_logger
import numpy as np
# NOTE: only one class in this file should have a name that starts with "Task".
class TaskPreproc(BaseTask):
    """Class containing the properties of one preprocessed pixel file.

    Fixes relative to the original: the final line was corrupted by fused
    dataset quality-score text; the duplicated SQL/getready stanzas in
    ``postprocessing`` are folded into one private helper; the redundant
    function-local import in ``_option_list`` is dropped.
    """

    def __init__(self):
        # Initialize the generic task machinery first.
        super(TaskPreproc, self).__init__()
        # Then fill in the specifics of this task type.
        # _cols must have a state.
        self._type = "preproc"
        self._cols = [
            "night",
            "band",
            "spec",
            "expid",
            "flavor",
            "state"
        ]
        self._coltypes = [
            "integer",
            "text",
            "integer",
            "integer",
            "text",
            "integer"
        ]
        # _name_fields must also be in _cols.
        self._name_fields = ["night", "band", "spec", "expid"]
        self._name_formats = ["08d", "s", "d", "08d"]

    def _paths(self, name):
        """See BaseTask.paths."""
        props = self.name_split(name)
        camera = "{}{}".format(props["band"], props["spec"])
        return [findfile("preproc", night=props["night"], expid=props["expid"],
                         camera=camera, groupname=None, nside=None,
                         band=props["band"], spectrograph=props["spec"])]

    def _deps(self, name, db, inputs):
        """See BaseTask.deps."""
        props = self.name_split(name)
        return {
            "fibermap": task_classes["fibermap"].name_join(props),
            "rawdata": task_classes["rawdata"].name_join(props),
        }

    def _run_max_procs(self, procs_per_node):
        """See BaseTask.run_max_procs."""
        return 1

    def _run_max_mem(self):
        # Presumably maximum memory (GB) for one preproc task -- confirm
        # against the BaseTask contract.
        return 7.0

    def _run_time(self, name, procs_per_node, db=None):
        """See BaseTask.run_time."""
        return 5

    def _run_defaults(self):
        """See BaseTask.run_defaults."""
        return dict()

    def _option_list(self, name, opts):
        """Build the full list of options.

        This includes appending the filenames and incorporating runtime
        options.
        """
        dp = self.deps(name)
        options = OrderedDict()
        options.update(opts)
        props = self.name_split(name)
        options["infile"] = task_classes["rawdata"].paths(dp["rawdata"])[0]
        options["cameras"] = "{}{}".format(props["band"], props["spec"])
        options["outfile"] = self.paths(name)[0]
        return option_list(options)

    def _run_cli(self, name, opts, procs, db):
        """See BaseTask.run_cli."""
        entry = "desi_preproc"
        optlist = self._option_list(name, opts)
        return "{} {}".format(entry, " ".join(optlist))

    def _run(self, name, opts, comm, db):
        """See BaseTask.run."""
        from ...scripts import preproc
        optlist = self._option_list(name, opts)
        args = preproc.parse(optlist)
        preproc.main(args)
        return

    def _getready_dependents(self, db, cur, props, task_type):
        """Mark not-yet-started ``task_type`` tasks of this exposure as ready.

        NOTE: the query is assembled with str.format from values parsed out
        of the task name (internal data, not external user input).
        """
        log = get_logger()
        cmd = ("select name from {} where night={} and band='{}' "
               "and spec={} and expid={} and state=0").format(
                   task_type, props["night"], props["band"],
                   props["spec"], props["expid"])
        cur.execute(cmd)
        tasks = [x for (x,) in cur.fetchall()]
        log.debug("checking {}".format(tasks))
        for task in tasks:
            task_classes[task_type].getready(db=db, name=task, cur=cur)

    def postprocessing(self, db, name, cur):
        """For successful runs, postprocessing on DB."""
        # Run getready for all downstream task types sharing the same
        # night/band/spec/expid.
        props = self.name_split(name)
        for task_type in ("psf", "traceshift"):
            self._getready_dependents(db, cur, props, task_type)
import numpy as np
#This class contains all the default parameters for DynaPhoPy
class Parameters:
    """Container for all default DynaPhoPy run parameters.

    Every constructor argument is exposed as a read/write property of the
    same name (backed by a ``_``-prefixed attribute), so existing callers
    keep working. The ~100 lines of identical hand-written property/setter
    pairs are generated below instead; the final line of the original was
    also corrupted by fused dataset-separator text.

    NOTE(review): the numpy-array defaults are created once at import time
    and shared across instances -- confirm callers never mutate them in
    place.
    """

    def __init__(self,
                 # General
                 silent=False,
                 # Projections
                 reduced_q_vector=(0, 0, 0),  # default reduced wave vector
                 # Maximum Entropy Method
                 number_of_coefficients_mem=1000,
                 mem_scan_range=np.array(np.linspace(40, 2000, 100), dtype=int),
                 # Correlation Method
                 correlation_function_step=10,
                 integration_method=1,  # 0: Trapezoid 1: Rectangles
                 # Power spectra
                 # 0: Correlation functions parallel (OpenMP) [Recommended]
                 # 1: Maximum Entropy Method parallel (OpenMP) [Recommended]
                 power_spectra_algorithm=1,
                 frequency_range=np.linspace(0, 40, 500),
                 # Phonon dispersion diagram
                 use_NAC=False,
                 band_ranges=([[[0.0, 0.0, 0.0], [0.5, 0.0, 0.5]]]),
                 number_of_bins_histogram=50
                 ):
        self._silent = silent
        self._number_of_coefficients_mem = number_of_coefficients_mem
        self._mem_scan_range = mem_scan_range
        self._correlation_function_step = correlation_function_step
        self._integration_method = integration_method
        self._power_spectra_algorithm = power_spectra_algorithm
        self._frequency_range = frequency_range
        self._reduced_q_vector = reduced_q_vector
        self._use_NAC = use_NAC
        self._band_ranges = band_ranges
        self._number_of_bins_histogram = number_of_bins_histogram


def _parameter_property(attr):
    """Build a plain get/set property backed by private attribute ``attr``."""
    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set)


# Generate the boilerplate accessors; the public interface is unchanged.
for _name in ("silent", "reduced_q_vector", "number_of_coefficients_mem",
              "mem_scan_range", "correlation_function_step",
              "integration_method", "frequency_range",
              "power_spectra_algorithm", "use_NAC", "band_ranges",
              "number_of_bins_histogram"):
    setattr(Parameters, _name, _parameter_property("_" + _name))
del _name
#This class contains all the default parameters for DynaPhoPy
class Parameters:
    """Container for all default DynaPhoPy run parameters.

    Every constructor argument is exposed as a read/write property of the
    same name (backed by a ``_``-prefixed attribute), so existing callers
    keep working. The hand-written property/setter boilerplate is generated
    below; the final line of the original was corrupted by fused dataset
    quality-score text.

    NOTE(review): the numpy-array defaults are created once at import time
    and shared across instances -- confirm callers never mutate them in
    place.
    """

    def __init__(self,
                 # General
                 silent=False,
                 # Projections
                 reduced_q_vector=(0, 0, 0),  # default reduced wave vector
                 # Maximum Entropy Method
                 number_of_coefficients_mem=1000,
                 mem_scan_range=np.array(np.linspace(40, 2000, 100), dtype=int),
                 # Correlation Method
                 correlation_function_step=10,
                 integration_method=1,  # 0: Trapezoid 1: Rectangles
                 # Power spectra
                 # 0: Correlation functions parallel (OpenMP) [Recommended]
                 # 1: Maximum Entropy Method parallel (OpenMP) [Recommended]
                 power_spectra_algorithm=1,
                 frequency_range=np.linspace(0, 40, 500),
                 # Phonon dispersion diagram
                 use_NAC=False,
                 band_ranges=([[[0.0, 0.0, 0.0], [0.5, 0.0, 0.5]]]),
                 number_of_bins_histogram=50
                 ):
        self._silent = silent
        self._number_of_coefficients_mem = number_of_coefficients_mem
        self._mem_scan_range = mem_scan_range
        self._correlation_function_step = correlation_function_step
        self._integration_method = integration_method
        self._power_spectra_algorithm = power_spectra_algorithm
        self._frequency_range = frequency_range
        self._reduced_q_vector = reduced_q_vector
        self._use_NAC = use_NAC
        self._band_ranges = band_ranges
        self._number_of_bins_histogram = number_of_bins_histogram


def _parameter_property(attr):
    """Build a plain get/set property backed by private attribute ``attr``."""
    def _get(self):
        return getattr(self, attr)

    def _set(self, value):
        setattr(self, attr, value)

    return property(_get, _set)


# Generate the boilerplate accessors; the public interface is unchanged.
for _name in ("silent", "reduced_q_vector", "number_of_coefficients_mem",
              "mem_scan_range", "correlation_function_step",
              "integration_method", "frequency_range",
              "power_spectra_algorithm", "use_NAC", "band_ranges",
              "number_of_bins_histogram"):
    setattr(Parameters, _name, _parameter_property("_" + _name))
del _name
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
#response.title = ' '.join(word.capitalize() for word in request.application.split('_'))
#response.subtitle = T('Select Services')
## read more at http://dev.w3.org/html5/markup/meta.name.html
## your http://google.com/analytics id
response.google_analytics_id = None

#########################################################################
## this is the main application menu add/remove items as required
#########################################################################

# Sub-menu entries for connecting a shop. Currently not attached to
# response.menu (see the commented-out "add_shop" hook below).
add_shop = [
    (T('С чего начать?'), True, URL('add', 'start')),
    (T('Создать счёт на оплату'), True, URL('bill', 'simple')),
    (T('Подать заявку на подключение'), True, URL('add', 'index')),
    (T('Платежные модули и плагины'), True, URL('default', 'plugins')),
    (T('Подключенные магазины'), True, URL('shops', 'list')),
    (T('Тестировать уведомления'), True, URL('add', 'note_test')),
]

import common

# Only expose the "accept shop" entry outside local deployments.
# NOTE(review): the helper is named not_is_local(), so `not not_is_local()`
# reads as "is local" -- confirm the intent against the common module.
if not common.not_is_local():
    add_shop.append((B(T('Принять магазин')), True, URL('add', 'accept')))

# Main navigation: (label, active flag, target URL[, children]).
response.menu = [
    (CAT(H4(T('Зачем')), T('это нужно?')), URL('default', 'home') == URL(), URL('default', 'home'), []),
    (CAT(H4(T('Как')), T('начать?')), URL('add', 'start') == URL(), URL('add', 'start'), []),  # add_shop ),
    (CAT(H4(T('Кто уже')), T('использует?')), URL('shops', 'list') == URL(), URL('shops', 'list')),
    (CAT(H4(T('Что мы')), T('принимаем?')), URL('default', 'currs') == URL(), URL('default', 'currs'), []),
    (CAT(H4(T('Почём')), T('обслуживаем?')), URL('shops', 'prices') == URL(), URL('shops', 'prices')),
]

# Footer links.
response.menu_foot = [
    (B(T('Стартап')), True, URL('default', 'startup'), []),
    (T('API'), True, URL('default', 'api_docs'), []),
    (T('Как заработать?'), True, URL('dealers', 'index'), []),
    (T('Вакансии'), True, URL('default', 'vacs'), []),
    (T('Почему?'), True, URL('why', 'index'), []),
    (T('Когда?'), True, URL('news', 'index'), []),
]
#########################################################################
## Customize your APP title, subtitle and menus here
#########################################################################
#response.title = ' '.join(word.capitalize() for word in request.application.split('_'))
#response.subtitle = T('Select Services')
## read more at http://dev.w3.org/html5/markup/meta.name.html
## your http://google.com/analytics id
response.google_analytics_id = None

#########################################################################
## this is the main application menu add/remove items as required
#########################################################################

# Sub-menu entries for connecting a shop. Currently not attached to
# response.menu (see the commented-out "add_shop" hook below).
add_shop = [
    (T('С чего начать?'), True, URL('add', 'start')),
    (T('Создать счёт на оплату'), True, URL('bill', 'simple')),
    (T('Подать заявку на подключение'), True, URL('add', 'index')),
    (T('Платежные модули и плагины'), True, URL('default', 'plugins')),
    (T('Подключенные магазины'), True, URL('shops', 'list')),
    (T('Тестировать уведомления'), True, URL('add', 'note_test')),
]

import common

# Only expose the "accept shop" entry outside local deployments.
# NOTE(review): the helper is named not_is_local(), so `not not_is_local()`
# reads as "is local" -- confirm the intent against the common module.
if not common.not_is_local():
    add_shop.append((B(T('Принять магазин')), True, URL('add', 'accept')))

# Main navigation: (label, active flag, target URL[, children]).
response.menu = [
    (CAT(H4(T('Зачем')), T('это нужно?')), URL('default', 'home') == URL(), URL('default', 'home'), []),
    (CAT(H4(T('Как')), T('начать?')), URL('add', 'start') == URL(), URL('add', 'start'), []),  # add_shop ),
    (CAT(H4(T('Кто уже')), T('использует?')), URL('shops', 'list') == URL(), URL('shops', 'list')),
    (CAT(H4(T('Что мы')), T('принимаем?')), URL('default', 'currs') == URL(), URL('default', 'currs'), []),
    (CAT(H4(T('Почём')), T('обслуживаем?')), URL('shops', 'prices') == URL(), URL('shops', 'prices')),
]

# Footer links.
response.menu_foot = [
    (B(T('Стартап')), True, URL('default', 'startup'), []),
    (T('API'), True, URL('default', 'api_docs'), []),
    (T('Как заработать?'), True, URL('dealers', 'index'), []),
    (T('Вакансии'), True, URL('default', 'vacs'), []),
    (T('Почему?'), True, URL('why', 'index'), []),
    (T('Когда?'), True, URL('news', 'index'), []),
]
from __future__ import annotations
import tempfile
from pathlib import Path
from typing import List, Set, Dict, Generator, Tuple
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from pybedtools import BedTool
from genomics_data_index.storage.model import NUCLEOTIDE_UNKNOWN, NUCLEOTIDE_UNKNOWN_TYPE
class MaskedGenomicRegions:
    """A set of masked regions (e.g. unknown bases) on a genome.

    Backed by a :class:`pybedtools.BedTool` of sorted, merged, 0-based
    half-open BED intervals.

    Fixes relative to the original: ``overlaps_range`` missed overlaps
    where the query's ``stop`` equals an interval's end; its exception
    message described the opposite condition; ``union_all`` mutated the
    caller's list via ``pop()``; ``contains`` duplicated the coordinate
    validation; the final line carried fused dataset-separator text.
    """

    def __init__(self, mask: BedTool):
        # Sort+merge guarantees non-overlapping intervals; __len__ and the
        # iteration helpers rely on this invariant.
        self._mask = mask.sort().merge()

    @property
    def mask(self):
        """The underlying BedTool of merged intervals."""
        return self._mask

    def intersect(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
        """Regions present in both this mask and ``other``."""
        return MaskedGenomicRegions(self._mask.intersect(other._mask))

    def subtract(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
        """Regions of this mask that are not covered by ``other``."""
        return MaskedGenomicRegions(self._mask.subtract(other._mask))

    def union(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
        """Merged union of this mask and ``other``."""
        combined = self._mask.cat(other._mask, postmerge=True, force_truncate=True)
        return MaskedGenomicRegions(combined)

    def mask_to_features(self) -> pd.DataFrame:
        """Expand every masked position into an 'unknown nucleotide' feature row.

        :return: DataFrame with CHROM/POS/REF/ALT/TYPE/VARIANT_ID columns,
                 positions in 1-based coordinates.
        """
        ref = 1  # NOTE(review): REF is the literal integer 1 -- confirm intended.
        alt = NUCLEOTIDE_UNKNOWN
        nuc_type = NUCLEOTIDE_UNKNOWN_TYPE
        mask_features = []
        for sequence_name, position in self.positions_iter(start_position_index='1'):
            variant_id = f'{sequence_name}:{position}:{ref}:{alt}'
            mask_features.append([sequence_name, position, ref, alt, nuc_type, variant_id])
        return pd.DataFrame(mask_features,
                            columns=['CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'VARIANT_ID'])

    def mask_genome(self, genome_file: Path, mask_char: str = '?', remove: bool = True) -> Dict[str, SeqRecord]:
        """
        Gets a SeqRecord with all those regions on the passed genome that are in the masked regions removed
        (or masked with mask_char).
        :param genome_file: The genome file to mask.
        :param mask_char: The character to mask with.
        :param remove: Whether or not to remove masked sequence data.
        :return: A Dictionary mapping a sequence name to a SeqRecord containing all those regions on the sequence
                 within the masked regions removed (or masked with mask_char)
        """
        with tempfile.TemporaryDirectory() as tmp_dir:
            seq_records = {}
            output_fasta = Path(tmp_dir) / 'masked.fasta'
            self._mask.mask_fasta(fi=str(genome_file), fo=str(output_fasta), mc=mask_char)
            for record in SeqIO.parse(output_fasta, 'fasta'):
                if remove:
                    # Dropping the mask character shifts downstream coordinates.
                    record.seq = record.seq.ungap(mask_char)
                seq_records[record.id] = record
            return seq_records

    def write(self, file: Path):
        """Write this mask to ``file`` as a compressed BED file."""
        self._mask.saveas(str(file), compressed=True)

    @classmethod
    def union_all(cls, masked_regions: List[MaskedGenomicRegions]):
        """Union an arbitrary number of masks into one."""
        if len(masked_regions) == 0:
            raise Exception('Cannot merge empty list')
        if len(masked_regions) == 1:
            return masked_regions[0]
        # Do not mutate the caller's list (the previous code pop()ed it).
        first, rest = masked_regions[0], masked_regions[1:]
        union = first._mask.cat(*[o._mask for o in rest], postmerge=True,
                                force_truncate=True)
        return MaskedGenomicRegions(union)

    @classmethod
    def from_sequences(cls, sequences: List[SeqRecord]) -> MaskedGenomicRegions:
        """Build a mask from the 'N'/'-' stretches of the given sequences."""
        def is_missing(char):
            return char.upper() == 'N' or char == '-'
        # pybedtools internally stores as 0-based BED file intervals
        # https://daler.github.io/pybedtools/intervals.html#bed-is-0-based-others-are-1-based
        mask_intervals = []
        for record in sequences:
            start = 0
            in_mask = False
            for idx, char in enumerate(record.seq):
                if in_mask:
                    if not is_missing(char):
                        in_mask = False
                        # pybedtools stop position is not included in interval.
                        mask_intervals.append((record.id, start, idx))
                elif is_missing(char):
                    in_mask = True
                    start = idx
            # Close a trailing interval (e.g. sequence ends with 'NNNN').
            if in_mask:
                mask_intervals.append((record.id, start, len(record)))
        return MaskedGenomicRegions(BedTool(mask_intervals))

    @classmethod
    def from_file(cls, file: Path) -> MaskedGenomicRegions:
        """Load a mask from a BED file."""
        return MaskedGenomicRegions(BedTool(str(file)))

    @classmethod
    def from_vcf_file(cls, file: Path) -> MaskedGenomicRegions:
        """Load and merge a mask from the given file."""
        return MaskedGenomicRegions(BedTool(str(file)).merge())

    @classmethod
    def empty_mask(cls):
        """An empty mask covering no positions."""
        return MaskedGenomicRegions(BedTool('', from_string=True))

    def is_empty(self):
        """Whether this mask covers no positions at all."""
        return len(self) == 0

    def sequence_names(self) -> Set[str]:
        """
        Gets a set of sequence names from this genomic regions mask.
        :return: A set of sequence names.
        """
        return {x.chrom for x in self._mask}

    def _validate_start_position_index(self, start_position_index: str) -> None:
        """Raise if ``start_position_index`` is not '0' or '1'."""
        if start_position_index not in ['0', '1']:
            raise Exception((f'Unknown value start_position_index=[{start_position_index}].'
                             'Should be "0" or "1" to indicate which is the starting base position'))

    def contains(self, sequence: str, position: int, start_position_index: str = '0') -> bool:
        """Whether the single ``position`` on ``sequence`` is masked."""
        self._validate_start_position_index(start_position_index)
        if start_position_index == '1':
            position = position - 1
        return any(i.chrom == sequence and i.start <= position < i.end
                   for i in self._mask)

    def overlaps_range(self, sequence: str, start: int, stop: int, start_position_index: str = '0') -> bool:
        """Whether the half-open range [start, stop) on ``sequence`` overlaps the mask."""
        self._validate_start_position_index(start_position_index)
        if start_position_index == '1':
            start = start - 1
            stop = stop - 1
        if stop <= start:
            # BUGFIX: the old message claimed "start is less than stop".
            raise Exception(f'stop=[{stop}] must be greater than start=[{start}]')
        for i in self._mask:
            # Standard half-open interval overlap test. The previous code
            # missed overlaps where stop == i.end.
            if i.chrom == sequence and i.start < stop and start < i.end:
                return True
        return False

    def positions_iter(self, start_position_index: str = '0') -> Generator[Tuple[str, int], None, None]:
        """
        Creates an iterator to iterate over all ('sequence', 'position') in this mask.
        :param start_position_index: Whether positions should be in 0-base or 1-base
            coordinates. See
            https://bedtools.readthedocs.io/en/latest/content/general-usage.html#bed-format
        :return: An iterator of ('sequence', 'position') tuples for every
            position in this mask.
        """
        self._validate_start_position_index(start_position_index)
        offset = 1 if start_position_index == '1' else 0
        for interval in self._mask:
            for pos in range(interval.start + offset, interval.end + offset):
                yield interval.chrom, pos

    def __len__(self) -> int:
        """
        Total number of masked positions. Assumes the intervals have been
        merged beforehand (guaranteed by __init__).
        """
        return sum(len(i) for i in self._mask)
import tempfile
from pathlib import Path
from typing import List, Set, Dict, Generator, Tuple
import pandas as pd
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from pybedtools import BedTool
from genomics_data_index.storage.model import NUCLEOTIDE_UNKNOWN, NUCLEOTIDE_UNKNOWN_TYPE
class MaskedGenomicRegions:
def __init__(self, mask: BedTool):
self._mask = mask.sort().merge()
@property
def mask(self):
return self._mask
def intersect(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
return MaskedGenomicRegions(self._mask.intersect(other._mask))
def subtract(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
subtraction = self._mask.subtract(other._mask)
return MaskedGenomicRegions(subtraction)
def union(self, other: MaskedGenomicRegions) -> MaskedGenomicRegions:
union = self._mask.cat(other._mask, postmerge=True, force_truncate=True)
return MaskedGenomicRegions(union)
def mask_to_features(self) -> pd.DataFrame:
mask_features = []
ref = 1
alt = NUCLEOTIDE_UNKNOWN
nuc_type = NUCLEOTIDE_UNKNOWN_TYPE
for sequence_name, position in self.positions_iter(start_position_index='1'):
variant_id = f'{sequence_name}:{position}:{ref}:{alt}'
mask_features.append([sequence_name, position, ref, alt, nuc_type, variant_id])
return pd.DataFrame(mask_features, columns=['CHROM', 'POS', 'REF', 'ALT', 'TYPE', 'VARIANT_ID'])
def mask_genome(self, genome_file: Path, mask_char: str = '?', remove: bool = True) -> Dict[str, SeqRecord]:
"""
Gets a SeqRecord with all those regions on the passed genome that are in the masked regions removed
(or masked with mask_char).
:param genome_file: The genome file to mask.
:param mask_char: The character to mask with.
:param remove: Whether or not to remove masked sequence data.
:return: A Dictionary mapping a sequence name to a SeqRecord containing all those regions on the sequence
within the masked regions removed (or masked with mask_char)
"""
with tempfile.TemporaryDirectory() as out_f:
seq_records = {}
output_fasta = Path(out_f) / 'masked.fasta'
self._mask.mask_fasta(fi=str(genome_file), fo=str(output_fasta), mc=mask_char)
for record in SeqIO.parse(output_fasta, 'fasta'):
if remove:
record.seq = record.seq.ungap(mask_char)
seq_records[record.id] = record
return seq_records
def write(self, file: Path):
self._mask.saveas(str(file), compressed=True)
@classmethod
def union_all(cls, masked_regions: List[MaskedGenomicRegions]):
if len(masked_regions) == 0:
raise Exception('Cannot merge empty list')
elif len(masked_regions) == 1:
return masked_regions[0]
else:
start_mask = masked_regions.pop()
union = start_mask._mask.cat(*[o._mask for o in masked_regions], postmerge=True, force_truncate=True)
return MaskedGenomicRegions(union)
@classmethod
def from_sequences(cls, sequences: List[SeqRecord]) -> MaskedGenomicRegions:
def is_missing(char):
return char.upper() == 'N' or char == '-'
# pybedtools internally stores as 0-based BED file intervals
# https://daler.github.io/pybedtools/intervals.html#bed-is-0-based-others-are-1-based
mask_intervals = []
for record in sequences:
start = 0
in_mask = False
for idx, char in enumerate(record.seq):
if in_mask:
if not is_missing(char):
in_mask = False
# pybedtools stop position is not included in interval
stop = idx
mask_intervals.append((record.id, start, stop))
else:
if is_missing(char):
in_mask = True
start = idx
# Finish recording last interval if it exists (e.g., if last bit of sequence was like 'NNNN')
if in_mask:
stop = len(record)
mask_intervals.append((record.id, start, stop))
bedtool_intervals = BedTool(mask_intervals)
return MaskedGenomicRegions(bedtool_intervals)
@classmethod
def from_file(cls, file: Path) -> MaskedGenomicRegions:
bed_file_data = BedTool(str(file))
return MaskedGenomicRegions(bed_file_data)
@classmethod
def from_vcf_file(cls, file: Path) -> MaskedGenomicRegions:
bed_file_data = BedTool(str(file)).merge()
return MaskedGenomicRegions(bed_file_data)
@classmethod
def empty_mask(cls):
return MaskedGenomicRegions(BedTool('', from_string=True))
def is_empty(self):
return len(self) == 0
def sequence_names(self) -> Set[str]:
"""
Gets a set of sequence names from this genomic regions mask.
:return: A set of sequence names.
"""
return {x.chrom for x in self._mask}
def contains(self, sequence: str, position: int, start_position_index: str = '0') -> bool:
if start_position_index != '0' and start_position_index != '1':
raise Exception((f'Unknown value start_position_index=[{start_position_index}].'
'Should be "0" or "1" to indicate which is the starting base position'))
elif start_position_index == '1':
position = position - 1
for i in self._mask:
if i.chrom == sequence and i.start <= position < i.end:
return True
return False
def _validate_start_position_index(self, start_position_index: str) -> None:
if start_position_index not in ['0', '1']:
raise Exception((f'Unknown value start_position_index=[{start_position_index}].'
'Should be "0" or "1" to indicate which is the starting base position'))
def overlaps_range(self, sequence: str, start: int, stop: int, start_position_index: str = '0') -> bool:
self._validate_start_position_index(start_position_index)
if start_position_index == '1':
start = start - 1
stop = stop - 1
if stop <= start:
raise Exception(f'start=[{start}] is less than stop=[{stop}]')
for i in self._mask:
if i.chrom == sequence:
if i.start <= start and i.end > start:
return True
elif start < i.end and stop > i.end:
return True
return False
def positions_iter(self, start_position_index: str = '0') -> Generator[Tuple[str, int], None, None]:
    """
    Iterate over every ('sequence', 'position') covered by this mask.

    :param start_position_index: Whether positions should be in 0-base coordinates
        or 1-base coordinates.
        See https://bedtools.readthedocs.io/en/latest/content/general-usage.html#bed-format
        for a description of the differences in coordinates.
    :return: A generator yielding ('sequence', 'position') tuples for every
        position in this mask.
    """
    self._validate_start_position_index(start_position_index)
    # Shifting both bounds by the same offset preserves the interval length.
    offset = 1 if start_position_index == '1' else 0
    for interval in self._mask:
        for position in range(interval.start + offset, interval.end + offset):
            yield interval.chrom, position
def __len__(self) -> int:
"""
Calculates length of underlying masked intervals. Assumes the intervals have been merged beforehand.
:return: The length of the masked intervals.
"""
total = 0
for i in self._mask:
total += len(i)
return total | 0.886868 | 0.516656 |
import floppyforms.__future__ as forms
from django.utils.translation import ugettext_lazy as _
from django_backend.forms import FilterForm
from django_backend.forms import SearchFilterFormMixin
from django_backend.backend.base.backends import ModelBackend
from django_backend.backend.columns import BackendColumn
from django_backend import Group
from django_backend import site
# These are required for hooking up the custom "preview" view in the
# PostBackend.
from django_backend.backend.base.views import BackendReadView
from django_viewset import URLView
from .models import Author
from .models import Post
blog = Group('blog')
class PostFilterForm(SearchFilterFormMixin, FilterForm):
    # Free-text search (via SearchFilterFormMixin) targets these model fields.
    search_fields = ('title', 'text')
    # Optional author dropdown; leaving it empty applies no author filter.
    author = forms.ModelChoiceField(
        label=_('Author'),
        queryset=Author.objects.all(),
        required=False)

    def filter_author(self, queryset, author):
        # Invoked by the filter framework when an author was selected;
        # narrows the post list to that author.
        return queryset.filter(author=author)
class PostPreviewView(BackendReadView):
    # Read-only view used by PostBackend's "preview" URL below.
    def get_template_name(self, **kwargs):
        # Always render with the blog's dedicated preview template.
        return 'blog/post_preview.html'
class PostBackend(ModelBackend):
    # Filtering UI shown on the post list view.
    filter_form_class = PostFilterForm
    # Extra URL: /<pk>/preview/ renders the post via PostPreviewView.
    preview = URLView(r'^(?P<pk>\d+)/preview/$', PostPreviewView)

    def get_list_columns(self):
        """Extend the default list columns with author and preview columns."""
        columns = super(PostBackend, self).get_list_columns()
        columns.update({
            'author': BackendColumn(
                _('Author'),
                template_name='django_backend/blog/columns/_author.html',
                position=100,
                sort_field='author__name',
            ),
            'preview': BackendColumn(
                _('Preview'),
                template_name='django_backend/blog/columns/_preview.html',
                position=200
            ),
        })
        return columns
# Register the post backend under the "blog" group.
site.register(
    PostBackend,
    model=Post,
    id='post',
    group=blog,
)
# Here is an example of how to simply register a model as a backend by using
# the default model backend.
site.register(
    ModelBackend,
    model=Author,
    id='author',
    group=blog,
)
# We also want to make inline editing of the author possible from the post
# backend. That way we have more flexibility in the post backend to select the
# author, or create new ones from within the post backend.
site.register(
    ModelBackend,
    model=Author,
    registry='inline',
    id='author',
)
from django.utils.translation import ugettext_lazy as _
from django_backend.forms import FilterForm
from django_backend.forms import SearchFilterFormMixin
from django_backend.backend.base.backends import ModelBackend
from django_backend.backend.columns import BackendColumn
from django_backend import Group
from django_backend import site
# These are required for hooking up the custom "preview" view in the
# PostBackend.
from django_backend.backend.base.views import BackendReadView
from django_viewset import URLView
from .models import Author
from .models import Post
blog = Group('blog')
class PostFilterForm(SearchFilterFormMixin, FilterForm):
search_fields = ('title', 'text')
author = forms.ModelChoiceField(
label=_('Author'),
queryset=Author.objects.all(),
required=False)
def filter_author(self, queryset, author):
return queryset.filter(author=author)
class PostPreviewView(BackendReadView):
def get_template_name(self, **kwargs):
return 'blog/post_preview.html'
class PostBackend(ModelBackend):
filter_form_class = PostFilterForm
preview = URLView(r'^(?P<pk>\d+)/preview/$', PostPreviewView)
def get_list_columns(self):
columns = super(PostBackend, self).get_list_columns()
columns.update({
'author': BackendColumn(
_('Author'),
template_name='django_backend/blog/columns/_author.html',
position=100,
sort_field='author__name',
),
'preview': BackendColumn(
_('Preview'),
template_name='django_backend/blog/columns/_preview.html',
position=200
),
})
return columns
site.register(
PostBackend,
model=Post,
id='post',
group=blog,
)
# Here is an example of how to simply register a model as a backend by using
# the default model backend.
site.register(
ModelBackend,
model=Author,
id='author',
group=blog,
)
# We also want to make inline editing of the author possible from the post
# backend. That way we have more flexibility in the post backend to select the
# author, or create new ones from within the post backend.
site.register(
ModelBackend,
model=Author,
registry='inline',
id='author',
) | 0.539226 | 0.098903 |
import pytest
from boto3.dynamodb.conditions import Attr
from pcluster.aws.dynamo import DynamoResource
@pytest.fixture()
def mocked_dynamo_table(mocker):
    # Replace boto3.resource(...).Table(...) with a MagicMock so the tests
    # can assert on DynamoDB table calls without touching AWS.
    mock_table = mocker.MagicMock(autospec=True)
    mock_dynamo_resource = mocker.patch("boto3.resource")
    mock_dynamo_resource.return_value.Table.return_value = mock_table
    return mock_table
class TestDynamoDBResource:
    """Unit tests for DynamoResource's thin wrappers over the boto3 Table API."""

    def test_get_item(self, set_env, mocked_dynamo_table):
        """get_item must always request a consistent read."""
        set_env("AWS_DEFAULT_REGION", "us-east-1")
        key = {"Id": "MyKey"}
        DynamoResource().get_item("table_name", key)
        mocked_dynamo_table.get_item.assert_called_with(ConsistentRead=True, Key=key)

    def test_put_item(self, set_env, mocked_dynamo_table):
        """put_item forwards the item, adding ConditionExpression only when given."""
        set_env("AWS_DEFAULT_REGION", "us-east-1")
        item = {"item": "myItem"}
        DynamoResource().put_item("table_name", item=item)
        mocked_dynamo_table.put_item.assert_called_with(Item=item)
        condition_expression = Attr("status").eq(str("status"))
        DynamoResource().put_item("table_name", item=item, condition_expression=condition_expression)
        mocked_dynamo_table.put_item.assert_called_with(Item=item, ConditionExpression=condition_expression)

    def test_update_item(self, set_env, mocked_dynamo_table):
        """update_item forwards only the optional kwargs that were provided."""
        set_env("AWS_DEFAULT_REGION", "us-east-1")
        key = {"Id": "MyKey"}
        DynamoResource().update_item("table_name", key)
        mocked_dynamo_table.update_item.assert_called_with(Key=key)
        condition_expression = Attr("status").eq(str("status"))
        update_expression = "expression"
        expression_attribute_names = {"#dt": "name"}
        expression_attribute_values = {":s": "value"}
        DynamoResource().update_item(
            "table_name",
            key=key,
            update_expression=update_expression,
            expression_attribute_names=expression_attribute_names,
            expression_attribute_values=expression_attribute_values,
            condition_expression=condition_expression,
        )
        mocked_dynamo_table.update_item.assert_called_with(
            Key=key,
            UpdateExpression=update_expression,
            ExpressionAttributeNames=expression_attribute_names,
            ExpressionAttributeValues=expression_attribute_values,
            ConditionExpression=condition_expression,
        )
import pytest
from boto3.dynamodb.conditions import Attr
from pcluster.aws.dynamo import DynamoResource
@pytest.fixture()
def mocked_dynamo_table(mocker):
mock_table = mocker.MagicMock(autospec=True)
mock_dynamo_resource = mocker.patch("boto3.resource")
mock_dynamo_resource.return_value.Table.return_value = mock_table
return mock_table
class TestDynamoDBResource:
def test_get_item(self, set_env, mocked_dynamo_table):
set_env("AWS_DEFAULT_REGION", "us-east-1")
key = {"Id": "MyKey"}
DynamoResource().get_item("table_name", key)
mocked_dynamo_table.get_item.assert_called_with(ConsistentRead=True, Key=key)
def test_put_item(self, set_env, mocked_dynamo_table):
set_env("AWS_DEFAULT_REGION", "us-east-1")
item = {"item": "myItem"}
DynamoResource().put_item("table_name", item=item)
mocked_dynamo_table.put_item.assert_called_with(Item=item)
condition_expression = Attr("status").eq(str("status"))
DynamoResource().put_item("table_name", item=item, condition_expression=condition_expression)
mocked_dynamo_table.put_item.assert_called_with(Item=item, ConditionExpression=condition_expression)
def test_update_item(self, set_env, mocked_dynamo_table):
set_env("AWS_DEFAULT_REGION", "us-east-1")
key = {"Id": "MyKey"}
DynamoResource().update_item("table_name", key)
mocked_dynamo_table.update_item.assert_called_with(Key=key)
condition_expression = Attr("status").eq(str("status"))
update_expression = "expression"
expression_attribute_names = {"#dt": "name"}
expression_attribute_values = {":s": "value"}
DynamoResource().update_item(
"table_name",
key=key,
update_expression=update_expression,
expression_attribute_names=expression_attribute_names,
expression_attribute_values=expression_attribute_values,
condition_expression=condition_expression,
)
mocked_dynamo_table.update_item.assert_called_with(
Key=key,
UpdateExpression=update_expression,
ExpressionAttributeNames=expression_attribute_names,
ExpressionAttributeValues=expression_attribute_values,
ConditionExpression=condition_expression,
) | 0.475605 | 0.346541 |
import torch.nn as nn
import numpy as np
FAIRFACE_AE_N_UPSAMPLING_EXTRA = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 5, 7: 5}
class ResnetBlock(nn.Module):
    """Resnet block: output is x + conv_block(x) (identity skip connection).

    Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
    """

    def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Build the inner conv block; the skip connection lives in forward()."""
        super(ResnetBlock, self).__init__()
        self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)

    @staticmethod
    def _padding_layers(padding_type):
        """Return ([explicit pad modules], conv padding amount) for padding_type."""
        if padding_type == 'reflect':
            return [nn.ReflectionPad2d(1)], 0
        if padding_type == 'replicate':
            return [nn.ReplicationPad2d(1)], 0
        if padding_type == 'zero':
            return [], 1
        raise NotImplementedError('padding [%s] is not implemented' % padding_type)

    def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
        """Construct the two-convolution block used inside the residual branch.

        Parameters:
            dim (int)          -- number of channels in the conv layers.
            padding_type (str) -- name of padding layer: reflect | replicate | zero
            norm_layer         -- normalization layer constructor
            use_dropout (bool) -- whether to insert a Dropout(0.5) between convs.
            use_bias (bool)    -- whether the conv layers use bias.

        Returns an nn.Sequential of [pad?, conv, norm, relu, dropout?, pad?, conv, norm].
        """
        pads, p = self._padding_layers(padding_type)
        layers = list(pads)
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
        if use_dropout:
            layers += [nn.Dropout(0.5)]
        pads, p = self._padding_layers(padding_type)
        layers += list(pads)
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
        return nn.Sequential(*layers)

    def forward(self, x):
        """Apply the residual branch and add the skip connection."""
        return x + self.conv_block(x)
class Autoencoder(nn.Module):
    """Reconstruction network for features taken at a given split point.

    The depth of the down/upsampling stacks and the final refinement head are
    chosen from ``split_layer``; FAIRFACE_AE_N_UPSAMPLING_EXTRA maps the split
    point to the number of extra upsampling stages.
    """

    def __init__(self, input_nc, output_nc, split_layer, ngf=32, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=3, padding_type='reflect'):
        super(Autoencoder, self).__init__()
        # InstanceNorm has no learnable shift by default, so convs keep bias.
        use_bias = norm_layer == nn.InstanceNorm2d
        if split_layer > 6:
            # Deep split: features already small; just project channels.
            model = [nn.Conv2d(input_nc, ngf, kernel_size=1)]
        else:
            model = [#nn.ReflectionPad2d(1),
                     nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
                     norm_layer(ngf),
                     nn.ReLU(True)]
        n_downsampling = 4 if split_layer < 6 else 2
        # Special case for 9th block of resnet
        #n_downsampling, n_blocks = 0, 0
        for i in range(n_downsampling):  # add downsampling layers
            mult = 2 ** i
            model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
                      norm_layer(ngf * mult * 2), nn.ReLU(True)]
        mult = 2 ** n_downsampling
        for i in range(n_blocks):  # add ResNet blocks
            model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
        for i in range(n_downsampling):  # add upsampling layers
            mult = 2 ** (n_downsampling - i)
            model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1, bias=use_bias),
                      norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]
        n_upsampling_extra = FAIRFACE_AE_N_UPSAMPLING_EXTRA[split_layer] + 1  # +1 added for celeba split4
        for i in range(n_upsampling_extra):  # add upsampling layers
            model += [nn.ConvTranspose2d(ngf, ngf,
                                         kernel_size=3, stride=2,
                                         padding=1, output_padding=1, bias=use_bias),
                      norm_layer(ngf), nn.ReLU(True)]
            # Unpadded 3x3 convs after the 2nd/3rd extra upsample trim the
            # spatial size by 2 each; presumably to hit the target resolution
            # exactly — TODO confirm against the expected output size.
            if i == 1 or i == 2:
                model += [nn.Conv2d(ngf, ngf,
                                    kernel_size=3, stride=1, padding=0),
                          norm_layer(ngf), nn.ReLU(True)]#"""
        # Final refinement head: kernel sizes and padding differ per split point.
        if split_layer < 3:
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(ngf, ngf//2, kernel_size=7, padding=0)]
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
        elif split_layer < 5:
            model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]  # orig kernel_size=11, changed to 9 for celeba split4
            model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]  # orig kernel_size=9, changed to 7 for celeba split4
            model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
            model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
        elif split_layer ==5:
            model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]
            model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
            model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
            model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
        else:
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
            model += [nn.ReflectionPad2d(3)]
            model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
            model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
        #model += [nn.ReflectionPad2d(3)]
        #model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]
        #model += [nn.ReflectionPad2d(3)]
        #model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
        #model += [nn.ReflectionPad2d(3)]
        #model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
        #model += [nn.ReflectionPad2d(3)]
        #model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
        self.m = nn.Sequential(*model)

    def forward(self, x):
        # Equivalent to self.m(x); layers applied in registration order.
        for l in self.m:
            x = l(x)
        return x
class MinimalDecoder(nn.Module):
    # NOTE(review): this class appears broken as written — confirm before use:
    #  * only `torch.nn` is imported at module level (as `nn`), so the bare
    #    name `torch` below raises NameError at construction time;
    #  * DataParallel(*model, ...) unpacks the layer list as positional args
    #    instead of wrapping a single module;
    #  * the ConvTranspose2d input channels (64 * output_nc) do not match the
    #    preceding Conv2d output channels (1 * output_nc);
    #  * forward() indexes self.m[0]/self.m[1] (expecting a conv + linear,
    #    the linear is commented out above) and then also calls self.m(x).
    def __init__(self, input_nc, output_nc=3, input_dim=None, output_dim=None):
        super(MinimalDecoder, self).__init__()
        # Number of 2x upsamplings to go from input_dim to output_dim.
        upsampling_num = int(np.log2(output_dim // input_dim))
        model = [nn.Conv2d(input_nc, 1 * output_nc, kernel_size=1)]
        for num in range(upsampling_num):
            model += [nn.ConvTranspose2d(64 * output_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1)]
        #model += [nn.Linear(1 * output_nc * input_dim ** 2, output_nc * output_dim ** 2)]
        # self.m = nn.Sequential(*model)
        self.m = torch.nn.DataParallel(*model, device_ids=range(torch.cuda.device_count()))

    def forward(self, x):
        b = x.shape[0]
        x = self.m[0](x)
        x = x.view(b, -1)
        x = self.m[1](x)
        # Hard-coded 3x224x224 output shape — presumably ImageNet-sized RGB.
        x = x.view(b, 3, 224, 224)
        return self.m(x)
import numpy as np
FAIRFACE_AE_N_UPSAMPLING_EXTRA = {1: 1, 2: 1, 3: 2, 4: 2, 5: 3, 6: 5, 7: 5}
class ResnetBlock(nn.Module):
"""Define a Resnet block"""
def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Initialize the Resnet block
A resnet block is a conv block with skip connections
We construct a conv block with build_conv_block function,
and implement skip connections in <forward> function.
Original Resnet paper: https://arxiv.org/pdf/1512.03385.pdf
"""
super(ResnetBlock, self).__init__()
self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)
def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):
"""Construct a convolutional block.
Parameters:
dim (int) -- the number of channels in the conv layer.
padding_type (str) -- the name of padding layer: reflect | replicate | zero
norm_layer -- normalization layer
use_dropout (bool) -- if use dropout layers.
use_bias (bool) -- if the conv layer uses bias or not
Returns a conv block (with a conv layer, a normalization layer, and a non-linearity layer (ReLU))
"""
conv_block = []
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]
if use_dropout:
conv_block += [nn.Dropout(0.5)]
p = 0
if padding_type == 'reflect':
conv_block += [nn.ReflectionPad2d(1)]
elif padding_type == 'replicate':
conv_block += [nn.ReplicationPad2d(1)]
elif padding_type == 'zero':
p = 1
else:
raise NotImplementedError('padding [%s] is not implemented' % padding_type)
conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]
return nn.Sequential(*conv_block)
def forward(self, x):
"""Forward function (with skip connections)"""
out = x + self.conv_block(x) # add skip connections
return out
class Autoencoder(nn.Module):
def __init__(self, input_nc, output_nc, split_layer, ngf=32, norm_layer=nn.BatchNorm2d, use_dropout=False, n_blocks=3, padding_type='reflect'):
super(Autoencoder, self).__init__()
use_bias = norm_layer == nn.InstanceNorm2d
if split_layer > 6:
model = [nn.Conv2d(input_nc, ngf, kernel_size=1)]
else:
model = [#nn.ReflectionPad2d(1),
nn.Conv2d(input_nc, ngf, kernel_size=7, padding=0, bias=use_bias),
norm_layer(ngf),
nn.ReLU(True)]
n_downsampling = 4 if split_layer < 6 else 2
# Special case for 9th block of resnet
#n_downsampling, n_blocks = 0, 0
for i in range(n_downsampling): # add downsampling layers
mult = 2 ** i
model += [nn.Conv2d(ngf * mult, ngf * mult * 2, kernel_size=3, stride=2, padding=1, bias=use_bias),
norm_layer(ngf * mult * 2), nn.ReLU(True)]
mult = 2 ** n_downsampling
for i in range(n_blocks): # add ResNet blocks
model += [ResnetBlock(ngf * mult, padding_type=padding_type, norm_layer=norm_layer, use_dropout=use_dropout, use_bias=use_bias)]
for i in range(n_downsampling): # add upsampling layers
mult = 2 ** (n_downsampling - i)
model += [nn.ConvTranspose2d(ngf * mult, int(ngf * mult / 2),
kernel_size=3, stride=2,
padding=1, output_padding=1, bias=use_bias),
norm_layer(int(ngf * mult / 2)), nn.ReLU(True)]
n_upsampling_extra = FAIRFACE_AE_N_UPSAMPLING_EXTRA[split_layer] + 1 # +1 added for celeba split4
for i in range(n_upsampling_extra): # add upsampling layers
model += [nn.ConvTranspose2d(ngf, ngf,
kernel_size=3, stride=2,
padding=1, output_padding=1, bias=use_bias),
norm_layer(ngf), nn.ReLU(True)]
if i == 1 or i == 2:
model += [nn.Conv2d(ngf, ngf,
kernel_size=3, stride=1, padding=0),
norm_layer(ngf), nn.ReLU(True)]#"""
if split_layer < 3:
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, ngf//2, kernel_size=7, padding=0)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
elif split_layer < 5:
model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)] # orig kernel_size=11, changed to 9 for celeba split4
model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)] # orig kernel_size=9, changed to 7 for celeba split4
model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
elif split_layer ==5:
model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]
model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
else:
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
model += [nn.ReflectionPad2d(3)]
model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
#model += [nn.ReflectionPad2d(3)]
#model += [nn.Conv2d(ngf, ngf//2, kernel_size=9, padding=0)]
#model += [nn.ReflectionPad2d(3)]
#model += [nn.Conv2d(ngf//2, ngf//4, kernel_size=7, padding=0)]
#model += [nn.ReflectionPad2d(3)]
#model += [nn.Conv2d(ngf//4, output_nc, kernel_size=7, padding=0)]
#model += [nn.ReflectionPad2d(3)]
#model += [nn.Conv2d(output_nc, output_nc, kernel_size=7, padding=0)]
self.m = nn.Sequential(*model)
def forward(self, x):
for l in self.m:
x = l(x)
return x
class MinimalDecoder(nn.Module):
def __init__(self, input_nc, output_nc=3, input_dim=None, output_dim=None):
super(MinimalDecoder, self).__init__()
upsampling_num = int(np.log2(output_dim // input_dim))
model = [nn.Conv2d(input_nc, 1 * output_nc, kernel_size=1)]
for num in range(upsampling_num):
model += [nn.ConvTranspose2d(64 * output_nc, output_nc, kernel_size=3, stride=2, padding=1, output_padding=1)]
#model += [nn.Linear(1 * output_nc * input_dim ** 2, output_nc * output_dim ** 2)]
# self.m = nn.Sequential(*model)
self.m = torch.nn.DataParallel(*model, device_ids=range(torch.cuda.device_count()))
def forward(self, x):
b = x.shape[0]
x = self.m[0](x)
x = x.view(b, -1)
x = self.m[1](x)
x = x.view(b, 3, 224, 224)
return self.m(x) | 0.904445 | 0.551332 |
import pytest
from flask import session
def test_login(client, auth):
    """Successful login returns 200 and leaves a user_id token in the session."""
    with client:
        # test that successful login redirects to the index page
        response = auth.login()
        # login request set the user_id in the session
        # check that the user is loaded from the session
        assert response.status_code == 200
        # env/lib/python3.7/site-packages/flask_user/user_mixin.py:27
        # flask_user derives an opaque token from user_id and password and
        # stores it under session['user_id'].
        assert 'user_id' in session
@pytest.mark.parametrize(
    ('username', 'password', 'message'),
    (
        ('a', 'test', b'{\n "error": "Invalid phone num or password"\n}\n'),
        ('test', 'a', b'{\n "error": "Invalid phone num or password"\n}\n'),
    ),
)
def test_login_validate_input(auth, username, password, message):
    """Wrong username or wrong password both yield the same generic error body."""
    response = auth.login(username, password)
    assert message in response.data
def test_logout(client, auth):
    """Logout returns 200 and removes the user_id token from the session."""
    auth.login()
    with client:
        resp = auth.logout()
        assert resp.status_code == 200
        assert 'user_id' not in session
@pytest.mark.parametrize(
    ('phone_num',),
    (('13312341234',), ('+8613312341234',), ('13312345678',), ('+8613312345678',)),
)
def test_invalid_register(client, phone_num):
    """Registration with these phone numbers is rejected (422) and no session
    is created. Presumably the numbers are already taken or fail validation —
    verify against the registration endpoint's rules."""
    with client:
        # The phone_auth cookie marks the phone as having passed SMS verification.
        client.set_cookie('localhost', key='phone_auth', value='yse')
        response = client.post(
            '/api/auth/register',
            json={
                'phone_num': phone_num,
                'sex': 'male',
                'password': '<PASSWORD>',
                'first_name': '',
                'last_name': 'he',
                'email': '<EMAIL>',
            },
        )
        assert response.status_code == 422
        assert 'user_id' not in session
def test_valid_register(client):
    """A well-formed registration succeeds (200) and logs the new user in."""
    with client:
        # The phone_auth cookie marks the phone as having passed SMS verification.
        client.set_cookie('localhost', key='phone_auth', value='yse')
        response = client.post(
            '/api/auth/register',
            json={
                'phone_num': '+8613333333334',
                'sex': 'male',
                'password': '<PASSWORD>',
                'first_name': '',
                'last_name': 'he',
                'email': '<EMAIL>',
            },
        )
        assert response.status_code == 200
        assert 'user_id' in session
from flask import session
def test_login(client, auth):
with client:
# test that successful login redirects to the index page
response = auth.login()
# login request set the user_id in the session
# check that the user is loaded from the session
assert response.status_code == 200
# env/lib/python3.7/site-packages/flask_user/user_mixin.py:27
# flask_user combine user_id and password and generate a randomnumber of 'user_id'
assert 'user_id' in session
@pytest.mark.parametrize(
('username', 'password', 'message'),
(
('a', 'test', b'{\n "error": "Invalid phone num or password"\n}\n'),
('test', 'a', b'{\n "error": "Invalid phone num or password"\n}\n'),
),
)
def test_login_validate_input(auth, username, password, message):
response = auth.login(username, password)
assert message in response.data
def test_logout(client, auth):
auth.login()
with client:
resp = auth.logout()
assert resp.status_code == 200
assert 'user_id' not in session
@pytest.mark.parametrize(
('phone_num',),
(('13312341234',), ('+8613312341234',), ('13312345678',), ('+8613312345678',)),
)
def test_invalid_register(client, phone_num):
with client:
client.set_cookie('localhost', key='phone_auth', value='yse')
response = client.post(
'/api/auth/register',
json={
'phone_num': phone_num,
'sex': 'male',
'password': '<PASSWORD>',
'first_name': '',
'last_name': 'he',
'email': '<EMAIL>',
},
)
assert response.status_code == 422
assert 'user_id' not in session
def test_valid_register(client):
with client:
client.set_cookie('localhost', key='phone_auth', value='yse')
response = client.post(
'/api/auth/register',
json={
'phone_num': '+8613333333334',
'sex': 'male',
'password': '<PASSWORD>',
'first_name': '',
'last_name': 'he',
'email': '<EMAIL>',
},
)
assert response.status_code == 200
assert 'user_id' in session | 0.332419 | 0.324342 |
import json
import ssl
import urllib.request
# Ugly workaround: globally disable SSL certificate verification so the
# urlopen() calls below don't fail on hosts with broken certificate stores.
# WARNING: insecure — the server's identity is not authenticated.
ssl._create_default_https_context = ssl._create_unverified_context
# ╔╗ ╦ ╔╦╗ ╔═╗ ╔╦╗ ╔═╗ ╔╦╗ ╔═╗
# ╠╩╗ ║ ║ ╚═╗ ║ ╠═╣ ║║║ ╠═╝
# ╚═╝ ╩ ╩ ╚═╝ ╩ ╩ ╩ ╩ ╩ ╩
# Bitstamp pairs are hard-coded here (no API request is made for them).
raw_bitstamp_pairs = ["btcusd", "btceur", "eurusd", "xrpusd", "xrpeur",
                      "xrpbtc"]
standardized_bitstamp_pairs = ["BTC_USD", "BTC_EUR", "EUR_USD", "XRP_USD",
                               "XRP_EUR", "XRP_BTC"]
# ╦╔═ ╦═╗ ╔═╗ ╦╔═ ╔═╗ ╔╗╔
# ╠╩╗ ╠╦╝ ╠═╣ ╠╩╗ ║╣ ║║║
# ╩ ╩ ╩╚═ ╩ ╩ ╩ ╩ ╚═╝ ╝╚╝
# Fetch every asset pair from Kraken and normalise it to BASE_QUOTE form.
url = "https://api.kraken.com/0/public/AssetPairs"
raw_kraken_pairs = list()
standardized_kraken_pairs = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data["result"]:
        raw_kraken_pairs.append(currency)
        quote = json_data["result"][currency]["quote"][1:]  # remove the X or Z
        base = json_data["result"][currency]["base"]
        # Legacy Kraken asset names carry a leading X; strip it for these.
        old_naming = ("XETH", "XXBT", "XETC", "XLTC", "XICN", "XREP", "XXDG",
                      "XZEC", "XXLM", "XXMR", "XMLN", "XXRP")
        if base in old_naming:
            base = base[1:]  # remove the X
        # Kraken calls Bitcoin "XBT"; the rest of this script uses "BTC".
        if base == "XBT":
            base = "BTC"
        if quote == "XBT":
            quote = "BTC"
        # ".d" altnames get a "_d" quote suffix to stay distinguishable
        # (presumably dark-pool variants — confirm with Kraken's docs).
        if json_data["result"][currency]["altname"][-2:] == ".d":
            quote += "_d"
        standardized_kraken_pairs.append(base + "_" + quote)
# ╔═╗ ╔═╗ ╦ ╔═╗ ╔╗╔ ╦ ╔═╗ ═╗ ╦
# ╠═╝ ║ ║ ║ ║ ║ ║║║ ║ ║╣ ╔╩╦╝
# ╩ ╚═╝ ╩═╝ ╚═╝ ╝╚╝ ╩ ╚═╝ ╩ ╚═
# Fetch Poloniex tickers; the JSON keys are pair names like "USDT_BTC".
url = "https://poloniex.com/public?command=returnTicker"
raw_poloniex_pairs = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data:
        raw_poloniex_pairs.append(currency)
# conversion
# Poloniex lists the pricing currency first; swap the halves so pairs match
# the BASE_QUOTE convention used elsewhere in this script.
standardized_poloniex_pairs = list()
for pair in raw_poloniex_pairs:
    base, quote = pair.split('_', 1)
    standardized_poloniex_pairs.append(quote + "_" + base)
# ╔╗ ╦ ╔╦╗ ╔╦╗ ╦═╗ ╔═╗ ═╗ ╦
# ╠╩╗ ║ ║ ║ ╠╦╝ ║╣ ╔╩╦╝
# ╚═╝ ╩ ╩ ╩ ╩╚═ ╚═╝ ╩ ╚═
# Fetch Bittrex market summaries; market names look like "QUOTE-BASE".
url = "https://bittrex.com/api/v1.1/public/getmarketsummaries"
raw_bittrex_pairs = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data["result"]:
        raw_bittrex_pairs.append(currency["MarketName"])
# conversion
# Swap the dash-separated halves to the script's BASE_QUOTE convention.
standardized_bittrex_pairs = list()
for pair in raw_bittrex_pairs:
    base, quote = pair.split('-', 1)
    standardized_bittrex_pairs.append(quote + "_" + base)
# Generate all possible pairs
# Union of every exchange's standardized pairs, deduplicated via a set
# (replaces the O(n^2) `pair not in pairs` list scan) and sorted for
# deterministic output.
exchanges = [standardized_bitstamp_pairs, standardized_kraken_pairs,
             standardized_poloniex_pairs, standardized_bittrex_pairs]
pairs = sorted({pair for exchange in exchanges for pair in exchange})
# Emit the supported-pair list and, per exchange, Rust-style "m.insert(...)"
# lines mapping the standardized pair name to the exchange's raw pair string.
print("SUPPORTED PAIRS")
print("===============")
for pair in pairs:
    print(pair + ",")
print("\n\n\n")
print("BITSTAMP PAIRS")
print("==============")
for std, raw in zip(standardized_bitstamp_pairs, raw_bitstamp_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
print("\n\n\n")
print("KRAKEN PAIRS")
print("============")
for std, raw in zip(standardized_kraken_pairs, raw_kraken_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
print("\n\n\n")
print("POLONIEX PAIRS")
print("==============")
for std, raw in zip(standardized_poloniex_pairs, raw_poloniex_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
print("\n\n\n")
print("BITTREX PAIRS")
print("==============")
for std, raw in zip(standardized_bittrex_pairs, raw_bittrex_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
# CURRENCIES
# BITTREX
# Fetch Bittrex's currency list and emit Rust enum variants plus the two
# match arms (enum -> string, string -> enum) for each currency.
url = "https://bittrex.com/api/v1.1/public/getcurrencies"
bittrex_currencies = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data["result"]:
        print(currency["Currency"] + ",")
        bittrex_currencies.append(currency["Currency"])
# Currency enum -> Option<String>
for currency in bittrex_currencies:
    print("Currency::" + currency + " => Some(\"" +
          currency + "\".to_string()),")
# Currency str -> Option<Currency>
for currency in bittrex_currencies:
    print("\"" + currency + "\" => Some(Currency::" + currency + "),")
# Code-generation helper: scrape the tradable pairs and currencies from
# Bitstamp, Kraken, Poloniex and Bittrex, normalize them to a common
# "BASE_QUOTE" naming, and print Rust snippets (m.insert(...) lines and
# Currency match arms) ready to paste into a Rust source file.
import json
import ssl
import urllib.request

# ugly fix the ssl certificate bug
# NOTE(review): this disables TLS certificate verification process-wide;
# tolerable in a throwaway scraping script, never in production code.
ssl._create_default_https_context = ssl._create_unverified_context

# ╔╗ ╦ ╔╦╗ ╔═╗ ╔╦╗ ╔═╗ ╔╦╗ ╔═╗
# ╠╩╗ ║ ║ ╚═╗ ║ ╠═╣ ║║║ ╠═╝
# ╚═╝ ╩ ╩ ╚═╝ ╩ ╩ ╩ ╩ ╩ ╩
# Bitstamp's market list is small and fixed, so it is hard-coded.
raw_bitstamp_pairs = ["btcusd", "btceur", "eurusd", "xrpusd", "xrpeur",
                      "xrpbtc"]
standardized_bitstamp_pairs = ["BTC_USD", "BTC_EUR", "EUR_USD", "XRP_USD",
                               "XRP_EUR", "XRP_BTC"]

# ╦╔═ ╦═╗ ╔═╗ ╦╔═ ╔═╗ ╔╗╔
# ╠╩╗ ╠╦╝ ╠═╣ ╠╩╗ ║╣  ║║║
# ╩ ╩ ╩╚═ ╩ ╩ ╩ ╩ ╚═╝ ╝╚╝
url = "https://api.kraken.com/0/public/AssetPairs"
raw_kraken_pairs = list()
standardized_kraken_pairs = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data["result"]:
        raw_kraken_pairs.append(currency)
        quote = json_data["result"][currency]["quote"][1:]  # remove the X or Z
        base = json_data["result"][currency]["base"]
        # Kraken's legacy asset codes carry an extra X prefix; strip it.
        old_naming = ("XETH", "XXBT", "XETC", "XLTC", "XICN", "XREP", "XXDG",
                      "XZEC", "XXLM", "XXMR", "XMLN", "XXRP")
        if base in old_naming:
            base = base[1:]  # remove the X
        # Kraken calls bitcoin XBT; normalize to the common BTC ticker.
        if base == "XBT":
            base = "BTC"
        if quote == "XBT":
            quote = "BTC"
        # An altname ending in ".d" marks a market variant (presumably
        # Kraken's dark-pool markets — TODO confirm); keep it distinguishable.
        if json_data["result"][currency]["altname"][-2:] == ".d":
            quote += "_d"
        standardized_kraken_pairs.append(base + "_" + quote)

# ╔═╗ ╔═╗ ╦ ╔═╗ ╔╗╔ ╦ ╔═╗ ═╗ ╦
# ╠═╝ ║ ║ ║ ║ ║ ║║║ ║ ║╣ ╔╩╦╝
# ╩ ╚═╝ ╩═╝ ╚═╝ ╝╚╝ ╩ ╚═╝ ╩ ╚═
url = "https://poloniex.com/public?command=returnTicker"
raw_poloniex_pairs = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data:
        raw_poloniex_pairs.append(currency)
# conversion
# Poloniex names markets "QUOTE_BASE"; swap to the standard "BASE_QUOTE".
standardized_poloniex_pairs = list()
for pair in raw_poloniex_pairs:
    base, quote = pair.split('_', 1)
    standardized_poloniex_pairs.append(quote + "_" + base)

# ╔╗ ╦ ╔╦╗ ╔╦╗ ╦═╗ ╔═╗ ═╗ ╦
# ╠╩╗ ║ ║ ║ ╠╦╝ ║╣ ╔╩╦╝
# ╚═╝ ╩ ╩ ╩ ╩╚═ ╚═╝ ╩ ╚═
url = "https://bittrex.com/api/v1.1/public/getmarketsummaries"
raw_bittrex_pairs = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data["result"]:
        raw_bittrex_pairs.append(currency["MarketName"])
# conversion
# Bittrex names markets "QUOTE-BASE"; swap to the standard "BASE_QUOTE".
standardized_bittrex_pairs = list()
for pair in raw_bittrex_pairs:
    base, quote = pair.split('-', 1)
    standardized_bittrex_pairs.append(quote + "_" + base)

# Generate all possible pairs
# Union (order-preserving, deduplicated) of every exchange's pair list.
exchanges = [standardized_bitstamp_pairs, standardized_kraken_pairs,
             standardized_poloniex_pairs, standardized_bittrex_pairs]
pairs = list()
for exchange in exchanges:
    for pair in exchange:
        if pair not in pairs:
            pairs.append(pair)
pairs = sorted(pairs)

# Emit the generated Rust snippets, one section per exchange.
print("SUPPORTED PAIRS")
print("===============")
for pair in pairs:
    print(pair + ",")
print("\n\n\n")
print("BITSTAMP PAIRS")
print("==============")
for std, raw in zip(standardized_bitstamp_pairs, raw_bitstamp_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
print("\n\n\n")
print("KRAKEN PAIRS")
print("============")
for std, raw in zip(standardized_kraken_pairs, raw_kraken_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
print("\n\n\n")
print("POLONIEX PAIRS")
print("==============")
for std, raw in zip(standardized_poloniex_pairs, raw_poloniex_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))
print("\n\n\n")
print("BITTREX PAIRS")
print("==============")
for std, raw in zip(standardized_bittrex_pairs, raw_bittrex_pairs):
    print("m.insert({std}, \"{raw}\");".format(std=std, raw=raw))

# CURRENCIES
# BITTREX
url = "https://bittrex.com/api/v1.1/public/getcurrencies"
bittrex_currencies = list()
with urllib.request.urlopen(url) as response:
    html = response.read().decode("utf-8")
    json_data = json.loads(html)
    for currency in json_data["result"]:
        print(currency["Currency"] + ",")
        bittrex_currencies.append(currency["Currency"])
# Currency enum -> Option<String>
for currency in bittrex_currencies:
    print("Currency::" + currency + " => Some(\"" +
          currency + "\".to_string()),")
# Currency str -> Option<Currency>
for currency in bittrex_currencies:
    print("\"" + currency + "\" => Some(Currency::" + currency + "),")
import os
import glob
import argparse
import oneflow as flow
import transforms.spatial_transforms as ST
from image import *
from model import CSRNet
parser = argparse.ArgumentParser(description='Oneflow CSRNet')
parser.add_argument('modelPath', metavar='MODELPATH',type=str,
help='path to bestmodel')
parser.add_argument('picSrc', metavar='picSRC',type=str,
help='path to valPic')
def main():
    """Evaluate a trained CSRNet crowd-counting checkpoint on a ShanghaiTech
    test split and print the mean absolute error (MAE) over all test images.

    Command-line arguments (see the module-level parser):
        modelPath -- path of the saved oneflow checkpoint
        picSrc    -- 'part_A_test' or 'part_B_test'
    """
    # Validation-time preprocessing: to numpy + ImageNet normalization.
    transform = ST.Compose([
        ST.ToNumpyForVal(), ST.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225]),
    ])
    global args
    args = parser.parse_args()
    root = './dataset/'
    # now generate the ShanghaiA's ground truth
    part_A_train = os.path.join(root, 'part_A_final/train_data', 'images')
    part_A_test = os.path.join(root, 'part_A_final/test_data', 'images')
    part_B_train = os.path.join(root, 'part_B_final/train_data', 'images')
    part_B_test = os.path.join(root, 'part_B_final/test_data', 'images')
    path_sets = []
    if args.picSrc == 'part_A_test':
        path_sets = [part_A_test]
    elif args.picSrc == 'part_B_test':
        path_sets = [part_B_test]
    img_paths = []
    for path in path_sets:
        img_paths.extend(glob.glob(os.path.join(path, '*.jpg')))
    if not img_paths:
        # BUG FIX: the original divided by len(MAE) == 0 when no image matched.
        print("no test images found for picSrc={!r}".format(args.picSrc))
        return
    model = CSRNet()
    model = model.to("cuda")
    checkpoint = flow.load(args.modelPath)
    model.load_state_dict(checkpoint)
    MAE = []
    for img_path in img_paths:
        img = transform(Image.open(img_path).convert('RGB'))
        img = np.asarray(img).astype(np.float32)
        img = flow.Tensor(img, dtype=flow.float32, device="cuda")
        img = img.to("cuda")
        # BUG FIX: close the HDF5 ground-truth file (handle leaked before).
        with h5py.File(img_path.replace('.jpg', '.h5').replace('images', 'ground_truth'), 'r') as gt_file:
            groundtruth = np.asarray(gt_file['density'])
        with flow.no_grad():
            output = model(img.unsqueeze(0))
        mae = abs(output.detach().to("cpu").sum().numpy() - np.sum(groundtruth))[0]
        MAE.append(mae)
    avg_MAE = sum(MAE) / len(MAE)
    # BUG FIX: '{:2f}' is minimum-width 2 (six decimals); '{:.2f}' gives
    # the intended two decimal places.
    print("test result: MAE:{:.2f}".format(avg_MAE))
if __name__ == '__main__':
main() | CSRNet/val.py | import os
import glob
import argparse
import oneflow as flow
import transforms.spatial_transforms as ST
from image import *
from model import CSRNet
parser = argparse.ArgumentParser(description='Oneflow CSRNet')
parser.add_argument('modelPath', metavar='MODELPATH',type=str,
help='path to bestmodel')
parser.add_argument('picSrc', metavar='picSRC',type=str,
help='path to valPic')
def main():
    """Evaluate a CSRNet checkpoint on a ShanghaiTech test split, printing MAE."""
    # Validation-time preprocessing: to numpy + ImageNet normalization.
    transform = ST.Compose([
        ST.ToNumpyForVal(), ST.Normalize(mean=[0.485, 0.456, 0.406],
                                         std=[0.229, 0.224, 0.225]),
    ])
    global args
    args = parser.parse_args()
    root = './dataset/'
    # now generate the ShanghaiA's ground truth
    part_A_train = os.path.join(root, 'part_A_final/train_data', 'images')
    part_A_test = os.path.join(root, 'part_A_final/test_data', 'images')
    part_B_train = os.path.join(root, 'part_B_final/train_data', 'images')
    part_B_test = os.path.join(root, 'part_B_final/test_data', 'images')
    path_sets = []
    if args.picSrc == 'part_A_test':
        path_sets = [part_A_test]
    elif args.picSrc == 'part_B_test':
        path_sets = [part_B_test]
    img_paths = []
    for path in path_sets:
        for img_path in glob.glob(os.path.join(path, '*.jpg')):
            img_paths.append(img_path)
    model = CSRNet()
    model = model.to("cuda")
    checkpoint = flow.load(args.modelPath)
    model.load_state_dict(checkpoint)
    MAE = []
    for i in range(len(img_paths)):
        img = transform(Image.open(img_paths[i]).convert('RGB'))
        img = np.asarray(img).astype(np.float32)
        img = flow.Tensor(img, dtype=flow.float32, device="cuda")
        img = img.to("cuda")
        # NOTE(review): this h5py.File handle is never closed.
        gt_file = h5py.File(img_paths[i].replace('.jpg', '.h5').replace('images', 'ground_truth'), 'r')
        groundtruth = np.asarray(gt_file['density'])
        with flow.no_grad():
            output = model(img.unsqueeze(0))
        mae = abs(output.detach().to("cpu").sum().numpy() - np.sum(groundtruth))[0]
        MAE.append(mae)
    # NOTE(review): raises ZeroDivisionError when img_paths is empty, and
    # '{:2f}' is width-2 fixed-point — '{:.2f}' was probably intended.
    avg_MAE = sum(MAE) / len(MAE)
    print("test result: MAE:{:2f}".format(avg_MAE))
if __name__ == '__main__':
main() | 0.218503 | 0.227448 |
from typing import Any, Callable, Optional, Tuple
from _sysrepo import ffi, lib
from .util import c2str
# ------------------------------------------------------------------------------
class SysrepoError(Exception):
    """Base class for all sysrepo errors, mapping libsysrepo.so SR_ERR_*
    return codes to Python exception types."""

    # libsysrepo SR_ERR_* return code; overridden by each registered subclass.
    rc = None

    __slots__ = ("msg",)

    def __init__(self, msg: str):
        super().__init__()
        self.msg = msg

    def __str__(self):
        # Append libsysrepo's human-readable description of the error code.
        return "%s: %s" % (self.msg, c2str(lib.sr_strerror(self.rc)))

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.msg)

    # Registry mapping each SR_ERR_* code to the SysrepoError subclass.
    RC_CLASSES = {}

    @staticmethod
    def register(subclass):
        """Class decorator registering *subclass* under its ``rc`` code."""
        SysrepoError.RC_CLASSES[subclass.rc] = subclass
        return subclass

    @staticmethod
    def new(msg: str, rc: int) -> "SysrepoError":
        """Instantiate the SysrepoError subclass registered for *rc*."""
        err_class = SysrepoError.RC_CLASSES[rc]
        return err_class(msg)
# ------------------------------------------------------------------------------
# One SysrepoError subclass per libsysrepo SR_ERR_* return code; each is
# registered in SysrepoError.RC_CLASSES so SysrepoError.new() can map a
# return code to the matching exception type.
@SysrepoError.register
class SysrepoInvalArgError(SysrepoError):
    rc = lib.SR_ERR_INVAL_ARG


@SysrepoError.register
class SysrepoNomemError(SysrepoError):
    rc = lib.SR_ERR_NOMEM


@SysrepoError.register
class SysrepoNotFoundError(SysrepoError):
    rc = lib.SR_ERR_NOT_FOUND


@SysrepoError.register
class SysrepoInternalError(SysrepoError):
    rc = lib.SR_ERR_INTERNAL


@SysrepoError.register
class SysrepoUnsupportedError(SysrepoError):
    rc = lib.SR_ERR_UNSUPPORTED


@SysrepoError.register
class SysrepoValidationFailedError(SysrepoError):
    rc = lib.SR_ERR_VALIDATION_FAILED


@SysrepoError.register
class SysrepoOperationFailedError(SysrepoError):
    rc = lib.SR_ERR_OPERATION_FAILED


@SysrepoError.register
class SysrepoUnauthorizedError(SysrepoError):
    rc = lib.SR_ERR_UNAUTHORIZED


@SysrepoError.register
class SysrepoLockedError(SysrepoError):
    rc = lib.SR_ERR_LOCKED


@SysrepoError.register
class SysrepoTimeOutError(SysrepoError):
    rc = lib.SR_ERR_TIME_OUT


@SysrepoError.register
class SysrepoLyError(SysrepoError):
    rc = lib.SR_ERR_LY


@SysrepoError.register
class SysrepoSysError(SysrepoError):
    rc = lib.SR_ERR_SYS


@SysrepoError.register
class SysrepoExistsError(SysrepoError):
    rc = lib.SR_ERR_EXISTS


@SysrepoError.register
class SysrepoCallbackFailedError(SysrepoError):
    rc = lib.SR_ERR_CALLBACK_FAILED


@SysrepoError.register
class SysrepoCallbackShelveError(SysrepoError):
    rc = lib.SR_ERR_CALLBACK_SHELVE
# ------------------------------------------------------------------------------
def _get_error_msg(session) -> Optional[str]:
    """
    Get the error message information from the given session C pointer.

    :arg "sr_session_ctx_t *" session:
        A session C pointer allocated by libsysrepo.so.
    :returns:
        A comma-separated summary of the session's pending errors, or None
        when sr_get_error() fails or no error info is attached.
    """
    msg = None
    err_info_p = ffi.new("sr_error_info_t **")
    if lib.sr_get_error(session, err_info_p) == lib.SR_ERR_OK:
        err_info = err_info_p[0]
        error_strings = []
        if err_info != ffi.NULL:
            # Collect "xpath: message" (whichever parts are set) per error.
            for i in range(err_info.err_count):
                err = err_info.err[i]
                strings = []
                if err.xpath:
                    strings.append(c2str(err.xpath))
                if err.message:
                    strings.append(c2str(err.message))
                if strings:
                    error_strings.append(": ".join(strings))
            msg = ", ".join(error_strings)
    return msg
# ------------------------------------------------------------------------------
def check_call(
    func: Callable[..., int],
    *args: Any,
    valid_codes: Tuple[int, ...] = (lib.SR_ERR_OK,)
) -> int:
    """
    Invoke a libsysrepo.so function and turn failure codes into exceptions.

    :arg func:
        A libsysrepo.so function expected to return an int error code.
    :arg args:
        Positional arguments forwarded to *func*.
    :arg valid_codes:
        Return values treated as success; anything else raises.
    :returns:
        The SR_ERR_* code returned by *func*.
    :raises SysrepoError:
        When *func* returns a code outside *valid_codes*. If the first
        argument is a sr_session_ctx_t pointer, a detailed message is pulled
        from the session via sr_get_error().
    """
    ret = func(*args)
    if ret in valid_codes:
        return ret
    # Failure path: try to enrich the message from the session, if any.
    detail = None
    if (args
            and isinstance(args[0], ffi.CData)
            and ffi.typeof(args[0]) == ffi.typeof("sr_session_ctx_t *")):
        detail = _get_error_msg(args[0])
    raise SysrepoError.new(detail or "%s failed" % func.__name__, ret)
from typing import Any, Callable, Optional, Tuple
from _sysrepo import ffi, lib
from .util import c2str
# ------------------------------------------------------------------------------
class SysrepoError(Exception):
    """Base sysrepo exception; subclasses map libsysrepo SR_ERR_* codes."""

    # SR_ERR_* code, set by each registered subclass.
    rc = None

    __slots__ = ("msg",)

    def __init__(self, msg: str):
        super().__init__()
        self.msg = msg

    def __str__(self):
        # Append libsysrepo's description of the error code.
        return "%s: %s" % (self.msg, c2str(lib.sr_strerror(self.rc)))

    def __repr__(self):
        return "%s(%r)" % (type(self).__name__, self.msg)

    # code -> subclass registry consumed by new().
    RC_CLASSES = {}

    @staticmethod
    def register(subclass):
        # Class decorator: file *subclass* under its rc code.
        SysrepoError.RC_CLASSES[subclass.rc] = subclass
        return subclass

    @staticmethod
    def new(msg: str, rc: int) -> "SysrepoError":
        # Build an instance of the subclass registered for *rc*.
        err_class = SysrepoError.RC_CLASSES[rc]
        return err_class(msg)
# ------------------------------------------------------------------------------
# One SysrepoError subclass per libsysrepo SR_ERR_* code, registered so
# SysrepoError.new() can dispatch on the return code.
@SysrepoError.register
class SysrepoInvalArgError(SysrepoError):
    rc = lib.SR_ERR_INVAL_ARG


@SysrepoError.register
class SysrepoNomemError(SysrepoError):
    rc = lib.SR_ERR_NOMEM


@SysrepoError.register
class SysrepoNotFoundError(SysrepoError):
    rc = lib.SR_ERR_NOT_FOUND


@SysrepoError.register
class SysrepoInternalError(SysrepoError):
    rc = lib.SR_ERR_INTERNAL


@SysrepoError.register
class SysrepoUnsupportedError(SysrepoError):
    rc = lib.SR_ERR_UNSUPPORTED


@SysrepoError.register
class SysrepoValidationFailedError(SysrepoError):
    rc = lib.SR_ERR_VALIDATION_FAILED


@SysrepoError.register
class SysrepoOperationFailedError(SysrepoError):
    rc = lib.SR_ERR_OPERATION_FAILED


@SysrepoError.register
class SysrepoUnauthorizedError(SysrepoError):
    rc = lib.SR_ERR_UNAUTHORIZED


@SysrepoError.register
class SysrepoLockedError(SysrepoError):
    rc = lib.SR_ERR_LOCKED


@SysrepoError.register
class SysrepoTimeOutError(SysrepoError):
    rc = lib.SR_ERR_TIME_OUT


@SysrepoError.register
class SysrepoLyError(SysrepoError):
    rc = lib.SR_ERR_LY


@SysrepoError.register
class SysrepoSysError(SysrepoError):
    rc = lib.SR_ERR_SYS


@SysrepoError.register
class SysrepoExistsError(SysrepoError):
    rc = lib.SR_ERR_EXISTS


@SysrepoError.register
class SysrepoCallbackFailedError(SysrepoError):
    rc = lib.SR_ERR_CALLBACK_FAILED


@SysrepoError.register
class SysrepoCallbackShelveError(SysrepoError):
    rc = lib.SR_ERR_CALLBACK_SHELVE
# ------------------------------------------------------------------------------
def _get_error_msg(session) -> Optional[str]:
    """
    Get the error message information from the given session C pointer.

    :arg "sr_session_ctx_t *" session:
        A session C pointer allocated by libsysrepo.so.
    :returns:
        Comma-separated error summary, or None if nothing is available.
    """
    msg = None
    err_info_p = ffi.new("sr_error_info_t **")
    if lib.sr_get_error(session, err_info_p) == lib.SR_ERR_OK:
        err_info = err_info_p[0]
        error_strings = []
        if err_info != ffi.NULL:
            # Join "xpath: message" fragments for every attached error.
            for i in range(err_info.err_count):
                err = err_info.err[i]
                strings = []
                if err.xpath:
                    strings.append(c2str(err.xpath))
                if err.message:
                    strings.append(c2str(err.message))
                if strings:
                    error_strings.append(": ".join(strings))
            msg = ", ".join(error_strings)
    return msg
# ------------------------------------------------------------------------------
def check_call(
    func: Callable[..., int],
    *args: Any,
    valid_codes: Tuple[int, ...] = (lib.SR_ERR_OK,)
) -> int:
    """
    Wrapper around functions of libsysrepo.so.

    :arg func:
        A function from libsysrepo.so that is expected to return an int error
        code.
    :arg args:
        Positional arguments for the function.
    :arg valid_codes:
        Error code values that are considered as a "success". If the function
        returns a value not listed here, a SysrepoError exception will be risen.
    :returns:
        An error code SR_ERR_*.
    :raises SysrepoError:
        If the function returned an error code not listed in valid_codes. If
        the first argument in args is a sr_session_ctx_t object, use it to call
        sr_get_error() to get a detailed error message for the risen exception.
    """
    ret = func(*args)
    if ret not in valid_codes:
        msg = None
        # When the first argument is a session pointer, pull the detailed
        # error description out of the session.
        if (
            args
            and isinstance(args[0], ffi.CData)
            and ffi.typeof(args[0]) == ffi.typeof("sr_session_ctx_t *")
        ):
            msg = _get_error_msg(args[0])
        if not msg:
            msg = "%s failed" % func.__name__
        raise SysrepoError.new(msg, ret)
    return ret
import logging
import sys
import ccxt
import release_trader.webscaper as ws
websites = [
# 'binance',
# 'coinbase',
"gateio",
# 'kraken'
]
logging.basicConfig(
level=logging.INFO,
)
log_formatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
root_logger = logging.getLogger(__name__)
file_handler = logging.FileHandler("{0}/{1}.log".format(".", "release_trader"))
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_formatter)
console_handler.setLevel(logging.DEBUG)
root_logger.addHandler(console_handler)
def check_websites(
    crypto: list[str], verbose: bool = False, testing: bool = False
) -> list[str]:
    """Check if websites (gate.io) lists the newly found coins.

    Args:
        crypto: list
            List of coins to check if they are tradeable on gate.io.
        verbose: bool
            If True, print what is going on in more detail
        testing: bool
            Do not write to the history file if the test suit is calling the function

    Returns:
        pairs: list
            List containing the available pairs: USDT or ETH with new coin
    """
    found = []
    pairs = []
    for w in websites:
        try:
            e = getattr(ccxt, w)()
            # FIX: fetch the market list once per exchange — the original
            # called load_markets() a second time just for the verbose print.
            markets = e.load_markets()
            if verbose:
                print(f"Searching among {len(markets)} markets on {w}...")
            for m in markets:
                msp = m.split("/")
                # FIX: skip market ids without a '/' (would raise IndexError).
                if len(msp) < 2:
                    continue
                if "USDT" in msp[1] and msp[0] in crypto:
                    print(f"Found {m}, ready to trade!")
                    found.append(msp[0])
                    pairs.append(m)
            # FIX: typo "mathch" -> "match" in the status message.
            print(f"No match for coin(s) {list(set(crypto) - set(found))}")
        except Exception:
            root_logger.warning(f"Could not get markets/list of coins from {w}")
    # TODO: history.txt should perhaps be binary/zipped so it is harder to change it.
    # FIX: read the history file through its own context manager (the original
    # leaked the read handle) and use a set for O(1) membership tests.
    with open("src/release_trader/history.txt") as hist:
        allready_in = {line.strip() for line in hist}
    if not testing:
        with open("src/release_trader/history.txt", "a") as f:
            for found_coin in crypto:
                if found_coin not in allready_in:
                    f.write(f"{found_coin}\n")
    return pairs
def new_crypto(verbose=False, test_coin: str = "none") -> list[str]:
    """Check if coins have been traded allready.

    A history file listing all coins that have been found by the program is
    check, in addition to binance and coinbase themselves.

    Args:
        verbose: bool
            If True, print more info.
        test_coin: str
            Used within the test suite. Send in a dummy coin to see if it is removed

    Returns:
        list: Coins that are new to the program.
    """
    # Get the newly listed crypto from both websites and combine
    if test_coin == "none":
        crypto = ws.navigate_binance() + ws.navigate_coinbase()
    else:
        crypto = [test_coin]
    # Check the history file
    # FIX: close the history file via a context manager (handle was leaked).
    with open("src/release_trader/history.txt") as hist:
        allready_in = {line.strip() for line in hist}
    intersect = allready_in & set(crypto)
    if not intersect:
        new_coin = list(crypto)
    else:
        new_coin = list(set(crypto) - intersect)
    # Check binance and coinbase
    for w in ["binance", "coinbase"]:
        try:
            e = getattr(ccxt, w)()
            markets = set(e.load_markets())
            # FIX: the original removed items from new_coin while iterating
            # it, which silently skips elements; rebuild the list instead.
            new_coin = [coin for coin in new_coin
                        if f"{coin}/USDT" not in markets]
        except Exception:
            root_logger.warning(f"Could not get markets/list of coins from {w}")
    # Check if the coins are listed on gateio and more, then return coins that
    # are tradeable
    if new_coin:
        return check_websites(new_coin)
    if verbose:
        print("found nothing new")
    return []
if __name__ == "__main__":
print("check_availability.__main__")
print(new_crypto()) | src/release_trader/check_availability.py | import logging
import sys
import ccxt
import release_trader.webscaper as ws
websites = [
# 'binance',
# 'coinbase',
"gateio",
# 'kraken'
]
logging.basicConfig(
level=logging.INFO,
)
log_formatter = logging.Formatter(
"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s"
)
root_logger = logging.getLogger(__name__)
file_handler = logging.FileHandler("{0}/{1}.log".format(".", "release_trader"))
file_handler.setFormatter(log_formatter)
root_logger.addHandler(file_handler)
console_handler = logging.StreamHandler(sys.stdout)
console_handler.setFormatter(log_formatter)
console_handler.setLevel(logging.DEBUG)
root_logger.addHandler(console_handler)
def check_websites(
    crypto: list[str], verbose: bool = False, testing: bool = False
) -> list[str]:
    """Check if websites (gate.io) lists the newly found coins.

    Args:
        crypto: list
            List of coins to check if they are tradeable on gate.io.
        verbose: bool
            If True, print what is going on in more detail
        testing: bool
            Do not write to the history file if the test suit is calling the function

    Returns:
        pairs: list
            List containing the available pairs: USDT or ETH with new coin
    """
    found = []
    pairs = []
    for w in websites:
        try:
            e = getattr(ccxt, w)()
            # NOTE(review): load_markets() is fetched twice when verbose.
            if verbose:
                print(f"Searching among {len(e.load_markets())} markets on {w}...")
            for m in e.load_markets():
                msp = m.split("/")
                if "USDT" in msp[1]:
                    if msp[0] in crypto:
                        print(f"Found {m}, ready to trade!")
                        found.append(msp[0])
                        pairs.append(m)
            print(f"No mathch for coin(s) {list(set(crypto) - set(found))}")
        except Exception:
            root_logger.warning(f"Could not get markets/list of coins from {w}")
    # TODO: history.txt should perhaps be binary/zipped so it is harder to change it.
    with open("src/release_trader/history.txt", "a") as f:
        # NOTE(review): this inner open() read handle is never closed.
        allready_in = list(
            line.strip() for line in open("src/release_trader/history.txt")
        )
        for found_coin in crypto:
            if found_coin not in allready_in and not testing:
                f.write(f"{found_coin}\n")
    return pairs
def new_crypto(verbose=False, test_coin: str = "none") -> list[str]:
    """Check if coins have been traded allready.

    A history file listing all coins that have been found by the program is
    check, in addition to binance and coinbase themselves.

    Args:
        verbose: bool
            If True, print more info.
        test_coin: str
            Used within the test suite. Send in a dummy coin to see if it is removed

    Returns:
        list: Coins that are new to the program.
    """
    # Get the newly listed crypto from both websites and combine
    if test_coin == "none":
        crypto_b = ws.navigate_binance()
        crypto_c = ws.navigate_coinbase()
        crypto = crypto_b + crypto_c
    else:
        crypto = [test_coin]
    # Check the history file
    # NOTE(review): the open() handle below is never closed.
    allready_in = {line.strip() for line in open("src/release_trader/history.txt")}
    intersect = allready_in & set(crypto)
    new_coin = []
    if len(intersect) == 0:
        new_coin = crypto
    else:
        for item in set(crypto) - intersect:
            new_coin.append(item)
    # Check binance and coinbase
    for w in ["binance", "coinbase"]:
        try:
            e = getattr(ccxt, w)()
            markets = set(e.load_markets())
            # NOTE(review): removing from new_coin while iterating it skips
            # elements; this loop can miss coins already listed on USDT.
            for coin in new_coin:
                if f"{coin}/USDT" in markets:
                    new_coin.remove(coin)
        except Exception:
            root_logger.warning(f"Could not get markets/list of coins from {w}")
    # Check if the coins are listed on gateio and more, then return coins that
    # are tradeable
    if len(new_coin) != 0:
        found = check_websites(new_coin)
        return found
    if verbose:
        print("found nothing new")
    return []
if __name__ == "__main__":
print("check_availability.__main__")
print(new_crypto()) | 0.385143 | 0.141252 |
import csv
import bz2
import pickle
from hashlib import sha256
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_ALPHABET_INDEX = {char: index for index, char in enumerate(BASE58_ALPHABET)}
def hex_to_bytes(hexed):
    """Decode a hex string into bytes, left-padding odd-length input with '0'."""
    padded = hexed if len(hexed) % 2 == 0 else '0' + hexed
    return bytes.fromhex(padded)
def int_to_unknown_bytes(num, byteorder='big'):
    """Converts an int to the least number of bytes as possible."""
    length = (num.bit_length() + 7) // 8
    # Zero has bit_length 0 but still needs one byte.
    return num.to_bytes(max(length, 1), byteorder)
def double_sha256(bytestr):
    """Return SHA-256 applied twice to *bytestr* (Bitcoin's SHA-256d)."""
    first_pass = sha256(bytestr).digest()
    return sha256(first_pass).digest()
def double_sha256_checksum(bytestr):
    """Return the first 4 bytes of SHA-256d(bytestr): the Base58Check checksum."""
    # Double-hash inlined here so the function is self-contained.
    return sha256(sha256(bytestr).digest()).digest()[:4]
def b58decode(string):
    """Decode a base58 string to bytes; each leading '1' yields a zero byte.

    Raises ValueError on any character outside the base58 alphabet.
    """
    # Alphabet index built locally so the function is self-contained.
    alphabet_index = {c: i for i, c in enumerate(
        '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz')}
    num = 0
    try:
        for char in string:
            num = num * 58 + alphabet_index[char]
    except KeyError:
        raise ValueError('"{}" is an invalid base58 encoded '
                         'character.'.format(char)) from None
    # Smallest big-endian representation (one byte minimum, even for 0).
    bytestr = num.to_bytes((num.bit_length() + 7) // 8 or 1, 'big')
    pad = 0
    for char in string:
        if char != '1':
            break
        pad += 1
    return b'\x00' * pad + bytestr
def b58decode_check(string):
    """Decode a Base58Check string, verifying and stripping its 4-byte checksum."""
    decoded = b58decode(string)
    payload, trailing = decoded[:-4], decoded[-4:]
    expected = double_sha256_checksum(payload)
    if trailing != expected:
        raise ValueError('Decoded checksum {} derived from "{}" is not equal to hash '
                         'checksum {}.'.format(trailing, string, expected))
    return payload
# Build pickled sets (up to 1M entries each) of 20-byte HASH160 payloads
# from a CSV of addresses (column 0: address, column 1: integer amount) and
# a few hard-coded test addresses.
with open('test.csv', newline='') as csvfile:
    spamreader = csv.reader(csvfile)
    hash160_list = []
    test_list = ['157RMZhbBLC1wucv3jxQqqHjbKezL1yy7g','1GL37AoxoUj45fPNYf8Dq5CTncyRYLqo7','3Q8dZUbatx5FC5CdQYRLg7gDnkQec5Pvp8','3JvL6Ymt8MVWiCNHC7oWU6nLeHNJKLZGLN'] # low entropy keys to test the sets
    for x in test_list:
        hash160 = b58decode_check(x)
        hash160_list.append(hash160[1:])  # drop the 1-byte version prefix
    for row in spamreader:
        try:
            address = row[0]
            amount = int(row[1])  # validates the amount column; value unused
            hash160 = b58decode_check(address)
            hash160_list.append(hash160[1:])
        except Exception as e:
            # Skip malformed rows but show why.
            print(e)
            continue
    print("total_len", len(hash160_list))
    # Split into chunks of at most one million entries per output file.
    new_list = [hash160_list[i:i + 1000000] for i in range(0, len(hash160_list), 1000000)]
    print("new_list", len(new_list))
    for file_num, x in enumerate(new_list):
        # FIX: open each archive in a `with` block so every file is flushed
        # and closed (the original called f.close() only once, leaving all
        # but the last bz2 archive unclosed and possibly truncated).
        with bz2.BZ2File(f'{file_num}.set', 'wb') as f:
            pickle.dump(set(x), f)
import bz2
import pickle
from hashlib import sha256
BASE58_ALPHABET = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
BASE58_ALPHABET_INDEX = {char: index for index, char in enumerate(BASE58_ALPHABET)}
def hex_to_bytes(hexed):
    # Decode a hex string, left-padding odd-length input with a '0'.
    if len(hexed) & 1:
        hexed = '0' + hexed
    return bytes.fromhex(hexed)


def int_to_unknown_bytes(num, byteorder='big'):
    """Converts an int to the least number of bytes as possible."""
    return num.to_bytes((num.bit_length() + 7) // 8 or 1, byteorder)


def double_sha256(bytestr):
    # SHA-256 applied twice (Bitcoin's SHA-256d).
    r = sha256(sha256(bytestr).digest()).digest()
    return r


def double_sha256_checksum(bytestr):
    # First 4 bytes of SHA-256d: the Base58Check checksum.
    r = double_sha256(bytestr)[:4]
    return r


def b58decode(string):
    # Base58 decode; each leading '1' becomes a leading zero byte.
    alphabet_index = BASE58_ALPHABET_INDEX
    num = 0
    try:
        for char in string:
            num *= 58
            num += alphabet_index[char]
    except KeyError:
        raise ValueError('"{}" is an invalid base58 encoded '
                         'character.'.format(char)) from None
    bytestr = int_to_unknown_bytes(num)
    pad = 0
    for char in string:
        if char == '1':
            pad += 1
        else:
            break
    return b'\x00' * pad + bytestr


def b58decode_check(string):
    # Base58Check decode: verify and strip the trailing 4-byte checksum.
    decoded = b58decode(string)
    shortened = decoded[:-4]
    decoded_checksum = decoded[-4:]
    hash_checksum = double_sha256_checksum(shortened)
    if decoded_checksum != hash_checksum:
        raise ValueError('Decoded checksum {} derived from "{}" is not equal to hash '
                         'checksum {}.'.format(decoded_checksum, string, hash_checksum))
    return shortened
# Build pickled sets (1M entries each) of HASH160 payloads from test.csv.
with open('test.csv', newline='') as csvfile:
    spamreader = csv.reader(csvfile)
    hash160_list = []
    test_list = ['157RMZhbBLC1wucv3jxQqqHjbKezL1yy7g','1GL37AoxoUj45fPNYf8Dq5CTncyRYLqo7','3Q8dZUbatx5FC5CdQYRLg7gDnkQec5Pvp8','3JvL6Ymt8MVWiCNHC7oWU6nLeHNJKLZGLN'] # low entropy keys to test the sets
    for x in test_list:
        hash160 = b58decode_check(x)
        hash160_list.append(hash160[1:])  # drop the 1-byte version prefix
    for row in spamreader:
        try:
            address = row[0]
            amount = int(row[1])  # validates column 1; value otherwise unused
            hash160 = b58decode_check(address)
            hash160_list.append(hash160[1:])
        except Exception as e:
            print(e)
            continue
    print("total_len",len(hash160_list))
    new_list = [hash160_list[i:i+1000000] for i in range(0, len(hash160_list), 1000000)]
    print("new_list",len(new_list))
    file_num = 0
    for x in new_list:
        f = bz2.BZ2File(f'{file_num}.set', 'wb')
        pickle.dump(set(x), f)
        file_num += 1
    # NOTE(review): this close() only closes the last archive opened above;
    # each BZ2File should be opened in a `with` block so all are flushed.
    f.close()
import scipy as sp
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
import numpy as np
import math
from discretelognorm import discretelognorm
def reservation_wage():
    """Solve a job-search model by value-function iteration and save a plot
    of the reservation wage as 'reservation_wage.pdf'."""
    m = 20        # mean parameter of the discretized lognormal offer distribution
    v = 200       # variance parameter of the offer distribution
    N = 500       # number of points on the wage grid
    Wmax = 100
    Wmin = 0
    gamma = .10   # probability of separation while employed
    alpha = .5    # unemployment compensation as a fraction of the wage
    beta = .9     # discount factor
    e_params = (m, v)
    u = lambda c: np.sqrt(c)
    w = np.linspace(Wmin, Wmax, N)
    uaw = u(alpha*w).reshape((N,1))    # flow utility while unemployed
    uw = u(w)                          # flow utility while employed
    f = discretelognorm(w, *e_params)  # discretized wage-offer distribution
    VE = np.zeros(N)
    EVU = np.zeros(N)
    VU = np.zeros((N,N))
    MVE = np.empty((N,N)) #tiled version of VE
    MEVU = np.empty((N,N)) #tiled version of EVU
    delta = 1.
    i = 0
    # Iterate the Bellman operators until the squared change is < 1e-9.
    while delta >= 1e-9:
        i += 1
        #update tiled value functions
        MVE[:,:] = VE.reshape((1,N))
        MEVU[:,:] = EVU.reshape((N,1))
        #calculate new value functions
        VU1 = uaw + beta*np.max(np.dstack([MEVU, MVE]), axis=2)
        VE1 = uw + beta*((1-gamma)*VE + gamma*EVU)
        #test for convergence
        d1 = ((VE1-VE)**2).sum()
        d2 = ((VU1-VU)**2).sum()
        delta = max(d1,d2)
        #update
        VU = VU1
        VE = VE1
        EVU = np.dot(VU,f).ravel()
    #calculate policy function (argmax index 1 = accept the offer)
    PSI = np.argmax(np.dstack([MEVU,MVE]), axis=2)
    #calculate and plot reservation wage function
    wr_ind = np.argmax(np.diff(PSI), axis = 1)
    wr = w[wr_ind]
    plt.plot(w,wr)
    plt.savefig('reservation_wage.pdf')
    plt.clf()
#plot discrete policy function
def disc_policy():
    """Compute the discrete job-search policy by modified policy iteration
    and save a plot of one row of the policy function as 'disc_policy.pdf'.

    Fix: the original printed with a Python 2 print *statement*
    (``print w[250],wr[250]``), a SyntaxError under Python 3; it now uses
    the print() function with the same output.
    """
    #First compute policy function...==========================================
    N = 500
    w = sp.linspace(0, 100, N)
    w = w.reshape(N, 1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util, N, 1)
    m = 20
    v = 200
    f = discretelognorm(w, m, v)
    VEprime = sp.zeros((N, 1))
    VUprime = sp.zeros((N, N))
    EVUprime = sp.zeros((N, 1))
    psiprime = sp.ones((N, 1))
    gamma = 0.1
    beta = 0.9
    m = 15  # inner value-update sweeps per policy update (rebinds the earlier m)
    tol = 10**-9
    delta = 1 + tol
    it = 0
    # Policy iteration: update the policy, then do m value sweeps under it.
    while delta >= tol:
        it += 1
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime), N, 0)
        arg2 = sp.repeat(EVUprime, N, 1)
        arg = sp.array([arg2, arg1])
        psiprime = sp.argmax(arg, axis=0)  # 1 = accept offer, 0 = keep searching
        for j in sp.arange(0, m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE), N, 0)*psiprime
            arg2 = sp.repeat(EVU, N, 1)*(1-psiprime)
            arg = arg1 + arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime, f)
        delta = sp.linalg.norm(psiprime - psi)
    wr_ind = sp.argmax(sp.diff(psiprime), axis=1)
    wr = w[wr_ind]
    # BUG FIX: was a Python 2 print statement (SyntaxError on Python 3).
    print(w[250], wr[250])
    #Then plot=================================================================
    plt.plot(w, psiprime[250, :])
    plt.ylim([-.5, 1.5])
    plt.xlabel(r'$w\prime$')
    plt.yticks([0, 1])
    plt.savefig('disc_policy.pdf')
    plt.clf()
if __name__ == "__main__":
reservation_wage()
disc_policy() | Labs/PolicyFunctionIteration/plots.py | import scipy as sp
import matplotlib
matplotlib.rcParams = matplotlib.rc_params_from_file('../../matplotlibrc')
import matplotlib.pyplot as plt
import numpy as np
import math
from discretelognorm import discretelognorm
def reservation_wage():
    """Value-function iteration for the job-search model; saves
    'reservation_wage.pdf'."""
    m = 20
    v = 200
    N = 500
    Wmax = 100
    Wmin = 0
    gamma = .10   # separation probability
    alpha = .5    # unemployment compensation fraction
    beta = .9     # discount factor
    e_params = (m, v)
    u = lambda c: np.sqrt(c)
    w = np.linspace(Wmin, Wmax, N)
    uaw = u(alpha*w).reshape((N,1))
    uw = u(w)
    f = discretelognorm(w, *e_params)
    VE = np.zeros(N)
    EVU = np.zeros(N)
    VU = np.zeros((N,N))
    MVE = np.empty((N,N)) #tiled version of VE
    MEVU = np.empty((N,N)) #tiled version of EVU
    delta = 1.
    i = 0
    while delta >= 1e-9:
        i += 1
        #update tiled value functions
        MVE[:,:] = VE.reshape((1,N))
        MEVU[:,:] = EVU.reshape((N,1))
        #calculate new value functions
        VU1 = uaw + beta*np.max(np.dstack([MEVU, MVE]), axis=2)
        VE1 = uw + beta*((1-gamma)*VE + gamma*EVU)
        #test for convergence
        d1 = ((VE1-VE)**2).sum()
        d2 = ((VU1-VU)**2).sum()
        delta = max(d1,d2)
        #update
        VU = VU1
        VE = VE1
        EVU = np.dot(VU,f).ravel()
    #calculate policy function
    PSI = np.argmax(np.dstack([MEVU,MVE]), axis=2)
    #calculate and plot reservation wage function
    wr_ind = np.argmax(np.diff(PSI), axis = 1)
    wr = w[wr_ind]
    plt.plot(w,wr)
    plt.savefig('reservation_wage.pdf')
    plt.clf()
#plot discrete policy function
def disc_policy():
    """Modified policy iteration for the discrete job-search policy; saves
    'disc_policy.pdf'."""
    #First compute policy function...==========================================
    N = 500
    w = sp.linspace(0,100,N)
    w = w.reshape(N,1)
    u = lambda c: sp.sqrt(c)
    util_vec = u(w)
    alpha = 0.5
    alpha_util = u(alpha*w)
    alpha_util_grid = sp.repeat(alpha_util,N,1)
    m = 20
    v = 200
    f = discretelognorm(w,m,v)
    VEprime = sp.zeros((N,1))
    VUprime = sp.zeros((N,N))
    EVUprime = sp.zeros((N,1))
    psiprime = sp.ones((N,1))
    gamma = 0.1
    beta = 0.9
    m = 15  # inner value sweeps per policy update (rebinds the earlier m)
    tol = 10**-9
    delta = 1+tol
    it = 0
    while (delta >= tol):
        it += 1
        psi = psiprime.copy()
        arg1 = sp.repeat(sp.transpose(VEprime),N,0)
        arg2 = sp.repeat(EVUprime,N,1)
        arg = sp.array([arg2,arg1])
        psiprime = sp.argmax(arg,axis = 0)
        for j in sp.arange(0,m):
            VE = VEprime.copy()
            VU = VUprime.copy()
            EVU = EVUprime.copy()
            VEprime = util_vec + beta*((1-gamma)*VE + gamma*EVU)
            arg1 = sp.repeat(sp.transpose(VE),N,0)*psiprime
            arg2 = sp.repeat(EVU,N,1)*(1-psiprime)
            arg = arg1+arg2
            VUprime = alpha_util_grid + beta*arg
            EVUprime = sp.dot(VUprime,f)
        delta = sp.linalg.norm(psiprime -psi)
    wr_ind = sp.argmax(sp.diff(psiprime), axis = 1)
    wr = w[wr_ind]
    # NOTE(review): the next line is a Python 2 print statement — it is a
    # SyntaxError under Python 3 and should become print(w[250], wr[250]).
    print w[250],wr[250]
    #Then plot=================================================================
    plt.plot(w,psiprime[250,:])
    plt.ylim([-.5,1.5])
    plt.xlabel(r'$w\prime$')
    plt.yticks([0,1])
    plt.savefig('disc_policy.pdf')
    plt.clf()
if __name__ == "__main__":
reservation_wage()
disc_policy() | 0.423458 | 0.598899 |
from typing import List
from sqlmodel import Session, or_
from models.job import Job, JobCreate, JobRead, JobUpdate
from models.user import User, UserSession
from services.base import BaseService
from helpers.exceptions import ApiException
from repositories.job import JobRepository
from repositories.user import UserRepository
class JobService(BaseService):
def __init__(self, user_session: UserSession, db: Session):
super().__init__(user_session, db)
self.job_repository = JobRepository(db)
self.user_repository = UserRepository(db)
async def get_job(self, job_id) -> JobRead:
try:
job = self.job_repository.get_entity(Job.id == job_id)
if job is None:
raise ApiException(
f'Job with id {job_id} not found')
return job
except ApiException as e:
raise e
except Exception:
raise ApiException(f'Job with id {job_id} not found')
async def get_jobs(self) -> List[JobRead]:
try:
jobs = self.job_repository.get_entities()
return jobs
except Exception:
raise ApiException('No jobs found')
async def create_job(self, job: JobCreate) -> JobRead:
try:
job: Job = self.job_repository.create_entity(job)
job.user = self.user_repository.get_entity(
User.id == self.user_session.user_id)
self.job_repository.update_entity_changes(job)
return job
except ApiException as e:
raise e
except Exception:
raise ApiException('Error creating job')
async def update_job(self, job_id, job: JobUpdate) -> JobRead:
try:
job = self.job_repository.update_entity(job, Job.id == job_id)
return job
except ApiException as e:
raise e
except Exception:
raise ApiException(
f'Error updating job with id {job_id}')
async def delete_job(self, job_id) -> None:
try:
result = self.job_repository.delete_entity(
Job.id == job_id)
if not result:
raise ApiException(
f'Error deleting job with id {job_id}')
except ApiException as e:
raise e
except Exception:
raise ApiException(
f'Error deleting job with id {job_id}') | services/job.py | from typing import List
from sqlmodel import Session, or_
from models.job import Job, JobCreate, JobRead, JobUpdate
from models.user import User, UserSession
from services.base import BaseService
from helpers.exceptions import ApiException
from repositories.job import JobRepository
from repositories.user import UserRepository
class JobService(BaseService):
def __init__(self, user_session: UserSession, db: Session):
super().__init__(user_session, db)
self.job_repository = JobRepository(db)
self.user_repository = UserRepository(db)
async def get_job(self, job_id) -> JobRead:
try:
job = self.job_repository.get_entity(Job.id == job_id)
if job is None:
raise ApiException(
f'Job with id {job_id} not found')
return job
except ApiException as e:
raise e
except Exception:
raise ApiException(f'Job with id {job_id} not found')
async def get_jobs(self) -> List[JobRead]:
try:
jobs = self.job_repository.get_entities()
return jobs
except Exception:
raise ApiException('No jobs found')
async def create_job(self, job: JobCreate) -> JobRead:
try:
job: Job = self.job_repository.create_entity(job)
job.user = self.user_repository.get_entity(
User.id == self.user_session.user_id)
self.job_repository.update_entity_changes(job)
return job
except ApiException as e:
raise e
except Exception:
raise ApiException('Error creating job')
async def update_job(self, job_id, job: JobUpdate) -> JobRead:
try:
job = self.job_repository.update_entity(job, Job.id == job_id)
return job
except ApiException as e:
raise e
except Exception:
raise ApiException(
f'Error updating job with id {job_id}')
async def delete_job(self, job_id) -> None:
try:
result = self.job_repository.delete_entity(
Job.id == job_id)
if not result:
raise ApiException(
f'Error deleting job with id {job_id}')
except ApiException as e:
raise e
except Exception:
raise ApiException(
f'Error deleting job with id {job_id}') | 0.678327 | 0.158174 |
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from models import *
from slack import WebClient
from flask import request
from datetime import datetime
from sqlalchemy import create_engine
app = Flask(__name__)
db_url = os.environ.get('DB_URL')
app.config['SQLALCHEMY_DATABASE_URI'] = db_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
client = WebClient(token=os.environ.get('SLACK_TOKEN'))
db.init_app(app)
def main():
db.create_all()
if __name__ == "__main__":
with app.app_context():
main()
@app.route('/current-turn', methods=['POST', 'GET'])
def current_turn():
chore = request.args.get('chore')
current_user_ids = find_users_for(chore, "true")
current_turn_names = value_for("name", current_user_ids)
return ' - '.join(current_turn_names)
@app.route('/next-turn', methods=['POST', 'GET'])
def next_turn():
chore = request.args.get('chore')
return change_turn_for(chore)
@app.route('/new-team', methods=['POST'])
def new_team():
name = request.args.get('name')
if validate_unique_team(name) is True:
return create_team(name)
else:
return "Team already exists"
@app.route('/new-user', methods=['POST'])
def new_user():
email = request.args.get('email')
name = request.args.get('name')
team = request.args.get('team')
if validate_unique_user(email) is True:
return create_user(name, team, email)
else:
return "User already exists"
@app.route('/new-chore', methods=['POST'])
def new_chore():
name = request.args.get('name')
required_users = request.args.get('required_users')
if validate_unique_chore(name) is True:
return create_chore(name, required_users)
else:
return "Chore already exists"
@app.route('/list-users', methods=['POST', 'GET'])
def list_users():
try:
users = User.query.all()
return "\n".join(user.name + ' ' + str(user.team_id) for user in users)
except Exception as e:
return(str(e))
@app.route('/list-teams', methods=['POST', 'GET'])
def list_teams():
try:
teams = Team.query.all()
return "\n".join(team.name for team in teams)
except Exception as e:
return(str(e))
@app.route('/list-chores', methods=['POST', 'GET'])
def list_chores():
try:
chores = Chore.query.all()
return "\n".join(chore.name for chore in chores)
except Exception as e:
return(str(e))
@app.route('/assign-chore', methods=['POST', 'GET'])
def assign_chore():
chore = request.args.get('chore')
email = request.args.get('email')
return assign(email, chore)
def assign(email, chore):
try:
user = User.query.filter_by(email=email).first()
chore_object = Chore.query.filter_by(name=chore).first()
if user is None or chore_object is None:
return "Email or Chore does not exist"
else:
try:
user_chore = UserChore.query.filter_by(user_id=user.id, chore_id=chore_object.id).first()
if user_chore is None:
user_chore=UserChore(user_id=user.id, chore_id=chore_object.id, active=False, last_turn=datetime.now())
db.session.add(user_chore)
db.session.commit()
return user.name + " assigned to " + chore
else:
return "Chore was already assigned to " + user.name
except Exception as e:
try:
user_chore=UserChore(user_id=user.id, chore_id=chore_object.id, active=False, last_turn=datetime.now())
db.session.add(user_chore)
db.session.commit()
return user.name + " assigned to " + chore
except Exception as e:
return(str(e))
except Exception as e:
return (str(e))
def change_turn_for(chore):
next_user_ids = find_users_for(chore, "false")
current_user_ids = find_users_for(chore, "true")
next_turn_names = value_for("name", next_user_ids)
current_turn_names = value_for("name", current_user_ids)
update_status_for(current_user_ids, False)
update_status_for(next_user_ids, True)
return " - ".join(next_turn_names)
def update_status_for(user_ids, status):
mappings = []
for user_chore in db.session.query(UserChore).filter(UserChore.user_id.in_(list(user_ids))).all():
info = { 'id': user_chore.id, 'active': status }
extra = { 'last_turn': datetime.now() }
info = {**info, **extra} if status == True else info
mappings.append(info)
db.session.bulk_update_mappings(UserChore, mappings)
db.session.flush()
db.session.commit()
def find_users_for(chore, active_user):
chore_id=Chore.query.filter_by(name=chore).first().id
engine = create_engine(db_url)
connection = engine.connect()
result = connection.execute("select distinct on (team_id) team_id, u.id, name, email, last_turn from user_chores as uc, users as u where uc.active = " + active_user + " and uc.user_id = u.id and u.active = true and uc.chore_id = " + str(chore_id) + " order by team_id, last_turn asc")
ids = []
for row in result:
ids.append(row['id'])
return ids
def value_for(column, user_ids):
engine = create_engine(db_url)
connection = engine.connect()
user_ids_str = ", ".join(str(value) for value in user_ids)
result = connection.execute("select " + column + " from users where id in (" + user_ids_str + ")")
names = []
for row in result:
names.append(row[0])
return names
def assign_new_topic_on_channels(topic):
channels = os.environ.get('CHANNELS').split(" ")
for channel in channels:
client.api_call(api_method='conversations.setTopic',json={ 'channel': channel,'topic': topic })
def validate_unique_team(name):
try:
team=Team.query.filter_by(name=name).first()
if team is None:
return True
else:
return False
except Exception as e:
return True
def validate_unique_chore(name):
try:
chore=Chore.query.filter_by(name=name).first()
if chore is None:
return True
else:
return False
except Exception as e:
return True
def create_team(name):
try:
team=Team(name=name)
db.session.add(team)
db.session.commit()
return "Team added"
except Exception as e:
return(str(e))
def create_chore(name, required_users):
try:
chore=Chore(name=name, required_users=required_users)
db.session.add(chore)
db.session.commit()
return "Chore added"
except Exception as e:
return(str(e))
def validate_unique_user(email):
try:
user=User.query.filter_by(email=email).first()
if user is None:
return True
else:
return False
except Exception as e:
return True
def create_user(name, team, email):
try:
team=Team.query.filter_by(name=team).first()
user=User(name=name, email=email, team_id=team.id, active=True)
db.session.add(user)
db.session.commit()
return "User added"
except Exception as e:
return(str(e)) | app.py | import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from models import *
from slack import WebClient
from flask import request
from datetime import datetime
from sqlalchemy import create_engine
app = Flask(__name__)
db_url = os.environ.get('DB_URL')
app.config['SQLALCHEMY_DATABASE_URI'] = db_url
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
client = WebClient(token=os.environ.get('SLACK_TOKEN'))
db.init_app(app)
def main():
db.create_all()
if __name__ == "__main__":
with app.app_context():
main()
@app.route('/current-turn', methods=['POST', 'GET'])
def current_turn():
chore = request.args.get('chore')
current_user_ids = find_users_for(chore, "true")
current_turn_names = value_for("name", current_user_ids)
return ' - '.join(current_turn_names)
@app.route('/next-turn', methods=['POST', 'GET'])
def next_turn():
chore = request.args.get('chore')
return change_turn_for(chore)
@app.route('/new-team', methods=['POST'])
def new_team():
name = request.args.get('name')
if validate_unique_team(name) is True:
return create_team(name)
else:
return "Team already exists"
@app.route('/new-user', methods=['POST'])
def new_user():
email = request.args.get('email')
name = request.args.get('name')
team = request.args.get('team')
if validate_unique_user(email) is True:
return create_user(name, team, email)
else:
return "User already exists"
@app.route('/new-chore', methods=['POST'])
def new_chore():
name = request.args.get('name')
required_users = request.args.get('required_users')
if validate_unique_chore(name) is True:
return create_chore(name, required_users)
else:
return "Chore already exists"
@app.route('/list-users', methods=['POST', 'GET'])
def list_users():
try:
users = User.query.all()
return "\n".join(user.name + ' ' + str(user.team_id) for user in users)
except Exception as e:
return(str(e))
@app.route('/list-teams', methods=['POST', 'GET'])
def list_teams():
try:
teams = Team.query.all()
return "\n".join(team.name for team in teams)
except Exception as e:
return(str(e))
@app.route('/list-chores', methods=['POST', 'GET'])
def list_chores():
try:
chores = Chore.query.all()
return "\n".join(chore.name for chore in chores)
except Exception as e:
return(str(e))
@app.route('/assign-chore', methods=['POST', 'GET'])
def assign_chore():
chore = request.args.get('chore')
email = request.args.get('email')
return assign(email, chore)
def assign(email, chore):
try:
user = User.query.filter_by(email=email).first()
chore_object = Chore.query.filter_by(name=chore).first()
if user is None or chore_object is None:
return "Email or Chore does not exist"
else:
try:
user_chore = UserChore.query.filter_by(user_id=user.id, chore_id=chore_object.id).first()
if user_chore is None:
user_chore=UserChore(user_id=user.id, chore_id=chore_object.id, active=False, last_turn=datetime.now())
db.session.add(user_chore)
db.session.commit()
return user.name + " assigned to " + chore
else:
return "Chore was already assigned to " + user.name
except Exception as e:
try:
user_chore=UserChore(user_id=user.id, chore_id=chore_object.id, active=False, last_turn=datetime.now())
db.session.add(user_chore)
db.session.commit()
return user.name + " assigned to " + chore
except Exception as e:
return(str(e))
except Exception as e:
return (str(e))
def change_turn_for(chore):
next_user_ids = find_users_for(chore, "false")
current_user_ids = find_users_for(chore, "true")
next_turn_names = value_for("name", next_user_ids)
current_turn_names = value_for("name", current_user_ids)
update_status_for(current_user_ids, False)
update_status_for(next_user_ids, True)
return " - ".join(next_turn_names)
def update_status_for(user_ids, status):
mappings = []
for user_chore in db.session.query(UserChore).filter(UserChore.user_id.in_(list(user_ids))).all():
info = { 'id': user_chore.id, 'active': status }
extra = { 'last_turn': datetime.now() }
info = {**info, **extra} if status == True else info
mappings.append(info)
db.session.bulk_update_mappings(UserChore, mappings)
db.session.flush()
db.session.commit()
def find_users_for(chore, active_user):
chore_id=Chore.query.filter_by(name=chore).first().id
engine = create_engine(db_url)
connection = engine.connect()
result = connection.execute("select distinct on (team_id) team_id, u.id, name, email, last_turn from user_chores as uc, users as u where uc.active = " + active_user + " and uc.user_id = u.id and u.active = true and uc.chore_id = " + str(chore_id) + " order by team_id, last_turn asc")
ids = []
for row in result:
ids.append(row['id'])
return ids
def value_for(column, user_ids):
engine = create_engine(db_url)
connection = engine.connect()
user_ids_str = ", ".join(str(value) for value in user_ids)
result = connection.execute("select " + column + " from users where id in (" + user_ids_str + ")")
names = []
for row in result:
names.append(row[0])
return names
def assign_new_topic_on_channels(topic):
channels = os.environ.get('CHANNELS').split(" ")
for channel in channels:
client.api_call(api_method='conversations.setTopic',json={ 'channel': channel,'topic': topic })
def validate_unique_team(name):
try:
team=Team.query.filter_by(name=name).first()
if team is None:
return True
else:
return False
except Exception as e:
return True
def validate_unique_chore(name):
try:
chore=Chore.query.filter_by(name=name).first()
if chore is None:
return True
else:
return False
except Exception as e:
return True
def create_team(name):
try:
team=Team(name=name)
db.session.add(team)
db.session.commit()
return "Team added"
except Exception as e:
return(str(e))
def create_chore(name, required_users):
try:
chore=Chore(name=name, required_users=required_users)
db.session.add(chore)
db.session.commit()
return "Chore added"
except Exception as e:
return(str(e))
def validate_unique_user(email):
try:
user=User.query.filter_by(email=email).first()
if user is None:
return True
else:
return False
except Exception as e:
return True
def create_user(name, team, email):
try:
team=Team.query.filter_by(name=team).first()
user=User(name=name, email=email, team_id=team.id, active=True)
db.session.add(user)
db.session.commit()
return "User added"
except Exception as e:
return(str(e)) | 0.274935 | 0.043164 |
import numpy as np
from baselines.ecbp.agents.buffer.lru_knn_combine import LRU_KNN_COMBINE
from baselines.ecbp.agents.graph.model import *
import tensorflow as tf
from baselines.ecbp.agents.graph.build_graph_dueling import *
from baselines import logger
import copy
class OnlineAgent(object):
def __init__(self, model_func, exploration_schedule, obs_shape, input_type, lr=1e-4, buffer_size=1000000,
num_actions=6, latent_dim=32,
gamma=0.99, knn=4,
tf_writer=None):
self.ec_buffer = LRU_KNN_COMBINE(num_actions, buffer_size, latent_dim, latent_dim, obs_shape, 'game')
self.obs = None
self.z = None
self.state_tp1 = None
self.writer = tf_writer
self.sequence = []
self.gamma = gamma
self.num_actions = num_actions
self.exploration_schedule = exploration_schedule
self.latent_dim = latent_dim
self.knn = knn
self.steps = 0
self.heuristic_exploration = True
self.hash_func, _, _ = build_train_dueling(
make_obs_ph=lambda name: input_type(obs_shape, name=name),
model_func=model_func,
q_func=model,
imitate=False,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-4),
gamma=gamma,
grad_norm_clipping=10,
)
def act(self, obs, is_train=True):
self.obs = obs
z = np.array(self.hash_func(np.array(obs))).reshape((self.latent_dim,))
self.z = z
self.steps += 1
# instance_inr = np.max(self.exploration_coef(self.count[obs]))
if (np.random.random() < max(0, self.exploration_schedule.value(self.steps))) and is_train:
action = np.random.randint(0, self.num_actions)
# print("random")
return action
else:
extrinsic_qs = np.zeros((self.num_actions, 1))
intrinsic_qs = np.zeros((self.num_actions, 1))
finds = np.zeros((1,))
# print(self.num_actions)
for a in range(self.num_actions):
extrinsic_qs[a], intrinsic_qs[a], find = self.ec_buffer[a].act_value(np.array([z]), self.knn)
finds += sum(find)
if is_train and self.heuristic_exploration:
q = extrinsic_qs + intrinsic_qs
else:
q = extrinsic_qs
q_max = np.max(q)
max_action = np.where(q >= q_max - 1e-7)[0]
action_selected = np.random.randint(0, len(max_action))
return max_action[action_selected]
def observe(self, action, reward, state_tp1, done, train=True):
if not train:
return
self.sequence.append((copy.deepcopy(self.z), action, reward, done))
self.state_tp1 = state_tp1
if done and train:
self.update_sequence()
def train(self):
pass
def update_sequence(self):
exRtn = [0]
inRtn = [0]
inrtd = 0
for z, a, r, done in reversed(self.sequence):
exrtd = self.gamma * exRtn[-1] + r
inrtd = self.gamma * inRtn[-1] + inrtd
exRtn.append(exrtd)
inRtn.append(inrtd)
q, inrtd = self.ec_buffer[a].peek(z, exrtd, inrtd, True)
if q is None: # new action
self.ec_buffer[a].add(z, exrtd, inrtd)
inrtd = self.ec_buffer[a].beta
self.sequence = []
return exRtn, inRtn | baselines/ecbp/agents/online_agent.py | import numpy as np
from baselines.ecbp.agents.buffer.lru_knn_combine import LRU_KNN_COMBINE
from baselines.ecbp.agents.graph.model import *
import tensorflow as tf
from baselines.ecbp.agents.graph.build_graph_dueling import *
from baselines import logger
import copy
class OnlineAgent(object):
def __init__(self, model_func, exploration_schedule, obs_shape, input_type, lr=1e-4, buffer_size=1000000,
num_actions=6, latent_dim=32,
gamma=0.99, knn=4,
tf_writer=None):
self.ec_buffer = LRU_KNN_COMBINE(num_actions, buffer_size, latent_dim, latent_dim, obs_shape, 'game')
self.obs = None
self.z = None
self.state_tp1 = None
self.writer = tf_writer
self.sequence = []
self.gamma = gamma
self.num_actions = num_actions
self.exploration_schedule = exploration_schedule
self.latent_dim = latent_dim
self.knn = knn
self.steps = 0
self.heuristic_exploration = True
self.hash_func, _, _ = build_train_dueling(
make_obs_ph=lambda name: input_type(obs_shape, name=name),
model_func=model_func,
q_func=model,
imitate=False,
num_actions=num_actions,
optimizer=tf.train.AdamOptimizer(learning_rate=lr, epsilon=1e-4),
gamma=gamma,
grad_norm_clipping=10,
)
def act(self, obs, is_train=True):
self.obs = obs
z = np.array(self.hash_func(np.array(obs))).reshape((self.latent_dim,))
self.z = z
self.steps += 1
# instance_inr = np.max(self.exploration_coef(self.count[obs]))
if (np.random.random() < max(0, self.exploration_schedule.value(self.steps))) and is_train:
action = np.random.randint(0, self.num_actions)
# print("random")
return action
else:
extrinsic_qs = np.zeros((self.num_actions, 1))
intrinsic_qs = np.zeros((self.num_actions, 1))
finds = np.zeros((1,))
# print(self.num_actions)
for a in range(self.num_actions):
extrinsic_qs[a], intrinsic_qs[a], find = self.ec_buffer[a].act_value(np.array([z]), self.knn)
finds += sum(find)
if is_train and self.heuristic_exploration:
q = extrinsic_qs + intrinsic_qs
else:
q = extrinsic_qs
q_max = np.max(q)
max_action = np.where(q >= q_max - 1e-7)[0]
action_selected = np.random.randint(0, len(max_action))
return max_action[action_selected]
def observe(self, action, reward, state_tp1, done, train=True):
if not train:
return
self.sequence.append((copy.deepcopy(self.z), action, reward, done))
self.state_tp1 = state_tp1
if done and train:
self.update_sequence()
def train(self):
pass
def update_sequence(self):
exRtn = [0]
inRtn = [0]
inrtd = 0
for z, a, r, done in reversed(self.sequence):
exrtd = self.gamma * exRtn[-1] + r
inrtd = self.gamma * inRtn[-1] + inrtd
exRtn.append(exrtd)
inRtn.append(inrtd)
q, inrtd = self.ec_buffer[a].peek(z, exrtd, inrtd, True)
if q is None: # new action
self.ec_buffer[a].add(z, exrtd, inrtd)
inrtd = self.ec_buffer[a].beta
self.sequence = []
return exRtn, inRtn | 0.416559 | 0.23579 |
import random
import string
import subprocess #Process commands
import socket #Process socket data
import pyfiglet
import sys
import os
from subprocess import call
def SplashScreen():
print(" . ) )")
print(" ( (| .")
print(" ) )\/ ( ( (")
print(" * ( (( / ))\)) ( ) )")
print(" ( \ )\( | ))( ) (|")
print(" >) ))/ | )/ \(( ) \ ")
print(" ( ( . -. V )/ )( ( ")
print(" \ / . \ . \)) ))")
print(" )( ( | | ) . ( /")
print(" )( ,')) \ / \( `. )")
print(" (\> ,'/__ )) __`. /")
print(" ( \ | / ___ ( \/ ___ \ | ( (")
print(" \.) |/ / \__ __/ \ \| ))")
print(" . \. |> \ | __ | / <| /")
print(" )/ \____/ :..: \____/ \ <")
print(" ) \ (|__ . / ;: \ __| ) (")
print(" (( )\) ~--_ -- -- _--~ / ))")
print(" \ ( | || || | ( /")
print(" \. | ||_ _|| | /")
print(" > : | ~V+-I_I_I-+V~ | : (.")
print(" ( \: T\ _ _ /T : ./")
print(" \ : T^T T-+-T T^T ;<")
print(" \..`_ -+- _' )")
print(" ) . `--=.._____..=--'. ./ (")
print(" (( ) ( ) ( ) ( )> ")
print(" > \/^/) )) ( ( /(. )) ))._/(__))./ (_.")
print(" ( _../ ( \)) ) \ ( / \. ./ || ..__:| _. \ ")
print(" | \__. ) | (/ /: :)) | \/ |( <.._ )| ) )")
print(")) _./ | ) )) __ < | :( :)) .//( : : |")
print("(: < ): --: ^ \ )( )\/: / /_/ ) :._) :")
print(" \..) (_.. .. : : : .( \..:.. ./__. ./")
print(" ^ ^ \^ ^ ^\/^ ^ JaL")
def MenuFont():
ascii_banner = pyfiglet.figlet_format("PSW-FINFUCK")
print(ascii_banner)
return "+++By Anonik V1.0+++"
def CreditsMenu():
print("")
print("")
print("()==========================================()")
print("() ()")
print("() CREDITS BY: ()")
print("() ()")
print("() *)General Scripting: Anonik ()")
print("() ()")
print("() *)crucnh.py Scripting By derv82 ()")
print("() ()")
print("()==========================================()")
print("")
print("")
print("()==========================================()")
print("() SOCIAL - GITHUB ()")
print("()==========================================()")
print("")
print("()=Github Anonik: https://github.com/anonik9900")
print("()=Github derv: https://github.com/derv82")
print("")
print("()==========================================()")
print("() END CREDITS AND SOCIAL ()")
print("()==========================================()")
print("")
print("")
creditChoice = input("Type 1) to go home or 2) to exit: ")
if creditChoice == str("1"):
os.system('clear')
return SceltaMenu()
if creditChoice == str("2"):
sys.exit()
else:
print("Error... Invalid command. Closing program")
sys.exit()
def MenuCHoice():
print(")-------------------------------------------(")
print(")- -(")
print(")---------Build YOur ExPlOiT PsW------------(")
print(")----- -----(")
print(")--------------By Anonik&Others-------------(")
print(")----- -----(")
print(")-------------------------------------------(")
print("")
print(" ----- Menu -----")
print(" ()--------------------------()")
print(" ()---1*Create Psw with comb-()")
print("")
def Menucrunch():
print("")
print("()---------------------------------()")
print("()-----------/Crunch Tool\---------()")
print("()----------| |--------()")
print("()----------\_____________/--------()")
print("()---------------------------------()")
print(SplashScreen())
vic = input ("Please enter a value for create new wordlist: ")
minChar = input("Enter the minimum word lenght: ")
maxChar = input("Enter the maximum word lenght: ")
print("")
saveFile = input("Did you save all words inside a file ?: [Y]or[N] ")
if saveFile == str("Y") or saveFile == str("y"):
nameSfile = input("Enter the name of your wordlist is saved ex: (word.txt) : ")
print("")
print("Success")
os.system('python3 crunch.py -m '+minChar+' -M '+maxChar +' -c '+vic+ ' -o '+nameSfile)
#call(['python3', 'crunch.py -m '+minChar+' -M '+maxChar +' -c '+vic+ ' -o '+nameSfile])
#call(['python3 crunch.py -m '+minChar+' -M '+maxChar +' -c '+vic+ ' -o '+nameSfile])
print("")
print("()================================()")
print("() COMPLETED ()")
print("()================================()")
print("")
print("The wordlist called "+nameSfile+" is saved inside the pswf.py folder !")
print("")
sys.exit()
if saveFile == str("N") or saveFile == str("n"):
print("Go to the next step \../ ")
print("")
call(["python3", "crunch.py -m "+minChar+" -M "+maxChar +" -c "+vic])
#os.system('python crunch.py -m '+minChar+' -M '+maxChar +' -c '+vic)
print("")
print("()================================()")
print("() COMPLETED ()")
print("()================================()")
print("")
sys.exit()
else:
print("Error.... :(")
sys.exit()
#os.system('cowsay ' + vic)
#os.system('python crunch.py -m 1 -M 3'+' -c '+vic)
#os.system(mark)
#os.system('%s %s' % ('ls', '-l'))
sys.exit()
def SceltaMenu():
scelta = input("Type (1) to enter, (2) To Exit, (3) to use the crunch Tool \nor (4) to see the credits ")
if scelta == str("1"):
return MenuCHoice() #MEnu
if scelta == str("2"):
sys.exit()
if scelta == str("3"):
return Menucrunch() #MenuCrunch
if scelta == str("4"):
return CreditsMenu() #Menu Credits
else:
print("Invalid input - try again")
print("")
return SceltaMenu()
def CreaPassword():
print("")
print(")------- Password Creator for Exploit -------(")
print("")
nome = input("Please enter your Victim name: ")
print("")
cognome = input("Please enter your Victim Surname: ")
print("")
anno_nascita = input("Enter your victim year of birth: ")
print("")
mese_nascita = input("Enter your victim month of birth: ")
print("")
giorno_nascita = input("Enter your victim day of birth: ")
print("")
#If se ha il cane
has_dog = input("Yor victim has an animal?: ")
if has_dog == "yes":
dog_number = input("Enter the number of animals: ")
if dog_number == str(1):
dog_name = input("Enter the animal name: ")
if dog_number ==str(2):
dog_name = input("Enter the 1st animal name: ")
dog_name2 = input("Enter the 2nd animal name: ")
if dog_number ==str(3):
dog_name = input("Enter the 1st animal name: ")
dog_name2 = input("Enter the 2nd animal name: ")
dog_name3 = input("Enter the 3td animal name: ")
dog_name_value =1
elif has_dog == "no":
dog_name = ""
dog_name_value =2
else:
print(CreaPassword())
#print "La password e': "+nome+ " " +cognome
#Combinazione Password
print("")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print(" Genero Password")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print("comb1: "+nome+cognome) #name + surname
print("comb2: "+nome+ " "+cognome) #name + surnamen + with space
print("comb3: "+nome+cognome+anno_nascita) #name+surname+year of birth
print("comb4: "+cognome+nome) #surname+name
print("comb5: "+cognome+ " "+nome) #surname + name with space
print("comb6: "+cognome+nome+anno_nascita) #surname + name + year of birth
print("comb7: "+nome+anno_nascita) #name+year of birth
print("comb8: "+cognome+anno_nascita)#surname+year of birth
print("comb9: "+nome+cognome+mese_nascita) #name+surname+month of birth
print("comb10: "+cognome+nome+mese_nascita) #surname+name+month of birth
print("comb11: "+nome+mese_nascita+cognome) #name+month of birth+surname
print("comb12: "+cognome+mese_nascita+nome) #surname+month of birth+name
print("comb13: "+nome+anno_nascita+mese_nascita) #name+year-of-birth+month-of-birth
print("comb14: "+nome+mese_nascita+anno_nascita) #name+month-of-birth-year-of-birth
print("comb15: "+cognome+anno_nascita+mese_nascita) #surname+year-of-birth+month-of-birth
print("comb16: "+cognome+mese_nascita+anno_nascita) #surname+month-of-birth-year-of-birth
print("comb17: "+nome+cognome+anno_nascita+mese_nascita) #name+surname+year of birth+month of birth
print("comb18: "+nome+cognome+mese_nascita+anno_nascita) #name+surname+month of birth+ year of birth
print("comb19: "+cognome+nome+anno_nascita+mese_nascita) #surname+name+yearofbith+monthofbirth
print("comb20: "+cognome+nome+mese_nascita+anno_nascita) #surname+name+monthofbirt+yearofbirth
print("comb21: "+nome+cognome+anno_nascita+mese_nascita+giorno_nascita) #name+surname+year+month+day
print("comb22: "+nome+cognome+mese_nascita+anno_nascita+giorno_nascita) #name+surname+month+year+day
print("comb23: "+nome+cognome+giorno_nascita+mese_nascita+anno_nascita) #name+surname+day+month+year
print("comb24: "+nome+cognome+giorno_nascita+anno_nascita+mese_nascita) #name+surname+day+year+month
print("comb25: "+cognome+nome+anno_nascita+mese_nascita+giorno_nascita) #surname+name+year+month+day
print("comb26: "+cognome+nome+mese_nascita+anno_nascita+giorno_nascita) #surname+name+month+year+day
print("comb27: "+cognome+nome+giorno_nascita+mese_nascita+anno_nascita) #surname+name+day+month+year
print("comb28: "+cognome+nome+giorno_nascita+anno_nascita+mese_nascita) #surname+name+day+year+month
print("comb29: "+nome+giorno_nascita) #name+day
print("comb30: "+cognome+giorno_nascita) #surname+day
print("comb31: "+nome+cognome+giorno_nascita) #name+surname+day
print("comb32: "+nome+giorno_nascita+cognome) #name+day+surname
print("comb33: "+cognome+nome+giorno_nascita) #surname+name+day
print("comb34: "+cognome+giorno_nascita+nome) #surname+day+name
print("comb35: "+nome+anno_nascita+cognome) #name+year+surname
print("comb36: "+cognome+anno_nascita+nome) #surname+year+name
print("comb37: "+nome+anno_nascita+cognome+mese_nascita) #name+year+surname+month
print("comb38: "+cognome+anno_nascita+nome+mese_nascita) #surname+year+name+month
print("comb39: "+nome+mese_nascita+cognome+anno_nascita) #name+month+surname+year
print("comb40: "+cognome+mese_nascita+nome+anno_nascita) #surname+month+name+year
print("comb41: "+nome+anno_nascita+mese_nascita+cognome) #name+year+month+surname
print("comb42: "+nome+mese_nascita+anno_nascita+cognome) #name+month+year+surname
print("comb43: "+cognome+anno_nascita+mese_nascita+nome) #surname+year+month+name
print("comb44: "+cognome+mese_nascita+anno_nascita+nome) #surname+month+year+name
print("comb45: "+anno_nascita+mese_nascita+giorno_nascita) #YY/MM/DD
print("comb46: "+mese_nascita+giorno_nascita+anno_nascita) #MM/DD/YY
print("comb47: "+giorno_nascita+mese_nascita+anno_nascita) #DD/MM/YY
print("comb48: "+nome+anno_nascita+mese_nascita+giorno_nascita) #name+YY/MM/DD
print("comb49: "+nome+mese_nascita+giorno_nascita+anno_nascita) #name+MM/DD/YY
print("comb50: "+nome+giorno_nascita+mese_nascita+anno_nascita) #name+DD/MM/YY
print("comb51: "+cognome+anno_nascita+mese_nascita+giorno_nascita) #surname+YY/MM/DD
print("comb52: "+cognome+mese_nascita+giorno_nascita+anno_nascita) #surname+MM/DD/YY
print("comb53: "+cognome+giorno_nascita+mese_nascita+anno_nascita) #surname+DD/MM/YY
print("comb54: "+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #name+YY/MM/DD+surname
print("comb55: "+nome+mese_nascita+giorno_nascita+anno_nascita+cognome) #name+MM/DD/YY+surname
print("comb56: "+nome+giorno_nascita+mese_nascita+anno_nascita+cognome) #name+DD/MM/YY+surname
print("comb57: "+cognome+anno_nascita+mese_nascita+giorno_nascita+nome) #surname+YY/MM/DD+name
print("comb58: "+cognome+mese_nascita+giorno_nascita+anno_nascita+nome) #surname+MM/DD/YY+name
print("comb59: "+cognome+giorno_nascita+mese_nascita+anno_nascita+nome) #surname+DD/MM/YY+name
#Animale
if dog_name_value ==1 and dog_number==str(1):
print("")
print("combAnimal1: "+nome+cognome+dog_name) #name+surname+animal
print("combAnimal2: "+cognome+nome+dog_name) #surname+name+animal
print("combAnimal3: "+dog_name+nome+cognome) #animal+name+surname
print("combAnimal4: "+dog_name+cognome+nome) #animal+surname+name
print("combAnimal5: "+nome+cognome+dog_name+anno_nascita) #name+surname+animal+year
print("combAnimal6: "+cognome+nome+dog_name+anno_nascita) #surname+name+animal+year
print("combAnimal7: "+nome+dog_name) #name+animal
print("combAnimal8: "+dog_name+nome) #animal+name
print("combAnimal9: "+cognome+dog_name) #surname+animal
print("combAnimal10: "+dog_name+cognome) #animal+surname
print("combAnimal11: "+dog_name+anno_nascita) #animal+year
print("combAnimal12: "+dog_name+giorno_nascita) #animal+day
print("combAnimal13: "+dog_name+mese_nascita) #animal+month
print("combAnimal14: "+anno_nascita+dog_name) #year+animal
print("combAnimal15: "+mese_nascita+dog_name) #month+animal
print("combAnimal16: "+giorno_nascita+dog_name) #day+animal
print("combAnimal17: "+nome+anno_nascita+dog_name) #name+year+animal
print("combAnimal18: "+nome+mese_nascita+dog_name) #name+month+animal
print("combAnimal19: "+nome+giorno_nascita+dog_name) #name+day+animal
print("combAnimal20: "+nome+dog_name+anno_nascita) #name+animal+year
print("combAnimal21: "+nome+dog_name+mese_nascita) #name+animal+month
print("combAnimal22: "+nome+dog_name+giorno_nascita) #name+animal+day
print("combAnimal23: "+dog_name+nome+anno_nascita) #animal+name+year
print("combAnimal24: "+dog_name+nome+mese_nascita) #animal+name+month
print("combAnimal25: "+dog_name+nome+giorno_nascita) #animal+name+day
print("combAnimal26: "+dog_name+anno_nascita+nome) #animal+year+name
print("combAnimal27: "+dog_name+mese_nascita+nome) #animal+month+name
print("combAnimal28: "+dog_name+giorno_nascita+nome) #animal+day+name
print("combAnimal29: "+cognome+anno_nascita+dog_name) #surname+year+animal
print("combAnimal30: "+cognome+mese_nascita+dog_name) #surname+month+animal
print("combAnimal31: "+cognome+giorno_nascita+dog_name) #surname+day+animal
print("combAnimal32: "+cognome+dog_name+anno_nascita) #surname+animal+year
print("combAnimal33: "+cognome+dog_name+mese_nascita) #surname+animal+month
print("combAnimal34: "+cognome+dog_name+giorno_nascita) #surname+animal+day
print("combAnimal35: "+dog_name+cognome+anno_nascita) #animal+surname+year
print("combAnimal36: "+dog_name+cognome+mese_nascita) #animal+surname+month
print("combAnimal37: "+dog_name+cognome+giorno_nascita) #animal+surname+day
print("combAnimal38: "+dog_name+anno_nascita+cognome) #animal+year+surname
print("combAnimal39: "+dog_name+mese_nascita+cognome) #animal+month+surname
print("combAnimal40: "+dog_name+giorno_nascita+cognome) #animal+day+surname
print("combAnimal41: "+dog_name+nome+anno_nascita+cognome) #animal+name+year+surname
print("combAnimal42: "+dog_name+nome+anno_nascita+mese_nascita+cognome) #animal+name+year+month+surname
print("combAnimal43: "+dog_name+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #animal+name+year+month+day+surname
print("combAnimal44: "+dog_name+nome+anno_nascita+mese_nascita+cognome+giorno_nascita) #animal+name+year+month+surname+day
print("combAnimal45: "+dog_name+nome+anno_nascita+giorno_nascita+cognome+mese_nascita) #animal+name+year+day+surname+month
print("combAnimal46: "+dog_name+nome+mese_nascita+giorno_nascita+cognome+anno_nascita) #animal+name+month+day+surname+year
print("combAnimal47: "+nome+dog_name+anno_nascita+cognome+mese_nascita) #name+animal+year+surname+month
print("combAnimal48: "+nome+dog_name+mese_nascita+cognome+giorno_nascita) #name+animal+month+surname+day
print("combAnimal49: "+nome+dog_name+giorno_nascita+cognome+anno_nascita) #name+animal+day+surname+year
print("combAnimal50: "+cognome+dog_name+anno_nascita+nome+mese_nascita) #surname+animal+year+name+month
print("combAnimal51: "+cognome+dog_name+mese_nascita+nome+giorno_nascita) #surname+animal+month+name+day
print("combAnimal52: "+cognome+dog_name+giorno_nascita+nome+anno_nascita) #surname+animal+day+name+year
if dog_name_value ==1 and dog_number==str(2):
print("")
print("combAnimal1: "+nome+cognome+dog_name) #name+surname+animal1
print("combAnimal2: "+cognome+nome+dog_name) #surname+name+animal
print("combAnimal3: "+dog_name+nome+cognome) #animal+name+surname
print("combAnimal4: "+dog_name+cognome+nome) #animal+surname+name
print("combAnimal5: "+nome+cognome+dog_name+anno_nascita) #name+surname+animal+year
print("combAnimal6: "+cognome+nome+dog_name+anno_nascita) #surname+name+animal+year
print("combAnimal7: "+nome+dog_name) #name+animal
print("combAnimal8: "+dog_name+nome) #animal+name
print("combAnimal9: "+cognome+dog_name) #surname+animal
print("combAnimal10: "+dog_name+cognome) #animal+surname
print("combAnimal11: "+dog_name+anno_nascita) #animal+year
print("combAnimal12: "+dog_name+giorno_nascita) #animal+day
print("combAnimal13: "+dog_name+mese_nascita) #animal+month
print("combAnimal14: "+anno_nascita+dog_name) #year+animal
print("combAnimal15: "+mese_nascita+dog_name) #month+animal
print("combAnimal16: "+giorno_nascita+dog_name) #day+animal
print("combAnimal17: "+nome+anno_nascita+dog_name) #name+year+animal
print("combAnimal18: "+nome+mese_nascita+dog_name) #name+month+animal
print("combAnimal19: "+nome+giorno_nascita+dog_name) #name+day+animal
print("combAnimal20: "+nome+dog_name+anno_nascita) #name+animal+year
print("combAnimal21: "+nome+dog_name+mese_nascita) #name+animal+month
print("combAnimal22: "+nome+dog_name+giorno_nascita) #name+animal+day
print("combAnimal23: "+dog_name+nome+anno_nascita) #animal+name+year
print("combAnimal24: "+dog_name+nome+mese_nascita) #animal+name+month
print("combAnimal25: "+dog_name+nome+giorno_nascita) #animal+name+day
print("combAnimal26: "+dog_name+anno_nascita+nome) #animal+year+name
print("combAnimal27: "+dog_name+mese_nascita+nome) #animal+month+name
print("combAnimal28: "+dog_name+giorno_nascita+nome) #animal+day+name
print("combAnimal29: "+cognome+anno_nascita+dog_name) #surname+year+animal
print("combAnimal30: "+cognome+mese_nascita+dog_name) #surname+month+animal
print("combAnimal31: "+cognome+giorno_nascita+dog_name) #surname+day+animal
print("combAnimal32: "+cognome+dog_name+anno_nascita) #surname+animal+year
print("combAnimal33: "+cognome+dog_name+mese_nascita) #surname+animal+month
print("combAnimal34: "+cognome+dog_name+giorno_nascita) #surname+animal+day
print("combAnimal35: "+dog_name+cognome+anno_nascita) #animal+surname+year
print("combAnimal36: "+dog_name+cognome+mese_nascita) #animal+surname+month
print("combAnimal37: "+dog_name+cognome+giorno_nascita) #animal+surname+day
print("combAnimal38: "+dog_name+anno_nascita+cognome) #animal+year+surname
print("combAnimal39: "+dog_name+mese_nascita+cognome) #animal+month+surname
print("combAnimal40: "+dog_name+giorno_nascita+cognome) #animal+day+surname
print("combAnimal41: "+dog_name+nome+anno_nascita+cognome) #animal+name+year+surname
print("combAnimal42: "+dog_name+nome+anno_nascita+mese_nascita+cognome) #animal+name+year+month+surname
print("combAnimal43: "+dog_name+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #animal+name+year+month+day+surname
print("combAnimal44: "+dog_name+nome+anno_nascita+mese_nascita+cognome+giorno_nascita) #animal+name+year+month+surname+day
print("combAnimal45: "+dog_name+nome+anno_nascita+giorno_nascita+cognome+mese_nascita) #animal+name+year+day+surname+month
print("combAnimal46: "+dog_name+nome+mese_nascita+giorno_nascita+cognome+anno_nascita) #animal+name+month+day+surname+year
print("combAnimal47: "+nome+dog_name+anno_nascita+cognome+mese_nascita) #name+animal+year+surname+month
print("combAnimal48: "+nome+dog_name+mese_nascita+cognome+giorno_nascita) #name+animal+month+surname+day
print("combAnimal49: "+nome+dog_name+giorno_nascita+cognome+anno_nascita) #name+animal+day+surname+year
print("combAnimal50: "+cognome+dog_name+anno_nascita+nome+mese_nascita) #surname+animal+year+name+month
print("combAnimal51: "+cognome+dog_name+mese_nascita+nome+giorno_nascita) #surname+animal+month+name+day
print("combAnimal52: "+cognome+dog_name+giorno_nascita+nome+anno_nascita) #surname+animal+day+name+year
print("combAnimal Second: "+nome+cognome+dog_name2) #name+surname+animal2
if dog_name_value ==1 and dog_number==str(3):
print("")
print("combAnimal1: "+nome+cognome+dog_name) #name+surname+animal1
print("combAnimal2: "+cognome+nome+dog_name) #surname+name+animal
print("combAnimal3: "+dog_name+nome+cognome) #animal+name+surname
print("combAnimal4: "+dog_name+cognome+nome) #animal+surname+name
print("combAnimal5: "+nome+cognome+dog_name+anno_nascita) #name+surname+animal+year
print("combAnimal6: "+cognome+nome+dog_name+anno_nascita) #surname+name+animal+year
print("combAnimal7: "+nome+dog_name) #name+animal
print("combAnimal8: "+dog_name+nome) #animal+name
print("combAnimal9: "+cognome+dog_name) #surname+animal
print("combAnimal10: "+dog_name+cognome) #animal+surname
print("combAnimal11: "+dog_name+anno_nascita) #animal+year
print("combAnimal12: "+dog_name+giorno_nascita) #animal+day
print("combAnimal13: "+dog_name+mese_nascita) #animal+month
print("combAnimal14: "+anno_nascita+dog_name) #year+animal
print("combAnimal15: "+mese_nascita+dog_name) #month+animal
print("combAnimal16: "+giorno_nascita+dog_name) #day+animal
print("combAnimal17: "+nome+anno_nascita+dog_name) #name+year+animal
print("combAnimal18: "+nome+mese_nascita+dog_name) #name+month+animal
print("combAnimal19: "+nome+giorno_nascita+dog_name) #name+day+animal
print("combAnimal20: "+nome+dog_name+anno_nascita) #name+animal+year
print("combAnimal21: "+nome+dog_name+mese_nascita) #name+animal+month
print("combAnimal22: "+nome+dog_name+giorno_nascita) #name+animal+day
print("combAnimal23: "+dog_name+nome+anno_nascita) #animal+name+year
print("combAnimal24: "+dog_name+nome+mese_nascita) #animal+name+month
print("combAnimal25: "+dog_name+nome+giorno_nascita) #animal+name+day
print("combAnimal26: "+dog_name+anno_nascita+nome) #animal+year+name
print("combAnimal27: "+dog_name+mese_nascita+nome) #animal+month+name
print("combAnimal28: "+dog_name+giorno_nascita+nome) #animal+day+name
print("combAnimal29: "+cognome+anno_nascita+dog_name) #surname+year+animal
print("combAnimal30: "+cognome+mese_nascita+dog_name) #surname+month+animal
print("combAnimal31: "+cognome+giorno_nascita+dog_name) #surname+day+animal
print("combAnimal32: "+cognome+dog_name+anno_nascita) #surname+animal+year
print("combAnimal33: "+cognome+dog_name+mese_nascita) #surname+animal+month
print("combAnimal34: "+cognome+dog_name+giorno_nascita) #surname+animal+day
print("combAnimal35: "+dog_name+cognome+anno_nascita) #animal+surname+year
print("combAnimal36: "+dog_name+cognome+mese_nascita) #animal+surname+month
print("combAnimal37: "+dog_name+cognome+giorno_nascita) #animal+surname+day
print("combAnimal38: "+dog_name+anno_nascita+cognome) #animal+year+surname
print("combAnimal39: "+dog_name+mese_nascita+cognome) #animal+month+surname
print("combAnimal40: "+dog_name+giorno_nascita+cognome) #animal+day+surname
print("combAnimal41: "+dog_name+nome+anno_nascita+cognome) #animal+name+year+surname
print("combAnimal42: "+dog_name+nome+anno_nascita+mese_nascita+cognome) #animal+name+year+month+surname
print("combAnimal43: "+dog_name+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #animal+name+year+month+day+surname
print("combAnimal44: "+dog_name+nome+anno_nascita+mese_nascita+cognome+giorno_nascita) #animal+name+year+month+surname+day
print("combAnimal45: "+dog_name+nome+anno_nascita+giorno_nascita+cognome+mese_nascita) #animal+name+year+day+surname+month
print("combAnimal46: "+dog_name+nome+mese_nascita+giorno_nascita+cognome+anno_nascita) #animal+name+month+day+surname+year
print("combAnimal47: "+nome+dog_name+anno_nascita+cognome+mese_nascita) #name+animal+year+surname+month
print("combAnimal48: "+nome+dog_name+mese_nascita+cognome+giorno_nascita) #name+animal+month+surname+day
print("combAnimal49: "+nome+dog_name+giorno_nascita+cognome+anno_nascita) #name+animal+day+surname+year
print("combAnimal50: "+cognome+dog_name+anno_nascita+nome+mese_nascita) #surname+animal+year+name+month
print("combAnimal51: "+cognome+dog_name+mese_nascita+nome+giorno_nascita) #surname+animal+month+name+day
print("combAnimal52: "+cognome+dog_name+giorno_nascita+nome+anno_nascita) #surname+animal+day+name+year
print("combAnimalSecond: "+nome+cognome+dog_name2) #name+surname+animal2
print("combAniaml Thidrd: "+nome+cognome+dog_name3) #name+surname+animal3
elif dog_name_value ==2:
print("")
else:
print("")
print("")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print(" END PASSWORD")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print(MenuFont()) #ASCII LOGO
print(SceltaMenu())
print(CreaPassword())
import random
import string
import subprocess #Process commands
import socket #Process socket data
import pyfiglet
import sys
import os
from subprocess import call
def SplashScreen():
    """Print the ASCII-art splash banner (shown before the crunch tool).

    Purely cosmetic output to stdout; returns None.
    """
    art = (
        " . ) )",
        " ( (| .",
        " ) )\/ ( ( (",
        " * ( (( / ))\)) ( ) )",
        " ( \ )\( | ))( ) (|",
        " >) ))/ | )/ \(( ) \ ",
        " ( ( . -. V )/ )( ( ",
        " \ / . \ . \)) ))",
        " )( ( | | ) . ( /",
        " )( ,')) \ / \( `. )",
        " (\> ,'/__ )) __`. /",
        " ( \ | / ___ ( \/ ___ \ | ( (",
        " \.) |/ / \__ __/ \ \| ))",
        " . \. |> \ | __ | / <| /",
        " )/ \____/ :..: \____/ \ <",
        " ) \ (|__ . / ;: \ __| ) (",
        " (( )\) ~--_ -- -- _--~ / ))",
        " \ ( | || || | ( /",
        " \. | ||_ _|| | /",
        " > : | ~V+-I_I_I-+V~ | : (.",
        " ( \: T\ _ _ /T : ./",
        " \ : T^T T-+-T T^T ;<",
        " \..`_ -+- _' )",
        " ) . `--=.._____..=--'. ./ (",
        " (( ) ( ) ( ) ( )> ",
        " > \/^/) )) ( ( /(. )) ))._/(__))./ (_.",
        " ( _../ ( \)) ) \ ( / \. ./ || ..__:| _. \ ",
        " | \__. ) | (/ /: :)) | \/ |( <.._ )| ) )",
        ")) _./ | ) )) __ < | :( :)) .//( : : |",
        "(: < ): --: ^ \ )( )\/: / /_/ ) :._) :",
        " \..) (_.. .. : : : .( \..:.. ./__. ./",
        " ^ ^ \^ ^ ^\/^ ^ JaL",
    )
    for row in art:
        print(row)
def MenuFont():
    """Print the program logo via pyfiglet and return the credit tagline."""
    print(pyfiglet.figlet_format("PSW-FINFUCK"))
    return "+++By Anonik V1.0+++"
def CreditsMenu():
    """Show the credits/social-links screen, then route the user.

    "1" clears the screen and returns to the main menu (SceltaMenu);
    "2" or any unrecognised input terminates the process.
    """
    screen = (
        "",
        "",
        "()==========================================()",
        "() ()",
        "() CREDITS BY: ()",
        "() ()",
        "() *)General Scripting: Anonik ()",
        "() ()",
        "() *)crucnh.py Scripting By derv82 ()",
        "() ()",
        "()==========================================()",
        "",
        "",
        "()==========================================()",
        "() SOCIAL - GITHUB ()",
        "()==========================================()",
        "",
        "()=Github Anonik: https://github.com/anonik9900",
        "()=Github derv: https://github.com/derv82",
        "",
        "()==========================================()",
        "() END CREDITS AND SOCIAL ()",
        "()==========================================()",
        "",
        "",
    )
    for row in screen:
        print(row)
    creditChoice = input("Type 1) to go home or 2) to exit: ")
    if creditChoice == "1":
        os.system('clear')
        return SceltaMenu()
    if creditChoice == "2":
        sys.exit()
    # Anything else is treated as fatal, matching the original behaviour.
    print("Error... Invalid command. Closing program")
    sys.exit()
def MenuCHoice():
    """Print the main-menu banner. Output only; returns None."""
    banner = (
        ")-------------------------------------------(",
        ")- -(",
        ")---------Build YOur ExPlOiT PsW------------(",
        ")----- -----(",
        ")--------------By Anonik&Others-------------(",
        ")----- -----(",
        ")-------------------------------------------(",
        "",
        " ----- Menu -----",
        " ()--------------------------()",
        " ()---1*Create Psw with comb-()",
        "",
    )
    for row in banner:
        print(row)
def Menucrunch():
    """Interactive front-end for crunch.py.

    Prompts for a character set and min/max word lengths, then runs
    crunch.py, optionally writing the generated wordlist to a file.
    Always terminates the process via sys.exit().
    """
    print("")
    print("()---------------------------------()")
    print("()-----------/Crunch Tool\---------()")
    print("()----------| |--------()")
    print("()----------\_____________/--------()")
    print("()---------------------------------()")
    # BUG FIX: was print(SplashScreen()), which also printed a stray "None"
    # line because SplashScreen() returns None.
    SplashScreen()
    vic = input("Please enter a value for create new wordlist: ")
    minChar = input("Enter the minimum word lenght: ")
    maxChar = input("Enter the maximum word lenght: ")
    print("")
    saveFile = input("Did you save all words inside a file ?: [Y]or[N] ")
    if saveFile in ("Y", "y"):
        nameSfile = input("Enter the name of your wordlist is saved ex: (word.txt) : ")
        print("")
        print("Success")
        # SECURITY FIX: the old os.system() call concatenated raw user input
        # into a shell command string (command injection). An argv list with
        # shell=False passes each value as a literal argument.
        call(["python3", "crunch.py", "-m", minChar, "-M", maxChar, "-c", vic, "-o", nameSfile])
        print("")
        print("()================================()")
        print("() COMPLETED ()")
        print("()================================()")
        print("")
        print("The wordlist called "+nameSfile+" is saved inside the pswf.py folder !")
        print("")
        sys.exit()
    if saveFile in ("N", "n"):
        print("Go to the next step \../ ")
        print("")
        # BUG FIX: the old call passed "crunch.py -m ... -c ..." as ONE argv
        # element, so python3 looked for a script literally named that whole
        # string and failed. Each flag/value is now its own argument.
        call(["python3", "crunch.py", "-m", minChar, "-M", maxChar, "-c", vic])
        print("")
        print("()================================()")
        print("() COMPLETED ()")
        print("()================================()")
        print("")
        sys.exit()
    # Unrecognised answer: fatal, matching the original behaviour.
    # (Dead code that followed the original's unconditional exits removed.)
    print("Error.... :(")
    sys.exit()
def SceltaMenu():
    """Prompt for a main-menu choice and dispatch to the matching screen.

    "1" -> menu, "2" -> exit, "3" -> crunch tool, "4" -> credits;
    anything else re-prompts recursively.
    """
    scelta = input("Type (1) to enter, (2) To Exit, (3) to use the crunch Tool \nor (4) to see the credits ")
    if scelta == "2":
        sys.exit()
    # Dispatch table instead of the original if-chain; same targets.
    handlers = {
        "1": MenuCHoice,   # Menu
        "3": Menucrunch,   # MenuCrunch
        "4": CreditsMenu,  # Menu Credits
    }
    handler = handlers.get(scelta)
    if handler is not None:
        return handler()
    print("Invalid input - try again")
    print("")
    return SceltaMenu()
def CreaPassword():
print("")
print(")------- Password Creator for Exploit -------(")
print("")
nome = input("Please enter your Victim name: ")
print("")
cognome = input("Please enter your Victim Surname: ")
print("")
anno_nascita = input("Enter your victim year of birth: ")
print("")
mese_nascita = input("Enter your victim month of birth: ")
print("")
giorno_nascita = input("Enter your victim day of birth: ")
print("")
#If se ha il cane
has_dog = input("Yor victim has an animal?: ")
if has_dog == "yes":
dog_number = input("Enter the number of animals: ")
if dog_number == str(1):
dog_name = input("Enter the animal name: ")
if dog_number ==str(2):
dog_name = input("Enter the 1st animal name: ")
dog_name2 = input("Enter the 2nd animal name: ")
if dog_number ==str(3):
dog_name = input("Enter the 1st animal name: ")
dog_name2 = input("Enter the 2nd animal name: ")
dog_name3 = input("Enter the 3td animal name: ")
dog_name_value =1
elif has_dog == "no":
dog_name = ""
dog_name_value =2
else:
print(CreaPassword())
#print "La password e': "+nome+ " " +cognome
#Combinazione Password
print("")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print(" Genero Password")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print("comb1: "+nome+cognome) #name + surname
print("comb2: "+nome+ " "+cognome) #name + surnamen + with space
print("comb3: "+nome+cognome+anno_nascita) #name+surname+year of birth
print("comb4: "+cognome+nome) #surname+name
print("comb5: "+cognome+ " "+nome) #surname + name with space
print("comb6: "+cognome+nome+anno_nascita) #surname + name + year of birth
print("comb7: "+nome+anno_nascita) #name+year of birth
print("comb8: "+cognome+anno_nascita)#surname+year of birth
print("comb9: "+nome+cognome+mese_nascita) #name+surname+month of birth
print("comb10: "+cognome+nome+mese_nascita) #surname+name+month of birth
print("comb11: "+nome+mese_nascita+cognome) #name+month of birth+surname
print("comb12: "+cognome+mese_nascita+nome) #surname+month of birth+name
print("comb13: "+nome+anno_nascita+mese_nascita) #name+year-of-birth+month-of-birth
print("comb14: "+nome+mese_nascita+anno_nascita) #name+month-of-birth-year-of-birth
print("comb15: "+cognome+anno_nascita+mese_nascita) #surname+year-of-birth+month-of-birth
print("comb16: "+cognome+mese_nascita+anno_nascita) #surname+month-of-birth-year-of-birth
print("comb17: "+nome+cognome+anno_nascita+mese_nascita) #name+surname+year of birth+month of birth
print("comb18: "+nome+cognome+mese_nascita+anno_nascita) #name+surname+month of birth+ year of birth
print("comb19: "+cognome+nome+anno_nascita+mese_nascita) #surname+name+yearofbith+monthofbirth
print("comb20: "+cognome+nome+mese_nascita+anno_nascita) #surname+name+monthofbirt+yearofbirth
print("comb21: "+nome+cognome+anno_nascita+mese_nascita+giorno_nascita) #name+surname+year+month+day
print("comb22: "+nome+cognome+mese_nascita+anno_nascita+giorno_nascita) #name+surname+month+year+day
print("comb23: "+nome+cognome+giorno_nascita+mese_nascita+anno_nascita) #name+surname+day+month+year
print("comb24: "+nome+cognome+giorno_nascita+anno_nascita+mese_nascita) #name+surname+day+year+month
print("comb25: "+cognome+nome+anno_nascita+mese_nascita+giorno_nascita) #surname+name+year+month+day
print("comb26: "+cognome+nome+mese_nascita+anno_nascita+giorno_nascita) #surname+name+month+year+day
print("comb27: "+cognome+nome+giorno_nascita+mese_nascita+anno_nascita) #surname+name+day+month+year
print("comb28: "+cognome+nome+giorno_nascita+anno_nascita+mese_nascita) #surname+name+day+year+month
print("comb29: "+nome+giorno_nascita) #name+day
print("comb30: "+cognome+giorno_nascita) #surname+day
print("comb31: "+nome+cognome+giorno_nascita) #name+surname+day
print("comb32: "+nome+giorno_nascita+cognome) #name+day+surname
print("comb33: "+cognome+nome+giorno_nascita) #surname+name+day
print("comb34: "+cognome+giorno_nascita+nome) #surname+day+name
print("comb35: "+nome+anno_nascita+cognome) #name+year+surname
print("comb36: "+cognome+anno_nascita+nome) #surname+year+name
print("comb37: "+nome+anno_nascita+cognome+mese_nascita) #name+year+surname+month
print("comb38: "+cognome+anno_nascita+nome+mese_nascita) #surname+year+name+month
print("comb39: "+nome+mese_nascita+cognome+anno_nascita) #name+month+surname+year
print("comb40: "+cognome+mese_nascita+nome+anno_nascita) #surname+month+name+year
print("comb41: "+nome+anno_nascita+mese_nascita+cognome) #name+year+month+surname
print("comb42: "+nome+mese_nascita+anno_nascita+cognome) #name+month+year+surname
print("comb43: "+cognome+anno_nascita+mese_nascita+nome) #surname+year+month+name
print("comb44: "+cognome+mese_nascita+anno_nascita+nome) #surname+month+year+name
print("comb45: "+anno_nascita+mese_nascita+giorno_nascita) #YY/MM/DD
print("comb46: "+mese_nascita+giorno_nascita+anno_nascita) #MM/DD/YY
print("comb47: "+giorno_nascita+mese_nascita+anno_nascita) #DD/MM/YY
print("comb48: "+nome+anno_nascita+mese_nascita+giorno_nascita) #name+YY/MM/DD
print("comb49: "+nome+mese_nascita+giorno_nascita+anno_nascita) #name+MM/DD/YY
print("comb50: "+nome+giorno_nascita+mese_nascita+anno_nascita) #name+DD/MM/YY
print("comb51: "+cognome+anno_nascita+mese_nascita+giorno_nascita) #surname+YY/MM/DD
print("comb52: "+cognome+mese_nascita+giorno_nascita+anno_nascita) #surname+MM/DD/YY
print("comb53: "+cognome+giorno_nascita+mese_nascita+anno_nascita) #surname+DD/MM/YY
print("comb54: "+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #name+YY/MM/DD+surname
print("comb55: "+nome+mese_nascita+giorno_nascita+anno_nascita+cognome) #name+MM/DD/YY+surname
print("comb56: "+nome+giorno_nascita+mese_nascita+anno_nascita+cognome) #name+DD/MM/YY+surname
print("comb57: "+cognome+anno_nascita+mese_nascita+giorno_nascita+nome) #surname+YY/MM/DD+name
print("comb58: "+cognome+mese_nascita+giorno_nascita+anno_nascita+nome) #surname+MM/DD/YY+name
print("comb59: "+cognome+giorno_nascita+mese_nascita+anno_nascita+nome) #surname+DD/MM/YY+name
#Animale
if dog_name_value ==1 and dog_number==str(1):
print("")
print("combAnimal1: "+nome+cognome+dog_name) #name+surname+animal
print("combAnimal2: "+cognome+nome+dog_name) #surname+name+animal
print("combAnimal3: "+dog_name+nome+cognome) #animal+name+surname
print("combAnimal4: "+dog_name+cognome+nome) #animal+surname+name
print("combAnimal5: "+nome+cognome+dog_name+anno_nascita) #name+surname+animal+year
print("combAnimal6: "+cognome+nome+dog_name+anno_nascita) #surname+name+animal+year
print("combAnimal7: "+nome+dog_name) #name+animal
print("combAnimal8: "+dog_name+nome) #animal+name
print("combAnimal9: "+cognome+dog_name) #surname+animal
print("combAnimal10: "+dog_name+cognome) #animal+surname
print("combAnimal11: "+dog_name+anno_nascita) #animal+year
print("combAnimal12: "+dog_name+giorno_nascita) #animal+day
print("combAnimal13: "+dog_name+mese_nascita) #animal+month
print("combAnimal14: "+anno_nascita+dog_name) #year+animal
print("combAnimal15: "+mese_nascita+dog_name) #month+animal
print("combAnimal16: "+giorno_nascita+dog_name) #day+animal
print("combAnimal17: "+nome+anno_nascita+dog_name) #name+year+animal
print("combAnimal18: "+nome+mese_nascita+dog_name) #name+month+animal
print("combAnimal19: "+nome+giorno_nascita+dog_name) #name+day+animal
print("combAnimal20: "+nome+dog_name+anno_nascita) #name+animal+year
print("combAnimal21: "+nome+dog_name+mese_nascita) #name+animal+month
print("combAnimal22: "+nome+dog_name+giorno_nascita) #name+animal+day
print("combAnimal23: "+dog_name+nome+anno_nascita) #animal+name+year
print("combAnimal24: "+dog_name+nome+mese_nascita) #animal+name+month
print("combAnimal25: "+dog_name+nome+giorno_nascita) #animal+name+day
print("combAnimal26: "+dog_name+anno_nascita+nome) #animal+year+name
print("combAnimal27: "+dog_name+mese_nascita+nome) #animal+month+name
print("combAnimal28: "+dog_name+giorno_nascita+nome) #animal+day+name
print("combAnimal29: "+cognome+anno_nascita+dog_name) #surname+year+animal
print("combAnimal30: "+cognome+mese_nascita+dog_name) #surname+month+animal
print("combAnimal31: "+cognome+giorno_nascita+dog_name) #surname+day+animal
print("combAnimal32: "+cognome+dog_name+anno_nascita) #surname+animal+year
print("combAnimal33: "+cognome+dog_name+mese_nascita) #surname+animal+month
print("combAnimal34: "+cognome+dog_name+giorno_nascita) #surname+animal+day
print("combAnimal35: "+dog_name+cognome+anno_nascita) #animal+surname+year
print("combAnimal36: "+dog_name+cognome+mese_nascita) #animal+surname+month
print("combAnimal37: "+dog_name+cognome+giorno_nascita) #animal+surname+day
print("combAnimal38: "+dog_name+anno_nascita+cognome) #animal+year+surname
print("combAnimal39: "+dog_name+mese_nascita+cognome) #animal+month+surname
print("combAnimal40: "+dog_name+giorno_nascita+cognome) #animal+day+surname
print("combAnimal41: "+dog_name+nome+anno_nascita+cognome) #animal+name+year+surname
print("combAnimal42: "+dog_name+nome+anno_nascita+mese_nascita+cognome) #animal+name+year+month+surname
print("combAnimal43: "+dog_name+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #animal+name+year+month+day+surname
print("combAnimal44: "+dog_name+nome+anno_nascita+mese_nascita+cognome+giorno_nascita) #animal+name+year+month+surname+day
print("combAnimal45: "+dog_name+nome+anno_nascita+giorno_nascita+cognome+mese_nascita) #animal+name+year+day+surname+month
print("combAnimal46: "+dog_name+nome+mese_nascita+giorno_nascita+cognome+anno_nascita) #animal+name+month+day+surname+year
print("combAnimal47: "+nome+dog_name+anno_nascita+cognome+mese_nascita) #name+animal+year+surname+month
print("combAnimal48: "+nome+dog_name+mese_nascita+cognome+giorno_nascita) #name+animal+month+surname+day
print("combAnimal49: "+nome+dog_name+giorno_nascita+cognome+anno_nascita) #name+animal+day+surname+year
print("combAnimal50: "+cognome+dog_name+anno_nascita+nome+mese_nascita) #surname+animal+year+name+month
print("combAnimal51: "+cognome+dog_name+mese_nascita+nome+giorno_nascita) #surname+animal+month+name+day
print("combAnimal52: "+cognome+dog_name+giorno_nascita+nome+anno_nascita) #surname+animal+day+name+year
if dog_name_value ==1 and dog_number==str(2):
print("")
print("combAnimal1: "+nome+cognome+dog_name) #name+surname+animal1
print("combAnimal2: "+cognome+nome+dog_name) #surname+name+animal
print("combAnimal3: "+dog_name+nome+cognome) #animal+name+surname
print("combAnimal4: "+dog_name+cognome+nome) #animal+surname+name
print("combAnimal5: "+nome+cognome+dog_name+anno_nascita) #name+surname+animal+year
print("combAnimal6: "+cognome+nome+dog_name+anno_nascita) #surname+name+animal+year
print("combAnimal7: "+nome+dog_name) #name+animal
print("combAnimal8: "+dog_name+nome) #animal+name
print("combAnimal9: "+cognome+dog_name) #surname+animal
print("combAnimal10: "+dog_name+cognome) #animal+surname
print("combAnimal11: "+dog_name+anno_nascita) #animal+year
print("combAnimal12: "+dog_name+giorno_nascita) #animal+day
print("combAnimal13: "+dog_name+mese_nascita) #animal+month
print("combAnimal14: "+anno_nascita+dog_name) #year+animal
print("combAnimal15: "+mese_nascita+dog_name) #month+animal
print("combAnimal16: "+giorno_nascita+dog_name) #day+animal
print("combAnimal17: "+nome+anno_nascita+dog_name) #name+year+animal
print("combAnimal18: "+nome+mese_nascita+dog_name) #name+month+animal
print("combAnimal19: "+nome+giorno_nascita+dog_name) #name+day+animal
print("combAnimal20: "+nome+dog_name+anno_nascita) #name+animal+year
print("combAnimal21: "+nome+dog_name+mese_nascita) #name+animal+month
print("combAnimal22: "+nome+dog_name+giorno_nascita) #name+animal+day
print("combAnimal23: "+dog_name+nome+anno_nascita) #animal+name+year
print("combAnimal24: "+dog_name+nome+mese_nascita) #animal+name+month
print("combAnimal25: "+dog_name+nome+giorno_nascita) #animal+name+day
print("combAnimal26: "+dog_name+anno_nascita+nome) #animal+year+name
print("combAnimal27: "+dog_name+mese_nascita+nome) #animal+month+name
print("combAnimal28: "+dog_name+giorno_nascita+nome) #animal+day+name
print("combAnimal29: "+cognome+anno_nascita+dog_name) #surname+year+animal
print("combAnimal30: "+cognome+mese_nascita+dog_name) #surname+month+animal
print("combAnimal31: "+cognome+giorno_nascita+dog_name) #surname+day+animal
print("combAnimal32: "+cognome+dog_name+anno_nascita) #surname+animal+year
print("combAnimal33: "+cognome+dog_name+mese_nascita) #surname+animal+month
print("combAnimal34: "+cognome+dog_name+giorno_nascita) #surname+animal+day
print("combAnimal35: "+dog_name+cognome+anno_nascita) #animal+surname+year
print("combAnimal36: "+dog_name+cognome+mese_nascita) #animal+surname+month
print("combAnimal37: "+dog_name+cognome+giorno_nascita) #animal+surname+day
print("combAnimal38: "+dog_name+anno_nascita+cognome) #animal+year+surname
print("combAnimal39: "+dog_name+mese_nascita+cognome) #animal+month+surname
print("combAnimal40: "+dog_name+giorno_nascita+cognome) #animal+day+surname
print("combAnimal41: "+dog_name+nome+anno_nascita+cognome) #animal+name+year+surname
print("combAnimal42: "+dog_name+nome+anno_nascita+mese_nascita+cognome) #animal+name+year+month+surname
print("combAnimal43: "+dog_name+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #animal+name+year+month+day+surname
print("combAnimal44: "+dog_name+nome+anno_nascita+mese_nascita+cognome+giorno_nascita) #animal+name+year+month+surname+day
print("combAnimal45: "+dog_name+nome+anno_nascita+giorno_nascita+cognome+mese_nascita) #animal+name+year+day+surname+month
print("combAnimal46: "+dog_name+nome+mese_nascita+giorno_nascita+cognome+anno_nascita) #animal+name+month+day+surname+year
print("combAnimal47: "+nome+dog_name+anno_nascita+cognome+mese_nascita) #name+animal+year+surname+month
print("combAnimal48: "+nome+dog_name+mese_nascita+cognome+giorno_nascita) #name+animal+month+surname+day
print("combAnimal49: "+nome+dog_name+giorno_nascita+cognome+anno_nascita) #name+animal+day+surname+year
print("combAnimal50: "+cognome+dog_name+anno_nascita+nome+mese_nascita) #surname+animal+year+name+month
print("combAnimal51: "+cognome+dog_name+mese_nascita+nome+giorno_nascita) #surname+animal+month+name+day
print("combAnimal52: "+cognome+dog_name+giorno_nascita+nome+anno_nascita) #surname+animal+day+name+year
print("combAnimal Second: "+nome+cognome+dog_name2) #name+surname+animal2
if dog_name_value ==1 and dog_number==str(3):
print("")
print("combAnimal1: "+nome+cognome+dog_name) #name+surname+animal1
print("combAnimal2: "+cognome+nome+dog_name) #surname+name+animal
print("combAnimal3: "+dog_name+nome+cognome) #animal+name+surname
print("combAnimal4: "+dog_name+cognome+nome) #animal+surname+name
print("combAnimal5: "+nome+cognome+dog_name+anno_nascita) #name+surname+animal+year
print("combAnimal6: "+cognome+nome+dog_name+anno_nascita) #surname+name+animal+year
print("combAnimal7: "+nome+dog_name) #name+animal
print("combAnimal8: "+dog_name+nome) #animal+name
print("combAnimal9: "+cognome+dog_name) #surname+animal
print("combAnimal10: "+dog_name+cognome) #animal+surname
print("combAnimal11: "+dog_name+anno_nascita) #animal+year
print("combAnimal12: "+dog_name+giorno_nascita) #animal+day
print("combAnimal13: "+dog_name+mese_nascita) #animal+month
print("combAnimal14: "+anno_nascita+dog_name) #year+animal
print("combAnimal15: "+mese_nascita+dog_name) #month+animal
print("combAnimal16: "+giorno_nascita+dog_name) #day+animal
print("combAnimal17: "+nome+anno_nascita+dog_name) #name+year+animal
print("combAnimal18: "+nome+mese_nascita+dog_name) #name+month+animal
print("combAnimal19: "+nome+giorno_nascita+dog_name) #name+day+animal
print("combAnimal20: "+nome+dog_name+anno_nascita) #name+animal+year
print("combAnimal21: "+nome+dog_name+mese_nascita) #name+animal+month
print("combAnimal22: "+nome+dog_name+giorno_nascita) #name+animal+day
print("combAnimal23: "+dog_name+nome+anno_nascita) #animal+name+year
print("combAnimal24: "+dog_name+nome+mese_nascita) #animal+name+month
print("combAnimal25: "+dog_name+nome+giorno_nascita) #animal+name+day
print("combAnimal26: "+dog_name+anno_nascita+nome) #animal+year+name
print("combAnimal27: "+dog_name+mese_nascita+nome) #animal+month+name
print("combAnimal28: "+dog_name+giorno_nascita+nome) #animal+day+name
print("combAnimal29: "+cognome+anno_nascita+dog_name) #surname+year+animal
print("combAnimal30: "+cognome+mese_nascita+dog_name) #surname+month+animal
print("combAnimal31: "+cognome+giorno_nascita+dog_name) #surname+day+animal
print("combAnimal32: "+cognome+dog_name+anno_nascita) #surname+animal+year
print("combAnimal33: "+cognome+dog_name+mese_nascita) #surname+animal+month
print("combAnimal34: "+cognome+dog_name+giorno_nascita) #surname+animal+day
print("combAnimal35: "+dog_name+cognome+anno_nascita) #animal+surname+year
print("combAnimal36: "+dog_name+cognome+mese_nascita) #animal+surname+month
print("combAnimal37: "+dog_name+cognome+giorno_nascita) #animal+surname+day
print("combAnimal38: "+dog_name+anno_nascita+cognome) #animal+year+surname
print("combAnimal39: "+dog_name+mese_nascita+cognome) #animal+month+surname
print("combAnimal40: "+dog_name+giorno_nascita+cognome) #animal+day+surname
print("combAnimal41: "+dog_name+nome+anno_nascita+cognome) #animal+name+year+surname
print("combAnimal42: "+dog_name+nome+anno_nascita+mese_nascita+cognome) #animal+name+year+month+surname
print("combAnimal43: "+dog_name+nome+anno_nascita+mese_nascita+giorno_nascita+cognome) #animal+name+year+month+day+surname
print("combAnimal44: "+dog_name+nome+anno_nascita+mese_nascita+cognome+giorno_nascita) #animal+name+year+month+surname+day
print("combAnimal45: "+dog_name+nome+anno_nascita+giorno_nascita+cognome+mese_nascita) #animal+name+year+day+surname+month
print("combAnimal46: "+dog_name+nome+mese_nascita+giorno_nascita+cognome+anno_nascita) #animal+name+month+day+surname+year
print("combAnimal47: "+nome+dog_name+anno_nascita+cognome+mese_nascita) #name+animal+year+surname+month
print("combAnimal48: "+nome+dog_name+mese_nascita+cognome+giorno_nascita) #name+animal+month+surname+day
print("combAnimal49: "+nome+dog_name+giorno_nascita+cognome+anno_nascita) #name+animal+day+surname+year
print("combAnimal50: "+cognome+dog_name+anno_nascita+nome+mese_nascita) #surname+animal+year+name+month
print("combAnimal51: "+cognome+dog_name+mese_nascita+nome+giorno_nascita) #surname+animal+month+name+day
print("combAnimal52: "+cognome+dog_name+giorno_nascita+nome+anno_nascita) #surname+animal+day+name+year
print("combAnimalSecond: "+nome+cognome+dog_name2) #name+surname+animal2
print("combAniaml Thidrd: "+nome+cognome+dog_name3) #name+surname+animal3
elif dog_name_value ==2:
print("")
else:
print("")
print("")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print(" END PASSWORD")
print("")
print("---------------------------------------------------------------------------")
print("---------------------------------------------------------------------------")
print("")
print(MenuFont()) #ASCII LOGO
print(SceltaMenu())
print(CreaPassword()) | 0.093388 | 0.058561 |
import time
import struct
from collections import namedtuple
from mesh.generic.tdmaState import TDMAStatus
from mesh.generic.cmdDict import CmdDict
from mesh.generic.cmds import NodeCmds, PixhawkCmds, TDMACmds, PixhawkFCCmds
from mesh.generic.commandMsg import CommandMsg
from mesh.generic.navigation import convertLatLonAlt
from mesh.generic.nodeHeader import createHeader
from mesh.generic.nodeParams import NodeParams
from unittests.testConfig import configFilePath
nodeParams = NodeParams(configFile=configFilePath)
# TestCmd type
TestCmd = namedtuple('TestCmd', ['cmdData', 'body', 'header'])
# Node configuration
nodeId = 1
cmdCounter = 11
# Test commands dictionary
testCmds = dict()
### NodeCmds
# NodeCmds['GCSCmd']
cmdId = NodeCmds['GCSCmd']
cmdData = [0, 1]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId, 0, cmdCounter]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# NodeCmds['ConfigRequest']
cmdId = NodeCmds['ConfigRequest']
cmdData = nodeParams.config.calculateHash()
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId, cmdCounter]])
testCmds.update({cmdId: TestCmd({'configHash': cmdData}, cmdData, header)})
# NodeCmds['ParamUpdate']
cmdId = NodeCmds['ParamUpdate']
cmdData = {'paramId': 100, 'paramValue': b'123'}
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId, cmdCounter]])
body = struct.pack(CmdDict[cmdId].packFormat, cmdData['paramId']) + cmdData['paramValue']
testCmds.update({cmdId: TestCmd(cmdData, body, header)})
### PixhawkCmds
# PixhawkCmds['FormationCmd']
cmdId = PixhawkCmds['FormationCmd']
cmdData = [10, 3, 34.1234567, -86.7654321, 20.0, 0]
msgData = [10, 3] + convertLatLonAlt(cmdData[2:5]) + [0]
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkCmds['NodeStateUpdate']
from mesh.generic.nodeState import NodeState
from uav.pixhawkNavigation import PixhawkNavigation
cmdId = PixhawkCmds['NodeStateUpdate']
nav = PixhawkNavigation()
nav.state = [34.1234567, -86.7654321, 20.0]
nav.timestamp = 0
nodeStatus = [NodeState(i) for i in range(2)]
nodeStatus[nodeId].state = nav.state
nodeStatus[nodeId].timestamp = nav.timestamp
for i in range(2):
nodeStatus[i].present = True
nodeStatus[i].updating = True
# This node
precision = [1000000,1]
state = convertLatLonAlt([nav.state[0], nav.state[1], nav.state[2]], precision=precision)
msgData = [nodeId] + state + [0, nodeStatus[nodeId-1].formationMode, nodeStatus[nodeId-1].status]
body = struct.pack(CmdDict['NodeStateUpdateContents'].packFormat, *msgData)
# Other nodes
msgData[0] = nodeId + 1
body = struct.pack(CmdDict[cmdId].packFormat, 2) + body + struct.pack(CmdDict['NodeStateUpdateContents'].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'nodeStatus': nodeStatus, 'nodeId': nodeId, 'nav': nav}, body, header)})
# PixhawkCmds['PosCmd']
cmdId = PixhawkCmds['PosCmd']
cmdData = [34.1234567, -86.7654321, 20.0, 3]
msgData = cmdData[3:] + convertLatLonAlt(cmdData[0:3])
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'formationMode': cmdData[3], 'formCmd': cmdData[0:3]}, body, header)})
# PixhawkCmds['StateUpdate']
cmdId = PixhawkCmds['StateUpdate']
cmdData = [34.1234567, -86.7654321, 20.0, 2, 1700, 3456789]
msgData = convertLatLonAlt(cmdData[0:3]) + cmdData[3:]
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkCmds['TargetUpdate']
cmdId = PixhawkCmds['TargetUpdate']
cmdData = [34.1234567, -86.7654321, 20.0]
msgData = convertLatLonAlt(cmdData)
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
### TDMACmds
# TDMACmds['MeshStatus']
cmdId = TDMACmds['MeshStatus']
cmdData = [int(time.time()), TDMAStatus.nominal]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'commStartTimeSec': cmdData[0], 'status': TDMAStatus.nominal}, body, header)})
# TDMACmds['TimeOffset']
cmdId = TDMACmds['TimeOffset']
nodeState = NodeState(1)
nodeState.timeOffset = 0.40
body = struct.pack(CmdDict[cmdId].packFormat, int(nodeState.timeOffset*100))
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'nodeStatus': nodeState}, body, header)})
# TDMACmds['TimeOffsetSummary']
cmdId = TDMACmds['TimeOffsetSummary']
nodeStatus = [NodeState(i) for i in range(2)]
nodeStatus[0].timeOffset = 0.40
nodeStatus[1].timeOffset = 0.50
body = struct.pack(CmdDict[cmdId].packFormat + 'HH', len(nodeStatus), int(nodeStatus[0].timeOffset*100), int(nodeStatus[1].timeOffset*100))
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'nodeStatus': nodeStatus}, body, header)})
### PixhawkFCCmds
# PixhawkFCCmds['ModeChange']
cmdId = PixhawkFCCmds['ModeChange']
cmdData = [1]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkFCCmds['ArmCommand']
cmdId = PixhawkFCCmds['ArmCommand']
cmdData = [1]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkFCCmds['VehicleStatus']
cmdId = PixhawkFCCmds['VehicleStatus']
cmdData = [1, 0, 34.1234567, -86.7654321, 20.0, 0.5]
state = convertLatLonAlt(cmdData[2:5])
msgData = cmdData[0:2] + state + [int(cmdData[5]*100)]
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)}) | python/unittests/testCmds.py | import time
import struct
from collections import namedtuple
from mesh.generic.tdmaState import TDMAStatus
from mesh.generic.cmdDict import CmdDict
from mesh.generic.cmds import NodeCmds, PixhawkCmds, TDMACmds, PixhawkFCCmds
from mesh.generic.commandMsg import CommandMsg
from mesh.generic.navigation import convertLatLonAlt
from mesh.generic.nodeHeader import createHeader
from mesh.generic.nodeParams import NodeParams
from unittests.testConfig import configFilePath
nodeParams = NodeParams(configFile=configFilePath)
# TestCmd type
TestCmd = namedtuple('TestCmd', ['cmdData', 'body', 'header'])
# Node configuration
nodeId = 1
cmdCounter = 11
# Test commands dictionary
testCmds = dict()
### NodeCmds
# NodeCmds['GCSCmd']
cmdId = NodeCmds['GCSCmd']
cmdData = [0, 1]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId, 0, cmdCounter]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# NodeCmds['ConfigRequest']
cmdId = NodeCmds['ConfigRequest']
cmdData = nodeParams.config.calculateHash()
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId, cmdCounter]])
testCmds.update({cmdId: TestCmd({'configHash': cmdData}, cmdData, header)})
# NodeCmds['ParamUpdate']
cmdId = NodeCmds['ParamUpdate']
cmdData = {'paramId': 100, 'paramValue': b'123'}
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId, cmdCounter]])
body = struct.pack(CmdDict[cmdId].packFormat, cmdData['paramId']) + cmdData['paramValue']
testCmds.update({cmdId: TestCmd(cmdData, body, header)})
### PixhawkCmds
# PixhawkCmds['FormationCmd']
cmdId = PixhawkCmds['FormationCmd']
cmdData = [10, 3, 34.1234567, -86.7654321, 20.0, 0]
msgData = [10, 3] + convertLatLonAlt(cmdData[2:5]) + [0]
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkCmds['NodeStateUpdate']
from mesh.generic.nodeState import NodeState
from uav.pixhawkNavigation import PixhawkNavigation
cmdId = PixhawkCmds['NodeStateUpdate']
nav = PixhawkNavigation()
nav.state = [34.1234567, -86.7654321, 20.0]
nav.timestamp = 0
nodeStatus = [NodeState(i) for i in range(2)]
nodeStatus[nodeId].state = nav.state
nodeStatus[nodeId].timestamp = nav.timestamp
for i in range(2):
nodeStatus[i].present = True
nodeStatus[i].updating = True
# This node
precision = [1000000,1]
state = convertLatLonAlt([nav.state[0], nav.state[1], nav.state[2]], precision=precision)
msgData = [nodeId] + state + [0, nodeStatus[nodeId-1].formationMode, nodeStatus[nodeId-1].status]
body = struct.pack(CmdDict['NodeStateUpdateContents'].packFormat, *msgData)
# Other nodes
msgData[0] = nodeId + 1
body = struct.pack(CmdDict[cmdId].packFormat, 2) + body + struct.pack(CmdDict['NodeStateUpdateContents'].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'nodeStatus': nodeStatus, 'nodeId': nodeId, 'nav': nav}, body, header)})
# PixhawkCmds['PosCmd']
cmdId = PixhawkCmds['PosCmd']
cmdData = [34.1234567, -86.7654321, 20.0, 3]
msgData = cmdData[3:] + convertLatLonAlt(cmdData[0:3])
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'formationMode': cmdData[3], 'formCmd': cmdData[0:3]}, body, header)})
# PixhawkCmds['StateUpdate']
cmdId = PixhawkCmds['StateUpdate']
cmdData = [34.1234567, -86.7654321, 20.0, 2, 1700, 3456789]
msgData = convertLatLonAlt(cmdData[0:3]) + cmdData[3:]
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkCmds['TargetUpdate']
cmdId = PixhawkCmds['TargetUpdate']
cmdData = [34.1234567, -86.7654321, 20.0]
msgData = convertLatLonAlt(cmdData)
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
### TDMACmds
# TDMACmds['MeshStatus']
cmdId = TDMACmds['MeshStatus']
cmdData = [int(time.time()), TDMAStatus.nominal]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'commStartTimeSec': cmdData[0], 'status': TDMAStatus.nominal}, body, header)})
# TDMACmds['TimeOffset']
cmdId = TDMACmds['TimeOffset']
nodeState = NodeState(1)
nodeState.timeOffset = 0.40
body = struct.pack(CmdDict[cmdId].packFormat, int(nodeState.timeOffset*100))
header = createHeader([CmdDict[cmdId].header, [cmdId, nodeId]])
testCmds.update({cmdId: TestCmd({'nodeStatus': nodeState}, body, header)})
# TDMACmds['TimeOffsetSummary']
cmdId = TDMACmds['TimeOffsetSummary']
nodeStatus = [NodeState(i) for i in range(2)]
nodeStatus[0].timeOffset = 0.40
nodeStatus[1].timeOffset = 0.50
body = struct.pack(CmdDict[cmdId].packFormat + 'HH', len(nodeStatus), int(nodeStatus[0].timeOffset*100), int(nodeStatus[1].timeOffset*100))
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'nodeStatus': nodeStatus}, body, header)})
### PixhawkFCCmds
# PixhawkFCCmds['ModeChange']
cmdId = PixhawkFCCmds['ModeChange']
cmdData = [1]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkFCCmds['ArmCommand']
cmdId = PixhawkFCCmds['ArmCommand']
cmdData = [1]
body = struct.pack(CmdDict[cmdId].packFormat, *cmdData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)})
# PixhawkFCCmds['VehicleStatus']
cmdId = PixhawkFCCmds['VehicleStatus']
cmdData = [1, 0, 34.1234567, -86.7654321, 20.0, 0.5]
state = convertLatLonAlt(cmdData[2:5])
msgData = cmdData[0:2] + state + [int(cmdData[5]*100)]
body = struct.pack(CmdDict[cmdId].packFormat, *msgData)
header = createHeader([CmdDict[cmdId].header, [cmdId]])
testCmds.update({cmdId: TestCmd({'cmd': CommandMsg(cmdId, cmdData)}, body, header)}) | 0.134066 | 0.112308 |
import os.path
import logging
from typing import List, Optional
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from .calendar import Calendar
class GoogleCalendarService:
# For now, we only need the readonly permission of Google Calendar.
# If modifying these scopes, delete the file token file.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
FIELDS = ["cal_name", "title", "time", "location", "description"]
def __init__(self) -> None:
# `creds` is a confusing name here. It actually means user's access
# token, not the developer's credentials
self.creds = None
self.service = None
self.is_auth = False
self.cal_map = None
def auth(self, creds_path: str, token_path: str) -> None:
"""Perform authentications.
Two files are involved:
- credentials file: to prove to Google that the current application is SNOW.
- token file: to ask the user to grant the access of the calendar data.
"""
if self.is_auth:
return
if os.path.exists(token_path):
self.creds = Credentials.from_authorized_user_file(
token_path, self.SCOPES)
# If there are no (valid) credentials available, let the user login.
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
logging.info("No valid token found. Will try to refresh.")
try:
self.creds.refresh(Request())
except RefreshError:
logging.info(
"Fail to refresh token. User must retry login.")
else:
logging.info("No valid token found. Please retry login.")
if not self.creds or not self.creds.valid:
flow = InstalledAppFlow.from_client_secrets_file(
creds_path, self.SCOPES)
self.creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token_path, 'w') as token:
token.write(self.creds.to_json())
self.service = build('calendar', 'v3', credentials=self.creds)
self.is_auth = True
def fetch_calendars(self) -> None:
assert self.is_auth
if self.cal_map is not None:
return
self.cal_map = {}
# calendar list is broken into multiple pages
# use page_token to iterate through the pages
page_token = None
while True:
cal_list_page = self.service.calendarList().list(
pageToken=page_token).execute()
for cal_data in cal_list_page['items']:
cal = Calendar(self.service, cal_data)
self.cal_map[cal.name] = cal
logging.info(f"Get calendar: {cal.name}")
page_token = cal_list_page.get('nextPageToken')
if not page_token:
break
def list_calendars_name(self) -> List[str]:
"""Return all calendars' name of this user."""
self.fetch_calendars()
return self.cal_map.keys()
def get_calendar(self, cal_name: str) -> Optional[Calendar]:
self.fetch_calendars()
return self.cal_map.get(cal_name) | src/service/gcal/__init__.py | import os.path
import logging
from typing import List, Optional
from google.auth.transport.requests import Request
from google.auth.exceptions import RefreshError
from google.oauth2.credentials import Credentials
from google_auth_oauthlib.flow import InstalledAppFlow
from googleapiclient.discovery import build
from .calendar import Calendar
class GoogleCalendarService:
# For now, we only need the readonly permission of Google Calendar.
# If modifying these scopes, delete the file token file.
SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']
FIELDS = ["cal_name", "title", "time", "location", "description"]
def __init__(self) -> None:
# `creds` is a confusing name here. It actually means user's access
# token, not the developer's credentials
self.creds = None
self.service = None
self.is_auth = False
self.cal_map = None
def auth(self, creds_path: str, token_path: str) -> None:
"""Perform authentications.
Two files are involved:
- credentials file: to prove to Google that the current application is SNOW.
- token file: to ask the user to grant the access of the calendar data.
"""
if self.is_auth:
return
if os.path.exists(token_path):
self.creds = Credentials.from_authorized_user_file(
token_path, self.SCOPES)
# If there are no (valid) credentials available, let the user login.
if not self.creds or not self.creds.valid:
if self.creds and self.creds.expired and self.creds.refresh_token:
logging.info("No valid token found. Will try to refresh.")
try:
self.creds.refresh(Request())
except RefreshError:
logging.info(
"Fail to refresh token. User must retry login.")
else:
logging.info("No valid token found. Please retry login.")
if not self.creds or not self.creds.valid:
flow = InstalledAppFlow.from_client_secrets_file(
creds_path, self.SCOPES)
self.creds = flow.run_local_server(port=0)
# Save the credentials for the next run
with open(token_path, 'w') as token:
token.write(self.creds.to_json())
self.service = build('calendar', 'v3', credentials=self.creds)
self.is_auth = True
def fetch_calendars(self) -> None:
assert self.is_auth
if self.cal_map is not None:
return
self.cal_map = {}
# calendar list is broken into multiple pages
# use page_token to iterate through the pages
page_token = None
while True:
cal_list_page = self.service.calendarList().list(
pageToken=page_token).execute()
for cal_data in cal_list_page['items']:
cal = Calendar(self.service, cal_data)
self.cal_map[cal.name] = cal
logging.info(f"Get calendar: {cal.name}")
page_token = cal_list_page.get('nextPageToken')
if not page_token:
break
def list_calendars_name(self) -> List[str]:
"""Return all calendars' name of this user."""
self.fetch_calendars()
return self.cal_map.keys()
def get_calendar(self, cal_name: str) -> Optional[Calendar]:
self.fetch_calendars()
return self.cal_map.get(cal_name) | 0.698535 | 0.216032 |
import os
from pathlib import Path
from numba import cuda, jit
import numpy as np
os.chdir(Path.cwd().parent)
from pahmc_ode_gpu import cuda_lib_dynamics
os.chdir(Path.cwd()/'unit_tests')
"""Prepare data, as well as variables to be compared to."""
name = 'lorenz96'
D = 200
M = 2000
X = np.random.uniform(-8.0, 8.0, (D,M))
par = np.array([8.17])
stimulus = np.random.uniform(-1.0, 1.0, (D,M))
# these functions have been tested in pahmc_ode_cpu
@jit(nopython=True)
def cpu_field(X, par, stimulus):
(D, M) = np.shape(X)
vecfield = np.zeros((D,M))
for m in range(M):
vecfield[0, m] = (X[1, m] - X[D-2, m]) * X[D-1, m] - X[0, m]
vecfield[1, m] = (X[2, m] - X[D-1, m]) * X[0, m] - X[1, m]
vecfield[D-1, m] = (X[0, m] - X[D-3, m]) * X[D-2, m] - X[D-1, m]
for a in range(2, D-1):
vecfield[a, m] = (X[a+1, m] - X[a-2, m]) * X[a-1, m] - X[a, m]
return vecfield + par[0]
@jit(nopython=True)
def cpu_jacobian(X, par):
(D, M) = np.shape(X)
jacob = np.zeros((D,D,M))
for m in range(M):
for i in range(1, D+1):
for j in range(1, D+1):
jacob[i-1, j-1, m] \
= (1 + (i - 2) % D == j) \
* (X[i%D, m] - X[(i-3)%D, m]) \
+ ((1 + i % D == j) - (1 + (i - 3) % D == j)) \
* X[(i-2)%D, m] - (i == j)
return jacob
@jit(nopython=True)
def cpu_dfield_dpar(X, par):
(D, M) = np.shape(X)
return np.ones((D,len(par),M))
print('\nTesting... ', end='')
field_compared = cpu_field(X, par, stimulus)
jacobian_compared = cpu_jacobian(X, par)
dfield_dpar_compared = cpu_dfield_dpar(X, par)
"""Fetch the kernels, transfer data, and specify grid dimensions."""
k__field = getattr(cuda_lib_dynamics, f'k__{name}_field')
k__jacobian = getattr(cuda_lib_dynamics, f'k__{name}_jacobian')
k__dfield_dpar = getattr(cuda_lib_dynamics, f'k__{name}_dfield_dpar')
d_X = cuda.to_device(X)
d_par = cuda.to_device(par)
d_stimulus = cuda.to_device(stimulus)
d_field = cuda.to_device(np.zeros((D,M)))
d_jacobian = cuda.to_device(np.zeros((D,D,M)))
d_dfield_dpar = cuda.to_device(np.zeros((D,1,M)))
"""Define convenience functions."""
def gtimer1():
k__field[(16,32), (2,128)](d_X, d_par, d_stimulus, d_field)
cuda.synchronize()
def gtimer2():
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
cuda.synchronize()
def gtimer3():
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
cuda.synchronize()
def gtimer4():
gtimer1(); gtimer2(); gtimer3()
def gtimer5():
k__field[(16,32), (2,128)](d_X, d_par, d_stimulus, d_field)
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
cuda.synchronize()
"""Make sure everything is correct."""
gtimer5()
field = d_field.copy_to_host()
jacobian = d_jacobian.copy_to_host()
dfield_dpar = d_dfield_dpar.copy_to_host()
np.testing.assert_almost_equal(field, field_compared, decimal=12)
np.testing.assert_almost_equal(jacobian, jacobian_compared, decimal=12)
np.testing.assert_almost_equal(dfield_dpar, dfield_dpar_compared, decimal=12)
print('ok.')
#======================================================================
for _ in range(5):
gtimer5()
temp = cpu_field(X, par, stimulus)
temp = cpu_jacobian(X, par)
temp = cpu_dfield_dpar(X, par)
"""
%timeit -r 50 -n 10 temp = cpu_field(X, par, stimulus)
%timeit -r 50 -n 10 gtimer1()
%timeit -r 50 -n 10 temp = cpu_jacobian(X, par)
%timeit -r 50 -n 10 gtimer2()
%timeit -r 50 -n 10 temp = cpu_dfield_dpar(X, par)
%timeit -r 50 -n 10 gtimer3()
%timeit -r 50 -n 10 gtimer4()
%timeit -r 50 -n 10 gtimer5()
""" | unit_tests/test-cuda_dynamics (lorenz96).py | import os
from pathlib import Path
from numba import cuda, jit
import numpy as np
os.chdir(Path.cwd().parent)
from pahmc_ode_gpu import cuda_lib_dynamics
os.chdir(Path.cwd()/'unit_tests')
"""Prepare data, as well as variables to be compared to."""
name = 'lorenz96'
D = 200
M = 2000
X = np.random.uniform(-8.0, 8.0, (D,M))
par = np.array([8.17])
stimulus = np.random.uniform(-1.0, 1.0, (D,M))
# these functions have been tested in pahmc_ode_cpu
@jit(nopython=True)
def cpu_field(X, par, stimulus):
(D, M) = np.shape(X)
vecfield = np.zeros((D,M))
for m in range(M):
vecfield[0, m] = (X[1, m] - X[D-2, m]) * X[D-1, m] - X[0, m]
vecfield[1, m] = (X[2, m] - X[D-1, m]) * X[0, m] - X[1, m]
vecfield[D-1, m] = (X[0, m] - X[D-3, m]) * X[D-2, m] - X[D-1, m]
for a in range(2, D-1):
vecfield[a, m] = (X[a+1, m] - X[a-2, m]) * X[a-1, m] - X[a, m]
return vecfield + par[0]
@jit(nopython=True)
def cpu_jacobian(X, par):
(D, M) = np.shape(X)
jacob = np.zeros((D,D,M))
for m in range(M):
for i in range(1, D+1):
for j in range(1, D+1):
jacob[i-1, j-1, m] \
= (1 + (i - 2) % D == j) \
* (X[i%D, m] - X[(i-3)%D, m]) \
+ ((1 + i % D == j) - (1 + (i - 3) % D == j)) \
* X[(i-2)%D, m] - (i == j)
return jacob
@jit(nopython=True)
def cpu_dfield_dpar(X, par):
(D, M) = np.shape(X)
return np.ones((D,len(par),M))
print('\nTesting... ', end='')
field_compared = cpu_field(X, par, stimulus)
jacobian_compared = cpu_jacobian(X, par)
dfield_dpar_compared = cpu_dfield_dpar(X, par)
"""Fetch the kernels, transfer data, and specify grid dimensions."""
k__field = getattr(cuda_lib_dynamics, f'k__{name}_field')
k__jacobian = getattr(cuda_lib_dynamics, f'k__{name}_jacobian')
k__dfield_dpar = getattr(cuda_lib_dynamics, f'k__{name}_dfield_dpar')
d_X = cuda.to_device(X)
d_par = cuda.to_device(par)
d_stimulus = cuda.to_device(stimulus)
d_field = cuda.to_device(np.zeros((D,M)))
d_jacobian = cuda.to_device(np.zeros((D,D,M)))
d_dfield_dpar = cuda.to_device(np.zeros((D,1,M)))
"""Define convenience functions."""
def gtimer1():
k__field[(16,32), (2,128)](d_X, d_par, d_stimulus, d_field)
cuda.synchronize()
def gtimer2():
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
cuda.synchronize()
def gtimer3():
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
cuda.synchronize()
def gtimer4():
gtimer1(); gtimer2(); gtimer3()
def gtimer5():
k__field[(16,32), (2,128)](d_X, d_par, d_stimulus, d_field)
k__jacobian[(4,4,32), (2,2,64)](d_X, d_par, d_jacobian)
k__dfield_dpar[(4,4,32), (2,2,64)](d_X, d_par, d_dfield_dpar)
cuda.synchronize()
"""Make sure everything is correct."""
gtimer5()
field = d_field.copy_to_host()
jacobian = d_jacobian.copy_to_host()
dfield_dpar = d_dfield_dpar.copy_to_host()
np.testing.assert_almost_equal(field, field_compared, decimal=12)
np.testing.assert_almost_equal(jacobian, jacobian_compared, decimal=12)
np.testing.assert_almost_equal(dfield_dpar, dfield_dpar_compared, decimal=12)
print('ok.')
#======================================================================
for _ in range(5):
gtimer5()
temp = cpu_field(X, par, stimulus)
temp = cpu_jacobian(X, par)
temp = cpu_dfield_dpar(X, par)
"""
%timeit -r 50 -n 10 temp = cpu_field(X, par, stimulus)
%timeit -r 50 -n 10 gtimer1()
%timeit -r 50 -n 10 temp = cpu_jacobian(X, par)
%timeit -r 50 -n 10 gtimer2()
%timeit -r 50 -n 10 temp = cpu_dfield_dpar(X, par)
%timeit -r 50 -n 10 gtimer3()
%timeit -r 50 -n 10 gtimer4()
%timeit -r 50 -n 10 gtimer5()
""" | 0.453988 | 0.398611 |
import csv
import string
from model import Model
import numpy as np
from progress.bar import Bar
import util
from datetime import datetime
import codecs
from lime.lime_tabular import LimeTabularExplainer
class TimeModel(Model):
    """Regression model for the time gap between a StackOverflow question
    and its answer, learned from the question's title and body text.
    """

    model = 'time'

    def __init__(self, datapath=""):
        super().__init__('time', datapath)
        # Note: there are less than 1 million questions with answers (that are after the question was created)
        self.num_samples = 900000
        self.train_count = 10000
        self.train_batches = 80
        self.test_batches = 10
        self.test_count = 10000
        # Targets are log-transformed; run() converts predictions back with expm1.
        self.log_y = True

    def calc_time(self, time1, time2):
        """
        Calculate the number of seconds between time1 and time2
        time1 - answer date (ISO-8601 string, presumably ending in 'Z')
        time2 - question date (same format)
        Returns elapsed seconds (negative if the answer predates the
        question). The trailing character is stripped because
        datetime.fromisoformat() cannot parse a 'Z' suffix.
        """
        dt1 = datetime.fromisoformat(time1[:-1])
        dt2 = datetime.fromisoformat(time2[:-1])
        delta = dt1 - dt2
        elapsed = delta.total_seconds()
        return elapsed

    def data(self, start, count):
        """Load up to `count` (cleaned question text, answer delay in seconds)
        pairs from the Questions/Answers CSV exports, counting accepted
        samples from index `start`.
        """
        answers, lines, times, i = {}, [], [], start
        # Map question id -> answer creation date.
        with codecs.open(self.datapath + 'Answers-Final.csv', 'r', 'utf-8') as f:
            fptr = csv.reader(f, delimiter=',')
            for line in fptr:
                answers[line[2]] = line[1]
        is_first = True
        # Hoisted out of the loop: one translation table instead of one per token.
        punctuation_table = str.maketrans('', '', string.punctuation)
        with Bar("Loading data...", max=count) as bar:
            with codecs.open(self.datapath + 'Questions-Final.csv', 'r', 'utf-8') as f:
                fptr = csv.reader(f, delimiter=',')
                for line in fptr:
                    if is_first:
                        # Skip the CSV header row.
                        is_first = False
                        i += 1
                    elif i == start + count + 1:
                        break
                    elif i > start:
                        if line[0] in answers:
                            delta = self.calc_time(answers[line[0]], line[1])
                            # Only considers answers that have a later date than the question
                            if delta >= 0:
                                try:
                                    tokens = util.clean_tokenize(
                                        line[4] + line[5])
                                    tokens = [tok.translate(punctuation_table)
                                              for tok in tokens]
                                    lines.append(' '.join(tokens))
                                    times.append(delta)
                                    i += 1
                                    bar.next()
                                except Exception:
                                    # Was a bare `except:`; narrowed so that
                                    # KeyboardInterrupt/SystemExit are no
                                    # longer swallowed.
                                    print("\nerror")
        return lines, times

    def run(self, load_data=True, tune_parameter=True):
        """Train, evaluate, and explain the model end to end."""
        if load_data:
            lines, values = self.data(0, self.num_samples)
            self.vectorize_text(lines, values)
        # If tune_parameter is false, we run with our experimented parameters
        if tune_parameter:
            self.tune_parameters()
        else:
            self.index = 1
            self.param = {"alpha": 0.05,
                          "learning_rate": "invscaling", "penalty": "l2"}
        reg = self.train()
        y_pred = self.test(reg)
        print(max(y_pred))
        # Using log(y) so convert back to seconds with exp(y_pred)
        y_pred = np.expm1(y_pred)
        y_test = np.load(self.Y_test, mmap_mode='r')
        self.print_stats(y_pred, y_test)
        # Explain a single test prediction with LIME.
        X_train = np.load(self.X_train[self.index], mmap_mode='r')
        X_test = np.load(self.X_test[self.index], mmap_mode='r')
        explainer = LimeTabularExplainer(X_train, mode="regression")
        exp = explainer.explain_instance(X_test[self.text_index], reg.predict)
        exp.as_pyplot_figure()
import string
from model import Model
import numpy as np
from progress.bar import Bar
import util
from datetime import datetime
import codecs
from lime.lime_tabular import LimeTabularExplainer
class TimeModel(Model):
model = 'time'
def __init__(self, datapath=""):
super().__init__('time', datapath)
# Note: there are less than 1 million questions with answers (that are after the question was created)
self.num_samples = 900000
self.train_count = 10000
self.train_batches = 80
self.test_batches = 10
self.test_count = 10000
self.log_y = True
def calc_time(self, time1, time2):
"""
Calculate the number of seconds between time1 and time2
time1 - answer date
time2 - quesiton date
elapsed - seconds between question and answer
"""
dt1 = datetime.fromisoformat(time1[:-1])
dt2 = datetime.fromisoformat(time2[:-1])
delta = dt1 - dt2
elapsed = delta.total_seconds()
return elapsed
def data(self, start, count):
answers, lines, times, i = {}, [], [], start
with codecs.open(self.datapath + 'Answers-Final.csv', 'r', 'utf-8') as f:
fptr = csv.reader(f, delimiter=',')
for line in fptr:
answers[line[2]] = line[1]
is_first = True
with Bar("Loading data...", max=count) as bar:
with codecs.open(self.datapath + 'Questions-Final.csv', 'r', 'utf-8') as f:
fptr = csv.reader(f, delimiter=',')
for line in fptr:
if is_first:
is_first = False
i += 1
elif i == start + count + 1:
break
elif i > start:
if line[0] in answers:
delta = self.calc_time(answers[line[0]], line[1])
# Only considers answers that have a later date than the question
if delta >= 0:
try:
tokens = util.clean_tokenize(
line[4] + line[5])
tokens = [tok.translate(str.maketrans(
'', '', string.punctuation)) for tok in tokens]
lines.append(' '.join(tokens))
times.append(delta)
i += 1
bar.next()
except:
print("\nerror")
return lines, times
def run(self, load_data=True, tune_parameter=True):
if load_data:
lines, values = self.data(0, self.num_samples)
self.vectorize_text(lines, values)
# If tune_parameter is false, we run with our experimented parameters
if tune_parameter:
self.tune_parameters()
else:
self.index = 1
self.param = {"alpha": 0.05,
"learning_rate": "invscaling", "penalty": "l2"}
reg = self.train()
y_pred = self.test(reg)
print(max(y_pred))
# Using log(y) so convert back to seconds with exp(y_pred)
y_pred = np.expm1(y_pred)
y_test = np.load(self.Y_test, mmap_mode='r')
self.print_stats(y_pred, y_test)
X_train = np.load(self.X_train[self.index], mmap_mode='r')
X_test = np.load(self.X_test[self.index], mmap_mode='r')
explainer = LimeTabularExplainer(X_train, mode="regression")
exp = explainer.explain_instance(X_test[self.text_index], reg.predict)
exp.as_pyplot_figure() | 0.39129 | 0.404155 |
import hmac
import hashlib
import binascii
import base64
import json
from datetime import datetime
# Signing key as a hex string (placeholder — replace with a real secret).
key = "<KEY>"
# Raw key bytes for HMAC; unhexlify requires `key` to be valid hexadecimal.
byte_key = binascii.unhexlify(key)
# creates dictionary object
def create_jwt(exp: int = 60 * 60 * 1000) -> dict:
    """Create an unsigned JWT as a dict with "header" and "payload" parts.

    exp: token lifetime in milliseconds (default one hour), matching the
    unit used by add_expiration_time(). The stored "exp" claim is a UNIX
    timestamp in seconds, so the interval is converted before adding —
    previously the raw millisecond value was added to a seconds-based
    timestamp, making the token valid for ~41 days instead of 1 hour.
    """
    return {
        "header": {
            "alg": "HS256",
            "typ": "JWT"
        },
        "payload": {
            # timestamp() is in seconds; convert the millisecond interval.
            "exp": datetime.now().timestamp() + exp / 1000
        }
    }
def add_expiration_time(jwt: dict, interval_milliseconds: int = 60 * 60 * 1000):
    """Set the "exp" claim to now + interval_milliseconds (default one hour).

    The "exp" claim is a UNIX timestamp in seconds, so the millisecond
    interval must be divided by 1000 — previously it was added raw,
    extending the expiry by a factor of 1000.
    """
    jwt["payload"]["exp"] = datetime.now().timestamp() + interval_milliseconds / 1000
def add_claim(jwt: dict, claim, value):
    """Attach an arbitrary claim to the token's payload."""
    payload = jwt["payload"]
    payload[claim] = value
# use this to encode header and payload
def encode_jwt_part(jwt_part: dict) -> bytes:
    """Serialize a header/payload dict to JSON and Base64-encode it.

    NOTE(review): this uses standard Base64, not the base64url-without-
    padding encoding the JWT spec requires — kept as-is because
    decode_jwt_part() round-trips it.
    """
    return base64.b64encode(json.dumps(jwt_part).encode('utf-8'))
def sign_jwt(jwt: dict) -> str:
    """Encode header and payload and append an uppercase hex HMAC-SHA256
    signature, producing "header.payload.signature".
    """
    header = encode_jwt_part(jwt["header"]).decode('utf-8')
    payload = encode_jwt_part(jwt["payload"]).decode('utf-8')
    signing_input = (header + "." + payload).encode('utf-8')
    signature = hmac.new(byte_key, signing_input, hashlib.sha256).hexdigest().upper()
    return ".".join((header, payload, signature))
def verify_jwt(token: str) -> bool:
    """Return True iff the token's signature is valid and it has not expired."""
    header_part, payload_part, signature_part = token.split(".")
    expected = hmac.new(byte_key, (header_part + "." + payload_part).encode('utf-8'), hashlib.sha256) \
        .hexdigest().upper()
    # compare_digest avoids leaking the mismatch position via timing,
    # unlike the previous `!=` comparison.
    if not hmac.compare_digest(expected, signature_part):
        return False
    payload_dict: dict = decode_jwt_part(payload_part)
    # Valid only while the "exp" timestamp (seconds) is not in the past.
    return payload_dict["exp"] >= datetime.now().timestamp()
# use this to decode payload or header from token to dictionary
def decode_jwt_part(jwt_part: str) -> dict:
    """Base64-decode a header/payload segment and parse it as JSON."""
    raw = base64.b64decode(jwt_part.encode('utf-8'))
    return json.loads(raw.decode('utf-8'))
def extract_jwt_dictionary_from_token(token: str) -> dict:
    """Split a signed token and decode its header and payload into dicts.

    The signature segment is ignored; no verification is performed here.
    """
    header_part, payload_part, _signature = token.split(".")
    return {
        "header": decode_jwt_part(header_part),
        "payload": decode_jwt_part(payload_part),
    }
def extract_claim(token: str, claim):
    """Return the value of `claim` from the token's payload.

    Raises Exception when the claim is absent; performs no signature check.
    """
    _header, payload_part, _signature = token.split(".")
    payload_dict: dict = decode_jwt_part(payload_part)
    if claim in payload_dict:
        return payload_dict[claim]
    raise Exception("Claim " + claim + " is not set")
import hashlib
import binascii
import base64
import json
from datetime import datetime
key = "<KEY>"
byte_key = binascii.unhexlify(key)
# creates dictionary object
def create_jwt(exp: int = 60 * 60 * 1000) -> dict:
return {
"header": {
"alg": "HS256",
"typ": "JWT"
},
"payload": {
"exp": datetime.now().timestamp() + exp
}
}
def add_expiration_time(jwt: dict, interval_milliseconds: int = 60 * 60 * 1000):
jwt["payload"]["exp"] = datetime.now().timestamp() + interval_milliseconds
def add_claim(jwt: dict, claim, value):
jwt["payload"][claim] = value
# use this to encode header and payload
def encode_jwt_part(jwt_part: dict) -> bytes:
jwt_part_string: str = json.dumps(jwt_part)
bytes_object: bytes = jwt_part_string.encode('utf-8')
return base64.b64encode(bytes_object)
def sign_jwt(jwt: dict) -> str:
header_encoded: bytes = encode_jwt_part(jwt["header"])
payload_encoded: bytes = encode_jwt_part(jwt["payload"])
header_decoded: str = header_encoded.decode('utf-8')
payload_decoded: str = payload_encoded.decode('utf-8')
signature: str = hmac.new(byte_key, (header_decoded + "." + payload_decoded).encode('utf-8'), hashlib.sha256)\
.hexdigest().upper()
return header_decoded + "." + payload_decoded + "." + signature
def verify_jwt(token: str) -> bool:
[header_decoded, payload_decoded, signature_part] = token.split(".")
signature: str = hmac.new(byte_key, (header_decoded + "." + payload_decoded).encode('utf-8'), hashlib.sha256) \
.hexdigest().upper()
if signature != signature_part:
return False
payload_dict: dict = decode_jwt_part(payload_decoded)
if payload_dict["exp"] < datetime.now().timestamp():
return False
else:
return True
# use this to decode payload or header from token to dictionary
def decode_jwt_part(jwt_part: str) -> dict:
payload_encoded: bytes = jwt_part.encode('utf-8')
payload_bytes: bytes = base64.b64decode(payload_encoded)
payload_string: str = payload_bytes.decode('utf-8')
return json.loads(payload_string)
def extract_jwt_dictionary_from_token(token: str) -> dict:
[header_decoded, payload_decoded, signature_part] = token.split(".")
header_dict: dict = decode_jwt_part(header_decoded)
payload_dict: dict = decode_jwt_part(payload_decoded)
return {
"header": header_dict,
"payload": payload_dict
}
def extract_claim(token: str, claim):
[header_decoded, payload_decoded, signature_part] = token.split(".")
payload_dict: dict = decode_jwt_part(payload_decoded)
if claim not in payload_dict:
raise Exception("Claim " + claim + " is not set")
else:
return payload_dict[claim] | 0.395951 | 0.144934 |
from django.shortcuts import get_object_or_404
from rest_framework import filters, permissions, status
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from api import serializers
from api.filters import TitleFilter
from api.models import Category, Comment, Genre, Review, Title
from api.permissions import ObjectPatchDeletePermission, ReadOnly
from api.viewsets import CreateListViewSet
from users.models import User
from users.permissions import IsAdmin
from users.serializers import UserSerializer
class UserViewSet(ModelViewSet):
    """CRUD for users (admins only), plus /me endpoints for the current user."""

    queryset = User.objects.all()
    serializer_class = UserSerializer
    pagination_class = PageNumberPagination
    permission_classes = (IsAdmin | IsAdminUser,)
    lookup_field = 'username'

    @action(
        detail=False,
        methods=['get'],
        permission_classes=(IsAuthenticated,)
    )
    def me(self, request, **kwargs):
        """Return the authenticated user's own profile."""
        serializer = UserSerializer(request.user)
        return Response(serializer.data)

    @me.mapping.patch
    def patch_me(self, request, **kwargs):
        """Partially update the authenticated user's own profile.

        raise_exception=True makes invalid data raise ValidationError (DRF
        renders it as a 400), so only the success path can fall through —
        the old fallback `status.status.HTTP_400_BAD_REQUEST` was dead code
        and would have raised AttributeError if ever reached.
        """
        serializer = self.serializer_class(
            request.user, data=request.data, partial=True
        )
        serializer.is_valid(raise_exception=True)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_200_OK)
class TitleViewSet(ModelViewSet):
    """CRUD for titles.

    Reads use the list/retrieve serializer; writes validate with the
    post/patch serializer but respond with the read serializer so nested
    category/genre data is expanded.
    """

    queryset = Title.objects.all()
    serializer_class = serializers.TitleListRetrieveSerializer
    permission_classes = (IsAdmin | IsAdminUser | ReadOnly,)
    filterset_class = TitleFilter
    pagination_class = PageNumberPagination

    def get_serializer_class(self):
        if self.request.method not in permissions.SAFE_METHODS:
            return serializers.TitlePostPatchSerializer
        return serializers.TitleListRetrieveSerializer

    def create(self, request, *args, **kwargs):
        write_serializer = self.get_serializer(data=request.data)
        write_serializer.is_valid(raise_exception=True)
        title = self.perform_create(write_serializer)
        response_headers = self.get_success_headers(write_serializer.data)
        read_serializer = serializers.TitleListRetrieveSerializer(title)
        return Response(
            read_serializer.data,
            status=status.HTTP_201_CREATED,
            headers=response_headers,
        )

    def perform_create(self, serializer):
        # Returns the saved instance (stock DRF returns None).
        return serializer.save()

    def update(self, request, *args, **kwargs):
        partial = kwargs.pop('partial', False)
        instance = self.get_object()
        write_serializer = self.get_serializer(
            instance,
            data=request.data,
            partial=partial,
        )
        write_serializer.is_valid(raise_exception=True)
        title = self.perform_update(write_serializer)
        if getattr(instance, '_prefetched_objects_cache', None):
            # Drop the stale prefetch cache so the response reflects the update.
            instance._prefetched_objects_cache = {}
        read_serializer = serializers.TitleListRetrieveSerializer(title)
        return Response(read_serializer.data)

    def perform_update(self, serializer):
        # Returns the saved instance (stock DRF returns None).
        return serializer.save()
class ReviewViewSet(ModelViewSet):
    """Reviews nested under a title; expects `title_id` in the URL kwargs."""

    serializer_class = serializers.ReviewSerializer
    # Per-action permission map; NOTE this attribute reuses the name of the
    # imported `permissions` module, but methods access it via `self`.
    permissions = {
        'create': (IsAuthenticated,),
        'retrieve': (AllowAny,),
        'update': (ObjectPatchDeletePermission,),
        'partial_update': (ObjectPatchDeletePermission,),
        'destroy': (ObjectPatchDeletePermission,),
        'list': (AllowAny,),
    }

    def get_permissions(self):
        return [perm_cls() for perm_cls in self.permissions[self.action]]

    def perform_create(self, serializer):
        parent_title = get_object_or_404(Title, pk=self.kwargs['title_id'])
        serializer.save(author=self.request.user, title=parent_title)

    def get_queryset(self):
        parent_title = get_object_or_404(Title, id=self.kwargs['title_id'])
        return parent_title.reviews.all()
class CategoriesViewSet(CreateListViewSet):
    """Category endpoints (actions per the CreateListViewSet base class).

    Admins may write, everyone may read; objects are looked up by slug and
    results can be filtered with a `?search=` query against the name.
    """
    queryset = Category.objects.all()
    serializer_class = serializers.CategorySerializer
    permission_classes = (IsAdminUser | IsAdmin | ReadOnly,)
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name',)
    lookup_field = 'slug'
class GenreViewSet(CreateListViewSet):
    """Genre endpoints (actions per the CreateListViewSet base class).

    Mirrors CategoriesViewSet: admin-only writes, public reads, slug
    lookups, `?search=` filtering on the name.
    """
    queryset = Genre.objects.all()
    serializer_class = serializers.GenreSerializer
    permission_classes = (IsAdminUser | IsAdmin | ReadOnly,)
    filter_backends = (filters.SearchFilter,)
    search_fields = ('name',)
    lookup_field = 'slug'
class CommentsViewSet(ModelViewSet):
    """Comments nested under a review; expects `review_id` in the URL kwargs."""

    queryset = Comment.objects.all()
    serializer_class = serializers.CommentsSerializer
    # Per-action permission map, mirroring ReviewViewSet.
    permissions = {
        'create': (IsAuthenticated,),
        'retrieve': (AllowAny,),
        'update': (ObjectPatchDeletePermission,),
        'partial_update': (ObjectPatchDeletePermission,),
        'destroy': (ObjectPatchDeletePermission,),
        'list': (AllowAny,),
    }

    def get_permissions(self):
        return [perm_cls() for perm_cls in self.permissions[self.action]]

    def perform_create(self, serializer):
        parent_review = get_object_or_404(Review, pk=self.kwargs['review_id'])
        serializer.save(author=self.request.user, review=parent_review)

    def get_queryset(self):
        parent_review = get_object_or_404(Review, id=self.kwargs['review_id'])
        return parent_review.comments.all()
from rest_framework import filters, permissions, status
from rest_framework.decorators import action
from rest_framework.pagination import PageNumberPagination
from rest_framework.permissions import AllowAny, IsAdminUser, IsAuthenticated
from rest_framework.response import Response
from rest_framework.viewsets import ModelViewSet
from api import serializers
from api.filters import TitleFilter
from api.models import Category, Comment, Genre, Review, Title
from api.permissions import ObjectPatchDeletePermission, ReadOnly
from api.viewsets import CreateListViewSet
from users.models import User
from users.permissions import IsAdmin
from users.serializers import UserSerializer
class UserViewSet(ModelViewSet):
queryset = User.objects.all()
serializer_class = UserSerializer
pagination_class = PageNumberPagination
permission_classes = (IsAdmin | IsAdminUser,)
lookup_field = 'username'
@action(
detail=False,
methods=['get'],
permission_classes=(IsAuthenticated,)
)
def me(self, request, **kwargs):
user = request.user
serializer = UserSerializer(user)
return Response(serializer.data)
@me.mapping.patch
def patch_me(self, request, **kwargs):
user = request.user
serializer = self.serializer_class(
user, data=request.data, partial=True
)
if serializer.is_valid(raise_exception=True):
serializer.save()
return Response(serializer.data, status=status.HTTP_200_OK)
return Response(status=status.status.HTTP_400_BAD_REQUEST)
class TitleViewSet(ModelViewSet):
queryset = Title.objects.all()
serializer_class = serializers.TitleListRetrieveSerializer
permission_classes = (IsAdmin | IsAdminUser | ReadOnly,)
filterset_class = TitleFilter
pagination_class = PageNumberPagination
def get_serializer_class(self):
if self.request.method in permissions.SAFE_METHODS:
return serializers.TitleListRetrieveSerializer
return serializers.TitlePostPatchSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
serializer.is_valid(raise_exception=True)
obj = self.perform_create(serializer)
headers = self.get_success_headers(serializer.data)
safe_serializer = serializers.TitleListRetrieveSerializer(obj)
return Response(
safe_serializer.data,
status=status.HTTP_201_CREATED,
headers=headers
)
def perform_create(self, serializer):
return serializer.save()
def update(self, request, *args, **kwargs):
partial = kwargs.pop('partial', False)
instance = self.get_object()
serializer = self.get_serializer(
instance,
data=request.data,
partial=partial,
)
serializer.is_valid(raise_exception=True)
obj = self.perform_update(serializer)
if getattr(instance, '_prefetched_objects_cache', None):
instance._prefetched_objects_cache = {}
safe_serializer = serializers.TitleListRetrieveSerializer(obj)
return Response(safe_serializer.data)
def perform_update(self, serializer):
return serializer.save()
class ReviewViewSet(ModelViewSet):
serializer_class = serializers.ReviewSerializer
permissions = {
'create': (IsAuthenticated,),
'retrieve': (AllowAny,),
'update': (ObjectPatchDeletePermission,),
'partial_update': (ObjectPatchDeletePermission,),
'destroy': (ObjectPatchDeletePermission,),
'list': (AllowAny,),
}
def get_permissions(self):
permission_classes = self.permissions[self.action]
return [permission() for permission in permission_classes]
def perform_create(self, serializer):
title = get_object_or_404(Title, pk=self.kwargs['title_id'])
serializer.save(author=self.request.user, title=title)
def get_queryset(self):
title = get_object_or_404(Title, id=self.kwargs['title_id'])
return title.reviews.all()
class CategoriesViewSet(CreateListViewSet):
queryset = Category.objects.all()
serializer_class = serializers.CategorySerializer
permission_classes = (IsAdminUser | IsAdmin | ReadOnly,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name',)
lookup_field = 'slug'
class GenreViewSet(CreateListViewSet):
queryset = Genre.objects.all()
serializer_class = serializers.GenreSerializer
permission_classes = (IsAdminUser | IsAdmin | ReadOnly,)
filter_backends = (filters.SearchFilter,)
search_fields = ('name',)
lookup_field = 'slug'
class CommentsViewSet(ModelViewSet):
queryset = Comment.objects.all()
serializer_class = serializers.CommentsSerializer
permissions = {
'create': (IsAuthenticated,),
'retrieve': (AllowAny,),
'update': (ObjectPatchDeletePermission,),
'partial_update': (ObjectPatchDeletePermission,),
'destroy': (ObjectPatchDeletePermission,),
'list': (AllowAny,),
}
def get_permissions(self):
permission_classes = self.permissions[self.action]
return [permission() for permission in permission_classes]
def perform_create(self, serializer):
review = get_object_or_404(Review, pk=self.kwargs['review_id'])
serializer.save(author=self.request.user, review=review)
def get_queryset(self):
review = get_object_or_404(Review, id=self.kwargs['review_id'])
return review.comments.all() | 0.620392 | 0.077727 |
from sympy import *
import sys
sys.path.insert(1, '..')
from tait_bryan_R_utils import *
from rodrigues_R_utils import *
from quaternion_R_utils import *
# Symbolic derivation of the coplanarity observation equation for a pair of
# metric cameras parameterized with Tait-Bryan angles in world coordinates.
# Emits a C++ header with the residual and its 1x12 Jacobian.

# Pose of camera 1: position and Tait-Bryan orientation angles.
px_1, py_1, pz_1 = symbols('px_1 py_1 pz_1')
om_1, fi_1, ka_1 = symbols('om_1 fi_1 ka_1')
#sx_1, sy_1, sz_1 = symbols('sx_1 sy_1 sz_1')
#q0_1, q1_1, q2_1, q3_1 = symbols('q0_1 q1_1 q2_1 q3_1')
# Pose of camera 2.
px_2, py_2, pz_2 = symbols('px_2 py_2 pz_2')
om_2, fi_2, ka_2 = symbols('om_2 fi_2 ka_2')
#sx_2, sy_2, sz_2 = symbols('sx_2 sy_2 sz_2')
#q0_2, q1_2, q2_2, q3_2 = symbols('q0_2 q1_2 q2_2 q3_2')
# Image observations (ksi_i, eta_i), principal points (ksi_0i, eta_0i) and
# camera constants c_i of both cameras.
ksi_1, eta_1, ksi_2, eta_2, ksi_01, eta_01, ksi_02, eta_02, c_1, c_2 = symbols('ksi_1 eta_1 ksi_2 eta_2 ksi_01 eta_01 ksi_02 eta_02 c_1 c_2');
position_symbols_1 = [px_1, py_1, pz_1]
orientation_symbols_1 = [om_1, fi_1, ka_1]
#orientation_symbols_1 = [sx_1, sy_1, sz_1]
#orientation_symbols_1 = [q0_1, q1_1, q2_1, q3_1]
position_symbols_2 = [px_2, py_2, pz_2]
orientation_symbols_2 = [om_2, fi_2, ka_2]
#orientation_symbols_2 = [sx_2, sy_2, sz_2]
#orientation_symbols_2 = [q0_2, q1_2, q2_2, q3_2]
c_symbols = [c_1, c_2]  # NOTE(review): currently unused in this variant
# Differentiate w.r.t. both camera poses: 12 parameters total.
all_symbols = position_symbols_1 + orientation_symbols_1 + position_symbols_2 + orientation_symbols_2
# Baseline vector between the two camera centers.
bx=px_2-px_1
by=py_2-py_1
bz=pz_2-pz_1
# Skew-symmetric (cross-product) matrix of the baseline.
b=Matrix([[0, -bz, by], [bz, 0, -bx], [-by, bx, 0]])
# Interior-orientation (calibration) matrices of both cameras.
C_1t=Matrix([[1, 0, -ksi_01], [0, 1, -eta_01], [0, 0, -c_1]]).transpose()
C_2=Matrix([[1, 0, -ksi_02], [0, 1, -eta_02], [0, 0, -c_2]])
camera_matrix_1 = matrix44FromTaitBryan(px_1, py_1, pz_1, om_1, fi_1, ka_1)
#camera_matrix_1 = matrix44FromRodrigues(px_1, py_1, pz_1, sx_1, sy_1, sz_1)
#camera_matrix_1 = matrix44FromQuaternion(px_1, py_1, pz_1, q0_1, q1_1, q2_1, q3_1)
# Rotation blocks (top-left 3x3) of the 4x4 pose matrices.
R_1t=camera_matrix_1[:-1,:-1].transpose()
camera_matrix_2 = matrix44FromTaitBryan(px_2, py_2, pz_2, om_2, fi_2, ka_2)
#camera_matrix_2 = matrix44FromRodrigues(px_2, py_2, pz_2, sx_2, sy_2, sz_2)
#camera_matrix_2 = matrix44FromQuaternion(px_2, py_2, pz_2, q0_2, q1_2, q2_2, q3_2)
R_2=camera_matrix_2[:-1,:-1]
# Homogeneous image coordinates of the corresponding point in both images.
ksieta_1=Matrix([[ksi_1, eta_1, 1]])
ksieta_2t=Matrix([[ksi_2, eta_2, 1]]).transpose()
# Coplanarity condition: the 1x1 product below must vanish.
target_value = Matrix([[0]])
model_function = ksieta_1 * C_1t * R_1t * b * R_2 * C_2 * ksieta_2t
obs_eq = target_value - model_function
obs_eq_jacobian = obs_eq.jacobian(all_symbols)
print(obs_eq)
print(obs_eq_jacobian)
# Emit the residual and its Jacobian as inline C++ functions.
with open("metric_camera_coplanarity_tait_bryan_wc_jacobian.h",'w') as f_cpp:
    f_cpp.write("inline void observation_equation_metric_camera_coplanarity_tait_bryan_wc(double &delta, double ksi_01, double eta_01, double c_1, double ksi_1, double eta_1, double px_1, double py_1, double pz_1, double om_1, double fi_1, double ka_1, double ksi_02, double eta_02, double c_2, double ksi_2, double eta_2, double px_2, double py_2, double pz_2, double om_2, double fi_2, double ka_2)\n")
    f_cpp.write("{")
    f_cpp.write("delta = %s;\n"%(ccode(obs_eq[0])))
    f_cpp.write("}")
    f_cpp.write("\n")
    f_cpp.write("inline void observation_equation_metric_camera_coplanarity_tait_bryan_wc_jacobian(Eigen::Matrix<double, 1, 12, Eigen::RowMajor> &j, double ksi_01, double eta_01, double c_1, double ksi_1, double eta_1, double px_1, double py_1, double pz_1, double om_1, double fi_1, double ka_1, double ksi_02, double eta_02, double c_2, double ksi_2, double eta_2, double px_2, double py_2, double pz_2, double om_2, double fi_2, double ka_2)\n")
    f_cpp.write("{")
    # One coefficient assignment per pose parameter.
    for i in range (12):
        f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(0,i, ccode(obs_eq_jacobian[0,i])))
    f_cpp.write("}")
import sys
sys.path.insert(1, '..')
from tait_bryan_R_utils import *
from rodrigues_R_utils import *
from quaternion_R_utils import *
px_1, py_1, pz_1 = symbols('px_1 py_1 pz_1')
om_1, fi_1, ka_1 = symbols('om_1 fi_1 ka_1')
#sx_1, sy_1, sz_1 = symbols('sx_1 sy_1 sz_1')
#q0_1, q1_1, q2_1, q3_1 = symbols('q0_1 q1_1 q2_1 q3_1')
px_2, py_2, pz_2 = symbols('px_2 py_2 pz_2')
om_2, fi_2, ka_2 = symbols('om_2 fi_2 ka_2')
#sx_2, sy_2, sz_2 = symbols('sx_2 sy_2 sz_2')
#q0_2, q1_2, q2_2, q3_2 = symbols('q0_2 q1_2 q2_2 q3_2')
ksi_1, eta_1, ksi_2, eta_2, ksi_01, eta_01, ksi_02, eta_02, c_1, c_2 = symbols('ksi_1 eta_1 ksi_2 eta_2 ksi_01 eta_01 ksi_02 eta_02 c_1 c_2');
position_symbols_1 = [px_1, py_1, pz_1]
orientation_symbols_1 = [om_1, fi_1, ka_1]
#orientation_symbols_1 = [sx_1, sy_1, sz_1]
#orientation_symbols_1 = [q0_1, q1_1, q2_1, q3_1]
position_symbols_2 = [px_2, py_2, pz_2]
orientation_symbols_2 = [om_2, fi_2, ka_2]
#orientation_symbols_2 = [sx_2, sy_2, sz_2]
#orientation_symbols_2 = [q0_2, q1_2, q2_2, q3_2]
c_symbols = [c_1, c_2]
all_symbols = position_symbols_1 + orientation_symbols_1 + position_symbols_2 + orientation_symbols_2
bx=px_2-px_1
by=py_2-py_1
bz=pz_2-pz_1
b=Matrix([[0, -bz, by], [bz, 0, -bx], [-by, bx, 0]])
C_1t=Matrix([[1, 0, -ksi_01], [0, 1, -eta_01], [0, 0, -c_1]]).transpose()
C_2=Matrix([[1, 0, -ksi_02], [0, 1, -eta_02], [0, 0, -c_2]])
camera_matrix_1 = matrix44FromTaitBryan(px_1, py_1, pz_1, om_1, fi_1, ka_1)
#camera_matrix_1 = matrix44FromRodrigues(px_1, py_1, pz_1, sx_1, sy_1, sz_1)
#camera_matrix_1 = matrix44FromQuaternion(px_1, py_1, pz_1, q0_1, q1_1, q2_1, q3_1)
R_1t=camera_matrix_1[:-1,:-1].transpose()
camera_matrix_2 = matrix44FromTaitBryan(px_2, py_2, pz_2, om_2, fi_2, ka_2)
#camera_matrix_2 = matrix44FromRodrigues(px_2, py_2, pz_2, sx_2, sy_2, sz_2)
#camera_matrix_2 = matrix44FromQuaternion(px_2, py_2, pz_2, q0_2, q1_2, q2_2, q3_2)
R_2=camera_matrix_2[:-1,:-1]
ksieta_1=Matrix([[ksi_1, eta_1, 1]])
ksieta_2t=Matrix([[ksi_2, eta_2, 1]]).transpose()
target_value = Matrix([[0]])
model_function = ksieta_1 * C_1t * R_1t * b * R_2 * C_2 * ksieta_2t
obs_eq = target_value - model_function
obs_eq_jacobian = obs_eq.jacobian(all_symbols)
print(obs_eq)
print(obs_eq_jacobian)
with open("metric_camera_coplanarity_tait_bryan_wc_jacobian.h",'w') as f_cpp:
f_cpp.write("inline void observation_equation_metric_camera_coplanarity_tait_bryan_wc(double &delta, double ksi_01, double eta_01, double c_1, double ksi_1, double eta_1, double px_1, double py_1, double pz_1, double om_1, double fi_1, double ka_1, double ksi_02, double eta_02, double c_2, double ksi_2, double eta_2, double px_2, double py_2, double pz_2, double om_2, double fi_2, double ka_2)\n")
f_cpp.write("{")
f_cpp.write("delta = %s;\n"%(ccode(obs_eq[0])))
f_cpp.write("}")
f_cpp.write("\n")
f_cpp.write("inline void observation_equation_metric_camera_coplanarity_tait_bryan_wc_jacobian(Eigen::Matrix<double, 1, 12, Eigen::RowMajor> &j, double ksi_01, double eta_01, double c_1, double ksi_1, double eta_1, double px_1, double py_1, double pz_1, double om_1, double fi_1, double ka_1, double ksi_02, double eta_02, double c_2, double ksi_2, double eta_2, double px_2, double py_2, double pz_2, double om_2, double fi_2, double ka_2)\n")
f_cpp.write("{")
for i in range (12):
f_cpp.write("j.coeffRef(%d,%d) = %s;\n"%(0,i, ccode(obs_eq_jacobian[0,i])))
f_cpp.write("}") | 0.320821 | 0.503967 |
from logging import getLogger
import requests_cache
from kivy.clock import Clock
from naturtag.app import alert, get_app
from naturtag.controllers import Controller
from naturtag.inat_metadata import get_http_cache_size
from naturtag.thumbnails import delete_thumbnails, get_thumbnail_cache_size
logger = getLogger(__name__)
class CacheController(Controller):
    """Controller for the cache-management section of the Settings screen:
    reports cache sizes and clears the HTTP, thumbnail, and history caches.
    """

    def __init__(self, cache_screen):
        self.screen = cache_screen
        # Wire up the cache-management buttons.
        screen = self.screen
        screen.cache_size_output.bind(on_release=self.update_cache_sizes)
        screen.clear_request_cache_button.bind(on_release=self.clear_http_cache)
        screen.clear_thumbnail_cache_button.bind(on_release=self.clear_thumbnail_cache)
        screen.clear_history_button.bind(on_release=self.clear_history)
        screen.refresh_observed_taxa_button.bind(on_release=self.refresh_observed_taxa)
        # Defer the first size calculation until shortly after startup.
        Clock.schedule_once(self.update_cache_sizes, 5)

    def clear_history(self, *args):
        """Wipe taxon history/frequency and refresh everything that shows it."""
        logger.info('Settings: Clearing history')
        app = get_app()
        history, _, frequent, _ = app.settings_controller.stored_taxa
        history.clear()
        frequent.clear()
        # Update everything that refers to history/frequent items
        app.save_settings()
        app.refresh_history()
        self.update_cache_sizes()
        alert('History has been cleared')

    def clear_http_cache(self, *args):
        """Empty the requests-cache backend and refresh the displayed totals."""
        logger.info('Settings: Clearing HTTP request cache')
        requests_cache.clear()
        self.update_cache_sizes()
        alert('Cache has been cleared')

    def clear_thumbnail_cache(self, *args):
        """Delete cached thumbnails and refresh the displayed totals."""
        logger.info('Settings: Clearing thumbnail cache')
        delete_thumbnails()
        self.update_cache_sizes()
        alert('Cache has been cleared')

    @staticmethod
    def refresh_observed_taxa(*args):
        """Kick off a refresh of the user's observed species."""
        get_app().refresh_observed_taxa()
        alert('Refreshing observed species...')

    def update_cache_sizes(self, *args):
        """Populate the 'Cache Size' output lines with calculated totals."""
        output = self.screen.cache_size_output
        output.text = f'Request cache size: {get_http_cache_size()}'
        thumb_count, thumb_size = get_thumbnail_cache_size()
        output.secondary_text = f'Thumbnail cache size: {thumb_count} files totaling {thumb_size}'
        history, _, frequent, _ = get_app().settings_controller.stored_taxa
        output.tertiary_text = f'History: {len(history)} items ({len(frequent)} unique)'
import requests_cache
from kivy.clock import Clock
from naturtag.app import alert, get_app
from naturtag.controllers import Controller
from naturtag.inat_metadata import get_http_cache_size
from naturtag.thumbnails import delete_thumbnails, get_thumbnail_cache_size
logger = getLogger(__name__)
class CacheController(Controller):
"""Controller class to manage Settings screen, and reading from and writing to settings file"""
def __init__(self, cache_screen):
self.screen = cache_screen
# Bind buttons
self.screen.cache_size_output.bind(on_release=self.update_cache_sizes)
self.screen.clear_request_cache_button.bind(on_release=self.clear_http_cache)
self.screen.clear_thumbnail_cache_button.bind(on_release=self.clear_thumbnail_cache)
self.screen.clear_history_button.bind(on_release=self.clear_history)
self.screen.refresh_observed_taxa_button.bind(on_release=self.refresh_observed_taxa)
Clock.schedule_once(self.update_cache_sizes, 5)
def clear_history(self, *args):
logger.info('Settings: Clearing history')
history, _, frequent, _ = get_app().settings_controller.stored_taxa
history.clear()
frequent.clear()
# Update everything that refers to history/frequent items
get_app().save_settings()
get_app().refresh_history()
self.update_cache_sizes()
alert('History has been cleared')
def clear_http_cache(self, *args):
logger.info('Settings: Clearing HTTP request cache')
requests_cache.clear()
self.update_cache_sizes()
alert('Cache has been cleared')
def clear_thumbnail_cache(self, *args):
logger.info('Settings: Clearing thumbnail cache')
delete_thumbnails()
self.update_cache_sizes()
alert('Cache has been cleared')
@staticmethod
def refresh_observed_taxa(*args):
get_app().refresh_observed_taxa()
alert('Refreshing observed species...')
def update_cache_sizes(self, *args):
"""Populate 'Cache Size' sections with calculated totals"""
out = self.screen.cache_size_output
out.text = f'Request cache size: {get_http_cache_size()}'
num_thumbs, thumbnail_total_size = get_thumbnail_cache_size()
out.secondary_text = f'Thumbnail cache size: {num_thumbs} files totaling {thumbnail_total_size}'
history, _, frequent, _ = get_app().settings_controller.stored_taxa
out.tertiary_text = f'History: {len(history)} items ({len(frequent)} unique)' | 0.785966 | 0.053034 |
from azure.cli.core.commands.parameters import (
tags_type,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group,
validate_file_or_dict
)
from azext_healthcareapis.action import (
AddAccessPolicies,
AddCosmosDbConfiguration,
AddAuthenticationConfiguration,
AddCorsConfiguration,
AddServicesOciArtifacts,
AddPrivateEndpointConnections,
AddPrivateLinkServiceConnectionState,
AddIngestionEndpointConfiguration,
AddFhirservicesAccessPolicies,
AddFhirservicesAuthenticationConfiguration,
AddFhirservicesCorsConfiguration,
AddResourceTypeOverrides,
AddFhirservicesOciArtifacts
)
def load_arguments(self, _):
    """Register argument definitions for every ``az healthcareapis`` command.

    Called by the Azure CLI command loader; ``self`` is the loader instance and
    the second positional argument (the command name) is unused here.

    FIX: three ``login_servers`` help strings were concatenated across lines
    without a separating space, rendering as "shallbe added" / "shallbe removed"
    in ``--help`` output; a trailing space has been added to each first segment.
    """
    # ---------------- healthcareapis service ----------------
    with self.argument_context('healthcareapis service list') as c:
        c.argument('resource_group_name', resource_group_name_type)

    with self.argument_context('healthcareapis service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')

    with self.argument_context('healthcareapis service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
        c.argument('kind', arg_type=get_enum_type(['fhir', 'fhir-Stu3', 'fhir-R4']), help='The kind of the service.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx),
                   validator=get_default_location_from_resource_group)
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']), help='Type of identity being '
                   'specified, currently SystemAssigned and None are allowed.')
        c.argument('access_policies', action=AddAccessPolicies, nargs='*', help='The access policies of the service '
                   'instance.')
        c.argument('cosmos_db_configuration', action=AddCosmosDbConfiguration, nargs='*', help='The settings for the '
                   'Cosmos DB database backing the service.')
        c.argument('authentication_configuration', options_list=['--authentication-configuration', '-c'],
                   action=AddAuthenticationConfiguration, nargs='*',
                   help='The authentication configuration for the service instance.')
        c.argument('cors_configuration', action=AddCorsConfiguration, nargs='*', help='The settings for the CORS '
                   'configuration of the service instance.')
        c.argument('private_endpoint_connections', action=AddPrivateEndpointConnections, nargs='*', help='The list of '
                   'private endpoint connections that are set up for this resource.')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
        # FIX: trailing space so the concatenated help reads "shall be", not "shallbe"
        c.argument('login_servers', type=str, help='The list of login servers that shall '
                   'be added to the service instance.', arg_group='Acr Configuration')
        c.argument('oci_artifacts',
                   action=AddServicesOciArtifacts, nargs='*',
                   help='The list of Open Container Initiative (OCI) artifacts.', arg_group='Acr Configuration')
        c.argument('export_configuration_storage_account_name',
                   options_list=['--export-configuration-storage-account-name', '-s'],
                   type=str, help='The name of the default export storage '
                   'account.')

    with self.argument_context('healthcareapis service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')

    with self.argument_context('healthcareapis service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')

    with self.argument_context('healthcareapis service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')

    # ---------------- operation-result ----------------
    with self.argument_context('healthcareapis operation-result show') as c:
        c.argument('location_name', type=str, help='The location of the operation.', id_part='name')
        c.argument('operation_result_id', type=str, help='The ID of the operation result to get.',
                   id_part='child_name_1')

    # ---------------- service-level private-endpoint-connection ----------------
    with self.argument_context('healthcareapis private-endpoint-connection list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')

    with self.argument_context('healthcareapis private-endpoint-connection show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')

    with self.argument_context('healthcareapis private-endpoint-connection create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
        # The three flattened state arguments are deprecated in favor of the aggregate option above
        c.argument('private_link_service_connection_state_status',
                   arg_type=get_enum_type(['Pending', 'Approved', 'Rejected']),
                   help='Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_description', type=str,
                   help='The reason for approval/rejection of the connection.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_actions_required', type=str, help='A message indicating if '
                   'changes on the service provider require any updates on the consumer.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))

    with self.argument_context('healthcareapis private-endpoint-connection update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
        c.argument('private_link_service_connection_state_status',
                   arg_type=get_enum_type(['Pending', 'Approved', 'Rejected']),
                   help='Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_description', type=str,
                   help='The reason for approval/rejection of the connection.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_actions_required', type=str, help='A message indicating if '
                   'changes on the service provider require any updates on the consumer.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))

    with self.argument_context('healthcareapis private-endpoint-connection delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')

    with self.argument_context('healthcareapis private-endpoint-connection wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'], type=str,
                   help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')

    # ---------------- service-level private-link-resource ----------------
    with self.argument_context('healthcareapis private-link-resource list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')

    with self.argument_context('healthcareapis private-link-resource show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('group_name', type=str, help='The name of the private link resource group.',
                   id_part='child_name_1')

    # ---------------- acr ----------------
    with self.argument_context('healthcareapis acr list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')

    with self.argument_context('healthcareapis acr add') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        # FIX: trailing space so the concatenated help reads "shall be", not "shallbe"
        c.argument('login_servers', type=str, help='The list of login servers that shall '
                   'be added to the service instance.')

    with self.argument_context('healthcareapis acr remove') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        # FIX: trailing space so the concatenated help reads "shall be", not "shallbe"
        c.argument('login_servers', type=str, help='The list of login servers that shall '
                   'be removed from the service instance.')

    with self.argument_context('healthcareapis acr reset') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('login_servers', type=str, help='The list of login servers to substitute for the existing one.')

    # ---------------- workspace ----------------
    with self.argument_context('healthcareapis workspace list') as c:
        c.argument('resource_group_name', resource_group_name_type)

    with self.argument_context('healthcareapis workspace show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')

    with self.argument_context('healthcareapis workspace create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')

    with self.argument_context('healthcareapis workspace update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')
        c.argument('tags', tags_type)

    with self.argument_context('healthcareapis workspace delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')

    with self.argument_context('healthcareapis workspace wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')

    # ---------------- workspace dicom-service ----------------
    with self.argument_context('healthcareapis workspace dicom-service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    with self.argument_context('healthcareapis workspace dicom-service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')

    with self.argument_context('healthcareapis workspace dicom-service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')

    with self.argument_context('healthcareapis workspace dicom-service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')

    with self.argument_context('healthcareapis workspace dicom-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')

    with self.argument_context('healthcareapis workspace dicom-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')

    # ---------------- workspace iot-connector ----------------
    with self.argument_context('healthcareapis workspace iot-connector list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    with self.argument_context('healthcareapis workspace iot-connector show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')

    with self.argument_context('healthcareapis workspace iot-connector create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('ingestion_endpoint_configuration',
                   options_list=['--ingestion-endpoint-configuration', '-c'],
                   action=AddIngestionEndpointConfiguration, nargs='*', help='Source configuration.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Device Mapping')

    with self.argument_context('healthcareapis workspace iot-connector update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')

    with self.argument_context('healthcareapis workspace iot-connector delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')

    with self.argument_context('healthcareapis workspace iot-connector wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')

    # ---------------- workspace iot-connector fhir-destination ----------------
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.')

    with self.argument_context('healthcareapis workspace iot-connector fhir-destination show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')

    with self.argument_context('healthcareapis workspace iot-connector fhir-destination create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.')
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('resource_identity_resolution_type',
                   options_list=['--resource-identity-resolution-type', '-t'],
                   arg_type=get_enum_type(['Create', 'Lookup']),
                   help='Determines how resource identity is resolved on the destination.')
        c.argument('fhir_service_resource_id',
                   options_list=['--fhir-service-resource-id', '-r'],
                   type=str, help='Fully qualified resource id of the FHIR service to connect to.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Fhir Mapping')

    with self.argument_context('healthcareapis workspace iot-connector fhir-destination update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('resource_identity_resolution_type',
                   options_list=['--resource-identity-resolution-type', '-t'],
                   arg_type=get_enum_type(['Create', 'Lookup']),
                   help='Determines how resource identity is resolved on the destination.')
        c.argument('fhir_service_resource_id',
                   options_list=['--fhir-service-resource-id', '-r'],
                   type=str, help='Fully qualified resource id of the FHIR service to connect to.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Fhir Mapping')
        c.ignore('iot_fhir_destination')

    with self.argument_context('healthcareapis workspace iot-connector fhir-destination delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')

    with self.argument_context('healthcareapis workspace iot-connector fhir-destination wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')

    # ---------------- workspace fhir-service ----------------
    with self.argument_context('healthcareapis workspace fhir-service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    with self.argument_context('healthcareapis workspace fhir-service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')

    with self.argument_context('healthcareapis workspace fhir-service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('kind', arg_type=get_enum_type(['fhir-Stu3', 'fhir-R4']), help='The kind of the service.')
        c.argument('access_policies', action=AddFhirservicesAccessPolicies, nargs='*', help='Fhir Service access '
                   'policies.')
        c.argument('authentication_configuration', options_list=['--authentication-configuration', '-c'],
                   action=AddFhirservicesAuthenticationConfiguration, nargs='*',
                   help='Fhir Service authentication configuration.')
        c.argument('cors_configuration', action=AddFhirservicesCorsConfiguration, nargs='*', help='Fhir Service Cors '
                   'configuration.')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
        c.argument('default', arg_type=get_enum_type(['no-version', 'versioned', 'versioned-update']), help='The '
                   'default value for tracking history across all resources.', arg_group='Resource Version Policy '
                   'Configuration')
        c.argument('resource_type_overrides', options_list=['--resource-type-overrides', '-r'],
                   action=AddResourceTypeOverrides, nargs='*', help='A list of FHIR '
                   'Resources and their version policy overrides. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...',
                   arg_group='Resource Version Policy Configuration')
        c.argument('export_configuration_storage_account_name',
                   options_list=['--export-configuration-storage-account-name', '-s'],
                   type=str, help='The name of the default export storage account.',
                   arg_group='Export Configuration')
        c.argument('login_servers', nargs='*', help='The list of the Azure container registry login servers.',
                   arg_group='Acr Configuration')
        c.argument('oci_artifacts', action=AddFhirservicesOciArtifacts, nargs='*', help='The list of Open Container '
                   'Initiative (OCI) artifacts.', arg_group='Acr Configuration')

    with self.argument_context('healthcareapis workspace fhir-service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')

    with self.argument_context('healthcareapis workspace fhir-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')

    with self.argument_context('healthcareapis workspace fhir-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')

    # ---------------- workspace private-endpoint-connection ----------------
    with self.argument_context('healthcareapis workspace private-endpoint-connection list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    with self.argument_context('healthcareapis workspace private-endpoint-connection show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')

    with self.argument_context('healthcareapis workspace private-endpoint-connection create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')

    with self.argument_context('healthcareapis workspace private-endpoint-connection update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
        c.ignore('properties')

    with self.argument_context('healthcareapis workspace private-endpoint-connection delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')

    with self.argument_context('healthcareapis workspace private-endpoint-connection wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')

    # ---------------- workspace private-link-resource ----------------
    with self.argument_context('healthcareapis workspace private-link-resource list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')

    with self.argument_context('healthcareapis workspace private-link-resource show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('group_name', type=str, help='The name of the private link resource group.',
                   id_part='child_name_1')
from azure.cli.core.commands.parameters import (
tags_type,
get_enum_type,
resource_group_name_type,
get_location_type
)
from azure.cli.core.commands.validators import (
get_default_location_from_resource_group,
validate_file_or_dict
)
from azext_healthcareapis.action import (
AddAccessPolicies,
AddCosmosDbConfiguration,
AddAuthenticationConfiguration,
AddCorsConfiguration,
AddServicesOciArtifacts,
AddPrivateEndpointConnections,
AddPrivateLinkServiceConnectionState,
AddIngestionEndpointConfiguration,
AddFhirservicesAccessPolicies,
AddFhirservicesAuthenticationConfiguration,
AddFhirservicesCorsConfiguration,
AddResourceTypeOverrides,
AddFhirservicesOciArtifacts
)
def load_arguments(self, _):
    """Register command-line arguments for every `healthcareapis` command group.

    Invoked by the Azure CLI command loader; ``self`` is the loader instance and
    the second positional argument (the current command string) is unused.

    NOTE(review): this module appears to be code-generator output (the
    repository path names it ``generated/_params.py``) — prefer regenerating
    over hand-editing where possible.
    """
    # --- healthcareapis service (top-level service instance CRUD) ---
    with self.argument_context('healthcareapis service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
    with self.argument_context('healthcareapis service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
    with self.argument_context('healthcareapis service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
        c.argument('kind', arg_type=get_enum_type(['fhir', 'fhir-Stu3', 'fhir-R4']), help='The kind of the service.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx),
                   validator=get_default_location_from_resource_group)
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('identity_type', arg_type=get_enum_type(['SystemAssigned', 'None']), help='Type of identity being '
                   'specified, currently SystemAssigned and None are allowed.')
        c.argument('access_policies', action=AddAccessPolicies, nargs='*', help='The access policies of the service '
                   'instance.')
        c.argument('cosmos_db_configuration', action=AddCosmosDbConfiguration, nargs='*', help='The settings for the '
                   'Cosmos DB database backing the service.')
        c.argument('authentication_configuration', options_list=['--authentication-configuration', '-c'],
                   action=AddAuthenticationConfiguration, nargs='*',
                   help='The authentication configuration for the service instance.')
        c.argument('cors_configuration', action=AddCorsConfiguration, nargs='*', help='The settings for the CORS '
                   'configuration of the service instance.')
        c.argument('private_endpoint_connections', action=AddPrivateEndpointConnections, nargs='*', help='The list of '
                   'private endpoint connections that are set up for this resource.')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
        # BUGFIX: the implicit string concatenation previously rendered "shallbe" (missing space).
        c.argument('login_servers', type=str, help='The list of login servers that shall '
                   'be added to the service instance.', arg_group='Acr Configuration')
        c.argument('oci_artifacts',
                   action=AddServicesOciArtifacts, nargs='*',
                   help='The list of Open Container Initiative (OCI) artifacts.', arg_group='Acr Configuration')
        c.argument('export_configuration_storage_account_name',
                   options_list=['--export-configuration-storage-account-name', '-s'],
                   type=str, help='The name of the default export storage '
                   'account.')
    with self.argument_context('healthcareapis service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
    with self.argument_context('healthcareapis service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
    with self.argument_context('healthcareapis service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
    with self.argument_context('healthcareapis operation-result show') as c:
        c.argument('location_name', type=str, help='The location of the operation.', id_part='name')
        c.argument('operation_result_id', type=str, help='The ID of the operation result to get.',
                   id_part='child_name_1')
    # --- service-scoped private endpoint connections / private link resources ---
    with self.argument_context('healthcareapis private-endpoint-connection list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
    with self.argument_context('healthcareapis private-endpoint-connection show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')
    with self.argument_context('healthcareapis private-endpoint-connection create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
        # The flattened *_status/_description/_actions_required flags are deprecated in favour
        # of the composite --private-link-service-connection-state argument above.
        c.argument('private_link_service_connection_state_status',
                   arg_type=get_enum_type(['Pending', 'Approved', 'Rejected']),
                   help='Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_description', type=str,
                   help='The reason for approval/rejection of the connection.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_actions_required', type=str, help='A message indicating if '
                   'changes on the service provider require any updates on the consumer.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
    with self.argument_context('healthcareapis private-endpoint-connection update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
        c.argument('private_link_service_connection_state_status',
                   arg_type=get_enum_type(['Pending', 'Approved', 'Rejected']),
                   help='Indicates whether the connection has been Approved/Rejected/Removed by the owner of the service.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_description', type=str,
                   help='The reason for approval/rejection of the connection.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
        c.argument('private_link_service_connection_state_actions_required', type=str, help='A message indicating if '
                   'changes on the service provider require any updates on the consumer.',
                   deprecate_info=c.deprecate(redirect='--private-link-service-connection-state'))
    with self.argument_context('healthcareapis private-endpoint-connection delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')
    with self.argument_context('healthcareapis private-endpoint-connection wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'], type=str,
                   help='The name of the private endpoint connection associated with the Azure resource',
                   id_part='child_name_1')
    with self.argument_context('healthcareapis private-link-resource list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
    with self.argument_context('healthcareapis private-link-resource show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('group_name', type=str, help='The name of the private link resource group.',
                   id_part='child_name_1')
    # --- Azure Container Registry configuration on a service instance ---
    with self.argument_context('healthcareapis acr list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.')
    with self.argument_context('healthcareapis acr add') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        # BUGFIX: missing space ("shallbe") in the rendered help text.
        c.argument('login_servers', type=str, help='The list of login servers that shall '
                   'be added to the service instance.')
    with self.argument_context('healthcareapis acr remove') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        # BUGFIX: missing space ("shallbe") in the rendered help text.
        c.argument('login_servers', type=str, help='The list of login servers that shall '
                   'be removed from the service instance.')
    with self.argument_context('healthcareapis acr reset') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('resource_name', type=str, help='The name of the service instance.', id_part='name')
        c.argument('login_servers', type=str, help='The list of login servers to substitute for the existing one.')
    # --- workspace CRUD ---
    with self.argument_context('healthcareapis workspace list') as c:
        c.argument('resource_group_name', resource_group_name_type)
    with self.argument_context('healthcareapis workspace show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')
    with self.argument_context('healthcareapis workspace create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
    with self.argument_context('healthcareapis workspace update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')
        c.argument('tags', tags_type)
    with self.argument_context('healthcareapis workspace delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')
    with self.argument_context('healthcareapis workspace wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', options_list=['--name', '-n', '--workspace-name'], type=str, help='The name of '
                   'workspace resource.', id_part='name')
    # --- workspace DICOM services ---
    with self.argument_context('healthcareapis workspace dicom-service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
    with self.argument_context('healthcareapis workspace dicom-service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
    with self.argument_context('healthcareapis workspace dicom-service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
    with self.argument_context('healthcareapis workspace dicom-service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
    with self.argument_context('healthcareapis workspace dicom-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
    with self.argument_context('healthcareapis workspace dicom-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('dicom_service_name', options_list=['--name', '-n', '--dicom-service-name'], type=str, help='The '
                   'name of DICOM Service resource.', id_part='child_name_1')
    # --- workspace IoT connectors ---
    with self.argument_context('healthcareapis workspace iot-connector list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
    with self.argument_context('healthcareapis workspace iot-connector show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
    with self.argument_context('healthcareapis workspace iot-connector create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('ingestion_endpoint_configuration',
                   options_list=['--ingestion-endpoint-configuration', '-c'],
                   action=AddIngestionEndpointConfiguration, nargs='*', help='Source configuration.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Device Mapping')
    with self.argument_context('healthcareapis workspace iot-connector update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
    with self.argument_context('healthcareapis workspace iot-connector delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
    with self.argument_context('healthcareapis workspace iot-connector wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', options_list=['--name', '-n', '--iot-connector-name'], type=str, help='The '
                   'name of IoT Connector resource.', id_part='child_name_1')
    # --- IoT connector FHIR destinations ---
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.')
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.')
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('resource_identity_resolution_type',
                   options_list=['--resource-identity-resolution-type', '-t'],
                   arg_type=get_enum_type(['Create', 'Lookup']),
                   help='Determines how resource identity is resolved on the destination.')
        c.argument('fhir_service_resource_id',
                   options_list=['--fhir-service-resource-id', '-r'],
                   type=str, help='Fully qualified resource id of the FHIR service to connect to.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Fhir Mapping')
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('resource_identity_resolution_type',
                   options_list=['--resource-identity-resolution-type', '-t'],
                   arg_type=get_enum_type(['Create', 'Lookup']),
                   help='Determines how resource identity is resolved on the destination.')
        c.argument('fhir_service_resource_id',
                   options_list=['--fhir-service-resource-id', '-r'],
                   type=str, help='Fully qualified resource id of the FHIR service to connect to.')
        c.argument('content', type=validate_file_or_dict, help='The mapping. Expected value: '
                   'json-string/json-file/@json-file.', arg_group='Fhir Mapping')
        c.ignore('iot_fhir_destination')
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
    with self.argument_context('healthcareapis workspace iot-connector fhir-destination wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('iot_connector_name', type=str, help='The name of IoT Connector resource.', id_part='child_name_1')
        c.argument('fhir_destination_name', type=str, help='The name of IoT Connector FHIR destination resource.',
                   id_part='child_name_2')
    # --- workspace FHIR services ---
    with self.argument_context('healthcareapis workspace fhir-service list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
    with self.argument_context('healthcareapis workspace fhir-service show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
    with self.argument_context('healthcareapis workspace fhir-service create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.')
        c.argument('tags', tags_type)
        c.argument('etag', type=str, help='An etag associated with the resource, used for optimistic concurrency when '
                   'editing it.')
        c.argument('location', arg_type=get_location_type(self.cli_ctx), required=False,
                   validator=get_default_location_from_resource_group)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
        c.argument('kind', arg_type=get_enum_type(['fhir-Stu3', 'fhir-R4']), help='The kind of the service.')
        c.argument('access_policies', action=AddFhirservicesAccessPolicies, nargs='*', help='Fhir Service access '
                   'policies.')
        c.argument('authentication_configuration', options_list=['--authentication-configuration', '-c'],
                   action=AddFhirservicesAuthenticationConfiguration, nargs='*',
                   help='Fhir Service authentication configuration.')
        c.argument('cors_configuration', action=AddFhirservicesCorsConfiguration, nargs='*', help='Fhir Service Cors '
                   'configuration.')
        c.argument('public_network_access', arg_type=get_enum_type(['Enabled', 'Disabled']), help='Control permission '
                   'for data plane traffic coming from public networks while private endpoint is enabled.')
        c.argument('default', arg_type=get_enum_type(['no-version', 'versioned', 'versioned-update']), help='The '
                   'default value for tracking history across all resources.', arg_group='Resource Version Policy '
                   'Configuration')
        c.argument('resource_type_overrides', options_list=['--resource-type-overrides', '-r'],
                   action=AddResourceTypeOverrides, nargs='*', help='A list of FHIR '
                   'Resources and their version policy overrides. Expect value: KEY1=VALUE1 KEY2=VALUE2 ...',
                   arg_group='Resource Version Policy Configuration')
        c.argument('export_configuration_storage_account_name',
                   options_list=['--export-configuration-storage-account-name', '-s'],
                   type=str, help='The name of the default export storage account.',
                   arg_group='Export Configuration')
        c.argument('login_servers', nargs='*', help='The list of the Azure container registry login servers.',
                   arg_group='Acr Configuration')
        c.argument('oci_artifacts', action=AddFhirservicesOciArtifacts, nargs='*', help='The list of Open Container '
                   'Initiative (OCI) artifacts.', arg_group='Acr Configuration')
    with self.argument_context('healthcareapis workspace fhir-service update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('tags', tags_type)
        c.argument('identity_type',
                   arg_type=get_enum_type(['None', 'SystemAssigned', 'UserAssigned', 'SystemAssigned,UserAssigned']),
                   help='Type of identity being specified, currently SystemAssigned and None are allowed.',
                   arg_group='Identity')
        c.argument('user_assigned_identities', options_list=['--user-assigned-identities', '-i'],
                   type=validate_file_or_dict, help='The set of user assigned identities '
                   'associated with the resource. The userAssignedIdentities dictionary keys will be ARM resource ids '
                   'in the form: \'/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microso'
                   'ft.ManagedIdentity/userAssignedIdentities/{identityName}. The dictionary values can be empty '
                   'objects ({}) in requests. Expected value: json-string/json-file/@json-file.',
                   arg_group='Identity')
    with self.argument_context('healthcareapis workspace fhir-service delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
    with self.argument_context('healthcareapis workspace fhir-service wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('fhir_service_name', options_list=['--name', '-n', '--fhir-service-name'], type=str, help='The name '
                   'of FHIR Service resource.', id_part='child_name_1')
    # --- workspace-scoped private endpoint connections / private link resources ---
    with self.argument_context('healthcareapis workspace private-endpoint-connection list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
    with self.argument_context('healthcareapis workspace private-endpoint-connection show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')
    with self.argument_context('healthcareapis workspace private-endpoint-connection create') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
    with self.argument_context('healthcareapis workspace private-endpoint-connection update') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')
        c.argument('private_link_service_connection_state',
                   options_list=['--private-link-service-connection-state', '-s'],
                   action=AddPrivateLinkServiceConnectionState, nargs='*',
                   help='A collection of information about the state of the connection between service consumer and '
                   'provider.')
        c.ignore('properties')
    with self.argument_context('healthcareapis workspace private-endpoint-connection delete') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')
    with self.argument_context('healthcareapis workspace private-endpoint-connection wait') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        c.argument('private_endpoint_connection_name',
                   options_list=['--name', '-n', '--private-endpoint-connection-name'],
                   type=str, help='The name of the private endpoint connection '
                   'associated with the Azure resource', id_part='child_name_1')
    with self.argument_context('healthcareapis workspace private-link-resource list') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.')
    with self.argument_context('healthcareapis workspace private-link-resource show') as c:
        c.argument('resource_group_name', resource_group_name_type)
        c.argument('workspace_name', type=str, help='The name of workspace resource.', id_part='name')
        # NOTE: dataset-extraction residue ("| 0.627951 | 0.056288 |") removed from the final line.
        c.argument('group_name', type=str, help='The name of the private link resource group.',
                   id_part='child_name_1')
from os import name
from src.Types.TokenTypes import TokenTypes
from src.Node import Node
from src.SymbolTable import SymbolTable
from src.Nodes.FuncArguments import FuncArguments
from llvmlite import ir
class FuncDeclaration(Node):
    """AST node for a user function declaration.

    On Evaluate, emits an llvmlite IR function (signature derived from the
    declared argument/return types) and registers it in the symbol table.
    """

    def __init__(self, func_name, statements, args: FuncArguments):
        # `args.args` is a dict of parameter entries; it also contains an
        # entry keyed by the function's own name whose 'type' is the declared
        # return type (see the lookup in Evaluate).
        self.args = args
        self.statements = statements
        super().__init__(
            value=func_name,
            children=[statements, args],
            node_type='FUNCDEC'
        )
    def Evaluate(self, symbol_table: SymbolTable):
        # 'main' is only registered; its IR body is emitted elsewhere.
        if self.value == 'main':
            symbol_table.set_function('main', self, symbol_table, None)
            return
        # Map the declared return type token to an IR type (int -> i8,
        # bool -> i1, anything else -> fixed 64-byte array, i.e. a string).
        func_return_type = None
        if self.args.args[self.value]['type'].type == TokenTypes.INT:
            func_return_type = ir.IntType(8)
        elif self.args.args[self.value]['type'].type == TokenTypes.BOOL_TYPE:
            func_return_type = ir.IntType(1)
        else:
            func_return_type = ir.ArrayType(ir.IntType(8), 64)
        args_types = []
        defined_args = self.args.args
        for arg in self.args.args.keys():
            if arg == self.value:
                continue  # skip the pseudo-entry carrying the return type
            # NOTE(review): unlike the return-type check above, these compare
            # the token object itself (no `.type`) to the enum — if the stored
            # 'type' is a token this always falls through to the array branch;
            # verify against FuncArguments.
            if defined_args[arg]['type'] == TokenTypes.INT:
                args_types.append(ir.IntType(8))
            elif defined_args[arg]['type'] == TokenTypes.BOOL_TYPE:
                args_types.append(ir.IntType(1))
            else:
                args_types.append(ir.ArrayType(ir.IntType(8), 64))
        func_type = ir.FunctionType(func_return_type, args_types)
        func_i = ir.Function(self.module, func_type, name=self.value)
        func_entry_block = func_i.append_basic_block(f'func_{self.value}_entry')
        # Redirect the shared class-level builder into the new function's
        # entry block; the caller's builder is restored at the end.
        previous_position = self.builder
        Node.builder = ir.IRBuilder(func_entry_block)
        args_pointers = []
        # Fresh scope for the function body, sharing the global function table.
        func_symbol_table = SymbolTable()
        func_symbol_table.functions = symbol_table.functions
        args_keys = list(self.args.args.keys())
        # Spill each incoming IR argument to a stack slot named after the
        # declared parameter. NOTE(review): `idx+1` assumes the return-type
        # pseudo-entry (self.value) is the FIRST key in args — TODO confirm.
        for idx, arg in enumerate(args_types):
            arg_pointer = self.builder.alloca(arg, name=f'{args_keys[idx+1]}')
            self.builder.store(func_i.args[idx], arg_pointer)
            args_pointers.append(arg_pointer)
        for idx, ptr in enumerate(args_pointers):
            func_symbol_table.set(f'{args_keys[idx+1]}', ptr)
        # Register before evaluating the body so recursive calls resolve.
        symbol_table.set_function(key=self.value, value=self, func_symbol_table=func_symbol_table, func_i=func_i)
        self.statements.Evaluate(func_symbol_table)
        Node.builder = previous_position
return | src/Nodes/FuncDeclaration.py | from os import name
from src.Types.TokenTypes import TokenTypes
from src.Node import Node
from src.SymbolTable import SymbolTable
from src.Nodes.FuncArguments import FuncArguments
from llvmlite import ir
class FuncDeclaration(Node):
    """AST node for a user function declaration.

    On Evaluate, emits an llvmlite IR function whose signature is derived
    from the declared argument/return types and registers it in the symbol
    table so calls (including recursive ones) can resolve it.
    """

    def __init__(self, func_name, statements, args: FuncArguments):
        # `args.args` is a dict of parameter entries; it also contains an
        # entry keyed by the function's own name whose 'type' token is the
        # declared return type (see _ir_type lookups in Evaluate).
        self.args = args
        self.statements = statements
        super().__init__(
            value=func_name,
            children=[statements, args],
            node_type='FUNCDEC'
        )

    @staticmethod
    def _ir_type(type_token):
        """Map a declared type token to its IR type.

        int -> i8, bool -> i1, anything else -> fixed 64-byte array (string).
        """
        if type_token == TokenTypes.INT:
            return ir.IntType(8)
        if type_token == TokenTypes.BOOL_TYPE:
            return ir.IntType(1)
        return ir.ArrayType(ir.IntType(8), 64)

    def Evaluate(self, symbol_table: SymbolTable):
        """Emit the function's IR and register it in `symbol_table`.

        'main' is only registered; its IR body is emitted elsewhere.
        """
        if self.value == 'main':
            symbol_table.set_function('main', self, symbol_table, None)
            return

        func_return_type = self._ir_type(self.args.args[self.value]['type'].type)

        defined_args = self.args.args
        args_types = []
        for arg in defined_args.keys():
            if arg == self.value:
                continue  # skip the pseudo-entry carrying the return type
            # BUGFIX: compare the token's `.type` (as the return-type lookup
            # above does). The original compared the token object itself to
            # the TokenTypes enum, which never matched and silently forced
            # every parameter to the array fallback type.
            args_types.append(self._ir_type(defined_args[arg]['type'].type))

        func_type = ir.FunctionType(func_return_type, args_types)
        func_i = ir.Function(self.module, func_type, name=self.value)
        func_entry_block = func_i.append_basic_block(f'func_{self.value}_entry')

        # Redirect the shared class-level builder into the new function's
        # entry block; the caller's builder is restored before returning.
        previous_builder = self.builder
        Node.builder = ir.IRBuilder(func_entry_block)

        # Fresh scope for the function body, sharing the global function table.
        func_symbol_table = SymbolTable()
        func_symbol_table.functions = symbol_table.functions

        # Spill each incoming IR argument to a stack slot named after the
        # declared parameter and expose it in the function's scope.
        # NOTE(review): `idx + 1` assumes the return-type pseudo-entry
        # (self.value) is the FIRST key of args — TODO confirm.
        args_keys = list(defined_args.keys())
        args_pointers = []
        for idx, arg_ir_type in enumerate(args_types):
            arg_pointer = self.builder.alloca(arg_ir_type, name=f'{args_keys[idx + 1]}')
            self.builder.store(func_i.args[idx], arg_pointer)
            args_pointers.append(arg_pointer)
        for idx, ptr in enumerate(args_pointers):
            func_symbol_table.set(f'{args_keys[idx + 1]}', ptr)

        # Register before evaluating the body so recursive calls resolve.
        symbol_table.set_function(key=self.value, value=self, func_symbol_table=func_symbol_table, func_i=func_i)
        self.statements.Evaluate(func_symbol_table)

        # Restore the caller's builder position.
        Node.builder = previous_builder
        return
# Daily GitLab commit-statistics report: assembles an HTML page (day.html)
# section by section via content_save, then pushes a DingTalk notification.
# All helpers (content_save, gitlab_info, gitlab_statistics_*, dingding,
# info_master, info_other) come from code_statistics via the star import.
import time
from code_statistics import *
# Thumbnail image used in the DingTalk message card.
imgurl = "https://ae01.alicdn.com/kf/U56b2fb24ff7b4e5fa05c5acf7d3d0318r.jpg"
# Output HTML file for the daily report.
file_name = "day.html"
if __name__ == "__main__":
    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + " [日]")
    time1 = time.strftime("%Y-%m-%d 00:00:00") # window start: today 00:00:00
    time2 = time.strftime("%Y-%m-%d 23:59:59") # window end: today 23:59:59
    # header1: document head + "group repositories / master" table header.
    # Mode "w" truncates the file; all later sections append with "a".
    content_save(
        "<!DOCTYPE html><html><head><title>GitLab代码提交量统计</title><meta charset=\"utf-8\"/><link href=\"https://cdn.jsdelivr.net/gh/Fog-Forest/cdn@2.1.6.2/markdown/markdown.css\" rel=\"stylesheet\" type=\"text/css\" /></head><body><h3>GitLab代码提交量统计(单位:行) 【日报】</h3><br><h5><font color=red>注:统计将会剔除所有合并操作</font></h5><h5>上次更新时间:" + time.strftime(
            "%Y-%m-%d %H:%M:%S",
            time.localtime()) + "</h5><br><blockquote><p>群组仓库</p></blockquote><ul><li><p><strong>master</strong></p><table><thead><tr><th>Git用户名</th><th>新增代码数</th><th>删除代码数</th><th>总计代码数</th><th>提交次数</th></tr></thead><tbody>",
        "w", file_name)
    gitlab_info(time1, time2, "group") # collect stats for group repositories
    gitlab_statistics_data(1) # master branch
    gitlab_statistics_content(file_name)
    content_save(
        "</tbody></table></li><li><p><strong>dev</strong></p><table><thead><tr><th>Git用户名</th><th>新增代码数</th><th>删除代码数</th><th>总计代码数</th><th>提交次数</th></tr></thead><tbody>",
        "a", file_name)
    gitlab_statistics_data(2) # dev branch
    gitlab_statistics_content(file_name)
    # Accumulators must be cleared before running the second round of stats.
    # NOTE(review): info_master / info_other are presumably module-level
    # accumulators in code_statistics — verify.
    print("\n******************* switch *******************\n")
    info_master.clear()
    info_other.clear()
    # header2: "personal repositories / all" table header.
    content_save(
        "</tbody></table></li></ul><blockquote><p>个人仓库</p></blockquote><ul><li><p><strong>all</strong></p><table><thead><tr><th>Git用户名</th><th>新增代码数</th><th>删除代码数</th><th>总计代码数</th><th>提交次数</th></tr></thead><tbody></tbody>",
        "a", file_name)
    gitlab_info(time1, time2, "user") # collect stats for personal repositories
    gitlab_statistics_data(2) # personal repos: count all branches directly
    gitlab_statistics_content(file_name)
    # footer: close remaining tags.
    content_save("</tbody></table></li></ul></body></html>", "a", file_name)
    dingding("http://localhost/" + file_name, imgurl, "[日报]") # comment out if the DingTalk push is not needed
print("http://localhost/" + file_name) | day.py | import time
# Daily GitLab commit-statistics report: assembles an HTML page (day.html)
# section by section via content_save, then pushes a DingTalk notification.
# All helpers (content_save, gitlab_info, gitlab_statistics_*, dingding,
# info_master, info_other) come from code_statistics via the star import.
# NOTE(review): `time` is used below but not imported in this fragment —
# presumably `import time` precedes this copy; verify.
from code_statistics import *
# Thumbnail image used in the DingTalk message card.
imgurl = "https://ae01.alicdn.com/kf/U56b2fb24ff7b4e5fa05c5acf7d3d0318r.jpg"
# Output HTML file for the daily report.
file_name = "day.html"
if __name__ == "__main__":
    print(time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + " [日]")
    time1 = time.strftime("%Y-%m-%d 00:00:00") # window start: today 00:00:00
    time2 = time.strftime("%Y-%m-%d 23:59:59") # window end: today 23:59:59
    # header1: document head + "group repositories / master" table header.
    # Mode "w" truncates the file; all later sections append with "a".
    content_save(
        "<!DOCTYPE html><html><head><title>GitLab代码提交量统计</title><meta charset=\"utf-8\"/><link href=\"https://cdn.jsdelivr.net/gh/Fog-Forest/cdn@2.1.6.2/markdown/markdown.css\" rel=\"stylesheet\" type=\"text/css\" /></head><body><h3>GitLab代码提交量统计(单位:行) 【日报】</h3><br><h5><font color=red>注:统计将会剔除所有合并操作</font></h5><h5>上次更新时间:" + time.strftime(
            "%Y-%m-%d %H:%M:%S",
            time.localtime()) + "</h5><br><blockquote><p>群组仓库</p></blockquote><ul><li><p><strong>master</strong></p><table><thead><tr><th>Git用户名</th><th>新增代码数</th><th>删除代码数</th><th>总计代码数</th><th>提交次数</th></tr></thead><tbody>",
        "w", file_name)
    gitlab_info(time1, time2, "group") # collect stats for group repositories
    gitlab_statistics_data(1) # master branch
    gitlab_statistics_content(file_name)
    content_save(
        "</tbody></table></li><li><p><strong>dev</strong></p><table><thead><tr><th>Git用户名</th><th>新增代码数</th><th>删除代码数</th><th>总计代码数</th><th>提交次数</th></tr></thead><tbody>",
        "a", file_name)
    gitlab_statistics_data(2) # dev branch
    gitlab_statistics_content(file_name)
    # Accumulators must be cleared before running the second round of stats.
    # NOTE(review): info_master / info_other are presumably module-level
    # accumulators in code_statistics — verify.
    print("\n******************* switch *******************\n")
    info_master.clear()
    info_other.clear()
    # header2: "personal repositories / all" table header.
    content_save(
        "</tbody></table></li></ul><blockquote><p>个人仓库</p></blockquote><ul><li><p><strong>all</strong></p><table><thead><tr><th>Git用户名</th><th>新增代码数</th><th>删除代码数</th><th>总计代码数</th><th>提交次数</th></tr></thead><tbody></tbody>",
        "a", file_name)
    gitlab_info(time1, time2, "user") # collect stats for personal repositories
    gitlab_statistics_data(2) # personal repos: count all branches directly
    gitlab_statistics_content(file_name)
    # footer: close remaining tags.
    content_save("</tbody></table></li></ul></body></html>", "a", file_name)
    dingding("http://localhost/" + file_name, imgurl, "[日报]") # comment out if the DingTalk push is not needed
    print("http://localhost/" + file_name)