code stringlengths 2k 1.04M | repo_path stringlengths 5 517 | parsed_code stringlengths 0 1.04M | quality_prob float64 0.02 0.95 | learning_prob float64 0.02 0.93 |
|---|---|---|---|---|
import math
import os
import pytest
import subprocess
import time
from watchtower.streamer.writer import dropbox_writer
from watchtower.streamer.writer.disk_writer import DiskWriter
def test_dropbox_writer_integration(writer, random_data, tmp_path):
    """
    Integration test to feed a DropboxWriter chunks of data and verify that
    the data written to disk is identical to the input data. A
    MockDropboxUploader is used to output to the tmp_path instead of Dropbox.
    """
    # Append chunks of bytes to the DropboxWriter instance. This simulates a
    # live feed.
    append_count = 20
    amount_to_read = len(random_data) // append_count
    for i in range(append_count):
        data = random_data[i * amount_to_read:(i + 1) * amount_to_read]
        writer.append_bytes(data, close=(i == append_count - 1))  # Close on the last chunk.
    # Wait for writers to stop.
    while not writer.is_finished_writing():
        time.sleep(0.05)
    # Read in all of the data that the DropboxWriter output to disk.
    files = os.listdir(tmp_path)
    # Sort numerically into [test_file0.bin, test_file1.bin, ...]. The index
    # is sliced out explicitly: str.strip() removes a *character set*, not a
    # prefix/suffix, and only produced correct results here by coincidence.
    files.sort(key=lambda name: int(name[len('test_file'):-len('.bin')]))
    written_data = b''
    for file_name in files:
        with open(os.path.join(tmp_path, file_name), 'rb') as f:
            written_data += f.read()
    # Assert that files were written to disk, one per DEFAULT_FILE_CHUNK_SIZE
    # bytes of input.
    assert(len(files) > 0)
    assert(len(files) == math.ceil(len(random_data) / dropbox_writer.DEFAULT_FILE_CHUNK_SIZE))
    # Assert the writer's input data is identical to the data output to disk.
    assert(written_data == random_data)
def test_dropbox_writer_encrypted_integration(encrypted_writer, random_data, tmp_path, installation_path):
    """
    Integration test to feed a DropboxWriter chunks of data, decrypt the
    output, and verify that the decrypted data is identical to the input data.
    A MockDropboxUploader is used to output to the tmp_path instead of Dropbox.
    This also serves as a good test for decrypt.py, by decrypting each file
    output by the DropboxWriter and verifying that the bytes are identical to
    the original.
    """
    # Append chunks of bytes to the DropboxWriter instance. This simulates a
    # live feed.
    append_count = 20
    amount_to_read = len(random_data) // append_count
    for i in range(append_count):
        data = random_data[i * amount_to_read:(i + 1) * amount_to_read]
        encrypted_writer.append_bytes(data, close=(i == append_count - 1))  # Close on the last chunk.
    # Wait for writers to stop.
    while not encrypted_writer.is_finished_writing():
        time.sleep(0.05)
    # The installation path is one directory up from the package path.
    private_key_path = os.path.join(tmp_path, 'private.pem')
    decrypt_script_path = os.path.join(installation_path, 'ancillary', 'decryption', 'decrypt.py')
    # Read in all of the data that the DropboxWriter output to disk. Ignore the .pem files.
    files = [name for name in os.listdir(tmp_path) if name.endswith('.bin')]
    # Sort numerically into [test_file0.bin, test_file1.bin, ...]. The index
    # is sliced out explicitly: str.strip() removes a *character set*, not a
    # prefix/suffix, and only produced correct results here by coincidence.
    files.sort(key=lambda name: int(name[len('test_file'):-len('.bin')]))
    written_data = b''
    for file_name in files:
        in_path = os.path.join(tmp_path, file_name)
        out_path = os.path.join(tmp_path, file_name + '.dec')
        # Decrypt each file using the decrypt.py program. check_call raises
        # CalledProcessError immediately if decryption fails, instead of
        # deferring to a confusing missing-file error when reading back the
        # output (subprocess.call ignored the exit status).
        subprocess.check_call(['python', decrypt_script_path,
                               '-k', private_key_path,
                               '-i', in_path,
                               '-o', out_path])
        # Append the decrypted data.
        with open(out_path, 'rb') as f:
            written_data += f.read()
    # Assert that multiple files were written to disk.
    assert(len(files) > 1)
    assert(len(files) == math.ceil(len(random_data) / dropbox_writer.DEFAULT_FILE_CHUNK_SIZE))
    # Assert the writer's input data is identical to the data output to disk.
    assert(written_data == random_data)
# ---- Fixtures
@pytest.fixture
def writer(tmp_path):
    """A DropboxWriter writing into tmp_path through a MockDropboxUploader
    (no public_pem_path is supplied to this one)."""
    target_path = os.path.join(tmp_path, 'test_file.bin')
    uploader = MockDropboxUploader()
    return dropbox_writer.DropboxWriter(target_path,
                                        dropbox_token="",
                                        test_dropbox_uploader=uploader)
@pytest.fixture
def encrypted_writer(tmp_path):
    """A DropboxWriter configured with a freshly generated RSA-2048 key pair.

    Both PEM files are written into tmp_path: 'private.pem' (consumed by the
    decryption test) and 'public.pem' (passed to the writer as
    public_pem_path). Uploads go through a MockDropboxUploader.
    """
    # Only the names actually used are imported; the original also pulled in
    # `hashes` and `padding`, which were never referenced.
    from cryptography.hazmat.backends import default_backend
    from cryptography.hazmat.primitives import serialization
    from cryptography.hazmat.primitives.asymmetric import rsa
    # Generate a private and public key and save these in the tmp_path.
    private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
    private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
                                            format=serialization.PrivateFormat.PKCS8,
                                            encryption_algorithm=serialization.NoEncryption())
    with open(os.path.join(tmp_path, 'private.pem'), 'wb') as private_out:
        private_out.write(private_pem)
    public_key = private_key.public_key()
    public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
                                         format=serialization.PublicFormat.SubjectPublicKeyInfo)
    with open(os.path.join(tmp_path, 'public.pem'), 'wb') as public_out:
        public_out.write(public_pem)
    return dropbox_writer.DropboxWriter(os.path.join(tmp_path, 'test_file.bin'),
                                        dropbox_token="",
                                        public_pem_path=os.path.join(tmp_path, 'public.pem'),
                                        test_dropbox_uploader=MockDropboxUploader())
# ---- Mock objects
class MockDropboxUploader():
"""
Mock object to be used in place of a dropbox object. Each call to
files_upload will create a new file on disk.
"""
def files_upload(self, bts, path):
writer = DiskWriter(path)
writer.append_bytes(bts, close=True) | watchtower/tests/streamer/writer/test_dropbox_writer.py | import math
import os
import pytest
import subprocess
import time
from watchtower.streamer.writer import dropbox_writer
from watchtower.streamer.writer.disk_writer import DiskWriter
def test_dropbox_writer_integration(writer, random_data, tmp_path):
"""
Integration test to feed a DropboxWriter chunks of data and verify that the
decrypted data is identical to the input data. A MockDropboxUploader is
used to output to the tmp_path instead of Dropbox.
"""
# Append chunks of bytes to the DropboxWriter instance. This simulates a
# live feed.
append_count = 20
amount_to_read = len(random_data)//append_count
for i in range(append_count):
data = random_data[i*amount_to_read:(i+1) * amount_to_read]
writer.append_bytes(data, close=(i == append_count-1)) # Close on the last chunk.
# Wait for writers to stop.
while not writer.is_finished_writing():
time.sleep(0.05)
# Read in all of the data that the DropboxWriter output to disk.
files = os.listdir(tmp_path)
files.sort(key=lambda name: int(name.strip('test_file').strip('.bin'))) # Sort them into [test_file0.bin, test_file1.bin, ...]
written_data = ''.encode()
for file_name in files:
with open(os.path.join(tmp_path, file_name), 'rb') as f:
written_data += f.read()
# Assert that multiple files were written to disk.
assert(len(files) > 0)
assert(len(files) == math.ceil(len(random_data)/dropbox_writer.DEFAULT_FILE_CHUNK_SIZE))
# Assert the writer's input data is identical to the data output to disk.
assert(written_data == random_data)
def test_dropbox_writer_encrypted_integration(encrypted_writer, random_data, tmp_path, installation_path):
"""
Integration test to feed a DropboxWriter chunks of data, decrypt the
output, and verify that the decrypted data is identical to the input data.
A MockDropboxUploader is used to output to the tmp_path instead of Dropbox.
This also serves as a good test for decrypt.py, by decrypting each file
output by the DropboxWriter and verifying that the bytes are identical to
the original.
"""
# Append chunks of bytes to the DropboxWriter instance. This simulates a
# live feed.
append_count = 20
amount_to_read = len(random_data)//append_count
for i in range(append_count):
data = random_data[i*amount_to_read:(i+1) * amount_to_read]
encrypted_writer.append_bytes(data, close=(i == append_count-1)) # Close on the last chunk.
# Wait for writers to stop.
while not encrypted_writer.is_finished_writing():
time.sleep(0.05)
# The installation path is one directory up from the package path.
private_key_path = os.path.join(tmp_path, 'private.pem')
decrypt_script_path = os.path.join(installation_path, 'ancillary', 'decryption', 'decrypt.py')
# Read in all of the data that the DropboxWriter output to disk. Ignore the .pem files.
files = list(filter(lambda name: name.endswith('.bin'), os.listdir(tmp_path)))
files.sort(key=lambda name: int(name.strip('test_file').strip('.bin'))) # Sort them into [test_file0.bin, test_file1.bin, ...]
written_data = ''.encode()
for file_name in files:
in_path = os.path.join(tmp_path, file_name)
out_path = os.path.join(tmp_path, file_name + '.dec')
# Decrypt each file using the decrypt.py program.
subprocess.call(['python', decrypt_script_path,
'-k', private_key_path,
'-i', in_path,
'-o', out_path])
# Append the decrypted data.
with open(out_path, 'rb') as f:
written_data += f.read()
# Assert that multiple files were written to disk.
assert(len(files) > 1)
assert(len(files) == math.ceil(len(random_data)/dropbox_writer.DEFAULT_FILE_CHUNK_SIZE))
# Assert the writer's input data is identical to the data output to disk.
assert(written_data == random_data)
# ---- Fixtures
@pytest.fixture
def writer(tmp_path):
return dropbox_writer.DropboxWriter(os.path.join(tmp_path, 'test_file.bin'),
dropbox_token="",
test_dropbox_uploader=MockDropboxUploader())
@pytest.fixture
def encrypted_writer(tmp_path):
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization, hashes
from cryptography.hazmat.primitives.asymmetric import rsa, padding
# Generate a private and public key and save these in the tmp_path.
private_key = rsa.generate_private_key(public_exponent=65537, key_size=2048, backend=default_backend())
private_pem = private_key.private_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption())
with open(os.path.join(tmp_path, 'private.pem'), 'wb') as private_out:
private_out.write(private_pem)
public_key = private_key.public_key()
public_pem = public_key.public_bytes(encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo)
with open(os.path.join(tmp_path, 'public.pem'), 'wb') as public_out:
public_out.write(public_pem)
return dropbox_writer.DropboxWriter(os.path.join(tmp_path, 'test_file.bin'),
dropbox_token="",
public_pem_path=os.path.join(tmp_path, 'public.pem'),
test_dropbox_uploader=MockDropboxUploader())
# ---- Mock objects
class MockDropboxUploader():
"""
Mock object to be used in place of a dropbox object. Each call to
files_upload will create a new file on disk.
"""
def files_upload(self, bts, path):
writer = DiskWriter(path)
writer.append_bytes(bts, close=True) | 0.392803 | 0.405684 |
__all__ = ('SpoonAnalyser',)
from typing import Any, Iterator, List, Mapping, Sequence
import contextlib
import json
import os
import shlex
import subprocess
from dockerblade import DockerDaemon as DockerBladeDockerDaemon
from loguru import logger
import attr
from .analysis import SpoonFunction, SpoonStatement
from .post_install import IMAGE_NAME as SPOON_IMAGE_NAME
from ..analyser import Analyser
from ..analysis import Analysis
from ..container import ProjectContainer
from ..core import FileLocationRange
from ..functions import ProgramFunctions
from ..loops import ProgramLoops
from ..project import Project
from ..statements import ProgramStatements
@attr.s
class SpoonAnalyser(Analyser):
_dockerblade: DockerBladeDockerDaemon = attr.ib(repr=False)
@contextlib.contextmanager
def _container(self, project: Project) -> Iterator[ProjectContainer]:
    """Provisions an ephemeral container for a given project.

    A throwaway Docker volume is populated from the project image and then
    mounted read-only at /workspace inside a fresh Spoon analysis container.
    ExitStack callbacks tear down the container and the volume (in LIFO
    order) when the context exits, even on error.

    NOTE(review): the volume name 'kaskaraspoon' is a fixed constant, so two
    concurrent analyses on one host would collide -- confirm this is intended.
    """
    launch = self._dockerblade.client.containers.run
    with contextlib.ExitStack() as stack:
        # create a temporary volume from the project image
        volume_name = 'kaskaraspoon'
        # Running /bin/true with the volume mounted at the project directory
        # causes Docker to seed the new volume with the image's files there.
        cmd_create_volume = (f'docker run --rm -v {volume_name}:'
                             f'{shlex.quote(project.directory)} '
                             f'{project.image} /bin/true')
        cmd_kill_volume = f'docker volume rm {volume_name}'
        logger.debug(f'created temporary volume [{volume_name}] '
                     f'from project image [{project.image}] '
                     f'via command: {cmd_create_volume}')
        subprocess.check_output(cmd_create_volume, shell=True)
        # Schedule best-effort volume removal; output/errors are discarded.
        stack.callback(subprocess.call, cmd_kill_volume,
                       shell=True,
                       stderr=subprocess.DEVNULL,
                       stdout=subprocess.DEVNULL,
                       stdin=subprocess.DEVNULL)
        # Start the Spoon analyser image with the project volume mounted
        # read-only at /workspace; stdin kept open so /bin/sh stays alive.
        docker_analyser = launch(SPOON_IMAGE_NAME, '/bin/sh',
                                 stdin_open=True,
                                 volumes={volume_name: {
                                     'bind': '/workspace',
                                     'mode': 'ro'}},
                                 detach=True)
        stack.callback(docker_analyser.remove, force=True)
        dockerblade = self._dockerblade.attach(docker_analyser.id)
        yield ProjectContainer(project=project, dockerblade=dockerblade)
def analyse(self, project: Project) -> Analysis:
    """Runs the Spoon analysis for *project* inside an ephemeral container."""
    logger.debug(f"analysing Spoon project: {project}")
    with self._container(project) as container:
        analysis = self._analyse_container(container)
    return analysis
def _analyse_container(self, container: ProjectContainer) -> Analysis:
    """Runs the kaskara driver inside *container* and assembles an Analysis.

    The in-container `kaskara` command reads sources from /workspace and
    writes statements.json, functions.json and loops.json to /output; each
    is read back out of the container and parsed into its database.
    """
    dir_source = '/workspace'
    dir_output = '/output'
    # Run the analysis driver over the mounted project sources.
    container.shell.check_output(f'kaskara {dir_source} -o {dir_output}')
    # load statements
    filename_statements = os.path.join(dir_output, 'statements.json')
    statements_dict = json.loads(container.files.read(filename_statements))
    statements = self._load_statements_from_dict(statements_dict)
    # load functions
    filename_functions = os.path.join(dir_output, 'functions.json')
    functions_dict = json.loads(container.files.read(filename_functions))
    functions = self._load_functions_from_dict(functions_dict)
    # load loops
    filename_loops = os.path.join(dir_output, 'loops.json')
    loops_dict = json.loads(container.files.read(filename_loops))
    loops = self._load_loops_from_dict(loops_dict)
    # find insertion points
    insertions = statements.insertions()
    return Analysis(project=container.project,
                    loops=loops,
                    functions=functions,
                    statements=statements,
                    insertions=insertions)
def _load_statements_from_dict(self,
                               dict_: Sequence[Mapping[str, Any]]
                               ) -> ProgramStatements:
    """Builds the statement database from parsed JSON records."""
    logger.debug('parsing statements database')
    parsed = [SpoonStatement.from_dict(entry) for entry in dict_]
    statements = ProgramStatements(parsed)
    logger.debug(f'parsed {len(statements)} statements')
    return statements
def _load_functions_from_dict(self,
                              dict_: Sequence[Mapping[str, Any]]
                              ) -> ProgramFunctions:
    """Builds the function database from parsed JSON records."""
    logger.debug('parsing function database')
    parsed = [SpoonFunction.from_dict(entry) for entry in dict_]
    functions = ProgramFunctions(parsed)
    logger.debug(f'parsed {len(functions)} functions')
    return functions
def _load_loops_from_dict(self,
                          dict_: Sequence[Mapping[str, Any]]
                          ) -> ProgramLoops:
    """Loads the loops database from a given dictionary.

    Each record's 'body' field holds a serialized FileLocationRange giving
    the source extent of one loop body.
    """
    logger.debug('parsing loop database')
    loop_bodies: List[FileLocationRange] = []
    for loop_info in dict_:
        loc = FileLocationRange.from_string(loop_info['body'])
        loop_bodies.append(loc)
    loops = ProgramLoops.from_body_location_ranges(loop_bodies)
    logger.debug(f'parsed loops')
return loops | lib/kaskara/spoon/analyser.py | __all__ = ('SpoonAnalyser',)
from typing import Any, Iterator, List, Mapping, Sequence
import contextlib
import json
import os
import shlex
import subprocess
from dockerblade import DockerDaemon as DockerBladeDockerDaemon
from loguru import logger
import attr
from .analysis import SpoonFunction, SpoonStatement
from .post_install import IMAGE_NAME as SPOON_IMAGE_NAME
from ..analyser import Analyser
from ..analysis import Analysis
from ..container import ProjectContainer
from ..core import FileLocationRange
from ..functions import ProgramFunctions
from ..loops import ProgramLoops
from ..project import Project
from ..statements import ProgramStatements
@attr.s
class SpoonAnalyser(Analyser):
_dockerblade: DockerBladeDockerDaemon = attr.ib(repr=False)
@contextlib.contextmanager
def _container(self, project: Project) -> Iterator[ProjectContainer]:
"""Provisions an ephemeral container for a given project."""
launch = self._dockerblade.client.containers.run
with contextlib.ExitStack() as stack:
# create a temporary volume from the project image
volume_name = 'kaskaraspoon'
cmd_create_volume = (f'docker run --rm -v {volume_name}:'
f'{shlex.quote(project.directory)} '
f'{project.image} /bin/true')
cmd_kill_volume = f'docker volume rm {volume_name}'
logger.debug(f'created temporary volume [{volume_name}] '
f'from project image [{project.image}] '
f'via command: {cmd_create_volume}')
subprocess.check_output(cmd_create_volume, shell=True)
stack.callback(subprocess.call, cmd_kill_volume,
shell=True,
stderr=subprocess.DEVNULL,
stdout=subprocess.DEVNULL,
stdin=subprocess.DEVNULL)
docker_analyser = launch(SPOON_IMAGE_NAME, '/bin/sh',
stdin_open=True,
volumes={volume_name: {
'bind': '/workspace',
'mode': 'ro'}},
detach=True)
stack.callback(docker_analyser.remove, force=True)
dockerblade = self._dockerblade.attach(docker_analyser.id)
yield ProjectContainer(project=project, dockerblade=dockerblade)
def analyse(self, project: Project) -> Analysis:
logger.debug(f"analysing Spoon project: {project}")
with self._container(project) as container:
return self._analyse_container(container)
def _analyse_container(self, container: ProjectContainer) -> Analysis:
dir_source = '/workspace'
dir_output = '/output'
container.shell.check_output(f'kaskara {dir_source} -o {dir_output}')
# load statements
filename_statements = os.path.join(dir_output, 'statements.json')
statements_dict = json.loads(container.files.read(filename_statements))
statements = self._load_statements_from_dict(statements_dict)
# load functions
filename_functions = os.path.join(dir_output, 'functions.json')
functions_dict = json.loads(container.files.read(filename_functions))
functions = self._load_functions_from_dict(functions_dict)
# load loops
filename_loops = os.path.join(dir_output, 'loops.json')
loops_dict = json.loads(container.files.read(filename_loops))
loops = self._load_loops_from_dict(loops_dict)
# find insertion points
insertions = statements.insertions()
return Analysis(project=container.project,
loops=loops,
functions=functions,
statements=statements,
insertions=insertions)
def _load_statements_from_dict(self,
dict_: Sequence[Mapping[str, Any]]
) -> ProgramStatements:
"""Loads the statement database from a given dictionary."""
logger.debug('parsing statements database')
statements = \
ProgramStatements([SpoonStatement.from_dict(d) for d in dict_])
logger.debug(f'parsed {len(statements)} statements')
return statements
def _load_functions_from_dict(self,
dict_: Sequence[Mapping[str, Any]]
) -> ProgramFunctions:
"""Loads the function database from a given dictionary."""
logger.debug('parsing function database')
functions = \
ProgramFunctions([SpoonFunction.from_dict(d) for d in dict_])
logger.debug(f'parsed {len(functions)} functions')
return functions
def _load_loops_from_dict(self,
dict_: Sequence[Mapping[str, Any]]
) -> ProgramLoops:
"""Loads the loops database from a given dictionary."""
logger.debug('parsing loop database')
loop_bodies: List[FileLocationRange] = []
for loop_info in dict_:
loc = FileLocationRange.from_string(loop_info['body'])
loop_bodies.append(loc)
loops = ProgramLoops.from_body_location_ranges(loop_bodies)
logger.debug(f'parsed loops')
return loops | 0.687945 | 0.09236 |
import os
import sys
import ctypes
from decouple import config

# Locate the compiled cro_clib shared library. The cargo output directory
# and build profile can be overridden through the environment; decouple's
# `config` falls back to the defaults given here.
target_dir = config('CARGO_TARGET_DIR', os.path.join(os.path.dirname(__file__), '../../target'))
build_profile = config('BUILD_PROFILE', 'debug')
# Shared-library suffix differs on macOS (.dylib); everything else gets .so.
ext = 'dylib' if sys.platform == 'darwin' else 'so'
dll = ctypes.cdll.LoadLibrary(os.path.join(target_dir, '%s/libcro_clib.%s' % (build_profile, ext)))

# Signatures for the one-shot JSON-RPC entry points; the real and mock
# variants share the same argument list and return type.
dll.cro_jsonrpc_call.argtypes = dll.cro_jsonrpc_call_mock.argtypes = [
    ctypes.c_char_p,  # storage_dir
    ctypes.c_char_p,  # websocket_url
    ctypes.c_char,  # network id
    ctypes.c_char_p,  # request
    ctypes.c_char_p,  # buf
    ctypes.c_size_t,  # buf_size
    ctypes.c_void_p,  # progress callback
    ctypes.c_void_p,  # user_data
]
dll.cro_jsonrpc_call.restype = dll.cro_jsonrpc_call_mock.restype = ctypes.c_int

# Context creation: writes an opaque handle into rpc_out.
dll.cro_create_jsonrpc.argtypes = dll.cro_create_mock_jsonrpc.argtypes = [
    ctypes.POINTER(ctypes.c_void_p),  # rpc_out
    ctypes.c_char_p,  # storage_dir
    ctypes.c_char_p,  # websocket_url
    ctypes.c_char,  # network_id
    ctypes.c_void_p,  # progress callback
]
dll.cro_create_jsonrpc.restype = dll.cro_create_mock_jsonrpc.restype = ctypes.c_int

# Runs one request against an existing handle.
dll.cro_run_jsonrpc.argtypes = [
    ctypes.c_void_p,  # jsonrpc
    ctypes.c_char_p,  # request
    ctypes.c_char_p,  # buf
    ctypes.c_size_t,  # buf_size
    ctypes.c_void_p,  # user_data
]
dll.cro_run_jsonrpc.restype = ctypes.c_int

# Releases a handle created by cro_create_jsonrpc / cro_create_mock_jsonrpc.
dll.cro_destroy_jsonrpc.argtypes = [
    ctypes.c_void_p,  # jsonrpc
]
dll.cro_destroy_jsonrpc.restype = ctypes.c_int
class RpcBinding:
    """Thin ctypes wrapper around the cro_clib JSON-RPC interface.

    Owns the opaque native handle created by cro_create_jsonrpc (or its
    mock variant) and destroys it when the wrapper is garbage collected.
    """

    def __init__(self, storage, tendermint_ws, network_id=0xab, mock_mode=False):
        """Create the native JSON-RPC context.

        :param storage: wallet storage directory path
        :param tendermint_ws: tendermint websocket URL
        :param network_id: single-byte network id (default 0xab)
        :param mock_mode: use the mock backend instead of the real one
        """
        create_jsonrpc = dll.cro_create_mock_jsonrpc if mock_mode else dll.cro_create_jsonrpc
        self._p = ctypes.c_void_p()
        retcode = create_jsonrpc(ctypes.byref(self._p), storage.encode(), tendermint_ws.encode(), network_id, None)
        # NOTE(review): assert is stripped under `python -O`; kept as-is to
        # preserve the exception type for existing callers, but a raise
        # would be more robust.
        assert retcode == 0, 'create jsonrpc failed'

    def __del__(self):
        # Guard against partially constructed instances: if __init__ raised
        # before assigning self._p (or never ran), the attribute is missing
        # and an unguarded access would raise AttributeError at GC time.
        if getattr(self, '_p', None) is not None:
            dll.cro_destroy_jsonrpc(self._p)

    def call(self, req):
        """Send one JSON-RPC request string; return the raw response bytes."""
        # Fixed 100 KiB response buffer passed to the native side along with
        # its size.
        rsp = ctypes.create_string_buffer(102400)
        retcode = dll.cro_run_jsonrpc(self._p, req.encode(), rsp, len(rsp), None)
        assert retcode == 0, rsp.value
        return rsp.value
if __name__ == '__main__':
import fire
fire.Fire(RpcBinding) | integration-tests/bot/chainbinding.py | import os
import sys
import ctypes
from decouple import config
target_dir = config('CARGO_TARGET_DIR', os.path.join(os.path.dirname(__file__), '../../target'))
build_profile = config('BUILD_PROFILE', 'debug')
ext = 'dylib' if sys.platform == 'darwin' else 'so'
dll = ctypes.cdll.LoadLibrary(os.path.join(target_dir, '%s/libcro_clib.%s' % (build_profile, ext)))
dll.cro_jsonrpc_call.argtypes = dll.cro_jsonrpc_call_mock.argtypes = [
ctypes.c_char_p, # storage_dir
ctypes.c_char_p, # websocket_url
ctypes.c_char, # network id
ctypes.c_char_p, # request
ctypes.c_char_p, # buf
ctypes.c_size_t, # buf_size
ctypes.c_void_p, # progress callback
ctypes.c_void_p, # user_data
]
dll.cro_jsonrpc_call.restype = dll.cro_jsonrpc_call_mock.restype = ctypes.c_int
dll.cro_create_jsonrpc.argtypes = dll.cro_create_mock_jsonrpc.argtypes = [
ctypes.POINTER(ctypes.c_void_p), # rpc_out
ctypes.c_char_p, # storage_dir
ctypes.c_char_p, # websocket_url
ctypes.c_char, # network_id
ctypes.c_void_p, # progress callback
]
dll.cro_create_jsonrpc.restype = dll.cro_create_mock_jsonrpc.restype = ctypes.c_int
dll.cro_run_jsonrpc.argtypes = [
ctypes.c_void_p, # jsonrpc
ctypes.c_char_p, # request
ctypes.c_char_p, # buf
ctypes.c_size_t, # buf_size
ctypes.c_void_p, # user_data
]
dll.cro_run_jsonrpc.restype = ctypes.c_int
dll.cro_destroy_jsonrpc.argtypes = [
ctypes.c_void_p, # jsonrpc
]
dll.cro_destroy_jsonrpc.restype = ctypes.c_int
class RpcBinding:
def __init__(self, storage, tendermint_ws, network_id=0xab, mock_mode=False):
create_jsonrpc = dll.cro_create_mock_jsonrpc if mock_mode else dll.cro_create_jsonrpc
self._p = ctypes.c_void_p()
retcode = create_jsonrpc(ctypes.byref(self._p), storage.encode(), tendermint_ws.encode(), network_id, None)
assert retcode == 0, 'create jsonrpc failed'
def __del__(self):
dll.cro_destroy_jsonrpc(self._p)
def call(self, req):
rsp = ctypes.create_string_buffer(102400)
retcode = dll.cro_run_jsonrpc(self._p, req.encode(), rsp, len(rsp), None)
assert retcode == 0, rsp.value
return rsp.value
if __name__ == '__main__':
import fire
fire.Fire(RpcBinding) | 0.188548 | 0.064418 |
import pytest
import datetime
from dateutil.tz import tzoffset
from decimal import Decimal
from pyticketswitch.mixins import JSONMixin, PaginationMixin, SeatPricingMixin
class TestJSONMixin:
    """Tests for JSONMixin's serialisation hooks (__jsondict__, as_json,
    as_dict_for_json).

    A minimal one-attribute carrier class Foo(bar) is used so that each test
    controls exactly one value and inspects how it is rendered.
    """

    # Zero-offset timezone so datetime serialisation output is deterministic.
    ZULU = tzoffset('ZULU', 0)

    class Foo(JSONMixin, object):
        # Single-attribute serialisation subject.
        def __init__(self, bar):
            self.bar = bar

    # --- None / empty handling ------------------------------------------
    def test_with_none(self):
        # None attributes are omitted by default.
        obj = self.Foo(None)
        result = obj.__jsondict__()
        assert result == {}

    def test_with_empty(self):
        # Empty containers are omitted by default.
        obj = self.Foo([])
        result = obj.__jsondict__()
        assert result == {}

    def test_with_none_with_hide_none_false(self):
        # hide_none=False keeps None attributes as explicit nulls.
        obj = self.Foo(None)
        result = obj.__jsondict__(hide_none=False)
        assert result == {'bar': None}

    def test_with_empty_with_hide_none_false(self):
        # hide_none=False does not affect empty containers: still hidden.
        obj = self.Foo([])
        result = obj.__jsondict__(hide_none=False)
        assert result == {}

    def test_with_none_with_hide_empty_false(self):
        # hide_empty=False does not affect None attributes: still hidden.
        obj = self.Foo(None)
        result = obj.__jsondict__(hide_empty=False)
        assert result == {}

    def test_with_empty_with_hide_empty_false(self):
        # hide_empty=False keeps empty containers.
        obj = self.Foo([])
        result = obj.__jsondict__(hide_empty=False)
        assert result == {'bar': []}

    # --- scalar and temporal values -------------------------------------
    def test_normal_object(self):
        obj = self.Foo('hello world!')
        result = obj.__jsondict__()
        assert result == {'bar': 'hello world!'}

    def test_datetime(self):
        # Aware datetimes serialise to ISO-8601 with UTC offset.
        date = datetime.datetime(2017, 1, 25, 12, 39, 40, tzinfo=self.ZULU)
        obj = self.Foo(date)
        result = obj.__jsondict__()
        assert result == {'bar': '2017-01-25T12:39:40+00:00'}

    def test_date(self):
        # Plain dates serialise to ISO-8601 date strings.
        obj = self.Foo(datetime.date(2017, 1, 25))
        result = obj.__jsondict__()
        assert result == {'bar': '2017-01-25'}

    # --- nested objects and containers ----------------------------------
    def test_sub_json(self):
        # Nested JSONMixin objects serialise recursively.
        subobj = self.Foo('hello world!')
        obj = self.Foo(subobj)
        result = obj.__jsondict__()
        assert result == {'bar': {'bar': 'hello world!'}}

    def test_list_of_normals(self):
        obj = self.Foo(['hello', 'world!'])
        result = obj.__jsondict__()
        assert result == {'bar': ['hello', 'world!']}

    def test_dict_of_normals(self):
        obj = self.Foo({'first': 'hello', 'second': 'world!'})
        result = obj.__jsondict__()
        assert result == {'bar': {'first': 'hello', 'second': 'world!'}}

    def test_list_of_subobjs(self):
        # Recursion applies inside lists as well.
        obj = self.Foo([self.Foo('hello'), self.Foo('world!')])
        result = obj.__jsondict__()
        assert result == {'bar': [{'bar': 'hello'}, {'bar': 'world!'}]}

    def test_dict_of_subobjs(self):
        # ...and inside dict values.
        obj = self.Foo({
            'first': self.Foo('hello'),
            'second': self.Foo('world!')
        })
        result = obj.__jsondict__()
        assert result == {
            'bar': {
                'first': {'bar': 'hello'},
                'second': {'bar': 'world!'}
            }
        }

    # --- string output helpers ------------------------------------------
    def test_decimal_as_json(self):
        # Decimals are rendered as bare JSON numbers, not strings.
        obj = self.Foo(Decimal('44.1234'))
        result = obj.as_json()
        assert result == '{"bar": 44.1234}'

    def test_as_json(self):
        obj = self.Foo('hello world!')
        result = obj.as_json()
        assert result == '{"bar": "hello world!"}'

    def test_as_dict_for_json(self):
        obj = self.Foo('hello world!')
        result = obj.as_dict_for_json()
        assert result == {'bar': 'hello world!'}
class TestPaginationMixin:
    """Tests for PaginationMixin's API-data parsing and is_paginated logic."""

    def test_from_api_data(self):
        data = {
            'results': {
                "paging_status": {
                    "page_length": 50,
                    "page_number": 2,
                    "pages_remaining": 3,
                    "results_remaining": 150,
                    "total_unpaged_results": 250,
                }
            }
        }

        # Stand-in base class so the mixin has a concrete from_api_data to
        # cooperate with.
        class FakeBaseMeta(object):
            @classmethod
            def from_api_data(cls, data):
                return cls()

        class FakeMeta(PaginationMixin, FakeBaseMeta):
            pass

        meta = FakeMeta.from_api_data(data)
        assert meta.page_length == 50
        assert meta.page_number == 2
        assert meta.pages_remaining == 3
        assert meta.results_remaining == 150
        # The API's 'total_unpaged_results' is exposed as total_results.
        assert meta.total_results == 250

    def test_is_paginated_pages_remaining(self):
        # More pages ahead -> paginated.
        meta = PaginationMixin(
            page_length=50,
            page_number=1,
            pages_remaining=15,
            results_remaining=700,
            total_results=750,
        )
        assert meta.is_paginated() is True

    def test_is_paginated_with_less_results_than_page(self):
        # Everything fits in one page -> not paginated.
        meta = PaginationMixin(
            page_length=50,
            page_number=1,
            pages_remaining=0,
            results_remaining=0,
            total_results=30,
        )
        assert meta.is_paginated() is False

    def test_is_paginated_on_last_page(self):
        # No pages remaining, but we are on page 15 of a multi-page result
        # set -> still considered paginated.
        meta = PaginationMixin(
            page_length=50,
            page_number=15,
            pages_remaining=0,
            results_remaining=0,
            total_results=750,
        )
        assert meta.is_paginated() is True

    def test_is_paginated_when_something_is_fucked(self):
        # All-None pagination fields must degrade to False, not raise.
        # NOTE(review): consider renaming this test to something professional.
        meta = PaginationMixin(
            page_length=None,
            page_number=None,
            pages_remaining=None,
            results_remaining=None,
            total_results=None,
        )
        assert meta.is_paginated() is False

    def test_from_api_data_when_not_inside_results(self):
        """Test from_api_data works when the data is not in `results`

        Some calls (e.g. related_events.v1) have multiple pagination data
        sections inside dicts that are not keyed as 'results'. In this case,
        we can pass the subdict they are in directly, so we should make sure
        the method can find the data in the base dictionary.
        """
        # state
        data = {
            'misnamed_results': {
                'paging_status': {
                    'page_length': 50,
                    'page_number': 2,
                    'pages_remaining': 3,
                    'results_remaining': 150,
                    'total_unpaged_results': 250,
                }
            }
        }

        class FakeBaseMeta(object):
            @classmethod
            def from_api_data(cls, data):
                return cls()

        class FakeMeta(PaginationMixin, FakeBaseMeta):
            pass

        # result_key overrides the default 'results' lookup key.
        meta = FakeMeta.from_api_data(data, result_key='misnamed_results')
        assert meta.page_length == 50
        assert meta.page_number == 2
        assert meta.pages_remaining == 3
        assert meta.results_remaining == 150
        assert meta.total_results == 250
class TestSeatPricingMixin:
    """Tests for SeatPricingMixin price parsing and price combination."""

    def test_kwargs_from_api_data(self):
        data = {
            'sale_seatprice': 160,
            'sale_surcharge': 5.5,
            'non_offer_sale_seatprice': 200,
            'non_offer_sale_surcharge': 5.5,
        }
        kwargs = SeatPricingMixin.kwargs_from_api_data(data)
        # API 'sale_*' / 'non_offer_sale_*' fields map onto shorter kwargs.
        assert kwargs['seatprice'] == 160.00
        assert kwargs['surcharge'] == 5.5
        assert kwargs['non_offer_seatprice'] == 200
        assert kwargs['non_offer_surcharge'] == 5.5

    def test_combined_price(self):
        inst = SeatPricingMixin(seatprice=123.45, surcharge=6.78)
        assert inst.combined_price() == 130.23

    def test_combined_price_missing_prices(self):
        # Either component missing must trip the mixin's assertion.
        inst = SeatPricingMixin(seatprice=123.45)
        with pytest.raises(AssertionError):
            inst.combined_price()
        inst = SeatPricingMixin(surcharge=6.78)
        with pytest.raises(AssertionError):
            inst.combined_price()

    def test_combined_price_inexact_floats(self):
        # 1.1 + 2.2 != 3.3 in raw binary floats; the combined price must
        # still compare equal to 3.3.
        inst = SeatPricingMixin(seatprice=1.1, surcharge=2.2)
        assert inst.combined_price() == 3.3

    def test_combined_price_decimal(self):
        # Decimal inputs produce an exact Decimal result.
        inst = SeatPricingMixin(
            seatprice=Decimal('123.45'),
            surcharge=Decimal('6.78')
        )
        assert inst.combined_price() == Decimal('130.23')

    def test_non_offer_combined_price(self):
        inst = SeatPricingMixin(non_offer_seatprice=123.45,
                                non_offer_surcharge=6.78)
        assert inst.non_offer_combined_price() == 130.23

    def test_non_offer_combined_price_missing_prices(self):
        inst = SeatPricingMixin(non_offer_seatprice=123.45)
        with pytest.raises(AssertionError):
            inst.non_offer_combined_price()
        inst = SeatPricingMixin(non_offer_surcharge=6.78)
        with pytest.raises(AssertionError):
            inst.non_offer_combined_price()

    def test_non_offer_combined_price_inexact_floats(self):
        inst = SeatPricingMixin(non_offer_seatprice=1.1, non_offer_surcharge=2.2)
        assert inst.non_offer_combined_price() == 3.3
def test_non_offer_combined_price_decimal(self):
inst = SeatPricingMixin(
non_offer_seatprice=Decimal('123.45'),
non_offer_surcharge=Decimal('6.78')
)
assert inst.non_offer_combined_price() == Decimal('130.23') | tests/test_mixins.py | import pytest
import datetime
from dateutil.tz import tzoffset
from decimal import Decimal
from pyticketswitch.mixins import JSONMixin, PaginationMixin, SeatPricingMixin
class TestJSONMixin:
ZULU = tzoffset('ZULU', 0)
class Foo(JSONMixin, object):
def __init__(self, bar):
self.bar = bar
def test_with_none(self):
obj = self.Foo(None)
result = obj.__jsondict__()
assert result == {}
def test_with_empty(self):
obj = self.Foo([])
result = obj.__jsondict__()
assert result == {}
def test_with_none_with_hide_none_false(self):
obj = self.Foo(None)
result = obj.__jsondict__(hide_none=False)
assert result == {'bar': None}
def test_with_empty_with_hide_none_false(self):
obj = self.Foo([])
result = obj.__jsondict__(hide_none=False)
assert result == {}
def test_with_none_with_hide_empty_false(self):
obj = self.Foo(None)
result = obj.__jsondict__(hide_empty=False)
assert result == {}
def test_with_empty_with_hide_empty_false(self):
obj = self.Foo([])
result = obj.__jsondict__(hide_empty=False)
assert result == {'bar': []}
def test_normal_object(self):
obj = self.Foo('hello world!')
result = obj.__jsondict__()
assert result == {'bar': 'hello world!'}
def test_datetime(self):
date = datetime.datetime(2017, 1, 25, 12, 39, 40, tzinfo=self.ZULU)
obj = self.Foo(date)
result = obj.__jsondict__()
assert result == {'bar': '2017-01-25T12:39:40+00:00'}
def test_date(self):
obj = self.Foo(datetime.date(2017, 1, 25))
result = obj.__jsondict__()
assert result == {'bar': '2017-01-25'}
def test_sub_json(self):
subobj = self.Foo('hello world!')
obj = self.Foo(subobj)
result = obj.__jsondict__()
assert result == {'bar': {'bar': 'hello world!'}}
def test_list_of_normals(self):
obj = self.Foo(['hello', 'world!'])
result = obj.__jsondict__()
assert result == {'bar': ['hello', 'world!']}
def test_dict_of_normals(self):
obj = self.Foo({'first': 'hello', 'second': 'world!'})
result = obj.__jsondict__()
assert result == {'bar': {'first': 'hello', 'second': 'world!'}}
def test_list_of_subobjs(self):
obj = self.Foo([self.Foo('hello'), self.Foo('world!')])
result = obj.__jsondict__()
assert result == {'bar': [{'bar': 'hello'}, {'bar': 'world!'}]}
def test_dict_of_subobjs(self):
obj = self.Foo({
'first': self.Foo('hello'),
'second': self.Foo('world!')
})
result = obj.__jsondict__()
assert result == {
'bar': {
'first': {'bar': 'hello'},
'second': {'bar': 'world!'}
}
}
def test_decimal_as_json(self):
obj = self.Foo(Decimal('44.1234'))
result = obj.as_json()
assert result == '{"bar": 44.1234}'
def test_as_json(self):
obj = self.Foo('hello world!')
result = obj.as_json()
assert result == '{"bar": "hello world!"}'
def test_as_dict_for_json(self):
obj = self.Foo('hello world!')
result = obj.as_dict_for_json()
assert result == {'bar': 'hello world!'}
class TestPaginationMixin:
def test_from_api_data(self):
data = {
'results': {
"paging_status": {
"page_length": 50,
"page_number": 2,
"pages_remaining": 3,
"results_remaining": 150,
"total_unpaged_results": 250,
}
}
}
class FakeBaseMeta(object):
@classmethod
def from_api_data(cls, data):
return cls()
class FakeMeta(PaginationMixin, FakeBaseMeta):
pass
meta = FakeMeta.from_api_data(data)
assert meta.page_length == 50
assert meta.page_number == 2
assert meta.pages_remaining == 3
assert meta.results_remaining == 150
assert meta.total_results == 250
def test_is_paginated_pages_remaining(self):
meta = PaginationMixin(
page_length=50,
page_number=1,
pages_remaining=15,
results_remaining=700,
total_results=750,
)
assert meta.is_paginated() is True
def test_is_paginated_with_less_results_than_page(self):
meta = PaginationMixin(
page_length=50,
page_number=1,
pages_remaining=0,
results_remaining=0,
total_results=30,
)
assert meta.is_paginated() is False
def test_is_paginated_on_last_page(self):
meta = PaginationMixin(
page_length=50,
page_number=15,
pages_remaining=0,
results_remaining=0,
total_results=750,
)
assert meta.is_paginated() is True
def test_is_paginated_when_something_is_fucked(self):
meta = PaginationMixin(
page_length=None,
page_number=None,
pages_remaining=None,
results_remaining=None,
total_results=None,
)
assert meta.is_paginated() is False
def test_from_api_data_when_not_inside_results(self):
"""Test from_api_data works when the data is not in `results`
Some calls (e.g. related_events.v1) have multiple pagination data
sections inside dicts that are not keyed as 'results'. In this case,
we can pass the subdict they are in directly, so we should make sure
the method can find the data in the base dictionary.
"""
# state
data = {
'misnamed_results': {
'paging_status': {
'page_length': 50,
'page_number': 2,
'pages_remaining': 3,
'results_remaining': 150,
'total_unpaged_results': 250,
}
}
}
class FakeBaseMeta(object):
@classmethod
def from_api_data(cls, data):
return cls()
class FakeMeta(PaginationMixin, FakeBaseMeta):
pass
meta = FakeMeta.from_api_data(data, result_key='misnamed_results')
assert meta.page_length == 50
assert meta.page_number == 2
assert meta.pages_remaining == 3
assert meta.results_remaining == 150
assert meta.total_results == 250
class TestSeatPricingMixin:
def test_kwargs_from_api_data(self):
data = {
'sale_seatprice': 160,
'sale_surcharge': 5.5,
'non_offer_sale_seatprice': 200,
'non_offer_sale_surcharge': 5.5,
}
kwargs = SeatPricingMixin.kwargs_from_api_data(data)
assert kwargs['seatprice'] == 160.00
assert kwargs['surcharge'] == 5.5
assert kwargs['non_offer_seatprice'] == 200
assert kwargs['non_offer_surcharge'] == 5.5
def test_combined_price(self):
inst = SeatPricingMixin(seatprice=123.45, surcharge=6.78)
assert inst.combined_price() == 130.23
def test_combined_price_missing_prices(self):
inst = SeatPricingMixin(seatprice=123.45)
with pytest.raises(AssertionError):
inst.combined_price()
inst = SeatPricingMixin(surcharge=6.78)
with pytest.raises(AssertionError):
inst.combined_price()
def test_combined_price_inexact_floats(self):
inst = SeatPricingMixin(seatprice=1.1, surcharge=2.2)
assert inst.combined_price() == 3.3
def test_combined_price_decimal(self):
inst = SeatPricingMixin(
seatprice=Decimal('123.45'),
surcharge=Decimal('6.78')
)
assert inst.combined_price() == Decimal('130.23')
def test_non_offer_combined_price(self):
inst = SeatPricingMixin(non_offer_seatprice=123.45,
non_offer_surcharge=6.78)
assert inst.non_offer_combined_price() == 130.23
def test_non_offer_combined_price_missing_prices(self):
inst = SeatPricingMixin(non_offer_seatprice=123.45)
with pytest.raises(AssertionError):
inst.non_offer_combined_price()
inst = SeatPricingMixin(non_offer_surcharge=6.78)
with pytest.raises(AssertionError):
inst.non_offer_combined_price()
def test_non_offer_combined_price_inexact_floats(self):
inst = SeatPricingMixin(non_offer_seatprice=1.1, non_offer_surcharge=2.2)
assert inst.non_offer_combined_price() == 3.3
def test_non_offer_combined_price_decimal(self):
inst = SeatPricingMixin(
non_offer_seatprice=Decimal('123.45'),
non_offer_surcharge=Decimal('6.78')
)
assert inst.non_offer_combined_price() == Decimal('130.23') | 0.719186 | 0.458834 |
from hdmm.workload import *
from hdmm import templates
def __race1():
# single race only, two or more races aggregated
# binary encoding: 1 indicates particular race is checked
race1 = np.zeros((7, 64))
for i in range(6):
race1[i, 2**i] = 1.0
race1[6,:] = 1.0 - race1[0:6].sum(axis=0)
return Matrix(race1)
def __race2():
# all settings of race, k races for 1..6, two or more races
race2 = np.zeros((63+6+1, 64))
for i in range(1,64):
race2[i-1,i] = 1.0
ct = bin(i).count('1') # number of races
race2[62+ct, i] = 1.0
race2[63+6] = race2[64:63+6].sum(axis=0) # two or more races
return Matrix(race2)
def __white():
white = np.zeros((1, 64))
white[0,1] = 1.0
return Matrix(white)
def __isHispanic():
return Matrix(np.array([[1,0]]))
def __notHispanic():
return Matrix(np.array([[0,1]]))
def __adult():
adult = np.zeros((1, 115))
adult[0, 18:] = 1.0
return Matrix(adult)
def __age1():
ranges = [0, 5, 10, 15, 18, 20, 21, 22, 25, 30, 35, 40, 45, 50, 55, 60, 62, 65, 67, 70, 75, 80, 85, 115]
age1 = np.zeros((len(ranges)-1, 115))
for i in range(age1.shape[0]):
age1[i, ranges[i]:ranges[i+1]] = 1.0
return Matrix(age1)
def __age2():
age2 = np.zeros((20, 115))
age2[:20,:20] = np.eye(20)
return Matrix(age2)
def __age3():
# more range queries on age
age3 = np.zeros((103, 115))
age3[:100, :100] = np.eye(100)
age3[100,100:105] = 1.0
age3[101,105:110] = 1.0
age3[102,110:] = 1.0
return Matrix(age3)
def CensusSF1(geography=False):
P1 = Kron([Total(2), Total(2), Total(64), Total(17), Total(115)])
P3a = Kron([Total(2), Total(2), __race1(), Total(17), Total(115)])
P3b = P1
P4a = Kron([Total(2), Identity(2), Total(64), Total(17), Total(115)])
P4b = P1
P5a = Kron([Total(2), Identity(2), __race1(), Total(17), Total(115)])
P5b = Kron([Total(2), IdentityTotal(2), Total(64), Total(17), Total(115)])
P8a = Kron([Total(2), Total(2), __race2(), Total(17), Total(115)])
P8b = P1
P9a = Kron([Total(2), Identity(2), Total(64), Total(17), Total(115)])
P9b = Kron([Total(2), __notHispanic(), __race2(), Total(17), Total(115)])
P9c = P1
P10a = Kron([Total(2), Total(2), __race2(), Total(17), __adult()])
P10b = Kron([Total(2), Total(2), Total(64), Total(17), __adult()])
P11a = Kron([Total(2), Identity(2), Total(64), Total(17), __adult()])
P11b = Kron([Total(2), __notHispanic(), __race2(), Total(17), __adult()])
P11c = P10b
P12a = Kron([Identity(2), Total(2), Total(64), Total(17), __age1()])
P12b = Kron([IdentityTotal(2), Total(2), Total(64), Total(17), Total(115)])
P12_a = Kron([Identity(2), Total(2), __race1(), Total(17), __age1()])
P12_b = Kron([IdentityTotal(2), Total(2), __race1(), Total(17), Total(115)])
P12_c = Kron([Identity(2), __isHispanic(), Total(64), Total(17), __age1()])
P12_d = Kron([IdentityTotal(2), __isHispanic(), Total(64), Total(17), Total(115)])
P12_e = Kron([Identity(2), __notHispanic(), __white(), Total(17), __age1()])
P12_f = Kron([IdentityTotal(2), __notHispanic(), __white(), Total(17), Total(115)])
PCT12a = Kron([Identity(2), Total(2), Total(64), Total(17), __age3()])
PCT12b = P12b
PCT12_a = Kron([Identity(2), Total(2), __race1(), Total(17), __age3()])
PCT12_b = Kron([IdentityTotal(2), Total(2), __race1(), Total(17), Total(115)])
PCT12_c = Kron([Identity(2), __isHispanic(), Total(64), Total(17), __age3()])
PCT12_d = Kron([IdentityTotal(2), __isHispanic(), Total(64), Total(17), Total(115)])
PCT12_e = Kron([Identity(2), __notHispanic(), __race1(), Total(17), __age3()])
PCT12_f = Kron([IdentityTotal(2), __notHispanic(), __race1(), Total(17), Total(115)])
workloads = [P1,P3a,P3b,P4a,P4b,P5a,P5b,P8a,P8b,P9a,P9b,P9c,P10a,P10b,P11a,P11b,P11c,P12a,P12b,P12_a,P12_b,P12_c,P12_d,P12_e,P12_f,PCT12a,PCT12b,PCT12_a,PCT12_b,PCT12_c,PCT12_d,PCT12_e,PCT12_f]
if geography:
M = IdentityTotal(51)
workloads = [Kron(W.workloads + [M]) for W in workloads]
return Concat(workloads)
def CensusSF1Big(geography=True, reallybig=False):
M = IdentityTotal(51)
I = Identity(51)
T = Total(51)
sf1 = CensusSF1(reallybig and geography)
geography = geography and not reallybig
workloads = []
for sub in sf1.workloads:
matrices = [S.W for S in sub.workloads]
for combo in itertools.product(*matrices):
subs = [Matrix(q[None,:]) for q in combo]
if geography:
workloads.append(Kron(subs + [I]))
workloads.append(Kron(subs + [T]))
else:
workloads.append(Kron(subs))
return Concat(workloads)
def CensusSF1Approx():
R1 = Total(64) + __race1()
R2 = Total(64) + __race2()
A1 = Total(115) + __age1()
A3 = Total(115) + __age3()
P1 = Kron([Total(2), Total(2), Total(64), Total(17), Total(115)])
P3 = Kron([Total(2), Total(2), R1, Total(17), Total(115)])
P4 = Kron([Total(2), IdentityTotal(2), Total(64), Total(17), Total(115)])
P5 = Kron([Total(2), IdentityTotal(2), R1, Total(17), Total(115)])
P8 = Kron([Total(2), Total(2), R2, Total(17), Total(115)])
P9 = Kron([Total(2), IdentityTotal(2), R2, Total(17), Total(115)])
P10 = Kron([Total(2), Total(2), R2, Total(17), __adult()])
P11 = Kron([Total(2), IdentityTotal(2), R2, Total(17), __adult()])
P12 = Kron([IdentityTotal(2), IdentityTotal(2), R1, Total(17), A1])
PCT12 = Kron([IdentityTotal(2), IdentityTotal(2), R1, Total(17), A3])
return Concat([P1, P3, P4, P5, P8, P9, P10, P11, P12, PCT12])
def CensusSF1Projected():
sf1 = CensusSF1()
sub = [None]*5
for i in range(5):
sub[i] = sf1.project_and_merge([[i]])
return Kron(sub)
def CensusPL94():
P1 = Kron([Total(2), Total(2), Total(64), Total(17), Total(115)])
P8a = Kron([Total(2), Total(2), __race2(), Total(17), Total(115)])
P8b = P1
P9a = Kron([Total(2), Identity(2), Total(64), Total(17), Total(115)])
P9b = Kron([Total(2), __notHispanic(), __race2(), Total(17), Total(115)])
P9c = P1
P10a = Kron([Total(2), Total(2), __race2(), Total(17), __adult()])
P10b = Kron([Total(2), Total(2), Total(64), Total(17), __adult()])
P11a = Kron([Total(2), Identity(2), Total(64), Total(17), __adult()])
P11b = Kron([Total(2), __notHispanic(), __race2(), Total(17), __adult()])
P11c = P10b
return Concat([P8a,P8b,P9a,P9b,P9c,P10a,P10b,P11a,P11b,P11c])
def CensusSF1_split(geography=False):
sf1 = CensusSF1(geography)
return Concat(sf1.workloads[:18]), Concat(sf1.workloads[18:])
if __name__ == '__main__':
sf1 = CensusSF1()
ps = [1,1,8,1,10]
template = templates.KronPIdentity(sf1.domain, ps)
template.optimize(sf1)
strategy = [sub.A for sub in template.strategies]
print(sf1.expected_error(strategy)) # total variance of HDMM
identity = [np.eye(n) for n in sf1.domain]
identity[3] = np.ones((1,17))
print(sf1.expected_error(identity)) # total variance of identity | hdmm/examples/census.py | from hdmm.workload import *
from hdmm import templates
def __race1():
# single race only, two or more races aggregated
# binary encoding: 1 indicates particular race is checked
race1 = np.zeros((7, 64))
for i in range(6):
race1[i, 2**i] = 1.0
race1[6,:] = 1.0 - race1[0:6].sum(axis=0)
return Matrix(race1)
def __race2():
# all settings of race, k races for 1..6, two or more races
race2 = np.zeros((63+6+1, 64))
for i in range(1,64):
race2[i-1,i] = 1.0
ct = bin(i).count('1') # number of races
race2[62+ct, i] = 1.0
race2[63+6] = race2[64:63+6].sum(axis=0) # two or more races
return Matrix(race2)
def __white():
white = np.zeros((1, 64))
white[0,1] = 1.0
return Matrix(white)
def __isHispanic():
return Matrix(np.array([[1,0]]))
def __notHispanic():
return Matrix(np.array([[0,1]]))
def __adult():
adult = np.zeros((1, 115))
adult[0, 18:] = 1.0
return Matrix(adult)
def __age1():
ranges = [0, 5, 10, 15, 18, 20, 21, 22, 25, 30, 35, 40, 45, 50, 55, 60, 62, 65, 67, 70, 75, 80, 85, 115]
age1 = np.zeros((len(ranges)-1, 115))
for i in range(age1.shape[0]):
age1[i, ranges[i]:ranges[i+1]] = 1.0
return Matrix(age1)
def __age2():
age2 = np.zeros((20, 115))
age2[:20,:20] = np.eye(20)
return Matrix(age2)
def __age3():
# more range queries on age
age3 = np.zeros((103, 115))
age3[:100, :100] = np.eye(100)
age3[100,100:105] = 1.0
age3[101,105:110] = 1.0
age3[102,110:] = 1.0
return Matrix(age3)
def CensusSF1(geography=False):
P1 = Kron([Total(2), Total(2), Total(64), Total(17), Total(115)])
P3a = Kron([Total(2), Total(2), __race1(), Total(17), Total(115)])
P3b = P1
P4a = Kron([Total(2), Identity(2), Total(64), Total(17), Total(115)])
P4b = P1
P5a = Kron([Total(2), Identity(2), __race1(), Total(17), Total(115)])
P5b = Kron([Total(2), IdentityTotal(2), Total(64), Total(17), Total(115)])
P8a = Kron([Total(2), Total(2), __race2(), Total(17), Total(115)])
P8b = P1
P9a = Kron([Total(2), Identity(2), Total(64), Total(17), Total(115)])
P9b = Kron([Total(2), __notHispanic(), __race2(), Total(17), Total(115)])
P9c = P1
P10a = Kron([Total(2), Total(2), __race2(), Total(17), __adult()])
P10b = Kron([Total(2), Total(2), Total(64), Total(17), __adult()])
P11a = Kron([Total(2), Identity(2), Total(64), Total(17), __adult()])
P11b = Kron([Total(2), __notHispanic(), __race2(), Total(17), __adult()])
P11c = P10b
P12a = Kron([Identity(2), Total(2), Total(64), Total(17), __age1()])
P12b = Kron([IdentityTotal(2), Total(2), Total(64), Total(17), Total(115)])
P12_a = Kron([Identity(2), Total(2), __race1(), Total(17), __age1()])
P12_b = Kron([IdentityTotal(2), Total(2), __race1(), Total(17), Total(115)])
P12_c = Kron([Identity(2), __isHispanic(), Total(64), Total(17), __age1()])
P12_d = Kron([IdentityTotal(2), __isHispanic(), Total(64), Total(17), Total(115)])
P12_e = Kron([Identity(2), __notHispanic(), __white(), Total(17), __age1()])
P12_f = Kron([IdentityTotal(2), __notHispanic(), __white(), Total(17), Total(115)])
PCT12a = Kron([Identity(2), Total(2), Total(64), Total(17), __age3()])
PCT12b = P12b
PCT12_a = Kron([Identity(2), Total(2), __race1(), Total(17), __age3()])
PCT12_b = Kron([IdentityTotal(2), Total(2), __race1(), Total(17), Total(115)])
PCT12_c = Kron([Identity(2), __isHispanic(), Total(64), Total(17), __age3()])
PCT12_d = Kron([IdentityTotal(2), __isHispanic(), Total(64), Total(17), Total(115)])
PCT12_e = Kron([Identity(2), __notHispanic(), __race1(), Total(17), __age3()])
PCT12_f = Kron([IdentityTotal(2), __notHispanic(), __race1(), Total(17), Total(115)])
workloads = [P1,P3a,P3b,P4a,P4b,P5a,P5b,P8a,P8b,P9a,P9b,P9c,P10a,P10b,P11a,P11b,P11c,P12a,P12b,P12_a,P12_b,P12_c,P12_d,P12_e,P12_f,PCT12a,PCT12b,PCT12_a,PCT12_b,PCT12_c,PCT12_d,PCT12_e,PCT12_f]
if geography:
M = IdentityTotal(51)
workloads = [Kron(W.workloads + [M]) for W in workloads]
return Concat(workloads)
def CensusSF1Big(geography=True, reallybig=False):
M = IdentityTotal(51)
I = Identity(51)
T = Total(51)
sf1 = CensusSF1(reallybig and geography)
geography = geography and not reallybig
workloads = []
for sub in sf1.workloads:
matrices = [S.W for S in sub.workloads]
for combo in itertools.product(*matrices):
subs = [Matrix(q[None,:]) for q in combo]
if geography:
workloads.append(Kron(subs + [I]))
workloads.append(Kron(subs + [T]))
else:
workloads.append(Kron(subs))
return Concat(workloads)
def CensusSF1Approx():
R1 = Total(64) + __race1()
R2 = Total(64) + __race2()
A1 = Total(115) + __age1()
A3 = Total(115) + __age3()
P1 = Kron([Total(2), Total(2), Total(64), Total(17), Total(115)])
P3 = Kron([Total(2), Total(2), R1, Total(17), Total(115)])
P4 = Kron([Total(2), IdentityTotal(2), Total(64), Total(17), Total(115)])
P5 = Kron([Total(2), IdentityTotal(2), R1, Total(17), Total(115)])
P8 = Kron([Total(2), Total(2), R2, Total(17), Total(115)])
P9 = Kron([Total(2), IdentityTotal(2), R2, Total(17), Total(115)])
P10 = Kron([Total(2), Total(2), R2, Total(17), __adult()])
P11 = Kron([Total(2), IdentityTotal(2), R2, Total(17), __adult()])
P12 = Kron([IdentityTotal(2), IdentityTotal(2), R1, Total(17), A1])
PCT12 = Kron([IdentityTotal(2), IdentityTotal(2), R1, Total(17), A3])
return Concat([P1, P3, P4, P5, P8, P9, P10, P11, P12, PCT12])
def CensusSF1Projected():
sf1 = CensusSF1()
sub = [None]*5
for i in range(5):
sub[i] = sf1.project_and_merge([[i]])
return Kron(sub)
def CensusPL94():
P1 = Kron([Total(2), Total(2), Total(64), Total(17), Total(115)])
P8a = Kron([Total(2), Total(2), __race2(), Total(17), Total(115)])
P8b = P1
P9a = Kron([Total(2), Identity(2), Total(64), Total(17), Total(115)])
P9b = Kron([Total(2), __notHispanic(), __race2(), Total(17), Total(115)])
P9c = P1
P10a = Kron([Total(2), Total(2), __race2(), Total(17), __adult()])
P10b = Kron([Total(2), Total(2), Total(64), Total(17), __adult()])
P11a = Kron([Total(2), Identity(2), Total(64), Total(17), __adult()])
P11b = Kron([Total(2), __notHispanic(), __race2(), Total(17), __adult()])
P11c = P10b
return Concat([P8a,P8b,P9a,P9b,P9c,P10a,P10b,P11a,P11b,P11c])
def CensusSF1_split(geography=False):
sf1 = CensusSF1(geography)
return Concat(sf1.workloads[:18]), Concat(sf1.workloads[18:])
if __name__ == '__main__':
sf1 = CensusSF1()
ps = [1,1,8,1,10]
template = templates.KronPIdentity(sf1.domain, ps)
template.optimize(sf1)
strategy = [sub.A for sub in template.strategies]
print(sf1.expected_error(strategy)) # total variance of HDMM
identity = [np.eye(n) for n in sf1.domain]
identity[3] = np.ones((1,17))
print(sf1.expected_error(identity)) # total variance of identity | 0.428233 | 0.513181 |
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
'''
Mobilenet SSD device side decoding demo
The "mobilenet-ssd" model is a Single-Shot multibox Detection (SSD) network intended
to perform object detection. This model is implemented using the Caffe* framework.
For details about this model, check out the repository <https://github.com/chuanqi305/MobileNet-SSD>.
'''
# MobilenetSSD label texts
label_map = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
# Get argument first
mobilenet_path = str((Path(__file__).parent / Path('models/mobilenet.blob')).resolve().absolute())
if len(sys.argv) > 1:
mobilenet_path = sys.argv[1]
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)
cam_rgb.setFps(20)
# Define a neural network that will make predictions based on the source frames
detectionNetwork = pipeline.createMobileNetDetectionNetwork()
detectionNetwork.setConfidenceThreshold(0.5)
detectionNetwork.setBlobPath(mobilenet_path)
#detectionNetwork.setNumInferenceThreads(2) # limit inference to run multiple networks simultaneously
detectionNetwork.input.setBlocking(False)
cam_rgb.preview.link(detectionNetwork.input)
# Create outputs
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
detectionNetwork.passthrough.link(xout_rgb.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("rgb_nn")
detectionNetwork.out.link(xout_nn.input)
# Define 2 more sources
cam_right = pipeline.createMonoCamera()
cam_right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
cam_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
cam_left = pipeline.createMonoCamera()
cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
cam_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
# resize the mono images to 300x300 for the nn
manip_right = pipeline.createImageManip()
manip_right.initialConfig.setResize(300, 300)
manip_right.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
cam_right.out.link(manip_right.inputImage)
manip_left = pipeline.createImageManip()
manip_left.initialConfig.setResize(300, 300)
manip_left.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
cam_left.out.link(manip_left.inputImage)
# 2 more networks
detection_right = pipeline.createMobileNetDetectionNetwork()
detection_right.setConfidenceThreshold(0.5)
detection_right.setBlobPath(mobilenet_path)
detection_right.input.setBlocking(False)
manip_right.out.link(detection_right.input)
detection_left = pipeline.createMobileNetDetectionNetwork()
detection_left.setConfidenceThreshold(0.5)
detection_left.setBlobPath(mobilenet_path)
detection_left.input.setBlocking(False)
manip_left.out.link(detection_left.input)
# 4 more ouputs (image and data)
xout_right = pipeline.createXLinkOut()
xout_right.setStreamName("right")
detection_right.passthrough.link(xout_right.input)
xout_left = pipeline.createXLinkOut()
xout_left.setStreamName("left")
detection_left.passthrough.link(xout_left.input)
xout_nn_right = pipeline.createXLinkOut()
xout_nn_right.setStreamName("right_nn")
detection_right.out.link(xout_nn_right.input)
xout_nn_left = pipeline.createXLinkOut()
xout_nn_left.setStreamName("left_nn")
detection_left.out.link(xout_nn_left.input)
# Pipeline defined, now the device is connected to
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
q_nn = device.getOutputQueue(name="rgb_nn", maxSize=4, blocking=False)
q_right = device.getOutputQueue(name="right", maxSize=4, blocking=False)
q_nn_right = device.getOutputQueue(name="right_nn", maxSize=4, blocking=False)
q_left = device.getOutputQueue(name="left", maxSize=4, blocking=False)
q_nn_left = device.getOutputQueue(name="left_nn", maxSize=4, blocking=False)
frame = None
frame_r = None
frame_l = None
bboxes = []
bboxes_r = []
bboxes_l = []
while True:
in_rgb = q_rgb.get()
in_nn = q_nn.get()
in_right = q_right.get()
in_nn_right = q_nn_right.get()
in_left = q_left.get()
in_nn_left = q_nn_left.get()
if in_rgb is not None:
shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
frame = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame = np.ascontiguousarray(frame)
if in_nn is not None:
bboxes = in_nn.detections
if in_right is not None:
shape = (3, in_right.getHeight(), in_right.getWidth())
frame_r = in_right.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame_r = np.ascontiguousarray(frame_r)
if in_nn_right is not None:
bboxes_r = in_nn_right.detections
if in_left is not None:
shape = (3, in_left.getHeight(), in_left.getWidth())
frame_l = in_left.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame_l = np.ascontiguousarray(frame_l)
if in_nn_left is not None:
bboxes_l = in_nn_left.detections
color = (255, 255, 255)
if frame is not None:
height = frame.shape[0]
width = frame.shape[1]
for bbox in bboxes:
x1 = int(bbox.xmin * width)
x2 = int(bbox.xmax * width)
y1 = int(bbox.ymin * height)
y2 = int(bbox.ymax * height)
try:
label = label_map[bbox.label]
except:
label = bbox.label
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
#cv2.putText(frame, "{:.2f}".format(bbox.confidence*100), (x1 + 10, y1 + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
cv2.imshow("rgb", frame)
if frame_r is not None:
height = frame_r.shape[0]
width = frame_r.shape[1]
for bbox in bboxes_r:
x1 = int(bbox.xmin * width)
x2 = int(bbox.xmax * width)
y1 = int(bbox.ymin * height)
y2 = int(bbox.ymax * height)
try:
label = label_map[bbox.label]
except:
label = bbox.label
cv2.putText(frame_r, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame_r, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
cv2.imshow("right", frame_r)
if frame_l is not None:
height = frame_l.shape[0]
width = frame_l.shape[1]
for bbox in bboxes_l:
x1 = int(bbox.xmin * width)
x2 = int(bbox.xmax * width)
y1 = int(bbox.ymin * height)
y2 = int(bbox.ymax * height)
try:
label = label_map[bbox.label]
except:
label = bbox.label
cv2.putText(frame_l, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame_l, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
cv2.imshow("left", frame_l)
if cv2.waitKey(1) == ord('q'):
break | code/02_tripple_mobilenet.py |
from pathlib import Path
import sys
import cv2
import depthai as dai
import numpy as np
import time
'''
Mobilenet SSD device side decoding demo
The "mobilenet-ssd" model is a Single-Shot multibox Detection (SSD) network intended
to perform object detection. This model is implemented using the Caffe* framework.
For details about this model, check out the repository <https://github.com/chuanqi305/MobileNet-SSD>.
'''
# MobilenetSSD label texts
label_map = ["background", "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat", "chair", "cow",
"diningtable", "dog", "horse", "motorbike", "person", "pottedplant", "sheep", "sofa", "train", "tvmonitor"]
# Get argument first
mobilenet_path = str((Path(__file__).parent / Path('models/mobilenet.blob')).resolve().absolute())
if len(sys.argv) > 1:
mobilenet_path = sys.argv[1]
# Start defining a pipeline
pipeline = dai.Pipeline()
# Define a source - color camera
cam_rgb = pipeline.createColorCamera()
cam_rgb.setPreviewSize(300, 300)
cam_rgb.setInterleaved(False)
cam_rgb.setFps(20)
# Define a neural network that will make predictions based on the source frames
detectionNetwork = pipeline.createMobileNetDetectionNetwork()
detectionNetwork.setConfidenceThreshold(0.5)
detectionNetwork.setBlobPath(mobilenet_path)
#detectionNetwork.setNumInferenceThreads(2) # limit inference to run multiple networks simultaneously
detectionNetwork.input.setBlocking(False)
cam_rgb.preview.link(detectionNetwork.input)
# Create outputs
xout_rgb = pipeline.createXLinkOut()
xout_rgb.setStreamName("rgb")
detectionNetwork.passthrough.link(xout_rgb.input)
xout_nn = pipeline.createXLinkOut()
xout_nn.setStreamName("rgb_nn")
detectionNetwork.out.link(xout_nn.input)
# Define 2 more sources
cam_right = pipeline.createMonoCamera()
cam_right.setBoardSocket(dai.CameraBoardSocket.RIGHT)
cam_right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
cam_left = pipeline.createMonoCamera()
cam_left.setBoardSocket(dai.CameraBoardSocket.LEFT)
cam_left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P)
# resize the mono images to 300x300 for the nn
manip_right = pipeline.createImageManip()
manip_right.initialConfig.setResize(300, 300)
manip_right.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
cam_right.out.link(manip_right.inputImage)
manip_left = pipeline.createImageManip()
manip_left.initialConfig.setResize(300, 300)
manip_left.initialConfig.setFrameType(dai.RawImgFrame.Type.BGR888p)
cam_left.out.link(manip_left.inputImage)
# 2 more networks
detection_right = pipeline.createMobileNetDetectionNetwork()
detection_right.setConfidenceThreshold(0.5)
detection_right.setBlobPath(mobilenet_path)
detection_right.input.setBlocking(False)
manip_right.out.link(detection_right.input)
detection_left = pipeline.createMobileNetDetectionNetwork()
detection_left.setConfidenceThreshold(0.5)
detection_left.setBlobPath(mobilenet_path)
detection_left.input.setBlocking(False)
manip_left.out.link(detection_left.input)
# 4 more ouputs (image and data)
xout_right = pipeline.createXLinkOut()
xout_right.setStreamName("right")
detection_right.passthrough.link(xout_right.input)
xout_left = pipeline.createXLinkOut()
xout_left.setStreamName("left")
detection_left.passthrough.link(xout_left.input)
xout_nn_right = pipeline.createXLinkOut()
xout_nn_right.setStreamName("right_nn")
detection_right.out.link(xout_nn_right.input)
xout_nn_left = pipeline.createXLinkOut()
xout_nn_left.setStreamName("left_nn")
detection_left.out.link(xout_nn_left.input)
# Pipeline defined, now the device is connected to
with dai.Device(pipeline) as device:
# Start pipeline
device.startPipeline()
# Output queues will be used to get the rgb frames and nn data from the outputs defined above
q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
q_nn = device.getOutputQueue(name="rgb_nn", maxSize=4, blocking=False)
q_right = device.getOutputQueue(name="right", maxSize=4, blocking=False)
q_nn_right = device.getOutputQueue(name="right_nn", maxSize=4, blocking=False)
q_left = device.getOutputQueue(name="left", maxSize=4, blocking=False)
q_nn_left = device.getOutputQueue(name="left_nn", maxSize=4, blocking=False)
frame = None
frame_r = None
frame_l = None
bboxes = []
bboxes_r = []
bboxes_l = []
while True:
in_rgb = q_rgb.get()
in_nn = q_nn.get()
in_right = q_right.get()
in_nn_right = q_nn_right.get()
in_left = q_left.get()
in_nn_left = q_nn_left.get()
if in_rgb is not None:
shape = (3, in_rgb.getHeight(), in_rgb.getWidth())
frame = in_rgb.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame = np.ascontiguousarray(frame)
if in_nn is not None:
bboxes = in_nn.detections
if in_right is not None:
shape = (3, in_right.getHeight(), in_right.getWidth())
frame_r = in_right.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame_r = np.ascontiguousarray(frame_r)
if in_nn_right is not None:
bboxes_r = in_nn_right.detections
if in_left is not None:
shape = (3, in_left.getHeight(), in_left.getWidth())
frame_l = in_left.getData().reshape(shape).transpose(1, 2, 0).astype(np.uint8)
frame_l = np.ascontiguousarray(frame_l)
if in_nn_left is not None:
bboxes_l = in_nn_left.detections
color = (255, 255, 255)
if frame is not None:
height = frame.shape[0]
width = frame.shape[1]
for bbox in bboxes:
x1 = int(bbox.xmin * width)
x2 = int(bbox.xmax * width)
y1 = int(bbox.ymin * height)
y2 = int(bbox.ymax * height)
try:
label = label_map[bbox.label]
except:
label = bbox.label
cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
#cv2.putText(frame, "{:.2f}".format(bbox.confidence*100), (x1 + 10, y1 + 40), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
cv2.imshow("rgb", frame)
if frame_r is not None:
height = frame_r.shape[0]
width = frame_r.shape[1]
for bbox in bboxes_r:
x1 = int(bbox.xmin * width)
x2 = int(bbox.xmax * width)
y1 = int(bbox.ymin * height)
y2 = int(bbox.ymax * height)
try:
label = label_map[bbox.label]
except:
label = bbox.label
cv2.putText(frame_r, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame_r, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
cv2.imshow("right", frame_r)
if frame_l is not None:
height = frame_l.shape[0]
width = frame_l.shape[1]
for bbox in bboxes_l:
x1 = int(bbox.xmin * width)
x2 = int(bbox.xmax * width)
y1 = int(bbox.ymin * height)
y2 = int(bbox.ymax * height)
try:
label = label_map[bbox.label]
except:
label = bbox.label
cv2.putText(frame_l, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, color)
cv2.rectangle(frame_l, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX)
cv2.imshow("left", frame_l)
if cv2.waitKey(1) == ord('q'):
break | 0.633864 | 0.333965 |
import sys
import unittest
import pybullet
from qibullet import SimulationManager
from qibullet import NaoVirtual, PepperVirtual, RomeoVirtual
from qibullet import Camera, CameraRgb, CameraDepth, CameraResolution
class CameraTest(unittest.TestCase):
"""
Unittests for virtual cameras (virtual class, don't use directly)
"""
def test_camera_robot_model(self):
"""
Ensure that the robot model of the camera and the model of the robot
are the same
"""
for camera in CameraTest.robot.camera_dict.values():
self.assertEqual(
camera.getRobotModel(),
CameraTest.robot.getRobotModel())
def test_subscribe_camera(self):
"""
Test subscribing to each of Pepper's cameras
"""
physics_client = CameraTest.client
# Test wrong camera ID for subscription
self.assertIsNone(CameraTest.robot.subscribeCamera(-3))
# Test wrong camera ID for unsubscription, and try to unsubscribe from
# an already unsubscribed camera
handle = CameraTest.robot.subscribeCamera(
list(CameraTest.robot.camera_dict.keys())[0])
self.assertFalse(CameraTest.robot.unsubscribeCamera(-3))
camera = CameraTest.robot.getCamera(handle)
CameraTest.robot.unsubscribeCamera(handle)
self.assertFalse(camera.unsubscribe())
# Test subscribing / unsubscribing
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handle = CameraTest.robot.subscribeCamera(camera_id)
# Check if the provided handle corresponds to the id of the camera
# object
self.assertEqual(handle, id(camera_obj))
# Check if the camera and the associated handle have been correctly
# storred in the handles dict
self.assertIn(handle, Camera._getCameraHandlesDict())
try:
self.assertEqual(
handle,
id(Camera._getCameraFromHandle(handle)))
except KeyError:
# Should be able to retrieve the camera associated to the
# handle without throwing any key error
self.assertTrue(False)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
self.assertNotIn(handle, Camera._getCameraHandlesDict())
# Test camera subscription with invalid resolution
with self.assertRaises(pybullet.error):
CameraTest.robot.subscribeCamera(
list(CameraTest.robot.camera_dict.keys())[0],
resolution="invalid")
def test_get_camera_id(self):
"""
Test the getCameraId method
"""
for camera_id in CameraTest.robot.camera_dict.keys():
handle = CameraTest.robot.subscribeCamera(camera_id)
# Check the id (PepperVirtual.ID_CAMERA_TOP for instance) of a
# subscribed camera
self.assertEqual(
camera_id,
CameraTest.robot.getCamera(handle).getCameraId())
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCamera(handle)
def test_get_camera_resolution(self):
"""
Test the getCameraResolution method
"""
# Test the CameraResolution equality
self.assertEqual(Camera.K_VGA, Camera.K_VGA)
self.assertNotEqual(Camera.K_QVGA, Camera.K_QQVGA)
# Testing that the retrieved camera frames correspond to the required
# image resolution
for resolution in [Camera.K_VGA, Camera.K_QVGA, Camera.K_QQVGA]:
for camera_id in CameraTest.robot.camera_dict.keys():
handle = CameraTest.robot.subscribeCamera(
camera_id,
resolution=resolution)
# Check that the camera frame's width and height correspond to
# the required resolution
self.assertEqual(
CameraTest.robot.getCameraFrame(handle).shape[1],
resolution.width)
self.assertEqual(
CameraTest.robot.getCameraFrame(handle).shape[0],
resolution.height)
# Check that the CameraResolution object passed when
# subscribing corresponds to the resolution of the camera
self.assertEqual(
resolution,
CameraTest.robot.getCameraResolution(handle))
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCameraResolution(handle)
def test_get_camera_link(self):
"""
Test the getCameraLink method
"""
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handle = CameraTest.robot.subscribeCamera(camera_id)
# Test the getCameraLink method of the Camera class
self.assertEqual(
camera_obj.camera_link,
camera_obj.getCameraLink())
# Test the getCameraLink method of the RobotVirtual class
self.assertEqual(
camera_obj.camera_link,
CameraTest.robot.getCameraLink(handle))
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCameraLink(handle)
def test_is_active(self):
"""
Test the isActive method
"""
handles = list()
# Check that the subscribed cameras are active
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handles.append(CameraTest.robot.subscribeCamera(camera_id))
self.assertTrue(camera_obj.isActive())
# Checked that the unsubscribed cameras are inactive
for handle in handles:
camera_obj = CameraTest.robot.getCamera(handle)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
self.assertFalse(camera_obj.isActive())
# Ensure that waiting for a correct image format when the camera is
# unsubscribed won't block the program
camera_obj._waitForCorrectImageFormat()
def test_camera_channels(self):
"""
Test the number of channels for each camera.
"""
for camera_id in CameraTest.robot.camera_dict.keys():
if camera_id == PepperVirtual.ID_CAMERA_DEPTH or\
camera_id == RomeoVirtual.ID_CAMERA_DEPTH:
# A depth image should have a shape of 2
handle = CameraTest.robot.subscribeCamera(camera_id)
self.assertEqual(
len(CameraTest.robot.getCameraFrame(handle).shape),
2)
else:
# An RGB image should have 3 channels
handle = CameraTest.robot.subscribeCamera(camera_id)
self.assertEqual(
CameraTest.robot.getCameraFrame(handle).shape[2],
3)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCameraFrame(handle)
def test_invalid_fov(self):
"""
Test the FOV setter of the camera class
"""
try:
dummy_camera = Camera(
None,
None,
None,
"no valid fov",
["still not"])
self.assertTrue(True)
except Exception:
self.assertTrue(
False,
"An invalid FOV should not raise an exception")
def test_get_camera_intrinsics(self):
"""
Test the getter method for the camera intrinsics
"""
dummy_camera = Camera(None, None, None, None, None)
self.assertIsNone(dummy_camera._getCameraIntrinsics())
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handle = CameraTest.robot.subscribeCamera(camera_id)
self.assertIsInstance(camera_obj._getCameraIntrinsics(), list)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
class PepperCameraTest(CameraTest):
"""
Unittests for Pepper virtual cameras
"""
@classmethod
def setUpClass(cls):
"""
Launches a simulation and spawns the Pepper virtual robot
"""
CameraTest.simulation = SimulationManager()
CameraTest.client = CameraTest.simulation.launchSimulation(
gui=False)
CameraTest.robot = CameraTest.simulation.spawnPepper(
CameraTest.client,
spawn_ground_plane=True)
@classmethod
def tearDownClass(cls):
"""
Stops the simulation
"""
CameraTest.simulation.stopSimulation(
CameraTest.client)
def test_subscribe_camera(self):
CameraTest.test_subscribe_camera(self)
def test_get_camera_id(self):
CameraTest.test_get_camera_id(self)
def test_is_active(self):
CameraTest.test_is_active(self)
def test_get_camera_resolution(self):
CameraTest.test_get_camera_resolution(self)
def test_camera_channels(self):
CameraTest.test_camera_channels(self)
def test_get_camera_link(self):
CameraTest.test_get_camera_link(self)
def test_invalid_fov(self):
CameraTest.test_invalid_fov(self)
def test_get_camera_intrinsics(self):
CameraTest.test_get_camera_intrinsics(self)
class NaoCameraTest(CameraTest):
"""
Unittests for Nao virtual cameras
"""
@classmethod
def setUpClass(cls):
"""
Launches a simulation and spawns the NAO virtual robot
"""
CameraTest.simulation = SimulationManager()
CameraTest.client = CameraTest.simulation.launchSimulation(
gui=False)
CameraTest.robot = CameraTest.simulation.spawnNao(
CameraTest.client,
spawn_ground_plane=True)
@classmethod
def tearDownClass(cls):
"""
Stops the simulation
"""
CameraTest.simulation.stopSimulation(
CameraTest.client)
def test_subscribe_camera(self):
CameraTest.test_subscribe_camera(self)
def test_get_camera_id(self):
CameraTest.test_get_camera_id(self)
def test_is_active(self):
CameraTest.test_is_active(self)
def test_get_camera_resolution(self):
CameraTest.test_get_camera_resolution(self)
def test_camera_channels(self):
CameraTest.test_camera_channels(self)
def test_get_camera_link(self):
CameraTest.test_get_camera_link(self)
def test_invalid_fov(self):
CameraTest.test_invalid_fov(self)
def test_get_camera_intrinsics(self):
CameraTest.test_get_camera_intrinsics(self)
class RomeoCameraTest(CameraTest):
"""
Unittests for Romeo virtual cameras
"""
@classmethod
def setUpClass(cls):
"""
Launches a simulation and spawns the Romeo virtual robot
"""
CameraTest.simulation = SimulationManager()
CameraTest.client = CameraTest.simulation.launchSimulation(
gui=False)
CameraTest.robot = CameraTest.simulation.spawnRomeo(
CameraTest.client,
spawn_ground_plane=True)
@classmethod
def tearDownClass(cls):
"""
Stops the simulation
"""
CameraTest.simulation.stopSimulation(
CameraTest.client)
def test_subscribe_camera(self):
CameraTest.test_subscribe_camera(self)
def test_get_camera_id(self):
CameraTest.test_get_camera_id(self)
def test_is_active(self):
CameraTest.test_is_active(self)
def test_get_camera_resolution(self):
CameraTest.test_get_camera_resolution(self)
def test_camera_channels(self):
CameraTest.test_camera_channels(self)
def test_get_camera_link(self):
CameraTest.test_get_camera_link(self)
def test_invalid_fov(self):
CameraTest.test_invalid_fov(self)
def test_get_camera_intrinsics(self):
CameraTest.test_get_camera_intrinsics(self) | tests/camera_test.py | import sys
import unittest
import pybullet
from qibullet import SimulationManager
from qibullet import NaoVirtual, PepperVirtual, RomeoVirtual
from qibullet import Camera, CameraRgb, CameraDepth, CameraResolution
class CameraTest(unittest.TestCase):
"""
Unittests for virtual cameras (virtual class, don't use directly)
"""
def test_camera_robot_model(self):
"""
Ensure that the robot model of the camera and the model of the robot
are the same
"""
for camera in CameraTest.robot.camera_dict.values():
self.assertEqual(
camera.getRobotModel(),
CameraTest.robot.getRobotModel())
def test_subscribe_camera(self):
"""
Test subscribing to each of Pepper's cameras
"""
physics_client = CameraTest.client
# Test wrong camera ID for subscription
self.assertIsNone(CameraTest.robot.subscribeCamera(-3))
# Test wrong camera ID for unsubscription, and try to unsubscribe from
# an already unsubscribed camera
handle = CameraTest.robot.subscribeCamera(
list(CameraTest.robot.camera_dict.keys())[0])
self.assertFalse(CameraTest.robot.unsubscribeCamera(-3))
camera = CameraTest.robot.getCamera(handle)
CameraTest.robot.unsubscribeCamera(handle)
self.assertFalse(camera.unsubscribe())
# Test subscribing / unsubscribing
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handle = CameraTest.robot.subscribeCamera(camera_id)
# Check if the provided handle corresponds to the id of the camera
# object
self.assertEqual(handle, id(camera_obj))
# Check if the camera and the associated handle have been correctly
# storred in the handles dict
self.assertIn(handle, Camera._getCameraHandlesDict())
try:
self.assertEqual(
handle,
id(Camera._getCameraFromHandle(handle)))
except KeyError:
# Should be able to retrieve the camera associated to the
# handle without throwing any key error
self.assertTrue(False)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
self.assertNotIn(handle, Camera._getCameraHandlesDict())
# Test camera subscription with invalid resolution
with self.assertRaises(pybullet.error):
CameraTest.robot.subscribeCamera(
list(CameraTest.robot.camera_dict.keys())[0],
resolution="invalid")
def test_get_camera_id(self):
"""
Test the getCameraId method
"""
for camera_id in CameraTest.robot.camera_dict.keys():
handle = CameraTest.robot.subscribeCamera(camera_id)
# Check the id (PepperVirtual.ID_CAMERA_TOP for instance) of a
# subscribed camera
self.assertEqual(
camera_id,
CameraTest.robot.getCamera(handle).getCameraId())
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCamera(handle)
def test_get_camera_resolution(self):
"""
Test the getCameraResolution method
"""
# Test the CameraResolution equality
self.assertEqual(Camera.K_VGA, Camera.K_VGA)
self.assertNotEqual(Camera.K_QVGA, Camera.K_QQVGA)
# Testing that the retrieved camera frames correspond to the required
# image resolution
for resolution in [Camera.K_VGA, Camera.K_QVGA, Camera.K_QQVGA]:
for camera_id in CameraTest.robot.camera_dict.keys():
handle = CameraTest.robot.subscribeCamera(
camera_id,
resolution=resolution)
# Check that the camera frame's width and height correspond to
# the required resolution
self.assertEqual(
CameraTest.robot.getCameraFrame(handle).shape[1],
resolution.width)
self.assertEqual(
CameraTest.robot.getCameraFrame(handle).shape[0],
resolution.height)
# Check that the CameraResolution object passed when
# subscribing corresponds to the resolution of the camera
self.assertEqual(
resolution,
CameraTest.robot.getCameraResolution(handle))
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCameraResolution(handle)
def test_get_camera_link(self):
"""
Test the getCameraLink method
"""
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handle = CameraTest.robot.subscribeCamera(camera_id)
# Test the getCameraLink method of the Camera class
self.assertEqual(
camera_obj.camera_link,
camera_obj.getCameraLink())
# Test the getCameraLink method of the RobotVirtual class
self.assertEqual(
camera_obj.camera_link,
CameraTest.robot.getCameraLink(handle))
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCameraLink(handle)
def test_is_active(self):
"""
Test the isActive method
"""
handles = list()
# Check that the subscribed cameras are active
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handles.append(CameraTest.robot.subscribeCamera(camera_id))
self.assertTrue(camera_obj.isActive())
# Checked that the unsubscribed cameras are inactive
for handle in handles:
camera_obj = CameraTest.robot.getCamera(handle)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
self.assertFalse(camera_obj.isActive())
# Ensure that waiting for a correct image format when the camera is
# unsubscribed won't block the program
camera_obj._waitForCorrectImageFormat()
def test_camera_channels(self):
"""
Test the number of channels for each camera.
"""
for camera_id in CameraTest.robot.camera_dict.keys():
if camera_id == PepperVirtual.ID_CAMERA_DEPTH or\
camera_id == RomeoVirtual.ID_CAMERA_DEPTH:
# A depth image should have a shape of 2
handle = CameraTest.robot.subscribeCamera(camera_id)
self.assertEqual(
len(CameraTest.robot.getCameraFrame(handle).shape),
2)
else:
# An RGB image should have 3 channels
handle = CameraTest.robot.subscribeCamera(camera_id)
self.assertEqual(
CameraTest.robot.getCameraFrame(handle).shape[2],
3)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
with self.assertRaises(pybullet.error):
CameraTest.robot.getCameraFrame(handle)
def test_invalid_fov(self):
"""
Test the FOV setter of the camera class
"""
try:
dummy_camera = Camera(
None,
None,
None,
"no valid fov",
["still not"])
self.assertTrue(True)
except Exception:
self.assertTrue(
False,
"An invalid FOV should not raise an exception")
def test_get_camera_intrinsics(self):
"""
Test the getter method for the camera intrinsics
"""
dummy_camera = Camera(None, None, None, None, None)
self.assertIsNone(dummy_camera._getCameraIntrinsics())
for camera_id, camera_obj in CameraTest.robot.camera_dict.items():
handle = CameraTest.robot.subscribeCamera(camera_id)
self.assertIsInstance(camera_obj._getCameraIntrinsics(), list)
self.assertTrue(CameraTest.robot.unsubscribeCamera(handle))
class PepperCameraTest(CameraTest):
"""
Unittests for Pepper virtual cameras
"""
@classmethod
def setUpClass(cls):
"""
Launches a simulation and spawns the Pepper virtual robot
"""
CameraTest.simulation = SimulationManager()
CameraTest.client = CameraTest.simulation.launchSimulation(
gui=False)
CameraTest.robot = CameraTest.simulation.spawnPepper(
CameraTest.client,
spawn_ground_plane=True)
@classmethod
def tearDownClass(cls):
"""
Stops the simulation
"""
CameraTest.simulation.stopSimulation(
CameraTest.client)
def test_subscribe_camera(self):
CameraTest.test_subscribe_camera(self)
def test_get_camera_id(self):
CameraTest.test_get_camera_id(self)
def test_is_active(self):
CameraTest.test_is_active(self)
def test_get_camera_resolution(self):
CameraTest.test_get_camera_resolution(self)
def test_camera_channels(self):
CameraTest.test_camera_channels(self)
def test_get_camera_link(self):
CameraTest.test_get_camera_link(self)
def test_invalid_fov(self):
CameraTest.test_invalid_fov(self)
def test_get_camera_intrinsics(self):
CameraTest.test_get_camera_intrinsics(self)
class NaoCameraTest(CameraTest):
"""
Unittests for Nao virtual cameras
"""
@classmethod
def setUpClass(cls):
"""
Launches a simulation and spawns the NAO virtual robot
"""
CameraTest.simulation = SimulationManager()
CameraTest.client = CameraTest.simulation.launchSimulation(
gui=False)
CameraTest.robot = CameraTest.simulation.spawnNao(
CameraTest.client,
spawn_ground_plane=True)
@classmethod
def tearDownClass(cls):
"""
Stops the simulation
"""
CameraTest.simulation.stopSimulation(
CameraTest.client)
def test_subscribe_camera(self):
CameraTest.test_subscribe_camera(self)
def test_get_camera_id(self):
CameraTest.test_get_camera_id(self)
def test_is_active(self):
CameraTest.test_is_active(self)
def test_get_camera_resolution(self):
CameraTest.test_get_camera_resolution(self)
def test_camera_channels(self):
CameraTest.test_camera_channels(self)
def test_get_camera_link(self):
CameraTest.test_get_camera_link(self)
def test_invalid_fov(self):
CameraTest.test_invalid_fov(self)
def test_get_camera_intrinsics(self):
CameraTest.test_get_camera_intrinsics(self)
class RomeoCameraTest(CameraTest):
"""
Unittests for Romeo virtual cameras
"""
@classmethod
def setUpClass(cls):
"""
Launches a simulation and spawns the Romeo virtual robot
"""
CameraTest.simulation = SimulationManager()
CameraTest.client = CameraTest.simulation.launchSimulation(
gui=False)
CameraTest.robot = CameraTest.simulation.spawnRomeo(
CameraTest.client,
spawn_ground_plane=True)
@classmethod
def tearDownClass(cls):
"""
Stops the simulation
"""
CameraTest.simulation.stopSimulation(
CameraTest.client)
def test_subscribe_camera(self):
CameraTest.test_subscribe_camera(self)
def test_get_camera_id(self):
CameraTest.test_get_camera_id(self)
def test_is_active(self):
CameraTest.test_is_active(self)
def test_get_camera_resolution(self):
CameraTest.test_get_camera_resolution(self)
def test_camera_channels(self):
CameraTest.test_camera_channels(self)
def test_get_camera_link(self):
CameraTest.test_get_camera_link(self)
def test_invalid_fov(self):
CameraTest.test_invalid_fov(self)
def test_get_camera_intrinsics(self):
CameraTest.test_get_camera_intrinsics(self) | 0.705176 | 0.862728 |
import functools
import itertools
import operator
import unittest
import dual
@functools.lru_cache(maxsize=None)
def stirling(n, k):
    """Stirling number of the second kind S(n, k): the number of ways to
    partition n labelled items into k non-empty unlabelled subsets.

    Recurrence: S(n, k) = k * S(n-1, k) + S(n-1, k-1), with S(0, 0) = 1
    and S(n, 0) = S(0, k) = 0 otherwise.
    https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
    """
    # Base rows of the recurrence, expressed as guard clauses.
    if k == 0:
        return 1 if n == 0 else 0
    if n == 0:
        return 0
    # Either the n-th item joins one of the k existing subsets, or it
    # forms a new subset on its own.
    return k * stirling(n - 1, k) + stirling(n - 1, k - 1)
class IterationTest(unittest.TestCase):
    """Check dual.iter_stirling against the Stirling-number count."""

    # Bounds for the exhaustive sweep over (n balls, k boxes).
    max_ball_count = 12
    max_box_count = 50

    def test_stirling(self):
        """Every yielded partition has k non-empty parts covering range(n),
        all partitions are distinct, and their total count is S(n, k)."""
        for n in range(self.max_ball_count+1):
            for k in range(self.max_box_count+1):
                d = dual.iter_stirling(range(n), k)
                if (n > 0 and k == 0) or n < k:
                    # No valid partition exists: iterator must be empty.
                    with self.assertRaises(StopIteration):
                        next(d)
                else:
                    r = set()
                    # NOTE(review): the loop target shadows the iterator `d`.
                    for d in d:
                        self.assertEqual(len(d), k)
                        # Each part is non-empty (truthy).
                        self.assertTrue(all(d))
                        # Parts cover range(n) exactly once.
                        self.assertCountEqual(itertools.chain.from_iterable(d), range(n))
                        r.add(tuple(map(tuple, d)))
                    # All partitions distinct and the count matches S(n, k).
                    self.assertEqual(len(r), stirling(n, k))
class DualTest:
    """Base mix-in for the dual-number test batteries."""

    @staticmethod
    def format_param(*params):
        """Render the parameters under test for use in assertion messages."""
        return f'parameters are {params!r}'
class DualExactTest(DualTest):
    """Exact algebraic laws that dual numbers must satisfy.

    Mix-in: concrete subclasses provide `sample` (tuples of dual numbers),
    `max_pow`, `valid_for` and `collapse_scalar`.  All comparisons here use
    exact equality -- no floating-point tolerance.
    """

    # --- ring/field axioms -------------------------------------------

    def test_add_asso(self):
        """(x+y)+z == x+(y+z)."""
        for x, y, z in self.sample(3):
            self.assertEqual((x+y)+z, x+(y+z), self.format_param(x, y, z))

    def test_add_comm(self):
        """x+y == y+x."""
        for x, y in self.sample(2, allow_repeats=False):
            self.assertEqual(x+y, y+x, self.format_param(x, y))

    def test_add_iden(self):
        """0 is the additive identity on both sides."""
        for x, in self.sample():
            self.assertEqual(x+0, x, self.format_param(x))
            self.assertEqual(0+x, x, self.format_param(x))

    def test_add_sub_inv(self):
        """Subtraction undoes addition."""
        for x, y in self.sample(2):
            self.assertEqual(x+y-y, x, self.format_param(x, y))
            self.assertEqual(x-y+y, x, self.format_param(x, y))

    def test_mul_asso(self):
        """(x*y)*z == x*(y*z)."""
        for x, y, z in self.sample(3):
            self.assertEqual((x*y)*z, x*(y*z), self.format_param(x, y, z))

    def test_mul_comm(self):
        """x*y == y*x."""
        for x, y in self.sample(2, allow_repeats=False):
            self.assertEqual(x*y, y*x, self.format_param(x, y))

    def test_mul_iden(self):
        """1 is the multiplicative identity on both sides."""
        for x, in self.sample():
            self.assertEqual(x*1, x, self.format_param(x))
            self.assertEqual(1*x, x, self.format_param(x))

    def test_truediv_zero(self):
        """Dividing by a dual with zero real part raises ZeroDivisionError."""
        for x, y in self.sample(2):
            if y.a == 0:
                with self.assertRaises(ZeroDivisionError):
                    x/y

    def test_mul_truediv_inv(self):
        """Division undoes multiplication when the divisor is invertible."""
        for x, y in self.sample(2):
            if y.a != 0:
                self.assertEqual(x*y/y, x, self.format_param(x, y))
                self.assertEqual(x/y*y, x, self.format_param(x, y))

    def test_add_mul_dist(self):
        """Multiplication distributes over addition on both sides."""
        for x, y, z in self.sample(3):
            self.assertEqual(x*(y+z), x*y+x*z, self.format_param(x, y, z))
            self.assertEqual((y+z)*x, y*x+z*x, self.format_param(x, y, z))

    # --- integer powers ----------------------------------------------

    def test_pow_zero(self):
        """x**0 == 1 for every x."""
        for x, in self.sample():
            self.assertEqual(x**0, 1, self.format_param(x))

    def test_pow_pos(self):
        """x**n matches repeated multiplication for 1 <= n <= max_pow."""
        for x, in self.sample():
            y = 1
            for n in range(1, self.max_pow+1):
                y *= x
                self.assertEqual(x**n, y, self.format_param(x, n))

    def test_pow_neg(self):
        """x**-n matches repeated division when x is invertible."""
        for x, in self.sample():
            if x.a != 0:
                y = 1
                for n in range(1, self.max_pow+1):
                    y /= x
                    self.assertEqual(x**-n, y, self.format_param(x, n))

    # --- exponential and logarithm -----------------------------------

    def test_exp_neg(self):
        """exp(-x) == 1/exp(x)."""
        for x, in self.sample():
            self.assertEqual(dual.exp(-x), 1/dual.exp(x), self.format_param(x))

    def test_log_zero(self):
        """log of a dual with zero real part raises ValueError."""
        for x, in self.sample():
            if x.a == 0:
                with self.assertRaises(ValueError):
                    dual.log(x)

    def test_exp_log_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.exp, dual.log, x)

    # --- trigonometric functions -------------------------------------

    def test_sin_sym(self):
        """sin is odd."""
        for x, in self.sample():
            self.assertEqual(dual.sin(-x), -dual.sin(x), self.format_param(x))

    def test_sin_asin_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.sin, dual.asin, x)

    def test_cos_sym(self):
        """cos is even."""
        for x, in self.sample():
            self.assertEqual(dual.cos(-x), dual.cos(x), self.format_param(x))

    def test_cos_acos_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.cos, dual.acos, x)

    def test_tan_sym(self):
        """tan is odd."""
        for x, in self.sample():
            self.assertEqual(dual.tan(-x), -dual.tan(x), self.format_param(x))

    def test_tan_atan_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.tan, dual.atan, x)

    def test_sin_cos_thm(self):
        """cos(x)**2 + sin(x)**2 == 1."""
        for x, in self.sample():
            self.assertEqual(
                dual.cos(x)**2 + dual.sin(x)**2, 1, self.format_param(x))

    def test_sin_cos_tan_thm(self):
        """sin(x)/cos(x) == tan(x)."""
        for x, in self.sample():
            self.assertEqual(
                dual.sin(x) / dual.cos(x), dual.tan(x), self.format_param(x))

    # --- hyperbolic functions ----------------------------------------

    def test_sinh_sym(self):
        """sinh is odd."""
        for x, in self.sample():
            self.assertEqual(dual.sinh(-x), -dual.sinh(x), self.format_param(x))

    def test_sinh_asinh_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.sinh, dual.asinh, x)

    def test_cosh_sym(self):
        """cosh is even."""
        for x, in self.sample():
            self.assertEqual(dual.cosh(-x), dual.cosh(x), self.format_param(x))

    def test_cosh_acosh_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.cosh, dual.acosh, x)

    def test_tanh_sym(self):
        """tanh is odd."""
        for x, in self.sample():
            self.assertEqual(dual.tanh(-x), -dual.tanh(x), self.format_param(x))

    def test_tanh_atanh_inv(self):
        for x, in self.sample():
            self.assert_inv(dual.tanh, dual.atanh, x)

    def test_sinh_cosh_thm(self):
        """cosh(x)**2 - sinh(x)**2 == 1."""
        for x, in self.sample():
            self.assertEqual(
                dual.cosh(x)**2 - dual.sinh(x)**2, 1, self.format_param(x))

    def test_sinh_cosh_tanh_thm(self):
        """sinh(x)/cosh(x) == tanh(x)."""
        for x, in self.sample():
            self.assertEqual(
                dual.sinh(x) / dual.cosh(x), dual.tanh(x), self.format_param(x))

    def assert_inv(self, f, i, x):
        """Assert that `i` inverts `f` at x in both directions, where
        `valid_for` says the inverse's domain admits the argument.

        `collapse_scalar` normalises the real part before comparison
        (e.g. simplification for symbolic scalars).
        """
        def collapse_dual(x):
            return dual.Dual(self.collapse_scalar(x.a), x.b)
        y = f(x)
        if self.valid_for(i, y):
            self.assertEqual(collapse_dual(i(y)), x)
        if self.valid_for(i, x):
            self.assertEqual(collapse_dual(f(i(x))), x)
# Optional dependency: the symbolic test battery is skipped without SymPy.
has_sympy = True
try:
    import sympy
except ImportError:
    has_sympy = False
@unittest.skipUnless(has_sympy, 'requires SymPy')
class DualSymbolTest(DualExactTest, unittest.TestCase):
    """Run the exact algebraic laws with SymPy symbols as scalars."""

    # 3 dual units -> 1 << 3 == 8 coefficient symbols per dual number.
    unit_count = 3
    max_pow = 16

    @classmethod
    def setUpClass(cls):
        """Build symbolic duals a, b, c plus one with a zero real part."""
        cls.duals = []
        term_count = 1 << cls.unit_count
        def make_dual(symbol):
            # One symbol for the real part, one per non-real term key.
            head, *tail = sympy.symbols('{}:{}'.format(symbol, term_count))
            return dual.Dual(head, dict(enumerate(tail, 1)))
        for symbol in 'abc':
            cls.duals.append(make_dual(symbol))
        cls.zero = make_dual('z')
        cls.zero.a = 0

    def setUp(self):
        dual.set_scalar('symbol')

    def tearDown(self):
        dual.set_scalar('real')

    def assertEqual(self, x, y, msg=None):
        """Symbolic equality: simplify every coefficient of x-y, compare to 0."""
        x -= y
        x.a = sympy.simplify(x.a)
        x.b = {k: sympy.simplify(v) for k, v in x.b.items()}
        super().assertEqual(x, 0, msg)

    def test_pow_inv(self):
        """(x**y)**(1/y) == x, with positivity assumed via sympy.posify."""
        for x, y in self.sample(2):
            if x.a != 0 and y.a != 0:
                p, _ = sympy.posify(x.a)
                x = dual.Dual(p, x.b)
                p, _ = sympy.posify(y.a)
                y = dual.Dual(p, y.b)
                self.assertEqual((x**y)**(1/y), x, self.format_param(x, y))
                self.assertEqual((x**(1/y))**y, x, self.format_param(x, y))

    def test_log_rcp(self):
        """log(1/x) == -log(x) for a positive (posified) real part."""
        for x, in self.sample():
            if x.a != 0:
                p, _ = sympy.posify(x.a)
                x = dual.Dual(p, x.b)
                self.assertEqual(dual.log(1/x), -dual.log(x), self.format_param(x))

    def test_asin_log(self):
        """dual.asin agrees with the logarithmic form of asin."""
        for x, in self.sample():
            y = dual.asin(x)
            # Rewrite the symbolic asin in the real part into log form so
            # both sides compare structurally.
            y.a = y.a.subs(
                sympy.asin(x.a), self.asin_to_log(sympy.sqrt, sympy.log, x.a))
            z = self.asin_to_log(dual.sqrt, dual.log, x)
            self.assertEqual(y, z, self.format_param(x))

    def test_acos_log(self):
        """dual.acos agrees with the logarithmic form of acos."""
        for x, in self.sample():
            y = dual.acos(x)
            y.a = y.a.subs(
                sympy.acos(x.a), self.acos_to_log(sympy.sqrt, sympy.log, x.a))
            z = self.acos_to_log(dual.sqrt, dual.log, x)
            self.assertEqual(y, z, self.format_param(x))

    def sample(self, n=1, *, allow_repeats=True):
        """Yield the first n symbolic duals, then n variants with each
        position in turn replaced by the zero-real-part dual."""
        yield self.duals[:n]
        for i in range(n):
            yield [self.duals[j] if i != j else self.zero for j in range(n)]

    @staticmethod
    def valid_for(i, x):
        # Only log has a restricted domain here; for other inverses this
        # returns None (falsy), so assert_inv skips the inverse check.
        if i is dual.log:
            return x.a != 0

    @staticmethod
    def collapse_scalar(x):
        # Simplify allowing inverse-function cancellation, e.g. asin(sin(x)) -> x.
        return sympy.simplify(x, inverse=True)

    @staticmethod
    def asin_to_log(sqrt, log, x):
        # asin(x) = -i * log(sqrt(1 - x**2) + i*x)
        from sympy import I
        return -I * log(sqrt(1-x**2) + I*x)

    @staticmethod
    def acos_to_log(sqrt, log, x):
        # acos(x) = -i * log(i*sqrt(1 - x**2) + x)
        from sympy import I
        return -I * log(I*sqrt(1-x**2) + x)
import math
import random
import sys

# Machine epsilon for floats, and its square root -- the absolute
# tolerance scale used by the approximate comparisons below.
epsilon = sys.float_info.epsilon
sqrt_epsilon = math.sqrt(epsilon)
class DualNumberTest(DualTest):
    """Randomised numeric fixtures shared by the float/complex batteries.

    Subclasses provide `zero`, `one` and `random()` (the scalar generator).
    """

    # Fixture sizes, term-shape bounds, and the fraction of samples whose
    # real part is forced to zero (to exercise singular cases).
    pure_count = 4
    unit_count = 32
    unit_zero_frac = 1/8
    mix_count = 32
    max_fctr_count = 2
    max_term_count = 4
    mix_zero_frac = 1/8

    @classmethod
    def setUpClass(cls):
        """Build pure scalars, single-unit duals, and multi-term mixes."""
        pures = [cls.zero, cls.one]
        pures += [
            dual.Dual(cls.random(), {})
            for _ in range(cls.pure_count)]
        units = [
            dual.Dual.new(cls.random(), cls.random())
            for _ in range(cls.unit_count)]
        unit_keys = list(set(k for x in units for k in x.b.keys()))
        mixes = []
        for _ in range(cls.mix_count):
            # Re-draw until the mix is not a trivial single-factor,
            # single-term dual (those are already covered by `units`).
            while True:
                fctr_count = random.randint(1, cls.max_fctr_count)
                term_count = random.randint(1, cls.max_term_count)
                if fctr_count != 1 or term_count != 1:
                    break
            # Term keys are OR-combined unit keys -- presumably bitmask
            # products of units; verify against dual.Dual.new.
            mixes.append(dual.Dual(
                cls.random(),
                {functools.reduce(operator.or_, random.sample(unit_keys, fctr_count)):
                    cls.random() for _ in range(term_count)}))
        # Zero out the real part of a random subset of units and mixes.
        for x in random.sample(units, round(cls.unit_count * cls.unit_zero_frac)):
            x.a = 0
        for x in random.sample(mixes, round(cls.mix_count * cls.mix_zero_frac)):
            x.a = 0
        cls.duals = pures + units + mixes

    def sample(self, n=1, *, allow_repeats=True):
        """Yield n-tuples drawn from the fixture set."""
        if allow_repeats:
            return itertools.product(self.duals, repeat=n)
        else:
            return itertools.combinations(self.duals, n)
class DualFloatTest(DualNumberTest):
    """Compare dual transcendental functions to truncated Taylor series."""

    # Number of series terms; series_term_max bounds the sample magnitude
    # so the truncated series converges to within machine precision.
    series_term_count = 32
    series_term_max = series_term_count * epsilon**(1/series_term_count) / math.e

    def assertAlmostEqual(self, x, y, msg=None):
        """Dual-aware approximate equality with abs tolerance sqrt(epsilon)."""
        if not dual.isclose(x, y, abs_tol=sqrt_epsilon):
            std = '{!r} != {!r} in approximate sense'.format(x, y)
            msg = self._formatMessage(msg, std)
            raise self.failureException(msg)

    def test_exp_series(self):
        """exp(x) ~= sum of x**n / n!."""
        for x, in self.sample():
            self.assertAlmostEqual(
                dual.exp(x),
                sum(
                    x**n / math.factorial(n)
                    for n in range(self.series_term_count)),
                self.format_param(x))

    def test_sin_series(self):
        """sin(x) ~= alternating odd-power series."""
        for x, in self.sample():
            self.assertAlmostEqual(
                dual.sin(x),
                sum(
                    (-1 if n & 1 else 1) * x**(2*n+1) / math.factorial(2*n+1)
                    for n in range(self.series_term_count)),
                self.format_param(x))

    def test_cos_series(self):
        """cos(x) ~= alternating even-power series."""
        for x, in self.sample():
            self.assertAlmostEqual(
                dual.cos(x),
                sum(
                    (-1 if n & 1 else 1) * x**(2*n) / math.factorial(2*n)
                    for n in range(self.series_term_count)),
                self.format_param(x))

    def test_sinh_series(self):
        """sinh(x) ~= odd-power series (no sign alternation)."""
        for x, in self.sample():
            self.assertAlmostEqual(
                dual.sinh(x),
                sum(
                    x**(2*n+1) / math.factorial(2*n+1)
                    for n in range(self.series_term_count)),
                self.format_param(x))

    def test_cosh_series(self):
        """cosh(x) ~= even-power series (no sign alternation)."""
        for x, in self.sample():
            self.assertAlmostEqual(
                dual.cosh(x),
                sum(
                    x**(2*n) / math.factorial(2*n)
                    for n in range(self.series_term_count)),
                self.format_param(x))
class DualRealTest(DualFloatTest, unittest.TestCase):
    """Run the float battery with real scalars."""

    zero = dual.Dual(0, {})
    one = dual.Dual(1, {})

    @classmethod
    def random(cls):
        """A random signed real with magnitude log-uniform in
        [sqrt_epsilon, series_term_max]."""
        low = math.log2(sqrt_epsilon)
        high = math.log2(cls.series_term_max)
        magnitude = 2 ** random.uniform(low, high)
        sign = random.choice([-1, 1])
        return magnitude * sign
class DualComplexTest(DualFloatTest, unittest.TestCase):
    """Run the float battery with complex scalars."""

    zero = dual.Dual(0, {})
    one = dual.Dual(1, {})

    def setUp(self):
        # Switch the library into complex-scalar mode for each test...
        dual.set_scalar('complex')

    def tearDown(self):
        # ...and restore real-scalar mode afterwards.
        dual.set_scalar('real')

    @classmethod
    def random(cls):
        """A random complex scalar built from two real draws."""
        real_part = DualRealTest.random()
        imag_part = DualRealTest.random()
        return complex(real_part, imag_part)
if __name__ == '__main__':
unittest.main() | test.py | import functools
import itertools
import operator
import unittest
import dual
@functools.lru_cache(maxsize=None)
def stirling(n, k):
    """Return S(n, k), the Stirling number of the second kind.

    S(n, k) counts the partitions of n labelled items into k non-empty
    subsets; see
    https://en.wikipedia.org/wiki/Stirling_numbers_of_the_second_kind
    """
    if n == 0:
        # S(0, 0) = 1; S(0, k) = 0 for k > 0.
        return int(k == 0)
    if k == 0:
        # No way to place n > 0 items into zero subsets.
        return 0
    # n-th item either starts a new subset or joins one of k existing ones.
    return stirling(n - 1, k - 1) + k * stirling(n - 1, k)
class IterationTest(unittest.TestCase):
    """Validate dual.iter_stirling against the Stirling-number recurrence."""

    # Exhaustive sweep bounds: n items ("balls") and k subsets ("boxes").
    max_ball_count = 12
    max_box_count = 50

    def test_stirling(self):
        """dual.iter_stirling(range(n), k) must yield exactly the set
        partitions of n items into k non-empty parts -- S(n, k) in total."""
        for n in range(self.max_ball_count+1):
            for k in range(self.max_box_count+1):
                d = dual.iter_stirling(range(n), k)
                if (n > 0 and k == 0) or n < k:
                    # Impossible partition: the iterator must be exhausted.
                    with self.assertRaises(StopIteration):
                        next(d)
                else:
                    r = set()
                    # NOTE(review): the loop target shadows the iterator `d`.
                    for d in d:
                        self.assertEqual(len(d), k)
                        # Every part must be non-empty (truthy).
                        self.assertTrue(all(d))
                        # The parts together cover range(n) exactly once.
                        self.assertCountEqual(itertools.chain.from_iterable(d), range(n))
                        r.add(tuple(map(tuple, d)))
                    # Partitions are distinct and their count equals S(n, k).
                    self.assertEqual(len(r), stirling(n, k))
class DualTest:
    """Common helpers for the dual-number test mix-ins."""

    @staticmethod
    def format_param(*values):
        """Describe the tested parameters for failure messages."""
        return 'parameters are ' + repr(values)
class DualExactTest(DualTest):
def test_add_asso(self):
for x, y, z in self.sample(3):
self.assertEqual((x+y)+z, x+(y+z), self.format_param(x, y, z))
def test_add_comm(self):
for x, y in self.sample(2, allow_repeats=False):
self.assertEqual(x+y, y+x, self.format_param(x, y))
def test_add_iden(self):
for x, in self.sample():
self.assertEqual(x+0, x, self.format_param(x))
self.assertEqual(0+x, x, self.format_param(x))
def test_add_sub_inv(self):
for x, y in self.sample(2):
self.assertEqual(x+y-y, x, self.format_param(x, y))
self.assertEqual(x-y+y, x, self.format_param(x, y))
def test_mul_asso(self):
for x, y, z in self.sample(3):
self.assertEqual((x*y)*z, x*(y*z), self.format_param(x, y, z))
def test_mul_comm(self):
for x, y in self.sample(2, allow_repeats=False):
self.assertEqual(x*y, y*x, self.format_param(x, y))
def test_mul_iden(self):
for x, in self.sample():
self.assertEqual(x*1, x, self.format_param(x))
self.assertEqual(1*x, x, self.format_param(x))
def test_truediv_zero(self):
for x, y in self.sample(2):
if y.a == 0:
with self.assertRaises(ZeroDivisionError):
x/y
def test_mul_truediv_inv(self):
for x, y in self.sample(2):
if y.a != 0:
self.assertEqual(x*y/y, x, self.format_param(x, y))
self.assertEqual(x/y*y, x, self.format_param(x, y))
def test_add_mul_dist(self):
for x, y, z in self.sample(3):
self.assertEqual(x*(y+z), x*y+x*z, self.format_param(x, y, z))
self.assertEqual((y+z)*x, y*x+z*x, self.format_param(x, y, z))
def test_pow_zero(self):
for x, in self.sample():
self.assertEqual(x**0, 1, self.format_param(x))
def test_pow_pos(self):
for x, in self.sample():
y = 1
for n in range(1, self.max_pow+1):
y *= x
self.assertEqual(x**n, y, self.format_param(x, n))
def test_pow_neg(self):
for x, in self.sample():
if x.a != 0:
y = 1
for n in range(1, self.max_pow+1):
y /= x
self.assertEqual(x**-n, y, self.format_param(x, n))
def test_exp_neg(self):
for x, in self.sample():
self.assertEqual(dual.exp(-x), 1/dual.exp(x), self.format_param(x))
def test_log_zero(self):
for x, in self.sample():
if x.a == 0:
with self.assertRaises(ValueError):
dual.log(x)
def test_exp_log_inv(self):
for x, in self.sample():
self.assert_inv(dual.exp, dual.log, x)
def test_sin_sym(self):
for x, in self.sample():
self.assertEqual(dual.sin(-x), -dual.sin(x), self.format_param(x))
def test_sin_asin_inv(self):
for x, in self.sample():
self.assert_inv(dual.sin, dual.asin, x)
def test_cos_sym(self):
for x, in self.sample():
self.assertEqual(dual.cos(-x), dual.cos(x), self.format_param(x))
def test_cos_acos_inv(self):
for x, in self.sample():
self.assert_inv(dual.cos, dual.acos, x)
def test_tan_sym(self):
for x, in self.sample():
self.assertEqual(dual.tan(-x), -dual.tan(x), self.format_param(x))
def test_tan_atan_inv(self):
for x, in self.sample():
self.assert_inv(dual.tan, dual.atan, x)
def test_sin_cos_thm(self):
for x, in self.sample():
self.assertEqual(
dual.cos(x)**2 + dual.sin(x)**2, 1, self.format_param(x))
def test_sin_cos_tan_thm(self):
for x, in self.sample():
self.assertEqual(
dual.sin(x) / dual.cos(x), dual.tan(x), self.format_param(x))
def test_sinh_sym(self):
for x, in self.sample():
self.assertEqual(dual.sinh(-x), -dual.sinh(x), self.format_param(x))
def test_sinh_asinh_inv(self):
for x, in self.sample():
self.assert_inv(dual.sinh, dual.asinh, x)
def test_cosh_sym(self):
for x, in self.sample():
self.assertEqual(dual.cosh(-x), dual.cosh(x), self.format_param(x))
def test_cosh_acosh_inv(self):
for x, in self.sample():
self.assert_inv(dual.cosh, dual.acosh, x)
def test_tanh_sym(self):
for x, in self.sample():
self.assertEqual(dual.tanh(-x), -dual.tanh(x), self.format_param(x))
def test_tanh_atanh_inv(self):
for x, in self.sample():
self.assert_inv(dual.tanh, dual.atanh, x)
def test_sinh_cosh_thm(self):
for x, in self.sample():
self.assertEqual(
dual.cosh(x)**2 - dual.sinh(x)**2, 1, self.format_param(x))
def test_sinh_cosh_tanh_thm(self):
for x, in self.sample():
self.assertEqual(
dual.sinh(x) / dual.cosh(x), dual.tanh(x), self.format_param(x))
def assert_inv(self, f, i, x):
def collapse_dual(x):
return dual.Dual(self.collapse_scalar(x.a), x.b)
y = f(x)
if self.valid_for(i, y):
self.assertEqual(collapse_dual(i(y)), x)
if self.valid_for(i, x):
self.assertEqual(collapse_dual(f(i(x))), x)
try:
import sympy
except ImportError:
has_sympy = False
else:
has_sympy = True
@unittest.skipUnless(has_sympy, 'requires SymPy')
class DualSymbolTest(DualExactTest, unittest.TestCase):
unit_count = 3
max_pow = 16
@classmethod
def setUpClass(cls):
cls.duals = []
term_count = 1 << cls.unit_count
def make_dual(symbol):
head, *tail = sympy.symbols('{}:{}'.format(symbol, term_count))
return dual.Dual(head, dict(enumerate(tail, 1)))
for symbol in 'abc':
cls.duals.append(make_dual(symbol))
cls.zero = make_dual('z')
cls.zero.a = 0
def setUp(self):
dual.set_scalar('symbol')
def tearDown(self):
dual.set_scalar('real')
def assertEqual(self, x, y, msg=None):
x -= y
x.a = sympy.simplify(x.a)
x.b = {k: sympy.simplify(v) for k, v in x.b.items()}
super().assertEqual(x, 0, msg)
def test_pow_inv(self):
for x, y in self.sample(2):
if x.a != 0 and y.a != 0:
p, _ = sympy.posify(x.a)
x = dual.Dual(p, x.b)
p, _ = sympy.posify(y.a)
y = dual.Dual(p, y.b)
self.assertEqual((x**y)**(1/y), x, self.format_param(x, y))
self.assertEqual((x**(1/y))**y, x, self.format_param(x, y))
def test_log_rcp(self):
for x, in self.sample():
if x.a != 0:
p, _ = sympy.posify(x.a)
x = dual.Dual(p, x.b)
self.assertEqual(dual.log(1/x), -dual.log(x), self.format_param(x))
def test_asin_log(self):
for x, in self.sample():
y = dual.asin(x)
y.a = y.a.subs(
sympy.asin(x.a), self.asin_to_log(sympy.sqrt, sympy.log, x.a))
z = self.asin_to_log(dual.sqrt, dual.log, x)
self.assertEqual(y, z, self.format_param(x))
def test_acos_log(self):
for x, in self.sample():
y = dual.acos(x)
y.a = y.a.subs(
sympy.acos(x.a), self.acos_to_log(sympy.sqrt, sympy.log, x.a))
z = self.acos_to_log(dual.sqrt, dual.log, x)
self.assertEqual(y, z, self.format_param(x))
def sample(self, n=1, *, allow_repeats=True):
yield self.duals[:n]
for i in range(n):
yield [self.duals[j] if i != j else self.zero for j in range(n)]
@staticmethod
def valid_for(i, x):
if i is dual.log:
return x.a != 0
@staticmethod
def collapse_scalar(x):
return sympy.simplify(x, inverse=True)
@staticmethod
def asin_to_log(sqrt, log, x):
from sympy import I
return -I * log(sqrt(1-x**2) + I*x)
@staticmethod
def acos_to_log(sqrt, log, x):
from sympy import I
return -I * log(I*sqrt(1-x**2) + x)
import math
import random
import sys
epsilon = sys.float_info.epsilon
sqrt_epsilon = math.sqrt(epsilon)
class DualNumberTest(DualTest):
pure_count = 4
unit_count = 32
unit_zero_frac = 1/8
mix_count = 32
max_fctr_count = 2
max_term_count = 4
mix_zero_frac = 1/8
@classmethod
def setUpClass(cls):
pures = [cls.zero, cls.one]
pures += [
dual.Dual(cls.random(), {})
for _ in range(cls.pure_count)]
units = [
dual.Dual.new(cls.random(), cls.random())
for _ in range(cls.unit_count)]
unit_keys = list(set(k for x in units for k in x.b.keys()))
mixes = []
for _ in range(cls.mix_count):
while True:
fctr_count = random.randint(1, cls.max_fctr_count)
term_count = random.randint(1, cls.max_term_count)
if fctr_count != 1 or term_count != 1:
break
mixes.append(dual.Dual(
cls.random(),
{functools.reduce(operator.or_, random.sample(unit_keys, fctr_count)):
cls.random() for _ in range(term_count)}))
for x in random.sample(units, round(cls.unit_count * cls.unit_zero_frac)):
x.a = 0
for x in random.sample(mixes, round(cls.mix_count * cls.mix_zero_frac)):
x.a = 0
cls.duals = pures + units + mixes
def sample(self, n=1, *, allow_repeats=True):
if allow_repeats:
return itertools.product(self.duals, repeat=n)
else:
return itertools.combinations(self.duals, n)
class DualFloatTest(DualNumberTest):
series_term_count = 32
series_term_max = series_term_count * epsilon**(1/series_term_count) / math.e
def assertAlmostEqual(self, x, y, msg=None):
if not dual.isclose(x, y, abs_tol=sqrt_epsilon):
std = '{!r} != {!r} in approximate sense'.format(x, y)
msg = self._formatMessage(msg, std)
raise self.failureException(msg)
def test_exp_series(self):
for x, in self.sample():
self.assertAlmostEqual(
dual.exp(x),
sum(
x**n / math.factorial(n)
for n in range(self.series_term_count)),
self.format_param(x))
def test_sin_series(self):
for x, in self.sample():
self.assertAlmostEqual(
dual.sin(x),
sum(
(-1 if n & 1 else 1) * x**(2*n+1) / math.factorial(2*n+1)
for n in range(self.series_term_count)),
self.format_param(x))
def test_cos_series(self):
for x, in self.sample():
self.assertAlmostEqual(
dual.cos(x),
sum(
(-1 if n & 1 else 1) * x**(2*n) / math.factorial(2*n)
for n in range(self.series_term_count)),
self.format_param(x))
def test_sinh_series(self):
for x, in self.sample():
self.assertAlmostEqual(
dual.sinh(x),
sum(
x**(2*n+1) / math.factorial(2*n+1)
for n in range(self.series_term_count)),
self.format_param(x))
def test_cosh_series(self):
for x, in self.sample():
self.assertAlmostEqual(
dual.cosh(x),
sum(
x**(2*n) / math.factorial(2*n)
for n in range(self.series_term_count)),
self.format_param(x))
class DualRealTest(DualFloatTest, unittest.TestCase):
zero = dual.Dual(0, {})
one = dual.Dual(1, {})
@classmethod
def random(cls):
return (
2**random.uniform(
math.log2(sqrt_epsilon), math.log2(cls.series_term_max)) *
random.choice([-1, 1]))
class DualComplexTest(DualFloatTest, unittest.TestCase):
zero = dual.Dual(0, {})
one = dual.Dual(1, {})
def setUp(self):
dual.set_scalar('complex')
def tearDown(self):
dual.set_scalar('real')
@classmethod
def random(cls):
return complex(DualRealTest.random(), DualRealTest.random())
if __name__ == '__main__':
unittest.main() | 0.541651 | 0.510069 |
import os
import boto3
from botocore.exceptions import NoCredentialsError
from flask import Flask, redirect, Blueprint, request, url_for, render_template, flash
from flask_login import current_user, login_required, login_user, logout_user
from werkzeug.utils import secure_filename
from datetime import datetime, date, timedelta
from support.extensions import site, aws
from support.forms import LaunchForm, EditForm, make_checkbox
from support.models import User, Product, Details, Ingredient, Pick, db
bp = Blueprint('admin', __name__)
@bp.route('/admin/portal')
@login_required
def portal():
if current_user.email in site.ADMIN_LIST:
return render_template('admin/portal.html', user=current_user)
else:
return redirect('/')
@bp.route('/admin/products')
@login_required
def inventory():
db_products = Product.query.all()
products = [product for product in db_products]
product_link = "/".join([aws.S3_LINK, "products"])
return render_template('admin/inventory.html',
products=products,
product_link=product_link)
@bp.route('/admin/ingredients')
@login_required
def ingredients():
db_ingredients = Ingredient.query.all()
ingredients = [ingredient for ingredient in db_ingredients]
ingredient_link = "/".join([aws.S3_LINK, "ingredients"])
return render_template('admin/ingredients.html',
ingredients=ingredients,
ingredient_link=ingredient_link)
@bp.route('/admin/product/launch', methods=['POST', 'GET'])
@login_required
def launch():
if current_user.email in site.ADMIN_LIST:
db_ingredients = Ingredient.query.all()
ingredients = [ingredient.name for ingredient in db_ingredients]
if request.method == 'POST':
new_product = Product(
name=request.form.get('name'),
price=request.form.get('price'),
stock=request.form.get('stock')
)
db.session.add(new_product)
db.session.commit()
new_product_details = Details(
description=request.form.get('description'),
instructions=request.form.get('instructions')
)
db.session.add(new_product_details)
db.session.commit()
relevant_ingredients = request.form.getlist('ingredients')
for ingredient_name in relevant_ingredients:
ingredient = Ingredient.query.filter_by(name=ingredient_name).first()
new_product_details.ingredients.append(ingredient)
db.session.commit()
f = request.files['image']
filename = secure_filename(f.filename)
filename = str(new_product.id) + ".jpg"
directory = 'products/' + filename
s3_resource = boto3.resource('s3')
try:
s3_resource.Bucket(aws.S3_BUCKET).put_object(Key=directory, Body=f, ACL='public-read')
flash('Your item has been listed.')
except FileNotFoundError:
flash("The file was not found by the cloud.")
except NoCredentialsError:
flash("Credentials not available")
return redirect('/admin/portal')
return render_template('admin/launch.html', user=current_user, ingredients=ingredients)
else:
return redirect('/')
@bp.route('/admin/p.<int:id>/edit', methods=['POST', 'GET'])
@login_required
def edit(id):
product = Product.query.get_or_404(id)
if current_user.email in site.ADMIN_LIST:
form = EditForm()
if form.validate_on_submit():
product.name=form.name.data
product.price=form.price.data
product.stock=form.stock.data
db.session.commit()
return redirect('/admin/portal')
return render_template('admin/edit.html', form=form, product=product, user=current_user)
else:
return redirect('/')
@bp.route('/admin/p.<int:id>/delete')
@login_required
def delete(id):
product_to_delete = Product.query.get_or_404(id)
if current_user.email in site.ADMIN_LIST:
db.session.delete(product_to_delete)
db.session.commit()
flash('This product has been deleted from the eshop.')
return redirect('/admin/portal')
else:
return redirect('/')
@bp.route('/admin/add_ingredient', methods=['POST', 'GET'])
@login_required
def add_ingredient():
if current_user.email in site.ADMIN_LIST:
if request.method == 'POST':
new_ingredient = Ingredient(
name=request.form.get('name'),
source=request.form.get('source')
)
db.session.add(new_ingredient)
db.session.commit()
f = request.files['image']
filename = secure_filename(f.filename)
f.save(os.path.join('static', filename))
return redirect('/admin/portal')
return render_template('admin/ingredient.html', user=current_user)
else:
return redirect('/')
@bp.route('/admin/delete_i.<int:id>', methods=['POST', 'GET'])
@login_required
def delete_ingredient(id):
if current_user.email in site.ADMIN_LIST:
ingredient_to_delete = Ingredient.query.get_or_404(id)
db.session.delete(ingredient_to_delete)
db.session.commit()
return redirect('/admin/portal')
else:
return redirect('/') | views/admin.py |
import os
import boto3
from botocore.exceptions import NoCredentialsError
from flask import Flask, redirect, Blueprint, request, url_for, render_template, flash
from flask_login import current_user, login_required, login_user, logout_user
from werkzeug.utils import secure_filename
from datetime import datetime, date, timedelta
from support.extensions import site, aws
from support.forms import LaunchForm, EditForm, make_checkbox
from support.models import User, Product, Details, Ingredient, Pick, db
bp = Blueprint('admin', __name__)
@bp.route('/admin/portal')
@login_required
def portal():
if current_user.email in site.ADMIN_LIST:
return render_template('admin/portal.html', user=current_user)
else:
return redirect('/')
@bp.route('/admin/products')
@login_required
def inventory():
db_products = Product.query.all()
products = [product for product in db_products]
product_link = "/".join([aws.S3_LINK, "products"])
return render_template('admin/inventory.html',
products=products,
product_link=product_link)
@bp.route('/admin/ingredients')
@login_required
def ingredients():
db_ingredients = Ingredient.query.all()
ingredients = [ingredient for ingredient in db_ingredients]
ingredient_link = "/".join([aws.S3_LINK, "ingredients"])
return render_template('admin/ingredients.html',
ingredients=ingredients,
ingredient_link=ingredient_link)
@bp.route('/admin/product/launch', methods=['POST', 'GET'])
@login_required
def launch():
if current_user.email in site.ADMIN_LIST:
db_ingredients = Ingredient.query.all()
ingredients = [ingredient.name for ingredient in db_ingredients]
if request.method == 'POST':
new_product = Product(
name=request.form.get('name'),
price=request.form.get('price'),
stock=request.form.get('stock')
)
db.session.add(new_product)
db.session.commit()
new_product_details = Details(
description=request.form.get('description'),
instructions=request.form.get('instructions')
)
db.session.add(new_product_details)
db.session.commit()
relevant_ingredients = request.form.getlist('ingredients')
for ingredient_name in relevant_ingredients:
ingredient = Ingredient.query.filter_by(name=ingredient_name).first()
new_product_details.ingredients.append(ingredient)
db.session.commit()
f = request.files['image']
filename = secure_filename(f.filename)
filename = str(new_product.id) + ".jpg"
directory = 'products/' + filename
s3_resource = boto3.resource('s3')
try:
s3_resource.Bucket(aws.S3_BUCKET).put_object(Key=directory, Body=f, ACL='public-read')
flash('Your item has been listed.')
except FileNotFoundError:
flash("The file was not found by the cloud.")
except NoCredentialsError:
flash("Credentials not available")
return redirect('/admin/portal')
return render_template('admin/launch.html', user=current_user, ingredients=ingredients)
else:
return redirect('/')
@bp.route('/admin/p.<int:id>/edit', methods=['POST', 'GET'])
@login_required
def edit(id):
product = Product.query.get_or_404(id)
if current_user.email in site.ADMIN_LIST:
form = EditForm()
if form.validate_on_submit():
product.name=form.name.data
product.price=form.price.data
product.stock=form.stock.data
db.session.commit()
return redirect('/admin/portal')
return render_template('admin/edit.html', form=form, product=product, user=current_user)
else:
return redirect('/')
@bp.route('/admin/p.<int:id>/delete')
@login_required
def delete(id):
product_to_delete = Product.query.get_or_404(id)
if current_user.email in site.ADMIN_LIST:
db.session.delete(product_to_delete)
db.session.commit()
flash('This product has been deleted from the eshop.')
return redirect('/admin/portal')
else:
return redirect('/')
@bp.route('/admin/add_ingredient', methods=['POST', 'GET'])
@login_required
def add_ingredient():
if current_user.email in site.ADMIN_LIST:
if request.method == 'POST':
new_ingredient = Ingredient(
name=request.form.get('name'),
source=request.form.get('source')
)
db.session.add(new_ingredient)
db.session.commit()
f = request.files['image']
filename = secure_filename(f.filename)
f.save(os.path.join('static', filename))
return redirect('/admin/portal')
return render_template('admin/ingredient.html', user=current_user)
else:
return redirect('/')
@bp.route('/admin/delete_i.<int:id>', methods=['POST', 'GET'])
@login_required
def delete_ingredient(id):
if current_user.email in site.ADMIN_LIST:
ingredient_to_delete = Ingredient.query.get_or_404(id)
db.session.delete(ingredient_to_delete)
db.session.commit()
return redirect('/admin/portal')
else:
return redirect('/') | 0.308086 | 0.043244 |
import os
import xlsxwriter
import time
import pickle
import random
import numpy as np
import matplotlib.pyplot as plt
from classes.quiz import Quiz
from classes.save import Save
from classes.result import Overall_Results, Result
from classes.answer import Picture_Answer, Text_Answer, Answer
from classes.school import School, Student, Year_Group
from classes.question import Picture_Question, Text_Question
LOAD_FILE = "data.quiz"
def clear_screen():
"""Clears the screen
"""
os.system('cls' if os.name == 'nt' else 'clear')
def save_data(save_file):
"""Saves the quiz data to a file
Arguments:
save_file {Save} -- A save object containing all of the quiz's data
"""
# Uses pickle to dump the object into a byte array and then into a file
pickle.dump(save_file, open(LOAD_FILE, "wb"))
def main():
"""The main function that is run when the file is run
"""
# If there is a load file
if os.path.exists(LOAD_FILE):
# Load it
save = pickle.load(open(LOAD_FILE, "rb"))
else:
# Otherwise create a new save object and make a new save file for it
save = Save()
pickle.dump(save, open(LOAD_FILE, "wb"))
clear_screen()
category = setup(save)
clear_screen()
quiz(category, save)
def quiz(category, save):
"""Allows the user to complete the quiz
Arguments:
school {School} -- The school that the quiz is currently set up for
year {Year_Group} -- The year-group that the quiz is currently set up
for
category {str} -- The category that the questions shall be for
save {Save} -- The save file that shall be saved to disk
"""
while 1:
school = None
year = None
if save.schools:
school_choice = print_menu("Please choose a school", [
school.name for school in save.schools])
school = save.schools[school_choice]
else:
print("There are currently no schools to pick from. Please add a school to continue")
break
if school:
if school.year_groups:
yeargroup_choice = print_menu(
"Please choose a year-group", [year.year for year in school.year_groups])
year = school.year_groups[yeargroup_choice]
else:
print(
"There are currently no year-groups to pick from with your current choice of school. Please add a yeargroup to continue")
else:
print("Please set a school before setting a year-group")
questions = []
for question in save.questions:
if question.question_category == category:
questions.append(question)
if len(questions) < 10:
print("There are not enough questions for a quiz in this category")
break
else:
questions = random.sample(questions, 10)
student = Student(school, year)
random.shuffle(questions)
answers = []
for question in questions:
print()
index = random.randint(0, 3)
options = list(question.incorrect_answers)
options.insert(index, question.correct_answer)
choice = print_menu(question.question_text, options)
clear_screen()
if choice == index:
answers.append((question, Answer(True)))
print("\nCorrect!")
else:
answers.append((question, Answer(False)))
print("\nIncorrect...")
print("The correct answer is:", question.correct_answer)
result = Result(answers, student)
if save.results:
save.results = save.results + [result]
else:
save.results = [result]
print()
print("Congratulations! You scored: " + str(len(
[answer for answer in answers if answer[1].correct is True]
)) + "/" + str(len(answers)))
print()
save_data(save)
time.sleep(5)
clear_screen()
def setup(save):
"""The method run at startup to allow configuration of the quiz
Arguments:
save {Save} -- An object that holds all the data for the quiz so that
everything can be quickly saved
Returns:
tuple -- The school and yeargroup of the person answering the quiz,
and the
"""
category = None
print("Config menu")
print("===========")
print("To return to this menu, please close the program and then reopen\n")
while 1:
print("\nCurrent config:")
if category:
print("Category: " + category)
else:
print("Category: Not Selected")
choice = print_menu("Please choose an option",
["Start Quiz",
"Add School",
"Add Year-group",
"Set Category",
"Edit Questions",
"View Statistics"])
print()
clear_screen()
if choice == 0:
if category:
return category
else:
print("Please ensure you have entered a category")
elif choice == 1:
name = input("Please enter the school's name: ")
school_ = School()
school_.name = name
if save.schools:
save.schools = save.schools + [school_]
else:
save.schools = [school_]
elif choice == 2:
if save.schools:
year_school_choice = print_menu("Please select a school to add a year-group to:", [school.name for school in save.schools])
school_to_add_year_to = save.schools[year_school_choice]
name = input("Please enter the year-group name: ")
year_ = Year_Group(name)
if school_to_add_year_to.year_groups:
school_to_add_year_to.year_groups = school_to_add_year_to.year_groups + [year_]
else:
school_to_add_year_to.year_groups = [year_]
else:
print("Please add a school before adding a year-group")
elif choice == 3:
if save.questions:
q = []
for question in save.questions:
q.append(question.question_category)
q = list(set(q))
cat = print_menu("Please select a category", q)
category = q[cat]
else:
print("Please add questions before selecting a category")
elif choice == 4:
save.questions = question_editor(save.questions)
elif choice == 5:
show_stats(save)
save_data(save)
def show_stats(save):
"""Displays and exports statistics
Arguments:
save {Save} -- Contains all application data
"""
while 1:
choice = print_menu("What would you like to do?", ["Compare year-groups from a school", "Compare schools", "Export to Excel", "Quit stats viewer"])
clear_screen()
if choice == 0:
years = {}
if save.schools:
school_choice = print_menu("Please select a school:", [school.name for school in save.schools])
school = save.schools[school_choice]
if school.year_groups:
for year_group in school.year_groups:
years[year_group.year] = []
for year in years:
if save.results:
for result in save.results:
if result.student.school == school and result.student.year_group.year == year:
answers = result.result
years[year].append(len(
[answer for answer in answers if answer[1].correct is True]
))
else:
print("Please complete at least one quiz")
year_names = []
year_averages = []
for year in years:
years[year] = sum(years[year])/len(years[year])
year_names.append(year)
year_averages.append(years[year])
index = np.arange(len(year_names))
plt.bar(index, year_averages)
plt.xlabel('Year-groups')
plt.ylabel('Average Score')
plt.xticks(index, year_names)
plt.title('Averages for year-groups in ' + school.name)
plt.show()
else:
print("This school has no year-groups")
else:
print("There are no schools to display")
elif choice == 1:
school_results = {}
if save.schools:
for school in save.schools:
if save.results:
for result in save.results:
if result.student.school.name == school.name:
if school.name in school_results:
school_results[school.name].append(len(
[answer for answer in result.result if answer[1].correct is True]
))
else:
school_results[school.name] = [(len(
[answer for answer in result.result if answer[1].correct is True]
))]
school_names = []
school_averages = []
for school in school_results:
school_results[school] = sum(school_results[school])/len(school_results[school])
school_names.append(school)
school_averages.append(school_results[school])
index = np.arange(len(school_names))
plt.bar(index, school_averages)
plt.xlabel('Schools')
plt.ylabel('Average Score')
plt.xticks(index, school_names)
plt.title('Averages for schools')
plt.show()
else:
print("There are no schools to compare")
elif choice == 2:
try:
workbook = xlsxwriter.Workbook('data.xlsx')
worksheet = workbook.add_worksheet()
bold = workbook.add_format({'bold': True})
worksheet.write('A1', 'School', bold)
worksheet.write('B1', 'Year', bold)
worksheet.write('C1', 'Category', bold)
worksheet.write('D1', 'Result', bold)
row = 1
col = 0
if save.results:
for result in save.results:
worksheet.write(row, col, result.student.school.name)
worksheet.write(row, col + 1, result.student.year_group.year)
worksheet.write(row, col + 2, result.result[0][0].question_category)
worksheet.write(row, col + 3, str(len([answer for answer in result.result if answer[1].correct is True])))
row += 1
workbook.close()
print("Data successfully exported to data.xlsx")
else:
print("There is no data to export")
except PermissionError:
print("Please close the file before attempting to write to it")
elif choice == 3:
return
def question_editor(questions):
"""Creates an easy interface to edit the questions with
Arguments:
questions {list} -- The questions to edit
Returns:
list -- The edited questions
"""
if questions:
pass
else:
questions = []
while 1:
choice = print_menu("Would you like to:", ["Add a question", "Delete a question", "Quit the question editor"])
if choice == 0:
text = input("Please enter the question: ")
correct = input("Please enter the correct answer: ")
incorrect = [input("Please enter an incorrect answer: ") for i in range(0, 3)]
cat = input("Please enter a category: ")
questions.append(Text_Question(text, correct, incorrect, cat))
elif choice == 1:
if len(questions) > 0:
choice = print_menu("Please select a question to delete:", [q.question_text for q in questions])
del questions[choice]
else:
print("There are no questions to delete")
else:
return questions
def print_menu(statement, options):
"""Presents the user with a choice of options and allows the user to pick one
Arguments:
statement {str} -- The description of the choice
options {list} -- The possible options the user can pick
Returns:
int -- The index of the option the user picked from the options
"""
print(statement)
for i, option in enumerate(options, 1):
print(str(i) + ". " + option)
while 1:
try:
value = int(input("Please choose an option: "))
if 0 < value <= len(options):
return value - 1
print("Invalid input")
except ValueError:
print("Invalid input")
if __name__ == "__main__":
main() | main.py | import os
import xlsxwriter
import time
import pickle
import random
import numpy as np
import matplotlib.pyplot as plt
from classes.quiz import Quiz
from classes.save import Save
from classes.result import Overall_Results, Result
from classes.answer import Picture_Answer, Text_Answer, Answer
from classes.school import School, Student, Year_Group
from classes.question import Picture_Question, Text_Question
LOAD_FILE = "data.quiz"
def clear_screen():
"""Clears the screen
"""
os.system('cls' if os.name == 'nt' else 'clear')
def save_data(save_file):
"""Saves the quiz data to a file
Arguments:
save_file {Save} -- A save object containing all of the quiz's data
"""
# Uses pickle to dump the object into a byte array and then into a file
pickle.dump(save_file, open(LOAD_FILE, "wb"))
def main():
"""The main function that is run when the file is run
"""
# If there is a load file
if os.path.exists(LOAD_FILE):
# Load it
save = pickle.load(open(LOAD_FILE, "rb"))
else:
# Otherwise create a new save object and make a new save file for it
save = Save()
pickle.dump(save, open(LOAD_FILE, "wb"))
clear_screen()
category = setup(save)
clear_screen()
quiz(category, save)
def quiz(category, save):
"""Allows the user to complete the quiz
Arguments:
school {School} -- The school that the quiz is currently set up for
year {Year_Group} -- The year-group that the quiz is currently set up
for
category {str} -- The category that the questions shall be for
save {Save} -- The save file that shall be saved to disk
"""
while 1:
school = None
year = None
if save.schools:
school_choice = print_menu("Please choose a school", [
school.name for school in save.schools])
school = save.schools[school_choice]
else:
print("There are currently no schools to pick from. Please add a school to continue")
break
if school:
if school.year_groups:
yeargroup_choice = print_menu(
"Please choose a year-group", [year.year for year in school.year_groups])
year = school.year_groups[yeargroup_choice]
else:
print(
"There are currently no year-groups to pick from with your current choice of school. Please add a yeargroup to continue")
else:
print("Please set a school before setting a year-group")
questions = []
for question in save.questions:
if question.question_category == category:
questions.append(question)
if len(questions) < 10:
print("There are not enough questions for a quiz in this category")
break
else:
questions = random.sample(questions, 10)
student = Student(school, year)
random.shuffle(questions)
answers = []
for question in questions:
print()
index = random.randint(0, 3)
options = list(question.incorrect_answers)
options.insert(index, question.correct_answer)
choice = print_menu(question.question_text, options)
clear_screen()
if choice == index:
answers.append((question, Answer(True)))
print("\nCorrect!")
else:
answers.append((question, Answer(False)))
print("\nIncorrect...")
print("The correct answer is:", question.correct_answer)
result = Result(answers, student)
if save.results:
save.results = save.results + [result]
else:
save.results = [result]
print()
print("Congratulations! You scored: " + str(len(
[answer for answer in answers if answer[1].correct is True]
)) + "/" + str(len(answers)))
print()
save_data(save)
time.sleep(5)
clear_screen()
def setup(save):
"""The method run at startup to allow configuration of the quiz
Arguments:
save {Save} -- An object that holds all the data for the quiz so that
everything can be quickly saved
Returns:
tuple -- The school and yeargroup of the person answering the quiz,
and the
"""
category = None
print("Config menu")
print("===========")
print("To return to this menu, please close the program and then reopen\n")
while 1:
print("\nCurrent config:")
if category:
print("Category: " + category)
else:
print("Category: Not Selected")
choice = print_menu("Please choose an option",
["Start Quiz",
"Add School",
"Add Year-group",
"Set Category",
"Edit Questions",
"View Statistics"])
print()
clear_screen()
if choice == 0:
if category:
return category
else:
print("Please ensure you have entered a category")
elif choice == 1:
name = input("Please enter the school's name: ")
school_ = School()
school_.name = name
if save.schools:
save.schools = save.schools + [school_]
else:
save.schools = [school_]
elif choice == 2:
if save.schools:
year_school_choice = print_menu("Please select a school to add a year-group to:", [school.name for school in save.schools])
school_to_add_year_to = save.schools[year_school_choice]
name = input("Please enter the year-group name: ")
year_ = Year_Group(name)
if school_to_add_year_to.year_groups:
school_to_add_year_to.year_groups = school_to_add_year_to.year_groups + [year_]
else:
school_to_add_year_to.year_groups = [year_]
else:
print("Please add a school before adding a year-group")
elif choice == 3:
if save.questions:
q = []
for question in save.questions:
q.append(question.question_category)
q = list(set(q))
cat = print_menu("Please select a category", q)
category = q[cat]
else:
print("Please add questions before selecting a category")
elif choice == 4:
save.questions = question_editor(save.questions)
elif choice == 5:
show_stats(save)
save_data(save)
def show_stats(save):
    """Displays and exports statistics.

    Loops over a menu offering year-group comparison within a school,
    school-to-school comparison, and an Excel export of raw results.

    Arguments:
        save {Save} -- Contains all application data
    """
    while True:
        choice = print_menu("What would you like to do?", ["Compare year-groups from a school", "Compare schools", "Export to Excel", "Quit stats viewer"])
        clear_screen()
        if choice == 0:
            _plot_year_group_averages(save)
        elif choice == 1:
            _plot_school_averages(save)
        elif choice == 2:
            _export_results(save)
        elif choice == 3:
            return


def _plot_year_group_averages(save):
    """Bar-chart the average score of each year-group in one chosen school."""
    if not save.schools:
        print("There are no schools to display")
        return
    school_choice = print_menu("Please select a school:", [school.name for school in save.schools])
    school = save.schools[school_choice]
    if not school.year_groups:
        print("This school has no year-groups")
        return
    if not save.results:
        print("Please complete at least one quiz")
        return
    years = {year_group.year: [] for year_group in school.year_groups}
    for result in save.results:
        # Compare schools by name (as the school-comparison view does):
        # School objects may not compare equal after a save/load cycle.
        if result.student.school.name == school.name and result.student.year_group.year in years:
            answers = result.result
            years[result.student.year_group.year].append(len(
                [answer for answer in answers if answer[1].correct is True]
            ))
    year_names = []
    year_averages = []
    for year, scores in years.items():
        # Skip year-groups with no recorded results; averaging an empty
        # list previously raised ZeroDivisionError.
        if scores:
            year_names.append(year)
            year_averages.append(sum(scores) / len(scores))
    if not year_names:
        print("Please complete at least one quiz")
        return
    index = np.arange(len(year_names))
    plt.bar(index, year_averages)
    plt.xlabel('Year-groups')
    plt.ylabel('Average Score')
    plt.xticks(index, year_names)
    plt.title('Averages for year-groups in ' + school.name)
    plt.show()


def _plot_school_averages(save):
    """Bar-chart the average score of each school with at least one result."""
    if not save.schools:
        print("There are no schools to compare")
        return
    school_results = {}
    if save.results:
        for school in save.schools:
            for result in save.results:
                if result.student.school.name == school.name:
                    score = len([answer for answer in result.result if answer[1].correct is True])
                    school_results.setdefault(school.name, []).append(score)
    school_names = []
    school_averages = []
    for name, scores in school_results.items():
        school_names.append(name)
        school_averages.append(sum(scores) / len(scores))
    index = np.arange(len(school_names))
    plt.bar(index, school_averages)
    plt.xlabel('Schools')
    plt.ylabel('Average Score')
    plt.xticks(index, school_names)
    plt.title('Averages for schools')
    plt.show()


def _export_results(save):
    """Write one row per quiz result (school, year, category, score) to data.xlsx."""
    if not save.results:
        print("There is no data to export")
        return
    try:
        # Open the workbook only once we know there is data; previously the
        # workbook was created (and never closed) even when there was none.
        workbook = xlsxwriter.Workbook('data.xlsx')
        worksheet = workbook.add_worksheet()
        bold = workbook.add_format({'bold': True})
        worksheet.write('A1', 'School', bold)
        worksheet.write('B1', 'Year', bold)
        worksheet.write('C1', 'Category', bold)
        worksheet.write('D1', 'Result', bold)
        for row, result in enumerate(save.results, start=1):
            worksheet.write(row, 0, result.student.school.name)
            worksheet.write(row, 1, result.student.year_group.year)
            # Category is taken from the first answered question of the result.
            worksheet.write(row, 2, result.result[0][0].question_category)
            worksheet.write(row, 3, str(len([answer for answer in result.result if answer[1].correct is True])))
        workbook.close()
        print("Data successfully exported to data.xlsx")
    except PermissionError:
        print("Please close the file before attempting to write to it")
def question_editor(questions):
    """Interactive editor for the quiz's question bank.

    Arguments:
        questions {list} -- The questions to edit (may be None or empty)

    Returns:
        list -- The edited questions
    """
    questions = questions or []
    while True:
        action = print_menu("Would you like to:", ["Add a question", "Delete a question", "Quit the question editor"])
        if action == 0:
            text = input("Please enter the question: ")
            correct = input("Please enter the correct answer: ")
            incorrect = [input("Please enter an incorrect answer: ") for _ in range(3)]
            cat = input("Please enter a category: ")
            questions.append(Text_Question(text, correct, incorrect, cat))
        elif action == 1:
            if questions:
                doomed = print_menu("Please select a question to delete:", [q.question_text for q in questions])
                questions.pop(doomed)
            else:
                print("There are no questions to delete")
        else:
            return questions
def print_menu(statement, options):
    """Present a numbered menu and return the index of the chosen option.

    Arguments:
        statement {str} -- The description of the choice
        options {list} -- The possible options the user can pick

    Returns:
        int -- The index of the option the user picked from the options
    """
    print(statement)
    for number, option in enumerate(options, 1):
        print(str(number) + ". " + option)
    while True:
        try:
            selection = int(input("Please choose an option: "))
        except ValueError:
            print("Invalid input")
            continue
        # Menu is 1-based on screen; convert back to a 0-based index.
        if 0 < selection <= len(options):
            return selection - 1
        print("Invalid input")
if __name__ == "__main__":
main() | 0.380529 | 0.277479 |
import argparse
import glob
import json
import os
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
def create_env(name, pkgs, channel=None, yes=False):
    """Create a conda environment called *name* containing *pkgs*.

    *channel* optionally adds a package channel; *yes* skips confirmation.
    """
    pieces = ['conda create --name {0}'.format(name)]
    if channel:
        pieces.append('--channel {0}'.format(channel))
    if yes:
        pieces.append('--yes')
    pieces.append(' '.join(pkgs))
    subprocess.check_call(' '.join(pieces).split(' '))
def list_envs():
    """Return the parsed JSON output of `conda env list --json`."""
    raw = subprocess.check_output(['conda', 'env', 'list', '--json'])
    return json.loads(raw)
def remove_env(name, yes=False):
    """Delete the conda environment called *name* (non-interactive if *yes*)."""
    cmd = 'conda env remove --name {0}'.format(name)
    if yes:
        cmd += ' --yes'
    subprocess.check_call(cmd.split(' '))
def env_exists(env_name, envs):
    """Return True when *env_name* is the basename of any path in envs['envs']."""
    return any(os.path.basename(path) == env_name for path in envs['envs'])
def get_env_path(env_name, envs):
    """Return the full path of the environment named *env_name*, or None."""
    matches = (path for path in envs['envs'] if os.path.basename(path) == env_name)
    return next(matches, None)
def build_recipe(recipe):
    """Invoke `conda build` on the recipe at *recipe*."""
    subprocess.check_call(shlex.split('conda build {0}'.format(recipe)))
def _get_python_path(env):
    """Return the path of the python executable inside the env at *env*."""
    suffix = 'python' if sys.platform == 'win32' else 'bin/python'
    return '{0}/{1}'.format(env, suffix)
def _get_pip_path(env):
    """Return the path of the pip executable inside the env at *env*."""
    suffix = 'Scripts/pip' if sys.platform == 'win32' else 'bin/pip'
    return '{0}/{1}'.format(env, suffix)
def pip_install(env, pkgs):
    """Install *pkgs* into the environment at *env* using that env's pip."""
    cmd = '{0} install {1}'.format(_get_pip_path(env), ' '.join(pkgs))
    subprocess.check_call(cmd.split(' '))
def python_develop(env, pkg_path):
    """Run `setup.py develop --no-deps` in *pkg_path* with the env's python."""
    cmd = '{0} setup.py develop --no-deps'.format(_get_python_path(env))
    subprocess.check_call(cmd.split(' '), cwd=pkg_path)
def main():
    """Parse command-line args and dispatch to the handler bound by set_defaults()."""
    args = parser.parse_args()
    args.func(args)
def build_release(args):
    """Build the conda recipe of every OpenMDAO package under conda-recipes/."""
    recipes_root = os.path.join(root, 'conda-recipes')
    for pkg_name in openmdao.keys():
        build_recipe(os.path.join(recipes_root, pkg_name))
def build_bundle(args):
    """Create openmdao.tar containing the requested OpenMDAO release.

    Installs OpenMDAO into a throwaway conda environment rooted in a temp
    directory, tars every downloaded package from the env's package cache,
    and moves the archive back to the starting directory.
    """
    version = args.version
    temp_dir = tempfile.mkdtemp()
    start_dir = os.getcwd()
    try:
        # Point conda at the temp dir so the bundle env (and its package
        # cache) never touches the user's real environments.
        # NOTE(review): os.environ['CONDA_ENVS_PATH'] = temp_dir would be the
        # more conventional spelling; kept as putenv to preserve behavior.
        os.putenv('CONDA_ENVS_PATH', temp_dir)
        # 1. Install OpenMDAO to a temporary conda environment
        # 2. Grab all packages
        # 3. Make tar file
        create_env(
            'openmdao-bundle',
            ['openmdao=={version}'.format(version=version)],
            channel='http://conda.binstar.org/openmdao',
            yes=True
        )
        os.chdir('{envs_path}/.pkgs'.format(envs_path=temp_dir))
        pkgs = glob.glob('*.tar.bz2')
        # Fixed: a second, never-closed handle to 'openmdao.tar' used to be
        # opened here before the with-block, leaking a file descriptor.
        with tarfile.open('openmdao.tar', mode='w') as tar:
            for pkg in pkgs:
                tar.add(pkg, recursive=False)
        shutil.move(
            'openmdao.tar',
            '{start_dir}/openmdao.tar'.format(start_dir=start_dir)
        )
    finally:
        # Always restore the working directory and clean up the temp env.
        os.chdir(start_dir)
        os.unsetenv('CONDA_ENVS_PATH')
        shutil.rmtree(temp_dir)
def build_dev(args):
    """Build a development conda environment with every OpenMDAO package
    installed via `python setup.py develop`.

    Arguments:
        args -- argparse namespace with `env` (environment name) and
            `force` (rebuild the environment if it already exists).

    Fixed: Python-2-only `print` statements replaced with `print()` calls,
    which behave identically on Python 2 for a single argument and keep the
    module importable under Python 3.
    """
    env_name = args.env
    force = args.force
    # Remove environment if --force is True
    if force and env_exists(env_name, list_envs()):
        remove_env(env_name, yes=True)
    # Create conda environment
    create_env(env_name, pkgs, channel='http://conda.binstar.org/openmdao', yes=True)
    envs = list_envs()
    # use pip to install virtualenv because conda can't install version 1.9.1
    pip_install(get_env_path(env_name, envs), ['virtualenv==1.9.1'])
    # Prior steps to correctly build bar3simulation
    pkg_path = openmdao['openmdao.examples.bar3simulation']
    pkg_path = os.path.join(root, pkg_path)
    # 1. Forcibly remove the bar3 extension if it exists
    try:
        os.remove('{pkg_path}/openmdao/examples/bar3simulation/bar3.so'.format(
            pkg_path=pkg_path)
        )
    except Exception as error:
        print(error)
    # 2. Forcibly remove any build directories
    try:
        shutil.rmtree('{pkg_path}/build'.format(pkg_path=pkg_path))
    except Exception as error:
        print(error)
    # 3. Forcibly remove any dist directories
    try:
        shutil.rmtree('{pkg_path}/dist'.format(pkg_path=pkg_path))
    except Exception as error:
        print(error)
    # Install all OpenMDAO packages using `python setup.py develop`
    for pkg_path in openmdao.values():
        python_develop(
            get_env_path(env_name, envs),
            os.path.join(root, pkg_path)
        )
    msg = "\nTo activate the environment, use the following command:\n\n\t {cmd} {env}\n"
    if sys.platform == 'win32':
        print(msg.format(cmd='activate', env=env_name))
    else:
        print(msg.format(cmd='source activate', env=env_name))
    msg = "To deactivate the environment, use the following command:\n\n\t {cmd}\n"
    if sys.platform == 'win32':
        print(msg.format(cmd='deactivate'))
    else:
        print(msg.format(cmd='source deactivate'))
# Path to root directory
# Should be ../../../../
root = os.path.abspath(os.path.dirname(__file__))
root = os.path.join(
    root,
    os.path.pardir,
    os.path.pardir,
    os.path.pardir,
    os.path.pardir
)
# openmdao dependencies
# Pinned package list installed into every dev environment by build_dev().
pkgs = [
    'pip',
    'numpy',
    'scipy',
    'setuptools',
    'pyparsing',
    'traits==4.3.0',
    'nose',
    'sphinx==1.2.2',
    'fabric==0.9.3',
    'boto',
    'paramiko==1.7.7.1',
    'requests',
    'decorator',
    'mock',
    'networkx',
    'zope.interface',
    'pytz>=2014.4',
    'pycrypto==2.3',
    'cobyla==1.0.2',
    'conmin==1.0.2',
    'slsqp==1.0.2',
    'newsumt==1.1.1',
    'bson',
    'pyevolve',
]
# Map of OpenMDAO package name -> source path relative to the repo root.
openmdao = {
    'openmdao.units' : 'openmdao.units',
    'openmdao.util' : 'openmdao.util',
    'openmdao.test' : 'openmdao.test',
    'openmdao.devtools' : 'openmdao.devtools',
    'openmdao.main' : 'openmdao.main',
    'openmdao.lib' : 'openmdao.lib',
    'openmdao.examples.bar3simulation' : 'examples/openmdao.examples.bar3simulation',
    'openmdao.examples.expected_improvement' : 'examples/openmdao.examples.expected_improvement',
    'openmdao.examples.mdao' : 'examples/openmdao.examples.mdao',
    'openmdao.examples.metamodel_tutorial' : 'examples/openmdao.examples.metamodel_tutorial',
    'openmdao.examples.nozzle_geometry_doe' : 'examples/openmdao.examples.nozzle_geometry_doe',
    'openmdao.examples.simple' : 'examples/openmdao.examples.simple',
}
# Command-line interface: `dev`, `bundle` and `release` sub-commands, each
# dispatching through main() via set_defaults(func=...).
parser = argparse.ArgumentParser(description="Process some arguments to build.py")
sub_parsers = parser.add_subparsers()
dev_parser = sub_parsers.add_parser('dev', help='help for building dev version')
dev_parser.add_argument('--env', type=str, default='openmdao-dev', help='name of environment')
dev_parser.add_argument('--force', default=False, action='store_true', help="force environment to be rebuilt if it already exists")
dev_parser.set_defaults(func=build_dev)
# Resolve the OpenMDAO version: import it when openmdao is installed,
# otherwise ask the source tree directly (the embedded command string uses
# Python 2 `print` syntax deliberately -- it runs under the tree's python).
try:
    from openmdao.main.releaseinfo import __version__
    version = __version__
except ImportError as error:
    cmd = 'python -c "import releaseinfo; print releaseinfo.__version__"'
    cwd = os.path.join(
        root,
        'openmdao.main',
        'src',
        'openmdao',
        'main'
    )
    version = subprocess.check_output(shlex.split(cmd), cwd=cwd)
bundle_parser = sub_parsers.add_parser('bundle', help='build conda package that includes OpenMDAO and all dependencies')
bundle_parser.add_argument('-v', '--version', type=str, default=version, help="version of OpenMDAO to bundle")
bundle_parser.set_defaults(func=build_bundle)
release_parser = sub_parsers.add_parser('release', help='build conda release packages for OpenMDAO')
release_parser.set_defaults(func=build_release)
if __name__ == "__main__":
main() | openmdao.devtools/src/openmdao/devtools/conda_build.py | import argparse
import glob
import json
import os
import shlex
import shutil
import subprocess
import sys
import tarfile
import tempfile
def create_env(name, pkgs, channel=None, yes=False):
cmd = 'conda create --name {name}'.format(name=name)
if channel:
cmd = '{cmd} --channel {channel}'.format(cmd=cmd, channel=channel)
if yes:
cmd = '{cmd} --yes'.format(cmd=cmd)
cmd = '{cmd} {pkgs}'.format(cmd=cmd, pkgs=' '.join(pkgs))
subprocess.check_call(cmd.split(' '))
def list_envs():
cmd = 'conda env list --json'
return json.loads(subprocess.check_output(cmd.split(' ')))
def remove_env(name, yes=False):
cmd = 'conda env remove --name {name}'.format(name=name)
if yes:
cmd = '{cmd} --yes'.format(cmd=cmd)
subprocess.check_call(cmd.split(' '))
def env_exists(env_name, envs):
for env in envs['envs']:
if os.path.basename(env) == env_name:
return True
return False
def get_env_path(env_name, envs):
for env in envs['envs']:
if os.path.basename(env) == env_name:
return env
return None
def build_recipe(recipe):
cmd = 'conda build {recipe}'.format(recipe= recipe)
subprocess.check_call(shlex.split(cmd))
def _get_python_path(env):
if sys.platform == 'win32':
path = '{path}/python'.format(path=env)
else:
path = '{path}/bin/python'.format(path=env)
return path
def _get_pip_path(env):
if sys.platform == 'win32':
path = '{path}/Scripts/pip'.format(path=env)
else:
path = '{path}/bin/pip'.format(path=env)
return path
def pip_install(env, pkgs):
cmd = '{pip} install {pkgs}'.format(
pip=_get_pip_path(env),
pkgs=' '.join(pkgs)
)
subprocess.check_call(cmd.split(' '))
def python_develop(env, pkg_path):
cmd = '{python_path} setup.py develop --no-deps'.format(
python_path=_get_python_path(env)
)
subprocess.check_call(cmd.split(' '), cwd=pkg_path)
def main():
args = parser.parse_args()
args.func(args)
def build_release(args):
conda_recipes_root = os.path.join(
root,
'conda-recipes'
)
for pkg in openmdao.keys():
recipe_path = os.path.join(
conda_recipes_root,
pkg
)
build_recipe(recipe_path)
def build_bundle(args):
version = args.version
temp_dir = tempfile.mkdtemp()
start_dir = os.getcwd()
try:
os.putenv('CONDA_ENVS_PATH', temp_dir)
# 1. Install OpenMDAO to a temporary conda environment
# 2. Grab all packages
# 3. Make tar file
create_env(
'openmdao-bundle',
['openmdao=={version}'.format(version=version)],
channel='http://conda.binstar.org/openmdao',
yes=True
)
os.chdir('{envs_path}/.pkgs'.format(envs_path=temp_dir))
pkgs = glob.glob('*.tar.bz2')
out = tarfile.open('openmdao.tar', mode='w')
with tarfile.open('openmdao.tar', mode='w') as tar:
for pkg in pkgs:
tar.add(pkg, recursive=False)
shutil.move(
'openmdao.tar',
'{start_dir}/openmdao.tar'.format(start_dir=start_dir)
)
finally:
os.chdir(start_dir)
os.unsetenv('CONDA_ENVS_PATH')
shutil.rmtree(temp_dir)
def build_dev(args):
env_name = args.env
force = args.force
# Remove environment if --force is True
if force and env_exists(env_name, list_envs()):
remove_env(env_name, yes=True)
# Create conda environment
create_env(env_name, pkgs, channel='http://conda.binstar.org/openmdao', yes=True)
envs = list_envs()
# use pip to install virtualenv because conda can't install version 1.9.1
pip_install(get_env_path(env_name, envs), ['virtualenv==1.9.1'])
# Prior steps to correctly build bar3simulation
pkg_path = openmdao['openmdao.examples.bar3simulation']
pkg_path = os.path.join(root, pkg_path)
# 1. Forcibly remove the bar3 extension if it exists
try:
os.remove('{pkg_path}/openmdao/examples/bar3simulation/bar3.so'.format(
pkg_path=pkg_path)
)
except Exception as error:
print error
# 2. Forcibly remove any build directories
try:
shutil.rmtree('{pkg_path}/build'.format(pkg_path=pkg_path))
except Exception as error:
print error
# 3. Forcibly remove any dist directories
try:
shutil.rmtree('{pkg_path}/dist'.format(pkg_path=pkg_path))
except Exception as error:
print error
# Install all OpenMDAO packages using `python setup.py develop`
for pkg_path in openmdao.values():
python_develop(
get_env_path(env_name, envs),
os.path.join(root, pkg_path)
)
msg = "\nTo activate the environment, use the following command:\n\n\t {cmd} {env}\n"
if sys.platform == 'win32':
print msg.format(cmd='activate', env=env_name)
else:
print msg.format(cmd='source activate', env=env_name)
msg = "To deactivate the environment, use the following command:\n\n\t {cmd}\n"
if sys.platform == 'win32':
print msg.format(cmd='deactivate')
else:
print msg.format(cmd='source deactivate')
# Path to root directory
# Should be ../../../../
root = os.path.abspath(os.path.dirname(__file__))
root = os.path.join(
root,
os.path.pardir,
os.path.pardir,
os.path.pardir,
os.path.pardir
)
# openmdao dependencies
pkgs = [
'pip',
'numpy',
'scipy',
'setuptools',
'pyparsing',
'traits==4.3.0',
'nose',
'sphinx==1.2.2',
'fabric==0.9.3',
'boto',
'paramiko==1.7.7.1',
'requests',
'decorator',
'mock',
'networkx',
'zope.interface',
'pytz>=2014.4',
'pycrypto==2.3',
'cobyla==1.0.2',
'conmin==1.0.2',
'slsqp==1.0.2',
'newsumt==1.1.1',
'bson',
'pyevolve',
]
openmdao = {
'openmdao.units' : 'openmdao.units',
'openmdao.util' : 'openmdao.util',
'openmdao.test' : 'openmdao.test',
'openmdao.devtools' : 'openmdao.devtools',
'openmdao.main' : 'openmdao.main',
'openmdao.lib' : 'openmdao.lib',
'openmdao.examples.bar3simulation' : 'examples/openmdao.examples.bar3simulation',
'openmdao.examples.expected_improvement' : 'examples/openmdao.examples.expected_improvement',
'openmdao.examples.mdao' : 'examples/openmdao.examples.mdao',
'openmdao.examples.metamodel_tutorial' : 'examples/openmdao.examples.metamodel_tutorial',
'openmdao.examples.nozzle_geometry_doe' : 'examples/openmdao.examples.nozzle_geometry_doe',
'openmdao.examples.simple' : 'examples/openmdao.examples.simple',
}
parser = argparse.ArgumentParser(description="Process some arguments to build.py")
sub_parsers = parser.add_subparsers()
dev_parser = sub_parsers.add_parser('dev', help='help for building dev version')
dev_parser.add_argument('--env', type=str, default='openmdao-dev', help='name of environment')
dev_parser.add_argument('--force', default=False, action='store_true', help="force environment to be rebuilt if it already exists")
dev_parser.set_defaults(func=build_dev)
try:
from openmdao.main.releaseinfo import __version__
version = __version__
except ImportError as error:
cmd = 'python -c "import releaseinfo; print releaseinfo.__version__"'
cwd = os.path.join(
root,
'openmdao.main',
'src',
'openmdao',
'main'
)
version = subprocess.check_output(shlex.split(cmd), cwd=cwd)
bundle_parser = sub_parsers.add_parser('bundle', help='build conda package that includes OpenMDAO and all dependencies')
bundle_parser.add_argument('-v', '--version', type=str, default=version, help="version of OpenMDAO to bundle")
bundle_parser.set_defaults(func=build_bundle)
release_parser = sub_parsers.add_parser('release', help='build conda release packages for OpenMDAO')
release_parser.set_defaults(func=build_release)
if __name__ == "__main__":
main() | 0.203233 | 0.083143 |
from __future__ import absolute_import, unicode_literals
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from celery import states
from celery.events.state import Task
from celery.events.snapshot import Polaroid
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.timeutils import maybe_iso8601
from .models import WorkerState, TaskState
from .utils import fromtimestamp, correct_awareness
WORKER_UPDATE_FREQ = 60  # limit worker timestamp write freq.
SUCCESS_STATES = frozenset([states.SUCCESS])
# Expiry can be timedelta or None for never expire.
EXPIRE_SUCCESS = getattr(settings, 'CELERYCAM_EXPIRE_SUCCESS',
                         timedelta(days=1))
EXPIRE_ERROR = getattr(settings, 'CELERYCAM_EXPIRE_ERROR',
                       timedelta(days=3))
EXPIRE_PENDING = getattr(settings, 'CELERYCAM_EXPIRE_PENDING',
                         timedelta(days=5))
# Fields that only arrive with the task-received event; they are removed
# from update defaults when None so later events don't overwrite them.
NOT_SAVED_ATTRIBUTES = frozenset(['name', 'args', 'kwargs', 'eta'])
logger = get_logger(__name__)
debug = logger.debug
class Camera(Polaroid):
    """Celery event-snapshot camera that persists task/worker state to Django models."""

    # Model classes used for persistence; overridable on subclasses.
    TaskState = TaskState
    WorkerState = WorkerState
    clear_after = True
    # Minimum seconds between two heartbeat writes for the same worker.
    worker_update_freq = WORKER_UPDATE_FREQ
    # Maps a frozenset of task states to how long rows in those states
    # are kept before on_cleanup() expires them.
    expire_states = {
        SUCCESS_STATES: EXPIRE_SUCCESS,
        states.EXCEPTION_STATES: EXPIRE_ERROR,
        states.UNREADY_STATES: EXPIRE_PENDING,
    }

    def __init__(self, *args, **kwargs):
        super(Camera, self).__init__(*args, **kwargs)
        # hostname -> (monotonic timestamp of last DB write, persisted object);
        # defaults to (None, None) for hosts never written.
        self._last_worker_write = defaultdict(lambda: (None, None))

    def get_heartbeat(self, worker):
        """Return the worker's most recent heartbeat as a datetime, or None."""
        try:
            heartbeat = worker.heartbeats[-1]
        except IndexError:
            # No heartbeats recorded yet.
            return
        return fromtimestamp(heartbeat)

    def handle_worker(self, hostname_worker):
        """Persist a worker's latest heartbeat, throttled per hostname.

        Skips the database write if the last write for this hostname was
        less than `worker_update_freq` seconds ago, returning the cached
        object instead.
        """
        (hostname, worker) = hostname_worker
        last_write, obj = self._last_worker_write[hostname]
        if not last_write or \
                monotonic() - last_write > self.worker_update_freq:
            obj = self.WorkerState.objects.update_or_create(
                hostname=hostname,
                defaults={'last_heartbeat': self.get_heartbeat(worker)},
            )
            self._last_worker_write[hostname] = (monotonic(), obj)
        return obj

    def handle_task(self, uuid_task, worker=None):
        """Handle snapshotted event."""
        uuid, task = uuid_task
        if task.worker and task.worker.hostname:
            worker = self.handle_worker(
                (task.worker.hostname, task.worker),
            )
        defaults = {
            'name': task.name,
            'args': task.args,
            'kwargs': task.kwargs,
            'eta': correct_awareness(maybe_iso8601(task.eta)),
            'expires': correct_awareness(maybe_iso8601(task.expires)),
            'state': task.state,
            'tstamp': fromtimestamp(task.timestamp),
            'result': task.result or task.exception,
            'traceback': task.traceback,
            'runtime': task.runtime,
            'worker': worker
        }
        # Some fields are only stored in the RECEIVED event,
        # so we should remove these from default values,
        # so that they are not overwritten by subsequent states.
        [defaults.pop(attr, None) for attr in NOT_SAVED_ATTRIBUTES
            if defaults[attr] is None]
        return self.update_task(task.state,
                                task_id=uuid, defaults=defaults)

    def update_task(self, state, **kwargs):
        """Create or update a TaskState row from event data.

        When the incoming event is *older* than the stored state (e.g. a
        late RECEIVED after SUCCESS), fields protected by celery's merge
        rules for RECEIVED are dropped so newer data is not clobbered.
        """
        objects = self.TaskState.objects
        defaults = kwargs.pop('defaults', None) or {}
        if not defaults.get('name'):
            # Ignore events for tasks whose name we never learned.
            return
        obj, created = objects.get_or_create(defaults=defaults, **kwargs)
        if created:
            return obj
        else:
            if states.state(state) < states.state(obj.state):
                keep = Task.merge_rules[states.RECEIVED]
                defaults = dict(
                    (k, v) for k, v in defaults.items()
                    if k not in keep
                )
        for k, v in defaults.items():
            setattr(obj, k, v)
        obj.save()
        return obj

    def on_shutter(self, state, commit_every=100):
        """Persist every task and worker captured in the snapshot *state*.

        NOTE(review): commit_every is currently unused -- presumably a
        leftover batching knob; confirm before relying on it.
        """
        def _handle_tasks():
            for i, task in enumerate(state.tasks.items()):
                self.handle_task(task)

        for worker in state.workers.items():
            self.handle_worker(worker)
        _handle_tasks()

    def on_cleanup(self):
        """Expire old TaskState rows per expire_states and purge them.

        Returns the number of rows marked dirty (then purged).
        """
        # NOTE(review): the loop target `states` shadows the imported celery
        # `states` module inside this generator expression (harmless here,
        # since the generator has its own scope, but confusing to readers).
        expired = (self.TaskState.objects.expire_by_states(states, expires)
                   for states, expires in self.expire_states.items())
        dirty = sum(item for item in expired if item is not None)
        if dirty:
            debug('Cleanup: Marked %s objects as dirty.', dirty)
            self.TaskState.objects.purge()
            debug('Cleanup: %s objects purged.', dirty)
            return dirty
return 0 | djcelery/snapshot.py | from __future__ import absolute_import, unicode_literals
from collections import defaultdict
from datetime import timedelta
from django.conf import settings
from celery import states
from celery.events.state import Task
from celery.events.snapshot import Polaroid
from celery.five import monotonic
from celery.utils.log import get_logger
from celery.utils.timeutils import maybe_iso8601
from .models import WorkerState, TaskState
from .utils import fromtimestamp, correct_awareness
WORKER_UPDATE_FREQ = 60 # limit worker timestamp write freq.
SUCCESS_STATES = frozenset([states.SUCCESS])
# Expiry can be timedelta or None for never expire.
EXPIRE_SUCCESS = getattr(settings, 'CELERYCAM_EXPIRE_SUCCESS',
timedelta(days=1))
EXPIRE_ERROR = getattr(settings, 'CELERYCAM_EXPIRE_ERROR',
timedelta(days=3))
EXPIRE_PENDING = getattr(settings, 'CELERYCAM_EXPIRE_PENDING',
timedelta(days=5))
NOT_SAVED_ATTRIBUTES = frozenset(['name', 'args', 'kwargs', 'eta'])
logger = get_logger(__name__)
debug = logger.debug
class Camera(Polaroid):
TaskState = TaskState
WorkerState = WorkerState
clear_after = True
worker_update_freq = WORKER_UPDATE_FREQ
expire_states = {
SUCCESS_STATES: EXPIRE_SUCCESS,
states.EXCEPTION_STATES: EXPIRE_ERROR,
states.UNREADY_STATES: EXPIRE_PENDING,
}
def __init__(self, *args, **kwargs):
super(Camera, self).__init__(*args, **kwargs)
self._last_worker_write = defaultdict(lambda: (None, None))
def get_heartbeat(self, worker):
try:
heartbeat = worker.heartbeats[-1]
except IndexError:
return
return fromtimestamp(heartbeat)
def handle_worker(self, hostname_worker):
(hostname, worker) = hostname_worker
last_write, obj = self._last_worker_write[hostname]
if not last_write or \
monotonic() - last_write > self.worker_update_freq:
obj = self.WorkerState.objects.update_or_create(
hostname=hostname,
defaults={'last_heartbeat': self.get_heartbeat(worker)},
)
self._last_worker_write[hostname] = (monotonic(), obj)
return obj
def handle_task(self, uuid_task, worker=None):
"""Handle snapshotted event."""
uuid, task = uuid_task
if task.worker and task.worker.hostname:
worker = self.handle_worker(
(task.worker.hostname, task.worker),
)
defaults = {
'name': task.name,
'args': task.args,
'kwargs': task.kwargs,
'eta': correct_awareness(maybe_iso8601(task.eta)),
'expires': correct_awareness(maybe_iso8601(task.expires)),
'state': task.state,
'tstamp': fromtimestamp(task.timestamp),
'result': task.result or task.exception,
'traceback': task.traceback,
'runtime': task.runtime,
'worker': worker
}
# Some fields are only stored in the RECEIVED event,
# so we should remove these from default values,
# so that they are not overwritten by subsequent states.
[defaults.pop(attr, None) for attr in NOT_SAVED_ATTRIBUTES
if defaults[attr] is None]
return self.update_task(task.state,
task_id=uuid, defaults=defaults)
def update_task(self, state, **kwargs):
objects = self.TaskState.objects
defaults = kwargs.pop('defaults', None) or {}
if not defaults.get('name'):
return
obj, created = objects.get_or_create(defaults=defaults, **kwargs)
if created:
return obj
else:
if states.state(state) < states.state(obj.state):
keep = Task.merge_rules[states.RECEIVED]
defaults = dict(
(k, v) for k, v in defaults.items()
if k not in keep
)
for k, v in defaults.items():
setattr(obj, k, v)
obj.save()
return obj
def on_shutter(self, state, commit_every=100):
def _handle_tasks():
for i, task in enumerate(state.tasks.items()):
self.handle_task(task)
for worker in state.workers.items():
self.handle_worker(worker)
_handle_tasks()
def on_cleanup(self):
expired = (self.TaskState.objects.expire_by_states(states, expires)
for states, expires in self.expire_states.items())
dirty = sum(item for item in expired if item is not None)
if dirty:
debug('Cleanup: Marked %s objects as dirty.', dirty)
self.TaskState.objects.purge()
debug('Cleanup: %s objects purged.', dirty)
return dirty
return 0 | 0.623377 | 0.110759 |
from unittest import TestCase
from unittest.mock import MagicMock
from pyramid.config import Configurator
from pyramid_restful.routers import ViewSetRouter, Route
from pyramid_restful.viewsets import ModelCRUDViewSet, APIViewSet
from pyramid_restful.exceptions import ImproperlyConfigured
class MyCRUDViewSet(ModelCRUDViewSet):
    """Minimal concrete CRUD viewset used as a fixture in the router tests."""
    pass
class ReadOnlyViewSet(APIViewSet):
    """Fixture viewset exposing only the read actions (list/retrieve)."""

    def list(self):
        pass

    def retrieve(self):
        pass
class ViewSetRouterTests(TestCase):
    """Unit tests for ViewSetRouter against a mocked Pyramid Configurator."""

    def setUp(self):
        self.config = MagicMock(spec=Configurator)
        self.router = ViewSetRouter(self.config)

    def test_get_routes(self):
        """Dynamic detail/list routes are generated alongside the defaults."""
        viewset = MyCRUDViewSet()

        # add mock detail_route and list_route methods
        def detail_route():
            pass

        viewset.detail_route = detail_route
        viewset.detail_route.bind_to_methods = ['GET']
        viewset.detail_route.kwargs = {}
        viewset.detail_route.detail = True

        def list_route():
            pass

        viewset.list_route = list_route
        viewset.list_route.bind_to_methods = ['GET']
        viewset.list_route.kwargs = {}
        viewset.list_route.detail = False

        routes = self.router.get_routes(viewset)
        expected = [
            Route(url='/{prefix}{trailing_slash}', mapping={'get': 'list', 'post': 'create'}, name='{basename}-list',
                  initkwargs={}),
            Route(url='/{prefix}/list_route{trailing_slash}', mapping={'get': 'list_route'},
                  name='{basename}-list-route', initkwargs={}),
            Route(url='/{prefix}/{lookup}{trailing_slash}',
                  mapping={'get': 'retrieve', 'put': 'update', 'patch': 'partial_update', 'delete': 'destroy'},
                  name='{basename}-detail', initkwargs={}),
            Route(url='/{prefix}/{lookup}/detail_route{trailing_slash}', mapping={'get': 'detail_route'},
                  name='{basename}-detail-route', initkwargs={})]

        assert routes == expected

    def test_improperly_configured_dynamic_route(self):
        """A dynamic route named after a default action must be rejected."""
        viewset = MyCRUDViewSet()

        # add mock detail_route and list_route methods
        def retrieve():
            pass

        viewset.retrieve = retrieve
        viewset.retrieve.bind_to_methods = ['GET']
        viewset.retrieve.kwargs = {}
        viewset.retrieve.detail = True

        self.assertRaises(ImproperlyConfigured, self.router.get_routes, viewset)

    def test_get_lookup(self):
        """The URL lookup placeholder honors lookup_field / lookup_url_kwargs."""
        viewset = MyCRUDViewSet()
        lookup = self.router.get_lookup(viewset)
        assert lookup == '{id}'

        viewset = MyCRUDViewSet()
        viewset.lookup_field = 'id'
        lookup = self.router.get_lookup(viewset)
        assert lookup == '{id}'

        viewset = MyCRUDViewSet()
        viewset.lookup_url_kwargs = {'uuid': 1}
        lookup = self.router.get_lookup(viewset)
        assert lookup == '{uuid}'

    def test_nested_route(self):
        """More than one lookup kwarg is not supported and must raise."""
        viewset = MyCRUDViewSet()
        viewset.lookup_url_kwargs = {'uuid': 1, 'parent_id': 2}
        self.assertRaises(ImproperlyConfigured, self.router.get_lookup, viewset)

    def test_get_method_map(self):
        """Only actions actually implemented by the viewset are mapped."""
        viewset = ReadOnlyViewSet()
        mapping = self.router.get_method_map(viewset, {'get': 'list', 'post': 'create', 'put': 'update'})
        assert mapping == {'get': 'list'}

    def test_register(self):
        """Registering a CRUD viewset adds list and detail routes plus views."""
        viewset = ModelCRUDViewSet()
        self.config.reset_mock()
        self.router.register('users', viewset, 'user')
        self.config.add_route.assert_any_call('user-list', '/users/')
        self.config.add_route.assert_any_call('user-detail', '/users/{id}/')
        assert self.config.add_view.call_count == 2

    def test_empty_register(self):
        """A viewset with no actions registers no routes and no views."""
        viewset = APIViewSet()
        self.config.reset_mock()
        self.router.register('users', viewset, 'user')
        self.config.add_route.assert_not_called()
        # Fixed: this previously re-asserted add_route (copy-paste bug);
        # it should verify that no views were added either, mirroring the
        # add_view check in test_register.
        self.config.add_view.assert_not_called()
self.config.add_route.assert_not_called() | tests/test_routers.py | from unittest import TestCase
from unittest.mock import MagicMock
from pyramid.config import Configurator
from pyramid_restful.routers import ViewSetRouter, Route
from pyramid_restful.viewsets import ModelCRUDViewSet, APIViewSet
from pyramid_restful.exceptions import ImproperlyConfigured
class MyCRUDViewSet(ModelCRUDViewSet):
pass
class ReadOnlyViewSet(APIViewSet):
def list(self):
pass
def retrieve(self):
pass
class ViewSetRouterTests(TestCase):
def setUp(self):
self.config = MagicMock(spec=Configurator)
self.router = ViewSetRouter(self.config)
def test_get_routes(self):
viewset = MyCRUDViewSet()
# add mock detail_route and list_route methods
def detail_route():
pass
viewset.detail_route = detail_route
viewset.detail_route.bind_to_methods = ['GET']
viewset.detail_route.kwargs = {}
viewset.detail_route.detail = True
def list_route():
pass
viewset.list_route = list_route
viewset.list_route.bind_to_methods = ['GET']
viewset.list_route.kwargs = {}
viewset.list_route.detail = False
routes = self.router.get_routes(viewset)
expected = [
Route(url='/{prefix}{trailing_slash}', mapping={'get': 'list', 'post': 'create'}, name='{basename}-list',
initkwargs={}),
Route(url='/{prefix}/list_route{trailing_slash}', mapping={'get': 'list_route'},
name='{basename}-list-route', initkwargs={}),
Route(url='/{prefix}/{lookup}{trailing_slash}',
mapping={'get': 'retrieve', 'put': 'update', 'patch': 'partial_update', 'delete': 'destroy'},
name='{basename}-detail', initkwargs={}),
Route(url='/{prefix}/{lookup}/detail_route{trailing_slash}', mapping={'get': 'detail_route'},
name='{basename}-detail-route', initkwargs={})]
assert routes == expected
def test_improperly_configured_dynamic_route(self):
viewset = MyCRUDViewSet()
# add mock detail_route and list_route methods
def retrieve():
pass
viewset.retrieve = retrieve
viewset.retrieve.bind_to_methods = ['GET']
viewset.retrieve.kwargs = {}
viewset.retrieve.detail = True
self.assertRaises(ImproperlyConfigured, self.router.get_routes, viewset)
def test_get_lookup(self):
viewset = MyCRUDViewSet()
lookup = self.router.get_lookup(viewset)
assert lookup == '{id}'
viewset = MyCRUDViewSet()
viewset.lookup_field = 'id'
lookup = self.router.get_lookup(viewset)
assert lookup == '{id}'
viewset = MyCRUDViewSet()
viewset.lookup_url_kwargs = {'uuid': 1}
lookup = self.router.get_lookup(viewset)
assert lookup == '{uuid}'
def test_nested_route(self):
viewset = MyCRUDViewSet()
viewset.lookup_url_kwargs = {'uuid': 1, 'parent_id': 2}
self.assertRaises(ImproperlyConfigured, self.router.get_lookup, viewset)
def test_get_method_map(self):
viewset = ReadOnlyViewSet()
mapping = self.router.get_method_map(viewset, {'get': 'list', 'post': 'create', 'put': 'update'})
assert mapping == {'get': 'list'}
def test_register(self):
viewset = ModelCRUDViewSet()
self.config.reset_mock()
self.router.register('users', viewset, 'user')
self.config.add_route.assert_any_call('user-list', '/users/')
self.config.add_route.assert_any_call('user-detail', '/users/{id}/')
assert self.config.add_view.call_count == 2
def test_empty_register(self):
viewset = APIViewSet()
self.config.reset_mock()
self.router.register('users', viewset, 'user')
self.config.add_route.assert_not_called()
self.config.add_route.assert_not_called() | 0.667906 | 0.316316 |
import sys
import os
from ngsutils.gtf import GTF
def usage(msg=None):
if msg:
print '%s\n' % msg
print __doc__
print '''\
Usage: gtfutils tobed [type] filename.gtf{.gz}
Where type is one of:
-genes The gene from start to end (including introns)
-exons Each annotated exon
-introns Each annotated intron
-regions Export constant / alternative regions (annotated spliced regions)
-tss Transcription start sites (unique)
-txs Transcription stop sites (unique)
-tlss Translational start sites (unique start codons)
-tlxs Translational stop sites (unique stop codons)
-junc5 Splice junction 5' donor
-junc3 Splice junction 3' acceptor
-utr5 5' UTR (including introns)
-utr3 3' UTR (including introns)
-promoter length Promoter region from the gene [length] upstream of TSS
Note: Length may also be in the form "up,down", where
the promoter coordinates will be TSS-up -> TSS+down.
By default the "down" length is zero.
For example, for a gene that starts a chr1:1000 (+), using
"-promoter 200,100" would yield a BED region of:
chr1 800 1100
'''
sys.exit(1)
def gtf_junc_5_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
for j, (start, end) in enumerate(txscr.exons):
if j == len(txscr.exons) - 1:
continue
if not end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, end, end + 1, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
else:
for j, (start, end) in enumerate(txscr.exons):
if j == 0:
continue
if not start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start - 1, start, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
def gtf_junc_3_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '-':
for j, (start, end) in enumerate(txscr.exons):
if j == len(txscr.exons) - 1:
continue
if not end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, end, end + 1, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
else:
for j, (start, end) in enumerate(txscr.exons):
if j == 0:
continue
if not start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start - 1, start, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
def gtf_genes_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, gene.start, gene.end, gene.gene_name if gene.gene_name else gene.gid, 0, gene.strand]]))
def gtf_promoter_tobed(gtf, promoter_up, promoter_down, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start - promoter_up, txscr.start + promoter_down, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - promoter_down, txscr.end + promoter_up, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_tss_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start + 3, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - 3, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_txs_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '-':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start + 3, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - 3, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_tlss_tobed(gtf, out=sys.stdout):
'Outputs all exons (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if not txscr.start_codon in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start_codon[0], txscr.start_codon[1], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start_codon)
def gtf_utr5_tobed(gtf, out=sys.stdout):
'Outputs all 5\'UTR regions (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if gene.strand == '+':
if not (txscr.start,txscr.start_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start_codon[0], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start,txscr.start_codon))
else:
if not (txscr.end,txscr.start_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start_codon[1]+1, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.end,txscr.start_codon))
def gtf_utr3_tobed(gtf, out=sys.stdout):
'Outputs all 3\'UTR regions (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if gene.strand == '+':
if not (txscr.stop_codon,txscr.end) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.stop_codon[1]+1, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start,txscr.start_codon))
else:
if not (txscr.start, txscr.stop_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.stop_codon[0], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start, txscr.stop_codon))
def gtf_tlxs_tobed(gtf, out=sys.stdout):
'Outputs all translational stop sites (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if not txscr.stop_codon in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.stop_codon[0], txscr.stop_codon[1], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.stop_codon)
def gtf_exons_tobed(gtf, out=sys.stdout):
'Outputs all exons (from all transcripts)'
for gene in gtf.genes:
exons = set()
for txscr in gene.transcripts:
exons.update(txscr.exons)
for i, (start, end) in enumerate(sorted(exons)):
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/e%s' % (gene.gene_name, i + 1), 0, gene.strand]]))
def gtf_introns_tobed(gtf, out=sys.stdout):
'Outputs all introns (from all transcripts)'
for gene in gtf.genes:
introns = set()
for txscr in gene.transcripts:
last = None
for start, end in sorted(txscr.exons):
if last:
introns.add((last, start))
last = end
for i, (start, end) in enumerate(sorted(introns)):
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/i%s' % (gene.gene_name, i + 1), 0, gene.strand]]))
def gtf_regions_tobed(gtf, out=sys.stdout):
'Outputs all regions (from all transcripts)'
for gene in gtf.genes:
for i, start, end, const, names in gene.regions:
source_count = 0
for n in names.split(','):
source_count += 1
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/%s.%s' % (gene.gene_name, 'const' if const else 'alt', i), source_count, gene.strand]]))
if __name__ == '__main__':
genes = False
exons = False
introns = False
regions = False
tss = False
tlss = False
txs = False
tlxs = False
junc_5 = False
junc_3 = False
utr_5 = False
utr_3 = False
promoter = False
promoter_up = 0
promoter_down = 0
last = None
filename = None
for arg in sys.argv[1:]:
if arg == '-h':
usage()
elif last == '-promoter':
if ',' in arg:
promoter_up, promoter_down = [int(x) for x in arg.split(',')]
else:
promoter_up = int(arg)
last = None
elif arg == '-genes':
genes = True
elif arg == '-exons':
exons = True
elif arg == '-introns':
introns = True
elif arg == '-regions':
regions = True
elif arg == '-tss':
tss = True
elif arg == '-tlss':
tlss = True
elif arg == '-txs':
txs = True
elif arg == '-tlxs':
tlxs = True
elif arg == '-utr5':
utr_5 = True
elif arg == '-utr3':
utr_3 = True
elif arg == '-junc5':
junc_5 = True
elif arg == '-junc3':
junc_3 = True
elif arg in ['-promoter']:
promoter = True
last = arg
elif not filename and os.path.exists(arg):
filename = arg
i = 0
for arg in [genes, exons, introns, regions, tss, tlss, txs, tlxs, utr_5, utr_3, junc_5, junc_3, promoter]:
if arg:
i += 1
if i == 0:
usage('You must select one [type] to export.')
elif i > 1:
usage('You must select *only one* [type] to export.')
elif not filename:
usage('Missing input file')
elif promoter and not (promoter_down or promoter_up):
usage('You must specify a valid promoter length!')
gtf = GTF(filename)
if genes:
gtf_genes_tobed(gtf)
elif exons:
gtf_exons_tobed(gtf)
elif introns:
gtf_introns_tobed(gtf)
elif regions:
gtf_regions_tobed(gtf)
elif tss:
gtf_tss_tobed(gtf)
elif tlss:
gtf_tlss_tobed(gtf)
elif txs:
gtf_txs_tobed(gtf)
elif tlxs:
gtf_tlxs_tobed(gtf)
elif utr_5:
gtf_utr5_tobed(gtf)
elif utr_3:
gtf_utr3_tobed(gtf)
elif junc_5:
gtf_junc_5_tobed(gtf)
elif junc_3:
gtf_junc_3_tobed(gtf)
elif promoter:
gtf_promoter_tobed(gtf, promoter_up, promoter_down) | ngsutils/gtf/tobed.py | import sys
import os
from ngsutils.gtf import GTF
def usage(msg=None):
if msg:
print '%s\n' % msg
print __doc__
print '''\
Usage: gtfutils tobed [type] filename.gtf{.gz}
Where type is one of:
-genes The gene from start to end (including introns)
-exons Each annotated exon
-introns Each annotated intron
-regions Export constant / alternative regions (annotated spliced regions)
-tss Transcription start sites (unique)
-txs Transcription stop sites (unique)
-tlss Translational start sites (unique start codons)
-tlxs Translational stop sites (unique stop codons)
-junc5 Splice junction 5' donor
-junc3 Splice junction 3' acceptor
-utr5 5' UTR (including introns)
-utr3 3' UTR (including introns)
-promoter length Promoter region from the gene [length] upstream of TSS
Note: Length may also be in the form "up,down", where
the promoter coordinates will be TSS-up -> TSS+down.
By default the "down" length is zero.
For example, for a gene that starts a chr1:1000 (+), using
"-promoter 200,100" would yield a BED region of:
chr1 800 1100
'''
sys.exit(1)
def gtf_junc_5_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
for j, (start, end) in enumerate(txscr.exons):
if j == len(txscr.exons) - 1:
continue
if not end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, end, end + 1, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
else:
for j, (start, end) in enumerate(txscr.exons):
if j == 0:
continue
if not start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start - 1, start, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
def gtf_junc_3_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '-':
for j, (start, end) in enumerate(txscr.exons):
if j == len(txscr.exons) - 1:
continue
if not end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, end, end + 1, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
else:
for j, (start, end) in enumerate(txscr.exons):
if j == 0:
continue
if not start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start - 1, start, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(end)
def gtf_genes_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, gene.start, gene.end, gene.gene_name if gene.gene_name else gene.gid, 0, gene.strand]]))
def gtf_promoter_tobed(gtf, promoter_up, promoter_down, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start - promoter_up, txscr.start + promoter_down, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - promoter_down, txscr.end + promoter_up, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_tss_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '+':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start + 3, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - 3, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_txs_tobed(gtf, out=sys.stdout):
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if gene.strand == '-':
if not txscr.start in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start + 3, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start)
else:
if not txscr.end in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.end - 3, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.end)
def gtf_tlss_tobed(gtf, out=sys.stdout):
'Outputs all exons (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if not txscr.start_codon in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start_codon[0], txscr.start_codon[1], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.start_codon)
def gtf_utr5_tobed(gtf, out=sys.stdout):
'Outputs all 5\'UTR regions (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if gene.strand == '+':
if not (txscr.start,txscr.start_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.start_codon[0], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start,txscr.start_codon))
else:
if not (txscr.end,txscr.start_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start_codon[1]+1, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.end,txscr.start_codon))
def gtf_utr3_tobed(gtf, out=sys.stdout):
'Outputs all 3\'UTR regions (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if gene.strand == '+':
if not (txscr.stop_codon,txscr.end) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.stop_codon[1]+1, txscr.end, '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start,txscr.start_codon))
else:
if not (txscr.start, txscr.stop_codon) in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.start, txscr.stop_codon[0], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add((txscr.start, txscr.stop_codon))
def gtf_tlxs_tobed(gtf, out=sys.stdout):
'Outputs all translational stop sites (from all transcripts)'
for gene in gtf.genes:
sites = set()
for i, txscr in enumerate(gene.transcripts):
if not txscr.has_cds:
continue
if not txscr.stop_codon in sites:
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, txscr.stop_codon[0], txscr.stop_codon[1], '%s/%s' % (gene.gene_name, i), 0, gene.strand]]))
sites.add(txscr.stop_codon)
def gtf_exons_tobed(gtf, out=sys.stdout):
'Outputs all exons (from all transcripts)'
for gene in gtf.genes:
exons = set()
for txscr in gene.transcripts:
exons.update(txscr.exons)
for i, (start, end) in enumerate(sorted(exons)):
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/e%s' % (gene.gene_name, i + 1), 0, gene.strand]]))
def gtf_introns_tobed(gtf, out=sys.stdout):
'Outputs all introns (from all transcripts)'
for gene in gtf.genes:
introns = set()
for txscr in gene.transcripts:
last = None
for start, end in sorted(txscr.exons):
if last:
introns.add((last, start))
last = end
for i, (start, end) in enumerate(sorted(introns)):
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/i%s' % (gene.gene_name, i + 1), 0, gene.strand]]))
def gtf_regions_tobed(gtf, out=sys.stdout):
'Outputs all regions (from all transcripts)'
for gene in gtf.genes:
for i, start, end, const, names in gene.regions:
source_count = 0
for n in names.split(','):
source_count += 1
out.write('%s\n' % '\t'.join([str(x) for x in [gene.chrom, start, end, '%s/%s.%s' % (gene.gene_name, 'const' if const else 'alt', i), source_count, gene.strand]]))
if __name__ == '__main__':
genes = False
exons = False
introns = False
regions = False
tss = False
tlss = False
txs = False
tlxs = False
junc_5 = False
junc_3 = False
utr_5 = False
utr_3 = False
promoter = False
promoter_up = 0
promoter_down = 0
last = None
filename = None
for arg in sys.argv[1:]:
if arg == '-h':
usage()
elif last == '-promoter':
if ',' in arg:
promoter_up, promoter_down = [int(x) for x in arg.split(',')]
else:
promoter_up = int(arg)
last = None
elif arg == '-genes':
genes = True
elif arg == '-exons':
exons = True
elif arg == '-introns':
introns = True
elif arg == '-regions':
regions = True
elif arg == '-tss':
tss = True
elif arg == '-tlss':
tlss = True
elif arg == '-txs':
txs = True
elif arg == '-tlxs':
tlxs = True
elif arg == '-utr5':
utr_5 = True
elif arg == '-utr3':
utr_3 = True
elif arg == '-junc5':
junc_5 = True
elif arg == '-junc3':
junc_3 = True
elif arg in ['-promoter']:
promoter = True
last = arg
elif not filename and os.path.exists(arg):
filename = arg
i = 0
for arg in [genes, exons, introns, regions, tss, tlss, txs, tlxs, utr_5, utr_3, junc_5, junc_3, promoter]:
if arg:
i += 1
if i == 0:
usage('You must select one [type] to export.')
elif i > 1:
usage('You must select *only one* [type] to export.')
elif not filename:
usage('Missing input file')
elif promoter and not (promoter_down or promoter_up):
usage('You must specify a valid promoter length!')
gtf = GTF(filename)
if genes:
gtf_genes_tobed(gtf)
elif exons:
gtf_exons_tobed(gtf)
elif introns:
gtf_introns_tobed(gtf)
elif regions:
gtf_regions_tobed(gtf)
elif tss:
gtf_tss_tobed(gtf)
elif tlss:
gtf_tlss_tobed(gtf)
elif txs:
gtf_txs_tobed(gtf)
elif tlxs:
gtf_tlxs_tobed(gtf)
elif utr_5:
gtf_utr5_tobed(gtf)
elif utr_3:
gtf_utr3_tobed(gtf)
elif junc_5:
gtf_junc_5_tobed(gtf)
elif junc_3:
gtf_junc_3_tobed(gtf)
elif promoter:
gtf_promoter_tobed(gtf, promoter_up, promoter_down) | 0.203985 | 0.370112 |
import inspect
import torch
import warnings
from pd_mesh_net.models import (DualPrimalMeshClassifier,
DualPrimalMeshSegmenter,
DualPrimalUNetMeshSegmenter)
def create_model(model_name, should_initialize_weights, **model_params):
r"""Creates an instance of the input model with the input parameters.
Args:
model_name (str): Name that identifies the model. Valid values are:
`mesh_classifier`, 'mesh_segmenter', 'unet_mesh_segmenter'.
should_initialize_weights (bool): Whether or not to perform weights
initialization. If True, parameters `weight_initialization_type` and
`weight_initialization_gain` are also required.
...
Optional parameters of the models.
Returns:
model (torch.nn.Module): The instance of the model with the input
parameters.
"""
if (model_name == 'mesh_classifier'):
model_class = DualPrimalMeshClassifier
elif (model_name == 'mesh_segmenter'):
model_class = DualPrimalMeshSegmenter
elif (model_name == 'unet_mesh_segmenter'):
model_class = DualPrimalUNetMeshSegmenter
else:
raise KeyError(
f"No known model can be created with the name '{model_name}'.")
# Only keep the valid model parameters.
valid_model_params = {}
possible_valid_params = [
p for p in inspect.getfullargspec(model_class).args
if p not in ['self']
]
for param, param_value in model_params.items():
if (param in possible_valid_params):
valid_model_params[param] = param_value
else:
if (param not in [
'weight_initialization_type', 'weight_initialization_gain'
]):
warnings.warn(
f"Ignoring parameter '{param}', invalid for model "
f"'{model_class.__name__}'.")
# Create model.
model = model_class(**valid_model_params)
# Optionally initialize model weights.
if (should_initialize_weights and
'weight_initialization_type' in model_params and
'weight_initialization_gain' in model_params):
initialize_model_weights(
model=model,
initialization_type=model_params['weight_initialization_type'],
initialization_gain=model_params['weight_initialization_gain'])
return model
def initialize_model_weights(model, initialization_type, initialization_gain):
""" Initializes the weights of the input network.
Modified from MeshCNN (https://github.com/ranahanocka/MeshCNN/).
Args:
model (torch.nn.Module): Model used.
initialization_type (str): One of the following:
- 'kaiming': 'He initialization' is used, cf.,
https://pytorch.org/docs/stable/nn.init.html.
- 'normal': Weights are drawn from a normal distribution with mean
0 and variance `init_gain`.
- 'orthogonal': Weights are initialization with a (semi) orthogonal
matrix, with scaling factor `init_gain`, cf.
https://pytorch.org/docs/stable/nn.init.html.
- 'xavier': 'Glorot initialization' is used, with gain `init_gain`,
cf. https://pytorch.org/docs/stable/nn.init.html.
initialization_gain (float): Factor for weight initialization, cf.
argument `initialization_type`.
Returns:
None.
"""
def initialize_module(module):
""" Initializes the weights of the linear and batch normalization
layers. Convolutional layers are automatically initialized (as they are
derived classed of `torch_geometric.nn.conv.GATConv`).
Modified from MeshCNN (https://github.com/ranahanocka/MeshCNN/).
Args:
module (torch.nn.Module): Submodule of the network to which the
initialization function should be applied.
Returns:
None.
"""
class_name = module.__class__.__name__
if (hasattr(module, 'weight') and class_name.find('Linear') != -1):
if (initialization_type == 'normal'):
torch.nn.init.normal_(module.weight.data, 0.0,
initialization_gain)
elif (initialization_type == 'xavier'):
torch.nn.init.xavier_normal_(module.weight.data,
gain=initialization_gain)
elif (initialization_type == 'kaiming'):
torch.nn.init.kaiming_normal_(module.weight.data,
a=0,
mode='fan_in')
elif (initialization_type == 'orthogonal'):
torch.nn.init.orthogonal_(module.weight.data,
gain=initialization_gain)
else:
raise NotImplementedError(
f"Initialization method {initialization_type} is not "
"implemented.")
elif (class_name.find('BatchNorm') != -1):
torch.nn.init.normal_(module.weight.data, 1.0, initialization_gain)
torch.nn.init.constant_(module.bias.data, 0.0)
# Recursively apply the initialization function to all the submodules in the
# network.
model.apply(initialize_module) | pd_mesh_net/utils/models.py | import inspect
import torch
import warnings
from pd_mesh_net.models import (DualPrimalMeshClassifier,
DualPrimalMeshSegmenter,
DualPrimalUNetMeshSegmenter)
def create_model(model_name, should_initialize_weights, **model_params):
r"""Creates an instance of the input model with the input parameters.
Args:
model_name (str): Name that identifies the model. Valid values are:
`mesh_classifier`, 'mesh_segmenter', 'unet_mesh_segmenter'.
should_initialize_weights (bool): Whether or not to perform weights
initialization. If True, parameters `weight_initialization_type` and
`weight_initialization_gain` are also required.
...
Optional parameters of the models.
Returns:
model (torch.nn.Module): The instance of the model with the input
parameters.
"""
if (model_name == 'mesh_classifier'):
model_class = DualPrimalMeshClassifier
elif (model_name == 'mesh_segmenter'):
model_class = DualPrimalMeshSegmenter
elif (model_name == 'unet_mesh_segmenter'):
model_class = DualPrimalUNetMeshSegmenter
else:
raise KeyError(
f"No known model can be created with the name '{model_name}'.")
# Only keep the valid model parameters.
valid_model_params = {}
possible_valid_params = [
p for p in inspect.getfullargspec(model_class).args
if p not in ['self']
]
for param, param_value in model_params.items():
if (param in possible_valid_params):
valid_model_params[param] = param_value
else:
if (param not in [
'weight_initialization_type', 'weight_initialization_gain'
]):
warnings.warn(
f"Ignoring parameter '{param}', invalid for model "
f"'{model_class.__name__}'.")
# Create model.
model = model_class(**valid_model_params)
# Optionally initialize model weights.
if (should_initialize_weights and
'weight_initialization_type' in model_params and
'weight_initialization_gain' in model_params):
initialize_model_weights(
model=model,
initialization_type=model_params['weight_initialization_type'],
initialization_gain=model_params['weight_initialization_gain'])
return model
def initialize_model_weights(model, initialization_type, initialization_gain):
""" Initializes the weights of the input network.
Modified from MeshCNN (https://github.com/ranahanocka/MeshCNN/).
Args:
model (torch.nn.Module): Model used.
initialization_type (str): One of the following:
- 'kaiming': 'He initialization' is used, cf.,
https://pytorch.org/docs/stable/nn.init.html.
- 'normal': Weights are drawn from a normal distribution with mean
0 and variance `init_gain`.
- 'orthogonal': Weights are initialization with a (semi) orthogonal
matrix, with scaling factor `init_gain`, cf.
https://pytorch.org/docs/stable/nn.init.html.
- 'xavier': 'Glorot initialization' is used, with gain `init_gain`,
cf. https://pytorch.org/docs/stable/nn.init.html.
initialization_gain (float): Factor for weight initialization, cf.
argument `initialization_type`.
Returns:
None.
"""
def initialize_module(module):
""" Initializes the weights of the linear and batch normalization
layers. Convolutional layers are automatically initialized (as they are
derived classed of `torch_geometric.nn.conv.GATConv`).
Modified from MeshCNN (https://github.com/ranahanocka/MeshCNN/).
Args:
module (torch.nn.Module): Submodule of the network to which the
initialization function should be applied.
Returns:
None.
"""
class_name = module.__class__.__name__
if (hasattr(module, 'weight') and class_name.find('Linear') != -1):
if (initialization_type == 'normal'):
torch.nn.init.normal_(module.weight.data, 0.0,
initialization_gain)
elif (initialization_type == 'xavier'):
torch.nn.init.xavier_normal_(module.weight.data,
gain=initialization_gain)
elif (initialization_type == 'kaiming'):
torch.nn.init.kaiming_normal_(module.weight.data,
a=0,
mode='fan_in')
elif (initialization_type == 'orthogonal'):
torch.nn.init.orthogonal_(module.weight.data,
gain=initialization_gain)
else:
raise NotImplementedError(
f"Initialization method {initialization_type} is not "
"implemented.")
elif (class_name.find('BatchNorm') != -1):
torch.nn.init.normal_(module.weight.data, 1.0, initialization_gain)
torch.nn.init.constant_(module.bias.data, 0.0)
# Recursively apply the initialization function to all the submodules in the
# network.
model.apply(initialize_module) | 0.917474 | 0.467271 |
import os
import json
import math
import time
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
import numpy as np
from tqdm import tqdm
import pickle
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components
from special_partition.special_partition import cluster_linking_partition
from collections import defaultdict
import blink.biencoder.data_process_mult as data_process
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.biencoder.biencoder import BiEncoderRanker
from IPython import embed
def get_query_nn(knn,
                 embeds,
                 index,
                 q_embed,
                 searchK=None,
                 gold_idxs=None,
                 type_idx_mapping=None):
    """
    Retrieve the top-knn nearest neighbours of a query embedding, scored by
    dot-product similarity against the full-precision embeddings.

    Parameters
    ----------
    knn : int
        the number of nearest-neighbours to return
    embeds : ndarray
        matrix of embeddings
    index : faiss
        faiss index of the embeddings
    q_embed : ndarray
        2-D array containing the query embedding
    searchK: int
        optional parameter, the exact number of nearest-neighbours to retrieve and score
    gold_idxs : array
        optional parameter, list of golden cui indexes
    type_idx_mapping : array
        optional parameter, list mapping type-specific indexes to the indexes of the full dictionary

    Returns
    -------
    nn_idxs : array
        nearest neighbour indices for the query, sorted in descending order of scores
    scores : array
        similarity scores for each nearest neighbour, sorted in descending order
    topk : int
        (only when `gold_idxs` is given) rank at which the first gold index
        appears among the retrieved neighbours, or -1 if it was not retrieved
    """
    # To accomodate the approximate-nature of the knn procedure, retrieve more
    # samples than requested and then filter down to `knn` after exact scoring.
    k = searchK if searchK is not None else max(16, 2*knn)

    # Find k (approximate) nearest neighbours via the faiss index.
    _, nn_idxs = index.search(q_embed, k)
    nn_idxs = nn_idxs.astype(np.int64).flatten()
    if type_idx_mapping is not None:
        # Translate type-specific candidate indices back to full-dictionary indices.
        nn_idxs = type_idx_mapping[nn_idxs]
    nn_embeds = torch.tensor(embeds[nn_idxs]).cuda()

    # Re-score the candidates exactly: dot product of query with each candidate.
    scores = torch.flatten(
        torch.mm(torch.tensor(q_embed).cuda(), nn_embeds.T)).cpu()

    # Sort the candidates by descending order of scores.
    nn_idxs, scores = zip(
        *sorted(zip(nn_idxs, scores), key=lambda x: -x[1]))

    if gold_idxs is not None:
        # Calculate the knn index at which the gold cui is found (-1 if not found).
        # BUG FIX: the original code ran `topk = -1` unconditionally after the
        # loop, clobbering the rank found by `break`; the for/else below only
        # assigns -1 when the loop completes without finding a gold index.
        for topk, i in enumerate(nn_idxs):
            if i in gold_idxs:
                break
        else:
            topk = -1
        # Return only the top k neighbours, and the recall index.
        return np.array(nn_idxs[:knn], dtype=np.int64), np.array(scores[:knn]), topk

    # Return only the top k neighbours.
    return np.array(nn_idxs[:knn], dtype=np.int64), np.array(scores[:knn])
def partition_graph(graph, n_entities, directed, return_clusters=False):
    """
    Partition the entity-mention joint graph so that every mention ends up
    linked to exactly one entity.

    Parameters
    ----------
    graph : dict
        object containing rows, cols, data, and shape of the entity-mention joint graph
    n_entities : int
        number of entities in the dictionary
    directed : bool
        whether the graph construction should be directed or undirected
    return_clusters : bool
        flag to indicate if clusters need to be returned from the partition

    Returns
    -------
    partitioned_graph : coo_matrix
        partitioned graph with each mention connected to only one entity
    clusters : dict
        (optional) contains arrays of connected component indices of the graph
    """
    # Run the special partitioning procedure on the raw edge lists.
    rows, cols, data = cluster_linking_partition(
        graph['rows'],
        graph['cols'],
        graph['data'],
        n_entities,
        directed
    )

    # Assemble the partitioned graph as a sparse COO matrix.
    partitioned_graph = coo_matrix((data, (rows, cols)), shape=graph['shape'])

    if not return_clusters:
        return partitioned_graph

    # Label every node of the graph with the connected component it belongs to.
    _, cc_labels = connected_components(
        csgraph=partitioned_graph,
        directed=directed,
        return_labels=True)

    # Keep only components containing at least 2 nodes, and collect the
    # node indices belonging to each such component.
    label_values, label_counts = np.unique(cc_labels, return_counts=True)
    multi_node_labels = set(label_values[label_counts >= 2])
    clusters = defaultdict(list)
    for node_idx, label in enumerate(cc_labels):
        if label in multi_node_labels:
            clusters[label].append(node_idx)
    return partitioned_graph, clusters
def analyzeClusters(clusters, dictionary, queries, knn):
    """
    Infer entity predictions from partitioned-graph clusters and score them.

    Each cluster is expected to hold exactly one entity (the smallest graph
    index) plus the mentions linked to it; a mention is correct when any CUI
    predicted from the cluster's entities matches one of its gold CUIs.

    Parameters
    ----------
    clusters : dict
        contains arrays of connected component indices of a graph
    dictionary : ndarray
        entity dictionary to evaluate
    queries : ndarray
        mention queries to evaluate
    knn : int
        the number of nearest-neighbour mention candidates considered

    Returns
    -------
    results : dict
        Contains n_entities, n_mentions, knn_mentions, accuracy, failure[], success[]
    """
    n_entities = len(dictionary)
    n_mentions = len(queries)
    results = {
        'n_entities': n_entities,
        'n_mentions': n_mentions,
        'knn_mentions': knn,
        'accuracy': 0,
        'failure': [],
        'success': []
    }
    # Count correct predictions in a dedicated variable instead of mutating
    # results['accuracy'] and later overwriting it with a string
    n_correct = 0
    _debug_n_mens_evaluated, _debug_clusters_wo_entities, _debug_clusters_w_mult_entities = 0, 0, 0
    print("Analyzing clusters...")
    for cluster in clusters.values():
        # The lowest value in the cluster should always be the entity
        pred_entity_idx = cluster[0]
        # Track the graph index of the entity in the cluster
        pred_entity_idxs = [pred_entity_idx]
        if pred_entity_idx >= n_entities:
            # If the first element is a mention, then the cluster does not have an entity
            _debug_clusters_wo_entities += 1
            continue
        pred_entity = dictionary[pred_entity_idx]
        pred_entity_cuis = [str(pred_entity['cui'])]
        _debug_tracked_mult_entities = False
        for i in range(1, len(cluster)):
            men_idx = cluster[i] - n_entities
            if men_idx < 0:
                # If elements after the first are entities, then the cluster has multiple entities
                if not _debug_tracked_mult_entities:
                    _debug_clusters_w_mult_entities += 1
                    _debug_tracked_mult_entities = True
                # Track the graph indices of each entity in the cluster
                pred_entity_idxs.append(cluster[i])
                # Predict based on all entities in the cluster
                pred_entity_cuis += list(set([dictionary[cluster[i]]['cui']]) - set(pred_entity_cuis))
                continue
            _debug_n_mens_evaluated += 1
            men_query = queries[men_idx]
            men_golden_cuis = list(map(str, men_query['label_cuis']))
            report_obj = {
                'mention_id': men_query['mention_id'],
                'mention_name': men_query['mention_name'],
                'mention_gold_cui': '|'.join(men_golden_cuis),
                'mention_gold_cui_name': '|'.join([dictionary[i]['title'] for i in men_query['label_idxs'][:men_query['n_labels']]]),
                'predicted_name': '|'.join([d['title'] for d in [dictionary[i] for i in pred_entity_idxs]]),
                'predicted_cui': '|'.join(pred_entity_cuis),
            }
            # Correct prediction: any overlap between predicted and gold CUIs
            if not set(pred_entity_cuis).isdisjoint(men_golden_cuis):
                n_correct += 1
                results['success'].append(report_obj)
            # Incorrect prediction
            else:
                results['failure'].append(report_obj)
    # Guard against ZeroDivisionError when no mention was evaluated, so the
    # sanity checks below can report the actual inconsistency instead
    if _debug_n_mens_evaluated > 0:
        results['accuracy'] = f"{n_correct / float(_debug_n_mens_evaluated) * 100} %"
    else:
        results['accuracy'] = '0 %'
    print(f"Accuracy = {results['accuracy']}")
    # Run sanity checks
    assert n_mentions == _debug_n_mens_evaluated
    assert _debug_clusters_wo_entities == 0
    assert _debug_clusters_w_mult_entities == 0
    return results
def main(params):
    """
    Run cluster-linking evaluation of a biencoder on the chosen data split.

    Loads (or builds and caches) the processed test data, embeds the entity
    dictionary and mention queries, retrieves nearest neighbours, builds joint
    entity-mention graphs for several k values, partitions them, and reports
    recall and clustering accuracy, writing JSON results to disk.

    Parameters
    ----------
    params : dict
        command-line parameters parsed by BlinkParser (paths, knn, use_types,
        batch sizes, etc.)
    """
    output_path = params["output_path"]
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    logger = utils.get_logger(params["output_path"], 'log-eval')
    # Fall back to output_path when pickle/embedding source dirs are unset or missing
    pickle_src_path = params["pickle_src_path"]
    if pickle_src_path is None or not os.path.exists(pickle_src_path):
        pickle_src_path = output_path
    embed_data_path = params["embed_data_path"]
    if embed_data_path is None or not os.path.exists(embed_data_path):
        embed_data_path = output_path
    # Init model
    reranker = BiEncoderRanker(params)
    reranker.model.eval()
    tokenizer = reranker.tokenizer
    model = reranker.model
    device = reranker.device
    n_gpu = reranker.n_gpu
    knn = params["knn"]
    use_types = params["use_types"]
    data_split = params["data_split"] # Default = "test"
    # Load test data, preferring previously pickled processed artifacts
    entity_dictionary_loaded = False
    test_dictionary_pkl_path = os.path.join(pickle_src_path, 'test_dictionary.pickle')
    test_tensor_data_pkl_path = os.path.join(pickle_src_path, 'test_tensor_data.pickle')
    test_mention_data_pkl_path = os.path.join(pickle_src_path, 'test_mention_data.pickle')
    if os.path.isfile(test_dictionary_pkl_path):
        print("Loading stored processed entity dictionary...")
        with open(test_dictionary_pkl_path, 'rb') as read_handle:
            test_dictionary = pickle.load(read_handle)
        entity_dictionary_loaded = True
    if os.path.isfile(test_tensor_data_pkl_path) and os.path.isfile(test_mention_data_pkl_path):
        print("Loading stored processed test data...")
        with open(test_tensor_data_pkl_path, 'rb') as read_handle:
            test_tensor_data = pickle.load(read_handle)
        with open(test_mention_data_pkl_path, 'rb') as read_handle:
            mention_data = pickle.load(read_handle)
    else:
        # No cached tensors: read the raw split and process it from scratch
        test_samples = utils.read_dataset(data_split, params["data_path"])
        if not entity_dictionary_loaded:
            with open(os.path.join(params["data_path"], 'dictionary.pickle'), 'rb') as read_handle:
                test_dictionary = pickle.load(read_handle)
        # Check if dataset has multiple ground-truth labels
        mult_labels = "labels" in test_samples[0].keys()
        if params["filter_unlabeled"]:
            # Filter samples without gold entities
            test_samples = list(filter(lambda sample: (len(sample["labels"]) > 0) if mult_labels else (sample["label"] is not None), test_samples))
        logger.info("Read %d test samples." % len(test_samples))
        mention_data, test_dictionary, test_tensor_data = data_process.process_mention_data(
            test_samples,
            test_dictionary,
            tokenizer,
            params["max_context_length"],
            params["max_cand_length"],
            multi_label_key="labels" if mult_labels else None,
            context_key=params["context_key"],
            silent=params["silent"],
            logger=logger,
            debug=params["debug"],
            knn=knn,
            dictionary_processed=entity_dictionary_loaded
        )
        print("Saving processed test data...")
        if not entity_dictionary_loaded:
            with open(test_dictionary_pkl_path, 'wb') as write_handle:
                pickle.dump(test_dictionary, write_handle,
                            protocol=pickle.HIGHEST_PROTOCOL)
        with open(test_tensor_data_pkl_path, 'wb') as write_handle:
            pickle.dump(test_tensor_data, write_handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
        with open(test_mention_data_pkl_path, 'wb') as write_handle:
            pickle.dump(mention_data, write_handle,
                        protocol=pickle.HIGHEST_PROTOCOL)
    # Store test dictionary token ids
    test_dict_vecs = torch.tensor(
        list(map(lambda x: x['ids'], test_dictionary)), dtype=torch.long)
    # Store test mention token ids
    test_men_vecs = test_tensor_data[:][0]
    n_entities = len(test_dict_vecs)
    n_mentions = len(test_tensor_data)
    # Values of k to run the evaluation against (0 plus powers of two up to knn)
    knn_vals = [0] + [2**i for i in range(int(math.log(knn, 2)) + 1)]
    # Store the maximum evaluation k
    max_knn = knn_vals[-1]
    time_start = time.time()
    # Check if graphs are already built
    graph_path = os.path.join(output_path, 'graphs.pickle')
    if not params['only_recall'] and os.path.isfile(graph_path):
        print("Loading stored joint graphs...")
        with open(graph_path, 'rb') as read_handle:
            joint_graphs = pickle.load(read_handle)
    else:
        # Initialize graphs to store mention-mention and mention-entity similarity score edges;
        # Keyed on k, the number of nearest mentions retrieved
        joint_graphs = {}
        for k in knn_vals:
            joint_graphs[k] = {
                'rows': np.array([]),
                'cols': np.array([]),
                'data': np.array([]),
                'shape': (n_entities+n_mentions, n_entities+n_mentions)
            }
    # Check and load stored embedding data
    embed_data_path = os.path.join(embed_data_path, 'embed_data.t7')
    embed_data = None
    if os.path.isfile(embed_data_path):
        embed_data = torch.load(embed_data_path)
    if use_types:
        # Type-aware mode: one faiss index per entity type
        if embed_data is not None:
            logger.info('Loading stored embeddings and computing indexes')
            dict_embeds = embed_data['dict_embeds']
            if 'dict_idxs_by_type' in embed_data:
                dict_idxs_by_type = embed_data['dict_idxs_by_type']
            else:
                dict_idxs_by_type = data_process.get_idxs_by_type(test_dictionary)
            dict_indexes = data_process.get_index_from_embeds(dict_embeds, dict_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
            men_embeds = embed_data['men_embeds']
            if 'men_idxs_by_type' in embed_data:
                men_idxs_by_type = embed_data['men_idxs_by_type']
            else:
                men_idxs_by_type = data_process.get_idxs_by_type(mention_data)
            men_indexes = data_process.get_index_from_embeds(men_embeds, men_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
        else:
            logger.info("Dictionary: Embedding and building index")
            dict_embeds, dict_indexes, dict_idxs_by_type = data_process.embed_and_index(reranker, test_dict_vecs, encoder_type="candidate", n_gpu=n_gpu, corpus=test_dictionary, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
            logger.info("Queries: Embedding and building index")
            men_embeds, men_indexes, men_idxs_by_type = data_process.embed_and_index(reranker, test_men_vecs, encoder_type="context", n_gpu=n_gpu, corpus=mention_data, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
    else:
        # Type-agnostic mode: a single index over all entities / mentions
        if embed_data is not None:
            logger.info('Loading stored embeddings and computing indexes')
            dict_embeds = embed_data['dict_embeds']
            dict_index = data_process.get_index_from_embeds(dict_embeds, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
            men_embeds = embed_data['men_embeds']
            men_index = data_process.get_index_from_embeds(men_embeds, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
        else:
            logger.info("Dictionary: Embedding and building index")
            dict_embeds, dict_index = data_process.embed_and_index(
                reranker, test_dict_vecs, 'candidate', n_gpu=n_gpu, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
            logger.info("Queries: Embedding and building index")
            men_embeds, men_index = data_process.embed_and_index(
                reranker, test_men_vecs, 'context', n_gpu=n_gpu, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
    # Save computed embedding data if not loaded from disk
    if embed_data is None:
        embed_data = {}
        embed_data['dict_embeds'] = dict_embeds
        embed_data['men_embeds'] = men_embeds
        if use_types:
            embed_data['dict_idxs_by_type'] = dict_idxs_by_type
            embed_data['men_idxs_by_type'] = men_idxs_by_type
        # NOTE: Cannot pickle faiss index because it is a SwigPyObject
        torch.save(embed_data, embed_data_path, pickle_protocol=pickle.HIGHEST_PROTOCOL)
    recall_accuracy = {2**i: 0 for i in range(int(math.log(params['recall_k'], 2)) + 1)}
    recall_idxs = [0.]*params['recall_k']
    logger.info("Starting KNN search...")
    # Fetch recall_k (default 16) knn entities for all mentions
    # Fetch (k+1) NN mention candidates
    if not use_types:
        nn_ent_dists, nn_ent_idxs = dict_index.search(men_embeds, params['recall_k'])
        nn_men_dists, nn_men_idxs = men_index.search(men_embeds, max_knn + 1)
    else:
        # Search each per-type index, then scatter results back to global positions
        nn_ent_idxs = np.zeros((len(men_embeds), params['recall_k']))
        nn_ent_dists = np.zeros((len(men_embeds), params['recall_k']), dtype='float64')
        nn_men_idxs = np.zeros((len(men_embeds), max_knn + 1))
        nn_men_dists = np.zeros((len(men_embeds), max_knn + 1), dtype='float64')
        for entity_type in men_indexes:
            men_embeds_by_type = men_embeds[men_idxs_by_type[entity_type]]
            nn_ent_dists_by_type, nn_ent_idxs_by_type = dict_indexes[entity_type].search(men_embeds_by_type, params['recall_k'])
            nn_men_dists_by_type, nn_men_idxs_by_type = men_indexes[entity_type].search(men_embeds_by_type, max_knn + 1)
            # Map type-local result indexes back to full-corpus indexes
            nn_ent_idxs_by_type = np.array(list(map(lambda x: dict_idxs_by_type[entity_type][x], nn_ent_idxs_by_type)))
            nn_men_idxs_by_type = np.array(list(map(lambda x: men_idxs_by_type[entity_type][x], nn_men_idxs_by_type)))
            for i,idx in enumerate(men_idxs_by_type[entity_type]):
                nn_ent_idxs[idx] = nn_ent_idxs_by_type[i]
                nn_ent_dists[idx] = nn_ent_dists_by_type[i]
                nn_men_idxs[idx] = nn_men_idxs_by_type[i]
                nn_men_dists[idx] = nn_men_dists_by_type[i]
    logger.info("Search finished")
    logger.info('Building graphs')
    # Find the most similar entity and k-nn mentions for each mention query
    for men_query_idx, men_embed in enumerate(tqdm(men_embeds, total=len(men_embeds), desc="Building graph")):
        # Get nearest entity candidate
        dict_cand_idx = nn_ent_idxs[men_query_idx][0]
        dict_cand_score = nn_ent_dists[men_query_idx][0]
        # Compute recall metric
        gold_idxs = mention_data[men_query_idx]["label_idxs"][:mention_data[men_query_idx]["n_labels"]]
        recall_idx = np.argwhere(nn_ent_idxs[men_query_idx] == gold_idxs[0])
        if len(recall_idx) != 0:
            recall_idx = int(recall_idx)
            recall_idxs[recall_idx] += 1.
            for recall_k in recall_accuracy:
                if recall_idx < recall_k:
                    recall_accuracy[recall_k] += 1.
        if not params['only_recall']:
            # Filter candidates to remove mention query and keep only the top k candidates
            men_cand_idxs = nn_men_idxs[men_query_idx]
            men_cand_scores = nn_men_dists[men_query_idx]
            filter_mask = men_cand_idxs != men_query_idx
            men_cand_idxs, men_cand_scores = men_cand_idxs[filter_mask][:max_knn], men_cand_scores[filter_mask][:max_knn]
            # NOTE(review): this loop also runs when joint_graphs was loaded from
            # disk above, which looks like it would append duplicate edges — confirm
            # Add edges to the graphs
            for k in joint_graphs:
                joint_graph = joint_graphs[k]
                # Add mention-entity edge
                joint_graph['rows'] = np.append(
                    joint_graph['rows'], [n_entities+men_query_idx])  # Mentions added at an offset of maximum entities
                joint_graph['cols'] = np.append(
                    joint_graph['cols'], dict_cand_idx)
                joint_graph['data'] = np.append(
                    joint_graph['data'], dict_cand_score)
                if k > 0:
                    # Add mention-mention edges
                    joint_graph['rows'] = np.append(
                        joint_graph['rows'], [n_entities+men_query_idx]*len(men_cand_idxs[:k]))
                    joint_graph['cols'] = np.append(
                        joint_graph['cols'], n_entities+men_cand_idxs[:k])
                    joint_graph['data'] = np.append(
                        joint_graph['data'], men_cand_scores[:k])
    # Compute and print recall metric
    recall_idx_mode = np.argmax(recall_idxs)
    recall_idx_mode_prop = recall_idxs[recall_idx_mode]/np.sum(recall_idxs)
    logger.info(f"""
    Recall metrics (for {len(men_embeds)} queries):
    ---------------""")
    logger.info(f"highest recall idx = {recall_idx_mode} ({recall_idxs[recall_idx_mode]}/{np.sum(recall_idxs)} = {recall_idx_mode_prop})")
    for recall_k in recall_accuracy:
        recall_accuracy[recall_k] /= len(men_embeds)
        logger.info(f"recall@{recall_k} = {recall_accuracy[recall_k]}")
    if params['only_recall']:
        exit()
    # Pickle the graphs
    print("Saving joint graphs...")
    with open(graph_path, 'wb') as write_handle:
        pickle.dump(joint_graphs, write_handle,
                    protocol=pickle.HIGHEST_PROTOCOL)
    graph_mode = params.get('graph_mode', None)
    result_overview = {
        'n_entities': n_entities,
        'n_mentions': n_mentions
    }
    results = {}
    # Evaluate both graph modes unless a valid one was explicitly requested
    if graph_mode is None or graph_mode not in ['directed', 'undirected']:
        results['directed'] = []
        results['undirected'] = []
    else:
        results[graph_mode] = []
    knn_fetch_time = time.time() - time_start
    graph_processing_time = time.time()
    n_graphs_processed = 0.
    for mode in results:
        print(f'\nEvaluation mode: {mode.upper()}')
        for k in joint_graphs:
            if k <= knn:
                print(f"\nGraph (k={k}):")
                # Partition graph based on cluster-linking constraints
                partitioned_graph, clusters = partition_graph(
                    joint_graphs[k], n_entities, mode == 'directed', return_clusters=True)
                # Infer predictions from clusters
                result = analyzeClusters(clusters, test_dictionary, mention_data, k)
                # Store result
                results[mode].append(result)
                n_graphs_processed += 1
    avg_graph_processing_time = (time.time() - graph_processing_time) / n_graphs_processed
    avg_per_graph_time = (knn_fetch_time + avg_graph_processing_time) / 60
    execution_time = (time.time() - time_start) / 60
    # Store results
    output_file_name = os.path.join(
        output_path, f"eval_results_{__import__('calendar').timegm(__import__('time').gmtime())}")
    # NOTE(review): recall_accuracy is defined unconditionally above, so this
    # bare except looks vestigial — confirm which exception it is meant to catch
    try:
        for recall_k in recall_accuracy:
            result_overview[f'recall@{recall_k}'] = recall_accuracy[recall_k]
    except:
        logger.info("Recall data not available since graphs were loaded from disk")
    for mode in results:
        mode_results = results[mode]
        result_overview[mode] = {}
        for r in mode_results:
            k = r['knn_mentions']
            result_overview[mode][f'accuracy@knn{k}'] = r['accuracy']
            logger.info(f"{mode} accuracy@knn{k} = {r['accuracy']}")
            output_file = f'{output_file_name}-{mode}-{k}.json'
            with open(output_file, 'w') as f:
                json.dump(r, f, indent=2)
            print(f"\nPredictions ({mode}) @knn{k} saved at: {output_file}")
    with open(f'{output_file_name}.json', 'w') as f:
        json.dump(result_overview, f, indent=2)
    print(f"\nPredictions overview saved at: {output_file_name}.json")
    logger.info("\nThe avg. per graph evaluation time is {} minutes\n".format(avg_per_graph_time))
    logger.info("\nThe total evaluation took {} minutes\n".format(execution_time))
if __name__ == "__main__":
    parser = BlinkParser(add_model_args=True)
    parser.add_eval_args()
    args = parser.parse_args()
    print(args)
    # Pass the parsed arguments to main as a plain parameter dict
    # (removed stray repo-path text that had been fused onto this line)
    main(args.__dict__)
import os
import json
import math
import time
import torch
from torch.utils.data import (DataLoader, SequentialSampler)
import numpy as np
from tqdm import tqdm
import pickle
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components
from special_partition.special_partition import cluster_linking_partition
from collections import defaultdict
import blink.biencoder.data_process_mult as data_process
import blink.candidate_ranking.utils as utils
from blink.common.params import BlinkParser
from blink.biencoder.biencoder import BiEncoderRanker
from IPython import embed
def get_query_nn(knn,
                 embeds,
                 index,
                 q_embed,
                 searchK=None,
                 gold_idxs=None,
                 type_idx_mapping=None):
    """
    Retrieve the top-knn nearest neighbours of a query embedding, re-scored
    by an exact dot product on the GPU.

    Parameters
    ----------
    knn : int
        the number of nearest-neighbours to return
    embeds : ndarray
        matrix of embeddings
    index : faiss
        faiss index of the embeddings
    q_embed : ndarray
        2-D array containing the query embedding
    searchK: int
        optional parameter, the exact number of nearest-neighbours to retrieve and score
    gold_idxs : array
        optional parameter, list of golden cui indexes
    type_idx_mapping : array
        optional parameter, list mapping type-specific indexes to the indexes of the full dictionary

    Returns
    -------
    nn_idxs : array
        nearest neighbour indices for the query, sorted in descending order of scores
    scores : array
        similarity scores for each nearest neighbour, sorted in descending order
    topk : int
        (only when gold_idxs is given) rank at which a gold index appears, -1 if absent
    """
    # Retrieve extra candidates to compensate for the approximate search
    n_fetch = searchK if searchK is not None else max(16, 2*knn)
    _, cand_idxs = index.search(q_embed, n_fetch)
    cand_idxs = cand_idxs.astype(np.int64).flatten()
    if type_idx_mapping is not None:
        # Translate type-local indexes back to full-dictionary indexes
        cand_idxs = type_idx_mapping[cand_idxs]
    # Re-score every candidate against the query with an exact dot product
    cand_embeds = torch.tensor(embeds[cand_idxs]).cuda()
    cand_scores = torch.flatten(
        torch.mm(torch.tensor(q_embed).cuda(), cand_embeds.T)).cpu()
    # Order candidates from highest to lowest score
    ranked = sorted(zip(cand_idxs, cand_scores), key=lambda pair: -pair[1])
    cand_idxs, cand_scores = zip(*ranked)
    top_idxs = np.array(cand_idxs[:knn], dtype=np.int64)
    top_scores = np.array(cand_scores[:knn])
    if gold_idxs is None:
        # Return only the top k neighbours
        return top_idxs, top_scores
    # Rank of the first gold index among all retrieved candidates (-1 if not found)
    topk = -1
    for rank, cand in enumerate(cand_idxs):
        if cand in gold_idxs:
            topk = rank
            break
    # Return only the top k neighbours, and the recall index
    return top_idxs, top_scores, topk
def partition_graph(graph, n_entities, directed, return_clusters=False):
    """
    Parameters
    ----------
    graph : dict
        object containing rows, cols, data, and shape of the entity-mention joint graph
    n_entities : int
        number of entities in the dictionary
    directed : bool
        whether the graph construction should be directed or undirected
    return_clusters : bool
        flag to indicate if clusters need to be returned from the partition
    Returns
    -------
    partitioned_graph : coo_matrix
        partitioned graph with each mention connected to only one entity
    clusters : dict
        (optional) contains arrays of connected component indices of the graph
    """
    rows, cols, data, shape = graph['rows'], graph['cols'], graph['data'], graph['shape']
    # Prune edges under the cluster-linking constraint (one entity per mention)
    rows, cols, data = cluster_linking_partition(
        rows,
        cols,
        data,
        n_entities,
        directed
    )
    # Construct the partitioned graph; the original shape is preserved
    partitioned_graph = coo_matrix(
        (data, (rows, cols)), shape=shape)
    if return_clusters:
        # Get an array of the graph with each index marked with the component label that it is connected to
        _, cc_labels = connected_components(
            csgraph=partitioned_graph,
            directed=directed,
            return_labels=True)
        # Store clusters of indices marked with labels with at least 2 connected components
        unique_cc_labels, cc_sizes = np.unique(cc_labels, return_counts=True)
        filtered_labels = unique_cc_labels[cc_sizes >= 2]
        clusters = defaultdict(list)
        # Group node indices by component label, skipping singleton components
        for i, cc_label in enumerate(cc_labels):
            if cc_label in filtered_labels:
                clusters[cc_label].append(i)
        return partitioned_graph, clusters
    return partitioned_graph
def analyzeClusters(clusters, dictionary, queries, knn):
    """
    Score cluster-based entity predictions against gold CUIs.

    Parameters
    ----------
    clusters : dict
        contains arrays of connected component indices of a graph
    dictionary : ndarray
        entity dictionary to evaluate
    queries : ndarray
        mention queries to evaluate
    knn : int
        the number of nearest-neighbour mention candidates considered

    Returns
    -------
    results : dict
        Contains n_entities, n_mentions, knn_mentions, accuracy, failure[], success[]
    """
    n_entities = len(dictionary)
    n_mentions = len(queries)
    results = {
        'n_entities': n_entities,
        'n_mentions': n_mentions,
        'knn_mentions': knn,
        'accuracy': 0,
        'failure': [],
        'success': []
    }
    _debug_n_mens_evaluated = 0
    _debug_clusters_wo_entities = 0
    _debug_clusters_w_mult_entities = 0
    print("Analyzing clusters...")
    for cluster in clusters.values():
        # By construction, the smallest graph index in a cluster is its entity
        entity_graph_idx = cluster[0]
        entity_graph_idxs = [entity_graph_idx]
        if entity_graph_idx >= n_entities:
            # First element is a mention => the cluster contains no entity
            _debug_clusters_wo_entities += 1
            continue
        predicted_cuis = [str(dictionary[entity_graph_idx]['cui'])]
        seen_extra_entity = False
        for graph_idx in cluster[1:]:
            mention_idx = graph_idx - n_entities
            if mention_idx < 0:
                # Another entity inside the same cluster
                if not seen_extra_entity:
                    _debug_clusters_w_mult_entities += 1
                    seen_extra_entity = True
                entity_graph_idxs.append(graph_idx)
                # Fold its CUI into the prediction set if not already present
                predicted_cuis += list(set([dictionary[graph_idx]['cui']]) - set(predicted_cuis))
                continue
            _debug_n_mens_evaluated += 1
            mention = queries[mention_idx]
            gold_cuis = list(map(str, mention['label_cuis']))
            report_obj = {
                'mention_id': mention['mention_id'],
                'mention_name': mention['mention_name'],
                'mention_gold_cui': '|'.join(gold_cuis),
                'mention_gold_cui_name': '|'.join(
                    [dictionary[i]['title'] for i in mention['label_idxs'][:mention['n_labels']]]),
                'predicted_name': '|'.join(
                    [dictionary[i]['title'] for i in entity_graph_idxs]),
                'predicted_cui': '|'.join(predicted_cuis),
            }
            if set(predicted_cuis).isdisjoint(gold_cuis):
                # Incorrect prediction
                results['failure'].append(report_obj)
            else:
                # Correct prediction
                results['accuracy'] += 1
                results['success'].append(report_obj)
    results['accuracy'] = f"{results['accuracy'] / float(_debug_n_mens_evaluated) * 100} %"
    print(f"Accuracy = {results['accuracy']}")
    # Run sanity checks
    assert n_mentions == _debug_n_mens_evaluated
    assert _debug_clusters_wo_entities == 0
    assert _debug_clusters_w_mult_entities == 0
    return results
def main(params):
output_path = params["output_path"]
if not os.path.exists(output_path):
os.makedirs(output_path)
logger = utils.get_logger(params["output_path"], 'log-eval')
pickle_src_path = params["pickle_src_path"]
if pickle_src_path is None or not os.path.exists(pickle_src_path):
pickle_src_path = output_path
embed_data_path = params["embed_data_path"]
if embed_data_path is None or not os.path.exists(embed_data_path):
embed_data_path = output_path
# Init model
reranker = BiEncoderRanker(params)
reranker.model.eval()
tokenizer = reranker.tokenizer
model = reranker.model
device = reranker.device
n_gpu = reranker.n_gpu
knn = params["knn"]
use_types = params["use_types"]
data_split = params["data_split"] # Default = "test"
# Load test data
entity_dictionary_loaded = False
test_dictionary_pkl_path = os.path.join(pickle_src_path, 'test_dictionary.pickle')
test_tensor_data_pkl_path = os.path.join(pickle_src_path, 'test_tensor_data.pickle')
test_mention_data_pkl_path = os.path.join(pickle_src_path, 'test_mention_data.pickle')
if os.path.isfile(test_dictionary_pkl_path):
print("Loading stored processed entity dictionary...")
with open(test_dictionary_pkl_path, 'rb') as read_handle:
test_dictionary = pickle.load(read_handle)
entity_dictionary_loaded = True
if os.path.isfile(test_tensor_data_pkl_path) and os.path.isfile(test_mention_data_pkl_path):
print("Loading stored processed test data...")
with open(test_tensor_data_pkl_path, 'rb') as read_handle:
test_tensor_data = pickle.load(read_handle)
with open(test_mention_data_pkl_path, 'rb') as read_handle:
mention_data = pickle.load(read_handle)
else:
test_samples = utils.read_dataset(data_split, params["data_path"])
if not entity_dictionary_loaded:
with open(os.path.join(params["data_path"], 'dictionary.pickle'), 'rb') as read_handle:
test_dictionary = pickle.load(read_handle)
# Check if dataset has multiple ground-truth labels
mult_labels = "labels" in test_samples[0].keys()
if params["filter_unlabeled"]:
# Filter samples without gold entities
test_samples = list(filter(lambda sample: (len(sample["labels"]) > 0) if mult_labels else (sample["label"] is not None), test_samples))
logger.info("Read %d test samples." % len(test_samples))
mention_data, test_dictionary, test_tensor_data = data_process.process_mention_data(
test_samples,
test_dictionary,
tokenizer,
params["max_context_length"],
params["max_cand_length"],
multi_label_key="labels" if mult_labels else None,
context_key=params["context_key"],
silent=params["silent"],
logger=logger,
debug=params["debug"],
knn=knn,
dictionary_processed=entity_dictionary_loaded
)
print("Saving processed test data...")
if not entity_dictionary_loaded:
with open(test_dictionary_pkl_path, 'wb') as write_handle:
pickle.dump(test_dictionary, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(test_tensor_data_pkl_path, 'wb') as write_handle:
pickle.dump(test_tensor_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
with open(test_mention_data_pkl_path, 'wb') as write_handle:
pickle.dump(mention_data, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
# Store test dictionary token ids
test_dict_vecs = torch.tensor(
list(map(lambda x: x['ids'], test_dictionary)), dtype=torch.long)
# Store test mention token ids
test_men_vecs = test_tensor_data[:][0]
n_entities = len(test_dict_vecs)
n_mentions = len(test_tensor_data)
# Values of k to run the evaluation against
knn_vals = [0] + [2**i for i in range(int(math.log(knn, 2)) + 1)]
# Store the maximum evaluation k
max_knn = knn_vals[-1]
time_start = time.time()
# Check if graphs are already built
graph_path = os.path.join(output_path, 'graphs.pickle')
if not params['only_recall'] and os.path.isfile(graph_path):
print("Loading stored joint graphs...")
with open(graph_path, 'rb') as read_handle:
joint_graphs = pickle.load(read_handle)
else:
# Initialize graphs to store mention-mention and mention-entity similarity score edges;
# Keyed on k, the number of nearest mentions retrieved
joint_graphs = {}
for k in knn_vals:
joint_graphs[k] = {
'rows': np.array([]),
'cols': np.array([]),
'data': np.array([]),
'shape': (n_entities+n_mentions, n_entities+n_mentions)
}
# Check and load stored embedding data
embed_data_path = os.path.join(embed_data_path, 'embed_data.t7')
embed_data = None
if os.path.isfile(embed_data_path):
embed_data = torch.load(embed_data_path)
if use_types:
if embed_data is not None:
logger.info('Loading stored embeddings and computing indexes')
dict_embeds = embed_data['dict_embeds']
if 'dict_idxs_by_type' in embed_data:
dict_idxs_by_type = embed_data['dict_idxs_by_type']
else:
dict_idxs_by_type = data_process.get_idxs_by_type(test_dictionary)
dict_indexes = data_process.get_index_from_embeds(dict_embeds, dict_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
men_embeds = embed_data['men_embeds']
if 'men_idxs_by_type' in embed_data:
men_idxs_by_type = embed_data['men_idxs_by_type']
else:
men_idxs_by_type = data_process.get_idxs_by_type(mention_data)
men_indexes = data_process.get_index_from_embeds(men_embeds, men_idxs_by_type, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
else:
logger.info("Dictionary: Embedding and building index")
dict_embeds, dict_indexes, dict_idxs_by_type = data_process.embed_and_index(reranker, test_dict_vecs, encoder_type="candidate", n_gpu=n_gpu, corpus=test_dictionary, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
logger.info("Queries: Embedding and building index")
men_embeds, men_indexes, men_idxs_by_type = data_process.embed_and_index(reranker, test_men_vecs, encoder_type="context", n_gpu=n_gpu, corpus=mention_data, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
else:
if embed_data is not None:
logger.info('Loading stored embeddings and computing indexes')
dict_embeds = embed_data['dict_embeds']
dict_index = data_process.get_index_from_embeds(dict_embeds, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
men_embeds = embed_data['men_embeds']
men_index = data_process.get_index_from_embeds(men_embeds, force_exact_search=params['force_exact_search'], probe_mult_factor=params['probe_mult_factor'])
else:
logger.info("Dictionary: Embedding and building index")
dict_embeds, dict_index = data_process.embed_and_index(
reranker, test_dict_vecs, 'candidate', n_gpu=n_gpu, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
logger.info("Queries: Embedding and building index")
men_embeds, men_index = data_process.embed_and_index(
reranker, test_men_vecs, 'context', n_gpu=n_gpu, force_exact_search=params['force_exact_search'], batch_size=params['embed_batch_size'], probe_mult_factor=params['probe_mult_factor'])
# Save computed embedding data if not loaded from disk
if embed_data is None:
embed_data = {}
embed_data['dict_embeds'] = dict_embeds
embed_data['men_embeds'] = men_embeds
if use_types:
embed_data['dict_idxs_by_type'] = dict_idxs_by_type
embed_data['men_idxs_by_type'] = men_idxs_by_type
# NOTE: Cannot pickle faiss index because it is a SwigPyObject
torch.save(embed_data, embed_data_path, pickle_protocol=pickle.HIGHEST_PROTOCOL)
recall_accuracy = {2**i: 0 for i in range(int(math.log(params['recall_k'], 2)) + 1)}
recall_idxs = [0.]*params['recall_k']
logger.info("Starting KNN search...")
# Fetch recall_k (default 16) knn entities for all mentions
# Fetch (k+1) NN mention candidates
if not use_types:
nn_ent_dists, nn_ent_idxs = dict_index.search(men_embeds, params['recall_k'])
nn_men_dists, nn_men_idxs = men_index.search(men_embeds, max_knn + 1)
else:
nn_ent_idxs = np.zeros((len(men_embeds), params['recall_k']))
nn_ent_dists = np.zeros((len(men_embeds), params['recall_k']), dtype='float64')
nn_men_idxs = np.zeros((len(men_embeds), max_knn + 1))
nn_men_dists = np.zeros((len(men_embeds), max_knn + 1), dtype='float64')
for entity_type in men_indexes:
men_embeds_by_type = men_embeds[men_idxs_by_type[entity_type]]
nn_ent_dists_by_type, nn_ent_idxs_by_type = dict_indexes[entity_type].search(men_embeds_by_type, params['recall_k'])
nn_men_dists_by_type, nn_men_idxs_by_type = men_indexes[entity_type].search(men_embeds_by_type, max_knn + 1)
nn_ent_idxs_by_type = np.array(list(map(lambda x: dict_idxs_by_type[entity_type][x], nn_ent_idxs_by_type)))
nn_men_idxs_by_type = np.array(list(map(lambda x: men_idxs_by_type[entity_type][x], nn_men_idxs_by_type)))
for i,idx in enumerate(men_idxs_by_type[entity_type]):
nn_ent_idxs[idx] = nn_ent_idxs_by_type[i]
nn_ent_dists[idx] = nn_ent_dists_by_type[i]
nn_men_idxs[idx] = nn_men_idxs_by_type[i]
nn_men_dists[idx] = nn_men_dists_by_type[i]
logger.info("Search finished")
logger.info('Building graphs')
# Find the most similar entity and k-nn mentions for each mention query
for men_query_idx, men_embed in enumerate(tqdm(men_embeds, total=len(men_embeds), desc="Building graph")):
# Get nearest entity candidate
dict_cand_idx = nn_ent_idxs[men_query_idx][0]
dict_cand_score = nn_ent_dists[men_query_idx][0]
# Compute recall metric
gold_idxs = mention_data[men_query_idx]["label_idxs"][:mention_data[men_query_idx]["n_labels"]]
recall_idx = np.argwhere(nn_ent_idxs[men_query_idx] == gold_idxs[0])
if len(recall_idx) != 0:
recall_idx = int(recall_idx)
recall_idxs[recall_idx] += 1.
for recall_k in recall_accuracy:
if recall_idx < recall_k:
recall_accuracy[recall_k] += 1.
if not params['only_recall']:
# Filter candidates to remove mention query and keep only the top k candidates
men_cand_idxs = nn_men_idxs[men_query_idx]
men_cand_scores = nn_men_dists[men_query_idx]
filter_mask = men_cand_idxs != men_query_idx
men_cand_idxs, men_cand_scores = men_cand_idxs[filter_mask][:max_knn], men_cand_scores[filter_mask][:max_knn]
# Add edges to the graphs
for k in joint_graphs:
joint_graph = joint_graphs[k]
# Add mention-entity edge
joint_graph['rows'] = np.append(
joint_graph['rows'], [n_entities+men_query_idx]) # Mentions added at an offset of maximum entities
joint_graph['cols'] = np.append(
joint_graph['cols'], dict_cand_idx)
joint_graph['data'] = np.append(
joint_graph['data'], dict_cand_score)
if k > 0:
# Add mention-mention edges
joint_graph['rows'] = np.append(
joint_graph['rows'], [n_entities+men_query_idx]*len(men_cand_idxs[:k]))
joint_graph['cols'] = np.append(
joint_graph['cols'], n_entities+men_cand_idxs[:k])
joint_graph['data'] = np.append(
joint_graph['data'], men_cand_scores[:k])
# Compute and print recall metric
recall_idx_mode = np.argmax(recall_idxs)
recall_idx_mode_prop = recall_idxs[recall_idx_mode]/np.sum(recall_idxs)
logger.info(f"""
Recall metrics (for {len(men_embeds)} queries):
---------------""")
logger.info(f"highest recall idx = {recall_idx_mode} ({recall_idxs[recall_idx_mode]}/{np.sum(recall_idxs)} = {recall_idx_mode_prop})")
for recall_k in recall_accuracy:
recall_accuracy[recall_k] /= len(men_embeds)
logger.info(f"recall@{recall_k} = {recall_accuracy[recall_k]}")
if params['only_recall']:
exit()
# Pickle the graphs
print("Saving joint graphs...")
with open(graph_path, 'wb') as write_handle:
pickle.dump(joint_graphs, write_handle,
protocol=pickle.HIGHEST_PROTOCOL)
graph_mode = params.get('graph_mode', None)
result_overview = {
'n_entities': n_entities,
'n_mentions': n_mentions
}
results = {}
if graph_mode is None or graph_mode not in ['directed', 'undirected']:
results['directed'] = []
results['undirected'] = []
else:
results[graph_mode] = []
knn_fetch_time = time.time() - time_start
graph_processing_time = time.time()
n_graphs_processed = 0.
for mode in results:
print(f'\nEvaluation mode: {mode.upper()}')
for k in joint_graphs:
if k <= knn:
print(f"\nGraph (k={k}):")
# Partition graph based on cluster-linking constraints
partitioned_graph, clusters = partition_graph(
joint_graphs[k], n_entities, mode == 'directed', return_clusters=True)
# Infer predictions from clusters
result = analyzeClusters(clusters, test_dictionary, mention_data, k)
# Store result
results[mode].append(result)
n_graphs_processed += 1
avg_graph_processing_time = (time.time() - graph_processing_time) / n_graphs_processed
avg_per_graph_time = (knn_fetch_time + avg_graph_processing_time) / 60
execution_time = (time.time() - time_start) / 60
# Store results
output_file_name = os.path.join(
output_path, f"eval_results_{__import__('calendar').timegm(__import__('time').gmtime())}")
try:
for recall_k in recall_accuracy:
result_overview[f'recall@{recall_k}'] = recall_accuracy[recall_k]
except:
logger.info("Recall data not available since graphs were loaded from disk")
for mode in results:
mode_results = results[mode]
result_overview[mode] = {}
for r in mode_results:
k = r['knn_mentions']
result_overview[mode][f'accuracy@knn{k}'] = r['accuracy']
logger.info(f"{mode} accuracy@knn{k} = {r['accuracy']}")
output_file = f'{output_file_name}-{mode}-{k}.json'
with open(output_file, 'w') as f:
json.dump(r, f, indent=2)
print(f"\nPredictions ({mode}) @knn{k} saved at: {output_file}")
with open(f'{output_file_name}.json', 'w') as f:
json.dump(result_overview, f, indent=2)
print(f"\nPredictions overview saved at: {output_file_name}.json")
logger.info("\nThe avg. per graph evaluation time is {} minutes\n".format(avg_per_graph_time))
logger.info("\nThe total evaluation took {} minutes\n".format(execution_time))
if __name__ == "__main__":
parser = BlinkParser(add_model_args=True)
parser.add_eval_args()
args = parser.parse_args()
print(args)
main(args.__dict__) | 0.755817 | 0.368576 |
import threading
import gzip
import time
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
import ujson as json
from bitrue.helpers import gen_depth_channel, gen_ticker_channel, gen_kline_channel, gen_trade_channel
class BitrueClientProtocol(WebSocketClientProtocol):
def __init__(self):
super(WebSocketClientProtocol, self).__init__()
def onConnect(self, response):
# reset the delay after reconnecting
self.factory.resetDelay()
def onOpen(self):
msg = self.factory.subscribe()
# print(msg)
self.sendMessage(msg.encode("utf8"))
def onMessage(self, playload, isBinary):
msg = BitrueClientProtocol.gzip_inflate(playload) if isBinary else playload
# print(msg)
try:
payload_obj = json.loads(msg.decode("utf8"))
except ValueError:
pass
else:
self.factory.callback(payload_obj)
def onClose(self, wasClean, code, reason):
# print("%s,%s,%s" %(wasClean, code, reason))
self.factory.callback(None)
def onPing(self, playload):
self.sendMessage('{"pong":%d}'%(int(time.time()*1000)).encode("utf8"))
@staticmethod
def gzip_inflate(data):
return gzip.decompress(data)
class BitrueReconnectingClientFactory(ReconnectingClientFactory):
# set initial delay to a short time
initialDelay = 0.1
maxDelay = 10
maxRetries = 5
class BitrueClientFactory(WebSocketClientFactory, BitrueReconnectingClientFactory):
protocol = BitrueClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': "Max reconnect retries reached"
}
def clientConnectionFailed(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
class BitrueSocketManager(threading.Thread):
STREAM_URL = "wss://ws.bitrue.com/kline-api/ws"
WEBSOCKET_DEPTH_5 = "5"
WEBSOCKET_DEPTH_10 = "10"
WEBSOCKET_DEPTH_20 = "20"
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 mintes
def __init__(self, user_timeout=DEFAULT_USER_TIMEOUT):
"""initialize the BitrueSocketManager
Args:
user_timeout ([int], optional): [default timeout]. Defaults to DEFAULT_USER_TIMEOUT.
"""
threading.Thread.__init__(self)
self._conns = {}
self._user_timeout = user_timeout
self._timers = {'user': None, 'margin':None}
self._listen_keys = {'user':None, 'margin':None}
self._account_callbacks = {'user': None, 'margin':None}
def _start_socket(self, name, subscribe, callback):
if name in self._conns:
return False
factory = BitrueClientFactory(self.STREAM_URL)
factory.protocol = BitrueClientProtocol
factory.subscribe = subscribe
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[name] = connectWS(factory, context_factory)
return name
def start_depth_socket(self, symbol, callback, subscribe=None, depth=0, interval=None):
"""subscribe depth for symbol
Args:
symbol ([type]): [description]
subscribe ([type]): [description]
callback (function): [description]
depth ([type], optional): [description]. Defaults to 0.
interval ([type], optional): [description]. Defaults to None.
"""
socket_name = gen_depth_channel(symbol.lower())
return self._start_socket(socket_name, subscribe, callback=callback)
def start_kline_socket(self, symbol, callback, subscribe=None, interval=''):
pass
def start_trade_socket(self, symbol, callback, subscribe=None):
pass
def start_symbol_ticker_socket(self, symbol, callback, subscribe=None):
"""subscribe ticker stream for given symbol
Args:
symbol ([type]): [description]
callback (function): [description]
subscribe (function, optional): subscribe message for ticker subscribe. Defaults to None.
Returns:
[type]: [description]
"""
socket_name = gen_ticker_channel(symbol.lower())
return self._start_socket(socket_name, subscribe, callback=callback)
def stop_socket(self, conn_key):
"""stop a websocket given the connection key
Args:
conn_key (string): the connection key
"""
if conn_key not in self._conns:
return
# disable reconnectiong if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + "?error")
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error abount reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | bitrue/websockets.py |
import threading
import gzip
import time
from autobahn.twisted.websocket import WebSocketClientFactory, WebSocketClientProtocol, connectWS
from twisted.internet import reactor, ssl
from twisted.internet.protocol import ReconnectingClientFactory
from twisted.internet.error import ReactorAlreadyRunning
import ujson as json
from bitrue.helpers import gen_depth_channel, gen_ticker_channel, gen_kline_channel, gen_trade_channel
class BitrueClientProtocol(WebSocketClientProtocol):
def __init__(self):
super(WebSocketClientProtocol, self).__init__()
def onConnect(self, response):
# reset the delay after reconnecting
self.factory.resetDelay()
def onOpen(self):
msg = self.factory.subscribe()
# print(msg)
self.sendMessage(msg.encode("utf8"))
def onMessage(self, playload, isBinary):
msg = BitrueClientProtocol.gzip_inflate(playload) if isBinary else playload
# print(msg)
try:
payload_obj = json.loads(msg.decode("utf8"))
except ValueError:
pass
else:
self.factory.callback(payload_obj)
def onClose(self, wasClean, code, reason):
# print("%s,%s,%s" %(wasClean, code, reason))
self.factory.callback(None)
def onPing(self, playload):
self.sendMessage('{"pong":%d}'%(int(time.time()*1000)).encode("utf8"))
@staticmethod
def gzip_inflate(data):
return gzip.decompress(data)
class BitrueReconnectingClientFactory(ReconnectingClientFactory):
# set initial delay to a short time
initialDelay = 0.1
maxDelay = 10
maxRetries = 5
class BitrueClientFactory(WebSocketClientFactory, BitrueReconnectingClientFactory):
protocol = BitrueClientProtocol
_reconnect_error_payload = {
'e': 'error',
'm': "Max reconnect retries reached"
}
def clientConnectionFailed(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
def clientConnectionLost(self, connector, reason):
self.retry(connector)
if self.retries > self.maxRetries:
self.callback(self._reconnect_error_payload)
class BitrueSocketManager(threading.Thread):
STREAM_URL = "wss://ws.bitrue.com/kline-api/ws"
WEBSOCKET_DEPTH_5 = "5"
WEBSOCKET_DEPTH_10 = "10"
WEBSOCKET_DEPTH_20 = "20"
DEFAULT_USER_TIMEOUT = 30 * 60 # 30 mintes
def __init__(self, user_timeout=DEFAULT_USER_TIMEOUT):
"""initialize the BitrueSocketManager
Args:
user_timeout ([int], optional): [default timeout]. Defaults to DEFAULT_USER_TIMEOUT.
"""
threading.Thread.__init__(self)
self._conns = {}
self._user_timeout = user_timeout
self._timers = {'user': None, 'margin':None}
self._listen_keys = {'user':None, 'margin':None}
self._account_callbacks = {'user': None, 'margin':None}
def _start_socket(self, name, subscribe, callback):
if name in self._conns:
return False
factory = BitrueClientFactory(self.STREAM_URL)
factory.protocol = BitrueClientProtocol
factory.subscribe = subscribe
factory.callback = callback
factory.reconnect = True
context_factory = ssl.ClientContextFactory()
self._conns[name] = connectWS(factory, context_factory)
return name
def start_depth_socket(self, symbol, callback, subscribe=None, depth=0, interval=None):
"""subscribe depth for symbol
Args:
symbol ([type]): [description]
subscribe ([type]): [description]
callback (function): [description]
depth ([type], optional): [description]. Defaults to 0.
interval ([type], optional): [description]. Defaults to None.
"""
socket_name = gen_depth_channel(symbol.lower())
return self._start_socket(socket_name, subscribe, callback=callback)
def start_kline_socket(self, symbol, callback, subscribe=None, interval=''):
pass
def start_trade_socket(self, symbol, callback, subscribe=None):
pass
def start_symbol_ticker_socket(self, symbol, callback, subscribe=None):
"""subscribe ticker stream for given symbol
Args:
symbol ([type]): [description]
callback (function): [description]
subscribe (function, optional): subscribe message for ticker subscribe. Defaults to None.
Returns:
[type]: [description]
"""
socket_name = gen_ticker_channel(symbol.lower())
return self._start_socket(socket_name, subscribe, callback=callback)
def stop_socket(self, conn_key):
"""stop a websocket given the connection key
Args:
conn_key (string): the connection key
"""
if conn_key not in self._conns:
return
# disable reconnectiong if we are closing
self._conns[conn_key].factory = WebSocketClientFactory(self.STREAM_URL + "?error")
self._conns[conn_key].disconnect()
del(self._conns[conn_key])
def run(self):
try:
reactor.run(installSignalHandlers=False)
except ReactorAlreadyRunning:
# Ignore error abount reactor already running
pass
def close(self):
"""Close all connections
"""
keys = set(self._conns.keys())
for key in keys:
self.stop_socket(key)
self._conns = {} | 0.508544 | 0.053576 |
import pandas as pd, numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm,metrics
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import StratifiedKFold
column = "review"
train = pd.read_csv('./data/train.csv',lineterminator='\n')
test = pd.read_csv('./data/20190529_test.csv',lineterminator='\n')
test_id = test["ID"].copy()
vec = TfidfVectorizer(ngram_range=(1,2),min_df=3, max_df=0.9,use_idf=1,smooth_idf=1, sublinear_tf=1)
trn_term_doc = vec.fit_transform(train[column])
test_term_doc = vec.transform(test[column])
train_data = trn_term_doc
test_data = test_term_doc
fid0=open('./data/result0.csv','w')
label = train["label"]
train["predict"] = [0 if item=='Negative' else 1 for item in label]
train_label=(train["predict"]).astype(int)
folds = StratifiedKFold(n_splits=10,shuffle=False,random_state=2019)
predictions = np.zeros(test_id.shape[0])
aucs = []
for fold_, (train_index,test_index) in enumerate(folds.split(train_data,train_label)):
print("Fold:{}".format(fold_ + 1))
cv_train_data,cv_train_label = train_data[train_index],train_label[train_index]
cv_test_data,cv_test_label = train_data[test_index],train_label[test_index]
lin_clf = svm.LinearSVC()
lin_clf = CalibratedClassifierCV(lin_clf,cv=5)
lin_clf.fit(cv_train_data,cv_train_label)
test_predict = lin_clf.predict_proba(cv_test_data)[:, 1]
auc = metrics.roc_auc_score(cv_test_label,test_predict)
predictions += lin_clf.predict_proba(test_data)[:,1] / folds.n_splits
aucs.append(auc)
print("auc score: %.5f" % auc)
print("Mean auc",np.mean(aucs))
i=1
fid0.write("ID,Pred"+"\n")
for item in predictions:
fid0.write(str(i)+","+str(item)+"\n")
i=i+1
fid0.close()
# clf = svm.SVC(C=5.0)
# clf.fit(trn_term_doc,y)
# predict_prob_y = clf.predict_proba(test_term_doc)#基于SVM对验证集做出预测,prodict_prob_y 为预测的概率
# #end svm ,start metrics
# test_auc = metrics.roc_auc_score(test_y,predict_prob_y)#验证集上的auc值
# print(test_auc) | clafiyy/new/baseline.py | import pandas as pd, numpy as np
from sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer
from sklearn import svm,metrics
from sklearn.calibration import CalibratedClassifierCV
from sklearn.model_selection import StratifiedKFold
column = "review"
train = pd.read_csv('./data/train.csv',lineterminator='\n')
test = pd.read_csv('./data/20190529_test.csv',lineterminator='\n')
test_id = test["ID"].copy()
vec = TfidfVectorizer(ngram_range=(1,2),min_df=3, max_df=0.9,use_idf=1,smooth_idf=1, sublinear_tf=1)
trn_term_doc = vec.fit_transform(train[column])
test_term_doc = vec.transform(test[column])
train_data = trn_term_doc
test_data = test_term_doc
fid0=open('./data/result0.csv','w')
label = train["label"]
train["predict"] = [0 if item=='Negative' else 1 for item in label]
train_label=(train["predict"]).astype(int)
folds = StratifiedKFold(n_splits=10,shuffle=False,random_state=2019)
predictions = np.zeros(test_id.shape[0])
aucs = []
for fold_, (train_index,test_index) in enumerate(folds.split(train_data,train_label)):
print("Fold:{}".format(fold_ + 1))
cv_train_data,cv_train_label = train_data[train_index],train_label[train_index]
cv_test_data,cv_test_label = train_data[test_index],train_label[test_index]
lin_clf = svm.LinearSVC()
lin_clf = CalibratedClassifierCV(lin_clf,cv=5)
lin_clf.fit(cv_train_data,cv_train_label)
test_predict = lin_clf.predict_proba(cv_test_data)[:, 1]
auc = metrics.roc_auc_score(cv_test_label,test_predict)
predictions += lin_clf.predict_proba(test_data)[:,1] / folds.n_splits
aucs.append(auc)
print("auc score: %.5f" % auc)
print("Mean auc",np.mean(aucs))
i=1
fid0.write("ID,Pred"+"\n")
for item in predictions:
fid0.write(str(i)+","+str(item)+"\n")
i=i+1
fid0.close()
# clf = svm.SVC(C=5.0)
# clf.fit(trn_term_doc,y)
# predict_prob_y = clf.predict_proba(test_term_doc)#基于SVM对验证集做出预测,prodict_prob_y 为预测的概率
# #end svm ,start metrics
# test_auc = metrics.roc_auc_score(test_y,predict_prob_y)#验证集上的auc值
# print(test_auc) | 0.187914 | 0.213029 |
import db_handler
import models
from Tkinter import Tk
def copy_to_clipboard(text):
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(text)
r.destroy()
def print_member_details(member_id, email=None, use_prev_title=False):
''' Print the member details for the supplied ID
Substitute member's email for 'email' if given
'use_prev_title' governs whether to prepend (I)PID, (I)PCC or (I)PDG if applicable
'''
overrides = {}
if email:
overrides['email'] = email
data = db_handler.get_member_data(member_id, overrides)
out = []
if data:
# got valid data, build it
out.append('')
prev_title = ''
if use_prev_title:
prev_title = data['prev_title']
if data['name']:
s = 'Member: %s %s' % (data['prev_title'], data['name'])
if data.get('deceased', False):
s += ' (Deceased)'
out.append(s)
if data['partner']:
out.append('Partner: %s %s' % ('Lion' if data['partner_lion'] else '', data['partner']))
if data['resigned']:
out.append('Resigned')
if data['join_date']:
out.append('Joined Lions in %d' % data['join_date'])
out.append('')
for i in data['add']:
out.append(i)
out.append(data['country'])
out.append('')
for h,i in zip([i[0].upper() for i in db_handler.MEMBER_PHS], data['phone']):
if i:
out.append('%s: %s' % (h,i))
if data['email']:
out.append('')
out.append('Email: %s' % data['email'])
if data['club']:
out.append('')
out.append('Home Club: %s' % data['club'])
else:
out.append('Invalid member')
print '\n'.join(out)
copy_to_clipboard('\n'.join(out))
def get_officerless_clubs(struct_id, year):
''' Return a list of club names for clubs which do not have recorded officers
in the given struct for the specified year
'''
out = []
for c in db_handler.get_clubs_in_dist(struct_id, ['Club President', 'Club Secretary', 'Club Treasurer'], year):
if not all(c['officers']):
out.append(c['name'])
return out
def get_meeting_strings():
''' Build and return a meeting string for all clubs
'''
import json
dic = {}
meeting_model = models.Meetings.objects
for d in db_handler.get_md_details()['dists']:
for c in db_handler.get_clubs_in_dist(d['id'], []):
meetings = meeting_model.filter(club__id=int(c['id']))
s = []
for m in meetings:
# print meeting
try:
s.append('%s %s at %s%s' % (models.weeks[m.week][1], models.weekdays[m.day-1][1], m.time.strftime('%H:%M'),
' (%s)' % m.spec_ins if m.spec_ins else ''))
except:
print m.week, m.day, len(models.weeks), len(models.weekdays)
raise
dic[int(c['id'])] = ';'.join(s)
fh = open('meeting_strings.json','w')
json.dump(dic, fh)
fh.close()
print 'done'
def write_meeting_strings():
import json
fh = open('altered_meetings.json', 'r')
j = json.load(fh)
fh.close()
for k,v in j.items():
try:
c = models.Club.objects.get(id=int(k))
c.meet_time = v
c.save()
except:
print 'No match for club with id ', k
raise
print 'done'
if __name__ == "__main__":
def member_info(args):
print_member_details(args.member_id, use_prev_title=args.no_prev_title)
def clubs_without_officers(args):
print '\n'.join(get_officerless_clubs(args.id, args.year))
# parse command line args, some of which may override conf settings
import argparse
parser = argparse.ArgumentParser(description='Explore MD Directory data')
subparsers = parser.add_subparsers()
parser_member_info = subparsers.add_parser('member_info', help='Print info for a given member ID')
parser_member_info.add_argument('member_id', action='store', type=int, help='The member ID to look up')
# store the flag as false so as not to need inversion when calling the print function
parser_member_info.add_argument('--no_prev_title', action='store_false', default=True, help='Do not include title member holds through previous position (i.e. PDG, PCC, PID)')
parser_member_info.set_defaults(func=member_info)
parser_member_info = subparsers.add_parser('clubs_without_officers', help='Print a list of clubs without officer info for a given year and district')
parser_member_info.add_argument('year', action='store', type=int, help='The Lionistic year to use')
parser_member_info.add_argument('id', action='store', type=int, help='The district ID to look for clubs in')
parser_member_info.set_defaults(func=clubs_without_officers)
args = parser.parse_args()
args.func(args) | common_modules/db_explorer.py | import db_handler
import models
from Tkinter import Tk
def copy_to_clipboard(text):
r = Tk()
r.withdraw()
r.clipboard_clear()
r.clipboard_append(text)
r.destroy()
def print_member_details(member_id, email=None, use_prev_title=False):
''' Print the member details for the supplied ID
Substitute member's email for 'email' if given
'use_prev_title' governs whether to prepend (I)PID, (I)PCC or (I)PDG if applicable
'''
overrides = {}
if email:
overrides['email'] = email
data = db_handler.get_member_data(member_id, overrides)
out = []
if data:
# got valid data, build it
out.append('')
prev_title = ''
if use_prev_title:
prev_title = data['prev_title']
if data['name']:
s = 'Member: %s %s' % (data['prev_title'], data['name'])
if data.get('deceased', False):
s += ' (Deceased)'
out.append(s)
if data['partner']:
out.append('Partner: %s %s' % ('Lion' if data['partner_lion'] else '', data['partner']))
if data['resigned']:
out.append('Resigned')
if data['join_date']:
out.append('Joined Lions in %d' % data['join_date'])
out.append('')
for i in data['add']:
out.append(i)
out.append(data['country'])
out.append('')
for h,i in zip([i[0].upper() for i in db_handler.MEMBER_PHS], data['phone']):
if i:
out.append('%s: %s' % (h,i))
if data['email']:
out.append('')
out.append('Email: %s' % data['email'])
if data['club']:
out.append('')
out.append('Home Club: %s' % data['club'])
else:
out.append('Invalid member')
print '\n'.join(out)
copy_to_clipboard('\n'.join(out))
def get_officerless_clubs(struct_id, year):
''' Return a list of club names for clubs which do not have recorded officers
in the given struct for the specified year
'''
out = []
for c in db_handler.get_clubs_in_dist(struct_id, ['Club President', 'Club Secretary', 'Club Treasurer'], year):
if not all(c['officers']):
out.append(c['name'])
return out
def get_meeting_strings():
''' Build and return a meeting string for all clubs
'''
import json
dic = {}
meeting_model = models.Meetings.objects
for d in db_handler.get_md_details()['dists']:
for c in db_handler.get_clubs_in_dist(d['id'], []):
meetings = meeting_model.filter(club__id=int(c['id']))
s = []
for m in meetings:
# print meeting
try:
s.append('%s %s at %s%s' % (models.weeks[m.week][1], models.weekdays[m.day-1][1], m.time.strftime('%H:%M'),
' (%s)' % m.spec_ins if m.spec_ins else ''))
except:
print m.week, m.day, len(models.weeks), len(models.weekdays)
raise
dic[int(c['id'])] = ';'.join(s)
fh = open('meeting_strings.json','w')
json.dump(dic, fh)
fh.close()
print 'done'
def write_meeting_strings():
import json
fh = open('altered_meetings.json', 'r')
j = json.load(fh)
fh.close()
for k,v in j.items():
try:
c = models.Club.objects.get(id=int(k))
c.meet_time = v
c.save()
except:
print 'No match for club with id ', k
raise
print 'done'
if __name__ == "__main__":
def member_info(args):
print_member_details(args.member_id, use_prev_title=args.no_prev_title)
def clubs_without_officers(args):
print '\n'.join(get_officerless_clubs(args.id, args.year))
# parse command line args, some of which may override conf settings
import argparse
parser = argparse.ArgumentParser(description='Explore MD Directory data')
subparsers = parser.add_subparsers()
parser_member_info = subparsers.add_parser('member_info', help='Print info for a given member ID')
parser_member_info.add_argument('member_id', action='store', type=int, help='The member ID to look up')
# store the flag as false so as not to need inversion when calling the print function
parser_member_info.add_argument('--no_prev_title', action='store_false', default=True, help='Do not include title member holds through previous position (i.e. PDG, PCC, PID)')
parser_member_info.set_defaults(func=member_info)
parser_member_info = subparsers.add_parser('clubs_without_officers', help='Print a list of clubs without officer info for a given year and district')
parser_member_info.add_argument('year', action='store', type=int, help='The Lionistic year to use')
parser_member_info.add_argument('id', action='store', type=int, help='The district ID to look for clubs in')
parser_member_info.set_defaults(func=clubs_without_officers)
args = parser.parse_args()
args.func(args) | 0.185172 | 0.118793 |
from unittest import TestCase
import jwt
from piccolo.apps.user.tables import BaseUser
from starlette.exceptions import HTTPException
from starlette.routing import Route, Router
from starlette.testclient import TestClient
from piccolo_api.jwt_auth.middleware import JWTMiddleware
from piccolo_api.token_auth.tables import TokenAuth
APP = Router([Route("/", lambda endpoint: endpoint)])
APP = JWTMiddleware(asgi=APP, secret="SECRET") # type: ignore
class TestJWTMiddleware(TestCase):
def setUp(self):
BaseUser.create_table().run_sync()
TokenAuth.create_table().run_sync()
def tearDown(self):
TokenAuth.alter().drop_table().run_sync()
BaseUser.alter().drop_table().run_sync()
def test_empty_token(self):
client = TestClient(APP)
with self.assertRaises(HTTPException):
response = client.get("/")
self.assertTrue(response.status_code == 403)
self.assertTrue(response.json()["detail"] == "Token not found")
def test_invalid_token_format(self):
client = TestClient(APP)
with self.assertRaises(HTTPException):
response = client.get("/", headers={"authorization": "12345"})
self.assertTrue(response.status_code == 404)
self.assertTrue(response.json()["detail"] == "Token not found")
def test_expired_token(self):
client = TestClient(APP)
token = jwt.encode({"user_id": 1}, "SECRET")
with self.assertRaises(HTTPException):
response = client.get(
"/", headers={"authorization": f"Bearer {token}"}
)
self.assertTrue(response.status_code == 403)
self.assertTrue(response.json()["detail"] == "Token has expired")
def test_token_without_user_id(self):
client = TestClient(APP)
token = jwt.encode({}, "SECRET")
with self.assertRaises(HTTPException):
response = client.get(
"/", headers={"authorization": f"Bearer {token}"}
)
self.assertTrue(response.status_code == 403)
self.assertTrue(response.content == b"") | tests/jwt_auth/test_jwt_middleware.py | from unittest import TestCase
import jwt
from piccolo.apps.user.tables import BaseUser
from starlette.exceptions import HTTPException
from starlette.routing import Route, Router
from starlette.testclient import TestClient
from piccolo_api.jwt_auth.middleware import JWTMiddleware
from piccolo_api.token_auth.tables import TokenAuth
APP = Router([Route("/", lambda endpoint: endpoint)])
APP = JWTMiddleware(asgi=APP, secret="SECRET") # type: ignore
class TestJWTMiddleware(TestCase):
def setUp(self):
BaseUser.create_table().run_sync()
TokenAuth.create_table().run_sync()
def tearDown(self):
TokenAuth.alter().drop_table().run_sync()
BaseUser.alter().drop_table().run_sync()
def test_empty_token(self):
client = TestClient(APP)
with self.assertRaises(HTTPException):
response = client.get("/")
self.assertTrue(response.status_code == 403)
self.assertTrue(response.json()["detail"] == "Token not found")
def test_invalid_token_format(self):
client = TestClient(APP)
with self.assertRaises(HTTPException):
response = client.get("/", headers={"authorization": "12345"})
self.assertTrue(response.status_code == 404)
self.assertTrue(response.json()["detail"] == "Token not found")
def test_expired_token(self):
client = TestClient(APP)
token = jwt.encode({"user_id": 1}, "SECRET")
with self.assertRaises(HTTPException):
response = client.get(
"/", headers={"authorization": f"Bearer {token}"}
)
self.assertTrue(response.status_code == 403)
self.assertTrue(response.json()["detail"] == "Token has expired")
def test_token_without_user_id(self):
client = TestClient(APP)
token = jwt.encode({}, "SECRET")
with self.assertRaises(HTTPException):
response = client.get(
"/", headers={"authorization": f"Bearer {token}"}
)
self.assertTrue(response.status_code == 403)
self.assertTrue(response.content == b"") | 0.552057 | 0.338159 |
from typing import Text
import numpy as np
from numpy import ndarray
from oqupy.config import NpDtype
SIGMA = {"id":[[1, 0], [0, 1]],
"x":[[0, 1], [1, 0]],
"y":[[0, -1j], [1j, 0]],
"z":[[1, 0], [0, -1]],
"+":[[0, 1], [0, 0]],
"-":[[0, 0], [1, 0]]}
SPIN_DM = {"up":[[1, 0], [0, 0]],
"down":[[0, 0], [0, 1]],
"z+":[[1, 0], [0, 0]],
"z-":[[0, 0], [0, 1]],
"x+":[[0.5, 0.5], [0.5, 0.5]],
"x-":[[0.5, -0.5], [-0.5, 0.5]],
"y+":[[0.5, -0.5j], [0.5j, 0.5]],
"y-":[[0.5, 0.5j], [-0.5j, 0.5]],
"mixed":[[0.5, 0.0], [0.0, 0.5]]}
def identity(n: int) -> ndarray:
"""
Identity matrix of dimension `n` x `n`.
Parameters
----------
n: int
Dimension of the square matrix.
Returns
-------
identity : ndarray
Identity matrix of dimension `n` x `n`.
"""
return np.identity(n, dtype=NpDtype)
def sigma(name: Text) -> ndarray:
"""
Spin matrix sigma of type `name`.
Parameters
----------
name: str{ ``'id'``, ``'x'``, ``'y'``, ``'z'``, ``'+'``, ``'-'``}
Returns
-------
sigma : ndarray
Spin matrix of type `name`.
"""
return np.array(SIGMA[name], dtype=NpDtype)
def spin_dm(name: Text) -> ndarray:
"""
Spin 1/2 state of type `name`.
Parameters
----------
name: str{ ``'up'``/``'z+'``, ``'down'``/``'z-'``, ``'x+'``, ``'x-'``, \
``'y+'``, ``'y-'``, ``mixed``}
Returns
-------
density_matrix : ndarray
Spin density matrix.
"""
return np.array(SPIN_DM[name], dtype=NpDtype)
def create(n: int) -> ndarray:
"""
Bosonic creation operator of dimension `n` x `n`.
Parameters
----------
n: int
Dimension of the Hilbert space.
Returns
-------
create : ndarray
Creation operator matrix of dimension `n` x `n`.
"""
return destroy(n).T
def destroy(n: int) -> ndarray:
"""
Bosonic annihilation operator of dimension `n` x `n`.
Parameters
----------
n: int
Dimension of the Hilbert space.
Returns
-------
create : ndarray
Annihilation operator matrix of dimension `n` x `n`.
"""
return np.diag(np.sqrt(range(1, n), dtype=NpDtype), 1)
# -- superoperators ----------------------------------------------------------
def commutator(operator: ndarray) -> ndarray:
    """Construct commutator superoperator from operator. """
    eye = np.identity(operator.shape[0])
    left_action = np.kron(operator, eye)
    right_action = np.kron(eye, operator.T)
    return left_action - right_action
def acommutator(operator: ndarray) -> ndarray:
    """Construct anti-commutator superoperator from operator. """
    eye = np.identity(operator.shape[0])
    left_action = np.kron(operator, eye)
    right_action = np.kron(eye, operator.T)
    return left_action + right_action
def left_super(operator: ndarray) -> ndarray:
    """Construct left acting superoperator from operator. """
    eye = np.identity(operator.shape[0])
    return np.kron(operator, eye)
def right_super(operator: ndarray) -> ndarray:
    """Construct right acting superoperator from operator. """
    eye = np.identity(operator.shape[0])
    return np.kron(eye, operator.T)
def left_right_super(
        left_operator: ndarray,
        right_operator: ndarray) -> ndarray:
    """Construct left and right acting superoperator from operators. """
    transposed_right = right_operator.T
    return np.kron(left_operator, transposed_right)
def preparation(
        density_matrix: ndarray) -> ndarray:
    """Construct the super operator that prepares the state. """
    dim = density_matrix.shape[0]
    # Outer product of the vectorized state with the vectorized identity.
    rho_vec = density_matrix.flatten()
    id_vec = np.identity(dim, dtype=NpDtype).flatten()
    return np.outer(rho_vec, id_vec)
# -- two site superoperators --------------------------------------------------
def cross_commutator(
        operator_1: ndarray,
        operator_2: ndarray) -> ndarray:
    """Construct commutator of cross term (acting on two Hilbert spaces). """
    eye_1 = np.identity(operator_1.shape[1])
    eye_2 = np.identity(operator_2.shape[1])
    # Left action on both spaces minus right action on both spaces.
    left_term = np.kron(np.kron(operator_1, eye_1),
                        np.kron(operator_2, eye_2))
    right_term = np.kron(np.kron(eye_1, operator_1.T),
                         np.kron(eye_2, operator_2.T))
    return left_term - right_term
def cross_acommutator(
        operator_1: ndarray,
        operator_2: ndarray) -> ndarray:
    """
    Construct anti-commutator of cross term (acting on two Hilbert spaces).
    """
    # Identity on each of the two Hilbert spaces.
    id1 = np.identity(operator_1.shape[1])
    id2 = np.identity(operator_2.shape[1])
    # Left-acting superoperator parts.
    op1_id = np.kron(operator_1, id1)
    op2_id = np.kron(operator_2, id2)
    # Right-acting superoperator parts (transposed operators).
    id_op1 = np.kron(id1, operator_1.T)
    id_op2 = np.kron(id2, operator_2.T)
    return np.kron(op1_id, op2_id) + np.kron(id_op1, id_op2)
def cross_left_right_super(
operator_1_l: ndarray,
operator_1_r: ndarray,
operator_2_l: ndarray,
operator_2_r: ndarray) -> ndarray:
"""
Construct anit-commutator of cross term (acting on two Hilbert spaces).
"""
op1l_op1r = np.kron(operator_1_l, operator_1_r.T)
op2l_op2r = np.kron(operator_2_l, operator_2_r.T)
return np.kron(op1l_op1r, op2l_op2r) | oqupy/operators.py | from typing import Text
import numpy as np
from numpy import ndarray
from oqupy.config import NpDtype
SIGMA = {"id":[[1, 0], [0, 1]],
"x":[[0, 1], [1, 0]],
"y":[[0, -1j], [1j, 0]],
"z":[[1, 0], [0, -1]],
"+":[[0, 1], [0, 0]],
"-":[[0, 0], [1, 0]]}
SPIN_DM = {"up":[[1, 0], [0, 0]],
"down":[[0, 0], [0, 1]],
"z+":[[1, 0], [0, 0]],
"z-":[[0, 0], [0, 1]],
"x+":[[0.5, 0.5], [0.5, 0.5]],
"x-":[[0.5, -0.5], [-0.5, 0.5]],
"y+":[[0.5, -0.5j], [0.5j, 0.5]],
"y-":[[0.5, 0.5j], [-0.5j, 0.5]],
"mixed":[[0.5, 0.0], [0.0, 0.5]]}
def identity(n: int) -> ndarray:
"""
Identity matrix of dimension `n` x `n`.
Parameters
----------
n: int
Dimension of the square matrix.
Returns
-------
identity : ndarray
Identity matrix of dimension `n` x `n`.
"""
return np.identity(n, dtype=NpDtype)
def sigma(name: Text) -> ndarray:
"""
Spin matrix sigma of type `name`.
Parameters
----------
name: str{ ``'id'``, ``'x'``, ``'y'``, ``'z'``, ``'+'``, ``'-'``}
Returns
-------
sigma : ndarray
Spin matrix of type `name`.
"""
return np.array(SIGMA[name], dtype=NpDtype)
def spin_dm(name: Text) -> ndarray:
"""
Spin 1/2 state of type `name`.
Parameters
----------
name: str{ ``'up'``/``'z+'``, ``'down'``/``'z-'``, ``'x+'``, ``'x-'``, \
``'y+'``, ``'y-'``, ``mixed``}
Returns
-------
density_matrix : ndarray
Spin density matrix.
"""
return np.array(SPIN_DM[name], dtype=NpDtype)
def create(n: int) -> ndarray:
"""
Bosonic creation operator of dimension `n` x `n`.
Parameters
----------
n: int
Dimension of the Hilbert space.
Returns
-------
create : ndarray
Creation operator matrix of dimension `n` x `n`.
"""
return destroy(n).T
def destroy(n: int) -> ndarray:
"""
Bosonic annihilation operator of dimension `n` x `n`.
Parameters
----------
n: int
Dimension of the Hilbert space.
Returns
-------
create : ndarray
Annihilation operator matrix of dimension `n` x `n`.
"""
return np.diag(np.sqrt(range(1, n), dtype=NpDtype), 1)
# -- superoperators ----------------------------------------------------------
def commutator(operator: ndarray) -> ndarray:
"""Construct commutator superoperator from operator. """
dim = operator.shape[0]
return np.kron(operator, np.identity(dim)) \
- np.kron(np.identity(dim), operator.T)
def acommutator(operator: ndarray) -> ndarray:
"""Construct anti-commutator superoperator from operator. """
dim = operator.shape[0]
return np.kron(operator, np.identity(dim)) \
+ np.kron(np.identity(dim), operator.T)
def left_super(operator: ndarray) -> ndarray:
"""Construct left acting superoperator from operator. """
dim = operator.shape[0]
return np.kron(operator, np.identity(dim))
def right_super(operator: ndarray) -> ndarray:
"""Construct right acting superoperator from operator. """
dim = operator.shape[0]
return np.kron(np.identity(dim), operator.T)
def left_right_super(
left_operator: ndarray,
right_operator: ndarray) -> ndarray:
"""Construct left and right acting superoperator from operators. """
return np.kron(left_operator, right_operator.T)
def preparation(
density_matrix: ndarray) -> ndarray:
"""Construct the super operator that prepares the state. """
dim = density_matrix.shape[0]
identity_matrix = np.identity(dim, dtype=NpDtype)
return np.outer(density_matrix.flatten(), identity_matrix.flatten())
# -- two site superoperators --------------------------------------------------
def cross_commutator(
operator_1: ndarray,
operator_2: ndarray) -> ndarray:
"""Construct commutator of cross term (acting on two Hilbert spaces). """
id1 = np.identity(operator_1.shape[1])
id2 = np.identity(operator_2.shape[1])
op1_id = np.kron(operator_1, id1)
op2_id = np.kron(operator_2, id2)
id_op1 = np.kron(id1, operator_1.T)
id_op2 = np.kron(id2, operator_2.T)
return np.kron(op1_id, op2_id) - np.kron(id_op1, id_op2)
def cross_acommutator(
operator_1: ndarray,
operator_2: ndarray) -> ndarray:
"""
Construct anit-commutator of cross term (acting on two Hilbert spaces).
"""
id1 = np.identity(operator_1.shape[1])
id2 = np.identity(operator_2.shape[1])
op1_id = np.kron(operator_1, id1)
op2_id = np.kron(operator_2, id2)
id_op1 = np.kron(id1, operator_1.T)
id_op2 = np.kron(id2, operator_2.T)
return np.kron(op1_id, op2_id) + np.kron(id_op1, id_op2)
def cross_left_right_super(
operator_1_l: ndarray,
operator_1_r: ndarray,
operator_2_l: ndarray,
operator_2_r: ndarray) -> ndarray:
"""
Construct anit-commutator of cross term (acting on two Hilbert spaces).
"""
op1l_op1r = np.kron(operator_1_l, operator_1_r.T)
op2l_op2r = np.kron(operator_2_l, operator_2_r.T)
return np.kron(op1l_op1r, op2l_op2r) | 0.899315 | 0.624408 |
import os
import pandas as pd
from src.config.labels import ALGORITHM_LABEL, CALIBRATION_LABEL, FAIRNESS_METRIC_LABEL, LAMBDA_LABEL, \
LAMBDA_VALUE_LABEL, EVALUATION_METRIC_LABEL, EVALUATION_VALUE_LABEL, EVALUATION_LIST_LABELS
from src.config.path_dir_files import data_results_path, ALL_FOLDS_FILE, ALL_RECOMMENDERS_RESULTS_FILE
from src.config.variables import K_FOLDS_VALUES
def k_fold_results_concat(evaluation_results_df):
    """
    Average the per-fold evaluation results over the k folds.

    For every unique combination of (algorithm, calibration method,
    fairness metric, lambda type, lambda value, evaluation metric)
    present in `evaluation_results_df`, the mean of the evaluation-value
    column is computed and appended as one summary row.

    Parameters
    ----------
    evaluation_results_df : pandas.DataFrame
        Concatenation of the per-fold result files; must contain the
        columns named in EVALUATION_LIST_LABELS.

    Returns
    -------
    pandas.DataFrame
        One row per label combination, columns EVALUATION_LIST_LABELS.
    """
    # NOTE(review): the nested filtering below is equivalent to a single
    # groupby(...).mean() over the six label columns; kept as-is to
    # preserve the exact output row order.
    k_results_df = pd.DataFrame()
    for recommender in evaluation_results_df[ALGORITHM_LABEL].unique().tolist():
        recommender_subset_df = evaluation_results_df[evaluation_results_df[ALGORITHM_LABEL] == recommender]
        for calib_method in recommender_subset_df[CALIBRATION_LABEL].unique().tolist():
            calib_subset_df = recommender_subset_df[recommender_subset_df[CALIBRATION_LABEL] == calib_method]
            for distance_metric in calib_subset_df[FAIRNESS_METRIC_LABEL].unique().tolist():
                fairness_subset_df = calib_subset_df[calib_subset_df[FAIRNESS_METRIC_LABEL] == distance_metric]
                for lambda_type in fairness_subset_df[LAMBDA_LABEL].unique().tolist():
                    lambda_subset_df = fairness_subset_df[fairness_subset_df[LAMBDA_LABEL] == lambda_type]
                    for lambda_value in lambda_subset_df[LAMBDA_VALUE_LABEL].unique().tolist():
                        lambda_value_subset_df = lambda_subset_df[lambda_subset_df[LAMBDA_VALUE_LABEL] == lambda_value]
                        for evaluation_metric in lambda_value_subset_df[EVALUATION_METRIC_LABEL].unique().tolist():
                            evaluation_subset_df = lambda_value_subset_df[
                                lambda_value_subset_df[EVALUATION_METRIC_LABEL] == evaluation_metric]
                            # Mean over the k folds for this combination.
                            result = evaluation_subset_df[EVALUATION_VALUE_LABEL].mean()
                            k_results_df = pd.concat([k_results_df,
                                                      pd.DataFrame(
                                                          [[recommender,
                                                            calib_method,
                                                            distance_metric,
                                                            lambda_type,
                                                            lambda_value,
                                                            evaluation_metric,
                                                            result]],
                                                          columns=EVALUATION_LIST_LABELS
                                                      )
                                                      ])
    return k_results_df
def merge_recommender_results(label, db):
    """
    Load the k per-fold result CSVs for one recommender and write the
    fold-averaged summary to ALL_FOLDS_FILE.

    Parameters
    ----------
    label : str
        Recommender name (sub-directory under the results path).
    db : str
        Dataset identifier forwarded to data_results_path().
    """
    evaluation_results_df = pd.DataFrame()
    # Per-fold files are stored as <results>/<label>/<fold>.csv.
    for fold in range(1, K_FOLDS_VALUES + 1):
        tmp = pd.read_csv(os.path.join("/".join([data_results_path(db), label]) + "/", str(fold) + '.csv'))
        evaluation_results_df = pd.concat([evaluation_results_df, tmp])
    # NOTE(review): the read path above inserts "/" between the results
    # dir and the label, while this save path concatenates them directly;
    # the two agree only if data_results_path() ends with a separator --
    # confirm.
    path_to_save = "".join([data_results_path(db), label, "/"])
    if not os.path.exists(path_to_save):
        os.makedirs(path_to_save)
    evaluation_concat_df = k_fold_results_concat(evaluation_results_df)
    evaluation_concat_df.to_csv(os.path.join(path_to_save, ALL_FOLDS_FILE),
                                index=False)
def merge_all_results(recommenders_labels, db):
evaluation_results_df = pd.DataFrame()
for label in recommenders_labels:
tmp = pd.read_csv(os.path.join("/".join([data_results_path(db), label]) + "/", ALL_FOLDS_FILE))
evaluation_results_df = pd.concat([evaluation_results_df, tmp])
path_to_save = data_results_path(db)
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
evaluation_results_df.to_csv(os.path.join(path_to_save, ALL_RECOMMENDERS_RESULTS_FILE),
index=False) | src/processing/merge_results.py | import os
import pandas as pd
from src.config.labels import ALGORITHM_LABEL, CALIBRATION_LABEL, FAIRNESS_METRIC_LABEL, LAMBDA_LABEL, \
LAMBDA_VALUE_LABEL, EVALUATION_METRIC_LABEL, EVALUATION_VALUE_LABEL, EVALUATION_LIST_LABELS
from src.config.path_dir_files import data_results_path, ALL_FOLDS_FILE, ALL_RECOMMENDERS_RESULTS_FILE
from src.config.variables import K_FOLDS_VALUES
def k_fold_results_concat(evaluation_results_df):
k_results_df = pd.DataFrame()
for recommender in evaluation_results_df[ALGORITHM_LABEL].unique().tolist():
recommender_subset_df = evaluation_results_df[evaluation_results_df[ALGORITHM_LABEL] == recommender]
for calib_method in recommender_subset_df[CALIBRATION_LABEL].unique().tolist():
calib_subset_df = recommender_subset_df[recommender_subset_df[CALIBRATION_LABEL] == calib_method]
for distance_metric in calib_subset_df[FAIRNESS_METRIC_LABEL].unique().tolist():
fairness_subset_df = calib_subset_df[calib_subset_df[FAIRNESS_METRIC_LABEL] == distance_metric]
for lambda_type in fairness_subset_df[LAMBDA_LABEL].unique().tolist():
lambda_subset_df = fairness_subset_df[fairness_subset_df[LAMBDA_LABEL] == lambda_type]
for lambda_value in lambda_subset_df[LAMBDA_VALUE_LABEL].unique().tolist():
lambda_value_subset_df = lambda_subset_df[lambda_subset_df[LAMBDA_VALUE_LABEL] == lambda_value]
for evaluation_metric in lambda_value_subset_df[EVALUATION_METRIC_LABEL].unique().tolist():
evaluation_subset_df = lambda_value_subset_df[
lambda_value_subset_df[EVALUATION_METRIC_LABEL] == evaluation_metric]
result = evaluation_subset_df[EVALUATION_VALUE_LABEL].mean()
k_results_df = pd.concat([k_results_df,
pd.DataFrame(
[[recommender,
calib_method,
distance_metric,
lambda_type,
lambda_value,
evaluation_metric,
result]],
columns=EVALUATION_LIST_LABELS
)
])
return k_results_df
def merge_recommender_results(label, db):
evaluation_results_df = pd.DataFrame()
for fold in range(1, K_FOLDS_VALUES + 1):
tmp = pd.read_csv(os.path.join("/".join([data_results_path(db), label]) + "/", str(fold) + '.csv'))
evaluation_results_df = pd.concat([evaluation_results_df, tmp])
path_to_save = "".join([data_results_path(db), label, "/"])
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
evaluation_concat_df = k_fold_results_concat(evaluation_results_df)
evaluation_concat_df.to_csv(os.path.join(path_to_save, ALL_FOLDS_FILE),
index=False)
def merge_all_results(recommenders_labels, db):
evaluation_results_df = pd.DataFrame()
for label in recommenders_labels:
tmp = pd.read_csv(os.path.join("/".join([data_results_path(db), label]) + "/", ALL_FOLDS_FILE))
evaluation_results_df = pd.concat([evaluation_results_df, tmp])
path_to_save = data_results_path(db)
if not os.path.exists(path_to_save):
os.makedirs(path_to_save)
evaluation_results_df.to_csv(os.path.join(path_to_save, ALL_RECOMMENDERS_RESULTS_FILE),
index=False) | 0.249264 | 0.164148 |
import numpy as np
import matplotlib.pyplot as plt
from python_codes.transformations import *
class MK2Robot(object):
HOME_0 = 0
HOME_1 = np.pi
    def __init__(self, link_lengths):
        """Store the link lengths and initialize empty state containers."""
        self.a = link_lengths  # link lengths [a1, a2]
        self.q = []            # joint angles (radians), set by _update_transformation_matrices
        self.T = []            # per-link translation matrices
        self.pose = []         # joint coordinates, filled by update_pose()
        self.s = []            # stepper step counts, filled by angle_to_step()
        # self.update_pose(MK2Robot.HOME_0, MK2Robot.HOME_1)
    def update_pose(self, q0, q1):
        """
        Compute the pose of each robot link from joint angles q0, q1
        (degrees), using the rotation (R) and translation (T) matrices,
        and store the result per link in self.pose.
        """
        # Recompute self.R and self.T for the new joint angles.
        self._update_transformation_matrices(q0, q1)
        # Column j of self.pose holds the (x, y) position of joint j.
        self.pose = np.zeros((2, 2))
        # Forward kinematics: chain each link's rotation/translation and
        # read the translation part (third column) of the result.
        l0_pose = np.linalg.multi_dot([self.R[0], self.T[0]])
        l1_pose = np.linalg.multi_dot([self.R[0], self.T[0], self.R[1], self.T[1]])
        self.pose[:, 0] = l0_pose[:, 2][:2]
        self.pose[:, 1] = l1_pose[:, 2][:2]
    def _update_transformation_matrices(self, q0, q1):
        """
        Build the rotation and translation matrices of the robot model
        from joint angles q0, q1 given in degrees, storing them in
        self.R and self.T (one entry per link, in order).
        """
        # Convert joint angles from degrees to radians.
        q0 = q0 * np.pi / 180
        q1 = q1 * np.pi / 180
        self.q = [q0, q1]
        self.T = []
        self.R = []
        angulo_rotacion_l0 = q0
        angulo_rotacion_l1 = q1
        # Link 1
        self.T.append(translation_along_x_axis(self.a[0]))
        self.R.append(rotation_around_zaxis(angulo_rotacion_l0))
        # Link 2
        self.T.append(translation_along_x_axis(self.a[1]))
        self.R.append(rotation_around_zaxis(angulo_rotacion_l1))
def inverse_kinematics(self, x, y):
## pa q el robot vaya a x,y,x hay q usar
# q0,q1,q2=inversekinematics
##robot.updatepose(q0,q1,q2)
a1 = self.a[0]
a2 = self.a[1]
lim = a1 + a2
r = np.sqrt(x**2 + y**2)
if (r > lim):
return self.q
phi0 = np.arctan2(y, x)
phi1 = np.arccos((r**2+a1**2-a2**2) / (2*r*a1))
phi2 = np.arccos((a1**2 + a2**2 - r**2) / (2*a1*a2))
q0 = phi0 -phi1
q1 = np.pi - phi2
return np.array([q0, q1]) * 180 / np.pi
def get_joint_positions(self):
"""Este método entrega las coordenadas de cada joint en 1 listas; es para que el codigo se vea mas limpio :)"""
X_pos = self.pose[0]
Y_pos = self.pose[1]
return [X_pos, Y_pos]
def get_pose_error(self, inputed_coord):
x, y = inputed_coord
xr, yr = self.pose[:, 1]
error_x = np.abs(x-xr)/x
error_y = np.abs(y - yr) / y
return [error_x, error_y]
def angle_to_step(self, qarr):
"qarr must be in degres"
q0, q1 = qarr
s1 = q0 * 200
s2 = q1 * 400
self.s = [s1, s2]
return self.s
def write_coords_as_gcode(self, file, coords):
"""Takes an array of tuples with coordinates (in degrees) and writes
them as Gcode to a file"""
arch = open(file, 'w')
for i in range(len(coords)):
x = str(np.round(coords[i][0], 1))
y = str(np.round(coords[i][1], 1))
msg = 'G0 X' + x + ' Y' + y + '\n' # G0 Xx Yy
arch.write(msg)
arch.close() | python_codes/mk2Robot.py | import numpy as np
import matplotlib.pyplot as plt
from python_codes.transformations import *
class MK2Robot(object):
HOME_0 = 0
HOME_1 = np.pi
def __init__(self, link_lengths):
self.a = link_lengths
self.q = []
self.T = []
self.pose = []
self.s = []
# self.update_pose(MK2Robot.HOME_0, MK2Robot.HOME_1)
def update_pose(self, q0, q1):
"""
Este metodo calcula la pose de cada link del robot, usando las matrices T y R. Luego guarda el
resultado para cada link como un elemento del arreglo self.pose
"""
# Calcula las matrices T y Q
self._update_transformation_matrices(q0, q1)
# re-escribe self.pose como una lista de 4 matrices nulas
self.pose = np.zeros((2, 2))
l0_pose = np.linalg.multi_dot([self.R[0], self.T[0]])
l1_pose = np.linalg.multi_dot([self.R[0], self.T[0], self.R[1], self.T[1]])
self.pose[:, 0] = l0_pose[:, 2][:2]
self.pose[:, 1] = l1_pose[:, 2][:2]
def _update_transformation_matrices(self, q0, q1):
"""
Este método calcula las matrices de rotación traslación del modelo de nuestro robot
y guarda sus valores como elementos de las listas self.R y self.T, en orden
"""
q0 = q0 * np.pi / 180
q1 = q1 * np.pi / 180
self.q = [q0, q1]
self.T = []
self.R = []
angulo_rotacion_l0 = q0
angulo_rotacion_l1 = q1
# Link 1
self.T.append(translation_along_x_axis(self.a[0]))
self.R.append(rotation_around_zaxis(angulo_rotacion_l0))
# Link 2
self.T.append(translation_along_x_axis(self.a[1]))
self.R.append(rotation_around_zaxis(angulo_rotacion_l1))
def inverse_kinematics(self, x, y):
## pa q el robot vaya a x,y,x hay q usar
# q0,q1,q2=inversekinematics
##robot.updatepose(q0,q1,q2)
a1 = self.a[0]
a2 = self.a[1]
lim = a1 + a2
r = np.sqrt(x**2 + y**2)
if (r > lim):
return self.q
phi0 = np.arctan2(y, x)
phi1 = np.arccos((r**2+a1**2-a2**2) / (2*r*a1))
phi2 = np.arccos((a1**2 + a2**2 - r**2) / (2*a1*a2))
q0 = phi0 -phi1
q1 = np.pi - phi2
return np.array([q0, q1]) * 180 / np.pi
def get_joint_positions(self):
"""Este método entrega las coordenadas de cada joint en 1 listas; es para que el codigo se vea mas limpio :)"""
X_pos = self.pose[0]
Y_pos = self.pose[1]
return [X_pos, Y_pos]
def get_pose_error(self, inputed_coord):
x, y = inputed_coord
xr, yr = self.pose[:, 1]
error_x = np.abs(x-xr)/x
error_y = np.abs(y - yr) / y
return [error_x, error_y]
def angle_to_step(self, qarr):
"qarr must be in degres"
q0, q1 = qarr
s1 = q0 * 200
s2 = q1 * 400
self.s = [s1, s2]
return self.s
def write_coords_as_gcode(self, file, coords):
"""Takes an array of tuples with coordinates (in degrees) and writes
them as Gcode to a file"""
arch = open(file, 'w')
for i in range(len(coords)):
x = str(np.round(coords[i][0], 1))
y = str(np.round(coords[i][1], 1))
msg = 'G0 X' + x + ' Y' + y + '\n' # G0 Xx Yy
arch.write(msg)
arch.close() | 0.569972 | 0.551211 |
import PlayCards
import CommonCardsType
# 该模块中list_list和Cardlist的内容一样
# 这个函数用于计算手牌中各种牌面的张数,接收一个手牌列表作为参数,返回一个记录各种牌面张数列表
def GetList_count(Cardlist=[]):
    """Count how many cards of each face value are in the hand.

    Takes a hand (list of [value, suit] cards) and returns a 15-slot
    list where index i holds the number of cards with face value i.
    """
    counts = [0] * 15
    for card in Cardlist:
        counts[card[0]] += 1
    return counts
def IsZhizhunqinglong(Cardlist=[]):
    """Supreme dragon: all 13 cards share one suit and form one run."""
    hand = list.copy(Cardlist)
    same_suit = all(hand[i][1] == hand[i - 1][1] for i in range(1, 13))
    consecutive = all(hand[i][0] - hand[i - 1][0] == 1 for i in range(1, 13))
    if same_suit and consecutive:
        return True
    else:
        return False
def IsYitiaolong(Cardlist=[]):
    """Dragon: the 13 cards form one consecutive run (suits ignored)."""
    hand = list.copy(Cardlist)
    if all(hand[i][0] - hand[i - 1][0] == 1 for i in range(1, 13)):
        return True
    else:
        return False
def IsShierhuangzu(Cardlist=[]):
    """Twelve royals: at least 12 of the 13 cards have face value above 10."""
    hand = list.copy(Cardlist)
    royals = sum(1 for i in range(0, 13) if hand[i][0] > 10)
    if royals >= 12:
        return True
    else:
        return False
def IsSantonghuashun(Cardlist=[]):
    """
    Three straight flushes: the 13 cards can be split into runs of
    5 + 5 + 3 cards where every run is also single-suited.

    Relies on CommonCardsType.FindShunzi (find 5-card straights) and
    PlayCards.CalculateSub (remove cards from a hand).
    """
    temp_Sanshunzi = []  # candidate split being assembled during the search
    list_list = list.copy(Cardlist)  # working copy of the hand
    Sanshunzi = []  # all distinct "three straights" splits found
    SanTonghuashun = []  # splits in which every run is also a flush
    temp_list1 = []  # hand after removing the first straight
    temp_list2 = []  # hand after removing two straights
    Shunzi1 = []  # straights found in the full hand
    Shunzi2 = []  # straights found after the first removal
    Shunzi1 = CommonCardsType.FindShunzi(list_list)
    if (Shunzi1 != []):
        for item1 in Shunzi1:
            temp_list1 = PlayCards.CalculateSub(list_list, item1)
            Shunzi2 = CommonCardsType.FindShunzi(temp_list1)
            if (Shunzi2 != []):
                for item2 in Shunzi2:
                    temp_list2 = PlayCards.CalculateSub(temp_list1, item2)
                    temp_list2.sort()
                    # The 3 leftover cards must themselves be consecutive.
                    if (temp_list2[0][0] + 1 == temp_list2[1][0]
                            and temp_list2[1][0] + 1 == temp_list2[2][0]):
                        temp_Sanshunzi.append(temp_list2)
                        # Order the two 5-card runs by their highest card.
                        if (item1[4][0] >= item2[4][0]):
                            temp_Sanshunzi.append(item2)
                            temp_Sanshunzi.append(item1)
                        else:
                            temp_Sanshunzi.append(item1)
                            temp_Sanshunzi.append(item2)
                        if temp_Sanshunzi not in Sanshunzi:
                            Sanshunzi.append(temp_Sanshunzi)
                        temp_Sanshunzi = []
    if (Sanshunzi != []):
        for item in Sanshunzi:  # item is one three-straights split
            flag = 1  # becomes 0 if this split is not three flushes
            for dun in item:  # dun is one run ("pile")
                for i in range(len(dun) - 1):  # i indexes a card in the run
                    if (dun[i][1] != dun[i + 1][1]):
                        flag = 0
            if (flag == 1):
                SanTonghuashun.append(item)
    if (SanTonghuashun != []):
        return True
    else:
        return False
def IsSanfentianxia(Cardlist=[]):
    """Three-part world: the hand contains exactly three four-of-a-kinds."""
    hand = list.copy(Cardlist)
    # Tally face values (inlined GetList_count).  Slots for values 13/14
    # exist but, as in the original, only values 0..12 are inspected.
    tally = [0] * 15
    for card in hand:
        tally[card[0]] += 1
    quads = sum(1 for value in range(0, 13) if tally[value] == 4)
    if quads == 3:
        return True
    else:
        return False
def IsQuanda(Cardlist=[]):
    """All big: every one of the 13 cards has face value >= 8."""
    hand = list.copy(Cardlist)
    big_cards = sum(1 for i in range(0, 13) if hand[i][0] >= 8)
    if big_cards == 13:
        return True
    else:
        return False
def IsQuanxiao(Cardlist=[]):
    """All small: every one of the 13 cards has face value <= 8."""
    hand = list.copy(Cardlist)
    small_cards = sum(1 for i in range(0, 13) if hand[i][0] <= 8)
    if small_cards == 13:
        return True
    else:
        return False
def IsCouyise(Cardlist=[]):
    """
    Semi-flush: the hand is either made up entirely of '*'/'#' suited
    cards, or contains none of them at all.
    """
    hand = list.copy(Cardlist)
    clubs_diamonds = sum(1 for i in range(0, 13) if hand[i][1] in ('*', '#'))
    if clubs_diamonds == 13 or clubs_diamonds == 0:
        return True
    else:
        return False
def IsShuangguaichongsan(Cardlist=[]):
    """
    Double-ghost triple: the pair/triple/quad pattern of the hand matches
    one of (3 pairs + 2 triples), (3 pairs + 1 triple + 1 quad),
    (2 pairs + 3 triples) or (1 pair + 2 triples + 1 quad).

    Bug fix: the quad branch previously tested ``list_count == 4``
    (comparing the whole tally list to the integer 4, which is always
    False) instead of ``list_count[i] == 4``, so hands containing a
    four-of-a-kind were never recognized.
    """
    hand = list.copy(Cardlist)
    # Tally face values (inlined GetList_count).
    tally = [0] * 15
    for card in hand:
        tally[card[0]] += 1
    count2 = 0
    count3 = 0
    count4 = 0
    for i in range(0, 13):
        if tally[i] == 2:
            count2 = count2 + 1
        elif tally[i] == 3:
            count3 = count3 + 1
        elif tally[i] == 4:  # fixed: was `list_count == 4`
            count4 = count4 + 1
    if (count2 == 3 and count3 == 2
            or count2 == 3 and count3 == 1 and count4 == 1
            or count2 == 2 and count3 == 3
            or count2 == 1 and count3 == 2 and count4 == 1):
        return True
    else:
        return False
def IsSitaosantiao(Cardlist=[]):
    """Four triples: exactly four face values appear three or more times."""
    hand = list.copy(Cardlist)
    # Tally face values (inlined GetList_count).
    tally = [0] * 15
    for card in hand:
        tally[card[0]] += 1
    triples_or_more = sum(1 for value in range(0, 13) if tally[value] >= 3)
    if triples_or_more == 4:
        return True
    else:
        return False
def IsWuduisantiao(Cardlist=[]):
    """
    Five pairs and a triple: the (pair, triple, quad) tallies match
    (5, 1, -), (3, 1, 1) or (1, 1, 2).
    """
    hand = list.copy(Cardlist)
    # Tally face values (inlined GetList_count).
    tally = [0] * 15
    for card in hand:
        tally[card[0]] += 1
    pairs = 0
    triples = 0
    quads = 0
    for value in range(0, 13):
        if tally[value] == 2:
            pairs = pairs + 1
        elif tally[value] == 3:
            triples = triples + 1
        elif tally[value] == 4:
            quads = quads + 1
    if (pairs == 5 and triples == 1
            or pairs == 3 and triples == 1 and quads == 1
            or pairs == 1 and triples == 1 and quads == 2):
        return True
    else:
        return False
def IsLiuduiban(Cardlist=[]):
    """
    Six-and-a-half pairs: quads count as two pairs, pairs/triples count
    as one, and the total must be exactly six.
    """
    hand = list.copy(Cardlist)
    # Tally face values (inlined GetList_count).
    tally = [0] * 15
    for card in hand:
        tally[card[0]] += 1
    pair_units = 0
    for value in range(0, 13):
        if tally[value] == 4:
            pair_units = pair_units + 2
        elif tally[value] in (2, 3):
            pair_units = pair_units + 1
    if pair_units == 6:
        return True
    else:
        return False
def IsSanshunzi(Cardlist=[]):
    """
    Three straights: the 13 cards can be split into consecutive runs of
    5 + 5 + 3 cards (suits are ignored here, unlike IsSantonghuashun).

    Relies on CommonCardsType.FindShunzi (find 5-card straights) and
    PlayCards.CalculateSub (remove cards from a hand).
    """
    temp_Sanshunzi = []  # candidate split being assembled during the search
    list_list = list.copy(Cardlist)  # working copy of the hand
    Sanshunzi = []  # all distinct "three straights" splits found
    temp_list1 = []  # hand after removing the first straight
    temp_list2 = []  # hand after removing two straights
    Shunzi1 = []  # straights found in the full hand
    Shunzi2 = []  # straights found after the first removal
    Shunzi1 = CommonCardsType.FindShunzi(list_list)
    if (Shunzi1 != []):
        for item1 in Shunzi1:
            temp_list1 = PlayCards.CalculateSub(list_list, item1)
            Shunzi2 = CommonCardsType.FindShunzi(temp_list1)
            if (Shunzi2 != []):
                for item2 in Shunzi2:
                    temp_list2 = PlayCards.CalculateSub(temp_list1, item2)
                    temp_list2.sort()
                    # The 3 leftover cards must themselves be consecutive.
                    if (temp_list2[0][0] + 1 == temp_list2[1][0]
                            and temp_list2[1][0] + 1 == temp_list2[2][0]):
                        temp_Sanshunzi.append(temp_list2)
                        # Order the two 5-card runs by their highest card.
                        if (item1[4][0] >= item2[4][0]):
                            temp_Sanshunzi.append(item2)
                            temp_Sanshunzi.append(item1)
                        else:
                            temp_Sanshunzi.append(item1)
                            temp_Sanshunzi.append(item2)
                        if temp_Sanshunzi not in Sanshunzi:
                            Sanshunzi.append(temp_Sanshunzi)
                        temp_Sanshunzi = []
    if (Sanshunzi != []):
        return True
    else:
        return False
def IsSantonghua(Cardlist=[]):
list_list = list.copy(Cardlist)
Fangkuai = 0
Meihua = 0
Heitao = 0
Hongxing = 0
for i in range(0, 13):
if (list_list[i][1] == '#'):
Fangkuai = Fangkuai + 1
elif (list_list[i][1] == '*'):
Meihua = Meihua + 1
elif (list_list[i][1] == '$'):
Heitao = Heitao + 1
else:
Hongxing = Hongxing + 1
templist = [Fangkuai, Meihua, Heitao, Hongxing]
templist.sort()
if (templist[0] == 0 and templist[1] == 3 and templist[2] == 5 and templist[3] == 5):
return True
else:
return False | AutoPlayForShisanshui/SpecialCardsType.py | import PlayCards
import CommonCardsType
# 该模块中list_list和Cardlist的内容一样
# 这个函数用于计算手牌中各种牌面的张数,接收一个手牌列表作为参数,返回一个记录各种牌面张数列表
def GetList_count(Cardlist=[]):
list_count = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
for item in Cardlist:
number = item[0]
list_count[number] = list_count[number] + 1
return list_count
def IsZhizhunqinglong(Cardlist=[]):
list_list = list.copy(Cardlist)
flag1 = 1
flag2 = 1
for i in range(1, 13):
if (list_list[i][1] != list_list[i - 1][1]):
flag1 = 0
break
for i in range(1, 13):
if (list_list[i][0] - list_list[i - 1][0] != 1):
flag2 = 0
break
if (flag1 == 1 and flag2 == 1):
return True
else:
return False
def IsYitiaolong(Cardlist=[]):
list_list = list.copy(Cardlist)
flag = 1
for i in range(1, 13):
if (list_list[i][0] - list_list[i - 1][0] != 1):
flag = 0
break
if (flag == 1):
return True
else:
return False
def IsShierhuangzu(Cardlist=[]):
list_list = list.copy(Cardlist)
count = 0
for i in range(0, 13):
if (list_list[i][0] > 10):
count = count + 1
if (count >= 12):
return True
else:
return False
def IsSantonghuashun(Cardlist=[]):
temp_Sanshunzi = [] # 存放在查找过程中可能存在的三顺子的一部分
list_list = list.copy(Cardlist) # Cardlist的副本
Sanshunzi = []
SanTonghuashun = []
temp_list1 = [] # 存放去掉一个顺子后的手牌
temp_list2 = [] # 存放去掉两个顺子后的手牌
Shunzi1 = [] # 第一层循环中所有的顺子
Shunzi2 = [] # 第二层循环中所有的顺子
Shunzi1 = CommonCardsType.FindShunzi(list_list)
if (Shunzi1 != []):
for item1 in Shunzi1:
temp_list1 = PlayCards.CalculateSub(list_list, item1)
Shunzi2 = CommonCardsType.FindShunzi(temp_list1)
if (Shunzi2 != []):
for item2 in Shunzi2:
temp_list2 = PlayCards.CalculateSub(temp_list1, item2)
temp_list2.sort()
if (temp_list2[0][0] + 1 == temp_list2[1][0]
and temp_list2[1][0] + 1 == temp_list2[2][0]):
temp_Sanshunzi.append(temp_list2)
if (item1[4][0] >= item2[4][0]):
temp_Sanshunzi.append(item2)
temp_Sanshunzi.append(item1)
else:
temp_Sanshunzi.append(item1)
temp_Sanshunzi.append(item2)
if temp_Sanshunzi not in Sanshunzi:
Sanshunzi.append(temp_Sanshunzi)
temp_Sanshunzi = []
if (Sanshunzi != []):
for item in Sanshunzi: # item是一个三顺子
flag = 1 # flag=0时表示这一个三顺子不是三同花顺
for dun in item: # dun是一个墩
for i in range(len(dun) - 1): # i是墩中的一张牌的下标
if (dun[i][1] != dun[i + 1][1]):
flag = 0
if (flag == 1):
SanTonghuashun.append(item)
if (SanTonghuashun != []):
return True
else:
return False
def IsSanfentianxia(Cardlist=[]):
list_list = list.copy(Cardlist)
list_count = GetList_count(list_list)
count = 0
for i in range(0, 13):
if (list_count[i] == 4):
count = count + 1
if (count == 3):
return True
else:
return False
def IsQuanda(Cardlist=[]):
list_list = list.copy(Cardlist)
count = 0
for i in range(0, 13):
if (list_list[i][0] >= 8):
count = count + 1
if (count == 13):
return True
else:
return False
def IsQuanxiao(Cardlist=[]):
list_list = list.copy(Cardlist)
count = 0
for i in range(0, 13):
if (list_list[i][0] <= 8):
count = count + 1
if (count == 13):
return True
else:
return False
def IsCouyise(Cardlist=[]):
list_list = list.copy(Cardlist)
Meihua = 0
Fangkuai = 0
for i in range(0, 13):
if (list_list[i][1] == '*'):
Meihua = Meihua + 1
elif (list_list[i][1] == '#'):
Fangkuai = Fangkuai + 1
if (Meihua + Fangkuai == 13 or Meihua + Fangkuai == 0):
return True
else:
return False
def IsShuangguaichongsan(Cardlist=[]):
    """
    Double-ghost triple: the pair/triple/quad pattern of the hand matches
    one of (3 pairs + 2 triples), (3 pairs + 1 triple + 1 quad),
    (2 pairs + 3 triples) or (1 pair + 2 triples + 1 quad).

    Bug fix: the quad branch previously tested ``list_count == 4``
    (comparing the whole tally list to the integer 4, which is always
    False) instead of ``list_count[i] == 4``, so hands containing a
    four-of-a-kind were never recognized.
    """
    hand = list.copy(Cardlist)
    # Tally face values (inlined GetList_count).
    tally = [0] * 15
    for card in hand:
        tally[card[0]] += 1
    count2 = 0
    count3 = 0
    count4 = 0
    for i in range(0, 13):
        if tally[i] == 2:
            count2 = count2 + 1
        elif tally[i] == 3:
            count3 = count3 + 1
        elif tally[i] == 4:  # fixed: was `list_count == 4`
            count4 = count4 + 1
    if (count2 == 3 and count3 == 2
            or count2 == 3 and count3 == 1 and count4 == 1
            or count2 == 2 and count3 == 3
            or count2 == 1 and count3 == 2 and count4 == 1):
        return True
    else:
        return False
def IsSitaosantiao(Cardlist=[]):
list_list = list.copy(Cardlist)
list_count = GetList_count(list_list)
count = 0
for i in range(0, 13):
if (list_count[i] >= 3):
count = count + 1
if (count == 4):
return True
else:
return False
def IsWuduisantiao(Cardlist=[]):
list_list = list.copy(Cardlist)
list_count = GetList_count(list_list)
count2 = 0
count3 = 0
count4 = 0
for i in range(0, 13):
if (list_count[i] == 2):
count2 = count2 + 1
elif (list_count[i] == 3):
count3 = count3 + 1
elif (list_count[i] == 4):
count4 = count4 + 1
if (count2 == 5 and count3 == 1
or count2 == 3 and count3 == 1 and count4 == 1
or count2 == 1 and count3 == 1 and count4 == 2):
return True
else:
return False
def IsLiuduiban(Cardlist=[]):
list_list = list.copy(Cardlist)
list_count = GetList_count(list_list)
count = 0
for i in range(0, 13):
if (list_count[i] == 4):
count = count + 2
elif (list_count[i] == 3 or list_count[i] == 2):
count = count + 1
if (count == 6):
return True
else:
return False
def IsSanshunzi(Cardlist=[]):
temp_Sanshunzi = [] # 存放在查找过程中可能存在的三顺子的一部分
list_list = list.copy(Cardlist) # Cardlist的副本
Sanshunzi=[]
temp_list1 = [] # 存放去掉一个顺子后的手牌
temp_list2 = [] # 存放去掉两个顺子后的手牌
Shunzi1 = [] # 第一层循环中所有的顺子
Shunzi2 = [] # 第二层循环中所有的顺子
Shunzi1 = CommonCardsType.FindShunzi(list_list)
if (Shunzi1 != []):
for item1 in Shunzi1:
temp_list1 = PlayCards.CalculateSub(list_list, item1)
Shunzi2 = CommonCardsType.FindShunzi(temp_list1)
if (Shunzi2 != []):
for item2 in Shunzi2:
temp_list2 = PlayCards.CalculateSub(temp_list1, item2)
temp_list2.sort()
if (temp_list2[0][0] + 1 == temp_list2[1][0]
and temp_list2[1][0] + 1 == temp_list2[2][0]):
temp_Sanshunzi.append(temp_list2)
if (item1[4][0] >= item2[4][0]):
temp_Sanshunzi.append(item2)
temp_Sanshunzi.append(item1)
else:
temp_Sanshunzi.append(item1)
temp_Sanshunzi.append(item2)
if temp_Sanshunzi not in Sanshunzi:
Sanshunzi.append(temp_Sanshunzi)
temp_Sanshunzi = []
if (Sanshunzi != []):
return True
else:
return False
def IsSantonghua(Cardlist=[]):
list_list = list.copy(Cardlist)
Fangkuai = 0
Meihua = 0
Heitao = 0
Hongxing = 0
for i in range(0, 13):
if (list_list[i][1] == '#'):
Fangkuai = Fangkuai + 1
elif (list_list[i][1] == '*'):
Meihua = Meihua + 1
elif (list_list[i][1] == '$'):
Heitao = Heitao + 1
else:
Hongxing = Hongxing + 1
templist = [Fangkuai, Meihua, Heitao, Hongxing]
templist.sort()
if (templist[0] == 0 and templist[1] == 3 and templist[2] == 5 and templist[3] == 5):
return True
else:
return False | 0.090809 | 0.201224 |
import requests
import json
from pprint import pprint
from glob import glob
from semantic_version import Version
import getpass
import sys
def main():
    """
    Create (or reuse) the GitHub release matching files/version.txt and
    upload every *.zip in the working directory as a release asset.

    Prompts for GitHub credentials on stdin/tty.
    """
    with open('files/version.txt') as f:
        version = f.read().strip()
    base_url = 'https://api.github.com/repos/jyapayne/Electrify/releases'
    req = requests.get(base_url + '/tags/' + version)
    update = False
    rel_id = None
    upload_url = None
    github_user = input('Github user:')
    # Bug fix: this line was a corrupted "<PASSWORD>.getpass" expression;
    # use the already-imported getpass module so the password is read
    # without echoing.
    password = getpass.getpass('Password:')
    if req.status_code == 200:
        print('Found release:', version)
        json_data = json.loads(req.text)
        tag = json_data.get('tag_name', '')
        # NOTE(review): [1:-1] strips the leading character AND the last
        # character of the version string -- confirm the tag format that
        # makes this correct (a plain "vX.Y.Z" tag would lose its final
        # digit here).
        cur_ver = Version(tag[1:-1])
        new_ver = Version(version[1:-1])
        if new_ver <= cur_ver:
            update = True
            rel_id = json_data['id']
            upload_url = json_data['upload_url'].replace('{?name,label}', '')
    if not update:
        print('Creating release:', version)
        data = {'tag_name': version,
                'target_commitish': 'master',
                'name': 'Electrify ' + version}
        post_res = requests.post(base_url, data=json.dumps(data),
                                 auth=(github_user, password))
        if post_res.status_code == 201:
            json_data = json.loads(post_res.text)
            upload_url = json_data['upload_url'].replace('{?name,label}', '')
            rel_id = json_data['id']
        else:
            print('Authentication failed!')
    if rel_id:
        for zip_file in glob('*.zip'):
            with open(zip_file, 'rb') as zipf:
                file_data = zipf.read()
            print('Uploading file {}...'.format(zip_file))
            params = {'name': zip_file}
            headers = {'Content-Type': 'application/zip'}
            r = requests.post(upload_url, params=params, data=file_data,
                              headers=headers, auth=(github_user, password))
            if r.status_code == 201:
                print('Success!')
            else:
                print('Error:', r.text)
if __name__ == '__main__':
main() | upload_release.py |
import requests
import json
from pprint import pprint
from glob import glob
from semantic_version import Version
import getpass
import sys
def main():
version = ""
with open('files/version.txt') as f:
version = f.read().strip()
base_url = 'https://api.github.com/repos/jyapayne/Electrify/releases'
req = requests.get(base_url+'/tags/'+version)
update = False
rel_id = None
upload_url = None
github_user = input('Github user:')
password = <PASSWORD>.getpass('Password:')
if req.status_code == 200:
print('Found release:', version)
json_data = json.loads(req.text)
tag = json_data.get('tag_name', '')
cur_ver = Version(tag[1:-1])
new_ver = Version(version[1:-1])
if new_ver <= cur_ver:
update = True
rel_id = json_data['id']
upload_url = json_data['upload_url'].replace('{?name,label}', '')
if not update:
print('Creating release:', version)
data = {'tag_name': version,
'target_commitish': 'master',
'name': 'Electrify ' + version}
post_res = requests.post(base_url, data=json.dumps(data), auth=(github_user, password))
if post_res.status_code == 201:
json_data = json.loads(post_res.text)
upload_url = json_data['upload_url'].replace('{?name,label}', '')
rel_id = json_data['id']
else:
print('Authentication failed!')
if rel_id:
zip_files = glob('*.zip')
for zip_file in zip_files:
with open(zip_file, 'rb') as zipf:
file_data = zipf.read()
print('Uploading file {}...'.format(zip_file))
data = {'name': zip_file}
headers = {'Content-Type': 'application/zip'}
r = requests.post(upload_url, params=data, data=file_data, headers=headers, auth=(github_user, password))
if r.status_code == 201:
print('Success!')
else:
print('Error:', r.text)
if __name__ == '__main__':
main() | 0.127476 | 0.060613 |
import numpy as np
f1_alpha=1000 #set alpha of f1
f3_epsilon=1e-6 #set epsilon of f3
f45_q=10**8
def f1(x): #ellipsoid function
dim=x.shape[0] #dimension number
result=0
for i in range(dim):
result+=f1_alpha**(i/(dim-1))*x[i]**2
return result
def g1(x):
dim=x.shape[0]
result=np.zeros(dim)
for i in range(dim):
result[i]=2*(f1_alpha**(i/(dim-1)))*x[i]
return result
def h1(x):
dim=x.shape[0]
result=np.zeros((dim,dim))
for i in range(dim):
result[i,i]=2*(f1_alpha**(i/(dim-1)))
return result
f2 = lambda x: (1-x[0])**2+100*(x[1]-x[0]**2)**2; #Rosenbrok function
g2 = lambda x: np.array([-400*x[0]*(x[1]-x[0]**2)-2*(1-x[0]), 200*(x[1]-x[0]**2)])
h2 = lambda x: np.array([[2+1200*x[0]**2-400*x[1], -400*x[0]], [-400*x[0], 200]])
f3 = lambda x: np.log(f3_epsilon+f1(x)) #log ellipsoid function
def g3(x):
dim=x.shape[0]
result=np.zeros(dim)
for i in range(dim):
result[i]=(2*f1_alpha**(i/(dim-1))*x[i])/(f3_epsilon+f1(x))
return result
def h3(x):
dim=x.shape[0]
result=np.zeros((dim,dim))
f1_elli=f1(x)
for i in range(dim):
for j in range(dim):
if(i==j):
result[i,j]=((2*f1_alpha**(i/(dim-1)))/(f3_epsilon+f1_elli)
- (2*x[i]**2)/(f3_epsilon+f1_elli)**2)
else:
result[i,j]=((-4*f1_alpha**((i+j)/(dim-1))*x[i]*x[j])
/ (f3_epsilon+f1_elli)**2)
return result
funch = lambda x: (np.log(1+np.exp(-np.absolute(f45_q*x)))+np.maximum(f45_q*x,0))/f45_q
def f4(x):
if(isinstance(x, int) or isinstance(x, float)):
return (funch(x) + 100*funch(-x))
else:
dim=x.shape[0] #dimension number
result=0
for i in range(dim):
result+=funch(x[i])+100*funch(-x[i])
return result
'''
def f4_g(x):
dim=x.shape[0] #dimension number
result=np.zeros(dim)
for i in range(dim):
result[i]=(np.exp(f45_q*x[i]))/(1+np.exp(f45_q*x[i]))-100*(np.exp(-f45_q*x[i])/(1+np.exp(-f45_q*x[i])))
return result
def f4_h(x):
dim=x.shape[0] #dimension number
result=np.zeros((dim,dim))
for i in range(dim):
for j in range(dim):
if(i==j):
result[i,j]=(101*f45_q*np.exp(-f45_q*x[i]))/(1+np.exp(-f45_q*x[i]))**2
else:
result[i,j]=0
return result
'''
def h_d1(x):
if x >= 0:
return 1 / (1 + np.exp(-x * f45_q))
return np.exp(x * f45_q) / (1 + np.exp(x * f45_q))
def h_d11(x):
if x >= 0:
return -(np.exp(-f45_q * x) / (1 + np.exp(-f45_q * x)))
return -(1 / (1 + np.exp(f45_q * x)))
def h_d2(x):
if x >= 0:
return (f45_q * np.exp(-x * f45_q)) / (1 + np.exp(-x * f45_q))**2
return (f45_q * np.exp(x * f45_q)) / (1 + np.exp(x * f45_q)) ** 2
def g4(x):
if isinstance(x, int) or isinstance(x, float):
return h_d1(x) - 100 * h_d1(-x)
else:
d = len(x)
grad = np.zeros(d)
for i in range(d):
grad[i] = h_d1(x[i]) - 100 * h_d1(-x[i])
return grad
def h4(x):
if isinstance(x, int) or isinstance(x, float):
return h_d2(x) + 100 * h_d2(-x)
else:
d = len(x)
hessian = np.zeros((d, d))
for i in range(d):
hessian[i, i] = h_d2(x[i]) + 100 * h_d2(-x[i])
return hessian
def f5(x):
if(isinstance(x, int) or isinstance(x, float)):
return funch(x)**2 + 100*funch(-x)**2
else:
dim=x.shape[0] #dimension number
result=0;
for i in range(dim):
result+=funch(x[i])**2+100*funch(-x[i])**2
return result
def g5(x):
if isinstance(x, int) or isinstance(x, float):
return 2 * h(x) * h_d1(x) - 100 * 2 * h(-x) * h_d1(-x)
else:
d = len(x)
grad = np.zeros(d)
for i in range(d):
grad[i] = 2 * funch(x[i]) * h_d1(x[i]) - 100 * 2 * funch(-x[i]) * h_d1(-x[i])
return grad
def h5(x):
if isinstance(x, int) or isinstance(x, float):
return 2 * np.exp(f45_q * x) * (np.exp(f45_q * x) + np.log(np.exp(f45_q * x) + 1)) / (
np.exp(f45_q * x) + 1) ** 2 + 200 * np.exp(-2 * f45_q * x) * (
np.exp(f45_q * x) * np.log(np.exp(-f45_q * x) + 1) + 1) / (np.exp(-f45_q * x) + 1) ** 2
else:
d = len(x)
hessian = np.zeros((d, d))
for i in range(d):
hessian[i, i] = 2 * h_d1(x[i])**2 + 2*funch(x[i])*h_d2(x[i]) + 200*h_d11(x[i])**2
# 2 * np.exp(f45_q * x[i]) * (np.exp(f45_q * x[i]) + np.log(np.exp(f45_q * x[i]) + 1)) / (
# np.exp(f45_q * x[i]) + 1) ** 2 + 200 * np.exp(-2 * f45_q * x[i]) * (
# np.exp(f45_q * x[i]) * np.log(np.exp(-f45_q * x[i]) + 1) + 1) / (np.exp(-f45_q * x[i]) + 1) ** 2
return hessian | functions.py | import numpy as np
f1_alpha=1000 #set alpha of f1
f3_epsilon=1e-6 #set epsilon of f3
f45_q=10**8
def f1(x): #ellipsoid function
dim=x.shape[0] #dimension number
result=0
for i in range(dim):
result+=f1_alpha**(i/(dim-1))*x[i]**2
return result
def g1(x):
dim=x.shape[0]
result=np.zeros(dim)
for i in range(dim):
result[i]=2*(f1_alpha**(i/(dim-1)))*x[i]
return result
def h1(x):
dim=x.shape[0]
result=np.zeros((dim,dim))
for i in range(dim):
result[i,i]=2*(f1_alpha**(i/(dim-1)))
return result
f2 = lambda x: (1-x[0])**2+100*(x[1]-x[0]**2)**2; #Rosenbrok function
g2 = lambda x: np.array([-400*x[0]*(x[1]-x[0]**2)-2*(1-x[0]), 200*(x[1]-x[0]**2)])
h2 = lambda x: np.array([[2+1200*x[0]**2-400*x[1], -400*x[0]], [-400*x[0], 200]])
f3 = lambda x: np.log(f3_epsilon+f1(x)) #log ellipsoid function
def g3(x):
dim=x.shape[0]
result=np.zeros(dim)
for i in range(dim):
result[i]=(2*f1_alpha**(i/(dim-1))*x[i])/(f3_epsilon+f1(x))
return result
def h3(x):
dim=x.shape[0]
result=np.zeros((dim,dim))
f1_elli=f1(x)
for i in range(dim):
for j in range(dim):
if(i==j):
result[i,j]=((2*f1_alpha**(i/(dim-1)))/(f3_epsilon+f1_elli)
- (2*x[i]**2)/(f3_epsilon+f1_elli)**2)
else:
result[i,j]=((-4*f1_alpha**((i+j)/(dim-1))*x[i]*x[j])
/ (f3_epsilon+f1_elli)**2)
return result
funch = lambda x: (np.log(1+np.exp(-np.absolute(f45_q*x)))+np.maximum(f45_q*x,0))/f45_q
def f4(x):
if(isinstance(x, int) or isinstance(x, float)):
return (funch(x) + 100*funch(-x))
else:
dim=x.shape[0] #dimension number
result=0
for i in range(dim):
result+=funch(x[i])+100*funch(-x[i])
return result
'''
def f4_g(x):
dim=x.shape[0] #dimension number
result=np.zeros(dim)
for i in range(dim):
result[i]=(np.exp(f45_q*x[i]))/(1+np.exp(f45_q*x[i]))-100*(np.exp(-f45_q*x[i])/(1+np.exp(-f45_q*x[i])))
return result
def f4_h(x):
dim=x.shape[0] #dimension number
result=np.zeros((dim,dim))
for i in range(dim):
for j in range(dim):
if(i==j):
result[i,j]=(101*f45_q*np.exp(-f45_q*x[i]))/(1+np.exp(-f45_q*x[i]))**2
else:
result[i,j]=0
return result
'''
def h_d1(x):
if x >= 0:
return 1 / (1 + np.exp(-x * f45_q))
return np.exp(x * f45_q) / (1 + np.exp(x * f45_q))
def h_d11(x):
if x >= 0:
return -(np.exp(-f45_q * x) / (1 + np.exp(-f45_q * x)))
return -(1 / (1 + np.exp(f45_q * x)))
def h_d2(x):
if x >= 0:
return (f45_q * np.exp(-x * f45_q)) / (1 + np.exp(-x * f45_q))**2
return (f45_q * np.exp(x * f45_q)) / (1 + np.exp(x * f45_q)) ** 2
def g4(x):
if isinstance(x, int) or isinstance(x, float):
return h_d1(x) - 100 * h_d1(-x)
else:
d = len(x)
grad = np.zeros(d)
for i in range(d):
grad[i] = h_d1(x[i]) - 100 * h_d1(-x[i])
return grad
def h4(x):
if isinstance(x, int) or isinstance(x, float):
return h_d2(x) + 100 * h_d2(-x)
else:
d = len(x)
hessian = np.zeros((d, d))
for i in range(d):
hessian[i, i] = h_d2(x[i]) + 100 * h_d2(-x[i])
return hessian
def f5(x):
if(isinstance(x, int) or isinstance(x, float)):
return funch(x)**2 + 100*funch(-x)**2
else:
dim=x.shape[0] #dimension number
result=0;
for i in range(dim):
result+=funch(x[i])**2+100*funch(-x[i])**2
return result
def g5(x):
if isinstance(x, int) or isinstance(x, float):
return 2 * h(x) * h_d1(x) - 100 * 2 * h(-x) * h_d1(-x)
else:
d = len(x)
grad = np.zeros(d)
for i in range(d):
grad[i] = 2 * funch(x[i]) * h_d1(x[i]) - 100 * 2 * funch(-x[i]) * h_d1(-x[i])
return grad
def h5(x):
if isinstance(x, int) or isinstance(x, float):
return 2 * np.exp(f45_q * x) * (np.exp(f45_q * x) + np.log(np.exp(f45_q * x) + 1)) / (
np.exp(f45_q * x) + 1) ** 2 + 200 * np.exp(-2 * f45_q * x) * (
np.exp(f45_q * x) * np.log(np.exp(-f45_q * x) + 1) + 1) / (np.exp(-f45_q * x) + 1) ** 2
else:
d = len(x)
hessian = np.zeros((d, d))
for i in range(d):
hessian[i, i] = 2 * h_d1(x[i])**2 + 2*funch(x[i])*h_d2(x[i]) + 200*h_d11(x[i])**2
# 2 * np.exp(f45_q * x[i]) * (np.exp(f45_q * x[i]) + np.log(np.exp(f45_q * x[i]) + 1)) / (
# np.exp(f45_q * x[i]) + 1) ** 2 + 200 * np.exp(-2 * f45_q * x[i]) * (
# np.exp(f45_q * x[i]) * np.log(np.exp(-f45_q * x[i]) + 1) + 1) / (np.exp(-f45_q * x[i]) + 1) ** 2
return hessian | 0.214527 | 0.399724 |
from typing import Tuple, Union
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit import rdBase
from rdkit.Chem.rdchem import Mol
functionality_smarts = {
"ols": "[C,c;!$(C=O)][OH]",
"aliphatic_ols": "[C;!$(C=O);!$([a])][OH]",
"acids": "[#6][#6](=[#8:4])([F,Cl,Br,I,#8H,O-])",
"prime_amines": "[#6;!$(C=O)][NH2;!$([NH2+])]",
"carbonates": "[O]=[C]([F,Cl,Br,I,O])([F,Cl,Br,I,O])",
"acidanhydrides": "[#8]([#6](=[#8]))([#6](=[#8]))",
"prime_thiols": "[#6;!$(C=O)][SH]",
}
def molecule_from_smiles(smiles: str) -> Union[Mol, None]:
"""Generate rdkit mol from smiles
Parameters
----------
smiles : str
SMILES string
Returns
-------
Union[Mol, None]
RDKit Molecule, or None if can't generate
"""
try:
mol = Chem.MolFromSmiles(smiles)
except:
mol = None
return mol
def get_functionality(reactants, distribution=[]):
"""gets the functional groups from a list of reactants
inputs: list of smiles
output: dataframe with count of functional groups
"""
def id_functionality(r):
mol = Chem.MolFromSmiles(r.name)
r.ols = len(
mol.GetSubstructMatches(Chem.MolFromSmarts(functionality_smarts["ols"]))
)
r.aliphatic_ols = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["aliphatic_ols"])
)
)
r.acids = len(
mol.GetSubstructMatches(Chem.MolFromSmarts(functionality_smarts["acids"]))
)
r.prime_amines = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["prime_amines"])
)
)
r.carbonates = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["carbonates"])
)
)
r.acidanhydrides = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["acidanhydrides"])
)
)
return r
df_func = pd.DataFrame(
data=0,
index=reactants,
columns=[
"ols",
"acids",
"prime_amines",
"carbonates",
"aliphatic_ols",
"acidanhydrides",
],
)
df_func = df_func.apply(lambda r: id_functionality(r), axis=1)
# appends distribution to dataframe
if len(distribution) == 0:
df_func["distribution"] = [1] * df_func.shape[0]
else:
df_func["distribution"] = list(distribution)
return df_func
def enumerate_ester_enantiomers(smiles: str) -> Tuple[Tuple[str]]:
"""Generate enantiomer pairs for a monomer that would participate in an esterification reaction
Parameters
----------
smiles : str
SMILES string of the monomer
Returns
-------
Tuple[Tuple[str]]
The enantiomer pairs, or a single string if no enantiomers are created
"""
try:
mol = Chem.MolFromSmiles(smiles)
# Get the atom ids for the acid and ol functionalities
acid = Chem.MolFromSmarts("[O]-[C]=[O]")
ol = Chem.MolFromSmarts("[C;X4]-[O]")
acid_atoms = mol.GetSubstructMatch(acid)
ol_atoms = mol.GetSubstructMatch(ol)
# Get the carbon atoms for both, doesn't matter for acid, but does for ol
atoms = mol.GetAtoms()
for atom_i in acid_atoms:
if atoms[atom_i].GetAtomicNum() == 8:
acid_O = atom_i
for atom_i in ol_atoms:
if atoms[atom_i].GetAtomicNum() == 8:
ol_O = atom_i
# Get shortest path (backbone) and make stereo based on that
# Does shortest path make sense?
atom_path = Chem.GetShortestPath(mol, acid_O, ol_O)
# Get sites that can be stereo
stereo_sites = [
sinfo.centeredOn for sinfo in Chem.rdmolops.FindPotentialStereo(mol)
]
# Find the enantiomers based on stereo sites within the shortest path
enantiomers = []
for atom_i in set(atom_path).intersection(stereo_sites):
atoms[atom_i].SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW)
smilesR = Chem.MolToSmiles(mol)
atoms[atom_i].SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW)
smilesS = Chem.MolToSmiles(mol)
enantiomers.append((smilesR, smilesS))
atoms[atom_i].SetChiralTag(Chem.rdchem.ChiralType.CHI_UNSPECIFIED)
except BaseException as e:
enantiomers = [[smiles]]
if not enantiomers:
enantiomers = [[smiles]]
return enantiomers
class Monomer:
def __init__(self, smiles: str):
self.smiles = smiles
self.canonical_smiles = Chem.CanonSmiles(smiles)
self.molecule = molecule_from_smiles(self.smiles)
self.molecular_weight = ExactMolWt(self.molecule)
df = pd.DataFrame([self.smiles], columns=["smiles"])
self.functionality = get_functionality(df.smiles)
def __repr__(self) -> str:
if self.is_valid:
return f"Valid Monomer with smiles {self.smiles}"
else:
return f"Invalid Monomer with smiles {self.smiles}"
@property
def esterification_enantiomers(self) -> Tuple[Tuple[str]]:
"""Get possible enantiomers that would participate in an esterification reaction
Returns
-------
Tuple[Tuple[str, str]]
Tuple containing diads of enantiomers
"""
return enumerate_ester_enantiomers(self.smiles) | m2p/monomers.py |
from typing import Tuple, Union
import pandas as pd
from rdkit import Chem
from rdkit.Chem.Descriptors import ExactMolWt
from rdkit import rdBase
from rdkit.Chem.rdchem import Mol
functionality_smarts = {
"ols": "[C,c;!$(C=O)][OH]",
"aliphatic_ols": "[C;!$(C=O);!$([a])][OH]",
"acids": "[#6][#6](=[#8:4])([F,Cl,Br,I,#8H,O-])",
"prime_amines": "[#6;!$(C=O)][NH2;!$([NH2+])]",
"carbonates": "[O]=[C]([F,Cl,Br,I,O])([F,Cl,Br,I,O])",
"acidanhydrides": "[#8]([#6](=[#8]))([#6](=[#8]))",
"prime_thiols": "[#6;!$(C=O)][SH]",
}
def molecule_from_smiles(smiles: str) -> Union[Mol, None]:
"""Generate rdkit mol from smiles
Parameters
----------
smiles : str
SMILES string
Returns
-------
Union[Mol, None]
RDKit Molecule, or None if can't generate
"""
try:
mol = Chem.MolFromSmiles(smiles)
except:
mol = None
return mol
def get_functionality(reactants, distribution=[]):
"""gets the functional groups from a list of reactants
inputs: list of smiles
output: dataframe with count of functional groups
"""
def id_functionality(r):
mol = Chem.MolFromSmiles(r.name)
r.ols = len(
mol.GetSubstructMatches(Chem.MolFromSmarts(functionality_smarts["ols"]))
)
r.aliphatic_ols = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["aliphatic_ols"])
)
)
r.acids = len(
mol.GetSubstructMatches(Chem.MolFromSmarts(functionality_smarts["acids"]))
)
r.prime_amines = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["prime_amines"])
)
)
r.carbonates = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["carbonates"])
)
)
r.acidanhydrides = len(
mol.GetSubstructMatches(
Chem.MolFromSmarts(functionality_smarts["acidanhydrides"])
)
)
return r
df_func = pd.DataFrame(
data=0,
index=reactants,
columns=[
"ols",
"acids",
"prime_amines",
"carbonates",
"aliphatic_ols",
"acidanhydrides",
],
)
df_func = df_func.apply(lambda r: id_functionality(r), axis=1)
# appends distribution to dataframe
if len(distribution) == 0:
df_func["distribution"] = [1] * df_func.shape[0]
else:
df_func["distribution"] = list(distribution)
return df_func
def enumerate_ester_enantiomers(smiles: str) -> Tuple[Tuple[str]]:
"""Generate enantiomer pairs for a monomer that would participate in an esterification reaction
Parameters
----------
smiles : str
SMILES string of the monomer
Returns
-------
Tuple[Tuple[str]]
The enantiomer pairs, or a single string if no enantiomers are created
"""
try:
mol = Chem.MolFromSmiles(smiles)
# Get the atom ids for the acid and ol functionalities
acid = Chem.MolFromSmarts("[O]-[C]=[O]")
ol = Chem.MolFromSmarts("[C;X4]-[O]")
acid_atoms = mol.GetSubstructMatch(acid)
ol_atoms = mol.GetSubstructMatch(ol)
# Get the carbon atoms for both, doesn't matter for acid, but does for ol
atoms = mol.GetAtoms()
for atom_i in acid_atoms:
if atoms[atom_i].GetAtomicNum() == 8:
acid_O = atom_i
for atom_i in ol_atoms:
if atoms[atom_i].GetAtomicNum() == 8:
ol_O = atom_i
# Get shortest path (backbone) and make stereo based on that
# Does shortest path make sense?
atom_path = Chem.GetShortestPath(mol, acid_O, ol_O)
# Get sites that can be stereo
stereo_sites = [
sinfo.centeredOn for sinfo in Chem.rdmolops.FindPotentialStereo(mol)
]
# Find the enantiomers based on stereo sites within the shortest path
enantiomers = []
for atom_i in set(atom_path).intersection(stereo_sites):
atoms[atom_i].SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CW)
smilesR = Chem.MolToSmiles(mol)
atoms[atom_i].SetChiralTag(Chem.rdchem.ChiralType.CHI_TETRAHEDRAL_CCW)
smilesS = Chem.MolToSmiles(mol)
enantiomers.append((smilesR, smilesS))
atoms[atom_i].SetChiralTag(Chem.rdchem.ChiralType.CHI_UNSPECIFIED)
except BaseException as e:
enantiomers = [[smiles]]
if not enantiomers:
enantiomers = [[smiles]]
return enantiomers
class Monomer:
def __init__(self, smiles: str):
self.smiles = smiles
self.canonical_smiles = Chem.CanonSmiles(smiles)
self.molecule = molecule_from_smiles(self.smiles)
self.molecular_weight = ExactMolWt(self.molecule)
df = pd.DataFrame([self.smiles], columns=["smiles"])
self.functionality = get_functionality(df.smiles)
def __repr__(self) -> str:
if self.is_valid:
return f"Valid Monomer with smiles {self.smiles}"
else:
return f"Invalid Monomer with smiles {self.smiles}"
@property
def esterification_enantiomers(self) -> Tuple[Tuple[str]]:
"""Get possible enantiomers that would participate in an esterification reaction
Returns
-------
Tuple[Tuple[str, str]]
Tuple containing diads of enantiomers
"""
return enumerate_ester_enantiomers(self.smiles) | 0.799442 | 0.358943 |
import sys
import codecs
import re
pat = "\[.*?(\d)\]"
reg = re.compile(pat)
JSON = {}
def removeStems(s):
s = s.replace(u'。', '') # Dirty
idx = s.find("(")
if idx!= -1:
s = s[:idx]
return s.strip()
def getStem(s):
stem_r = re.search(ur'\(.+\)', s)
if stem_r:
return s[stem_r.start() + 1:stem_r.end() - 1]
else:
return None
def affixation(s):
import re
from amis_stemmer import gnostic
s = s.replace(u'。', '').strip()
w1 = re.split(r"([\w:'^]+)", s.strip())
w2 = map(gnostic, w1)
return ''.join(w2)
# 加入萌典前端使用的標記
# \ufff9: 阿美語例句
# \ufffa: 英文例句
# \ufffb: 漢語例句
def addsplt(s):
return u'\ufff9'+s[0]+u'\ufffa'+s[1]+u'\ufffb'+s[2]
def mkword(title, definitions, tag, stem):
global JSON
word = {'title': title,
'heteronyms': [{'definitions': definitions}]}
if tag:
word['tag'] = tag
if stem:
word['stem'] = stem
if title in JSON:
print "Add heteronym: " + title
JSON[title]['heteronyms'].append({'definitions': definitions})
else:
JSON[title] = word
def mkdef(defi, examples, link):
defdic = {}
if len(examples) > 0:
defdic['example'] = examples
examples = []
defdic['def'] = defi
if link:
defdic['synonyms'] = map(affixation, link)
return defdic
def readdict(fn):
fp = codecs.open(fn, mode='r', encoding='utf8')
title = None # 詞
tag = None # 疊文
stem = None # 字根
state = None
num_words = 0
for line in fp:
l = line.replace(u'① ', '') \
.replace(u'② ', '') \
.replace(u'③ ', '') \
.replace(u'④ ', '') \
.replace(u'⑤ ', '') \
.replace(u'⑥ ', '') \
.replace(u'⑦ ', '') \
.replace(u'⑧ ', '') \
.replace(u'⑨ ', '')
l = l.strip()
if l == '' and title: # 寫入詞條
num_words += 1
defdic = mkdef(defi, examples, link)
if len(defdic) > 0:
definitions.append(defdic)
mkword(title, definitions, tag, stem)
title = None
state = None
tag = None
stem = None
definitions = []
examples = []
link = []
defi = ""
continue
if l == '': # 空白行
continue
if l[0] == '#': # 註解
continue
if state is None: # 詞
stem = getStem(l)
title = removeStems(l)
definitions = []
examples = []
link = []
defi = ""
state = 'd'
continue
if l[0:2] == '=>': # 相關詞
state = 'l'
if line[0:4] == ' ': # 例句
state = 'e' + state
if state == 'd': # 漢語定義
tag_r = re.search(ur'(\[([^]]+詞|[^]]+語|疊[^]]*|[^]]+綴)\])', l) # [疊2] [日語借詞] 這類
if tag_r:
tag = l[tag_r.start():tag_r.end()]
l = l.replace(tag, '').replace(u'。。', u'。')
if defi!="": # 有上一個def
defdic = mkdef(defi, examples, link)
if len(defdic) > 0:
definitions.append(defdic)
examples = []
link = []
defi = l;
state = 'd'
continue
if state == 'ed': # 阿美語例句
ex = [affixation(l), '', '']
state = 'a'
continue
if state == 'ea': # 漢文例句
ex[2] = l
examples.append(addsplt(ex))
state = 'd'
continue
if state == 'l': # 相關詞
link.append(l[2:])
state = 'd'
if title:
num_words += 1
defdic = mkdef(defi, examples, link )
if len(defdic) > 0:
definitions.append(defdic)
mkword(title, definitions, tag, stem)
fp.close()
print 'Total %d words in %s' % (num_words, fn)
if __name__ == '__main__':
import glob
import json
import re
import codecs
for fn in glob.iglob('*.txt'):
print fn
readdict(fn)
f = codecs.open('dict-amis.json', mode='w', encoding='utf8')
f.write(json.dumps(JSON.values(), indent=2, separators=(',', ':'), ensure_ascii = False, encoding="utf8"))
f.close() | txt/moedict.py |
import sys
import codecs
import re
pat = "\[.*?(\d)\]"
reg = re.compile(pat)
JSON = {}
def removeStems(s):
s = s.replace(u'。', '') # Dirty
idx = s.find("(")
if idx!= -1:
s = s[:idx]
return s.strip()
def getStem(s):
stem_r = re.search(ur'\(.+\)', s)
if stem_r:
return s[stem_r.start() + 1:stem_r.end() - 1]
else:
return None
def affixation(s):
import re
from amis_stemmer import gnostic
s = s.replace(u'。', '').strip()
w1 = re.split(r"([\w:'^]+)", s.strip())
w2 = map(gnostic, w1)
return ''.join(w2)
# 加入萌典前端使用的標記
# \ufff9: 阿美語例句
# \ufffa: 英文例句
# \ufffb: 漢語例句
def addsplt(s):
return u'\ufff9'+s[0]+u'\ufffa'+s[1]+u'\ufffb'+s[2]
def mkword(title, definitions, tag, stem):
global JSON
word = {'title': title,
'heteronyms': [{'definitions': definitions}]}
if tag:
word['tag'] = tag
if stem:
word['stem'] = stem
if title in JSON:
print "Add heteronym: " + title
JSON[title]['heteronyms'].append({'definitions': definitions})
else:
JSON[title] = word
def mkdef(defi, examples, link):
defdic = {}
if len(examples) > 0:
defdic['example'] = examples
examples = []
defdic['def'] = defi
if link:
defdic['synonyms'] = map(affixation, link)
return defdic
def readdict(fn):
fp = codecs.open(fn, mode='r', encoding='utf8')
title = None # 詞
tag = None # 疊文
stem = None # 字根
state = None
num_words = 0
for line in fp:
l = line.replace(u'① ', '') \
.replace(u'② ', '') \
.replace(u'③ ', '') \
.replace(u'④ ', '') \
.replace(u'⑤ ', '') \
.replace(u'⑥ ', '') \
.replace(u'⑦ ', '') \
.replace(u'⑧ ', '') \
.replace(u'⑨ ', '')
l = l.strip()
if l == '' and title: # 寫入詞條
num_words += 1
defdic = mkdef(defi, examples, link)
if len(defdic) > 0:
definitions.append(defdic)
mkword(title, definitions, tag, stem)
title = None
state = None
tag = None
stem = None
definitions = []
examples = []
link = []
defi = ""
continue
if l == '': # 空白行
continue
if l[0] == '#': # 註解
continue
if state is None: # 詞
stem = getStem(l)
title = removeStems(l)
definitions = []
examples = []
link = []
defi = ""
state = 'd'
continue
if l[0:2] == '=>': # 相關詞
state = 'l'
if line[0:4] == ' ': # 例句
state = 'e' + state
if state == 'd': # 漢語定義
tag_r = re.search(ur'(\[([^]]+詞|[^]]+語|疊[^]]*|[^]]+綴)\])', l) # [疊2] [日語借詞] 這類
if tag_r:
tag = l[tag_r.start():tag_r.end()]
l = l.replace(tag, '').replace(u'。。', u'。')
if defi!="": # 有上一個def
defdic = mkdef(defi, examples, link)
if len(defdic) > 0:
definitions.append(defdic)
examples = []
link = []
defi = l;
state = 'd'
continue
if state == 'ed': # 阿美語例句
ex = [affixation(l), '', '']
state = 'a'
continue
if state == 'ea': # 漢文例句
ex[2] = l
examples.append(addsplt(ex))
state = 'd'
continue
if state == 'l': # 相關詞
link.append(l[2:])
state = 'd'
if title:
num_words += 1
defdic = mkdef(defi, examples, link )
if len(defdic) > 0:
definitions.append(defdic)
mkword(title, definitions, tag, stem)
fp.close()
print 'Total %d words in %s' % (num_words, fn)
if __name__ == '__main__':
import glob
import json
import re
import codecs
for fn in glob.iglob('*.txt'):
print fn
readdict(fn)
f = codecs.open('dict-amis.json', mode='w', encoding='utf8')
f.write(json.dumps(JSON.values(), indent=2, separators=(',', ':'), ensure_ascii = False, encoding="utf8"))
f.close() | 0.078722 | 0.180702 |
import math
import os
import random
from string import ascii_lowercase
import psutil
import torch
import torchvision
import torchvision.transforms.functional as F
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from src.data.transforms import Crop, StatefulRandomHorizontalFlip
class LRS2Dataset(Dataset):
def __init__(self, path, mode, in_channels=1, max_timesteps=100, max_text_len=200, pretrain_words=0, pretrain=False, augmentations=False):
assert mode in ['train', 'val', 'test']
self.max_timesteps = max_timesteps
self.pretrain = pretrain
self.in_channels = in_channels
self.max_timesteps = max_timesteps
self.augmentations = augmentations if mode in ['train', 'pretrain'] else False
self.skip_long_samples = True
self.max_text_len = max_text_len
self.pretrain_words = pretrain_words
self.file_paths, self.file_names, self.crops = self.build_file_list(path, mode)
self.dictionary = self.build_dictionary(path)
self.char_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '<sos>', '<eos>', '<pad>', '\'', ' ']
self.int2char = dict(enumerate(self.char_list))
self.char2int = {char: index for index, char in self.int2char.items()}
def build_dictionary(self, directory):
dictionary = set()
file = open(f"{directory}/train.txt", "r")
for file in file.readlines():
file = file.split(" ")[0].strip()
path = f"{directory}/mvlrs_v1/main/{file}.txt"
content = open(path, "r").read()
sentence = content.splitlines()[0][7:]
words = sentence.split(" ")
dictionary.update(words)
return list(dictionary)
def build_file_list(self, directory, mode):
file_list, paths = [], []
crops = {}
skipped_samples = 0
if self.pretrain:
path = f"data/preprocess/lrs2/pretrain_crop.txt"
else:
path = f"data/preprocess/lrs2/{mode}_crop.txt"
file = open(path, "r")
content = file.read()
for i, line in enumerate(content.splitlines()):
split = line.split(":")
file = split[0]
crop_str = split[1]
crops[file] = crop_str
if self.pretrain:
file = open(f"{directory}/pretrain.txt", "r")
content = file.read()
for file in content.splitlines():
if file in crops:
file_list.append(file)
paths.append(f"{directory}/mvlrs_v1/pretrain/{file}")
else:
file = open(f"{directory}/{mode}.txt", "r")
content = file.read()
for file in content.splitlines():
file = file.split(" ")[0]
if file not in crops:
continue
if self.skip_long_samples:
if crops[file].count("|") < self.max_timesteps:
file_list.append(file)
paths.append(f"{directory}/mvlrs_v1/main/{file}")
else:
skipped_samples += 1
else:
file_list.append(file)
paths.append(f"{directory}/mvlrs_v1/main/{file}")
if self.skip_long_samples:
print(f"Skipped {skipped_samples} too long samples")
return paths, file_list, crops
def build_tensor(self, frames, crops):
crops = [crop.split(";") for crop in crops]
for i, crop_frame in enumerate(crops):
crop = [float(crop) for crop in crop_frame]
crops[i] = crop
if(self.augmentations):
augmentations = transforms.Compose([
StatefulRandomHorizontalFlip(0.5),
])
else:
augmentations = transforms.Compose([])
temporalVolume = torch.zeros(self.max_timesteps, self.in_channels, 64, 96)
for i, frame in enumerate(frames):
if self.in_channels == 1:
transform = transforms.Compose([
transforms.ToPILImage(),
Crop(crops[i]),
augmentations,
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize([0.4161, ], [0.1688, ]),
])
elif self.in_channels == 3:
transform = transforms.Compose([
transforms.ToPILImage(),
Crop(crops[i]),
augmentations,
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
temporalVolume[i] = transform(frame)
temporalVolume = temporalVolume.transpose(1, 0) # (C, D, H, W)
return temporalVolume
def __len__(self):
return len(self.file_paths)
def get_pretrain_words(self, content):
assert self.pretrain_words > 0
lines = content.splitlines()[4:]
words = []
for line in lines:
word, start, stop, _ = line.split(" ")
start, stop = float(start), float(stop)
words.append([word, start, stop])
num_words = min(random.randint(max(self.pretrain_words - 1, 1), self.pretrain_words), len(words))
word_start = random.randint(0, len(words) - num_words)
word_end = word_start + num_words
sample_start = 0
sample_end = 0
content = ""
for word in words[word_start:word_end]:
word, start, end = word
if sample_start == 0:
sample_start = start
if end > sample_end:
sample_end = end
content = content + " " + word
return content, sample_start, sample_end
def __getitem__(self, idx):
file = self.file_names[idx]
file_path = self.file_paths[idx]
content = open(file_path + ".txt", "r").read()
frame_crops = self.crops[file].split("|")
start_sec = 0
stop_sec = None
if self.pretrain:
content, start_sec, stop_sec = self.get_pretrain_words(content)
else:
content = content.splitlines()[0][7:]
crop = frame_crops
video, _, info = torchvision.io.read_video(file_path + ".mp4", start_pts=start_sec, end_pts=stop_sec, pts_unit='sec') # T, H, W, C
video = video.permute(0, 3, 1, 2) # T C H W
num_frames = video.size(0)
if num_frames > self.max_timesteps:
print(f"Cutting frames off. Requires {len(video)} frames: {file}")
video = video[:self.max_timesteps]
num_frames = video.size(0)
if self.pretrain:
fps = info['video_fps']
start_frame = int(start_sec * fps)
crop = frame_crops[start_frame:start_frame + num_frames]
crop = crop[:self.max_timesteps]
assert num_frames <= self.max_timesteps, f"Video too large with {num_frames} frames: {file_path}"
content = content.strip().upper()
assert len(crop) == num_frames
assert len(content) >= 1
frames = self.build_tensor(video, crop)
encoded = self.encode(content)
return frames, num_frames, encoded
def encode(self, content):
encoded = [self.char2int[c] for c in content] + [self.char2int['<eos>']]
if len(encoded) > self.max_text_len:
print(f"Max output length too short. Required {len(encoded)}")
encoded = encoded[:self.max_text_len]
encoded += [self.char2int['<pad>'] for _ in range(self.max_text_len - len(encoded))]
return torch.Tensor(encoded) | src/data/lrs2.py | import math
import os
import random
from string import ascii_lowercase
import psutil
import torch
import torchvision
import torchvision.transforms.functional as F
from PIL import Image
from torch.utils.data import DataLoader, Dataset
from torchvision import transforms
from src.data.transforms import Crop, StatefulRandomHorizontalFlip
class LRS2Dataset(Dataset):
def __init__(self, path, mode, in_channels=1, max_timesteps=100, max_text_len=200, pretrain_words=0, pretrain=False, augmentations=False):
assert mode in ['train', 'val', 'test']
self.max_timesteps = max_timesteps
self.pretrain = pretrain
self.in_channels = in_channels
self.max_timesteps = max_timesteps
self.augmentations = augmentations if mode in ['train', 'pretrain'] else False
self.skip_long_samples = True
self.max_text_len = max_text_len
self.pretrain_words = pretrain_words
self.file_paths, self.file_names, self.crops = self.build_file_list(path, mode)
self.dictionary = self.build_dictionary(path)
self.char_list = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T',
'U', 'V', 'W', 'X', 'Y', 'Z', '1', '2', '3', '4', '5', '6', '7', '8', '9', '0', '<sos>', '<eos>', '<pad>', '\'', ' ']
self.int2char = dict(enumerate(self.char_list))
self.char2int = {char: index for index, char in self.int2char.items()}
def build_dictionary(self, directory):
dictionary = set()
file = open(f"{directory}/train.txt", "r")
for file in file.readlines():
file = file.split(" ")[0].strip()
path = f"{directory}/mvlrs_v1/main/{file}.txt"
content = open(path, "r").read()
sentence = content.splitlines()[0][7:]
words = sentence.split(" ")
dictionary.update(words)
return list(dictionary)
def build_file_list(self, directory, mode):
file_list, paths = [], []
crops = {}
skipped_samples = 0
if self.pretrain:
path = f"data/preprocess/lrs2/pretrain_crop.txt"
else:
path = f"data/preprocess/lrs2/{mode}_crop.txt"
file = open(path, "r")
content = file.read()
for i, line in enumerate(content.splitlines()):
split = line.split(":")
file = split[0]
crop_str = split[1]
crops[file] = crop_str
if self.pretrain:
file = open(f"{directory}/pretrain.txt", "r")
content = file.read()
for file in content.splitlines():
if file in crops:
file_list.append(file)
paths.append(f"{directory}/mvlrs_v1/pretrain/{file}")
else:
file = open(f"{directory}/{mode}.txt", "r")
content = file.read()
for file in content.splitlines():
file = file.split(" ")[0]
if file not in crops:
continue
if self.skip_long_samples:
if crops[file].count("|") < self.max_timesteps:
file_list.append(file)
paths.append(f"{directory}/mvlrs_v1/main/{file}")
else:
skipped_samples += 1
else:
file_list.append(file)
paths.append(f"{directory}/mvlrs_v1/main/{file}")
if self.skip_long_samples:
print(f"Skipped {skipped_samples} too long samples")
return paths, file_list, crops
def build_tensor(self, frames, crops):
crops = [crop.split(";") for crop in crops]
for i, crop_frame in enumerate(crops):
crop = [float(crop) for crop in crop_frame]
crops[i] = crop
if(self.augmentations):
augmentations = transforms.Compose([
StatefulRandomHorizontalFlip(0.5),
])
else:
augmentations = transforms.Compose([])
temporalVolume = torch.zeros(self.max_timesteps, self.in_channels, 64, 96)
for i, frame in enumerate(frames):
if self.in_channels == 1:
transform = transforms.Compose([
transforms.ToPILImage(),
Crop(crops[i]),
augmentations,
transforms.Grayscale(num_output_channels=1),
transforms.ToTensor(),
transforms.Normalize([0.4161, ], [0.1688, ]),
])
elif self.in_channels == 3:
transform = transforms.Compose([
transforms.ToPILImage(),
Crop(crops[i]),
augmentations,
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
])
temporalVolume[i] = transform(frame)
temporalVolume = temporalVolume.transpose(1, 0) # (C, D, H, W)
return temporalVolume
def __len__(self):
return len(self.file_paths)
def get_pretrain_words(self, content):
assert self.pretrain_words > 0
lines = content.splitlines()[4:]
words = []
for line in lines:
word, start, stop, _ = line.split(" ")
start, stop = float(start), float(stop)
words.append([word, start, stop])
num_words = min(random.randint(max(self.pretrain_words - 1, 1), self.pretrain_words), len(words))
word_start = random.randint(0, len(words) - num_words)
word_end = word_start + num_words
sample_start = 0
sample_end = 0
content = ""
for word in words[word_start:word_end]:
word, start, end = word
if sample_start == 0:
sample_start = start
if end > sample_end:
sample_end = end
content = content + " " + word
return content, sample_start, sample_end
def __getitem__(self, idx):
file = self.file_names[idx]
file_path = self.file_paths[idx]
content = open(file_path + ".txt", "r").read()
frame_crops = self.crops[file].split("|")
start_sec = 0
stop_sec = None
if self.pretrain:
content, start_sec, stop_sec = self.get_pretrain_words(content)
else:
content = content.splitlines()[0][7:]
crop = frame_crops
video, _, info = torchvision.io.read_video(file_path + ".mp4", start_pts=start_sec, end_pts=stop_sec, pts_unit='sec') # T, H, W, C
video = video.permute(0, 3, 1, 2) # T C H W
num_frames = video.size(0)
if num_frames > self.max_timesteps:
print(f"Cutting frames off. Requires {len(video)} frames: {file}")
video = video[:self.max_timesteps]
num_frames = video.size(0)
if self.pretrain:
fps = info['video_fps']
start_frame = int(start_sec * fps)
crop = frame_crops[start_frame:start_frame + num_frames]
crop = crop[:self.max_timesteps]
assert num_frames <= self.max_timesteps, f"Video too large with {num_frames} frames: {file_path}"
content = content.strip().upper()
assert len(crop) == num_frames
assert len(content) >= 1
frames = self.build_tensor(video, crop)
encoded = self.encode(content)
return frames, num_frames, encoded
def encode(self, content):
encoded = [self.char2int[c] for c in content] + [self.char2int['<eos>']]
if len(encoded) > self.max_text_len:
print(f"Max output length too short. Required {len(encoded)}")
encoded = encoded[:self.max_text_len]
encoded += [self.char2int['<pad>'] for _ in range(self.max_text_len - len(encoded))]
return torch.Tensor(encoded) | 0.696991 | 0.300157 |
import xdl
import unittest
import numpy as np
import sys
from xdl.python.lib.datatype import *
from xdl.python.lib.graph import execute
try:
from xdl.python.backend.mxnet.mxnet_backend import *
except ImportError:
sys.exit(0)
def main():
dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
gear = xdl.mock_dense_op(shape=[1, 1], value=0.01, name_="gear")
labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
gear.set_shape([1, 1])
dense.set_shape([1, 16])
labels.set_shape([1, 1])
with xdl.model_scope("ams_main"):
loss = ams_main(main_model)(dense, labels, gear_inputs=[gear])
sess = xdl.TrainSession()
return sess.run([xdl.get_collection("gear_grad")])
def gear():
forward = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="forward")
backward = xdl.mock_dense_op(shape=[1, 16], value=0.02, name_="backward")
labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label1")
init_grad = xdl.mock_dense_op(shape=[1, 1], value=0.3, name_="init_grad")
forward.set_shape([1, 16])
backward.set_shape([1, 16])
labels.set_shape([1, 1])
init_grad.set_shape([1, 1])
predict = ams_gear([forward], [backward], init_grad)(gear_model)(None)
with xdl.model_scope("ams_gear_forward"):
sess = xdl.TrainSession()
prediction = sess.run(predict)
with xdl.model_scope("ams_gear_backward"):
grads = xdl.get_gradient("fc_weight")
sess = xdl.TrainSession()
fc_weight_grad = sess.run(grads)
return prediction, fc_weight_grad
def main_model(dense, label, gear_inputs):
weight = mx.sym.var(name='fc_weight', init=mx.init.Constant(0.1))
fc = mx.symbol.FullyConnected(data=dense, num_hidden=1, weight=weight, name="fc")
logits = fc + gear_inputs[0]
return mx.sym.MakeLoss(logits)
def gear_model(inputs):
weight = mx.sym.var(name='fc_weight', init=mx.init.Constant(0.1))
fc = mx.sym.FullyConnected(data=inputs, num_hidden=1, name='fc', weight=weight)
return fc
class MxnetBackendTest(unittest.TestCase):
def test_ams_main(self):
gear_gradient = main()
self.assertTrue(gear_gradient[0]==np.array([[1.0]], dtype=np.float32))
def test_ams_gear(self):
prediction, grad = gear()
self.assertTrue((prediction==np.array([[0.016]], dtype=np.float32)).all())
self.assertTrue((grad==np.repeat(np.array([0.006], dtype=np.float32), 16).reshape(1,16)).all())
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MxnetBackendTest)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite()) | xdl/test/python/unit_test/backend/mxnet_backend_test.py |
import xdl
import unittest
import numpy as np
import sys
from xdl.python.lib.datatype import *
from xdl.python.lib.graph import execute
try:
from xdl.python.backend.mxnet.mxnet_backend import *
except ImportError:
sys.exit(0)
def main():
dense = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="dense")
gear = xdl.mock_dense_op(shape=[1, 1], value=0.01, name_="gear")
labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label")
gear.set_shape([1, 1])
dense.set_shape([1, 16])
labels.set_shape([1, 1])
with xdl.model_scope("ams_main"):
loss = ams_main(main_model)(dense, labels, gear_inputs=[gear])
sess = xdl.TrainSession()
return sess.run([xdl.get_collection("gear_grad")])
def gear():
forward = xdl.mock_dense_op(shape=[1, 16], value=0.01, name_="forward")
backward = xdl.mock_dense_op(shape=[1, 16], value=0.02, name_="backward")
labels = xdl.mock_dense_op(shape=[1, 1], value=1.0, name_="label1")
init_grad = xdl.mock_dense_op(shape=[1, 1], value=0.3, name_="init_grad")
forward.set_shape([1, 16])
backward.set_shape([1, 16])
labels.set_shape([1, 1])
init_grad.set_shape([1, 1])
predict = ams_gear([forward], [backward], init_grad)(gear_model)(None)
with xdl.model_scope("ams_gear_forward"):
sess = xdl.TrainSession()
prediction = sess.run(predict)
with xdl.model_scope("ams_gear_backward"):
grads = xdl.get_gradient("fc_weight")
sess = xdl.TrainSession()
fc_weight_grad = sess.run(grads)
return prediction, fc_weight_grad
def main_model(dense, label, gear_inputs):
weight = mx.sym.var(name='fc_weight', init=mx.init.Constant(0.1))
fc = mx.symbol.FullyConnected(data=dense, num_hidden=1, weight=weight, name="fc")
logits = fc + gear_inputs[0]
return mx.sym.MakeLoss(logits)
def gear_model(inputs):
weight = mx.sym.var(name='fc_weight', init=mx.init.Constant(0.1))
fc = mx.sym.FullyConnected(data=inputs, num_hidden=1, name='fc', weight=weight)
return fc
class MxnetBackendTest(unittest.TestCase):
def test_ams_main(self):
gear_gradient = main()
self.assertTrue(gear_gradient[0]==np.array([[1.0]], dtype=np.float32))
def test_ams_gear(self):
prediction, grad = gear()
self.assertTrue((prediction==np.array([[0.016]], dtype=np.float32)).all())
self.assertTrue((grad==np.repeat(np.array([0.006], dtype=np.float32), 16).reshape(1,16)).all())
def suite():
return unittest.TestLoader().loadTestsFromTestCase(MxnetBackendTest)
if __name__ == '__main__':
unittest.TextTestRunner().run(suite()) | 0.392453 | 0.407776 |
import numpy as np
from utils.unit_conversions import db_to_lin, lin_to_db
from utils import constants
import atm
def get_thermal_noise(bandwidth_hz, noise_figure_db=0, temp_ext_k=0):
"""
N = thermal_noise(bw,nf,t_ext)
Compute the total noise power, given the receiver's noise bandwidth, noise figure, and external noise temperature.
Ported from MATLAB Code
<NAME>
15 March 2021
:param bandwidth_hz: Receiver noise bandwidth [Hz]
:param noise_figure_db: Receiver noise figure [dB] (DEFAULT = 0 dB)
:param temp_ext_k: External noise temp [K] (DEFAULT = 0 K)
:return: Thermal noise power [dBW]
"""
# Add the external noise temp to the reference temp (270 K)
temp = constants.ref_temp + temp_ext_k
# Boltzmann's Constant
k = constants.boltzmann
# Equation (D.6)
return lin_to_db(k * temp * bandwidth_hz) + noise_figure_db
def get_atmospheric_noise_temp(freq_hz, alt_start_m=0, el_angle_deg=90):
"""
Computes the noise temperature contribution from the reradaition of
energy absorbed by the atmosphere in the direction of the antenna's
mainlobe.
Ported from MATLAB code.
<NAME>
15 March 2021
:param freq_hz: Frequency [Hz]
:param alt_start_m: Altitude of receiver [m]
:param el_angle_deg: Elevation angle of receive mainbeam [degrees above local ground plane]
:return: Atmospheric noise temperature [K]
"""
# Assume integrated antenna gain is unity
alpha_a = 1
# Compute zenith loss along main propagation path
zenith_angle_rad = (90-el_angle_deg)*np.pi/180
loss_db = atm.calc_zenith_loss(freq_hz,alt_start_m,zenith_angle_rad)
loss_lin = db_to_lin(loss_db)
# Compute average atmospheric temp
alt_bands = np.arange(start=alt_start_m, stop=100.0e3+100, step=100)
atmosphere = atm.make_standard_atmosphere(alt_bands)
t_atmos = np.mean(atmosphere.temp)
# t_atmos = utils.constants.T0;
# Equation D.12
return alpha_a * t_atmos * (1-1/loss_lin)
def get_sun_noise_temp(freq_hz):
"""
Returns the noise temp (in Kelvin) for the sun at the specified
frequency f (in Hertz). f can be a scalar, or N-dimensional matrix.
Assumes a quiet sun, and represents a rough approximation from ITU
documentation on radio noise. Sun noise can be several orders of
magnitude larger during solar disturbances.
Ref: Rec. ITU-R P.372-14
Ported from MATLAB Code
<NAME>
15 March 2021
:param freq_hz: Carrier frequency [Hz]
:return: Sun noise temp [K]
"""
# Based on a visual reading on Figure 12 and the corresponding text
f_ghz = np.hstack((np.array([.05, .2]), np.arange(start=1, stop=10, step=1),
np.arange(start=10, step=10, stop=110)))
t_ref = np.asarray([1e6, 1e6, 2e5, 9e4, 4.5e4, 2.9e4, 2e4, 1.6e4, 1.4e4, 1.3e4, 1.2e4, 1e4, 7e3, 6.3e3,
6.2e3, 6e3, 6e3, 6e3, 6e3, 6e3, 6e3])
# Perform linear interpolation
return np.interp(xp=f_ghz, yp=t_ref, x=freq_hz/1e9, left=0, right=0)
def get_moon_noise_temp():
"""
Returns the noise temp (in Kelvin) for the moon.
The moon noise temp is fairly constant across spectrum, with ~140 K during new moon phase and ~280 K during at full
moon. Using the arithmatic mean here as an approximate value.
Ported from MATLAB Code
Ref: Rec. ITU-R P.372-8
<NAME>
15 March 2021
:return: Moon noise temp [K]
"""
return (140 + 280)/2
def get_cosmic_noise_temp(freq_hz, rx_alt_m=0, alpha_c=0.95, gain_sun_dbi=-np.inf, gain_moon_dbi=-np.inf):
"""
Computes the combined cosmic noise temperature, including contributions from the sun, the moon, and the galactic
background. Includes approximate effect of atmospheric loss (sun and moon are treated as as coming from zenith;
rather than their true angles.
Ported from MATLAB Code
<NAME>
15 March 2021
:param freq_hz: Carrier frequency [Hz]
:param rx_alt_m: Receiver altitude [m]
:param alpha_c: Fraction of the antenna's receive pattern that is above the horizon [0-1]
:param gain_sun_dbi: Antenna gain directed at the sun [dBi]
:param gain_moon_dbi: Antenna gain directed at the moon [dBi]
:return: Combined cosmic noise temperature [K]
"""
# Compute Raw Noise Temp
temp_100_mhz = 3050 # Geometric mean of 100 MHz noise spectrum samples
temp_cosmic = temp_100_mhz * (100e6 / freq_hz) ** 2.5 + 2.7
temp_sun = get_sun_noise_temp(freq_hz)
temp_moon = get_moon_noise_temp()
# Above 2 GHz, the only contribution is from cosmic background radiation
# (2.7 K), which is essentially negligible.
high_freq_mask = freq_hz >= 2e9
np.place(temp_cosmic, high_freq_mask, 2.7)
# Apply Antenna Patterns
gain_sun_lin = db_to_lin(gain_sun_dbi)
gain_moon_lin = db_to_lin(gain_moon_dbi)
init_temp = (temp_cosmic * alpha_c) + (temp_sun * 4.75e-6 * gain_sun_lin) + (temp_moon * 4.75e-6 * gain_moon_lin)
# Compute Atmospheric Losses for Zenith Path at pi/4 (45 deg from zenith)
zenith_loss_db = np.reshape(atm.calc_zenith_loss(freq_hz, rx_alt_m, np.pi / 4), np.shape(freq_hz))
zenith_loss_lin = db_to_lin(zenith_loss_db)
# Apply Atmospheric Loss to combined galactic noise temp
return init_temp / zenith_loss_lin
def get_ground_noise_temp(ant_gain_ground_dbi=-5, ground_emissivity=1, angular_area=np.pi):
"""
Compute the combined noise temperature from ground effects; predominantly caused by reradiation of thermal energy
from the sun.
Ported from MATLAB Code
<NAME>
15 March 2021
:param ant_gain_ground_dbi: Average antenna gain in direction of the ground [dBi] (DEFAULT = -5 dBi)
:param ground_emissivity: Emissivity of ground (Default = 1)
:param angular_area: Area (in steradians) of ground as visible from antenna (DEFAULT = pi)
:return: Ground noise temperature [K]
"""
# Convert average ground antenna gain to linear units
gain_lin = db_to_lin(ant_gain_ground_dbi)
# Assume ground temp is 290 K (ref temp)
thermal_temp_ground = constants.ref_temp
# Compute ground noise temp according to (D.13)
return angular_area * gain_lin * ground_emissivity * thermal_temp_ground / (4*np.pi) | noise/model.py | import numpy as np
from utils.unit_conversions import db_to_lin, lin_to_db
from utils import constants
import atm
def get_thermal_noise(bandwidth_hz, noise_figure_db=0, temp_ext_k=0):
"""
N = thermal_noise(bw,nf,t_ext)
Compute the total noise power, given the receiver's noise bandwidth, noise figure, and external noise temperature.
Ported from MATLAB Code
<NAME>
15 March 2021
:param bandwidth_hz: Receiver noise bandwidth [Hz]
:param noise_figure_db: Receiver noise figure [dB] (DEFAULT = 0 dB)
:param temp_ext_k: External noise temp [K] (DEFAULT = 0 K)
:return: Thermal noise power [dBW]
"""
# Add the external noise temp to the reference temp (270 K)
temp = constants.ref_temp + temp_ext_k
# Boltzmann's Constant
k = constants.boltzmann
# Equation (D.6)
return lin_to_db(k * temp * bandwidth_hz) + noise_figure_db
def get_atmospheric_noise_temp(freq_hz, alt_start_m=0, el_angle_deg=90):
"""
Computes the noise temperature contribution from the reradaition of
energy absorbed by the atmosphere in the direction of the antenna's
mainlobe.
Ported from MATLAB code.
<NAME>
15 March 2021
:param freq_hz: Frequency [Hz]
:param alt_start_m: Altitude of receiver [m]
:param el_angle_deg: Elevation angle of receive mainbeam [degrees above local ground plane]
:return: Atmospheric noise temperature [K]
"""
# Assume integrated antenna gain is unity
alpha_a = 1
# Compute zenith loss along main propagation path
zenith_angle_rad = (90-el_angle_deg)*np.pi/180
loss_db = atm.calc_zenith_loss(freq_hz,alt_start_m,zenith_angle_rad)
loss_lin = db_to_lin(loss_db)
# Compute average atmospheric temp
alt_bands = np.arange(start=alt_start_m, stop=100.0e3+100, step=100)
atmosphere = atm.make_standard_atmosphere(alt_bands)
t_atmos = np.mean(atmosphere.temp)
# t_atmos = utils.constants.T0;
# Equation D.12
return alpha_a * t_atmos * (1-1/loss_lin)
def get_sun_noise_temp(freq_hz):
"""
Returns the noise temp (in Kelvin) for the sun at the specified
frequency f (in Hertz). f can be a scalar, or N-dimensional matrix.
Assumes a quiet sun, and represents a rough approximation from ITU
documentation on radio noise. Sun noise can be several orders of
magnitude larger during solar disturbances.
Ref: Rec. ITU-R P.372-14
Ported from MATLAB Code
<NAME>
15 March 2021
:param freq_hz: Carrier frequency [Hz]
:return: Sun noise temp [K]
"""
# Based on a visual reading on Figure 12 and the corresponding text
f_ghz = np.hstack((np.array([.05, .2]), np.arange(start=1, stop=10, step=1),
np.arange(start=10, step=10, stop=110)))
t_ref = np.asarray([1e6, 1e6, 2e5, 9e4, 4.5e4, 2.9e4, 2e4, 1.6e4, 1.4e4, 1.3e4, 1.2e4, 1e4, 7e3, 6.3e3,
6.2e3, 6e3, 6e3, 6e3, 6e3, 6e3, 6e3])
# Perform linear interpolation
return np.interp(xp=f_ghz, yp=t_ref, x=freq_hz/1e9, left=0, right=0)
def get_moon_noise_temp():
"""
Returns the noise temp (in Kelvin) for the moon.
The moon noise temp is fairly constant across spectrum, with ~140 K during new moon phase and ~280 K during at full
moon. Using the arithmatic mean here as an approximate value.
Ported from MATLAB Code
Ref: Rec. ITU-R P.372-8
<NAME>
15 March 2021
:return: Moon noise temp [K]
"""
return (140 + 280)/2
def get_cosmic_noise_temp(freq_hz, rx_alt_m=0, alpha_c=0.95, gain_sun_dbi=-np.inf, gain_moon_dbi=-np.inf):
"""
Computes the combined cosmic noise temperature, including contributions from the sun, the moon, and the galactic
background. Includes approximate effect of atmospheric loss (sun and moon are treated as as coming from zenith;
rather than their true angles.
Ported from MATLAB Code
<NAME>
15 March 2021
:param freq_hz: Carrier frequency [Hz]
:param rx_alt_m: Receiver altitude [m]
:param alpha_c: Fraction of the antenna's receive pattern that is above the horizon [0-1]
:param gain_sun_dbi: Antenna gain directed at the sun [dBi]
:param gain_moon_dbi: Antenna gain directed at the moon [dBi]
:return: Combined cosmic noise temperature [K]
"""
# Compute Raw Noise Temp
temp_100_mhz = 3050 # Geometric mean of 100 MHz noise spectrum samples
temp_cosmic = temp_100_mhz * (100e6 / freq_hz) ** 2.5 + 2.7
temp_sun = get_sun_noise_temp(freq_hz)
temp_moon = get_moon_noise_temp()
# Above 2 GHz, the only contribution is from cosmic background radiation
# (2.7 K), which is essentially negligible.
high_freq_mask = freq_hz >= 2e9
np.place(temp_cosmic, high_freq_mask, 2.7)
# Apply Antenna Patterns
gain_sun_lin = db_to_lin(gain_sun_dbi)
gain_moon_lin = db_to_lin(gain_moon_dbi)
init_temp = (temp_cosmic * alpha_c) + (temp_sun * 4.75e-6 * gain_sun_lin) + (temp_moon * 4.75e-6 * gain_moon_lin)
# Compute Atmospheric Losses for Zenith Path at pi/4 (45 deg from zenith)
zenith_loss_db = np.reshape(atm.calc_zenith_loss(freq_hz, rx_alt_m, np.pi / 4), np.shape(freq_hz))
zenith_loss_lin = db_to_lin(zenith_loss_db)
# Apply Atmospheric Loss to combined galactic noise temp
return init_temp / zenith_loss_lin
def get_ground_noise_temp(ant_gain_ground_dbi=-5, ground_emissivity=1, angular_area=np.pi):
"""
Compute the combined noise temperature from ground effects; predominantly caused by reradiation of thermal energy
from the sun.
Ported from MATLAB Code
<NAME>
15 March 2021
:param ant_gain_ground_dbi: Average antenna gain in direction of the ground [dBi] (DEFAULT = -5 dBi)
:param ground_emissivity: Emissivity of ground (Default = 1)
:param angular_area: Area (in steradians) of ground as visible from antenna (DEFAULT = pi)
:return: Ground noise temperature [K]
"""
# Convert average ground antenna gain to linear units
gain_lin = db_to_lin(ant_gain_ground_dbi)
# Assume ground temp is 290 K (ref temp)
thermal_temp_ground = constants.ref_temp
# Compute ground noise temp according to (D.13)
return angular_area * gain_lin * ground_emissivity * thermal_temp_ground / (4*np.pi) | 0.911642 | 0.610395 |
# --- imports -----------------------------------------------------------------
import torch.nn as nn
import tensorflow as tf
from network.wrappers.NetworkBase import NetworkBase
class ResNet(NetworkBase):
def __init__(self, network_type, loss, accuracy, lr, framework, training, trainable_layers=None, num_filters=64,
optimizer='adam', nonlin='relu', num_classes=2):
"""
ResNet Convolutional Neural Network constructor
:param loss: used loss function
:param lr: learning rate
:param training: is training True/False
:param num_filters: number of filters
:param optimizer: used optimizer
:param nonlin: used nonliniearity
:param num_classes: number of classes/labels
"""
super().__init__(network_type=network_type, loss=loss, accuracy=accuracy, framework=framework, lr=lr, training=training,
trainable_layers=trainable_layers, num_filters=num_filters, optimizer=optimizer, nonlin=nonlin,
num_classes=num_classes)
self.weights, self.biases, self.nets = [], [], []
def build_net(self, X):
"""
Build the ResNet Convolutional Neural Network
:param X: input tensor
:return: network
"""
# Stage 1
with tf.name_scope('s_stage_1'):
conv = tf.layers.conv2d(X, filters=self.num_filters, kernel_size=3, name='conv_1_1', padding='same')
batch_norm = tf.layers.batch_normalization(conv, training=self.is_training, fused=False,
name='batch_1_1')
nonlin = self.nonlin_f(batch_norm, name='activation_1_1')
# Stage 2
with tf.name_scope('s_stage_2'):
conv_2_1 = self._basic_block(nonlin, filters=64, i=21, strides=1)
conv_2_2 = self._basic_block(conv_2_1, filters=64, i=22, strides=1)
# Stage 3
with tf.name_scope('s_stage_3'):
conv_3_1 = self._basic_block(conv_2_2, filters=128, i=31, strides=2)
conv_3_2 = self._basic_block(conv_3_1, filters=128, i=32, strides=1)
# Stage 4
with tf.name_scope('s_stage_4'):
conv_4_1 = self._basic_block(conv_3_2, filters=256, i=41, strides=2)
conv_4_2 = self._basic_block(conv_4_1, filters=256, i=42, strides=1)
# Stage 5
with tf.name_scope('s_stage_5'):
conv_5_1 = self._basic_block(conv_4_2, filters=512, i=51, strides=2)
conv_5_2 = self._basic_block(conv_5_1, filters=512, i=52, strides=1)
# Output Layer
with tf.name_scope('s_outputs'):
pooling_1 = tf.layers.average_pooling2d(conv_5_2, pool_size=2, strides=3, padding='valid', name='pooling_1')
flat = tf.layers.flatten(pooling_1, name='flatten')
output_p = tf.layers.dense(flat, units=self.num_classes, name='output', activation='softmax')
return output_p
def _basic_block(self, X, filters, i, strides):
# Retrieve Filters
conv_x_1 = tf.layers.conv2d(X, filters=filters, kernel_size=1, padding='same', name='conv_10_1_' + str(i),
strides=strides)
batch_norm_x_1 = tf.layers.batch_normalization(conv_x_1, training=self.is_training, fused=False,
name='batch_10_1_' + str(i))
nonlin = self.nonlin_f(batch_norm_x_1, name='activation_10_1_' + str(i))
conv_x_2 = tf.layers.conv2d(nonlin, filters=filters, kernel_size=3, padding='same', name='conv_10_2_' + str(i),
strides=1)
batch_norm_x_2 = tf.layers.batch_normalization(conv_x_2, training=self.is_training, fused=False,
name='batch_10_2_' + str(i))
shortcut = tf.layers.Layer()
if strides != 1 or X != filters:
shortcut = tf.layers.conv2d(X, filters=filters, kernel_size=1, padding='valid', name='conv_10_3_' + str(i),
strides=strides)
shortcut = tf.layers.batch_normalization(shortcut, training=self.is_training, fused=False,
name='batch_10_3_' + str(i))
output = batch_norm_x_2+shortcut
output = self.nonlin_f(output, name='activation_10_2_' + str(i))
return output
class ResNet_pt(NetworkBase, nn.Module):
def __init__(self, network_type, loss, accuracy, lr, framework, training, trainable_layers=None, num_filters=64,
optimizer='adam', nonlin='relu', num_classes=2):
NetworkBase.__init__(self, network_type=network_type, loss=loss, accuracy=accuracy, framework=framework, lr=lr, training=training,
trainable_layers=trainable_layers, num_filters=num_filters, optimizer=optimizer, nonlin=nonlin,
num_classes=num_classes)
nn.Module.__init__(self)
self.in_channels = 64
self.conv1 = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True))
self.conv2_x = self._make_layer(BasicBlock, 64, 2, 1)
self.conv3_x = self._make_layer(BasicBlock, 128, 2, 2)
self.conv4_x = self._make_layer(BasicBlock, 256, 2, 2)
self.conv5_x = self._make_layer(BasicBlock, 512, 2, 2)
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512, num_classes)
def _make_layer(self, block, out_channels, blocks, stride=1):
# we have num_block blocks per layer, the first block
# could be 1 or 2, other blocks would always be 1
strides = [stride] + [1] * (blocks - 1)
layers = []
for stride in strides:
layers.append(block(self.in_channels, out_channels, stride))
self.in_channels = out_channels
return nn.Sequential(*layers)
def forward(self, X):
output = self.conv1(X)
output = self.conv2_x(output)
output = self.conv3_x(output)
output = self.conv4_x(output)
output = self.conv5_x(output)
output = self.avg_pool(output)
output = output.view(output.size(0), -1)
output = self.fc(output)
return output
class BasicBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride=1):
super().__init__()
# residual function
self.residual_function = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
nn.BatchNorm2d(out_channels),
nn.ReLU(inplace=True),
nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
nn.BatchNorm2d(out_channels)
)
# shortcut
self.shortcut = nn.Sequential()
# the shortcut output dimension is not the same with residual function
# use 1*1 convolution to match the dimension
if stride != 1 or in_channels != out_channels:
self.shortcut = nn.Sequential(
nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(out_channels)
)
def forward(self, x):
return nn.ReLU(inplace=True)(self.residual_function(x) + self.shortcut(x)) | network/wrappers/ResNet.py |
# --- imports -----------------------------------------------------------------
import torch.nn as nn
import tensorflow as tf
from network.wrappers.NetworkBase import NetworkBase
class ResNet(NetworkBase):
def __init__(self, network_type, loss, accuracy, lr, framework, training, trainable_layers=None, num_filters=64,
optimizer='adam', nonlin='relu', num_classes=2):
"""
ResNet Convolutional Neural Network constructor
:param loss: used loss function
:param lr: learning rate
:param training: is training True/False
:param num_filters: number of filters
:param optimizer: used optimizer
:param nonlin: used nonliniearity
:param num_classes: number of classes/labels
"""
super().__init__(network_type=network_type, loss=loss, accuracy=accuracy, framework=framework, lr=lr, training=training,
trainable_layers=trainable_layers, num_filters=num_filters, optimizer=optimizer, nonlin=nonlin,
num_classes=num_classes)
self.weights, self.biases, self.nets = [], [], []
def build_net(self, X):
"""
Build the ResNet Convolutional Neural Network
:param X: input tensor
:return: network
"""
# Stage 1
with tf.name_scope('s_stage_1'):
conv = tf.layers.conv2d(X, filters=self.num_filters, kernel_size=3, name='conv_1_1', padding='same')
batch_norm = tf.layers.batch_normalization(conv, training=self.is_training, fused=False,
name='batch_1_1')
nonlin = self.nonlin_f(batch_norm, name='activation_1_1')
# Stage 2
with tf.name_scope('s_stage_2'):
conv_2_1 = self._basic_block(nonlin, filters=64, i=21, strides=1)
conv_2_2 = self._basic_block(conv_2_1, filters=64, i=22, strides=1)
# Stage 3
with tf.name_scope('s_stage_3'):
conv_3_1 = self._basic_block(conv_2_2, filters=128, i=31, strides=2)
conv_3_2 = self._basic_block(conv_3_1, filters=128, i=32, strides=1)
# Stage 4
with tf.name_scope('s_stage_4'):
conv_4_1 = self._basic_block(conv_3_2, filters=256, i=41, strides=2)
conv_4_2 = self._basic_block(conv_4_1, filters=256, i=42, strides=1)
# Stage 5
with tf.name_scope('s_stage_5'):
conv_5_1 = self._basic_block(conv_4_2, filters=512, i=51, strides=2)
conv_5_2 = self._basic_block(conv_5_1, filters=512, i=52, strides=1)
# Output Layer
with tf.name_scope('s_outputs'):
pooling_1 = tf.layers.average_pooling2d(conv_5_2, pool_size=2, strides=3, padding='valid', name='pooling_1')
flat = tf.layers.flatten(pooling_1, name='flatten')
output_p = tf.layers.dense(flat, units=self.num_classes, name='output', activation='softmax')
return output_p
def _basic_block(self, X, filters, i, strides):
# Retrieve Filters
conv_x_1 = tf.layers.conv2d(X, filters=filters, kernel_size=1, padding='same', name='conv_10_1_' + str(i),
strides=strides)
batch_norm_x_1 = tf.layers.batch_normalization(conv_x_1, training=self.is_training, fused=False,
name='batch_10_1_' + str(i))
nonlin = self.nonlin_f(batch_norm_x_1, name='activation_10_1_' + str(i))
conv_x_2 = tf.layers.conv2d(nonlin, filters=filters, kernel_size=3, padding='same', name='conv_10_2_' + str(i),
strides=1)
batch_norm_x_2 = tf.layers.batch_normalization(conv_x_2, training=self.is_training, fused=False,
name='batch_10_2_' + str(i))
shortcut = tf.layers.Layer()
if strides != 1 or X != filters:
shortcut = tf.layers.conv2d(X, filters=filters, kernel_size=1, padding='valid', name='conv_10_3_' + str(i),
strides=strides)
shortcut = tf.layers.batch_normalization(shortcut, training=self.is_training, fused=False,
name='batch_10_3_' + str(i))
output = batch_norm_x_2+shortcut
output = self.nonlin_f(output, name='activation_10_2_' + str(i))
return output
class ResNet_pt(NetworkBase, nn.Module):
    """PyTorch ResNet-18-style classifier: stem + 4 stages of 2 BasicBlocks,
    global average pool, linear head.

    Training configuration (loss, optimizer, lr, ...) comes from NetworkBase;
    parameter/module registration from nn.Module.
    """
    def __init__(self, network_type, loss, accuracy, lr, framework, training, trainable_layers=None, num_filters=64,
                 optimizer='adam', nonlin='relu', num_classes=2):
        # Both bases need explicit initialization (multiple inheritance with
        # incompatible __init__ signatures, so super() is not used).
        NetworkBase.__init__(self, network_type=network_type, loss=loss, accuracy=accuracy, framework=framework, lr=lr, training=training,
                             trainable_layers=trainable_layers, num_filters=num_filters, optimizer=optimizer, nonlin=nonlin,
                             num_classes=num_classes)
        nn.Module.__init__(self)
        # Running input-channel count consumed and updated by _make_layer.
        self.in_channels = 64
        self.conv1 = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True))
        # Stage 2 keeps resolution; stages 3-5 halve it (stride 2).
        self.conv2_x = self._make_layer(BasicBlock, 64, 2, 1)
        self.conv3_x = self._make_layer(BasicBlock, 128, 2, 2)
        self.conv4_x = self._make_layer(BasicBlock, 256, 2, 2)
        self.conv5_x = self._make_layer(BasicBlock, 512, 2, 2)
        self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512, num_classes)

    def _make_layer(self, block, out_channels, blocks, stride=1):
        # we have num_block blocks per layer, the first block
        # could be 1 or 2, other blocks would always be 1
        strides = [stride] + [1] * (blocks - 1)
        layers = []
        for stride in strides:
            layers.append(block(self.in_channels, out_channels, stride))
            self.in_channels = out_channels
        return nn.Sequential(*layers)

    def forward(self, X):
        # Stem -> 4 residual stages -> global average pool -> flatten -> head.
        # Note: returns raw logits (no softmax).
        output = self.conv1(X)
        output = self.conv2_x(output)
        output = self.conv3_x(output)
        output = self.conv4_x(output)
        output = self.conv5_x(output)
        output = self.avg_pool(output)
        output = output.view(output.size(0), -1)
        output = self.fc(output)
        return output
class BasicBlock(nn.Module):
    """Standard two-conv residual block.

    The shortcut is the identity when shape is preserved, otherwise a 1x1
    projection conv + batch norm that matches stride and channel count.
    """

    def __init__(self, in_channels, out_channels, stride=1):
        super().__init__()
        # Main path: 3x3 conv (possibly strided) -> BN -> ReLU -> 3x3 conv -> BN.
        self.residual_function = nn.Sequential(
            nn.Conv2d(in_channels, out_channels, kernel_size=3, stride=stride, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1, bias=False),
            nn.BatchNorm2d(out_channels),
        )
        needs_projection = stride != 1 or in_channels != out_channels
        if needs_projection:
            # 1x1 conv projection so the skip connection matches the main path.
            self.shortcut = nn.Sequential(
                nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(out_channels),
            )
        else:
            self.shortcut = nn.Sequential()

    def forward(self, x):
        merged = self.residual_function(x) + self.shortcut(x)
        return merged.relu()
import numpy as np
from skimage.io import imread, imsave
import os
import sys
def color_transfer(content_img, style_img):
    '''
    Transfer style image color to content image.

    Method described in https://arxiv.org/abs/1606.05897: an affine map in
    RGB space that matches the per-channel mean and covariance of the
    content image to those of the style image.

    Args:
        content_img: type=ndarray, shape=(Wc,Hc,C=3)
        style_img: type=ndarray, shape=(Ws,Hs,C=3)
    Returns:
        content_img_hat: content image with the color of style image,
            clipped to [0, 255] and cast to uint8. type=ndarray, shape=(Wc,Hc,C=3)
    Raises:
        ValueError: if either covariance matrix contains NaN or Inf.
    '''
    content_mat = np.transpose(content_img.reshape((-1, content_img.shape[-1])))  # ndarray, shape=(3, W*H)
    style_mat = np.transpose(style_img.reshape((-1, style_img.shape[-1])))
    assert content_mat.shape[0] == 3
    assert style_mat.shape[0] == 3
    content_cov = np.cov(content_mat)  # ndarray, shape=(3, 3)
    style_cov = np.cov(style_mat)
    content_mean = np.mean(content_mat, axis=-1)
    style_mean = np.mean(style_mat, axis=-1)
    for name, cov in (('content_cov', content_cov), ('style_cov', style_cov)):
        if np.isnan(cov).any():
            raise ValueError(name + ' as NaN')
        if np.isinf(cov).any():
            raise ValueError(name + ' as Inf')
    # eigh, not eig: covariance matrices are symmetric, so eigh is the proper
    # decomposition and guarantees real eigenvalues/eigenvectors (eig can
    # return complex results from rounding noise). Dead reconstruction
    # variables from the original (content_cov_rec, style_cov_rec,
    # content_hat_cov, content_hat_mean) have been removed.
    Sc, Uc = np.linalg.eigh(content_cov)
    Ss, Us = np.linalg.eigh(style_cov)
    assert (Sc >= 0).all()  # cov matrix should be semi-positive
    assert (Ss >= 0).all()
    # A = style_cov^(1/2) @ content_cov^(-1/2): whitens the content colors,
    # then colors them with the style statistics.
    A = (Us @ np.diag(Ss**0.5) @ Us.transpose()) @ \
        (Uc @ np.diag(Sc**(-0.5)) @ Uc.transpose()).transpose()
    b = style_mean - A @ content_mean
    new_mat = A @ content_mat + np.expand_dims(b, axis=1)  # ndarray, shape=(3, W*H)
    content_img_hat = new_mat.transpose().reshape(content_img.shape)
    # Clamp to the displayable range and convert to image dtype.
    content_img_hat = np.clip(content_img_hat, 0, 255).astype(np.uint8)
    return content_img_hat
def channel_tranfer(content_img, style_img):
    '''Per-channel transfer variant.

    NOTE(review): unimplemented placeholder (name has a typo: "tranfer");
    kept as-is since it may already be imported elsewhere.
    '''
    pass
def color_transfer_per_img(src, dest):
    '''Read the style image from `src` and the content image from `dest`,
    then return the content image recolored with the style palette.'''
    style_image = imread(src)
    content_image = imread(dest)
    return color_transfer(content_image, style_image)
def post_process_color_transfer(src_folder, dest_folder, save_folder):
    '''For every PNG in src_folder, transfer its colors onto the same-named
    PNG in dest_folder and write the result into save_folder.

    Raises:
        AssertionError: if a PNG in src_folder has no counterpart in dest_folder.
    '''
    # endswith('.png') instead of substring 'png' in fname: the old test also
    # matched names like "png_notes.txt".
    fnames = [f for f in os.listdir(src_folder) if f.endswith('.png')]
    # Hoist the destination listing out of the loop: the original re-listed
    # dest_folder once per file (O(n^2) directory scans).
    dest_names = set(os.listdir(dest_folder))
    for fname in fnames:
        assert fname in dest_names, "File Name Not Matching"
    os.makedirs(save_folder, exist_ok=True)
    for fname in fnames:
        src = os.path.join(src_folder, fname)
        dest = os.path.join(dest_folder, fname)
        new_img = color_transfer_per_img(src, dest)
        imsave(os.path.join(save_folder, fname), new_img)
if __name__ == "__main__":
    # CLI: colortransfer.py <src_folder(style)> <dest_folder(content)> <save_folder>
    src_folder = sys.argv[1]
    dest_folder = sys.argv[2]
    save_folder = sys.argv[3]
post_process_color_transfer(src_folder, dest_folder, save_folder) | AGD_ST/search/util_visual/colortransfer.py | import numpy as np
from skimage.io import imread, imsave
import os
import sys
def color_transfer(content_img, style_img):
    '''
    Transfer style image color to content image.

    Method described in https://arxiv.org/abs/1606.05897: an affine map in
    RGB space that matches the per-channel mean and covariance of the
    content image to those of the style image.

    Args:
        content_img: type=ndarray, shape=(Wc,Hc,C=3)
        style_img: type=ndarray, shape=(Ws,Hs,C=3)
    Returns:
        content_img_hat: content image with the color of style image,
            clipped to [0, 255] and cast to uint8. type=ndarray, shape=(Wc,Hc,C=3)
    Raises:
        ValueError: if either covariance matrix contains NaN or Inf.
    '''
    content_mat = np.transpose(content_img.reshape((-1, content_img.shape[-1])))  # ndarray, shape=(3, W*H)
    style_mat = np.transpose(style_img.reshape((-1, style_img.shape[-1])))
    assert content_mat.shape[0] == 3
    assert style_mat.shape[0] == 3
    content_cov = np.cov(content_mat)  # ndarray, shape=(3, 3)
    style_cov = np.cov(style_mat)
    content_mean = np.mean(content_mat, axis=-1)
    style_mean = np.mean(style_mat, axis=-1)
    for name, cov in (('content_cov', content_cov), ('style_cov', style_cov)):
        if np.isnan(cov).any():
            raise ValueError(name + ' as NaN')
        if np.isinf(cov).any():
            raise ValueError(name + ' as Inf')
    # eigh, not eig: covariance matrices are symmetric, so eigh is the proper
    # decomposition and guarantees real eigenvalues/eigenvectors (eig can
    # return complex results from rounding noise). Dead reconstruction
    # variables from the original (content_cov_rec, style_cov_rec,
    # content_hat_cov, content_hat_mean) have been removed.
    Sc, Uc = np.linalg.eigh(content_cov)
    Ss, Us = np.linalg.eigh(style_cov)
    assert (Sc >= 0).all()  # cov matrix should be semi-positive
    assert (Ss >= 0).all()
    # A = style_cov^(1/2) @ content_cov^(-1/2): whitens the content colors,
    # then colors them with the style statistics.
    A = (Us @ np.diag(Ss**0.5) @ Us.transpose()) @ \
        (Uc @ np.diag(Sc**(-0.5)) @ Uc.transpose()).transpose()
    b = style_mean - A @ content_mean
    new_mat = A @ content_mat + np.expand_dims(b, axis=1)  # ndarray, shape=(3, W*H)
    content_img_hat = new_mat.transpose().reshape(content_img.shape)
    # Clamp to the displayable range and convert to image dtype.
    content_img_hat = np.clip(content_img_hat, 0, 255).astype(np.uint8)
    return content_img_hat
def channel_tranfer(content_img, style_img):
pass
def color_transfer_per_img(src, dest):
img1 = imread(src)
img2 = imread(dest)
img2_new = color_transfer(img2, img1)
return img2_new
# imsave('new_img.png', img2_new)
def post_process_color_transfer(src_folder, dest_folder, save_folder):
    '''For every PNG in src_folder, transfer its colors onto the same-named
    PNG in dest_folder and write the result into save_folder.

    Raises:
        AssertionError: if a PNG in src_folder has no counterpart in dest_folder.
    '''
    # endswith('.png') instead of substring 'png' in fname: the old test also
    # matched names like "png_notes.txt".
    fnames = [f for f in os.listdir(src_folder) if f.endswith('.png')]
    # Hoist the destination listing out of the loop: the original re-listed
    # dest_folder once per file (O(n^2) directory scans).
    dest_names = set(os.listdir(dest_folder))
    for fname in fnames:
        assert fname in dest_names, "File Name Not Matching"
    os.makedirs(save_folder, exist_ok=True)
    for fname in fnames:
        src = os.path.join(src_folder, fname)
        dest = os.path.join(dest_folder, fname)
        new_img = color_transfer_per_img(src, dest)
        imsave(os.path.join(save_folder, fname), new_img)
if __name__ == "__main__":
src_folder = sys.argv[1]
dest_folder = sys.argv[2]
save_folder = sys.argv[3]
post_process_color_transfer(src_folder, dest_folder, save_folder) | 0.432782 | 0.422922 |
import numpy as np
import tensorflow as tf
from sklearn.neighbors import KDTree
from tqdm import tqdm
from config import read_config
from data_loader import DataLoader
from sincnet import create_print_maker
def check_norms(vectors):
    """Assert that every vector in `vectors` has (approximately) unit L2 norm."""
    norms = [np.linalg.norm(vec) for vec in vectors]
    # Checking the extremes covers every vector in between.
    for extreme in (min(norms), max(norms)):
        assert abs(1 - extreme) < 1e-6
def unite_equally_labeled_voiceprints(labels, voiceprints):
    """Average all voiceprints that share a label, then re-normalize.

    Args:
        labels: iterable of labels, aligned with `voiceprints`.
        voiceprints: iterable of unit-norm voiceprint vectors.
    Returns:
        (unique_labels, voiceprints): one L2-normalized mean voiceprint per
        label, in first-appearance order.
    """
    label_to_voiceprints = dict()
    for label, vp in zip(labels, voiceprints):
        # setdefault replaces the manual "if key not in dict" initialization.
        label_to_voiceprints.setdefault(label, []).append(vp)
    # Dicts preserve insertion order, so keys()/values() stay aligned.
    labels = list(label_to_voiceprints.keys())
    voiceprints = [np.mean(vps, axis=0) for vps in label_to_voiceprints.values()]
    # Averaging breaks unit length; project back onto the unit sphere.
    voiceprints = tf.math.l2_normalize(voiceprints, axis=1).numpy()
    check_norms(voiceprints)
    return labels, voiceprints
def make_path_voiceprints(model, dataset):
    """Predict a voiceprint per batch element and average them per audio path.

    Returns:
        (paths, voiceprints): unique paths and their L2-normalized mean
        voiceprints, in first-appearance order.
    """
    vp_sums = dict()
    vp_counts = dict()
    for path_batch, signal_batch, _ in tqdm(dataset):
        predictions = model.predict(signal_batch)
        for path, vp in zip(path_batch, predictions):
            if path not in vp_sums:
                vp_sums[path] = np.zeros(vp.shape)
                vp_counts[path] = 0
            vp_sums[path] += vp
            vp_counts[path] += 1
    paths = list(vp_sums.keys())
    voiceprints = [vp_sums[p] / vp_counts[p] for p in paths]
    # Averaging breaks unit length; normalize before returning.
    voiceprints = tf.math.l2_normalize(voiceprints, axis=1).numpy()
    check_norms(voiceprints)
    return paths, voiceprints
def calculate_accuracies(base_labels, base_voiceprints, test_labels, test_voiceprints, max_top):
    """Compute top-N accuracy for N = 1..max_top.

    A test voiceprint counts as a hit for N if its true label appears among
    the labels of its N nearest base voiceprints (Euclidean KDTree search).

    Returns:
        dict mapping N -> accuracy (float in [0, 1]).
    """
    assert len(base_labels) == len(base_voiceprints)
    assert len(test_labels) == len(test_voiceprints)
    base_labels = np.array(base_labels)
    test_labels = np.array(test_labels)
    base_voiceprints = np.array(base_voiceprints)
    kdtree = KDTree(base_voiceprints)
    top_to_accuracy = dict()
    for top in tqdm(range(1, max_top + 1)):
        # Indexes of the `top` nearest base voiceprints per test voiceprint.
        closest_indexes = kdtree.query(test_voiceprints, k=top, return_distance=False, sort_results=True)
        predicted_labels = np.array([base_labels[c] for c in closest_indexes])
        # Hit if the true label occurs anywhere in the top-N candidate list.
        accuracy = np.mean([test in predicted for test, predicted in zip(test_labels, predicted_labels)])
        top_to_accuracy[top] = accuracy
    return top_to_accuracy
def test(cfg, model, train_dataset, test_dataset):
    """Evaluate voiceprint retrieval: enroll on the train set, query with the
    test set, and return the dict of top-N accuracies."""
    enroll_paths, enroll_prints = make_path_voiceprints(model, train_dataset)
    enroll_labels = [cfg.path_to_label[p] for p in enroll_paths]
    base_labels, base_voiceprints = unite_equally_labeled_voiceprints(enroll_labels, enroll_prints)

    query_paths, test_voiceprints = make_path_voiceprints(model, test_dataset)
    test_labels = [cfg.path_to_label[p] for p in query_paths]

    return calculate_accuracies(base_labels, base_voiceprints,
                                test_labels, test_voiceprints, cfg.max_top)
def main():
    """Load a frozen print-maker model and print top-N retrieval accuracies."""
    cfg = read_config()
    model = create_print_maker(cfg)
    # Skip mismatch enables to load weights of networks with other head
    model.load_weights(cfg.checkpoint_file, by_name=True, skip_mismatch=True)
    # Inference only: freeze every layer.
    for layer in model.layers:
        layer.trainable = False
    data_loader = DataLoader(cfg)
    train_dataset = data_loader.make_test_iterable(cfg.train_list)
    test_dataset = data_loader.make_test_iterable(cfg.test_list)
    top_to_accuracy = test(cfg, model, train_dataset, test_dataset)
    # One "N accuracy" line per N, ascending.
    for top, acc in sorted(top_to_accuracy.items()):
        print(top, acc)
if __name__ == '__main__':
main() | test_print_maker.py | import numpy as np
import tensorflow as tf
from sklearn.neighbors import KDTree
from tqdm import tqdm
from config import read_config
from data_loader import DataLoader
from sincnet import create_print_maker
def check_norms(vectors):
norms = [np.linalg.norm(v) for v in vectors]
assert abs(1 - min(norms)) < 1e-6
assert abs(1 - max(norms)) < 1e-6
def unite_equally_labeled_voiceprints(labels, voiceprints):
    """Average all voiceprints that share a label, then re-normalize.

    Args:
        labels: iterable of labels, aligned with `voiceprints`.
        voiceprints: iterable of unit-norm voiceprint vectors.
    Returns:
        (unique_labels, voiceprints): one L2-normalized mean voiceprint per
        label, in first-appearance order.
    """
    label_to_voiceprints = dict()
    for label, vp in zip(labels, voiceprints):
        # setdefault replaces the manual "if key not in dict" initialization.
        label_to_voiceprints.setdefault(label, []).append(vp)
    # Dicts preserve insertion order, so keys()/values() stay aligned.
    labels = list(label_to_voiceprints.keys())
    voiceprints = [np.mean(vps, axis=0) for vps in label_to_voiceprints.values()]
    # Averaging breaks unit length; project back onto the unit sphere.
    voiceprints = tf.math.l2_normalize(voiceprints, axis=1).numpy()
    check_norms(voiceprints)
    return labels, voiceprints
def make_path_voiceprints(model, dataset):
path_to_voiceprint_sum = dict()
path_to_voiceprint_count = dict()
for path_batch, signal_batch, _ in tqdm(dataset):
voiceprint_batch = model.predict(signal_batch)
for p, v in zip(path_batch, voiceprint_batch):
if p not in path_to_voiceprint_sum:
path_to_voiceprint_sum[p] = np.zeros(v.shape)
path_to_voiceprint_count[p] = 0
path_to_voiceprint_sum[p] += v
path_to_voiceprint_count[p] += 1
paths = []
voiceprints = []
for path in path_to_voiceprint_sum:
paths.append(path)
v = path_to_voiceprint_sum[path] / path_to_voiceprint_count[path]
voiceprints.append(v)
voiceprints = tf.math.l2_normalize(voiceprints, axis=1).numpy()
check_norms(voiceprints)
return paths, voiceprints
def calculate_accuracies(base_labels, base_voiceprints, test_labels, test_voiceprints, max_top):
assert len(base_labels) == len(base_voiceprints)
assert len(test_labels) == len(test_voiceprints)
base_labels = np.array(base_labels)
test_labels = np.array(test_labels)
base_voiceprints = np.array(base_voiceprints)
kdtree = KDTree(base_voiceprints)
top_to_accuracy = dict()
for top in tqdm(range(1, max_top + 1)):
closest_indexes = kdtree.query(test_voiceprints, k=top, return_distance=False, sort_results=True)
predicted_labels = np.array([base_labels[c] for c in closest_indexes])
accuracy = np.mean([test in predicted for test, predicted in zip(test_labels, predicted_labels)])
top_to_accuracy[top] = accuracy
return top_to_accuracy
def test(cfg, model, train_dataset, test_dataset):
paths, voiceprints = make_path_voiceprints(model, train_dataset)
labels = [cfg.path_to_label[p] for p in paths]
base_labels, base_voiceprints = unite_equally_labeled_voiceprints(labels, voiceprints)
paths, test_voiceprints = make_path_voiceprints(model, test_dataset)
test_labels = [cfg.path_to_label[p] for p in paths]
top_to_accuracy = calculate_accuracies(
base_labels,
base_voiceprints,
test_labels,
test_voiceprints,
cfg.max_top
)
return top_to_accuracy
def main():
cfg = read_config()
model = create_print_maker(cfg)
# Skip mismatch enables to load weights of networks with other head
model.load_weights(cfg.checkpoint_file, by_name=True, skip_mismatch=True)
for layer in model.layers:
layer.trainable = False
data_loader = DataLoader(cfg)
train_dataset = data_loader.make_test_iterable(cfg.train_list)
test_dataset = data_loader.make_test_iterable(cfg.test_list)
top_to_accuracy = test(cfg, model, train_dataset, test_dataset)
for top, acc in sorted(top_to_accuracy.items()):
print(top, acc)
if __name__ == '__main__':
main() | 0.663342 | 0.49939 |
from boto.s3.key import Key
from sdk_release_tools import log
from sdk_release_tools.versions import parse_major_minor
import os
__all__ = ['Delete', 'Download', 'Pin', 'Unpin', 'Upload', 'delete',
'download', 'pin', 'unpin', 'upload']
def absolute(root):
    """Return `root` as an absolute path, resolving relative paths against
    the current working directory."""
    if os.path.isabs(root):
        return root
    return os.path.join(os.getcwd(), root)
class Context(object):
    """State threaded through every op: the local root directory, template
    variables for key interpolation, the S3 bucket (plus its website routing
    rules), and behavioral flags."""

    def __init__(self, root=None, variables=None, bucket=None, dry_run=True, silent=False,
                 copy_on_pin=False):
        self.root = root
        self.variables = variables or {}
        self.bucket = bucket
        # Routing rules are only available when a bucket was supplied.
        if bucket:
            self.rules = bucket.get_website_configuration_obj().routing_rules
        else:
            self.rules = None
        self.dry_run = dry_run
        self.silent = silent
        self.copy_on_pin = copy_on_pin

    def absolute(self, key):
        """
        Get the absolute path to a key prepended by this Context's root, and
        interpolate any variables.
        """
        path = self.relative(key)
        if self.root:
            path = os.path.join(self.root, path)
        return absolute(path)

    def relative(self, key):
        """
        Get the relative path to a key, and interpolate any variables.
        """
        return key.format(**self.variables)
class Ops(object):
    """Base class for tree-driven operations.

    `tree` maps keys to values; keys ending in '/' are treated as directory
    entries, everything else as files. Subclasses override `_op_dir` /
    `_op_file`; the base implementations are no-ops that pass the context
    through unchanged.
    """

    def __init__(self, tree):
        self.tree = tree

    def _fold(self, context, tree=None):
        # An explicit tree argument (when truthy) overrides self.tree.
        for key, value in (tree or self.tree).items():
            context = self._op(key, value, context)
        return context

    def _op(self, key, value, context):
        # Dispatch on the trailing-slash directory marker.
        handler = self._op_dir if key.endswith('/') else self._op_file
        return handler(key, value, context)

    def _op_dir(self, key, value, context):
        return context

    def _op_file(self, key, value, context):
        return context

    def run(self, context):
        return self._fold(context)
class Delete(Ops):
    """Delete the mapped keys from the bucket, prompting per key unless the
    context is silent."""

    def _op_dir(self, key, value, context):
        # Delete every key under the remote prefix, one confirmation each.
        src = context.relative(value)
        for sub_key in context.bucket.list(src):
            context = self._op_file(sub_key.name, sub_key.name, context)
        return context

    def _op_file(self, key, value, context):
        # Interactive mode asks for confirmation; silent mode just logs the key.
        # NOTE(review): raw_input is Python 2 only — consistent with the boto
        # dependency, but will NameError under Python 3.
        src = context.relative(value)
        if not context.silent:
            response = raw_input("Confirm deletion of " + src + " [y/n]: ").lower()
            if response == "yes" or response == "y":
                log.log(" Continuing deletion of: " + src + "\n")
                self._delete_file(src, context)
            else:
                log.log(" Skipping, " + src + " will be protected.\n")
        else:
            log.log(src)
            self._delete_file(src,context)
        return context

    def _delete_file(self, src, context):
        # Deletes `src` unless the key is missing or this is a dry run.
        src_key = context.bucket.get_key(src)
        if not src_key:
            log.warn(' Key {} does not exist'.format(src))
        if not context.dry_run and src_key:
            context.bucket.delete_key(src)
            log.log(" " + src + " deleted")
        return context
class Download(Ops):
    """Download the mapped keys from the bucket into the local tree."""

    def _op_dir(self, key, value, context):
        # Download every key under the remote prefix, preserving the
        # sub-path below it.
        src = context.relative(value)
        for sub_key in context.bucket.list(src):
            context = self._op_file(
                os.path.join(key, sub_key.name[len(src):]), sub_key.name,
                context)
        return context

    def _op_file(self, key, value, context):
        # Download a single key to its local destination path.
        src = context.relative(value)
        dst = context.absolute(key)
        log.log('{} -> {}'.format(src, dst))
        src_key = context.bucket.get_key(src)
        if not src_key:
            log.error(' Key {} does not exist'.format(src))
        if not context.dry_run and src_key:
            dst_dir = os.path.dirname(dst)
            try:
                os.makedirs(dst_dir)
            except OSError:
                # Directory already exists; any other filesystem failure will
                # surface below when the file is written. The original bare
                # `except:` also swallowed KeyboardInterrupt/SystemExit.
                pass
            src_key.get_contents_to_filename(dst)
        return context
def reconfigure_website(bucket, rules):
    """Re-apply the bucket's website configuration with new routing rules,
    preserving its existing index suffix and error document."""
    current = bucket.get_website_configuration_obj()
    bucket.configure_website(suffix=current.suffix,
                             error_key=current.error_key,
                             routing_rules=rules)
class Pin(Ops):
    """Pin each src key to a dst prefix using S3 Key redirects, first removing
    any stale RoutingRules for the same major.minor version."""

    def _op(self, key, value, context):
        src = context.relative(key)
        dst = context.relative(value)
        log.log('{} -> {}'.format(src, dst))
        # Delete any previous RoutingRules. We have to use S3 Key redirects.
        # Iterate over a copy because rules are removed during iteration.
        for rule in list(context.rules):
            key_prefix = rule.condition.key_prefix
            if not src.startswith(key_prefix):
                continue
            replace_key_prefix = rule.redirect.replace_key_prefix
            try:
                # Last path component of the prefix is expected to be a
                # version string; non-version prefixes are skipped.
                major_minor = parse_major_minor(
                    os.path.split(key_prefix.rstrip('/'))[1])
            except:
                continue
            # Only remove rules for the version currently being pinned.
            if (major_minor != parse_major_minor(
                    '{major}.{minor}'.format(**context.variables))):
                continue
            context.rules.remove(rule)
            log.warn(' Deleting RoutingRule that pointed to {}'.format(
                replace_key_prefix))
        # Create S3 Key redirect.
        src_key = context.bucket.get_key(src)
        if not src_key:
            log.log(' Creating S3 Key redirect')
        else:
            existing_redirect = src_key.get_redirect()
            log.warn(' Updating S3 Key redirect that pointed to {}'.format(
                existing_redirect))
        if not context.dry_run:
            if not src_key:
                src_key = Key(context.bucket)
            src_key.key = src
            # Redirects must never be cached so re-pins take effect at once.
            src_key.set_redirect('/' + dst.lstrip('/'), headers={
                'Cache-Control': 'max-age=0, no-cache, no-store'
            })
        return context

    def run(self, context):
        # After all key-level edits, push the pruned RoutingRules back to S3.
        context = super(Pin, self).run(context)
        if not context.dry_run:
            reconfigure_website(context.bucket, context.rules)
        return context
class Unpin(Ops):
    """Remove the pin for each src key: delete matching RoutingRules and any
    S3 Key redirect object."""

    def _op(self, key, value, context):
        src = context.relative(key)
        dst = context.relative(value)
        log.log('{} -> {}'.format(src, dst))
        # Tracks whether any rule or redirect existed for this src.
        found = False
        # Delete any RoutingRules.
        # Iterate over a copy because rules are removed during iteration.
        for rule in list(context.rules):
            key_prefix = rule.condition.key_prefix
            if not src.startswith(key_prefix):
                continue
            replace_key_prefix = rule.redirect.replace_key_prefix
            try:
                # Last path component of the prefix is expected to be a
                # version string; non-version prefixes are skipped.
                major_minor = parse_major_minor(
                    os.path.split(key_prefix.rstrip('/'))[1])
            except:
                continue
            # Only remove rules for the version currently being unpinned.
            if (major_minor != parse_major_minor(
                    '{major}.{minor}'.format(**context.variables))):
                continue
            found = True
            context.rules.remove(rule)
            log.warn(' Deleting RoutingRule that pointed to {}'.format(
                replace_key_prefix))
        # Delete any S3 Key redirects.
        src_key = context.bucket.get_key(src)
        if src_key:
            found = True
            if not context.dry_run:
                context.bucket.delete_key(src)
        if not found:
            log.warn(' Redirect {} does not exist'.format(src))
        return context

    def run(self, context):
        # After all key-level edits, push the pruned RoutingRules back to S3.
        context = super(Unpin, self).run(context)
        if not context.dry_run:
            reconfigure_website(context.bucket, context.rules)
        return context
class Upload(Ops):
    """Upload the mapped local files/directories into the bucket."""

    def _op_dir(self, key, value, context):
        # Walk the local directory and upload every file, preserving its
        # sub-path below the directory.
        srcdir = context.absolute(key)
        for path, _, srcs in os.walk(srcdir):
            # Path of `path` relative to srcdir. Strip the leading separator:
            # os.path.join discards every component before an absolute one,
            # so the original produced broken keys for files in
            # subdirectories. Also hoists the loop-invariant
            # context.absolute(key) out of the inner loop.
            sub_path = path[len(srcdir):].lstrip(os.sep)
            for src in srcs:
                sub_key = os.path.join(key, sub_path, src)
                sub_value = os.path.join(value, sub_path, src)
                context = self._op_file(sub_key, sub_value, context)
        return context

    def _op_file(self, key, value, context):
        # Upload a single local file to its destination key with far-future
        # cache headers (release artifacts are immutable).
        src = context.absolute(key)
        dst = context.relative(value)
        log.log('{} -> {}'.format(src, dst))
        dst_key = context.bucket.get_key(dst)
        if dst_key:
            log.warn(' Updating Key')
        if not context.dry_run:
            if not dst_key:
                dst_key = Key(context.bucket)
            dst_key.key = dst
            dst_key.set_contents_from_filename(src, headers={
                'Cache-Control': 'max-age=315360000',
                'Expires': 'Thu, 31 Dec 2037 23:55:55 GMT'
            })
        return context
def _run_op(op_class, tree, kwargs):
    """Instantiate `op_class` over `tree` and execute it with a fresh Context
    built from `kwargs`."""
    return op_class(tree).run(Context(**kwargs))

def delete(tree, **kwargs):
    return _run_op(Delete, tree, kwargs)

def download(tree, **kwargs):
    return _run_op(Download, tree, kwargs)

def pin(tree, **kwargs):
    return _run_op(Pin, tree, kwargs)

def unpin(tree, **kwargs):
    return _run_op(Unpin, tree, kwargs)

def upload(tree, **kwargs):
    return _run_op(Upload, tree, kwargs)
from sdk_release_tools import log
from sdk_release_tools.versions import parse_major_minor
import os
__all__ = ['Delete', 'Download', 'Pin', 'Unpin', 'Upload', 'delete',
'download', 'pin', 'unpin', 'upload']
def absolute(root):
if not os.path.isabs(root):
root = os.path.join(os.getcwd(), root)
return root
class Context(object):
def __init__(self, root=None, variables=None, bucket=None, dry_run=True, silent=False,
copy_on_pin=False):
self.root = root
self.variables = variables or {}
self.bucket = bucket
self.rules = (bucket.get_website_configuration_obj().routing_rules if
bucket else None)
self.dry_run = dry_run
self.silent = silent
self.copy_on_pin = copy_on_pin
def absolute(self, key):
"""
Get the absolute path to a key prepended by this Context's root, and
interpolate any variables.
"""
path = self.relative(key)
if self.root:
path = os.path.join(self.root, path)
return absolute(path)
def relative(self, key):
"""
Get the relative path to a key, and interpolate any variables.
"""
return key.format(**self.variables)
class Ops(object):
def __init__(self, tree):
self.tree = tree
def _fold(self, context, tree=None):
tree = tree or self.tree
for key, value in tree.items():
context = self._op(key, value, context)
return context
def _op(self, key, value, context):
if key.endswith('/'):
return self._op_dir(key, value, context)
return self._op_file(key, value, context)
def _op_dir(self, key, value, context):
return context
def _op_file(self, key, value, context):
return context
def run(self, context):
return self._fold(context)
class Delete(Ops):
def _op_dir(self, key, value, context):
src = context.relative(value)
for sub_key in context.bucket.list(src):
context = self._op_file(sub_key.name, sub_key.name, context)
return context
def _op_file(self, key, value, context):
src = context.relative(value)
if not context.silent:
response = raw_input("Confirm deletion of " + src + " [y/n]: ").lower()
if response == "yes" or response == "y":
log.log(" Continuing deletion of: " + src + "\n")
self._delete_file(src, context)
else:
log.log(" Skipping, " + src + " will be protected.\n")
else:
log.log(src)
self._delete_file(src,context)
return context
def _delete_file(self, src, context):
src_key = context.bucket.get_key(src)
if not src_key:
log.warn(' Key {} does not exist'.format(src))
if not context.dry_run and src_key:
context.bucket.delete_key(src)
log.log(" " + src + " deleted")
return context
class Download(Ops):
    """Download the mapped keys from the bucket into the local tree."""

    def _op_dir(self, key, value, context):
        # Download every key under the remote prefix, preserving the
        # sub-path below it.
        src = context.relative(value)
        for sub_key in context.bucket.list(src):
            context = self._op_file(
                os.path.join(key, sub_key.name[len(src):]), sub_key.name,
                context)
        return context

    def _op_file(self, key, value, context):
        # Download a single key to its local destination path.
        src = context.relative(value)
        dst = context.absolute(key)
        log.log('{} -> {}'.format(src, dst))
        src_key = context.bucket.get_key(src)
        if not src_key:
            log.error(' Key {} does not exist'.format(src))
        if not context.dry_run and src_key:
            dst_dir = os.path.dirname(dst)
            try:
                os.makedirs(dst_dir)
            except OSError:
                # Directory already exists; any other filesystem failure will
                # surface below when the file is written. The original bare
                # `except:` also swallowed KeyboardInterrupt/SystemExit.
                pass
            src_key.get_contents_to_filename(dst)
        return context
def reconfigure_website(bucket, rules):
config = bucket.get_website_configuration_obj()
bucket.configure_website(suffix=config.suffix, error_key=config.error_key,
routing_rules=rules)
class Pin(Ops):
def _op(self, key, value, context):
src = context.relative(key)
dst = context.relative(value)
log.log('{} -> {}'.format(src, dst))
# Delete any previous RoutingRules. We have to use S3 Key redirects.
for rule in list(context.rules):
key_prefix = rule.condition.key_prefix
if not src.startswith(key_prefix):
continue
replace_key_prefix = rule.redirect.replace_key_prefix
try:
major_minor = parse_major_minor(
os.path.split(key_prefix.rstrip('/'))[1])
except:
continue
if (major_minor != parse_major_minor(
'{major}.{minor}'.format(**context.variables))):
continue
context.rules.remove(rule)
log.warn(' Deleting RoutingRule that pointed to {}'.format(
replace_key_prefix))
# Create S3 Key redirect.
src_key = context.bucket.get_key(src)
if not src_key:
log.log(' Creating S3 Key redirect')
else:
existing_redirect = src_key.get_redirect()
log.warn(' Updating S3 Key redirect that pointed to {}'.format(
existing_redirect))
if not context.dry_run:
if not src_key:
src_key = Key(context.bucket)
src_key.key = src
src_key.set_redirect('/' + dst.lstrip('/'), headers={
'Cache-Control': 'max-age=0, no-cache, no-store'
})
return context
def run(self, context):
context = super(Pin, self).run(context)
if not context.dry_run:
reconfigure_website(context.bucket, context.rules)
return context
class Unpin(Ops):
def _op(self, key, value, context):
src = context.relative(key)
dst = context.relative(value)
log.log('{} -> {}'.format(src, dst))
found = False
# Delete any RoutingRules.
for rule in list(context.rules):
key_prefix = rule.condition.key_prefix
if not src.startswith(key_prefix):
continue
replace_key_prefix = rule.redirect.replace_key_prefix
try:
major_minor = parse_major_minor(
os.path.split(key_prefix.rstrip('/'))[1])
except:
continue
if (major_minor != parse_major_minor(
'{major}.{minor}'.format(**context.variables))):
continue
found = True
context.rules.remove(rule)
log.warn(' Deleting RoutingRule that pointed to {}'.format(
replace_key_prefix))
# Delete any S3 Key redirects.
src_key = context.bucket.get_key(src)
if src_key:
found = True
if not context.dry_run:
context.bucket.delete_key(src)
if not found:
log.warn(' Redirect {} does not exist'.format(src))
return context
def run(self, context):
context = super(Unpin, self).run(context)
if not context.dry_run:
reconfigure_website(context.bucket, context.rules)
return context
class Upload(Ops):
    """Upload the mapped local files/directories into the bucket."""

    def _op_dir(self, key, value, context):
        # Walk the local directory and upload every file, preserving its
        # sub-path below the directory.
        srcdir = context.absolute(key)
        for path, _, srcs in os.walk(srcdir):
            # Path of `path` relative to srcdir. Strip the leading separator:
            # os.path.join discards every component before an absolute one,
            # so the original produced broken keys for files in
            # subdirectories. Also hoists the loop-invariant
            # context.absolute(key) out of the inner loop.
            sub_path = path[len(srcdir):].lstrip(os.sep)
            for src in srcs:
                sub_key = os.path.join(key, sub_path, src)
                sub_value = os.path.join(value, sub_path, src)
                context = self._op_file(sub_key, sub_value, context)
        return context

    def _op_file(self, key, value, context):
        # Upload a single local file to its destination key with far-future
        # cache headers (release artifacts are immutable).
        src = context.absolute(key)
        dst = context.relative(value)
        log.log('{} -> {}'.format(src, dst))
        dst_key = context.bucket.get_key(dst)
        if dst_key:
            log.warn(' Updating Key')
        if not context.dry_run:
            if not dst_key:
                dst_key = Key(context.bucket)
            dst_key.key = dst
            dst_key.set_contents_from_filename(src, headers={
                'Cache-Control': 'max-age=315360000',
                'Expires': 'Thu, 31 Dec 2037 23:55:55 GMT'
            })
        return context
def delete(tree, **kwargs):
return Delete(tree).run(Context(**kwargs))
def download(tree, **kwargs):
return Download(tree).run(Context(**kwargs))
def pin(tree, **kwargs):
return Pin(tree).run(Context(**kwargs))
def unpin(tree, **kwargs):
return Unpin(tree).run(Context(**kwargs))
def upload(tree, **kwargs):
return Upload(tree).run(Context(**kwargs)) | 0.587352 | 0.121921 |
from rest_framework import viewsets, status, filters, generics
from rest_framework.response import Response
from obeflix_back.models import Video, Categoria
from obeflix_back.serializer import VideoSerializer, CategoriaSerializer, ListaVideoPorCategoriaSerializer
class VideosViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for all videos."""
    queryset = Video.objects.all()
    serializer_class = VideoSerializer
    # Enables the ?search= query param over the title field.
    filter_backends = [filters.SearchFilter]
    search_fields = ['titulo']

    def destroy(self, request, *args, **kwargs):
        """(DELETE) Delete a video; 404 with a detail message if it does not exist."""
        try:
            instance = self.get_object()
            self.perform_destroy(instance)
        except Exception:
            # get_object raises for unknown pks. The original bare `except:`
            # also swallowed SystemExit/KeyboardInterrupt.
            return Response(status=status.HTTP_404_NOT_FOUND, data={'detail': 'Vídeo não encontrado.'})
        return Response(status=status.HTTP_200_OK, data={'detail': 'Vídeo deletado com sucesso!'})
class CategoriasViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for all categories."""
    queryset = Categoria.objects.all()
    serializer_class = CategoriaSerializer

    def destroy(self, request, *args, **kwargs):
        """(DELETE) Delete a category; category 1 is protected (405)."""
        try:
            instance = self.get_object()
            # Compare by pk instead of fetching category 1 from the DB: the
            # old `Categoria.objects.get(id__iexact=1)` issued an extra query
            # on every delete and raised DoesNotExist — reported as a 404 —
            # for ALL deletes whenever category 1 was absent.
            if instance.pk == 1:
                return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED , data={'detail': 'Você não pode deletar a categoria 1.'})
            self.perform_destroy(instance)
        except Exception:
            return Response(status=status.HTTP_404_NOT_FOUND, data={'detail': 'Categoria não encontrada.'})
        return Response(status=status.HTTP_200_OK, data={'detail': 'Categoria deletada com sucesso!'})
class ListaVideosPorCategoria(generics.ListAPIView):
    """List the videos belonging to the category given by the `pk` URL kwarg."""
    serializer_class = ListaVideoPorCategoriaSerializer
    # Enables the ?search= query param over the title field.
    filter_backends = [filters.SearchFilter]
    search_fields = ['titulo']

    def get_queryset(self):
        return Video.objects.filter(categoriaId=self.kwargs['pk'])
from rest_framework.response import Response
from obeflix_back.models import Video, Categoria
from obeflix_back.serializer import VideoSerializer, CategoriaSerializer, ListaVideoPorCategoriaSerializer
class VideosViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for all videos."""
    queryset = Video.objects.all()
    serializer_class = VideoSerializer
    # Enables the ?search= query param over the title field.
    filter_backends = [filters.SearchFilter]
    search_fields = ['titulo']

    def destroy(self, request, *args, **kwargs):
        """(DELETE) Delete a video; 404 with a detail message if it does not exist."""
        try:
            instance = self.get_object()
            self.perform_destroy(instance)
        except Exception:
            # get_object raises for unknown pks. The original bare `except:`
            # also swallowed SystemExit/KeyboardInterrupt.
            return Response(status=status.HTTP_404_NOT_FOUND, data={'detail': 'Vídeo não encontrado.'})
        return Response(status=status.HTTP_200_OK, data={'detail': 'Vídeo deletado com sucesso!'})
class CategoriasViewSet(viewsets.ModelViewSet):
    """CRUD endpoints for all categories."""
    queryset = Categoria.objects.all()
    serializer_class = CategoriaSerializer

    def destroy(self, request, *args, **kwargs):
        """(DELETE) Delete a category; category 1 is protected (405)."""
        try:
            instance = self.get_object()
            # Compare by pk instead of fetching category 1 from the DB: the
            # old `Categoria.objects.get(id__iexact=1)` issued an extra query
            # on every delete and raised DoesNotExist — reported as a 404 —
            # for ALL deletes whenever category 1 was absent.
            if instance.pk == 1:
                return Response(status=status.HTTP_405_METHOD_NOT_ALLOWED , data={'detail': 'Você não pode deletar a categoria 1.'})
            self.perform_destroy(instance)
        except Exception:
            return Response(status=status.HTTP_404_NOT_FOUND, data={'detail': 'Categoria não encontrada.'})
        return Response(status=status.HTTP_200_OK, data={'detail': 'Categoria deletada com sucesso!'})
class ListaVideosPorCategoria(generics.ListAPIView):
"""Lista os videos em uma Categoria"""
def get_queryset(self):
queryset = Video.objects.filter(categoriaId=self.kwargs['pk'])
return queryset
serializer_class = ListaVideoPorCategoriaSerializer
# "search" query param
filter_backends = [filters.SearchFilter]
search_fields = ['titulo'] | 0.478041 | 0.146362 |
from user import User
from credentials import Credentials
import random
def greetings():
print(" __ __ ")
print(" /\ /\ | | | | ")
print("| | | | ________ | | | | _____ ")
print("| |____| | | ___ | | | | | / \ ")
print("| ____ | | |___| | | | | | | ___ | ")
print("| | | | | ______| | |__ | |__ | |___| | ")
print("| | | | | |______ | | | | | | ")
print(" \/ \/ \_______/ \_____/ \____/ \_____/ ")
greetings()
def password (password):
'''
Function to rewrite the Value error to make it easier to understand
'''
print("Create new password ")
try:
number = int(input())
return number
except ValueError:
return "That was not a valid input"
def create_contact(fname, lname, password, email):
'''
Function to create a new contact
'''
new_user = User(fname, lname, password, email)
return new_user
def save_user(user):
'''
Function to save user
'''
user.save_user()
def del_user():
'''
Function to delete a user
'''
contact.delete_user()
def find_user(password):
'''
Function that finds a user by number and returns the user
'''
return User.find_by_password(password)
def check_existing_user(password):
'''
Function that check if a user exists with that password and return a Boolean
'''
return User.user_exist(password)
def display_user():
'''
Function that returns all the saved user
'''
return user.display_user()
def main():
print("...........Whatsup Huuuumaaan?.This is the place where I, the bot, make passwords for you. What is your name?...........")
user_name = input()
print(f"........Waddup {user_name}. my master (Developer) wants me to assist you in making a user account.......")
print('\n')
while True:
print(
"Yo human...Use these short codes to walk through around my master's app : cu - create a new user, dc - display user, fc -find a user, ex -exit the user list")
short_code = input().lower()
if short_code == 'cu':
print("...............New User.............")
print("-"*10)
print("-"*10)
print("...............Pop up your First name...............")
f_name = input()
print("-"*10)
print("...............Pop up your Last name...............")
l_name = input()
print("-"*10)
print("..................Let me do the magic in making your Password................")
random_number = random.randint(1000,9999)
print(random_number)
print("-"*10)
print(".................Email address..................")
e_address = input()
print("-"*10)
print("-"*10)
# create and save new contact.
save_user(create_user(f_name,l_name,password,e_address))
print('\n')
print(f"New User {f_name} {l_name} created")
print('\n')
elif short_code == 'dc':
if display_users():
print("Here is a list of all your user")
print('\n')
for user in display_users():
print(
f"{user.first_name} {user.last_name} .....{user.password}")
print('\n')
else:
print('\n')
print(
"you don't have any")
print('\n')
elif short_code == 'fc':
print("Enter the password you want to search for")
search_password = input()
if check_existing_user(search_password):
search_user = find_user(
search_password)
print(
f"{search_user.first_name} {search_user.last_name}")
print('-' * 20)
print(
f"Password.......{search_user.password}")
print(
f"Email address.......{search_user.email}")
else:
print("Again I don't get it")
elif short_code == "ex":
print("Adios!.......")
break
else:
print(
"I'm a bot I can't. PLEASE use the short codes")
if __name__ == '__main__':
main() | run.py | from user import User
from credentials import Credentials
import random
def greetings():
print(" __ __ ")
print(" /\ /\ | | | | ")
print("| | | | ________ | | | | _____ ")
print("| |____| | | ___ | | | | | / \ ")
print("| ____ | | |___| | | | | | | ___ | ")
print("| | | | | ______| | |__ | |__ | |___| | ")
print("| | | | | |______ | | | | | | ")
print(" \/ \/ \_______/ \_____/ \____/ \_____/ ")
greetings()
def password (password):
'''
Function to rewrite the Value error to make it easier to understand
'''
print("Create new password ")
try:
number = int(input())
return number
except ValueError:
return "That was not a valid input"
def create_contact(fname, lname, password, email):
'''
Function to create a new contact
'''
new_user = User(fname, lname, password, email)
return new_user
def save_user(user):
'''
Function to save user
'''
user.save_user()
def del_user():
'''
Function to delete a user
'''
contact.delete_user()
def find_user(password):
'''
Function that finds a user by number and returns the user
'''
return User.find_by_password(password)
def check_existing_user(password):
'''
Function that check if a user exists with that password and return a Boolean
'''
return User.user_exist(password)
def display_user():
'''
Function that returns all the saved user
'''
return user.display_user()
def main():
print("...........Whatsup Huuuumaaan?.This is the place where I, the bot, make passwords for you. What is your name?...........")
user_name = input()
print(f"........Waddup {user_name}. my master (Developer) wants me to assist you in making a user account.......")
print('\n')
while True:
print(
"Yo human...Use these short codes to walk through around my master's app : cu - create a new user, dc - display user, fc -find a user, ex -exit the user list")
short_code = input().lower()
if short_code == 'cu':
print("...............New User.............")
print("-"*10)
print("-"*10)
print("...............Pop up your First name...............")
f_name = input()
print("-"*10)
print("...............Pop up your Last name...............")
l_name = input()
print("-"*10)
print("..................Let me do the magic in making your Password................")
random_number = random.randint(1000,9999)
print(random_number)
print("-"*10)
print(".................Email address..................")
e_address = input()
print("-"*10)
print("-"*10)
# create and save new contact.
save_user(create_user(f_name,l_name,password,e_address))
print('\n')
print(f"New User {f_name} {l_name} created")
print('\n')
elif short_code == 'dc':
if display_users():
print("Here is a list of all your user")
print('\n')
for user in display_users():
print(
f"{user.first_name} {user.last_name} .....{user.password}")
print('\n')
else:
print('\n')
print(
"you don't have any")
print('\n')
elif short_code == 'fc':
print("Enter the password you want to search for")
search_password = input()
if check_existing_user(search_password):
search_user = find_user(
search_password)
print(
f"{search_user.first_name} {search_user.last_name}")
print('-' * 20)
print(
f"Password.......{search_user.password}")
print(
f"Email address.......{search_user.email}")
else:
print("Again I don't get it")
elif short_code == "ex":
print("Adios!.......")
break
else:
print(
"I'm a bot I can't. PLEASE use the short codes")
if __name__ == '__main__':
main() | 0.290176 | 0.13759 |
import numpy as np
import math
from scipy.sparse import diags
from scipy import linalg
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
import base64
# Material class defines a dictionary conditioning an information
# on material characteristics of each element layer
class Material:
def __init__(self):
self.layers = []
def new_material(self, material_name, cond, rho, c, l):
my_dict = {'material' : material_name,
'conductivity' : cond,
'density' : rho,
'capacity' : c,
'layer_length' : l}
self.layers.append(my_dict)
# Defining Resistance matrix and input vector needed for equation
# solution of system R * T = t, where R is resistance matrix, T is
# solution vector of temperatures (T = t / R) and t is input vector
class Resistance:
def __init__(self, layers, delta_x, delta_t, Rsi=0.13, Rse=0.04):
self.layers = layers
self.R = []
self.tau = []
self.dx = delta_x
self.delta_t = delta_t
self.Rsi = Rsi
self.Rse = Rse
self.mat_list = []
self.l = 0
self.R_i = 0
self.R_e = 0
def resistance_tau(self):
mesh = []
mesh_cumulative = []
help = 0
eps = 1e-5
diag_c = []
diag_w = [0]
diag_e = []
for i in range(len(self.layers)):
min = help
max = help + self.layers[i]['layer_length']
conductivity = self.layers[i]['conductivity']
density = self.layers[i]['density']
capacity = self.layers[i]['capacity']
self.mat_list.append([min, max, conductivity, density, capacity])
help = max
self.l = max
mat_list = np.copy(self.mat_list)
help = 0
for i in range(len(mat_list)):
if self.dx > (mat_list[i][1] - mat_list[i][0]):
delta_x = (mat_list[i][1] - mat_list[i][0])
else:
delta_x = self.dx
min = mat_list[i][0]
max = mat_list[i][1]
material = mat_list[i][2]
firstl = True
lastl = True
for j in range(math.ceil((max - min) / delta_x)):
help += delta_x
if (i == (len(mat_list) - 1)) and (abs(help - mat_list[i][1]) < eps) and lastl:
dx = dx_h
Rw = dx / 2 / mat_list[-1][2] + delta_x / 2 / mat_list[-1][2]
Re = dx / 2 / mat_list[-1][2] + self.Rse
self.R_e = Re
Ci = mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t + 1 / Rw + 1 / Re
diag_w.append(-1 / Rw)
diag_c.append(Ci)
self.tau.append(mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t)
## loop check
#print('inside last 1')
#print('help: ', help)
#print('dx =', dx, 'delta_x = ', delta_x)
## mesh check
#print(dx)
mesh.append(dx)
break
elif (i == (len(mat_list) - 1)) and (help - mat_list[i][1] > 0) and lastl:
dx = delta_x - (help - mat_list[i][1])
Rw = dx / 2 / mat_list[-1][2] + delta_x / 2 / mat_list[-1][2]
Re = dx / 2 / mat_list[-1][2] + self.Rse
self.R_e = Re
Ci = mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t + 1 / Rw + 1 / Re
diag_w.append(-1 / Rw)
diag_c.append(Ci)
self.tau.append(mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t)
## loop check
#print('inside last 2')
#print('help: ', help)
#print('dx =', dx, 'delta_x = ', delta_x)
## mesh check
#print(dx)
mesh.append(dx)
break
if help == delta_x:
Rw = help / 2 / mat_list[0][2] + self.Rsi
self.R_i = Rw
Re = help / mat_list[0][2]
Ci = mat_list[0][3] * mat_list[0][4] * help / self.delta_t + 1 / Rw + 1 / Re
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[0][3] * mat_list[0][4] * help / self.delta_t)
## loop check
#print('!!inside first')
#print('help: ', help)
## mesh check
#print(delta_x)
mesh.append(delta_x)
dx_h = delta_x
continue
if (help >= mat_list[i][0]) and j == 0 and i > 0:
dx = dx_h
Rw = dx / 2 / mat_list[i-1][2] + delta_x / 2 / mat_list[i][2]
Re = delta_x / mat_list[i][2]
Ci = mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t + 1 / Rw + 1 / Re
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t)
help = mat_list[i][0] + delta_x
## loop check
#print('!!inside b1')
#print('help: ', help)
#print('dx =', dx, 'delta_x = ', delta_x)
## mesh check
#print(delta_x)
mesh.append(delta_x)
continue
if (i < (len(mat_list)-1)) and (abs(help - mat_list[i][1]) < eps) and lastl:
dx = mat_list[i][1] - (help - delta_x)
dx_h = dx
Rw = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i][2]
Re = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i+1][2]
Ci = mat_list[i][3] * mat_list[i][4] * dx / self.delta_t + 1 / Re + 1 / Rw
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * dx / self.delta_t)
lastl = False
## loop check
#print('!!inside b21')
#print('help: ', help)
#print('dx =', dx)
#print(help, mat_list[i][1])
## mesh check
#print(dx)
mesh.append(dx)
continue
elif (i < (len(mat_list)-1)) and (help - mat_list[i][1] > 0) and lastl:
dx = mat_list[i][1] - (help - delta_x)
dx_h = dx
Rw = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i][2]
Re = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i+1][2]
Ci = mat_list[i][3] * mat_list[i][4] * dx / self.delta_t + 1 / Re + 1 / Rw
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * dx / self.delta_t)
lastl = False
## loop check
#print('!!inside b22')
#print('help: ', help)
#print(help, mat_list[i][1])
## mesh check
#print(dx)
mesh.append(dx)
continue
Re = delta_x / mat_list[i][2]
Rw = Re
Ci = mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t + 1 / Re + 1 / Rw
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t)
## mesh check
#print(delta_x)
mesh.append(delta_x)
#self.R = diags([diag_w, diag_c, diag_e], [-1, 0, 1]).toarray()
diag_e.append(0)
self.R = np.array([diag_w, diag_c, diag_e])
self.tau = np.array(self.tau)
return self.R, self.tau, [self.R_i, self.R_e], mesh
def solve_he(self, R_mat, tau, R_bound, initial, indoor, outdoor):
initial = np.array(initial)
indoor = np.array(indoor)
outdoor = np.array(outdoor)
results = []
end = len(indoor)
perc = 0
for i in range(end):
initial_res = np.array(indoor[i])
initial_res = np.append(initial_res, initial)
initial_res = np.append(initial_res, outdoor[i])
results.append(initial_res)
tau2 = tau * initial
tau2[0] = tau2[0] + indoor[i] / R_bound[0]
tau2[-1] = tau2[-1] + outdoor[i] / R_bound[1]
initial = linalg.solve_banded((1, 1), R_mat, tau2)
return results
def q_Q(self, temperatures, mesh):
mat_list = np.copy(self.mat_list)
q = []
for i in range(len(temperatures)):
R = mesh[0] / mat_list[0][2] + self.Rsi
q.append((temperatures[i][1] - temperatures[i][0]) / R)
q = np.array(q)
Q = np.sum(q)
return q, Q
class U_heat_flux:
def __init__(self, layers, Rsi=0.13, Rse=0.04):
self.layers = layers
self.Rsi = Rsi
self.Rse = Rse
def uval(self):
mat_list = []
help = 0
for i in range(len(self.layers)):
min = help
max = help + self.layers[i]['layer_length']
conductivity = self.layers[i]['conductivity']
mat_list.append([min, max, conductivity])
R = self.Rsi
for i in range(len(mat_list)):
R += (mat_list[i][1] - mat_list[i][0]) / mat_list[i][2]
R += self.Rse
return 1 / R
def q_U(self, U, indoor, outdoor):
mat_list = []
help = 0
for i in range(len(self.layers)):
min = help
max = help + self.layers[i]['layer_length']
conductivity = self.layers[i]['conductivity']
mat_list.append([min, max, conductivity])
results = []
point = 0
points = [-0.02, point]
for i in range(len(mat_list)):
point += mat_list[i][1]
points.append(point)
points.append(point + 0.02)
for i in range(len(indoor)):
q = - U * (indoor[i] - outdoor[i])
ith_result = [indoor[i]]
next = indoor[i] + q * self.Rsi
ith_result.append(next)
for j in range(len(mat_list)):
Ri = (mat_list[j][1] - mat_list[j][0]) / mat_list[j][2]
next += q * Ri
ith_result.append(next)
ith_result.append(outdoor[i])
results.append(ith_result)
return results, points
def q_Q(self, U, indoor, outdoor):
q = []
for i in range(len(indoor)):
q.append(- U * (indoor[i] - outdoor[i]))
q = np.array(q)
Q = np.sum(q)
return q, Q | calc/htool.py | import numpy as np
import math
from scipy.sparse import diags
from scipy import linalg
import matplotlib.pyplot as plt
import pandas as pd
from sklearn import metrics
import base64
# Material class defines a dictionary conditioning an information
# on material characteristics of each element layer
class Material:
def __init__(self):
self.layers = []
def new_material(self, material_name, cond, rho, c, l):
my_dict = {'material' : material_name,
'conductivity' : cond,
'density' : rho,
'capacity' : c,
'layer_length' : l}
self.layers.append(my_dict)
# Defining Resistance matrix and input vector needed for equation
# solution of system R * T = t, where R is resistance matrix, T is
# solution vector of temperatures (T = t / R) and t is input vector
class Resistance:
def __init__(self, layers, delta_x, delta_t, Rsi=0.13, Rse=0.04):
self.layers = layers
self.R = []
self.tau = []
self.dx = delta_x
self.delta_t = delta_t
self.Rsi = Rsi
self.Rse = Rse
self.mat_list = []
self.l = 0
self.R_i = 0
self.R_e = 0
def resistance_tau(self):
mesh = []
mesh_cumulative = []
help = 0
eps = 1e-5
diag_c = []
diag_w = [0]
diag_e = []
for i in range(len(self.layers)):
min = help
max = help + self.layers[i]['layer_length']
conductivity = self.layers[i]['conductivity']
density = self.layers[i]['density']
capacity = self.layers[i]['capacity']
self.mat_list.append([min, max, conductivity, density, capacity])
help = max
self.l = max
mat_list = np.copy(self.mat_list)
help = 0
for i in range(len(mat_list)):
if self.dx > (mat_list[i][1] - mat_list[i][0]):
delta_x = (mat_list[i][1] - mat_list[i][0])
else:
delta_x = self.dx
min = mat_list[i][0]
max = mat_list[i][1]
material = mat_list[i][2]
firstl = True
lastl = True
for j in range(math.ceil((max - min) / delta_x)):
help += delta_x
if (i == (len(mat_list) - 1)) and (abs(help - mat_list[i][1]) < eps) and lastl:
dx = dx_h
Rw = dx / 2 / mat_list[-1][2] + delta_x / 2 / mat_list[-1][2]
Re = dx / 2 / mat_list[-1][2] + self.Rse
self.R_e = Re
Ci = mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t + 1 / Rw + 1 / Re
diag_w.append(-1 / Rw)
diag_c.append(Ci)
self.tau.append(mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t)
## loop check
#print('inside last 1')
#print('help: ', help)
#print('dx =', dx, 'delta_x = ', delta_x)
## mesh check
#print(dx)
mesh.append(dx)
break
elif (i == (len(mat_list) - 1)) and (help - mat_list[i][1] > 0) and lastl:
dx = delta_x - (help - mat_list[i][1])
Rw = dx / 2 / mat_list[-1][2] + delta_x / 2 / mat_list[-1][2]
Re = dx / 2 / mat_list[-1][2] + self.Rse
self.R_e = Re
Ci = mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t + 1 / Rw + 1 / Re
diag_w.append(-1 / Rw)
diag_c.append(Ci)
self.tau.append(mat_list[-1][3] * mat_list[-1][4] * dx / self.delta_t)
## loop check
#print('inside last 2')
#print('help: ', help)
#print('dx =', dx, 'delta_x = ', delta_x)
## mesh check
#print(dx)
mesh.append(dx)
break
if help == delta_x:
Rw = help / 2 / mat_list[0][2] + self.Rsi
self.R_i = Rw
Re = help / mat_list[0][2]
Ci = mat_list[0][3] * mat_list[0][4] * help / self.delta_t + 1 / Rw + 1 / Re
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[0][3] * mat_list[0][4] * help / self.delta_t)
## loop check
#print('!!inside first')
#print('help: ', help)
## mesh check
#print(delta_x)
mesh.append(delta_x)
dx_h = delta_x
continue
if (help >= mat_list[i][0]) and j == 0 and i > 0:
dx = dx_h
Rw = dx / 2 / mat_list[i-1][2] + delta_x / 2 / mat_list[i][2]
Re = delta_x / mat_list[i][2]
Ci = mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t + 1 / Rw + 1 / Re
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t)
help = mat_list[i][0] + delta_x
## loop check
#print('!!inside b1')
#print('help: ', help)
#print('dx =', dx, 'delta_x = ', delta_x)
## mesh check
#print(delta_x)
mesh.append(delta_x)
continue
if (i < (len(mat_list)-1)) and (abs(help - mat_list[i][1]) < eps) and lastl:
dx = mat_list[i][1] - (help - delta_x)
dx_h = dx
Rw = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i][2]
Re = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i+1][2]
Ci = mat_list[i][3] * mat_list[i][4] * dx / self.delta_t + 1 / Re + 1 / Rw
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * dx / self.delta_t)
lastl = False
## loop check
#print('!!inside b21')
#print('help: ', help)
#print('dx =', dx)
#print(help, mat_list[i][1])
## mesh check
#print(dx)
mesh.append(dx)
continue
elif (i < (len(mat_list)-1)) and (help - mat_list[i][1] > 0) and lastl:
dx = mat_list[i][1] - (help - delta_x)
dx_h = dx
Rw = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i][2]
Re = dx / 2 / mat_list[i][2] + delta_x / 2 / mat_list[i+1][2]
Ci = mat_list[i][3] * mat_list[i][4] * dx / self.delta_t + 1 / Re + 1 / Rw
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * dx / self.delta_t)
lastl = False
## loop check
#print('!!inside b22')
#print('help: ', help)
#print(help, mat_list[i][1])
## mesh check
#print(dx)
mesh.append(dx)
continue
Re = delta_x / mat_list[i][2]
Rw = Re
Ci = mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t + 1 / Re + 1 / Rw
diag_w.append(-1 / Rw)
diag_c.append(Ci)
diag_e.append(-1 / Re)
self.tau.append(mat_list[i][3] * mat_list[i][4] * delta_x / self.delta_t)
## mesh check
#print(delta_x)
mesh.append(delta_x)
#self.R = diags([diag_w, diag_c, diag_e], [-1, 0, 1]).toarray()
diag_e.append(0)
self.R = np.array([diag_w, diag_c, diag_e])
self.tau = np.array(self.tau)
return self.R, self.tau, [self.R_i, self.R_e], mesh
def solve_he(self, R_mat, tau, R_bound, initial, indoor, outdoor):
initial = np.array(initial)
indoor = np.array(indoor)
outdoor = np.array(outdoor)
results = []
end = len(indoor)
perc = 0
for i in range(end):
initial_res = np.array(indoor[i])
initial_res = np.append(initial_res, initial)
initial_res = np.append(initial_res, outdoor[i])
results.append(initial_res)
tau2 = tau * initial
tau2[0] = tau2[0] + indoor[i] / R_bound[0]
tau2[-1] = tau2[-1] + outdoor[i] / R_bound[1]
initial = linalg.solve_banded((1, 1), R_mat, tau2)
return results
def q_Q(self, temperatures, mesh):
mat_list = np.copy(self.mat_list)
q = []
for i in range(len(temperatures)):
R = mesh[0] / mat_list[0][2] + self.Rsi
q.append((temperatures[i][1] - temperatures[i][0]) / R)
q = np.array(q)
Q = np.sum(q)
return q, Q
class U_heat_flux:
def __init__(self, layers, Rsi=0.13, Rse=0.04):
self.layers = layers
self.Rsi = Rsi
self.Rse = Rse
def uval(self):
mat_list = []
help = 0
for i in range(len(self.layers)):
min = help
max = help + self.layers[i]['layer_length']
conductivity = self.layers[i]['conductivity']
mat_list.append([min, max, conductivity])
R = self.Rsi
for i in range(len(mat_list)):
R += (mat_list[i][1] - mat_list[i][0]) / mat_list[i][2]
R += self.Rse
return 1 / R
def q_U(self, U, indoor, outdoor):
mat_list = []
help = 0
for i in range(len(self.layers)):
min = help
max = help + self.layers[i]['layer_length']
conductivity = self.layers[i]['conductivity']
mat_list.append([min, max, conductivity])
results = []
point = 0
points = [-0.02, point]
for i in range(len(mat_list)):
point += mat_list[i][1]
points.append(point)
points.append(point + 0.02)
for i in range(len(indoor)):
q = - U * (indoor[i] - outdoor[i])
ith_result = [indoor[i]]
next = indoor[i] + q * self.Rsi
ith_result.append(next)
for j in range(len(mat_list)):
Ri = (mat_list[j][1] - mat_list[j][0]) / mat_list[j][2]
next += q * Ri
ith_result.append(next)
ith_result.append(outdoor[i])
results.append(ith_result)
return results, points
def q_Q(self, U, indoor, outdoor):
q = []
for i in range(len(indoor)):
q.append(- U * (indoor[i] - outdoor[i]))
q = np.array(q)
Q = np.sum(q)
return q, Q | 0.293202 | 0.342159 |
from __future__ import print_function
import copy
import json
import re
import traceback
import zipfile
import arrow
from passive_data_kit.models import DataPoint
from passive_data_kit_external_data.models import annotate_field
from ..utils import hash_content, encrypt_content, create_engagement_event, queue_batch_insert, include_data
def _process_comment_entry(request_identifier, comment):
    """Sanitize and queue a single comment entry from a Facebook export.

    Deep-copies the entry, then replaces plaintext fields (title, nested
    comment text, author) with encrypted/hashed variants before queueing
    the payload as a 'pdk-external-facebook-comment' data point and
    recording an outgoing 'comment' engagement event. Entries rejected by
    include_data (outside the requested window) are skipped entirely.
    """
    comment = copy.deepcopy(comment)  # Never mutate the caller's parsed JSON.

    created = arrow.get(comment['timestamp']).datetime

    if not include_data(request_identifier, created, comment):
        return

    if 'title' in comment:
        comment['pdk_encrypted_title'] = encrypt_content(comment['title'].encode('utf-8'))
        annotate_field(comment, 'title', comment['title'])
        del comment['title']

    for datum in comment.get('data', []):
        if 'comment' in datum:
            comment_obj = datum['comment']

            if 'comment' in comment_obj:
                comment_obj['pdk_encrypted_comment'] = encrypt_content(comment_obj['comment'].encode('utf-8'))
                annotate_field(comment_obj, 'comment', comment_obj['comment'])
                del comment_obj['comment']

            if 'author' in comment_obj:
                # NOTE(review): this is the only hash_content call site in this
                # module that does not .encode('utf-8') its argument -- confirm
                # hash_content accepts str before normalizing.
                comment_obj['pdk_hashed_author'] = hash_content(comment_obj['author'])
                comment_obj['pdk_encrypted_author'] = encrypt_content(comment_obj['author'].encode('utf-8'))
                del comment_obj['author']

    queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-comment', request_identifier, comment, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))

    create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='comment', start=created)


def process_comments(request_identifier, comments_raw):
    """Import comments from a Facebook 'comments' export file.

    Accepts the raw JSON payload and handles both the legacy 'comments'
    key and the newer 'comments_v2' layout; the per-entry structure is
    identical, so both feed the same sanitization path.
    """
    comments = json.loads(comments_raw)

    # Legacy and v2 exports only differ in the top-level key.
    for export_key in ('comments', 'comments_v2'):
        for comment in comments.get(export_key, []):
            _process_comment_entry(request_identifier, comment)
def _scrub_field(container, field, annotate=True):
    """Replace plaintext string *field* in *container* with its encrypted form.

    Stores the ciphertext under 'pdk_encrypted_<field>', optionally records
    an annotation of the plaintext, and deletes the original key.
    """
    container['pdk_encrypted_' + field] = encrypt_content(container[field].encode('utf-8'))

    if annotate:
        annotate_field(container, field, container[field])

    del container[field]


def _scrub_json_field(container, field, annotate=True):
    """Serialize structured *field* to JSON, encrypt it, and drop the original."""
    field_str = json.dumps(container[field], indent=2)

    container['pdk_encrypted_' + field] = encrypt_content(field_str.encode('utf-8'))

    if annotate:
        annotate_field(container, field, field_str)

    del container[field]


def _scrub_attachment_datum(datum):
    """Scrub plaintext out of one post-attachment datum, in place.

    Covers the attachment shapes seen in Facebook post exports: events,
    external links, media items, and bare places. Mirrors the original
    inline logic exactly: event places are annotated, while media metadata
    and bare datum places are encrypted without annotation.
    """
    if 'event' in datum:
        event = datum['event']

        if 'name' in event:
            _scrub_field(event, 'name')

        if 'description' in event:
            _scrub_field(event, 'description')

        if 'place' in event:
            _scrub_json_field(event, 'place')

    if 'external_context' in datum:
        external_context = datum['external_context']

        if 'url' in external_context:
            _scrub_field(external_context, 'url')

    if 'media' in datum:
        media = datum['media']

        for media_field in ('title', 'description', 'uri'):
            if media_field in media:
                _scrub_field(media, media_field)

        if 'media_metadata' in media:
            _scrub_json_field(media, 'media_metadata', annotate=False)

    if 'place' in datum:
        _scrub_json_field(datum, 'place', annotate=False)


def process_posts(request_identifier, posts_raw):
    """Import posts from a Facebook 'posts' export file.

    A list payload is the user's own posts ('user' source); a dict payload
    is posts from others ('others' source), possibly wrapped under
    wall_posts_sent_to_you.activity_log_data, or a single bare post that
    carries its own 'timestamp'. Each post is deep-copied, scrubbed of
    plaintext, tagged with its source, queued as a
    'pdk-external-facebook-post' data point, and logged as an outgoing
    'post' engagement event.
    """
    posts = json.loads(posts_raw)

    source = 'user'

    if isinstance(posts, dict):
        source = 'others'

        if 'wall_posts_sent_to_you' in posts and 'activity_log_data' in posts['wall_posts_sent_to_you']:
            posts = posts['wall_posts_sent_to_you']['activity_log_data']

    # A single bare post object (rather than a list) wraps itself.
    if 'timestamp' in posts:
        posts = [posts]

    for post in posts:
        if not isinstance(post, dict):
            continue

        post = copy.deepcopy(post)  # Never mutate the caller's parsed JSON.

        created = arrow.get(post['timestamp']).datetime

        if not include_data(request_identifier, created, post):
            continue

        if 'title' in post:
            _scrub_field(post, 'title')

        for datum in post.get('data', []):
            if 'post' in datum:
                _scrub_field(datum, 'post')

        for attachment in post.get('attachments', []):
            for datum in attachment.get('data', []):
                _scrub_attachment_datum(datum)

        post['pdk_facebook_source'] = source

        queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-post', request_identifier, post, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))

        create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='post', start=created)
def process_viewed(request_identifier, viewed_raw): # pylint: disable=too-many-branches
    '''
    Imports the "things you viewed" section of a Facebook data export.

    Each recognized entry has its identifying fields ('uri', 'name', and for
    articles 'share') replaced in place with encrypted and hashed variants,
    then is queued as a data point, and a zero-weight outgoing engagement
    event is recorded for it. Unrecognized sections are ignored.

    request_identifier: identifier of the export request whose data is imported.
    viewed_raw: raw JSON string of the export's viewed-things file.
    '''
    def scrub_field(entry, field, annotate=False):
        # Replaces entry['data'][field] with encrypted and hashed variants,
        # optionally annotating the entry before the plaintext is deleted.
        data = entry['data']

        data['pdk_encrypted_' + field] = encrypt_content(data[field].encode('utf-8'))
        data['pdk_hashed_' + field] = hash_content(data[field].encode('utf-8'))

        if annotate:
            annotate_field(entry, field, data[field])

        del data[field]

    def record_entry(entry, created, generator_id, engagement_type, **event_extras):
        # Queues the scrubbed entry for insertion, then logs the engagement event.
        queue_batch_insert(DataPoint.objects.create_data_point(generator_id, request_identifier, entry, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
        create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.0, engagement_type=engagement_type, start=created, **event_extras)

    metadata = json.loads(viewed_raw)

    for thing in metadata['viewed_things']:
        if thing['name'] == 'Facebook Watch Videos and Shows':
            for child in thing['children']:
                if child['name'] == 'Shows':
                    for entry in child['entries']:
                        created = arrow.get(entry['timestamp']).datetime

                        if include_data(request_identifier, created, entry):
                            scrub_field(entry, 'uri')
                            scrub_field(entry, 'name', annotate=True)

                            record_entry(entry, created, 'pdk-external-facebook-watch', 'video')
                elif child['name'] == 'Time Viewed':
                    for entry in child['entries']:
                        created = arrow.get(entry['timestamp']).datetime

                        if include_data(request_identifier, created, entry):
                            scrub_field(entry, 'uri')
                            scrub_field(entry, 'name', annotate=True)

                            # Watch duration stays unencrypted and is reported on the event.
                            record_entry(entry, created, 'pdk-external-facebook-watch', 'video', duration=entry['data']['watch_position_seconds'])
        elif thing['name'] == 'Facebook Live Videos':
            for entry in thing['entries']:
                created = arrow.get(entry['timestamp']).datetime

                if include_data(request_identifier, created, entry):
                    scrub_field(entry, 'uri')
                    scrub_field(entry, 'name', annotate=True)

                    record_entry(entry, created, 'pdk-external-facebook-watch', 'video')
        elif thing['name'] == 'Articles':
            for entry in thing['entries']:
                created = arrow.get(entry['timestamp']).datetime

                if include_data(request_identifier, created, entry):
                    scrub_field(entry, 'uri')
                    scrub_field(entry, 'share')
                    scrub_field(entry, 'name', annotate=True)

                    record_entry(entry, created, 'pdk-external-facebook-link', 'link')
        elif thing['name'] == 'Marketplace Interactions':
            for child in thing['children']:
                if child['name'] == 'Marketplace Items':
                    for entry in child['entries']:
                        created = arrow.get(entry['timestamp']).datetime

                        if include_data(request_identifier, created, entry):
                            scrub_field(entry, 'uri')
                            scrub_field(entry, 'name', annotate=True)

                            record_entry(entry, created, 'pdk-external-facebook-market', 'shopping')
        elif thing['name'] == 'Ads':
            for entry in thing['entries']:
                created = arrow.get(entry['timestamp']).datetime

                if include_data(request_identifier, created, entry):
                    # Ad entries do not always include a URI.
                    if 'uri' in entry['data']:
                        scrub_field(entry, 'uri')

                    scrub_field(entry, 'name', annotate=True)

                    record_entry(entry, created, 'pdk-external-facebook-ad-viewed', 'advertising')
def process_visited(request_identifier, viewed_raw):
    '''
    Imports the "things you visited" section of a Facebook data export.

    All four visit sections (profiles, pages, events, groups) share one entry
    shape, so a lookup table maps the section name to the data point type and
    engagement type; everything else is identical per entry: 'uri' and 'name'
    are replaced in place with encrypted and hashed variants, the entry is
    queued as a data point, and a zero-weight outgoing engagement event is
    recorded.

    request_identifier: identifier of the export request whose data is imported.
    viewed_raw: raw JSON string of the export's visited-things file.
    '''
    # Maps the export's section name to (generator id, engagement type).
    visit_types = {
        'Profile visits': ('pdk-external-facebook-profile-visit', 'profile'),
        'Page visits': ('pdk-external-facebook-page-visit', 'page'),
        'Events visited': ('pdk-external-facebook-event-visit', 'event'),
        'Groups visited': ('pdk-external-facebook-group-visit', 'group'),
    }

    metadata = json.loads(viewed_raw)

    for thing in metadata['visited_things']:
        if thing['name'] not in visit_types:
            continue

        generator_id, engagement_type = visit_types[thing['name']]

        for entry in thing['entries']:
            created = arrow.get(entry['timestamp']).datetime

            if include_data(request_identifier, created, entry):
                data = entry['data']

                data['pdk_encrypted_uri'] = encrypt_content(data['uri'].encode('utf-8'))
                data['pdk_hashed_uri'] = hash_content(data['uri'].encode('utf-8'))
                del data['uri']

                data['pdk_encrypted_name'] = encrypt_content(data['name'].encode('utf-8'))
                data['pdk_hashed_name'] = hash_content(data['name'].encode('utf-8'))
                annotate_field(entry, 'name', data['name'])
                del data['name']

                queue_batch_insert(DataPoint.objects.create_data_point(generator_id, request_identifier, entry, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
                create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.0, engagement_type=engagement_type, start=created)
def process_page_reactions(request_identifier, reactions_raw):
    '''
    Imports page likes from a Facebook data export.

    Each page like has its 'name' field (when present) encrypted and removed,
    is tagged as a 'page' / 'like' reaction, queued as a
    'pdk-external-facebook-reaction' data point, and recorded as an outgoing
    'reaction' engagement event.
    '''
    payload = json.loads(reactions_raw)

    for page_like in payload['page_likes']:
        liked_at = arrow.get(page_like['timestamp']).datetime

        if not include_data(request_identifier, liked_at, page_like):
            continue

        if 'name' in page_like:
            page_like['pdk_encrypted_name'] = encrypt_content(page_like['name'].encode('utf-8'))
            annotate_field(page_like, 'name', page_like['name'])
            del page_like['name']

        page_like['content_type'] = 'page'
        page_like['reaction'] = 'like'

        queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-reaction', request_identifier, page_like, user_agent='Passive Data Kit External Importer', created=liked_at, skip_save=True, skip_extract_secondary_identifier=True))
        create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=liked_at)
def process_post_comment_reactions(request_identifier, reactions_raw): # pylint: disable=too-many-branches
    '''
    Imports post and comment reactions from a Facebook data export.

    The export may carry reactions under 'reactions' and/or 'reactions_v2';
    both lists are processed identically. Each reaction's title (used only to
    classify what was reacted to) and each contained actor name are encrypted
    and removed before the reaction is queued as a data point, and an outgoing
    'reaction' engagement event is recorded.
    '''
    # Maps a possessive suffix in the reaction title to the content type;
    # checked in order, first match wins.
    title_content_types = (
        ('\'s post', 'post'),
        ('\'s comment', 'comment'),
        ('\'s photo', 'photo'),
        ('\'s video', 'video'),
    )

    reactions = json.loads(reactions_raw)

    for list_key in ('reactions', 'reactions_v2'):
        for reaction in reactions.get(list_key, []):
            created = arrow.get(reaction['timestamp']).datetime

            if include_data(request_identifier, created, reaction):
                if 'title' in reaction:
                    reaction['pdk_encrypted_title'] = encrypt_content(reaction['title'].encode('utf-8'))
                    annotate_field(reaction, 'title', reaction['title'])

                    reaction['content_type'] = 'unknown'

                    for suffix, content_type in title_content_types:
                        if suffix in reaction['title']:
                            reaction['content_type'] = content_type
                            break

                    del reaction['title']

                if 'data' in reaction:
                    for data_item in reaction['data']:
                        if 'reaction' in data_item:
                            # Normalize reaction names (e.g. 'LIKE' -> 'like').
                            data_item['reaction']['reaction'] = data_item['reaction']['reaction'].lower()

                            if 'actor' in data_item['reaction']:
                                data_item['reaction']['pdk_encrypted_actor'] = encrypt_content(data_item['reaction']['actor'].encode('utf-8'))
                                annotate_field(data_item['reaction'], 'actor', data_item['reaction']['actor'])
                                del data_item['reaction']['actor']

                queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-reaction', request_identifier, reaction, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
                create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=created)
def process_messages(request_identifier, messages_raw, full_names):
    '''
    Imports messages from a Facebook inbox export file.

    Message content and shared links are encrypted and removed before the
    message is queued as a 'pdk-external-facebook-message' data point. The
    message direction is classified by checking the sender's name against
    full_names (names belonging to the data owner), and a matching incoming
    or outgoing 'message' engagement event is recorded.
    '''
    inbox = json.loads(messages_raw)

    for raw_message in inbox['messages']:
        message = copy.deepcopy(raw_message)

        created = None

        try:
            created = arrow.get(message['timestamp_ms']).datetime
        except ValueError:
            try:
                # Fall back to interpreting the value as milliseconds.
                created = arrow.get(message['timestamp_ms'] / 1000).datetime
            except ValueError:
                pass

        if created is None or not include_data(request_identifier, created, message):
            continue

        if 'content' in message:
            message['pdk_encrypted_content'] = encrypt_content(message['content'].encode('utf-8'))
            annotate_field(message, 'content', message['content'])
            del message['content']

        if 'share' in message:
            share = message['share']

            # Iterate a copy of the keys so the link can be deleted in place.
            for share_key in copy.deepcopy(share):
                if share_key == 'link':
                    share['pdk_encrypted_link'] = encrypt_content(share[share_key].encode('utf-8'))
                    annotate_field(share, 'link', share[share_key])
                    del share[share_key]

        if message['sender_name'] in full_names:
            message['pdk_direction'] = 'outgoing'
            create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='message', start=created)
        else:
            message['pdk_direction'] = 'incoming'
            create_engagement_event(source='facebook', identifier=request_identifier, incoming_engagement=1.0, engagement_type='message', start=created)

        queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-message', request_identifier, message, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
def process_search_history(request_identifier, searches_raw):
    '''
    Imports search history from a Facebook data export.

    For each text attachment found on a search, an encrypted query payload is
    built, an outgoing 'search' engagement event is recorded, and the payload
    is queued as a 'pdk-external-facebook-search' data point.
    '''
    history = json.loads(searches_raw)

    for search in history['searches']:
        searched_at = None

        try:
            searched_at = arrow.get(search['timestamp']).datetime
        except ValueError:
            try:
                # Fall back to interpreting the value as milliseconds.
                searched_at = arrow.get(search['timestamp'] / 1000).datetime
            except ValueError:
                pass

        if searched_at is None or not include_data(request_identifier, searched_at, search):
            continue

        if 'attachments' not in search:
            continue

        for attachment in search['attachments']:
            if 'data' in attachment:
                for item in attachment['data']:
                    if 'text' in item:
                        payload = {
                            'pdk_encrypted_query': encrypt_content(item['text'].encode('utf-8'))
                        }

                        annotate_field(payload, 'query', item['text'])

                        create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='search', start=searched_at)
                        queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-search', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=searched_at, skip_save=True, skip_extract_secondary_identifier=True))
def import_data(request_identifier, path): # pylint: disable=too-many-branches
    '''
    Imports a Facebook data export ZIP archive.

    Dispatches each file in the archive to the matching process_* handler,
    skips directory entries and known media/binary attachments, and prints a
    notice for anything unrecognized.

    request_identifier: identifier of the export request whose data is imported.
    path: filesystem path (or file-like object) of the export ZIP archive.

    Returns True when every file was handled (or skipped) cleanly; False as
    soon as any handler raises, after printing the traceback.
    '''
    # Media and other binary attachments that are intentionally skipped.
    skipped_extensions = ('.jpg', '.png', '.mp4', '.gif', '.pdf', '.webp', '.aac', '.mp3', '.psd', '.docx', '.otf', '.xml', '.zip', '.rar')

    content_bundle = zipfile.ZipFile(path)

    full_names = []

    for content_file in content_bundle.namelist():
        try:
            if re.match(r'^messages/inbox/.*\.json', content_file):
                if len(full_names) == 0: # pylint: disable=len-as-condition
                    # Lazily load the export owner's names once, for classifying
                    # message direction in process_messages.
                    try:
                        autofill = json.loads(content_bundle.open('messages/autofill_information.json').read())

                        full_names.extend(autofill['autofill_information_v2']['FULL_NAME'])
                    except KeyError:
                        pass # missing autofill_information.json

                process_messages(request_identifier, content_bundle.open(content_file).read(), full_names)
            elif content_file.endswith('/'):
                pass # Directory entry -- nothing to import.
            elif content_file.lower().endswith(skipped_extensions):
                pass # Media / binary attachment -- nothing to import.
            elif re.match(r'^photos_and_videos\/', content_file):
                pass
            elif re.match(r'^comments\/.*\.json', content_file):
                process_comments(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^comments_and_reactions\/comments.json', content_file):
                process_comments(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^posts\/.*\.json', content_file):
                process_posts(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^about_you\/viewed.json', content_file):
                process_viewed(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^about_you\/visited.json', content_file):
                process_visited(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^likes_and_reactions\/pages.json', content_file):
                process_page_reactions(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^likes_and_reactions\/posts_and_comments.json', content_file):
                process_post_comment_reactions(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^comments_and_reactions\/posts_and_comments.json', content_file):
                process_post_comment_reactions(request_identifier, content_bundle.open(content_file).read())
            elif re.match(r'^search_history\/your_search_history.json', content_file):
                process_search_history(request_identifier, content_bundle.open(content_file).read())
            else:
                print('FACEBOOK[' + request_identifier + ']: Unable to process: ' + content_file + ' -- ' + str(content_bundle.getinfo(content_file).file_size))
        except: # pylint: disable=bare-except
            traceback.print_exc()

            return False

    return True
def external_data_metadata(generator_identifier, point):
    '''
    Returns display metadata for a Facebook-originated data point.

    Returns None for generators that are not Facebook external data. For
    comments and posts, fills in a friendly event name, direction, and media
    type; posts additionally inspect the point's properties to refine the
    media type.
    '''
    if not generator_identifier.startswith('pdk-external-facebook'):
        return None

    metadata = {
        'service': 'Facebook',
        'event': generator_identifier,
    }

    if generator_identifier == 'pdk-external-facebook-comment':
        metadata['event'] = 'Upload Comment'
        metadata['direction'] = 'Outgoing'
        metadata['media_type'] = 'Text'
    elif generator_identifier == 'pdk-external-facebook-post':
        metadata['event'] = 'Upload Post'
        metadata['direction'] = 'Outgoing'
        metadata['media_type'] = 'Text'

        properties = point.fetch_properties()

        # Later checks override earlier ones, so the most specific media
        # type present wins: Location > Multimedia > Link > Text.
        if 'pdk_encrypted_url' in properties:
            metadata['media_type'] = 'Link'

        if 'pdk_encrypted_media_metadata' in properties:
            metadata['media_type'] = 'Multimedia'

        if 'pdk_encrypted_place' in properties:
            metadata['media_type'] = 'Location'

    return metadata
def update_data_type_definition(definition): # pylint: disable=too-many-statements
if 'pdk-external-facebook-post' in definition['passive-data-metadata.generator-id']['observed']:
del definition['attachments']
if 'pdk_encrypted_title' in definition:
definition['pdk_encrypted_title']['is_freetext'] = True
definition['pdk_encrypted_title']['pdk_variable_name'] = 'Encrypted post title'
definition['pdk_encrypted_title']['pdk_variable_description'] = 'Encrypted title of the original post, saved for use later (with proper authorizations and keys).'
definition['pdk_encrypted_title']['pdk_codebook_order'] = 0
definition['pdk_encrypted_title']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'data[].pdk_encrypted_post' in definition:
definition['data[].pdk_encrypted_post']['is_freetext'] = True
definition['data[].pdk_encrypted_post']['pdk_variable_name'] = 'Encrypted post contents'
definition['data[].pdk_encrypted_post']['pdk_variable_description'] = 'Encrypted contents of the original post, saved for use later (with proper authorizations and keys).'
definition['data[].pdk_encrypted_post']['pdk_codebook_order'] = 1
definition['data[].pdk_encrypted_post']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_uri' in definition:
definition['attachments[].data[].media.pdk_encrypted_uri']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_variable_name'] = 'Encrypted remote content URI'
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_variable_description'] = 'Encrypted contents of the original post media URI, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_codebook_order'] = 2
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_description' in definition:
definition['attachments[].data[].media.pdk_encrypted_description']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_variable_name'] = 'Encrypted media description'
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_variable_description'] = 'Encrypted description of media item attached to the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_codebook_order'] = 3
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_media_metadata' in definition:
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_variable_name'] = 'Encrypted media metadata'
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_variable_description'] = 'Encrypted metadata of media item attached to the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_codebook_order'] = 4
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].external_context.pdk_encrypted_url' in definition:
definition['attachments[].data[].external_context.pdk_encrypted_url']['is_freetext'] = True
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_variable_name'] = 'Encrypted URL'
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_variable_description'] = 'Encrypted contents of the original URL shared in the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_codebook_order'] = 5
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_title' in definition:
definition['attachments[].data[].media.pdk_encrypted_title']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_variable_name'] = 'Encrypted remote content title'
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_variable_description'] = 'Encrypted contents of the original post media title, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_codebook_order'] = 6
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].pdk_encrypted_place' in definition:
definition['attachments[].data[].pdk_encrypted_place']['is_freetext'] = True
definition['attachments[].data[].pdk_encrypted_place']['pdk_variable_name'] = 'Encrypted place name'
definition['attachments[].data[].pdk_encrypted_place']['pdk_variable_description'] = 'Encrypted name of the place tagged on the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].pdk_encrypted_place']['pdk_codebook_order'] = 7
definition['attachments[].data[].pdk_encrypted_place']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post' | importers/facebook.py |
from __future__ import print_function
import copy
import json
import re
import traceback
import zipfile
import arrow
from passive_data_kit.models import DataPoint
from passive_data_kit_external_data.models import annotate_field
from ..utils import hash_content, encrypt_content, create_engagement_event, queue_batch_insert, include_data
def process_comments(request_identifier, comments_raw): # pylint: disable=too-many-branches
    '''
    Imports comments from a Facebook data export.

    The export may carry comments under 'comments' and/or 'comments_v2'; both
    lists are processed identically. Free-text fields (comment title, nested
    comment body) and the author name are encrypted and removed in place
    before each comment is queued as a 'pdk-external-facebook-comment' data
    point, and an outgoing 'comment' engagement event is recorded.
    '''
    comments = json.loads(comments_raw)

    for list_key in ('comments', 'comments_v2'):
        for comment in comments.get(list_key, []):
            comment = copy.deepcopy(comment)

            created = arrow.get(comment['timestamp']).datetime

            if include_data(request_identifier, created, comment):
                if 'title' in comment:
                    comment['pdk_encrypted_title'] = encrypt_content(comment['title'].encode('utf-8'))
                    annotate_field(comment, 'title', comment['title'])
                    del comment['title']

                if 'data' in comment:
                    for datum in comment['data']:
                        if 'comment' in datum:
                            comment_obj = datum['comment']

                            if 'comment' in comment_obj:
                                comment_obj['pdk_encrypted_comment'] = encrypt_content(comment_obj['comment'].encode('utf-8'))
                                annotate_field(comment_obj, 'comment', comment_obj['comment'])
                                del comment_obj['comment']

                            if 'author' in comment_obj:
                                # NOTE(review): unlike every other hash_content call
                                # site in this module, the author is passed as str,
                                # not UTF-8 bytes -- confirm hash_content accepts str.
                                comment_obj['pdk_hashed_author'] = hash_content(comment_obj['author'])
                                comment_obj['pdk_encrypted_author'] = encrypt_content(comment_obj['author'].encode('utf-8'))
                                del comment_obj['author']

                queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-comment', request_identifier, comment, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
                create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='comment', start=created)
def process_posts(request_identifier, posts_raw): # pylint: disable=too-many-branches, too-many-statements
    '''
    Imports posts from a Facebook data export.

    Free-text and identifying fields (titles, post bodies, event names and
    descriptions, external URLs, media titles/descriptions/URIs/metadata, and
    places) are encrypted in place before each post is queued as a
    'pdk-external-facebook-post' data point, and an outgoing 'post'
    engagement event is recorded for each imported post.

    request_identifier: identifier of the export request whose data is imported.
    posts_raw: raw JSON string -- either a list of posts (posts made by the
               user) or a dict (the "wall posts sent to you" export variant).
    '''
    posts = json.loads(posts_raw)

    source = 'user'

    if isinstance(posts, dict):
        # A dict payload is the "wall posts sent to you" variant: posts made
        # by others on the user's wall.
        source = 'others'

        if 'wall_posts_sent_to_you' in posts and 'activity_log_data' in posts['wall_posts_sent_to_you']:
            posts = posts['wall_posts_sent_to_you']['activity_log_data']

        if 'timestamp' in posts:
            # A single post object -- wrap it so the loop below works.
            posts = [posts]

    for post in posts: # pylint: disable=too-many-nested-blocks
        post = copy.deepcopy(post) # Avoid mutating the caller's parsed payload.

        if isinstance(post, dict):
            created = arrow.get(post['timestamp']).datetime

            if include_data(request_identifier, created, post):
                # Each field below follows the same scrub pattern:
                # store the encrypted value, annotate, delete the plaintext.
                if 'title' in post:
                    post['pdk_encrypted_title'] = encrypt_content(post['title'].encode('utf-8'))
                    annotate_field(post, 'title', post['title'])
                    del post['title']

                if 'data' in post:
                    for datum in post['data']:
                        if 'post' in datum:
                            # The post body itself.
                            datum['pdk_encrypted_post'] = encrypt_content(datum['post'].encode('utf-8'))
                            annotate_field(datum, 'post', datum['post'])
                            del datum['post']

                if 'attachments' in post:
                    for attachment in post['attachments']:
                        if 'data' in attachment:
                            for datum in attachment['data']:
                                if 'event' in datum:
                                    # Attached Facebook event.
                                    event = datum['event']

                                    if 'name' in event:
                                        event['pdk_encrypted_name'] = encrypt_content(event['name'].encode('utf-8'))
                                        annotate_field(event, 'name', event['name'])
                                        del event['name']

                                    if 'description' in event:
                                        event['pdk_encrypted_description'] = encrypt_content(event['description'].encode('utf-8'))
                                        annotate_field(event, 'description', event['description'])
                                        del event['description']

                                    if 'place' in event:
                                        # Place is a nested object -- serialize before encrypting.
                                        place_str = json.dumps(event['place'], indent=2)

                                        event['pdk_encrypted_place'] = encrypt_content(place_str.encode('utf-8'))
                                        annotate_field(event, 'place', place_str)
                                        del event['place']

                                if 'external_context' in datum:
                                    # Attached external link.
                                    external_context = datum['external_context']

                                    if 'url' in external_context:
                                        external_context['pdk_encrypted_url'] = encrypt_content(external_context['url'].encode('utf-8'))
                                        annotate_field(external_context, 'url', external_context['url'])
                                        del external_context['url']

                                if 'media' in datum:
                                    # Attached photo/video.
                                    media = datum['media']

                                    if 'title' in media:
                                        media['pdk_encrypted_title'] = encrypt_content(media['title'].encode('utf-8'))
                                        annotate_field(media, 'title', media['title'])
                                        del media['title']

                                    if 'description' in media:
                                        media['pdk_encrypted_description'] = encrypt_content(media['description'].encode('utf-8'))
                                        annotate_field(media, 'description', media['description'])
                                        del media['description']

                                    if 'uri' in media:
                                        media['pdk_encrypted_uri'] = encrypt_content(media['uri'].encode('utf-8'))
                                        annotate_field(media, 'uri', media['uri'])
                                        del media['uri']

                                    if 'media_metadata' in media:
                                        # Serialized and encrypted without annotation.
                                        metadata_str = json.dumps(media['media_metadata'], indent=2)

                                        media['pdk_encrypted_media_metadata'] = encrypt_content(metadata_str.encode('utf-8'))
                                        del media['media_metadata']

                                if 'place' in datum:
                                    # Serialized and encrypted without annotation.
                                    place_str = json.dumps(datum['place'], indent=2)

                                    datum['pdk_encrypted_place'] = encrypt_content(place_str.encode('utf-8'))
                                    del datum['place']

                post['pdk_facebook_source'] = source

                queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-post', request_identifier, post, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
                create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='post', start=created)
def _encrypt_viewed_field(entry, field, annotate=False):
    """Encrypt and hash entry['data'][field] in place, then delete the plaintext.

    Adds 'pdk_encrypted_<field>' and 'pdk_hashed_<field>' keys to the entry's
    data payload. When annotate is True, the plaintext value is first recorded
    on the entry itself via annotate_field.
    """
    value = entry['data'][field]
    entry['data']['pdk_encrypted_' + field] = encrypt_content(value.encode('utf-8'))
    entry['data']['pdk_hashed_' + field] = hash_content(value.encode('utf-8'))
    if annotate:
        annotate_field(entry, field, value)
    del entry['data'][field]

def _import_viewed_entries(request_identifier, entries, generator_id, engagement_type, extra_fields=(), uri_optional=False, duration_field=None):
    """Queue one data point and one engagement event per viewed entry.

    For each entry that passes include_data, encrypts its 'uri' (skipped when
    uri_optional is True and no uri is present), then each field in
    extra_fields, then its 'name' (which is also annotated). When
    duration_field is given, the engagement event carries that value from the
    entry's data payload as its duration.
    """
    for entry in entries:
        created = arrow.get(entry['timestamp']).datetime
        if include_data(request_identifier, created, entry) is False:
            continue
        if (uri_optional is False) or ('uri' in entry['data']):
            _encrypt_viewed_field(entry, 'uri')
        for field in extra_fields:
            _encrypt_viewed_field(entry, field)
        _encrypt_viewed_field(entry, 'name', annotate=True)
        queue_batch_insert(DataPoint.objects.create_data_point(generator_id, request_identifier, entry, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
        # Duration is read after the insert call, matching the original
        # statement order in case the data point creation mutates the entry.
        if duration_field is None:
            create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.0, engagement_type=engagement_type, start=created)
        else:
            create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.0, engagement_type=engagement_type, start=created, duration=entry['data'][duration_field])

def process_viewed(request_identifier, viewed_raw):
    """Import Facebook viewing history from a 'viewed' export payload.

    Routes shows, watch times, live videos, articles, marketplace items, and
    ads to the matching data point generator and engagement type, encrypting
    identifying fields before queueing.
    """
    metadata = json.loads(viewed_raw)
    for thing in metadata['viewed_things']:
        if thing['name'] == 'Facebook Watch Videos and Shows':
            for child in thing['children']:
                if child['name'] == 'Shows':
                    _import_viewed_entries(request_identifier, child['entries'], 'pdk-external-facebook-watch', 'video')
                elif child['name'] == 'Time Viewed':
                    _import_viewed_entries(request_identifier, child['entries'], 'pdk-external-facebook-watch', 'video', duration_field='watch_position_seconds')
        elif thing['name'] == 'Facebook Live Videos':
            _import_viewed_entries(request_identifier, thing['entries'], 'pdk-external-facebook-watch', 'video')
        elif thing['name'] == 'Articles':
            # Articles additionally carry a 'share' field that must be scrubbed.
            _import_viewed_entries(request_identifier, thing['entries'], 'pdk-external-facebook-link', 'link', extra_fields=('share',))
        elif thing['name'] == 'Marketplace Interactions':
            for child in thing['children']:
                if child['name'] == 'Marketplace Items':
                    _import_viewed_entries(request_identifier, child['entries'], 'pdk-external-facebook-market', 'shopping')
        elif thing['name'] == 'Ads':
            # Ad entries do not always include a uri.
            _import_viewed_entries(request_identifier, thing['entries'], 'pdk-external-facebook-ad-viewed', 'advertising', uri_optional=True)
def process_visited(request_identifier, viewed_raw):
    """Import Facebook visit history (profiles, pages, events, groups).

    Each supported "thing" maps to its own data point generator and engagement
    type; the visited item's uri and name are encrypted and hashed (the name
    is also annotated) before the plaintext is removed and the entry queued.
    """
    # Export section name -> (data point generator id, engagement type).
    thing_types = {
        'Profile visits': ('pdk-external-facebook-profile-visit', 'profile'),
        'Page visits': ('pdk-external-facebook-page-visit', 'page'),
        'Events visited': ('pdk-external-facebook-event-visit', 'event'),
        'Groups visited': ('pdk-external-facebook-group-visit', 'group'),
    }
    metadata = json.loads(viewed_raw)
    for thing in metadata['visited_things']:
        if thing['name'] not in thing_types:
            continue
        generator_id, engagement_type = thing_types[thing['name']]
        for entry in thing['entries']:
            created = arrow.get(entry['timestamp']).datetime
            if include_data(request_identifier, created, entry) is False:
                continue
            for field in ('uri', 'name'):
                value = entry['data'][field]
                entry['data']['pdk_encrypted_' + field] = encrypt_content(value.encode('utf-8'))
                entry['data']['pdk_hashed_' + field] = hash_content(value.encode('utf-8'))
                if field == 'name':
                    annotate_field(entry, field, value)
                del entry['data'][field]
            queue_batch_insert(DataPoint.objects.create_data_point(generator_id, request_identifier, entry, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
            create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.0, engagement_type=engagement_type, start=created)
def process_page_reactions(request_identifier, reactions_raw):
    """Import Facebook page likes as 'like' reactions on 'page' content."""
    page_likes = json.loads(reactions_raw)
    for page_like in page_likes['page_likes']:
        created = arrow.get(page_like['timestamp']).datetime
        if include_data(request_identifier, created, page_like) is False:
            continue
        if 'name' in page_like:
            liked_name = page_like['name']
            page_like['pdk_encrypted_name'] = encrypt_content(liked_name.encode('utf-8'))
            annotate_field(page_like, 'name', liked_name)
            del page_like['name']
        page_like['content_type'] = 'page'
        page_like['reaction'] = 'like'
        queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-reaction', request_identifier, page_like, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
        create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=created)
def process_post_comment_reactions(request_identifier, reactions_raw):
    """Import Facebook post/comment reactions from both export schema versions.

    The 'reactions' (v1) and 'reactions_v2' lists share an identical item
    shape, so both are processed by the same loop. The reaction title is
    encrypted and annotated, its suffix classified into a content type, and
    each nested actor is encrypted before the entry is queued.
    """
    # Title suffix marker -> content type, checked in order; first match wins.
    content_type_markers = (
        ('\'s post', 'post'),
        ('\'s comment', 'comment'),
        ('\'s photo', 'photo'),
        ('\'s video', 'video'),
    )
    reactions = json.loads(reactions_raw)
    for reactions_key in ('reactions', 'reactions_v2'):
        for reaction in reactions.get(reactions_key, []):
            created = arrow.get(reaction['timestamp']).datetime
            if include_data(request_identifier, created, reaction) is False:
                continue
            if 'title' in reaction:
                reaction['pdk_encrypted_title'] = encrypt_content(reaction['title'].encode('utf-8'))
                annotate_field(reaction, 'title', reaction['title'])
                reaction['content_type'] = 'unknown'
                for marker, content_type in content_type_markers:
                    if marker in reaction['title']:
                        reaction['content_type'] = content_type
                        break
                del reaction['title']
            if 'data' in reaction:
                for data_item in reaction['data']:
                    if 'reaction' in data_item:
                        data_item['reaction']['reaction'] = data_item['reaction']['reaction'].lower()
                        if 'actor' in data_item['reaction']:
                            data_item['reaction']['pdk_encrypted_actor'] = encrypt_content(data_item['reaction']['actor'].encode('utf-8'))
                            annotate_field(data_item['reaction'], 'actor', data_item['reaction']['actor'])
                            del data_item['reaction']['actor']
            queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-reaction', request_identifier, reaction, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
            create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='reaction', start=created)
def process_messages(request_identifier, messages_raw, full_names):
    """Import Facebook messages, encrypting content and shared links.

    Messages whose sender_name appears in full_names are tagged outgoing;
    all others incoming. The timestamp is parsed as-is first, then divided
    by 1000; messages with an unparseable timestamp are skipped.
    """
    for raw_message in json.loads(messages_raw)['messages']:
        message = copy.deepcopy(raw_message)

        created = None
        try:
            created = arrow.get(message['timestamp_ms']).datetime
        except ValueError:
            try:
                created = arrow.get(message['timestamp_ms'] / 1000).datetime
            except ValueError:
                pass  # Unparseable timestamp -- message is skipped below.

        if created is None:
            continue
        if include_data(request_identifier, created, message) is False:
            continue

        if 'content' in message:
            message['pdk_encrypted_content'] = encrypt_content(message['content'].encode('utf-8'))
            annotate_field(message, 'content', message['content'])
            del message['content']

        if 'share' in message:
            share = message['share']
            if 'link' in share:
                share['pdk_encrypted_link'] = encrypt_content(share['link'].encode('utf-8'))
                annotate_field(share, 'link', share['link'])
                del share['link']

        if message['sender_name'] in full_names:
            message['pdk_direction'] = 'outgoing'
            create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=1.0, engagement_type='message', start=created)
        else:
            message['pdk_direction'] = 'incoming'
            create_engagement_event(source='facebook', identifier=request_identifier, incoming_engagement=1.0, engagement_type='message', start=created)

        queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-message', request_identifier, message, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
def process_search_history(request_identifier, searches_raw):
    """Import Facebook search history, storing each query only in encrypted form."""
    searches = json.loads(searches_raw)
    for search in searches['searches']:
        created = None
        try:
            created = arrow.get(search['timestamp']).datetime
        except ValueError:
            try:
                created = arrow.get(search['timestamp'] / 1000).datetime
            except ValueError:
                pass  # Unparseable timestamp -- search is skipped below.

        if created is None:
            continue
        if include_data(request_identifier, created, search) is False:
            continue

        for attachment in search.get('attachments', []):
            for data in attachment.get('data', []):
                if 'text' not in data:
                    continue
                payload = {
                    'pdk_encrypted_query': encrypt_content(data['text'].encode('utf-8'))
                }
                annotate_field(payload, 'query', data['text'])
                create_engagement_event(source='facebook', identifier=request_identifier, outgoing_engagement=0.5, engagement_type='search', start=created)
                queue_batch_insert(DataPoint.objects.create_data_point('pdk-external-facebook-search', request_identifier, payload, user_agent='Passive Data Kit External Importer', created=created, skip_save=True, skip_extract_secondary_identifier=True))
def import_data(request_identifier, path): # pylint: disable=too-many-branches, too-many-statements
content_bundle = zipfile.ZipFile(path)
full_names = []
for content_file in content_bundle.namelist():
try:
if re.match(r'^messages/inbox/.*\.json', content_file):
if len(full_names) == 0: # pylint: disable=len-as-condition
try:
autofill = json.loads(content_bundle.open('messages/autofill_information.json').read())
full_names.extend(autofill['autofill_information_v2']['FULL_NAME'])
except KeyError:
pass # missing autofill_information.json
process_messages(request_identifier, content_bundle.open(content_file).read(), full_names)
elif content_file.endswith('/'):
pass
elif content_file.lower().endswith('.jpg'):
pass
elif content_file.lower().endswith('.png'):
pass
elif content_file.lower().endswith('.mp4'):
pass
elif content_file.lower().endswith('.gif'):
pass
elif content_file.lower().endswith('.pdf'):
pass
elif content_file.lower().endswith('.webp'):
pass
elif content_file.lower().endswith('.aac'):
pass
elif content_file.lower().endswith('.mp3'):
pass
elif content_file.lower().endswith('.psd'):
pass
elif content_file.lower().endswith('.docx'):
pass
elif content_file.lower().endswith('.otf'):
pass
elif content_file.lower().endswith('.xml'):
pass
elif content_file.lower().endswith('.zip'):
pass
elif content_file.lower().endswith('.rar'):
pass
elif re.match(r'^photos_and_videos\/', content_file):
pass
elif re.match(r'^comments\/.*\.json', content_file):
process_comments(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^comments_and_reactions\/comments.json', content_file):
process_comments(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^posts\/.*\.json', content_file):
process_posts(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^about_you\/viewed.json', content_file):
process_viewed(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^about_you\/visited.json', content_file):
process_visited(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^likes_and_reactions\/pages.json', content_file):
process_page_reactions(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^likes_and_reactions\/posts_and_comments.json', content_file):
process_post_comment_reactions(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^comments_and_reactions\/posts_and_comments.json', content_file):
process_post_comment_reactions(request_identifier, content_bundle.open(content_file).read())
elif re.match(r'^search_history\/your_search_history.json', content_file):
process_search_history(request_identifier, content_bundle.open(content_file).read())
else:
print('FACEBOOK[' + request_identifier + ']: Unable to process: ' + content_file + ' -- ' + str(content_bundle.getinfo(content_file).file_size))
except: # pylint: disable=bare-except
traceback.print_exc()
return False
return True
def external_data_metadata(generator_identifier, point):
    """Describe a Facebook external data point for display purposes.

    Returns None for non-Facebook generators. For posts, the media type is
    refined from the point's stored properties (link, multimedia, location).
    """
    if not generator_identifier.startswith('pdk-external-facebook'):
        return None

    metadata = {
        'service': 'Facebook',
        'event': generator_identifier,
    }

    if generator_identifier == 'pdk-external-facebook-comment':
        metadata.update({'event': 'Upload Comment', 'direction': 'Outgoing', 'media_type': 'Text'})
    elif generator_identifier == 'pdk-external-facebook-post':
        metadata.update({'event': 'Upload Post', 'direction': 'Outgoing', 'media_type': 'Text'})
        properties = point.fetch_properties()
        # Later checks intentionally override earlier ones (location wins).
        if 'pdk_encrypted_url' in properties:
            metadata['media_type'] = 'Link'
        if 'pdk_encrypted_media_metadata' in properties:
            metadata['media_type'] = 'Multimedia'
        if 'pdk_encrypted_place' in properties:
            metadata['media_type'] = 'Location'

    return metadata
def update_data_type_definition(definition): # pylint: disable=too-many-statements
if 'pdk-external-facebook-post' in definition['passive-data-metadata.generator-id']['observed']:
del definition['attachments']
if 'pdk_encrypted_title' in definition:
definition['pdk_encrypted_title']['is_freetext'] = True
definition['pdk_encrypted_title']['pdk_variable_name'] = 'Encrypted post title'
definition['pdk_encrypted_title']['pdk_variable_description'] = 'Encrypted title of the original post, saved for use later (with proper authorizations and keys).'
definition['pdk_encrypted_title']['pdk_codebook_order'] = 0
definition['pdk_encrypted_title']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'data[].pdk_encrypted_post' in definition:
definition['data[].pdk_encrypted_post']['is_freetext'] = True
definition['data[].pdk_encrypted_post']['pdk_variable_name'] = 'Encrypted post contents'
definition['data[].pdk_encrypted_post']['pdk_variable_description'] = 'Encrypted contents of the original post, saved for use later (with proper authorizations and keys).'
definition['data[].pdk_encrypted_post']['pdk_codebook_order'] = 1
definition['data[].pdk_encrypted_post']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_uri' in definition:
definition['attachments[].data[].media.pdk_encrypted_uri']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_variable_name'] = 'Encrypted remote content URI'
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_variable_description'] = 'Encrypted contents of the original post media URI, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_codebook_order'] = 2
definition['attachments[].data[].media.pdk_encrypted_uri']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_description' in definition:
definition['attachments[].data[].media.pdk_encrypted_description']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_variable_name'] = 'Encrypted media description'
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_variable_description'] = 'Encrypted description of media item attached to the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_codebook_order'] = 3
definition['attachments[].data[].media.pdk_encrypted_description']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_media_metadata' in definition:
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_variable_name'] = 'Encrypted media metadata'
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_variable_description'] = 'Encrypted metadata of media item attached to the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_codebook_order'] = 4
definition['attachments[].data[].media.pdk_encrypted_media_metadata']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].external_context.pdk_encrypted_url' in definition:
definition['attachments[].data[].external_context.pdk_encrypted_url']['is_freetext'] = True
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_variable_name'] = 'Encrypted URL'
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_variable_description'] = 'Encrypted contents of the original URL shared in the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_codebook_order'] = 5
definition['attachments[].data[].external_context.pdk_encrypted_url']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].media.pdk_encrypted_title' in definition:
definition['attachments[].data[].media.pdk_encrypted_title']['is_freetext'] = True
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_variable_name'] = 'Encrypted remote content title'
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_variable_description'] = 'Encrypted contents of the original post media title, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_codebook_order'] = 6
definition['attachments[].data[].media.pdk_encrypted_title']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post'
if 'attachments[].data[].pdk_encrypted_place' in definition:
definition['attachments[].data[].pdk_encrypted_place']['is_freetext'] = True
definition['attachments[].data[].pdk_encrypted_place']['pdk_variable_name'] = 'Encrypted place name'
definition['attachments[].data[].pdk_encrypted_place']['pdk_variable_description'] = 'Encrypted name of the place tagged on the post, saved for use later (with proper authorizations and keys).'
definition['attachments[].data[].pdk_encrypted_place']['pdk_codebook_order'] = 7
definition['attachments[].data[].pdk_encrypted_place']['pdk_codebook_group'] = 'Passive Data Kit: External Data: Facebook Post' | 0.38827 | 0.070144 |
import pytest
import pgdb
from test_02_submit_rider import generic_rider_insert
from test_03_submit_driver import generic_driver_insert
from test_04_matches import getMatcherActivityStats, getMatchRecord
@pytest.fixture
def pgdbConnMatchEngine(dbhost, db, matchengineuser):
    """Connection to the carpool database as the match-engine role."""
    dsn = ':'.join([dbhost, db, matchengineuser])
    return pgdb.connect(dsn)
@pytest.fixture
def pgdbConnAdmin(dbhost, db, adminuser):
    """Connection to the carpool database as the administrative role."""
    dsn = ':'.join([dbhost, db, adminuser])
    return pgdb.connect(dsn)
@pytest.fixture
def pgdbConnWeb(dbhost, db, frontenduser):
    """Connection to the carpool database as the web front-end role."""
    dsn = ':'.join([dbhost, db, frontenduser])
    return pgdb.connect(dsn)
def cleanup(pgdbConnAdmin):
    """Empty all carpoolvote tables so each test starts from a clean slate.

    NOTE(review): deletion order is kept exactly as in the original --
    presumably dependent tables are cleared before the ones they reference.
    """
    tables = (
        'match',
        'match_engine_activity_log',
        'outgoing_email',
        'outgoing_sms',
        'rider',
        'driver',
        'helper',
    )
    cursor = pgdbConnAdmin.cursor()
    for table in tables:
        cursor.execute('DELETE FROM carpoolvote.' + table)
    pgdbConnAdmin.commit()
def test_user_actions_001_driver_cancels_drive_offer_input_val(pgdbConnAdmin, pgdbConnWeb):
cursor = pgdbConnWeb.cursor()
cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
{'uuid' : '12345', 'confparam' : '12346'})
results = cursor.fetchone()
assert results[0] == 2
assert len(results[1]) > 0
def test_user_actions_002_driver_cancels_drive_offer_email_only(pgdbConnAdmin, pgdbConnWeb):
    """Driver with 'Email' contact preference: submit an offer, then cancel it twice.

    Verifies the offer is created with exactly one outgoing email (no SMS),
    that cancellation succeeds and flips the status to 'Canceled', that
    cancellation queues a second email (still no SMS), and that canceling an
    already-canceled offer is an accepted no-op.
    """
    cleanup(pgdbConnAdmin)

    def notification_count(table_name, uuid):
        # Count queued notifications of one kind (outgoing_email / outgoing_sms).
        admin_cursor = pgdbConnAdmin.cursor()
        admin_cursor.execute('SELECT COUNT(*) FROM carpoolvote.' + table_name + ' WHERE uuid=%(uuid)s ',
                             {'uuid' : uuid})
        return admin_cursor.fetchone()[0]

    def driver_status(uuid):
        # Fetch the driver's current workflow status.
        admin_cursor = pgdbConnAdmin.cursor()
        admin_cursor.execute('SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s ',
                             {'uuid' : uuid})
        return admin_cursor.fetchone()[0]

    def cancel_offer(uuid, confirmation):
        # Invoke the cancellation stored function; returns the result row.
        web_cursor = pgdbConnWeb.cursor()
        web_cursor.execute('SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)',
                           {'uuid' : uuid, 'confparam' : confirmation})
        return web_cursor.fetchone()

    # 1. Insert a drive offer whose preferred contact channel is email.
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',
        'DriverWillTakeCare' : 'True'
    }
    insert_results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver = insert_results['uuid']
    error_code = insert_results['error_code']  # unused; read for parity with the original flow
    error_text = insert_results['error_text']  # unused; read for parity with the original flow
    assert len(uuid_driver) > 0
    pgdbConnWeb.commit()

    # 2. Insertion queues exactly one email and no SMS.
    assert notification_count('outgoing_email', uuid_driver) == 1
    assert notification_count('outgoing_sms', uuid_driver) == 0

    # 3. Cancel the offer, confirming with the driver's last name.
    results = cancel_offer(uuid_driver, driver_args['DriverLastName'])
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()

    # 4. The offer is now marked canceled.
    assert driver_status(uuid_driver) == 'Canceled'

    # 5. Cancellation queues one more email; SMS count is unchanged.
    assert notification_count('outgoing_email', uuid_driver) == 2
    assert notification_count('outgoing_sms', uuid_driver) == 0

    # 6. Canceling again still reports success.
    results = cancel_offer(uuid_driver, driver_args['DriverLastName'])
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()

    # 7. Status remains 'Canceled'.
    assert driver_status(uuid_driver) == 'Canceled'
def test_user_actions_003_driver_cancels_drive_offer_email_sms(pgdbConnAdmin, pgdbConnWeb):
    """Driver whose preferred contact is SMS cancels an offer.

    Expects one email + one SMS queued on insert, and a second of each
    queued by the cancellation.
    """
    cleanup(pgdbConnAdmin)

    # 1. Submit a drive offer with SMS as the preferred contact channel.
    driver_args = {
        'IPAddress': '127.0.0.1',
        'DriverCollectionZIP': '90210',
        'DriverCollectionRadius': '10',
        'AvailableDriveTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair': 'True',
        'SeatCount': '1',
        'DriverLicenseNumber': '',
        'DriverFirstName': 'DriverFirstName',
        'DriverLastName': 'DriverLastName',
        'DriverEmail': '<EMAIL>',
        'DriverPhone': '666-666-6666',
        'DrivingOnBehalfOfOrganization': 'True',
        'DrivingOBOOrganizationName': 'Good Org',
        'RidersCanSeeDriverDetails': 'True',
        'DriverWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'DriverPreferredContact': 'SMS',
        'DriverWillTakeCare': 'True'
    }
    inserted = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_driver) > 0
    pgdbConnWeb.commit()

    # 2. Insertion should have queued exactly one email and one SMS.
    admin_cur = pgdbConnAdmin.cursor()
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                      {'uuid': uuid_driver})
    assert admin_cur.fetchone()[0] == 1
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                      {'uuid': uuid_driver})
    assert admin_cur.fetchone()[0] == 1

    # 3. Cancel the offer, confirming identity with the driver's last name.
    web_cur = pgdbConnWeb.cursor()
    web_cur.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                    {'uuid': uuid_driver, 'confparam': driver_args['DriverLastName']})
    row = web_cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()

    # 4. The offer is now marked Canceled.
    admin_cur = pgdbConnAdmin.cursor()
    admin_cur.execute("""SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s """,
                      {'uuid': uuid_driver})
    assert admin_cur.fetchone()[0] == 'Canceled'

    # 5. Cancellation queued a second email and a second SMS.
    admin_cur = pgdbConnAdmin.cursor()
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                      {'uuid': uuid_driver})
    assert admin_cur.fetchone()[0] == 2
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                      {'uuid': uuid_driver})
    assert admin_cur.fetchone()[0] == 2
def test_user_actions_004_rider_cancels_ride_request_input_val(pgdbConnAdmin, pgdbConnWeb):
    """A bogus UUID/confirmation pair must fail input validation (code 2 + message)."""
    cur = pgdbConnWeb.cursor()
    cur.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                {'uuid': '12345', 'confparam': '12346'})
    row = cur.fetchone()
    # row[0] is the error code, row[1] the error text.
    assert row[0] == 2
    assert len(row[1]) > 0
def test_user_actions_005_rider_cancels_ride_request_email_only(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Rider with email-only contact cancels a plain request, then a confirmed match."""
    cleanup(pgdbConnAdmin)

    def email_count(record_uuid):
        # Queued outgoing emails for a record, read via the admin connection.
        c = pgdbConnAdmin.cursor()
        c.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                  {'uuid': record_uuid})
        return c.fetchone()[0]

    def sms_count(record_uuid):
        # Queued outgoing SMS for a record, read via the admin connection.
        c = pgdbConnAdmin.cursor()
        c.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                  {'uuid': record_uuid})
        return c.fetchone()[0]

    def rider_cancel(record_uuid, confparam):
        # Call the cancel stored function; returns (error_code, error_text).
        c = pgdbConnWeb.cursor()
        c.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                  {'uuid': record_uuid, 'confparam': confparam})
        return c.fetchone()

    # ---- Part 1: cancel an unmatched ride request ----
    # 1. Submit the ride request (contact preference: Email).
    args = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName',
        'RiderLastName': 'RiderLastName',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
        'TotalPartySize': '10',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'Email',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    inserted = generic_rider_insert(pgdbConnWeb, args)
    uuid = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(error_text) == 0
    assert error_code == 0
    assert len(uuid) > 0
    pgdbConnWeb.commit()

    # 2. Insertion queues one email and no SMS.
    assert email_count(uuid) == 1
    assert sms_count(uuid) == 0

    # 3. Cancel the request, confirming with the rider's last name.
    row = rider_cancel(uuid, args['RiderLastName'])
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()

    # 4. Status is now Canceled.
    c = pgdbConnAdmin.cursor()
    c.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
              {'uuid': uuid})
    assert c.fetchone()[0] == 'Canceled'

    # 5. Cancellation queues a second email; still no SMS.
    assert email_count(uuid) == 2
    assert sms_count(uuid) == 0

    # 6. A second cancellation of the same request also succeeds.
    row = rider_cancel(uuid, args['RiderLastName'])
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()

    # 7. Status remains Canceled.
    c = pgdbConnAdmin.cursor()
    c.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
              {'uuid': uuid})
    assert c.fetchone()[0] == 'Canceled'

    # ---- Part 2: cancel a ride request that holds a confirmed match ----
    rider_args = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName',
        'RiderLastName': 'RiderLastName',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-5555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize': '1',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'Email',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    inserted = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_rider) > 0

    driver_args = {
        'IPAddress': '127.0.0.1',
        'DriverCollectionZIP': '90210',
        'DriverCollectionRadius': '10',
        'AvailableDriveTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair': 'True',
        'SeatCount': '1',
        'DriverLicenseNumber': '',
        'DriverFirstName': 'DriverFirstName',
        'DriverLastName': 'DriverLastName',
        'DriverEmail': '<EMAIL>',
        'DriverPhone': '666-666-6666',
        'DrivingOnBehalfOfOrganization': 'True',
        'DrivingOBOOrganizationName': 'Good Org',
        'RidersCanSeeDriverDetails': 'True',
        'DriverWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'DriverPreferredContact': 'Email',
        'DriverWillTakeCare': 'True'
    }
    inserted = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_driver) > 0
    pgdbConnWeb.commit()

    # Run the matcher: exactly one pair should be evaluated and proposed.
    me_cur = pgdbConnMatchEngine.cursor()
    me_cur.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count'] == 0
    assert match_stats['expired_count'] == 0
    assert match_stats['evaluated_pairs'] == 1
    assert match_stats['proposed_count'] == 1
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    pgdbConnMatchEngine.commit()

    # Driver confirms the proposed match.
    web_cur = pgdbConnWeb.cursor()
    web_cur.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                    {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': driver_args['DriverLastName']})
    row = web_cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    pgdbConnMatchEngine.commit()

    web_cur = pgdbConnWeb.cursor()
    web_cur.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid': uuid_driver})
    assert web_cur.fetchone()[0] == 'MatchConfirmed'
    web_cur.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid': uuid_rider})
    assert web_cur.fetchone()[0] == 'MatchConfirmed'

    # Re-confirming an already matched record must fail.
    web_cur.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                    {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': driver_args['DriverLastName']})
    row = web_cur.fetchone()
    assert len(row[1]) > 0
    assert row[0] == 2

    # Rider cancels the (confirmed) ride request.
    row = rider_cancel(uuid_rider, rider_args['RiderLastName'])
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()

    # Rider ends up Canceled, the driver reverts to Pending, and the
    # match record itself is Canceled.
    c = pgdbConnAdmin.cursor()
    c.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
              {'uuid': uuid_rider})
    assert c.fetchone()[0] == 'Canceled'
    c = pgdbConnAdmin.cursor()
    c.execute("""SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s """,
              {'uuid': uuid_driver})
    assert c.fetchone()[0] == 'Pending'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'Canceled'
    pgdbConnMatchEngine.commit()
def test_user_actions_006_rider_cancels_ride_request_email_sms(pgdbConnAdmin, pgdbConnWeb):
    """Rider whose preferred contact is SMS cancels a request.

    Expects one email + one SMS queued on insert, and a second of each
    queued by the cancellation.
    """
    cleanup(pgdbConnAdmin)

    # 1. Submit the ride request with SMS as the preferred contact channel.
    args = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName',
        'RiderLastName': 'RiderLastName',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
        'TotalPartySize': '10',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'SMS',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    inserted = generic_rider_insert(pgdbConnWeb, args)
    uuid = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(error_text) == 0
    assert error_code == 0
    assert len(uuid) > 0
    pgdbConnWeb.commit()

    # 2. Insertion should have queued exactly one email and one SMS.
    admin_cur = pgdbConnAdmin.cursor()
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                      {'uuid': uuid})
    assert admin_cur.fetchone()[0] == 1
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                      {'uuid': uuid})
    assert admin_cur.fetchone()[0] == 1

    # 3. Cancel the request, confirming with the rider's last name.
    web_cur = pgdbConnWeb.cursor()
    web_cur.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                    {'uuid': uuid, 'confparam': args['RiderLastName']})
    row = web_cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()

    # 4. The request is now marked Canceled.
    admin_cur = pgdbConnAdmin.cursor()
    admin_cur.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
                      {'uuid': uuid})
    assert admin_cur.fetchone()[0] == 'Canceled'

    # 5. Cancellation queued a second email and a second SMS.
    admin_cur = pgdbConnAdmin.cursor()
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                      {'uuid': uuid})
    assert admin_cur.fetchone()[0] == 2
    admin_cur.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                      {'uuid': uuid})
    assert admin_cur.fetchone()[0] == 2
def test_user_actions_007_driver_confirm_match(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Driver confirms a proposed match; a second confirmation attempt is rejected."""
    cleanup(pgdbConnAdmin)

    # Insert one compatible rider/driver pair.
    rider_args = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName',
        'RiderLastName': 'RiderLastName',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-5555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize': '1',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'Email',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    inserted = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_rider) > 0

    driver_args = {
        'IPAddress': '127.0.0.1',
        'DriverCollectionZIP': '90210',
        'DriverCollectionRadius': '10',
        'AvailableDriveTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair': 'True',
        'SeatCount': '1',
        'DriverLicenseNumber': '',
        'DriverFirstName': 'DriverFirstName',
        'DriverLastName': 'DriverLastName',
        'DriverEmail': '<EMAIL>',
        'DriverPhone': '666-666-6666',
        'DrivingOnBehalfOfOrganization': 'True',
        'DrivingOBOOrganizationName': 'Good Org',
        'RidersCanSeeDriverDetails': 'True',
        'DriverWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'DriverPreferredContact': 'Email',
        'DriverWillTakeCare': 'True'
    }
    inserted = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_driver) > 0
    pgdbConnWeb.commit()

    # Run the matcher: exactly one pair evaluated and proposed.
    me_cur = pgdbConnMatchEngine.cursor()
    me_cur.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count'] == 0
    assert match_stats['expired_count'] == 0
    assert match_stats['evaluated_pairs'] == 1
    assert match_stats['proposed_count'] == 1
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    pgdbConnMatchEngine.commit()

    # Driver confirms the match, using the last name as confirmation token.
    web_cur = pgdbConnWeb.cursor()
    web_cur.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                    {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': driver_args['DriverLastName']})
    row = web_cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    pgdbConnMatchEngine.commit()

    # Both parties now show MatchConfirmed.
    web_cur = pgdbConnWeb.cursor()
    web_cur.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid': uuid_driver})
    assert web_cur.fetchone()[0] == 'MatchConfirmed'
    web_cur.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid': uuid_rider})
    assert web_cur.fetchone()[0] == 'MatchConfirmed'

    # Confirming an already matched record must fail with code 2.
    web_cur.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                    {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': driver_args['DriverLastName']})
    row = web_cur.fetchone()
    assert len(row[1]) > 0
    assert row[0] == 2
def test_user_actions_008_driver_cancels_confirmed_match(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Two riders x two drivers: confirm matches, then unwind them one cancel at a time."""
    cleanup(pgdbConnAdmin)

    rider_args = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName',
        'RiderLastName': 'RiderLastName',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-5555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize': '1',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'Email',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    rider_args2 = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName2',
        'RiderLastName': 'RiderLastName2',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-5555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize': '1',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'Email',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    inserted = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_rider) > 0
    inserted = generic_rider_insert(pgdbConnWeb, rider_args2)
    uuid_rider2 = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_rider2) > 0

    driver_args = {
        'IPAddress': '127.0.0.1',
        'DriverCollectionZIP': '90210',
        'DriverCollectionRadius': '10',
        'AvailableDriveTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair': 'True',
        'SeatCount': '1',
        'DriverLicenseNumber': '',
        'DriverFirstName': 'DriverFirstName',
        'DriverLastName': 'DriverLastName',
        'DriverEmail': '<EMAIL>',
        'DriverPhone': '666-666-6666',
        'DrivingOnBehalfOfOrganization': 'True',
        'DrivingOBOOrganizationName': 'Good Org',
        'RidersCanSeeDriverDetails': 'True',
        'DriverWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'DriverPreferredContact': 'Email',
        'DriverWillTakeCare': 'True'
    }
    driver_args2 = {
        'IPAddress': '127.0.0.1',
        'DriverCollectionZIP': '90210',
        'DriverCollectionRadius': '10',
        'AvailableDriveTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair': 'True',
        'SeatCount': '1',
        'DriverLicenseNumber': '',
        'DriverFirstName': 'DriverFirstName',
        'DriverLastName': 'DriverLastName2',
        'DriverEmail': '<EMAIL>',
        'DriverPhone': '666-666-6666',
        'DrivingOnBehalfOfOrganization': 'True',
        'DrivingOBOOrganizationName': 'Good Org',
        'RidersCanSeeDriverDetails': 'True',
        'DriverWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'DriverPreferredContact': 'Email',
        'DriverWillTakeCare': 'True'
    }
    inserted = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_driver) > 0
    inserted = generic_driver_insert(pgdbConnWeb, driver_args2)
    uuid_driver2 = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_driver2) > 0
    pgdbConnWeb.commit()

    # Matcher proposes all four rider/driver combinations.
    me_cur = pgdbConnMatchEngine.cursor()
    me_cur.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count'] == 0
    assert match_stats['expired_count'] == 0
    assert match_stats['evaluated_pairs'] == 4
    assert match_stats['proposed_count'] == 4
    pgdbConnMatchEngine.commit()

    def match_status(r, d):
        # Status of the match row for (rider, driver), as seen by the match engine.
        return getMatchRecord(pgdbConnMatchEngine, r, d)['status']

    def expect_matrix(s11, s12, s21, s22):
        # Check all four match rows: (r1,d1), (r1,d2), (r2,d1), (r2,d2).
        assert match_status(uuid_rider, uuid_driver) == s11
        assert match_status(uuid_rider, uuid_driver2) == s12
        assert match_status(uuid_rider2, uuid_driver) == s21
        assert match_status(uuid_rider2, uuid_driver2) == s22

    expect_matrix('MatchProposed', 'MatchProposed', 'MatchProposed', 'MatchProposed')

    cursor = pgdbConnWeb.cursor()
    confirm_sql = "SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)"
    driver_status_sql = """SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """
    rider_status_sql = """SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """

    def pair_call(fn_sql, d, r, conf):
        # Invoke a (driver, rider, confirmation) stored function; returns (code, text).
        cursor.execute(fn_sql, {'uuid_driver': d, 'uuid_rider': r, 'confirm': conf})
        return cursor.fetchone()

    def person_status(status_sql, record_uuid):
        cursor.execute(status_sql, {'uuid': record_uuid})
        return cursor.fetchone()[0]

    # Driver 1 confirms rider 1.
    row = pair_call(confirm_sql, uuid_driver, uuid_rider, driver_args['DriverLastName'])
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    expect_matrix('MatchConfirmed', 'MatchProposed', 'MatchProposed', 'MatchProposed')

    # Re-confirming the same match must fail.
    row = pair_call(confirm_sql, uuid_driver, uuid_rider, driver_args['DriverLastName'])
    assert len(row[1]) > 0
    assert row[0] == 2
    pgdbConnWeb.commit()

    # A second driver cannot confirm an already-confirmed ride request.
    row = pair_call(confirm_sql, uuid_driver2, uuid_rider, driver_args2['DriverLastName'])
    assert len(row[1]) > 0
    assert row[0] == 2
    pgdbConnWeb.commit()

    # The same driver may confirm the second rider as well.
    row = pair_call(confirm_sql, uuid_driver, uuid_rider2, driver_args['DriverLastName'])
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()

    # Driver 1 now holds two confirmed matches.
    expect_matrix('MatchConfirmed', 'MatchProposed', 'MatchConfirmed', 'MatchProposed')
    assert person_status(driver_status_sql, uuid_driver) == 'MatchConfirmed'
    assert person_status(driver_status_sql, uuid_driver2) == 'MatchProposed'
    assert person_status(rider_status_sql, uuid_rider) == 'MatchConfirmed'
    assert person_status(rider_status_sql, uuid_rider2) == 'MatchConfirmed'

    # Driver 1 cancels only the confirmed match with rider 2.
    row = pair_call("SELECT * FROM carpoolvote.driver_cancel_confirmed_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                    uuid_driver, uuid_rider2, driver_args['DriverLastName'])
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    expect_matrix('MatchConfirmed', 'MatchProposed', 'Canceled', 'MatchProposed')
    assert person_status(driver_status_sql, uuid_driver) == 'MatchConfirmed'
    assert person_status(driver_status_sql, uuid_driver2) == 'MatchProposed'
    assert person_status(rider_status_sql, uuid_rider) == 'MatchConfirmed'
    assert person_status(rider_status_sql, uuid_rider2) == 'MatchProposed'

    # Driver 2 withdraws the whole drive offer (confirmed with the phone number).
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid': uuid_driver2, 'confparam': driver_args2['DriverPhone']})
    row = cursor.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    expect_matrix('MatchConfirmed', 'Canceled', 'Canceled', 'Canceled')
    assert person_status(driver_status_sql, uuid_driver) == 'MatchConfirmed'
    assert person_status(driver_status_sql, uuid_driver2) == 'Canceled'
    assert person_status(rider_status_sql, uuid_rider) == 'MatchConfirmed'
    assert person_status(rider_status_sql, uuid_rider2) == 'Pending'

    # Driver 1 withdraws as well: all matches canceled, both riders back to Pending.
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid': uuid_driver, 'confparam': driver_args['DriverPhone']})
    row = cursor.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    expect_matrix('Canceled', 'Canceled', 'Canceled', 'Canceled')
    assert person_status(driver_status_sql, uuid_driver) == 'Canceled'
    assert person_status(driver_status_sql, uuid_driver2) == 'Canceled'
    assert person_status(rider_status_sql, uuid_rider) == 'Pending'
    assert person_status(rider_status_sql, uuid_rider2) == 'Pending'
def test_user_actions_009_rider_cancels_confirmed_match(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Rider cancels a confirmed match; both parties then cancel their records."""
    cleanup(pgdbConnAdmin)

    rider_args = {
        'IPAddress': '127.0.0.1',
        'RiderFirstName': 'RiderFirstName',
        'RiderLastName': 'RiderLastName',
        'RiderEmail': '<EMAIL>',
        'RiderPhone': '555-555-5555',
        'RiderCollectionZIP': '90210',
        'RiderDropOffZIP': '90210',
        'AvailableRideTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize': '1',
        'TwoWayTripNeeded': 'True',
        'RiderIsVulnerable': 'True',
        'RiderWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'NeedWheelchair': 'True',
        'RiderPreferredContact': 'Email',
        'RiderAccommodationNotes': 'I am picky',
        'RiderLegalConsent': 'True',
        'RiderWillBeSafe': 'True',
        'RiderCollectionAddress': 'at home',
        'RiderDestinationAddress': 'at the polls'
    }
    inserted = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_rider) > 0

    driver_args = {
        'IPAddress': '127.0.0.1',
        'DriverCollectionZIP': '90210',
        'DriverCollectionRadius': '10',
        'AvailableDriveTimesLocal': '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair': 'True',
        'SeatCount': '1',
        'DriverLicenseNumber': '',
        'DriverFirstName': 'DriverFirstName',
        'DriverLastName': 'DriverLastName',
        'DriverEmail': '<EMAIL>',
        'DriverPhone': '666-666-6666',
        'DrivingOnBehalfOfOrganization': 'True',
        'DrivingOBOOrganizationName': 'Good Org',
        'RidersCanSeeDriverDetails': 'True',
        'DriverWillNotTalkPolitics': 'True',
        'PleaseStayInTouch': 'True',
        'DriverPreferredContact': 'Email',
        'DriverWillTakeCare': 'True'
    }
    inserted = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver = inserted['uuid']
    error_code = inserted['error_code']
    error_text = inserted['error_text']
    assert len(uuid_driver) > 0
    pgdbConnWeb.commit()

    # Run the matcher: one pair evaluated and proposed.
    me_cur = pgdbConnMatchEngine.cursor()
    me_cur.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count'] == 0
    assert match_stats['expired_count'] == 0
    assert match_stats['evaluated_pairs'] == 1
    assert match_stats['proposed_count'] == 1
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    pgdbConnMatchEngine.commit()

    # Driver confirms the proposed match.
    cur = pgdbConnWeb.cursor()
    cur.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': driver_args['DriverLastName']})
    row = cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    pgdbConnMatchEngine.commit()

    # Confirming an already confirmed match must fail.
    cur.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': driver_args['DriverLastName']})
    row = cur.fetchone()
    assert len(row[1]) > 0
    assert row[0] == 2
    pgdbConnWeb.commit()

    # Rider cancels the confirmed match: both parties fall back to Pending.
    cur.execute("SELECT * FROM carpoolvote.rider_cancel_confirmed_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                {'uuid_driver': uuid_driver, 'uuid_rider': uuid_rider, 'confirm': rider_args['RiderLastName']})
    row = cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'Canceled'
    pgdbConnMatchEngine.commit()
    cur.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid': uuid_driver})
    assert cur.fetchone()[0] == 'Pending'
    cur.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid': uuid_rider})
    assert cur.fetchone()[0] == 'Pending'

    # Rider then cancels the whole ride request.
    cur = pgdbConnWeb.cursor()
    cur.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                {'uuid': uuid_rider, 'confparam': rider_args['RiderLastName']})
    row = cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    cur.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid': uuid_rider})
    assert cur.fetchone()[0] == 'Canceled'

    # Driver cancels the drive offer.
    cur = pgdbConnWeb.cursor()
    cur.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                {'uuid': uuid_driver, 'confparam': driver_args['DriverLastName']})
    row = cur.fetchone()
    assert len(row[1]) == 0
    assert row[0] == 0
    pgdbConnWeb.commit()
    # Regression check for issue #123: rider must stay Canceled.
    cur.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid': uuid_rider})
    assert cur.fetchone()[0] == 'Canceled'
    cur.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid': uuid_driver})
    assert cur.fetchone()[0] == 'Canceled'
import pgdb
from test_02_submit_rider import generic_rider_insert
from test_03_submit_driver import generic_driver_insert
from test_04_matches import getMatcherActivityStats, getMatchRecord
@pytest.fixture
def pgdbConnMatchEngine(dbhost, db, matchengineuser):
    """Open a DB connection authenticated as the match-engine role."""
    dsn = ':'.join((dbhost, db, matchengineuser))
    return pgdb.connect(dsn)
@pytest.fixture
def pgdbConnAdmin(dbhost, db, adminuser):
    """Open a DB connection authenticated as the admin role."""
    dsn = ':'.join((dbhost, db, adminuser))
    return pgdb.connect(dsn)
@pytest.fixture
def pgdbConnWeb(dbhost, db, frontenduser):
    """Open a DB connection authenticated as the web front-end role."""
    dsn = ':'.join((dbhost, db, frontenduser))
    return pgdb.connect(dsn)
def cleanup(pgdbConnAdmin):
    """Empty every carpoolvote table so a test starts from a clean slate.

    The deletion order matters: dependent rows (match, activity log,
    outgoing notifications) are removed before rider/driver/helper rows.
    """
    tables = (
        'match',
        'match_engine_activity_log',
        'outgoing_email',
        'outgoing_sms',
        'rider',
        'driver',
        'helper',
    )
    cursor = pgdbConnAdmin.cursor()
    for table in tables:
        cursor.execute('DELETE FROM carpoolvote.' + table)
    pgdbConnAdmin.commit()
def test_user_actions_001_driver_cancels_drive_offer_input_val(pgdbConnAdmin, pgdbConnWeb):
    """A cancel request with an unknown UUID/confirmation pair is rejected.

    carpoolvote.driver_cancel_drive_offer must return error code 2 and a
    non-empty error message.
    """
    cursor = pgdbConnWeb.cursor()
    cursor.execute(
        "SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
        {'uuid': '12345', 'confparam': '12346'})
    row = cursor.fetchone()
    assert row[0] == 2        # error code: invalid input
    assert len(row[1]) > 0    # descriptive error text
def test_user_actions_002_driver_cancels_drive_offer_email_only(pgdbConnAdmin, pgdbConnWeb):
    """Cancel a drive offer for a driver whose preferred contact is Email.

    Verifies: the insert queues 1 email and 0 SMS; the cancellation
    succeeds (code 0, empty error text) and sets the driver status to
    'Canceled'; cancellation queues a 2nd email and still no SMS; and a
    repeated cancellation is accepted without error.
    """
    cleanup(pgdbConnAdmin)
    # 1. insert drive offer
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',  # Email-only: no SMS expected
        'DriverWillTakeCare' : 'True'
    }
    results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver=results['uuid']
    error_code=results['error_code']  # NOTE(review): not asserted here, unlike test_005
    error_text=results['error_text']
    assert len(uuid_driver)>0
    pgdbConnWeb.commit()
    # 2. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 1
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 0
    # 3. Cancel it (confirmation parameter is the driver's last name)
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid_driver, 'confparam' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 4. check the status
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    # 5. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 2
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 0
    # 6. Cancel it again (repeat cancellation must not be an error)
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid_driver, 'confparam' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 7. check the status
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
def test_user_actions_003_driver_cancels_drive_offer_email_sms(pgdbConnAdmin, pgdbConnWeb):
    """Cancel a drive offer for a driver whose preferred contact is SMS.

    Same flow as test_002 but with 'DriverPreferredContact': 'SMS', so
    each step queues one email AND one SMS (insert: 1/1, after cancel: 2/2).
    """
    cleanup(pgdbConnAdmin)
    # 1. insert drive offer
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'SMS',  # SMS preference: email + SMS expected
        'DriverWillTakeCare' : 'True'
    }
    results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_driver)>0
    pgdbConnWeb.commit()
    # 2. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 1
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 1
    # 3. Cancel it
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid_driver, 'confparam' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 4. check the status
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    # 5. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 2
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 2
def test_user_actions_004_rider_cancels_ride_request_input_val(pgdbConnAdmin, pgdbConnWeb):
    """A cancel request with an unknown UUID/confirmation pair is rejected.

    carpoolvote.rider_cancel_ride_request must return error code 2 and a
    non-empty error message.
    """
    cursor = pgdbConnWeb.cursor()
    cursor.execute(
        "SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
        {'uuid': '12345', 'confparam': '12346'})
    row = cursor.fetchone()
    assert row[0] == 2        # error code: invalid input
    assert len(row[1]) > 0    # descriptive error text
def test_user_actions_005_rider_cancels_ride_request_email_only(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Rider (Email-only contact) cancels a ride request, then a confirmed match.

    Part 1: insert a ride request (1 email / 0 SMS queued), cancel it
    (status 'Canceled', 2nd email queued, no SMS), and cancel again
    (accepted, status unchanged).
    Part 2: create a rider+driver pair, run the matcher, confirm the match,
    then cancel the ride request; the rider becomes 'Canceled', the match
    record 'Canceled', and the driver reverts to 'Pending'.
    """
    cleanup(pgdbConnAdmin)
    # 1. insert ride request
    args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName',
        'RiderLastName' : 'RiderLastName',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
        'TotalPartySize' : '10',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',  # Email-only: no SMS expected
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    results = generic_rider_insert(pgdbConnWeb, args)
    uuid=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(error_text)==0
    assert error_code==0
    assert len(uuid)>0
    pgdbConnWeb.commit()
    # 2. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 1
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 0
    # 3. Cancel it (confirmation parameter is the rider's last name)
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid, 'confparam' : args['RiderLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 4. check the status
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    # 5. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 2
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 0
    # 6. Cancel it again (repeat cancellation must not be an error)
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid, 'confparam' : args['RiderLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 7. check the status
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    # now test cancellation on confirmed match
    rider_args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName',
        'RiderLastName' : 'RiderLastName',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-5555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize' : '1',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    results = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_rider)>0
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',
        'DriverWillTakeCare' : 'True'
    }
    results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_driver)>0
    pgdbConnWeb.commit()
    # Run the match engine: exactly one rider/driver pair to evaluate.
    cursor = pgdbConnMatchEngine.cursor()
    cursor.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count']==0
    assert match_stats['expired_count']==0
    assert match_stats['evaluated_pairs']==1
    assert match_stats['proposed_count']==1
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    pgdbConnMatchEngine.commit()
    # Driver confirms the proposed match.
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    pgdbConnMatchEngine.commit()
    cursor = pgdbConnWeb.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    # Cannot match an already matched record
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1])> 0
    assert results[0] == 2
    # Cancel the ride request
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid_rider, 'confparam' : rider_args['RiderLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 7. check the status: rider canceled, driver released back to Pending
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
                   {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.driver where "UUID"=%(uuid)s """,
                   {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Pending'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'Canceled'
    pgdbConnMatchEngine.commit()
def test_user_actions_006_rider_cancels_ride_request_email_sms(pgdbConnAdmin, pgdbConnWeb):
    """Rider with SMS contact preference cancels a ride request.

    Same flow as the first part of test_005 but with
    'RiderPreferredContact': 'SMS', so each step queues one email AND one
    SMS (insert: 1/1, after cancel: 2/2).
    """
    cleanup(pgdbConnAdmin)
    # 1. insert ride request
    args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName',
        'RiderLastName' : 'RiderLastName',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00|2019-10-01T02:00/2019-10-01T03:00',
        'TotalPartySize' : '10',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'SMS',  # SMS preference: email + SMS expected
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    results = generic_rider_insert(pgdbConnWeb, args)
    uuid=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(error_text)==0
    assert error_code==0
    assert len(uuid)>0
    pgdbConnWeb.commit()
    # 2. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 1
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 1
    # 3. Cancel it
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid, 'confparam' : args['RiderLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # 4. check the status
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.rider where "UUID"=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    # 5. Check the number of email and sms notification
    cursor = pgdbConnAdmin.cursor()
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_email WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 2
    cursor.execute("""SELECT COUNT(*) FROM carpoolvote.outgoing_sms WHERE uuid=%(uuid)s """,
                   {'uuid' : uuid})
    results = cursor.fetchone()
    assert results[0] == 2
def test_user_actions_007_driver_confirm_match(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Driver confirms a proposed match; re-confirming is rejected.

    Inserts one rider and one compatible driver, runs the matcher
    (1 proposed pair), confirms the match as the driver, and checks that
    match/rider/driver statuses all become 'MatchConfirmed'. A second
    confirmation attempt must fail with error code 2.
    """
    cleanup(pgdbConnAdmin)
    rider_args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName',
        'RiderLastName' : 'RiderLastName',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-5555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize' : '1',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    results = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_rider)>0
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',
        'DriverWillTakeCare' : 'True'
    }
    results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_driver)>0
    pgdbConnWeb.commit()
    # Run the match engine: exactly one rider/driver pair to evaluate.
    cursor = pgdbConnMatchEngine.cursor()
    cursor.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count']==0
    assert match_stats['expired_count']==0
    assert match_stats['evaluated_pairs']==1
    assert match_stats['proposed_count']==1
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    pgdbConnMatchEngine.commit()
    # Driver confirms the proposed match (last name is the confirmation token).
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    pgdbConnMatchEngine.commit()
    cursor = pgdbConnWeb.cursor()
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    # Cannot match an already matched record
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1])> 0
    assert results[0] == 2
def test_user_actions_008_driver_cancels_confirmed_match(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """Cancellation cascades across a 2-rider x 2-driver match matrix.

    Scenario: matcher proposes all 4 pairs; driver1 confirms rider1
    (other pairs stay proposed); re-confirm and cross-confirm by driver2
    are rejected; driver1 also confirms rider2. Then:
    - driver1 cancels the confirmed match with rider2: only that match is
      'Canceled', rider2/driver statuses roll back to 'MatchProposed'.
    - driver2 cancels the whole drive offer: all driver2 matches are
      'Canceled', rider2 reverts to 'Pending'.
    - driver1 cancels the whole drive offer: all matches 'Canceled',
      both riders back to 'Pending'.
    """
    cleanup(pgdbConnAdmin)
    rider_args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName',
        'RiderLastName' : 'RiderLastName',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-5555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize' : '1',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    rider_args2 = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName2',
        'RiderLastName' : 'RiderLastName2',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-5555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize' : '1',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    results = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_rider)>0
    results = generic_rider_insert(pgdbConnWeb, rider_args2)
    uuid_rider2=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_rider2)>0
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',
        'DriverWillTakeCare' : 'True'
    }
    driver_args2 = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName2',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',
        'DriverWillTakeCare' : 'True'
    }
    results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_driver)>0
    results = generic_driver_insert(pgdbConnWeb, driver_args2)
    uuid_driver2=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_driver2)>0
    pgdbConnWeb.commit()
    # Run the match engine: 2 riders x 2 drivers -> 4 proposed pairs.
    cursor = pgdbConnMatchEngine.cursor()
    cursor.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count']==0
    assert match_stats['expired_count']==0
    assert match_stats['evaluated_pairs']==4
    assert match_stats['proposed_count']==4
    pgdbConnMatchEngine.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    # Driver1 confirms the match with rider1.
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # Match is confirmed.
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    # Cannot confirm an already confirmed match
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1])> 0
    assert results[0] == 2
    pgdbConnWeb.commit()
    # A 2nd Driver is not able to confirm an already confirmed ride request
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver2, 'uuid_rider' : uuid_rider, 'confirm' : driver_args2['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) > 0
    assert results[0] == 2
    pgdbConnWeb.commit()
    # Same driver confirms 2nd rider
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider2, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # Match is confirmed (driver has 2 confirmed matches)
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver2})
    results = cursor.fetchone()
    assert results[0] == 'MatchProposed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider2})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    # Now driver cancels one match (only the rider2 pairing)
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_confirmed_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
                   {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider2, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver)
    assert match_record['status'] == 'Canceled'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver2)
    assert match_record['status'] == 'MatchProposed'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver2})
    results = cursor.fetchone()
    assert results[0] == 'MatchProposed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider2})
    results = cursor.fetchone()
    assert results[0] == 'MatchProposed'
    # Driver2 cancels (confirmation here is the driver's phone number)
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid_driver2, 'confparam' : driver_args2['DriverPhone']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver2)
    assert match_record['status'] == 'Canceled'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver)
    assert match_record['status'] == 'Canceled'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver2)
    assert match_record['status'] == 'Canceled'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver2})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'MatchConfirmed'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider2})
    results = cursor.fetchone()
    assert results[0] == 'Pending'
    # Driver1 cancels
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
                   {'uuid' : uuid_driver, 'confparam' : driver_args['DriverPhone']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'Canceled'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver2)
    assert match_record['status'] == 'Canceled'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver)
    assert match_record['status'] == 'Canceled'
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider2, uuid_driver2)
    assert match_record['status'] == 'Canceled'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver2})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'Pending'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider2})
    results = cursor.fetchone()
    assert results[0] == 'Pending'
def test_user_actions_009_rider_cancels_confirmed_match(pgdbConnAdmin, pgdbConnMatchEngine, pgdbConnWeb):
    """End-to-end flow: one rider and one driver are inserted, matched, the
    driver confirms, the rider cancels the confirmed match, and finally both
    parties cancel their requests entirely.

    Convention seen throughout the asserts below: the stored procedures return
    a row whose first element appears to be a numeric return code (0 = success)
    and whose second element an error message (empty on success) — TODO confirm
    against the procedure definitions.
    """
    # Reset DB state so this test is independent of earlier ones.
    cleanup(pgdbConnAdmin)
    rider_args = {
        'IPAddress' : '127.0.0.1',
        'RiderFirstName' : 'RiderFirstName',
        'RiderLastName' : 'RiderLastName',
        'RiderEmail' : '<EMAIL>',
        'RiderPhone' : '555-555-5555',
        'RiderCollectionZIP' : '90210',
        'RiderDropOffZIP' : '90210',
        'AvailableRideTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'TotalPartySize' : '1',
        'TwoWayTripNeeded' : 'True',
        'RiderIsVulnerable' : 'True',
        'RiderWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'NeedWheelchair' : 'True',
        'RiderPreferredContact' : 'Email',
        'RiderAccommodationNotes' : 'I am picky',
        'RiderLegalConsent' : 'True',
        'RiderWillBeSafe' : 'True',
        'RiderCollectionAddress' : 'at home',
        'RiderDestinationAddress' : 'at the polls'
    }
    # Insert the rider; a non-empty UUID signals a successful insert.
    results = generic_rider_insert(pgdbConnWeb, rider_args)
    uuid_rider=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_rider)>0
    driver_args = {
        'IPAddress' : '127.0.0.1',
        'DriverCollectionZIP' : '90210',
        'DriverCollectionRadius' : '10',
        'AvailableDriveTimesLocal' : '2018-10-01T02:00/2018-10-01T03:00',
        'DriverCanLoadRiderWithWheelchair' : 'True',
        'SeatCount' : '1',
        'DriverLicenseNumber' : '',
        'DriverFirstName' : 'DriverFirstName',
        'DriverLastName' : 'DriverLastName',
        'DriverEmail' : '<EMAIL>',
        'DriverPhone' : '666-666-6666',
        'DrivingOnBehalfOfOrganization' : 'True',
        'DrivingOBOOrganizationName' : 'Good Org',
        'RidersCanSeeDriverDetails' : 'True',
        'DriverWillNotTalkPolitics' : 'True',
        'PleaseStayInTouch' : 'True',
        'DriverPreferredContact' : 'Email',
        'DriverWillTakeCare' : 'True'
    }
    # Insert a compatible driver (same ZIP / time window, wheelchair-capable).
    results = generic_driver_insert(pgdbConnWeb, driver_args)
    uuid_driver=results['uuid']
    error_code=results['error_code']
    error_text=results['error_text']
    assert len(uuid_driver)>0
    pgdbConnWeb.commit()
    # Run the match engine: exactly one rider/driver pair should be proposed.
    cursor = pgdbConnMatchEngine.cursor()
    cursor.execute("SELECT * FROM carpoolvote.perform_match()")
    match_stats = getMatcherActivityStats(pgdbConnMatchEngine)
    assert match_stats['error_count']==0
    assert match_stats['expired_count']==0
    assert match_stats['evaluated_pairs']==1
    assert match_stats['proposed_count']==1
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchProposed'
    pgdbConnMatchEngine.commit()
    # Driver confirms the proposed match (last name is the confirmation token).
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
        {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'MatchConfirmed'
    pgdbConnMatchEngine.commit()
    # Cannot confirm an already confirmed match
    cursor.execute("SELECT * FROM carpoolvote.driver_confirm_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
        {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1])> 0
    assert results[0] == 2
    pgdbConnWeb.commit()
    # Match is confirmed.
    # Now rider cancels the match
    cursor.execute("SELECT * FROM carpoolvote.rider_cancel_confirmed_match(%(uuid_driver)s, %(uuid_rider)s, %(confirm)s)",
        {'uuid_driver' : uuid_driver, 'uuid_rider' : uuid_rider, 'confirm' : rider_args['RiderLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    match_record = getMatchRecord(pgdbConnMatchEngine, uuid_rider, uuid_driver)
    assert match_record['status'] == 'Canceled'
    pgdbConnMatchEngine.commit()
    # After a match cancellation both parties go back to 'Pending' (re-matchable).
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Pending'
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'Pending'
    # The rider cancels ride request
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.rider_cancel_ride_request(%(uuid)s, %(confparam)s)",
        {'uuid' : uuid_rider, 'confparam' : rider_args['RiderLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    # The driver cancels drive offer
    cursor = pgdbConnWeb.cursor()
    cursor.execute("SELECT * FROM carpoolvote.driver_cancel_drive_offer(%(uuid)s, %(confparam)s)",
        {'uuid' : uuid_driver, 'confparam' : driver_args['DriverLastName']})
    results = cursor.fetchone()
    assert len(results[1]) == 0
    assert results[0] == 0
    pgdbConnWeb.commit()
    # check for issue #123
    cursor.execute("""SELECT status FROM carpoolvote.rider WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_rider})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
    cursor.execute("""SELECT status FROM carpoolvote.driver WHERE "UUID"=%(uuid)s """, {'uuid' : uuid_driver})
    results = cursor.fetchone()
    assert results[0] == 'Canceled'
import pandas as pd
class Dataset:
    """Loads the annotated tweet dataset, strips links and punctuation from the
    text column, and builds vocabulary <-> index lookup tables.

    Attributes:
        train_set: the loaded DataFrame (None until ``load_dataset``).
        vocab_index: word -> 1-based index, in first-occurrence order.
        index_vocab: inverse of ``vocab_index``.
        vocab_length: number of distinct words (-1 until built).
    """

    def __init__(self):
        self.train_set = None
        self.vocab_index = {}
        self.index_vocab = {}
        self.vocab_length = -1

    def load_dataset(self):
        """Read the annotated CSV into ``train_set`` (Latin-1 encoded source)."""
        self.train_set = pd.read_csv('data/dataset_annotated.csv', encoding='ISO-8859-1')

    def cleanse_dataset(self):
        """Remove hyperlinks and punctuation from the 'text' column, in place.

        ``regex=True`` is passed explicitly: pandas >= 2.0 treats ``str.replace``
        patterns as literals by default, which silently turned these
        substitutions into no-ops.
        """
        # Character-class tail shared by all the URL patterns, plus the trailing
        # space that delimits the link from the rest of the tweet.
        url_tail = r'[a-zA-Z0-9\`\-\=\~\!\@\#\$\%\^\&\*\(\_\+\[\]\{\}\\\|\;\'\:\"\,\.\/\<\>\?]* '
        # Drop hyperlinks: full URLs and common Twitter shorteners.
        for prefix in (r'http[s]*\:\/\/', r'pic\.twitter\.com\.\/', r'bit\.ly\/', r't\.co\/'):
            self.train_set['text'] = self.train_set['text'].str.replace(prefix + url_tail, '', regex=True)
        # Drop remaining punctuation.
        for punct in (r'[\`\-\=]', r'[\~\!\@\#\$\%\^\&\*\(\_\+]', r'[\[\]\{\}\\\|\;\'\:\"\,\.\/\<\>\?]'):
            self.train_set['text'] = self.train_set['text'].str.replace(punct, '', regex=True)

    def build_dictionaries(self):
        """Build word<->index maps from the 'text' column.

        Indices start at 1 and follow first-occurrence order over the
        lower-cased, space-split tweets.
        """
        self.vocab_index = {}
        index_counter = 1
        for tweet in self.train_set['text'].tolist():
            for word in tweet.lower().split(' '):
                if word not in self.vocab_index:
                    self.vocab_index[word] = index_counter
                    index_counter += 1
        self.index_vocab = {i: w for w, i in self.vocab_index.items()}
        self.vocab_length = len(self.vocab_index)

    def get_dataset(self):
        """Return the underlying DataFrame."""
        return self.train_set

    def vocab_to_index(self):
        """Return the word -> index dictionary."""
        return self.vocab_index

    def index_to_vocab(self):
        """Return the index -> word dictionary."""
        return self.index_vocab
import pandas as pd
class Dataset:
    """Loads the annotated tweet dataset, strips links and punctuation from the
    text column, and builds vocabulary <-> index lookup tables.

    Attributes:
        train_set: the loaded DataFrame (None until ``load_dataset``).
        vocab_index: word -> 1-based index, in first-occurrence order.
        index_vocab: inverse of ``vocab_index``.
        vocab_length: number of distinct words (-1 until built).
    """

    def __init__(self):
        self.train_set = None
        self.vocab_index = {}
        self.index_vocab = {}
        self.vocab_length = -1

    def load_dataset(self):
        """Read the annotated CSV into ``train_set`` (Latin-1 encoded source)."""
        self.train_set = pd.read_csv('data/dataset_annotated.csv', encoding='ISO-8859-1')

    def cleanse_dataset(self):
        """Remove hyperlinks and punctuation from the 'text' column, in place.

        ``regex=True`` is passed explicitly: pandas >= 2.0 treats ``str.replace``
        patterns as literals by default, which silently turned these
        substitutions into no-ops.
        """
        # Character-class tail shared by all the URL patterns, plus the trailing
        # space that delimits the link from the rest of the tweet.
        url_tail = r'[a-zA-Z0-9\`\-\=\~\!\@\#\$\%\^\&\*\(\_\+\[\]\{\}\\\|\;\'\:\"\,\.\/\<\>\?]* '
        # Drop hyperlinks: full URLs and common Twitter shorteners.
        for prefix in (r'http[s]*\:\/\/', r'pic\.twitter\.com\.\/', r'bit\.ly\/', r't\.co\/'):
            self.train_set['text'] = self.train_set['text'].str.replace(prefix + url_tail, '', regex=True)
        # Drop remaining punctuation.
        for punct in (r'[\`\-\=]', r'[\~\!\@\#\$\%\^\&\*\(\_\+]', r'[\[\]\{\}\\\|\;\'\:\"\,\.\/\<\>\?]'):
            self.train_set['text'] = self.train_set['text'].str.replace(punct, '', regex=True)

    def build_dictionaries(self):
        """Build word<->index maps from the 'text' column.

        Indices start at 1 and follow first-occurrence order over the
        lower-cased, space-split tweets.
        """
        self.vocab_index = {}
        index_counter = 1
        for tweet in self.train_set['text'].tolist():
            for word in tweet.lower().split(' '):
                if word not in self.vocab_index:
                    self.vocab_index[word] = index_counter
                    index_counter += 1
        self.index_vocab = {i: w for w, i in self.vocab_index.items()}
        self.vocab_length = len(self.vocab_index)

    def get_dataset(self):
        """Return the underlying DataFrame."""
        return self.train_set

    def vocab_to_index(self):
        """Return the word -> index dictionary."""
        return self.vocab_index

    def index_to_vocab(self):
        """Return the index -> word dictionary."""
        return self.index_vocab
import math
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils import model_zoo
class CBR(nn.Module):
    """Convolution followed by batch normalization and a PReLU activation."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): stride rate for down-sampling. Default is 1
        """
        super().__init__()
        # "same" padding for odd kernel sizes
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(n_out, eps=1e-03)
        self.act = nn.PReLU(n_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv -> batch norm -> PReLU to ``x`` and return the result."""
        return self.act(self.bn(self.conv(x)))
class BR(nn.Module):
    """Batch normalization followed by a PReLU activation."""

    def __init__(self, n_out: int) -> None:
        """
        Args:
            n_out (int): output feature maps
        """
        super().__init__()
        self.bn = nn.BatchNorm2d(n_out, eps=1e-03)
        self.act = nn.PReLU(n_out)

    def forward(self, x):
        """Normalize ``x`` then apply the channel-wise PReLU."""
        return self.act(self.bn(x))
class CB(nn.Module):
    """Convolution followed by batch normalization (no activation)."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): optional stride for down-sampling. Default 1
        """
        super().__init__()
        # "same" padding for odd kernel sizes
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(n_out, eps=1e-03)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv -> batch norm to ``x`` and return the result."""
        return self.bn(self.conv(x))
class C(nn.Module):
    """A bare convolutional layer (no norm, no activation)."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): optional stride for down-sampling. Default 1
        """
        super().__init__()
        # "same" padding for odd kernel sizes
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Convolve ``x`` and return the feature map."""
        return self.conv(x)
class CDilated(nn.Module):
    """A dilated convolutional layer (no norm, no activation)."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1, d: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): optional stride for down-sampling. Default 1
            d (int): optional dilation rate. Default 1
        """
        super().__init__()
        # padding grows with the dilation so the output keeps "same" size at stride 1
        pad = ((k_size - 1) // 2) * d
        self.conv = nn.Conv2d(
            n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False, dilation=d
        )

    def forward(self, x):
        """Apply the dilated convolution to ``x`` and return the feature map."""
        return self.conv(x)
class DownSamplerB(nn.Module):
    """ESP-style down-sampler: strided reduce, five parallel dilated branches,
    hierarchical merge, then batch norm and PReLU."""

    def __init__(self, n_in: int, n_out: int) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
        """
        super().__init__()
        branch = n_out // 5
        # first branch absorbs the remainder so the concat is exactly n_out wide
        first = n_out - 4 * branch
        self.c1 = C(n_in, branch, 3, 2)
        self.d1 = CDilated(branch, first, 3, 1, 1)
        self.d2 = CDilated(branch, branch, 3, 1, 2)
        self.d4 = CDilated(branch, branch, 3, 1, 4)
        self.d8 = CDilated(branch, branch, 3, 1, 8)
        self.d16 = CDilated(branch, branch, 3, 1, 16)
        self.bn = nn.BatchNorm2d(n_out, eps=1e-3)
        self.act = nn.PReLU(n_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Down-sample ``x`` and merge the dilated branch outputs hierarchically."""
        reduced = self.c1(x)
        outs = [d(reduced) for d in (self.d1, self.d2, self.d4, self.d8, self.d16)]
        # hierarchical fusion: d1, d2, d2+d4, d2+d4+d8, d2+d4+d8+d16
        merged = [outs[0], outs[1]]
        for o in outs[2:]:
            merged.append(merged[-1] + o)
        return self.act(self.bn(torch.cat(merged, 1)))
class DilatedParllelResidualBlockB(nn.Module):
    """
    This class defines the ESP block, which is based on the following principle
    Reduce ---> Split ---> Transform --> Merge
    """

    def __init__(self, n_in: int, n_out: int, add: bool = True) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            add (bool): if true, add a residual connection through identity operation. You can use
                        projection too as in ResNet paper, but we avoid to use it if the dimensions
                        are not the same because we do not want to increase the module complexity
        """
        super().__init__()
        branch = n_out // 5
        # first dilated branch absorbs the remainder so the concat is exactly n_out wide
        first = n_out - 4 * branch
        self.c1 = C(n_in, branch, 1, 1)
        self.d1 = CDilated(branch, first, 3, 1, 1)  # dilation rate of 2^0
        self.d2 = CDilated(branch, branch, 3, 1, 2)  # dilation rate of 2^1
        self.d4 = CDilated(branch, branch, 3, 1, 4)  # dilation rate of 2^2
        self.d8 = CDilated(branch, branch, 3, 1, 8)  # dilation rate of 2^3
        self.d16 = CDilated(branch, branch, 3, 1, 16)  # dilation rate of 2^4
        self.bn = BR(n_out)
        self.add = add

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Reduce, run the dilated branches, merge hierarchically, optionally add the input."""
        reduced = self.c1(x)
        outs = [d(reduced) for d in (self.d1, self.d2, self.d4, self.d8, self.d16)]
        # hierarchical fusion: d1, d2, d2+d4, d2+d4+d8, d2+d4+d8+d16
        merged = [outs[0], outs[1]]
        for o in outs[2:]:
            merged.append(merged[-1] + o)
        combine = torch.cat(merged, 1)
        if self.add:
            # identity residual connection (requires n_in == n_out)
            combine = x + combine
        return self.bn(combine)
class _AtrousSpatialPyramidPoolingModule(nn.Module):
    """
    operations performed:
      1x1 x depth
      3x3 x depth dilation 6
      3x3 x depth dilation 12
      3x3 x depth dilation 18
      image pooling
      concatenate all together
      Final 1x1 conv

    Raises:
        ValueError: if ``output_stride`` is neither 8 nor 16.
    """

    def __init__(self, in_dim, reduction_dim=256, output_stride=16, rates=(6, 12, 18)):
        super(_AtrousSpatialPyramidPoolingModule, self).__init__()
        # Check if we are using distributed BN and use the nn from encoding.nn
        # library rather than using standard pytorch.nn
        if output_stride == 8:
            # at output stride 8 the feature map is twice as large, so double the rates
            rates = [2 * r for r in rates]
        elif output_stride != 16:
            # BUG FIX: the original `raise "..."` raised a string, which is itself
            # a TypeError in Python 3; raise a proper exception instead.
            raise ValueError("output stride of {} not supported".format(output_stride))
        self.features = []
        # 1x1
        self.features.append(
            nn.Sequential(
                nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
                Norm2d(reduction_dim),
                nn.ReLU(inplace=True),
            )
        )
        # other rates
        for r in rates:
            self.features.append(
                nn.Sequential(
                    nn.Conv2d(in_dim, reduction_dim, kernel_size=3, dilation=r, padding=r, bias=False),
                    Norm2d(reduction_dim),
                    nn.ReLU(inplace=True),
                )
            )
        self.features = torch.nn.ModuleList(self.features)
        # img level features
        self.img_pooling = nn.AdaptiveAvgPool2d(1)
        self.img_conv = nn.Sequential(
            nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False), Norm2d(reduction_dim), nn.ReLU(inplace=True)
        )

    def forward(self, x):
        """Return the channel-wise concat of image-level features and all ASPP branches."""
        x_size = x.size()
        img_features = self.img_pooling(x)
        img_features = self.img_conv(img_features)
        # broadcast the pooled features back to the input's spatial size
        img_features = Upsample(img_features, x_size[2:])
        out = img_features
        for f in self.features:
            y = f(x)
            out = torch.cat((out, y), 1)
        return out
class InputProjectionA(nn.Module):
    """
    This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512 x512 x3 and spatial dimensions of feature map size are 56x56xF, then
    this class will generate an output of 56x56x3
    """

    def __init__(self, sampling_times: int) -> None:
        """
        Args:
            sampling_times (int): the rate at which one wants to down-sample the image
        """
        super().__init__()
        # each average pool halves the spatial resolution
        self.pool = nn.ModuleList([nn.AvgPool2d(3, stride=2, padding=1) for _ in range(sampling_times)])

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Return ``x`` average-pooled ``sampling_times`` times (pyramid-based approach)."""
        for stage in self.pool:
            x = stage(x)
        return x
def Norm2d(in_channels):
    """
    Custom Norm Function to allow flexible switching
    (currently always a plain BatchNorm2d).
    """
    return nn.BatchNorm2d(in_channels)
def Upsample(x, size):
    """
    Wrapper Around the Upsample Call.

    Bilinearly interpolates ``x`` to the given spatial ``size`` with
    ``align_corners=True``.
    """
    interpolate = nn.functional.interpolate
    return interpolate(x, size=size, align_corners=True, mode="bilinear")
def initialize_weights(*models):
    """
    Initialize Model Weights.

    Conv and linear weights get Kaiming-normal initialization with zeroed
    biases; batch-norm layers are reset to identity (weight 1, bias 0).
    Uses ``torch.nn.init`` helpers instead of mutating ``.data`` directly.
    """
    for model in models:
        for module in model.modules():
            if isinstance(module, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(module.weight)
                if module.bias is not None:
                    nn.init.zeros_(module.bias)
            elif isinstance(module, nn.BatchNorm2d):
                nn.init.ones_(module.weight)
                nn.init.zeros_(module.bias)
def initialize_pretrained_model(model, num_classes, settings):
    """
    Initialize Pretrain Model Information,
    Download weights, load weights, set variables
    """
    # Guard against loading a checkpoint trained for a different head size.
    assert num_classes == settings["num_classes"], "num_classes should be {}, but is {}".format(
        settings["num_classes"], num_classes
    )
    # Downloads (and locally caches) the checkpoint referenced by settings["url"].
    weights = model_zoo.load_url(settings["url"])
    model.load_state_dict(weights)
    # Attach preprocessing metadata to the model so callers can normalize inputs
    # consistently with how the weights were trained.
    model.input_space = settings["input_space"]
    model.input_size = settings["input_size"]
    model.input_range = settings["input_range"]
    model.mean = settings["mean"]
    model.std = settings["std"]
class SEModule(nn.Module):
    """
    Squeeze Excitation Module.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    def __init__(self, channels, reduction):
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # bottleneck: channels -> channels // reduction -> channels
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        """Re-weight each channel of ``x`` by a learned gate in (0, 1)."""
        # squeeze: global spatial context per channel
        gate = self.avg_pool(x)
        # excite: two 1x1 convs produce the per-channel gate
        gate = self.relu(self.fc1(gate))
        gate = self.sigmoid(self.fc2(gate))
        return x * gate
class SEResNetBottleneckBase(nn.Module):
    """
    Base class for bottlenecks that implements `forward()` method.
    Subclasses must define conv1/bn1, conv2/bn2, conv3/bn3, relu, se_module
    and (optionally) downsample.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    def forward(self, x):
        """Bottleneck conv stack + SE re-weighting, then the residual add."""
        # identity shortcut, projected when shapes differ
        shortcut = self.downsample(x) if self.downsample is not None else x
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        # SE re-weighting happens before the residual addition
        out = self.se_module(out) + shortcut
        return self.relu(out)
class SEBottleneck(SEResNetBottleneckBase):
    """
    Bottleneck for SENet154.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    # Output width relative to `planes` (conv3 emits planes * 4 channels).
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super(SEBottleneck, self).__init__()
        # 1x1 entry conv widens to planes * 2 (wider than the classic bottleneck)
        self.conv1 = nn.Conv2d(inplanes, planes * 2, kernel_size=1, bias=False)
        self.bn1 = Norm2d(planes * 2)
        # 3x3 grouped conv carries the stride, i.e. any down-sampling happens here
        self.conv2 = nn.Conv2d(
            planes * 2, planes * 4, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False
        )
        self.bn2 = Norm2d(planes * 4)
        # 1x1 conv keeps the width at planes * 4
        self.conv3 = nn.Conv2d(planes * 4, planes * 4, kernel_size=1, bias=False)
        self.bn3 = Norm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # channel re-weighting, applied in the base class forward before the residual add
        self.se_module = SEModule(planes * 4, reduction=reduction)
        # optional projection for the shortcut when input/output shapes differ
        self.downsample = downsample
        self.stride = stride
class SEResNetBottleneck(SEResNetBottleneckBase):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
    implementation and uses `stride=stride` in `conv1` and not in `conv2`
    (the latter is used in the torchvision implementation of ResNet).
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    # Output width relative to `planes` (conv3 emits planes * 4 channels).
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super(SEResNetBottleneck, self).__init__()
        # Caffe-style: the stride sits on the 1x1 entry conv (see class docstring)
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
        self.bn1 = Norm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = Norm2d(planes)
        # 1x1 expansion conv to planes * 4
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = Norm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # channel re-weighting, applied in the base class forward before the residual add
        self.se_module = SEModule(planes * 4, reduction=reduction)
        # optional projection for the shortcut when input/output shapes differ
        self.downsample = downsample
        self.stride = stride
class SEResNeXtBottleneck(SEResNetBottleneckBase):
    """
    ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    # Output width relative to `planes` (conv3 emits planes * 4 channels).
    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4):
        super(SEResNeXtBottleneck, self).__init__()
        # ResNeXt width rule: scale by base_width/64, then multiply by cardinality
        width = math.floor(planes * (base_width / 64)) * groups
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)
        self.bn1 = Norm2d(width)
        # 3x3 grouped conv carries the stride (torchvision-style placement)
        self.conv2 = nn.Conv2d(width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False)
        self.bn2 = Norm2d(width)
        # 1x1 expansion conv to planes * 4
        self.conv3 = nn.Conv2d(width, planes * 4, kernel_size=1, bias=False)
        self.bn3 = Norm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # channel re-weighting, applied in the base class forward before the residual add
        self.se_module = SEModule(planes * 4, reduction=reduction)
        # optional projection for the shortcut when input/output shapes differ
        self.downsample = downsample
        self.stride = stride
def bnrelu(channels: int) -> nn.Sequential:
    """
    Single Layer BN and Relu, packaged as a Sequential.
    """
    return nn.Sequential(
        Norm2d(channels),
        nn.ReLU(inplace=True),
    )
class GlobalAvgPool2d(nn.Module):
    """
    Global average pooling over the input's spatial dimensions.
    Code adapted from:
    https://github.com/mapillary/inplace_abn/
    BSD 3-Clause License
    Copyright (c) 2017, mapillary
    All rights reserved.
    """

    def __init__(self):
        super(GlobalAvgPool2d, self).__init__()

    @staticmethod
    def forward(inputs: torch.Tensor) -> torch.Tensor:
        """Collapse every dim after the channel axis and return its mean."""
        n, c = inputs.size(0), inputs.size(1)
        return inputs.view(n, c, -1).mean(dim=2)
class IdentityResidualBlock(nn.Module):
    """
    Identity Residual Block for WideResnet.
    Code adapted from:
    https://github.com/mapillary/inplace_abn/
    BSD 3-Clause License
    Copyright (c) 2017, mapillary
    All rights reserved.
    """

    def __init__(
        self,
        in_channels: int,
        channels: list,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        norm_act: callable = bnrelu,
        dropout: callable = None,
        dist_bn: bool = False,
    ) -> None:
        """Configurable identity-mapping residual block
        Parameters
        ----------
        in_channels : int
            Number of input channels.
        channels : list of int
            Number of channels in the internal feature maps.
            Can either have two or three elements: if three construct
            a residual block with two `3 x 3` convolutions,
            otherwise construct a bottleneck block with `1 x 1`, then
            `3 x 3` then `1 x 1` convolutions.
        stride : int
            Stride of the first `3 x 3` convolution
        dilation : int
            Dilation to apply to the `3 x 3` convolutions.
        groups : int
            Number of convolution groups.
            This is used to create ResNeXt-style blocks and is only compatible with
            bottleneck blocks.
        norm_act : callable
            Function to create normalization / activation Module.
        dropout: callable
            Function to create Dropout Module.
        dist_bn: Boolean
            A variable to enable or disable use of distributed BN
        """
        super(IdentityResidualBlock, self).__init__()
        self.dist_bn = dist_bn

        # Check if we are using distributed BN and use the nn from encoding.nn
        # library rather than using standard pytorch.nn

        # Check parameters for inconsistencies
        if len(channels) != 2 and len(channels) != 3:
            raise ValueError("channels must contain either two or three values")
        if len(channels) == 2 and groups != 1:
            raise ValueError("groups > 1 are only valid if len(channels) == 3")

        # 3 internal widths => bottleneck variant; 2 => plain two-conv variant.
        is_bottleneck = len(channels) == 3
        # A 1x1 projection is needed whenever the shortcut cannot be an identity.
        need_proj_conv = stride != 1 or in_channels != channels[-1]

        # Pre-activation: BN+act is applied to the input before the conv stack.
        self.bn1 = norm_act(in_channels)
        if not is_bottleneck:
            # Two 3x3 convs; the first one carries the stride and dilation.
            layers = [
                (
                    "conv1",
                    nn.Conv2d(
                        in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, dilation=dilation
                    ),
                ),
                ("bn2", norm_act(channels[0])),
                (
                    "conv2",
                    nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, dilation=dilation),
                ),
            ]
            if dropout is not None:
                # Dropout is inserted between the two convs (after bn2).
                layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
        else:
            # Bottleneck: 1x1 reduce (strided), grouped 3x3, 1x1 expand.
            layers = [
                ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)),
                ("bn2", norm_act(channels[0])),
                (
                    "conv2",
                    nn.Conv2d(
                        channels[0],
                        channels[1],
                        3,
                        stride=1,
                        padding=dilation,
                        bias=False,
                        groups=groups,
                        dilation=dilation,
                    ),
                ),
                ("bn3", norm_act(channels[1])),
                ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)),
            ]
            if dropout is not None:
                # Dropout is inserted before the final 1x1 conv (after bn3).
                layers = layers[0:4] + [("dropout", dropout())] + layers[4:]
        # OrderedDict keeps the submodule names stable for state_dict keys.
        self.convs = nn.Sequential(OrderedDict(layers))

        if need_proj_conv:
            # 1x1 projection shortcut (applied to the pre-activated input).
            self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        This is the standard forward function for non-distributed batch norm
        """
        if hasattr(self, "proj_conv"):
            # Projection shortcut: shares the pre-activation with the conv path.
            bn1 = self.bn1(x)
            shortcut = self.proj_conv(bn1)
        else:
            # Identity shortcut: clone so the in-place add below cannot alias x.
            shortcut = x.clone()
            bn1 = self.bn1(x)

        out = self.convs(bn1)
        # In-place residual addition.
        out.add_(shortcut)
        return out
def conv3x3(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding (bias-free, "same" padding at stride 1)."""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class ResNetBasicBlock(nn.Module):
    """
    Basic Block for Resnet
    """

    # Output width equals `planes` (no channel expansion in the basic block).
    expansion = 1

    def __init__(self, inplanes: int, planes: int, stride: int = 1, downsample: callable = None):
        super(ResNetBasicBlock, self).__init__()
        # First 3x3 conv carries the stride (down-sampling happens here).
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = Norm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = Norm2d(planes)
        # Optional projection for the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """conv-bn-relu, conv-bn, residual add, final relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            # Project the input so it matches the conv path's shape.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class ResNetBottleneck(nn.Module):
    """
    Bottleneck Layer for Resnet
    """

    # Output width relative to `planes` (conv3 emits planes * 4 channels).
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(ResNetBottleneck, self).__init__()
        # 1x1 reduce
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = Norm2d(planes)
        # 3x3 conv carries the stride (torchvision-style placement)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = Norm2d(planes)
        # 1x1 expand to planes * expansion
        self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
        self.bn3 = Norm2d(planes * self.expansion)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection for the shortcut when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        """1x1 reduce, 3x3, 1x1 expand, residual add, final relu."""
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            # Project the input so it matches the conv path's shape.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
from collections import OrderedDict
import torch
import torch.nn as nn
from torch.utils import model_zoo
class CBR(nn.Module):
    """Convolution followed by batch normalization and a PReLU activation."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): stride rate for down-sampling. Default is 1
        """
        super().__init__()
        # "same" padding for odd kernel sizes
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(n_out, eps=1e-03)
        self.act = nn.PReLU(n_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv -> batch norm -> PReLU to ``x`` and return the result."""
        return self.act(self.bn(self.conv(x)))
class BR(nn.Module):
    """Batch normalization followed by a PReLU activation."""

    def __init__(self, n_out: int) -> None:
        """
        Args:
            n_out (int): output feature maps
        """
        super().__init__()
        self.bn = nn.BatchNorm2d(n_out, eps=1e-03)
        self.act = nn.PReLU(n_out)

    def forward(self, x):
        """Normalize ``x`` then apply the channel-wise PReLU."""
        return self.act(self.bn(x))
class CB(nn.Module):
    """Convolution followed by batch normalization (no activation)."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): optional stride for down-sampling. Default 1
        """
        super().__init__()
        # "same" padding for odd kernel sizes
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False)
        self.bn = nn.BatchNorm2d(n_out, eps=1e-03)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv -> batch norm to ``x`` and return the result."""
        return self.bn(self.conv(x))
class C(nn.Module):
    """A bare convolutional layer (no norm, no activation)."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): optional stride for down-sampling. Default 1
        """
        super().__init__()
        # "same" padding for odd kernel sizes
        pad = (k_size - 1) // 2
        self.conv = nn.Conv2d(n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Convolve ``x`` and return the feature map."""
        return self.conv(x)
class CDilated(nn.Module):
    """A dilated convolutional layer (no norm, no activation)."""

    def __init__(self, n_in: int, n_out: int, k_size: int, stride: int = 1, d: int = 1) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            k_size (int): kernel size
            stride (int): optional stride for down-sampling. Default 1
            d (int): optional dilation rate. Default 1
        """
        super().__init__()
        # padding grows with the dilation so the output keeps "same" size at stride 1
        pad = ((k_size - 1) // 2) * d
        self.conv = nn.Conv2d(
            n_in, n_out, (k_size, k_size), stride=stride, padding=(pad, pad), bias=False, dilation=d
        )

    def forward(self, x):
        """Apply the dilated convolution to ``x`` and return the feature map."""
        return self.conv(x)
class DownSamplerB(nn.Module):
    """ESP-style down-sampler: strided reduce, five parallel dilated branches,
    hierarchical merge, then batch norm and PReLU."""

    def __init__(self, n_in: int, n_out: int) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
        """
        super().__init__()
        branch = n_out // 5
        # first branch absorbs the remainder so the concat is exactly n_out wide
        first = n_out - 4 * branch
        self.c1 = C(n_in, branch, 3, 2)
        self.d1 = CDilated(branch, first, 3, 1, 1)
        self.d2 = CDilated(branch, branch, 3, 1, 2)
        self.d4 = CDilated(branch, branch, 3, 1, 4)
        self.d8 = CDilated(branch, branch, 3, 1, 8)
        self.d16 = CDilated(branch, branch, 3, 1, 16)
        self.bn = nn.BatchNorm2d(n_out, eps=1e-3)
        self.act = nn.PReLU(n_out)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Down-sample ``x`` and merge the dilated branch outputs hierarchically."""
        reduced = self.c1(x)
        outs = [d(reduced) for d in (self.d1, self.d2, self.d4, self.d8, self.d16)]
        # hierarchical fusion: d1, d2, d2+d4, d2+d4+d8, d2+d4+d8+d16
        merged = [outs[0], outs[1]]
        for o in outs[2:]:
            merged.append(merged[-1] + o)
        return self.act(self.bn(torch.cat(merged, 1)))
class DilatedParllelResidualBlockB(nn.Module):
    """
    This class defines the ESP block, which is based on the following principle
    Reduce ---> Split ---> Transform --> Merge
    """

    def __init__(self, n_in: int, n_out: int, add: bool = True) -> None:
        """
        Args:
            n_in (int): number of input channels
            n_out (int): number of output channels
            add (bool): if true, add a residual connection through identity operation. You can use
                        projection too as in ResNet paper, but we avoid to use it if the dimensions
                        are not the same because we do not want to increase the module complexity
        """
        super().__init__()
        branch_width = n_out // 5
        # First branch takes the rounding remainder so widths sum to n_out.
        first_width = n_out - 4 * branch_width
        self.c1 = C(n_in, branch_width, 1, 1)  # 1x1 reduce
        # Parallel dilated branches, dilation rates 2^0 .. 2^4.
        self.d1 = CDilated(branch_width, first_width, 3, 1, 1)
        self.d2 = CDilated(branch_width, branch_width, 3, 1, 2)
        self.d4 = CDilated(branch_width, branch_width, 3, 1, 4)
        self.d8 = CDilated(branch_width, branch_width, 3, 1, 8)
        self.d16 = CDilated(branch_width, branch_width, 3, 1, 16)
        self.bn = BR(n_out)
        self.add = add

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): input feature map

        Returns:
            torch.Tensor: transformed feature map
        """
        reduced = self.c1(x)
        # Hierarchical fusion: concatenate running sums of the dilated outputs.
        running = self.d2(reduced)
        pieces = [self.d1(reduced), running]
        for branch in (self.d4, self.d8, self.d16):
            running = running + branch(reduced)
            pieces.append(running)
        merged = torch.cat(pieces, 1)
        if self.add:
            # Identity shortcut; requires x and merged to have matching shapes.
            merged = x + merged
        return self.bn(merged)
class _AtrousSpatialPyramidPoolingModule(nn.Module):
"""
operations performed:
1x1 x depth
3x3 x depth dilation 6
3x3 x depth dilation 12
3x3 x depth dilation 18
image pooling
concatenate all together
Final 1x1 conv
"""
def __init__(self, in_dim, reduction_dim=256, output_stride=16, rates=(6, 12, 18)):
super(_AtrousSpatialPyramidPoolingModule, self).__init__()
# Check if we are using distributed BN and use the nn from encoding.nn
# library rather than using standard pytorch.nn
if output_stride == 8:
rates = [2 * r for r in rates]
elif output_stride == 16:
pass
else:
raise "output stride of {} not supported".format(output_stride)
self.features = []
# 1x1
self.features.append(
nn.Sequential(
nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False),
Norm2d(reduction_dim),
nn.ReLU(inplace=True),
)
)
# other rates
for r in rates:
self.features.append(
nn.Sequential(
nn.Conv2d(in_dim, reduction_dim, kernel_size=3, dilation=r, padding=r, bias=False),
Norm2d(reduction_dim),
nn.ReLU(inplace=True),
)
)
self.features = torch.nn.ModuleList(self.features)
# img level features
self.img_pooling = nn.AdaptiveAvgPool2d(1)
self.img_conv = nn.Sequential(
nn.Conv2d(in_dim, reduction_dim, kernel_size=1, bias=False), Norm2d(reduction_dim), nn.ReLU(inplace=True)
)
def forward(self, x):
x_size = x.size()
img_features = self.img_pooling(x)
img_features = self.img_conv(img_features)
img_features = Upsample(img_features, x_size[2:])
out = img_features
for f in self.features:
y = f(x)
out = torch.cat((out, y), 1)
return out
class InputProjectionA(nn.Module):
    """
    This class projects the input image to the same spatial dimensions as the feature map.
    For example, if the input image is 512 x512 x3 and spatial dimensions of feature map size are 56x56xF, then
    this class will generate an output of 56x56x3
    """

    def __init__(self, sampling_times: int) -> None:
        """
        Args:
            sampling_times (int): the rate at which one wants to down-sample the image
        """
        super().__init__()
        # One stride-2 average pool per halving of the resolution.
        self.pool = nn.ModuleList(
            [nn.AvgPool2d(3, stride=2, padding=1) for _ in range(sampling_times)]
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x (torch.Tensor): input RGB Image

        Returns:
            torch.Tensor: down-sampled image (pyramid-based approach)
        """
        for stage in self.pool:
            x = stage(x)
        return x
def Norm2d(in_channels):
    """
    Custom Norm Function to allow flexible switching
    """
    # Single indirection point: swap the normalization implementation here.
    return nn.BatchNorm2d(in_channels)
def Upsample(x, size):
    """
    Wrapper Around the Upsample Call
    """
    resized = nn.functional.interpolate(
        x, size=size, mode="bilinear", align_corners=True
    )
    return resized
def initialize_weights(*models):
    """
    Initialize Model Weights

    Conv/Linear weights get Kaiming-normal init (biases zeroed); BatchNorm2d
    layers are reset to weight 1 / bias 0.
    """
    for net in models:
        for layer in net.modules():
            # The two branches are mutually exclusive layer types.
            if isinstance(layer, nn.BatchNorm2d):
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
            elif isinstance(layer, (nn.Conv2d, nn.Linear)):
                nn.init.kaiming_normal_(layer.weight)
                if layer.bias is not None:
                    layer.bias.data.zero_()
def initialize_pretrained_model(model, num_classes, settings):
    """
    Initialize Pretrain Model Information,
    Download weights, load weights, set variables
    """
    assert num_classes == settings["num_classes"], "num_classes should be {}, but is {}".format(
        settings["num_classes"], num_classes
    )
    # Fetch the checkpoint from the settings URL and load it into the model.
    model.load_state_dict(model_zoo.load_url(settings["url"]))
    # Copy the preprocessing metadata onto the model instance.
    for attr in ("input_space", "input_size", "input_range", "mean", "std"):
        setattr(model, attr, settings[attr])
class SEModule(nn.Module):
    """
    Squeeze Excitation Module.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    def __init__(self, channels, reduction):
        super().__init__()
        squeezed = channels // reduction
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        # 1x1 convs play the role of the usual FC squeeze/excite pair.
        self.fc1 = nn.Conv2d(channels, squeezed, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(squeezed, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # Compute a per-channel gate in (0, 1) and rescale the input with it.
        gate = self.avg_pool(x)
        for stage in (self.fc1, self.relu, self.fc2, self.sigmoid):
            gate = stage(gate)
        return x * gate
class SEResNetBottleneckBase(nn.Module):
    """
    Base class for bottlenecks that implements `forward()` method.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    def forward(self, x):
        # Shortcut path: identity, or the projection provided by the subclass.
        shortcut = x if self.downsample is None else self.downsample(x)
        # Three conv/bn stages; the last one is activated only after the
        # SE-rescaled output has been added to the shortcut.
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out = self.se_module(out) + shortcut
        return self.relu(out)
class SEBottleneck(SEResNetBottleneckBase):
    """
    Bottleneck for SENet154.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super().__init__()
        mid = planes * 2
        wide = planes * 4
        # 1x1 expand, grouped (possibly strided) 3x3, then 1x1.
        self.conv1 = nn.Conv2d(inplanes, mid, kernel_size=1, bias=False)
        self.bn1 = Norm2d(mid)
        self.conv2 = nn.Conv2d(
            mid, wide, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False
        )
        self.bn2 = Norm2d(wide)
        self.conv3 = nn.Conv2d(wide, wide, kernel_size=1, bias=False)
        self.bn3 = Norm2d(wide)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(wide, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNetBottleneck(SEResNetBottleneckBase):
    """
    ResNet bottleneck with a Squeeze-and-Excitation module. It follows Caffe
    implementation and uses `stride=stride` in `conv1` and not in `conv2`
    (the latter is used in the torchvision implementation of ResNet).
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None):
        super().__init__()
        wide = planes * 4
        # Caffe-style: the stride lives on the 1x1 conv1, not on conv2.
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False, stride=stride)
        self.bn1 = Norm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, padding=1, groups=groups, bias=False)
        self.bn2 = Norm2d(planes)
        self.conv3 = nn.Conv2d(planes, wide, kernel_size=1, bias=False)
        self.bn3 = Norm2d(wide)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(wide, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
class SEResNeXtBottleneck(SEResNetBottleneckBase):
    """
    ResNeXt bottleneck type C with a Squeeze-and-Excitation module.
    Code adapted from:
    https://github.com/Cadene/pretrained-models.pytorch
    BSD 3-Clause License
    Copyright (c) 2017, <NAME>
    All rights reserved.
    """

    expansion = 4

    def __init__(self, inplanes, planes, groups, reduction, stride=1, downsample=None, base_width=4):
        super().__init__()
        # ResNeXt cardinality: internal width scales with base_width and groups.
        width = math.floor(planes * (base_width / 64)) * groups
        wide = planes * 4
        self.conv1 = nn.Conv2d(inplanes, width, kernel_size=1, bias=False, stride=1)
        self.bn1 = Norm2d(width)
        self.conv2 = nn.Conv2d(
            width, width, kernel_size=3, stride=stride, padding=1, groups=groups, bias=False
        )
        self.bn2 = Norm2d(width)
        self.conv3 = nn.Conv2d(width, wide, kernel_size=1, bias=False)
        self.bn3 = Norm2d(wide)
        self.relu = nn.ReLU(inplace=True)
        self.se_module = SEModule(wide, reduction=reduction)
        self.downsample = downsample
        self.stride = stride
def bnrelu(channels: int) -> nn.Sequential:
    """
    Single Layer BN and Relu
    """
    stages = [Norm2d(channels), nn.ReLU(inplace=True)]
    return nn.Sequential(*stages)
class GlobalAvgPool2d(nn.Module):
    """
    Global average pooling over the input's spatial dimensions.
    Code adapted from:
    https://github.com/mapillary/inplace_abn/
    BSD 3-Clause License
    Copyright (c) 2017, mapillary
    All rights reserved.
    """

    def __init__(self):
        super().__init__()

    @staticmethod
    def forward(inputs: torch.Tensor) -> torch.Tensor:
        # Flatten H x W into one axis, then average it away: (N, C, H, W) -> (N, C).
        batch, channels = inputs.size(0), inputs.size(1)
        return inputs.view(batch, channels, -1).mean(dim=2)
class IdentityResidualBlock(nn.Module):
    """
    Identity Residual Block for WideResnet.
    Code adapted from:
    https://github.com/mapillary/inplace_abn/
    BSD 3-Clause License
    Copyright (c) 2017, mapillary
    All rights reserved.
    """
    def __init__(
        self,
        in_channels: int,
        channels: list,
        stride: int = 1,
        dilation: int = 1,
        groups: int = 1,
        norm_act: callable = bnrelu,
        dropout: callable = None,
        dist_bn: bool = False,
    ) -> None:
        """Configurable identity-mapping residual block
        Parameters
        ----------
        in_channels : int
            Number of input channels.
        channels : list of int
            Number of channels in the internal feature maps.
            Can either have two or three elements: if three construct
            a residual block with two `3 x 3` convolutions,
            otherwise construct a bottleneck block with `1 x 1`, then
            `3 x 3` then `1 x 1` convolutions.
        stride : int
            Stride of the first `3 x 3` convolution
        dilation : int
            Dilation to apply to the `3 x 3` convolutions.
        groups : int
            Number of convolution groups.
            This is used to create ResNeXt-style blocks and is only compatible with
            bottleneck blocks.
        norm_act : callable
            Function to create normalization / activation Module.
        dropout: callable
            Function to create Dropout Module.
        dist_bn: Boolean
            A variable to enable or disable use of distributed BN
        """
        super(IdentityResidualBlock, self).__init__()
        self.dist_bn = dist_bn
        # Check if we are using distributed BN and use the nn from encoding.nn
        # library rather than using standard pytorch.nn
        # Check parameters for inconsistencies
        if len(channels) != 2 and len(channels) != 3:
            raise ValueError("channels must contain either two or three values")
        if len(channels) == 2 and groups != 1:
            raise ValueError("groups > 1 are only valid if len(channels) == 3")
        # Three channel entries select the 1x1 -> 3x3 -> 1x1 bottleneck form.
        is_bottleneck = len(channels) == 3
        # A 1x1 projection shortcut is needed whenever the residual path
        # changes resolution (stride) or channel count.
        need_proj_conv = stride != 1 or in_channels != channels[-1]
        # Pre-activation: BN/activation is applied before the first conv.
        self.bn1 = norm_act(in_channels)
        if not is_bottleneck:
            # Two 3x3 convolutions; dilation doubles as 'same' padding.
            layers = [
                (
                    "conv1",
                    nn.Conv2d(
                        in_channels, channels[0], 3, stride=stride, padding=dilation, bias=False, dilation=dilation
                    ),
                ),
                ("bn2", norm_act(channels[0])),
                (
                    "conv2",
                    nn.Conv2d(channels[0], channels[1], 3, stride=1, padding=dilation, bias=False, dilation=dilation),
                ),
            ]
            if dropout is not None:
                # Insert dropout between bn2 and conv2; the indices (and the
                # layer names above) define the state_dict keys, so keep them.
                layers = layers[0:2] + [("dropout", dropout())] + layers[2:]
        else:
            # Bottleneck: 1x1 reduce, grouped dilated 3x3, 1x1 expand.
            layers = [
                ("conv1", nn.Conv2d(in_channels, channels[0], 1, stride=stride, padding=0, bias=False)),
                ("bn2", norm_act(channels[0])),
                (
                    "conv2",
                    nn.Conv2d(
                        channels[0],
                        channels[1],
                        3,
                        stride=1,
                        padding=dilation,
                        bias=False,
                        groups=groups,
                        dilation=dilation,
                    ),
                ),
                ("bn3", norm_act(channels[1])),
                ("conv3", nn.Conv2d(channels[1], channels[2], 1, stride=1, padding=0, bias=False)),
            ]
            if dropout is not None:
                # Dropout sits between bn3 and conv3 in the bottleneck form.
                layers = layers[0:4] + [("dropout", dropout())] + layers[4:]
        self.convs = nn.Sequential(OrderedDict(layers))
        if need_proj_conv:
            self.proj_conv = nn.Conv2d(in_channels, channels[-1], 1, stride=stride, padding=0, bias=False)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        This is the standard forward function for non-distributed batch norm
        """
        if hasattr(self, "proj_conv"):
            # With a projection shortcut, both paths share the same bn1 output.
            bn1 = self.bn1(x)
            shortcut = self.proj_conv(bn1)
        else:
            # clone() protects the shortcut from the in-place add_ below.
            shortcut = x.clone()
            bn1 = self.bn1(x)
        out = self.convs(bn1)
        out.add_(shortcut)
        return out
def conv3x3(in_planes: int, out_planes: int, stride: int = 1) -> nn.Conv2d:
    """3x3 convolution with padding"""
    return nn.Conv2d(
        in_planes,
        out_planes,
        kernel_size=3,
        stride=stride,
        padding=1,
        bias=False,
    )
class ResNetBasicBlock(nn.Module):
    """
    Basic Block for Resnet

    Two 3x3 convolutions with an identity (or projected) shortcut.
    """

    expansion = 1

    def __init__(self, inplanes: int, planes: int, stride: int = 1, downsample: callable = None):
        super().__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = Norm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = Norm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut: identity, or projection when shape/stride changes.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out += identity
        return self.relu(out)
class ResNetBottleneck(nn.Module):
    """
    Bottleneck Layer for Resnet

    1x1 reduce -> 3x3 (possibly strided) -> 1x1 expand, plus a shortcut.
    """

    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super().__init__()
        widened = planes * self.expansion
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = Norm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
        self.bn2 = Norm2d(planes)
        self.conv3 = nn.Conv2d(planes, widened, kernel_size=1, bias=False)
        self.bn3 = Norm2d(widened)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        # Shortcut: identity, or projection when shape/stride changes.
        identity = x if self.downsample is None else self.downsample(x)
        out = self.relu(self.bn1(self.conv1(x)))
        out = self.relu(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        out += identity
        return self.relu(out)
import uuid
from datetime import date, datetime, timedelta
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
import pyarrow
from pydantic import StrictStr
from pydantic.typing import Literal
from tenacity import Retrying, retry_if_exception_type, stop_after_delay, wait_fixed
from feast.data_source import DataSource
from feast.errors import (
FeastProviderLoginError,
InvalidEntityType,
MaxcomputeJobCancelled,
MaxcomputeJobStillRunning,
MaxcomputeQueryError,
MaxcomputeUploadError,
)
from feast.feature_view import FeatureView
from feast.infra.offline_stores import offline_utils
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.infra.utils import aliyun_utils
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
from .maxcompute_source import MaxcomputeSource
try:
    import odps
    from odps import ODPS, options
    # Global PyODPS client settings: enable the MaxCompute 2.0 SQL extension
    # and stream query results through the instance tunnel with no row limit.
    options.sql.use_odps2_extension = True
    options.tunnel.use_instance_tunnel = True
    options.tunnel.limit_instance_tunnel = False
except ImportError as e:
    from feast.errors import FeastExtrasDependencyImportError
    # Re-raise as a Feast error that points users at the "aliyun" extra.
    raise FeastExtrasDependencyImportError("aliyun", str(e))
class MaxcomputeOfflineStoreConfig(FeastConfigBaseModel):
    """ Offline store config for Aliyun Maxcompute """
    # Pydantic model; each field is followed by a bare-string doc that Feast
    # surfaces as field documentation. Credentials are passed verbatim to
    # aliyun_utils.get_maxcompute_client().
    type: Literal["maxcompute"] = "maxcompute"
    """ Offline store type selector"""
    region: Optional[StrictStr] = None
    """ (optional)Macompute region name"""
    project: StrictStr
    """ Maxcompute project name"""
    access_key: StrictStr
    """ Maxcompute access key"""
    secret_access_key: StrictStr
    """ Maxcompute secret access key"""
    end_point: Optional[StrictStr] = None
    """ (optional)Maxcompute endpoint"""
class MaxcomputeOfflineStore(OfflineStore):
    """OfflineStore implementation backed by Aliyun MaxCompute (ODPS)."""
    @staticmethod
    def pull_latest_from_table_or_query(
        config: RepoConfig,
        data_source: DataSource,
        join_key_columns: List[str],
        feature_name_columns: List[str],
        event_timestamp_column: str,
        created_timestamp_column: Optional[str],
        start_date: datetime,
        end_date: datetime,
    ) -> RetrievalJob:
        """Build a job that selects, per entity key, the newest row whose
        event timestamp falls within [start_date, end_date].

        The query ranks rows with ROW_NUMBER() partitioned by the join keys
        and ordered by the timestamp column(s) descending, keeping rank 1.
        The query is not executed here; it runs when the returned job is
        materialized.
        """
        assert isinstance(data_source, MaxcomputeSource)
        from_expression = data_source.get_table_query_string()
        partition_by_join_key_string = ", ".join(join_key_columns)
        # With no join keys the PARTITION BY clause is omitted entirely,
        # ranking all rows in a single window.
        if partition_by_join_key_string != "":
            partition_by_join_key_string = (
                "PARTITION BY " + partition_by_join_key_string
            )
        timestamps = [event_timestamp_column]
        if created_timestamp_column:
            # created_timestamp breaks ties between rows with equal event time.
            timestamps.append(created_timestamp_column)
        timestamp_desc_string = " DESC, ".join(timestamps) + " DESC"
        field_string = ", ".join(join_key_columns + feature_name_columns + timestamps)
        # client = aliyun_utils.get_maxcompute_client(project=config.offline_store.project)
        client = aliyun_utils.get_maxcompute_client(
            ak=config.offline_store.access_key,
            sk=config.offline_store.secret_access_key,
            project=config.offline_store.project,
            region=config.offline_store.region,
            endpoint=config.offline_store.end_point,
        )
        query = f"""
        SELECT {field_string}
        FROM (
            SELECT {field_string},
            ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS _feast_row
            FROM {from_expression}
            WHERE cast({event_timestamp_column} as TIMESTAMP) BETWEEN TIMESTAMP('{start_date}') AND TIMESTAMP('{end_date}')
        )
        WHERE _feast_row = 1
        """
        # When materializing a single feature view, we don't need full feature names. On demand transforms aren't materialized
        return MaxcomputeRetrievalJob(
            query=query,
            client=client,
            config=config,
            full_feature_names=False,
            on_demand_feature_views=None,
        )
    @staticmethod
    def get_historical_features(
        config: RepoConfig,
        feature_views: List[FeatureView],
        feature_refs: List[str],
        entity_df: Union[pd.DataFrame, odps.df.DataFrame, str],
        registry: Registry,
        project: str,
        full_feature_names: bool = False,
    ) -> RetrievalJob:
        """Build a point-in-time-correct join of the requested features onto
        ``entity_df``.

        ``entity_df`` may be a pandas DataFrame, a PyODPS DataFrame, or a
        string naming an existing MaxCompute table. Non-string inputs are
        uploaded to a temporary table first. The SQL is rendered from the
        MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN Jinja template and executed
        lazily by the returned job.
        """
        # TODO: Add entity_df validation in order to fail before interacting with Maxcompute
        assert isinstance(config.offline_store, MaxcomputeOfflineStoreConfig)
        client = aliyun_utils.get_maxcompute_client(
            ak=config.offline_store.access_key,
            sk=config.offline_store.secret_access_key,
            project=config.offline_store.project,
            region=config.offline_store.region,
            endpoint=config.offline_store.end_point,
        )
        assert isinstance(config.offline_store, MaxcomputeOfflineStoreConfig)
        # local pandas data frame need upload
        if isinstance(entity_df, str):
            table_reference = entity_df
        else:
            table_reference = _get_table_reference_for_new_entity(
                client, config.offline_store.project
            )
        # Uploads the dataframe when needed and infers the entity schema;
        # string inputs only have their schema read back.
        entity_schema = _upload_entity_df_and_get_entity_schema(
            client=client, table_name=table_reference, entity_df=entity_df
        )
        entity_df_event_timestamp_col = offline_utils.infer_event_timestamp_from_entity_df(
            entity_schema
        )
        expected_join_keys = offline_utils.get_expected_join_keys(
            project, feature_views, registry
        )
        # Fails fast if the entity dataframe is missing required join keys.
        offline_utils.assert_expected_columns_in_entity_df(
            entity_schema, expected_join_keys, entity_df_event_timestamp_col
        )
        # Build a query context containing all information required to template the Maxcompute SQL query
        query_context = offline_utils.get_feature_view_query_context(
            feature_refs, feature_views, registry, project
        )
        # Generate the Maxcompute SQL query from the query context
        query = offline_utils.build_point_in_time_query(
            query_context,
            left_table_query_string=table_reference,
            entity_df_event_timestamp_col=entity_df_event_timestamp_col,
            query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,
            full_feature_names=full_feature_names,
        )
        return MaxcomputeRetrievalJob(
            query=query,
            client=client,
            config=config,
            full_feature_names=full_feature_names,
            on_demand_feature_views=registry.list_on_demand_feature_views(
                project, allow_cache=True
            ),
        )
class MaxcomputeRetrievalJob(RetrievalJob):
    """RetrievalJob that lazily executes a MaxCompute SQL query via PyODPS."""
    def __init__(
        self,
        query: str,
        client: ODPS,
        config: RepoConfig,
        full_feature_names: bool,
        on_demand_feature_views: Optional[List[OnDemandFeatureView]],
    ):
        # The query is only executed when one of the to_*() methods runs.
        self.query = query
        self.client = client
        self.config = config
        self._full_feature_names = full_feature_names
        self._on_demand_feature_views = on_demand_feature_views
    @property
    def full_feature_names(self) -> bool:
        # True when output columns are prefixed with their feature-view name.
        return self._full_feature_names
    @property
    def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:
        return self._on_demand_feature_views
    def to_df_internal(self) -> pd.DataFrame:
        """Execute the query and return the result as a pandas DataFrame."""
        # TODO: Ideally only start this job when the user runs "get_historical_features", not when they run to_df()
        df = self._to_df()
        return df
    def to_sql(self) -> str:
        """
        Returns the SQL query that will be executed in Maxcompute to build the historical feature table.
        """
        return self.query
    def to_maxcompute(self, table_name: str,overwrite:bool = True) -> None:
        """
        Triggers the execution of a historical feature retrieval query and exports the results to a Maxcompute table.
        Args:
            table_name: specify name of destination table
            overwrite: if True (default), drop any existing table of that
                name before creating the new one
        """
        if overwrite:
            sql = f"DROP TABLE IF EXISTS {table_name}"
            job = self.client.run_sql(sql)
            print("logview url:", job.get_logview_address())
        query = self.query
        # CTAS with LIFECYCLE 1 so MaxCompute can reclaim the table
        # automatically (presumably after 1 day -- confirm against the
        # MaxCompute lifecycle documentation).
        sql = f"CREATE TABLE {table_name} LIFECYCLE 1 AS {query}"
        job = self.client.run_sql(sql)
        print("logview url:", job.get_logview_address())
        job.wait_for_success()
    def _to_df(self) -> pd.DataFrame:
        """Materialize the query into a temporary table and read it back
        into pandas via a PyODPS DataFrame."""
        table_reference = _get_table_reference_for_new_entity(
            self.client, self.config.offline_store.project
        )
        query = self.query
        # LIFECYCLE 1: let MaxCompute expire the temp table on its own.
        sql = f"CREATE TABLE {table_reference} LIFECYCLE 1 AS {query}"
        job = self.client.run_sql(sql)
        print("logview url:", job.get_logview_address())
        job.wait_for_success()
        table = odps.df.DataFrame(self.client.get_table(table_reference))
        return table.to_pandas()
    def to_arrow(self) -> pyarrow.Table:
        """Execute the query and return the result as a pyarrow Table."""
        df = self._to_df()
        return pyarrow.Table.from_pandas(df)
def _get_table_reference_for_new_entity(client: ODPS, dataset_project: str) -> str:
    """Gets the table_id for the new entity to be uploaded."""
    temp_name = offline_utils.get_temp_entity_table_name()
    return "{}.{}".format(dataset_project, temp_name)
def _upload_entity_df_and_get_entity_schema(
    client: ODPS,
    table_name: str,
    entity_df: Union[pd.DataFrame, odps.df.DataFrame, str],
) -> Dict[str, np.dtype]:
    """Uploads a Pandas entity dataframe into a Maxcompute table and returns
    the resulting table schema.

    Args:
        client: authenticated ODPS client
        table_name: destination (or, for string input, existing) table name
        entity_df: pandas DataFrame to upload, a PyODPS DataFrame, or a
            string referencing an existing table

    Returns:
        Mapping of column name to column dtype for the entity table.

    Raises:
        MaxcomputeUploadError: if persisting the pandas DataFrame fails.
        InvalidEntityType: if ``entity_df`` is none of the supported types.
    """
    # isinstance (rather than `type(...) is str`) also accepts str subclasses.
    if isinstance(entity_df, str):
        # entity_df is a table reference: read a single row back to infer
        # the schema without uploading anything.
        limited_entity_df = (
            odps.df.DataFrame(client.get_table(table_name)).limit(1).execute()
        )
        entity_schema = dict(
            zip(limited_entity_df.schema.names, limited_entity_df.schema.types)
        )
    elif isinstance(entity_df, pd.DataFrame):
        # Drop the index so that we dont have unnecessary columns
        entity_df.reset_index(drop=True, inplace=True)
        # Upload the dataframe into Maxcompute, creating a temporary table
        upload_df = odps.df.DataFrame(entity_df)
        try:
            upload_df.persist(table_name, odps=client, lifecycle=1)
        except Exception as e:
            # Chain the original exception so the upload failure's traceback
            # is preserved for debugging.
            raise MaxcomputeUploadError(e) from e
        entity_schema = dict(zip(upload_df.dtypes.names, upload_df.dtypes.types))
    elif isinstance(entity_df, odps.df.DataFrame):
        # Just return the Maxcompute schema
        entity_schema = dict(zip(entity_df.dtypes.names, entity_df.dtypes.types))
    else:
        raise InvalidEntityType(type(entity_df))
    return entity_schema
# TODO: Optimizations
# * Use GENERATE_UUID() instead of ROW_NUMBER(), or join on entity columns directly
# * Precompute ROW_NUMBER() so that it doesn't have to be recomputed for every query on entity_dataframe
# * Create temporary tables instead of keeping all tables in memory
# Note: Keep this in sync with sdk/python/feast/infra/offline_stores/redshift.py:MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN
# Jinja2 template rendered by offline_utils.build_point_in_time_query() in
# get_historical_features(); variables: featureviews, left_table_query_string,
# entity_df_event_timestamp_col, full_feature_names. The SQL text itself is
# runtime behavior -- do not edit without re-validating against MaxCompute.
MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = """
--Compute a deterministic hash for the `left_table_query_string` that will be used throughout
--all the logic as the field to GROUP BY the data
WITH entity_dataframe AS (
    SELECT *,
        CAST({{entity_df_event_timestamp_col}} AS TIMESTAMP) AS entity_timestamp
        {% for featureview in featureviews %}
            ,CONCAT(
                {% for entity in featureview.entities %}
                    CAST({{entity}} AS STRING),
                {% endfor %}
                CAST({{entity_df_event_timestamp_col}} AS STRING)
            ) AS {{featureview.name}}__entity_row_unique_id
        {% endfor %}
    FROM {{ left_table_query_string }}
),
{% for featureview in featureviews %}
{{ featureview.name }}__entity_dataframe AS (
    SELECT
        {{ featureview.entities | join(', ')}},
        cast(entity_timestamp as TIMESTAMP),
        {{featureview.name}}__entity_row_unique_id
    FROM entity_dataframe
    GROUP BY {{ featureview.entities | join(', ')}}, entity_timestamp, {{featureview.name}}__entity_row_unique_id
),
-- This query template performs the point-in-time correctness join for a single feature set table
-- to the provided entity table.
--
-- 1. We first join the current feature_view to the entity dataframe that has been passed.
-- This JOIN has the following logic:
--    - For each row of the entity dataframe, only keep the rows where the `event_timestamp_column`
--    is less than the one provided in the entity dataframe
--    - If there a TTL for the current feature_view, also keep the rows where the `event_timestamp_column`
--    is higher the the one provided minus the TTL
--    - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been
--    computed previously
--
-- The output of this CTE will contain all the necessary information and already filtered out most
-- of the data that is not relevant.
{{ featureview.name }}__subquery AS (
    SELECT
        cast({{ featureview.event_timestamp_column }} as TIMESTAMP) as event_timestamp,
        {{ featureview.created_timestamp_column ~ ' as created_timestamp,' if featureview.created_timestamp_column else '' }}
        {{ featureview.entity_selections | join(', ')}},
        {% for feature in featureview.features %}
            {{ feature }} as {% if full_feature_names %}{{ featureview.name }}__{{feature}}{% else %}{{ feature }}{% endif %}{% if loop.last %}{% else %}, {% endif %}
        {% endfor %}
    FROM {{ featureview.table_subquery }}
    WHERE cast({{ featureview.event_timestamp_column }} as TIMESTAMP) <= (SELECT MAX(entity_timestamp) FROM entity_dataframe)
    {% if featureview.ttl == 0 %}{% else %}
    AND cast({{ featureview.event_timestamp_column }} as TIMESTAMP) >= DATEADD((SELECT MIN(entity_timestamp) FROM entity_dataframe), -{{ featureview.ttl }}, "ss")
    {% endif %}
),
{{ featureview.name }}__base AS (
    SELECT
        /*+ mapjoin({{ featureview.name }}__entity_dataframe)*/ subquery.*,
        entity_dataframe.entity_timestamp,
        entity_dataframe.{{featureview.name}}__entity_row_unique_id
    FROM {{ featureview.name }}__subquery AS subquery
    JOIN {{ featureview.name }}__entity_dataframe AS entity_dataframe
    ON TRUE
        AND subquery.event_timestamp <= entity_dataframe.entity_timestamp
        {% if featureview.ttl == 0 %}{% else %}
        AND subquery.event_timestamp >= DATEADD(entity_dataframe.entity_timestamp, -{{ featureview.ttl }}, "ss")
        {% endif %}
        {% for entity in featureview.entities %}
        AND subquery.{{ entity }} = entity_dataframe.{{ entity }}
        {% endfor %}
),
-- 2. If the `created_timestamp_column` has been set, we need to
-- deduplicate the data first. This is done by calculating the
-- `MAX(created_at_timestamp)` for each event_timestamp.
-- We then join the data on the next CTE
{% if featureview.created_timestamp_column %}
{{ featureview.name }}__dedup AS (
    SELECT
        {{featureview.name}}__entity_row_unique_id,
        event_timestamp,
        MAX(created_timestamp) as created_timestamp
    FROM {{ featureview.name }}__base
    GROUP BY {{featureview.name}}__entity_row_unique_id, event_timestamp
),
{% endif %}
-- 3. The data has been filtered during the first CTE "*__base"
-- Thus we only need to compute the latest timestamp of each feature.
{{ featureview.name }}__latest AS (
    SELECT
        {{featureview.name}}__entity_row_unique_id,
        event_timestamp,
        created_timestamp
    FROM
    (
        SELECT *,
            ROW_NUMBER() OVER(
                PARTITION BY {{featureview.name}}__entity_row_unique_id
                ORDER BY event_timestamp DESC{% if featureview.created_timestamp_column %},created_timestamp DESC{% endif %}
            ) AS row_number
        FROM {{ featureview.name }}__base
        {% if featureview.created_timestamp_column %}
            JOIN {{ featureview.name }}__dedup
            USING ({{featureview.name}}__entity_row_unique_id, event_timestamp, created_timestamp)
        {% endif %}
    )
    WHERE row_number = 1
),
-- 4. Once we know the latest value of each feature for a given timestamp,
-- we can join again the data back to the original "base" dataset
{{ featureview.name }}__cleaned AS (
    SELECT base.*,
        {{featureview.name}}__entity_row_unique_id
    FROM {{ featureview.name }}__base as base
    JOIN {{ featureview.name }}__latest
    USING(
        {{featureview.name}}__entity_row_unique_id,
        event_timestamp
        {% if featureview.created_timestamp_column %}
            ,created_timestamp
        {% endif %}
    )
){% if loop.last %}{% else %}, {% endif %}
{% endfor %}
-- Joins the outputs of multiple time travel joins to a single table.
-- The entity_dataframe dataset being our source of truth here.
SELECT *
FROM entity_dataframe
{% for featureview in featureviews %}
LEFT JOIN (
    SELECT
        {{featureview.name}}__entity_row_unique_id
        {% for feature in featureview.features %}
            ,{% if full_feature_names %}{{ featureview.name }}__{{feature}}{% else %}{{ feature }}{% endif %}
        {% endfor %}
    FROM {{ featureview.name }}__cleaned
) {{ featureview.name }}__u USING ({{featureview.name}}__entity_row_unique_id)
{% endfor %}
"""
from datetime import date, datetime, timedelta
from typing import Dict, List, Optional, Union
import numpy as np
import pandas as pd
import pyarrow
from pydantic import StrictStr
from pydantic.typing import Literal
from tenacity import Retrying, retry_if_exception_type, stop_after_delay, wait_fixed
from feast.data_source import DataSource
from feast.errors import (
FeastProviderLoginError,
InvalidEntityType,
MaxcomputeJobCancelled,
MaxcomputeJobStillRunning,
MaxcomputeQueryError,
MaxcomputeUploadError,
)
from feast.feature_view import FeatureView
from feast.infra.offline_stores import offline_utils
from feast.infra.offline_stores.offline_store import OfflineStore, RetrievalJob
from feast.infra.utils import aliyun_utils
from feast.on_demand_feature_view import OnDemandFeatureView
from feast.registry import Registry
from feast.repo_config import FeastConfigBaseModel, RepoConfig
from .maxcompute_source import MaxcomputeSource
try:
    import odps
    from odps import ODPS, options
    # Global PyODPS client settings: enable the MaxCompute 2.0 SQL extension
    # and stream query results through the instance tunnel with no row limit.
    options.sql.use_odps2_extension = True
    options.tunnel.use_instance_tunnel = True
    options.tunnel.limit_instance_tunnel = False
except ImportError as e:
    from feast.errors import FeastExtrasDependencyImportError
    # Re-raise as a Feast error that points users at the "aliyun" extra.
    raise FeastExtrasDependencyImportError("aliyun", str(e))
class MaxcomputeOfflineStoreConfig(FeastConfigBaseModel):
    """Offline store config for Aliyun Maxcompute.

    Credentials (access_key / secret_access_key) and the project are
    required; region and end_point are optional and passed through to
    ``aliyun_utils.get_maxcompute_client``.
    """

    type: Literal["maxcompute"] = "maxcompute"
    """ Offline store type selector"""

    region: Optional[StrictStr] = None
    """ (optional)Macompute region name"""

    project: StrictStr
    """ Maxcompute project name"""

    access_key: StrictStr
    """ Maxcompute access key"""

    secret_access_key: StrictStr
    """ Maxcompute secret access key"""

    end_point: Optional[StrictStr] = None
    """ (optional)Maxcompute endpoint"""
class MaxcomputeOfflineStore(OfflineStore):
    """Feast OfflineStore implementation backed by Aliyun Maxcompute (ODPS)."""

    @staticmethod
    def pull_latest_from_table_or_query(
        config: RepoConfig,
        data_source: DataSource,
        join_key_columns: List[str],
        feature_name_columns: List[str],
        event_timestamp_column: str,
        created_timestamp_column: Optional[str],
        start_date: datetime,
        end_date: datetime,
    ) -> RetrievalJob:
        """Return a job selecting the latest row per join key in [start_date, end_date].

        Uses ROW_NUMBER() partitioned by the join keys and ordered by the
        timestamp column(s) descending, keeping only the first row.
        """
        assert isinstance(data_source, MaxcomputeSource)
        from_expression = data_source.get_table_query_string()

        partition_by_join_key_string = ", ".join(join_key_columns)
        if partition_by_join_key_string != "":
            partition_by_join_key_string = (
                "PARTITION BY " + partition_by_join_key_string
            )
        timestamps = [event_timestamp_column]
        if created_timestamp_column:
            timestamps.append(created_timestamp_column)
        # Most-recent-first ordering; "_feast_row = 1" below then picks the latest row.
        timestamp_desc_string = " DESC, ".join(timestamps) + " DESC"
        field_string = ", ".join(join_key_columns + feature_name_columns + timestamps)

        # client = aliyun_utils.get_maxcompute_client(project=config.offline_store.project)
        client = aliyun_utils.get_maxcompute_client(
            ak=config.offline_store.access_key,
            sk=config.offline_store.secret_access_key,
            project=config.offline_store.project,
            region=config.offline_store.region,
            endpoint=config.offline_store.end_point,
        )
        query = f"""
        SELECT {field_string}
        FROM (
            SELECT {field_string},
            ROW_NUMBER() OVER({partition_by_join_key_string} ORDER BY {timestamp_desc_string}) AS _feast_row
            FROM {from_expression}
            WHERE cast({event_timestamp_column} as TIMESTAMP) BETWEEN TIMESTAMP('{start_date}') AND TIMESTAMP('{end_date}')
        )
        WHERE _feast_row = 1
        """
        # When materializing a single feature view, we don't need full feature names. On demand transforms aren't materialized
        return MaxcomputeRetrievalJob(
            query=query,
            client=client,
            config=config,
            full_feature_names=False,
            on_demand_feature_views=None,
        )

    @staticmethod
    def get_historical_features(
        config: RepoConfig,
        feature_views: List[FeatureView],
        feature_refs: List[str],
        entity_df: Union[pd.DataFrame, odps.df.DataFrame, str],
        registry: Registry,
        project: str,
        full_feature_names: bool = False,
    ) -> RetrievalJob:
        """Build a point-in-time-correct join of the requested features onto ``entity_df``.

        ``entity_df`` may be a pandas DataFrame (uploaded to a temp table), an
        ODPS DataFrame, or the name of an existing Maxcompute table (str).
        """
        # TODO: Add entity_df validation in order to fail before interacting with Maxcompute
        assert isinstance(config.offline_store, MaxcomputeOfflineStoreConfig)
        client = aliyun_utils.get_maxcompute_client(
            ak=config.offline_store.access_key,
            sk=config.offline_store.secret_access_key,
            project=config.offline_store.project,
            region=config.offline_store.region,
            endpoint=config.offline_store.end_point,
        )
        # NOTE(review): duplicate of the assert above — harmless but redundant.
        assert isinstance(config.offline_store, MaxcomputeOfflineStoreConfig)

        # local pandas data frame need upload
        if isinstance(entity_df, str):
            table_reference = entity_df
        else:
            table_reference = _get_table_reference_for_new_entity(
                client, config.offline_store.project
            )
        entity_schema = _upload_entity_df_and_get_entity_schema(
            client=client, table_name=table_reference, entity_df=entity_df
        )

        entity_df_event_timestamp_col = offline_utils.infer_event_timestamp_from_entity_df(
            entity_schema
        )

        expected_join_keys = offline_utils.get_expected_join_keys(
            project, feature_views, registry
        )
        offline_utils.assert_expected_columns_in_entity_df(
            entity_schema, expected_join_keys, entity_df_event_timestamp_col
        )

        # Build a query context containing all information required to template the Maxcompute SQL query
        query_context = offline_utils.get_feature_view_query_context(
            feature_refs, feature_views, registry, project
        )

        # Generate the Maxcompute SQL query from the query context
        query = offline_utils.build_point_in_time_query(
            query_context,
            left_table_query_string=table_reference,
            entity_df_event_timestamp_col=entity_df_event_timestamp_col,
            query_template=MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN,
            full_feature_names=full_feature_names,
        )

        return MaxcomputeRetrievalJob(
            query=query,
            client=client,
            config=config,
            full_feature_names=full_feature_names,
            on_demand_feature_views=registry.list_on_demand_feature_views(
                project, allow_cache=True
            ),
        )
class MaxcomputeRetrievalJob(RetrievalJob):
    """A RetrievalJob that lazily executes a historical-feature SQL query on Maxcompute."""

    def __init__(
        self,
        query: str,
        client: ODPS,
        config: RepoConfig,
        full_feature_names: bool,
        on_demand_feature_views: Optional[List[OnDemandFeatureView]],
    ):
        self.query = query
        self.client = client
        self.config = config
        self._full_feature_names = full_feature_names
        self._on_demand_feature_views = on_demand_feature_views

    @property
    def full_feature_names(self) -> bool:
        return self._full_feature_names

    @property
    def on_demand_feature_views(self) -> Optional[List[OnDemandFeatureView]]:
        return self._on_demand_feature_views

    def to_df_internal(self) -> pd.DataFrame:
        # TODO: Ideally only start this job when the user runs "get_historical_features", not when they run to_df()
        df = self._to_df()
        return df

    def to_sql(self) -> str:
        """
        Returns the SQL query that will be executed in Maxcompute to build the historical feature table.
        """
        return self.query

    def to_maxcompute(self, table_name: str, overwrite: bool = True) -> None:
        """
        Triggers the execution of a historical feature retrieval query and exports the results to a Maxcompute table.

        Args:
            table_name: specify name of destination table
            overwrite: drop a pre-existing table of the same name first (default True)
        """
        if overwrite:
            sql = f"DROP TABLE IF EXISTS {table_name}"
            job = self.client.run_sql(sql)
            print("logview url:", job.get_logview_address())
            # BUGFIX: run_sql submits asynchronously (every other call in this
            # class pairs it with wait_for_success). Without waiting, the
            # CREATE below can race the DROP and fail because the old table
            # still exists.
            job.wait_for_success()
        query = self.query
        sql = f"CREATE TABLE {table_name} LIFECYCLE 1 AS {query}"
        job = self.client.run_sql(sql)
        print("logview url:", job.get_logview_address())
        job.wait_for_success()

    def _to_df(self) -> pd.DataFrame:
        # Materialize the query into a short-lived (LIFECYCLE 1) temp table,
        # then download that table as a pandas DataFrame.
        table_reference = _get_table_reference_for_new_entity(
            self.client, self.config.offline_store.project
        )
        query = self.query
        sql = f"CREATE TABLE {table_reference} LIFECYCLE 1 AS {query}"
        job = self.client.run_sql(sql)
        print("logview url:", job.get_logview_address())
        job.wait_for_success()
        table = odps.df.DataFrame(self.client.get_table(table_reference))
        return table.to_pandas()

    def to_arrow(self) -> pyarrow.Table:
        df = self._to_df()
        return pyarrow.Table.from_pandas(df)
def _get_table_reference_for_new_entity(client: ODPS, dataset_project: str) -> str:
    """Gets the table_id for the new entity to be uploaded."""
    temp_table = offline_utils.get_temp_entity_table_name()
    return "{}.{}".format(dataset_project, temp_table)
def _upload_entity_df_and_get_entity_schema(
    client: ODPS,
    table_name: str,
    entity_df: Union[pd.DataFrame, odps.df.DataFrame, str],
) -> Dict[str, np.dtype]:
    """Uploads a Pandas entity dataframe into a Maxcompute table and returns its schema.

    Args:
        client: authenticated ODPS client.
        table_name: fully-qualified destination table reference.
        entity_df: a pandas DataFrame to upload, an ODPS DataFrame, or (as a
            str) the name of a table that already exists in Maxcompute.

    Returns:
        Mapping of column name to dtype for the entity dataframe.

    Raises:
        MaxcomputeUploadError: if persisting the pandas DataFrame fails.
        InvalidEntityType: if entity_df is none of the supported types.
    """
    # idiom fix: isinstance() instead of `type(...) is str`
    if isinstance(entity_df, str):
        # The entity df is already a Maxcompute table; read one row for the schema.
        limited_entity_df = (
            odps.df.DataFrame(client.get_table(table_name)).limit(1).execute()
        )
        entity_schema = dict(
            zip(limited_entity_df.schema.names, limited_entity_df.schema.types)
        )
    elif isinstance(entity_df, pd.DataFrame):
        # Drop the index so that we dont have unnecessary columns
        entity_df.reset_index(drop=True, inplace=True)

        # Upload the dataframe into Maxcompute, creating a temporary table
        upload_df = odps.df.DataFrame(entity_df)
        try:
            upload_df.persist(table_name, odps=client, lifecycle=1)
        except Exception as e:
            # Chain the cause so the underlying ODPS error stays visible.
            raise MaxcomputeUploadError(e) from e
        entity_schema = dict(zip(upload_df.dtypes.names, upload_df.dtypes.types))
    elif isinstance(entity_df, odps.df.DataFrame):
        # Just return the Maxcompute schema
        entity_schema = dict(zip(entity_df.dtypes.names, entity_df.dtypes.types))
    else:
        raise InvalidEntityType(type(entity_df))

    return entity_schema
# TODO: Optimizations
# * Use GENERATE_UUID() instead of ROW_NUMBER(), or join on entity columns directly
# * Precompute ROW_NUMBER() so that it doesn't have to be recomputed for every query on entity_dataframe
# * Create temporary tables instead of keeping all tables in memory
# Note: Keep this in sync with sdk/python/feast/infra/offline_stores/redshift.py:MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN
MULTIPLE_FEATURE_VIEW_POINT_IN_TIME_JOIN = """
--Compute a deterministic hash for the `left_table_query_string` that will be used throughout
--all the logic as the field to GROUP BY the data
WITH entity_dataframe AS (
SELECT *,
CAST({{entity_df_event_timestamp_col}} AS TIMESTAMP) AS entity_timestamp
{% for featureview in featureviews %}
,CONCAT(
{% for entity in featureview.entities %}
CAST({{entity}} AS STRING),
{% endfor %}
CAST({{entity_df_event_timestamp_col}} AS STRING)
) AS {{featureview.name}}__entity_row_unique_id
{% endfor %}
FROM {{ left_table_query_string }}
),
{% for featureview in featureviews %}
{{ featureview.name }}__entity_dataframe AS (
SELECT
{{ featureview.entities | join(', ')}},
cast(entity_timestamp as TIMESTAMP),
{{featureview.name}}__entity_row_unique_id
FROM entity_dataframe
GROUP BY {{ featureview.entities | join(', ')}}, entity_timestamp, {{featureview.name}}__entity_row_unique_id
),
-- This query template performs the point-in-time correctness join for a single feature set table
-- to the provided entity table.
--
-- 1. We first join the current feature_view to the entity dataframe that has been passed.
-- This JOIN has the following logic:
-- - For each row of the entity dataframe, only keep the rows where the `event_timestamp_column`
-- is less than the one provided in the entity dataframe
-- - If there a TTL for the current feature_view, also keep the rows where the `event_timestamp_column`
-- is higher the the one provided minus the TTL
-- - For each row, Join on the entity key and retrieve the `entity_row_unique_id` that has been
-- computed previously
--
-- The output of this CTE will contain all the necessary information and already filtered out most
-- of the data that is not relevant.
{{ featureview.name }}__subquery AS (
SELECT
cast({{ featureview.event_timestamp_column }} as TIMESTAMP) as event_timestamp,
{{ featureview.created_timestamp_column ~ ' as created_timestamp,' if featureview.created_timestamp_column else '' }}
{{ featureview.entity_selections | join(', ')}},
{% for feature in featureview.features %}
{{ feature }} as {% if full_feature_names %}{{ featureview.name }}__{{feature}}{% else %}{{ feature }}{% endif %}{% if loop.last %}{% else %}, {% endif %}
{% endfor %}
FROM {{ featureview.table_subquery }}
WHERE cast({{ featureview.event_timestamp_column }} as TIMESTAMP) <= (SELECT MAX(entity_timestamp) FROM entity_dataframe)
{% if featureview.ttl == 0 %}{% else %}
AND cast({{ featureview.event_timestamp_column }} as TIMESTAMP) >= DATEADD((SELECT MIN(entity_timestamp) FROM entity_dataframe), -{{ featureview.ttl }}, "ss")
{% endif %}
),
{{ featureview.name }}__base AS (
SELECT
/*+ mapjoin({{ featureview.name }}__entity_dataframe)*/ subquery.*,
entity_dataframe.entity_timestamp,
entity_dataframe.{{featureview.name}}__entity_row_unique_id
FROM {{ featureview.name }}__subquery AS subquery
JOIN {{ featureview.name }}__entity_dataframe AS entity_dataframe
ON TRUE
AND subquery.event_timestamp <= entity_dataframe.entity_timestamp
{% if featureview.ttl == 0 %}{% else %}
AND subquery.event_timestamp >= DATEADD(entity_dataframe.entity_timestamp, -{{ featureview.ttl }}, "ss")
{% endif %}
{% for entity in featureview.entities %}
AND subquery.{{ entity }} = entity_dataframe.{{ entity }}
{% endfor %}
),
-- 2. If the `created_timestamp_column` has been set, we need to
-- deduplicate the data first. This is done by calculating the
-- `MAX(created_at_timestamp)` for each event_timestamp.
-- We then join the data on the next CTE
{% if featureview.created_timestamp_column %}
{{ featureview.name }}__dedup AS (
SELECT
{{featureview.name}}__entity_row_unique_id,
event_timestamp,
MAX(created_timestamp) as created_timestamp
FROM {{ featureview.name }}__base
GROUP BY {{featureview.name}}__entity_row_unique_id, event_timestamp
),
{% endif %}
-- 3. The data has been filtered during the first CTE "*__base"
-- Thus we only need to compute the latest timestamp of each feature.
{{ featureview.name }}__latest AS (
SELECT
{{featureview.name}}__entity_row_unique_id,
event_timestamp,
created_timestamp
FROM
(
SELECT *,
ROW_NUMBER() OVER(
PARTITION BY {{featureview.name}}__entity_row_unique_id
ORDER BY event_timestamp DESC{% if featureview.created_timestamp_column %},created_timestamp DESC{% endif %}
) AS row_number
FROM {{ featureview.name }}__base
{% if featureview.created_timestamp_column %}
JOIN {{ featureview.name }}__dedup
USING ({{featureview.name}}__entity_row_unique_id, event_timestamp, created_timestamp)
{% endif %}
)
WHERE row_number = 1
),
-- 4. Once we know the latest value of each feature for a given timestamp,
-- we can join again the data back to the original "base" dataset
{{ featureview.name }}__cleaned AS (
SELECT base.*,
{{featureview.name}}__entity_row_unique_id
FROM {{ featureview.name }}__base as base
JOIN {{ featureview.name }}__latest
USING(
{{featureview.name}}__entity_row_unique_id,
event_timestamp
{% if featureview.created_timestamp_column %}
,created_timestamp
{% endif %}
)
){% if loop.last %}{% else %}, {% endif %}
{% endfor %}
-- Joins the outputs of multiple time travel joins to a single table.
-- The entity_dataframe dataset being our source of truth here.
SELECT *
FROM entity_dataframe
{% for featureview in featureviews %}
LEFT JOIN (
SELECT
{{featureview.name}}__entity_row_unique_id
{% for feature in featureview.features %}
,{% if full_feature_names %}{{ featureview.name }}__{{feature}}{% else %}{{ feature }}{% endif %}
{% endfor %}
FROM {{ featureview.name }}__cleaned
) {{ featureview.name }}__u USING ({{featureview.name}}__entity_row_unique_id)
{% endfor %}
""" | 0.730482 | 0.225566 |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (makemigrations) schema changes for the product app.

    Creates PriceObj, renames Product/Service ``uploaded_at`` to
    ``created_at``, drops ``Service.is_recurring``, adjusts several field
    definitions, and deletes the superseded ProductPriceObj model.
    Operation order is significant — do not reorder.
    """

    dependencies = [
        ('general_business', '0001_initial'),
        ('product', '0002_productpriceobj'),
    ]

    operations = [
        # New standalone price model; carries its own recurrence fields.
        migrations.CreateModel(
            name='PriceObj',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, default='default title', max_length=500, null=True)),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('amount', models.IntegerField(blank=True, null=True)),
                ('is_recurring', models.BooleanField(verbose_name='Is the price a recurring amount?')),
                ('reccurence_freq', models.CharField(blank=True, choices=[('daily', 'daily'), ('weekly', 'weekly'), ('monthly', 'monthly'), ('annually', 'annually')], max_length=500, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RenameField(
            model_name='product',
            old_name='uploaded_at',
            new_name='created_at',
        ),
        migrations.RenameField(
            model_name='service',
            old_name='uploaded_at',
            new_name='created_at',
        ),
        migrations.RemoveField(
            model_name='service',
            name='is_recurring',
        ),
        migrations.AlterField(
            model_name='product',
            name='parent_organization',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='general_business.organization'),
        ),
        migrations.AlterField(
            model_name='product',
            name='subtitle',
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='title',
            field=models.CharField(blank=True, default='default title', max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='parent_organization',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='general_business.organization'),
        ),
        migrations.AlterField(
            model_name='service',
            name='title',
            field=models.CharField(blank=True, default='default title', max_length=500, null=True),
        ),
        # ProductPriceObj is superseded by the PriceObj model created above.
        migrations.DeleteModel(
            name='ProductPriceObj',
        ),
] | product/migrations/0003_auto_20210616_0915.py |
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated (makemigrations) schema changes for the product app.

    Creates PriceObj, renames Product/Service ``uploaded_at`` to
    ``created_at``, drops ``Service.is_recurring``, adjusts several field
    definitions, and deletes the superseded ProductPriceObj model.
    Operation order is significant — do not reorder.
    """

    dependencies = [
        ('general_business', '0001_initial'),
        ('product', '0002_productpriceobj'),
    ]

    operations = [
        # New standalone price model; carries its own recurrence fields.
        migrations.CreateModel(
            name='PriceObj',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(blank=True, default='default title', max_length=500, null=True)),
                ('created_at', models.DateTimeField(auto_now=True)),
                ('amount', models.IntegerField(blank=True, null=True)),
                ('is_recurring', models.BooleanField(verbose_name='Is the price a recurring amount?')),
                ('reccurence_freq', models.CharField(blank=True, choices=[('daily', 'daily'), ('weekly', 'weekly'), ('monthly', 'monthly'), ('annually', 'annually')], max_length=500, null=True)),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.RenameField(
            model_name='product',
            old_name='uploaded_at',
            new_name='created_at',
        ),
        migrations.RenameField(
            model_name='service',
            old_name='uploaded_at',
            new_name='created_at',
        ),
        migrations.RemoveField(
            model_name='service',
            name='is_recurring',
        ),
        migrations.AlterField(
            model_name='product',
            name='parent_organization',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='general_business.organization'),
        ),
        migrations.AlterField(
            model_name='product',
            name='subtitle',
            field=models.CharField(blank=True, max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='product',
            name='title',
            field=models.CharField(blank=True, default='default title', max_length=500, null=True),
        ),
        migrations.AlterField(
            model_name='service',
            name='parent_organization',
            field=models.ForeignKey(on_delete=django.db.models.deletion.DO_NOTHING, to='general_business.organization'),
        ),
        migrations.AlterField(
            model_name='service',
            name='title',
            field=models.CharField(blank=True, default='default title', max_length=500, null=True),
        ),
        # ProductPriceObj is superseded by the PriceObj model created above.
        migrations.DeleteModel(
            name='ProductPriceObj',
        ),
] | 0.506103 | 0.120387 |
import logging
import os
import webapp2
from webapp2_extras import jinja2
from appengine_module.cr_rev import controller
class BaseHandler(webapp2.RequestHandler):
  """Provide a cached Jinja environment to each request."""

  def __init__(self, *args, **kwargs):
    webapp2.RequestHandler.__init__(self, *args, **kwargs)

  @staticmethod
  def jinja2_factory(app):
    # Builds a Jinja2 renderer rooted at the module-local 'templates' dir;
    # passed to get_jinja2() so the instance is created once per app.
    template_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'templates'))
    config = {'template_path': template_dir}
    jinja = jinja2.Jinja2(app, config=config)
    return jinja

  @webapp2.cached_property
  def jinja2(self):
    # Returns a Jinja2 renderer cached in the app registry.
    return jinja2.get_jinja2(app=self.app, factory=BaseHandler.jinja2_factory)

  def render_response(self, _template, **context):
    # Renders a template and writes the result to the response.
    rv = self.jinja2.render_template(_template, **context)
    self.response.write(rv)
class StartPage(BaseHandler):
  """Renders the main page; registered for /_ah/warmup and /_ah/start."""

  def get(self):
    self.render_response('main.html', title='cr-rev')
class MainPage(BaseHandler):
  """Renders the main page for '/'."""

  def get(self):
    ctx = dict(title='cr-rev')
    self.render_response('main.html', **ctx)
class ScanProjects(BaseHandler):
  """Admin handler: launches a repo-scanning pipeline per project."""

  def get(self):
    projects = controller.scan_projects_for_repos()
    for project in projects:  # pragma: no cover
      # Lazy %-args: the message is only formatted when INFO is emitted.
      logging.info('launching pipeline: %s', project)
    self.response.write('pipelines: %s' % '<br>'.join(projects))
class ScanRepos(BaseHandler):
  """Admin handler: launches a scanning pipeline per repo."""

  def get(self):
    projects = controller.scan_repos()
    for project in projects:  # pragma: no cover
      # Lazy %-args: the message is only formatted when INFO is emitted.
      logging.info('launching pipeline: %s', project)
    self.response.write('pipelines: %s' % '<br>'.join(projects))
class Redirect(BaseHandler):
  """Resolves *query* via the controller and redirects, or returns 404."""

  def get(self, query, extra_paths):
    if self.request.referer:
      # Lazy %-args instead of eager % formatting in logging calls.
      logging.info('referer is %s', self.request.referer)
    redirect = controller.calculate_redirect(query)
    if redirect:
      redirect_url = str(redirect.redirect_url)
      # Preserve any trailing path and the original query string.
      if extra_paths:
        redirect_url = redirect_url + extra_paths
      if self.request.query_string:
        redirect_url = redirect_url + '?' + self.request.query_string
      logging.info('redirecting to %s', redirect_url)
      self.redirect(redirect_url)
    else:
      self.abort(404)
def get_routes():
  # Route table. Order matters: the catch-all redirect pattern precedes '/',
  # but it requires at least one non-slash character, so a bare '/' still
  # reaches MainPage.
  return webapp2.WSGIApplication([
      ('/_ah/warmup', StartPage),
      ('/_ah/start', StartPage),
      ('/admin/scan_projects', ScanProjects),
      ('/admin/scan_repos', ScanRepos),
      (r'/([^/]+)(/.*)?', Redirect),
      ('/', MainPage),
]) | appengine/cr_rev/appengine_module/cr_rev/views.py |
import logging
import os
import webapp2
from webapp2_extras import jinja2
from appengine_module.cr_rev import controller
class BaseHandler(webapp2.RequestHandler):
  """Provide a cached Jinja environment to each request."""

  def __init__(self, *args, **kwargs):
    webapp2.RequestHandler.__init__(self, *args, **kwargs)

  @staticmethod
  def jinja2_factory(app):
    # Builds a Jinja2 renderer rooted at the module-local 'templates' dir;
    # passed to get_jinja2() so the instance is created once per app.
    template_dir = os.path.abspath(
        os.path.join(os.path.dirname(__file__), 'templates'))
    config = {'template_path': template_dir}
    jinja = jinja2.Jinja2(app, config=config)
    return jinja

  @webapp2.cached_property
  def jinja2(self):
    # Returns a Jinja2 renderer cached in the app registry.
    return jinja2.get_jinja2(app=self.app, factory=BaseHandler.jinja2_factory)

  def render_response(self, _template, **context):
    # Renders a template and writes the result to the response.
    rv = self.jinja2.render_template(_template, **context)
    self.response.write(rv)
class StartPage(BaseHandler):
  """Renders the main page; registered for /_ah/warmup and /_ah/start."""

  def get(self):
    self.render_response('main.html', title='cr-rev')
class MainPage(BaseHandler):
  """Renders the main page for '/'."""

  def get(self):
    ctx = dict(title='cr-rev')
    self.render_response('main.html', **ctx)
class ScanProjects(BaseHandler):
  """Admin handler: launches a repo-scanning pipeline per project."""

  def get(self):
    projects = controller.scan_projects_for_repos()
    for project in projects:  # pragma: no cover
      # Lazy %-args: the message is only formatted when INFO is emitted.
      logging.info('launching pipeline: %s', project)
    self.response.write('pipelines: %s' % '<br>'.join(projects))
class ScanRepos(BaseHandler):
  """Admin handler: launches a scanning pipeline per repo."""

  def get(self):
    projects = controller.scan_repos()
    for project in projects:  # pragma: no cover
      # Lazy %-args: the message is only formatted when INFO is emitted.
      logging.info('launching pipeline: %s', project)
    self.response.write('pipelines: %s' % '<br>'.join(projects))
class Redirect(BaseHandler):
  """Resolves *query* via the controller and redirects, or returns 404."""

  def get(self, query, extra_paths):
    if self.request.referer:
      # Lazy %-args instead of eager % formatting in logging calls.
      logging.info('referer is %s', self.request.referer)
    redirect = controller.calculate_redirect(query)
    if redirect:
      redirect_url = str(redirect.redirect_url)
      # Preserve any trailing path and the original query string.
      if extra_paths:
        redirect_url = redirect_url + extra_paths
      if self.request.query_string:
        redirect_url = redirect_url + '?' + self.request.query_string
      logging.info('redirecting to %s', redirect_url)
      self.redirect(redirect_url)
    else:
      self.abort(404)
def get_routes():
  # Route table. Order matters: the catch-all redirect pattern precedes '/',
  # but it requires at least one non-slash character, so a bare '/' still
  # reaches MainPage.
  return webapp2.WSGIApplication([
      ('/_ah/warmup', StartPage),
      ('/_ah/start', StartPage),
      ('/admin/scan_projects', ScanProjects),
      ('/admin/scan_repos', ScanRepos),
      (r'/([^/]+)(/.*)?', Redirect),
      ('/', MainPage),
]) | 0.709321 | 0.06724 |
import os
import cv2
import numpy as np
class Image:
    """Image and video I/O utilities for the package (also usable directly).

    Methods:
        bgr_to_grey, bgr_to_rgb, resize, crop, read_img, read_video
    """

    def __init__(self):
        pass

    def bgr_to_grey(self, frame):
        """Convert a BGR image to greyscale.

        Args:
            frame: numpy-like BGR image array.

        Returns:
            The converted greyscale image.
        """
        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    def bgr_to_rgb(self, frame):
        """Convert a BGR image to RGB format.

        Args:
            frame: numpy-like BGR image array.

        Returns:
            The converted RGB image.
        """
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    def resize(self, frame, x, y):
        """Resize the image to (x, y) using INTER_AREA interpolation.

        Args:
            frame: numpy-like image array.
            x: target width.
            y: target height.

        Returns:
            The resized image.
        """
        return cv2.resize(frame, dsize=(x, y), interpolation=cv2.INTER_AREA)

    def crop(self, img, cords):
        """Crop the image to the box given by ``cords``.

        Args:
            img: numpy-like image array.
            cords: indexable of (x1, y1, x2, y2) pixel coordinates.

        Returns:
            The cropped image.
        """
        # rows are y (cords[1]:cords[3]), columns are x (cords[0]:cords[2])
        return img[cords[1]:cords[3], cords[0]:cords[2]]

    def read_img(self, img_path):
        """Read an image from ``img_path``.

        Returns:
            The image array, or None when the path does not exist.
        """
        # idiom fix: no `is True` comparison, no intermediate flag
        if not os.path.exists(img_path):
            return None
        # Kept for backward compatibility: callers may read ``self.frame``.
        self.frame = cv2.imread(img_path)
        return self.frame

    def read_video(self, video_path):
        """Open a video from ``video_path`` (pass 0 upstream for webcam).

        Returns:
            A cv2.VideoCapture, or None when the path does not exist.
        """
        if not os.path.exists(video_path):
            return None
        return cv2.VideoCapture(video_path)
return None | visionlib/utils/imgutils.py | import os
import cv2
import numpy as np
class Image:
    """Image and video I/O utilities for the package (also usable directly).

    Methods:
        bgr_to_grey, bgr_to_rgb, resize, crop, read_img, read_video
    """

    def __init__(self):
        pass

    def bgr_to_grey(self, frame):
        """Convert a BGR image to greyscale.

        Args:
            frame: numpy-like BGR image array.

        Returns:
            The converted greyscale image.
        """
        return cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    def bgr_to_rgb(self, frame):
        """Convert a BGR image to RGB format.

        Args:
            frame: numpy-like BGR image array.

        Returns:
            The converted RGB image.
        """
        return cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    def resize(self, frame, x, y):
        """Resize the image to (x, y) using INTER_AREA interpolation.

        Args:
            frame: numpy-like image array.
            x: target width.
            y: target height.

        Returns:
            The resized image.
        """
        return cv2.resize(frame, dsize=(x, y), interpolation=cv2.INTER_AREA)

    def crop(self, img, cords):
        """Crop the image to the box given by ``cords``.

        Args:
            img: numpy-like image array.
            cords: indexable of (x1, y1, x2, y2) pixel coordinates.

        Returns:
            The cropped image.
        """
        # rows are y (cords[1]:cords[3]), columns are x (cords[0]:cords[2])
        return img[cords[1]:cords[3], cords[0]:cords[2]]

    def read_img(self, img_path):
        """Read an image from ``img_path``.

        Returns:
            The image array, or None when the path does not exist.
        """
        # idiom fix: no `is True` comparison, no intermediate flag
        if not os.path.exists(img_path):
            return None
        # Kept for backward compatibility: callers may read ``self.frame``.
        self.frame = cv2.imread(img_path)
        return self.frame

    def read_video(self, video_path):
        """Open a video from ``video_path`` (pass 0 upstream for webcam).

        Returns:
            A cv2.VideoCapture, or None when the path does not exist.
        """
        if not os.path.exists(video_path):
            return None
        return cv2.VideoCapture(video_path)
return None | 0.825097 | 0.465873 |
from argparse import ArgumentParser
from tabulate import tabulate
from termcolor import colored
from taoist.read_project_dict import read_project_dict
from taoist.read_label_dict import read_label_dict
from taoist.parent_project import parent_project
async def run_task(args: ArgumentParser) -> None:
    """Run the ``task`` command, dispatching on args.subcommand.

    Supported subcommands: list, delete, done, view, label, create.
    """
    # Read config and project list
    api, project_dict = await read_project_dict()

    # Read label list into dictionary
    label_dict = await read_label_dict(api)

    # Process subcommand.
    # NOTE: the original wrapped each API call in
    # ``try: ... except Exception as error: raise error`` — a no-op that only
    # obscured the code, so the calls are now made directly.
    if args.subcommand == "list":
        tasks = await api.get_tasks()
        table_header = ["id", "content", "project", "status", "due", "labels"]
        task_list = []
        for task in tasks:
            label_list = []
            status = "Done" if task.completed else "Open"
            # A filter passes automatically when the corresponding id was not given.
            pass_label_filter = not args.label_id
            pass_project_filter = not args.project_id
            if not pass_project_filter and task.project_id == args.project_id:
                pass_project_filter = True
            for lab in task.label_ids:
                if not pass_label_filter and lab == args.label_id:
                    pass_label_filter = True
                label_list.append(label_dict[lab].name)
            label_string = ','.join(label_list)
            due_date = task.due.date if task.due else ""
            project_path_string = parent_project(task.project_id, project_dict)
            if pass_label_filter and pass_project_filter:
                row = [
                    task.id,
                    colored(task.content, 'white', attrs=['bold']),
                    project_path_string,
                    status,
                    due_date,
                    label_string,
                ]
                task_list.append(row)
        task_list.sort(key=lambda x: x[4])  # order rows by due date
        print(tabulate(task_list, headers=table_header))
    elif args.subcommand == "delete":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        is_success = await api.delete_task(task_id=task_id)
        if is_success:
            print(f"Successfully deleted task {task_id}")
    elif args.subcommand == "done":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        is_success = await api.close_task(task_id=task_id)
        if is_success:
            print(f"Successfully marked task {task_id} as done")
    elif args.subcommand == "view":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        view_list = []
        task = await api.get_task(task_id=task_id)
        task_dict = task.to_dict()
        view_list.append(["Name", task_dict['content']])
        project_path_string = parent_project(task.project_id, project_dict)
        view_list.append(["Project", project_path_string])
        due_dict = task_dict['due']
        if due_dict:
            view_list.append(["Due", due_dict['date']])
            view_list.append(["Recurring", due_dict['recurring']])
        view_list.append(["Priority", task_dict['priority']])
        label_list = [label_dict[lab].name for lab in task_dict['label_ids']]
        if len(label_list) > 0:
            view_list.append(["Labels", ','.join(label_list)])
        print(tabulate(view_list))
    elif args.subcommand == "label":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        label_id = args.label_id if args.label_id else int(input("Enter label ID: "))
        task = await api.get_task(task_id=task_id)
        new_list = task.label_ids
        new_list.append(label_id)
        is_success = await api.update_task(task_id=task_id, label_ids=new_list)
        if is_success:
            print(f"Successfully added label {label_id} to task {task_id}")
    elif args.subcommand == "create":
        task_name = args.task_name if args.task_name else input("Enter task name: ")
        # BUGFIX: project_id was previously left unbound (NameError at the
        # add_task call) when no project matched args.project_name; fail with
        # a clear message instead. Last match still wins, as before.
        project_id = None
        for key, val in project_dict.items():
            if val.name == args.project_name:
                project_id = key
        if project_id is None:
            raise ValueError(f"Project '{args.project_name}' not found")
        task = await api.add_task(
            content=task_name,
            due_string=args.due,
            project_id=project_id,
            due_lang='en',
            priority=args.priority,
        )
print(f"Successfully created task \"{task_name}\"") | taoist/run_task.py |
from argparse import ArgumentParser
from tabulate import tabulate
from termcolor import colored
from taoist.read_project_dict import read_project_dict
from taoist.read_label_dict import read_label_dict
from taoist.parent_project import parent_project
async def run_task(args: ArgumentParser) -> None:
    """
    Run the task command.

    Dispatches on ``args.subcommand``:
      list   -- print a table of tasks, optionally filtered by project/label
      delete -- delete a task by ID
      done   -- close (complete) a task by ID
      view   -- print the details of a single task
      label  -- attach a label to a task
      create -- create a new task

    Args:
        args: Parsed command-line arguments for the ``task`` command.

    Raises:
        Exception: any error from the underlying Todoist API calls is
            propagated unchanged (the original wrapped every call in
            ``except Exception as error: raise error``, which was a no-op
            and has been removed).
    """
    # Read config and project list
    api, project_dict = await read_project_dict()

    # Read label list into dictionary
    label_dict = await read_label_dict(api)

    # Process subcommand
    if args.subcommand == "list":
        tasks = await api.get_tasks()
        table_header = ["id", "content", "project", "status", "due", "labels"]
        task_list = []
        for task in tasks:
            label_list = []
            status = "Done" if task.completed else "Open"
            # A filter passes by default when its option was not supplied.
            pass_label_filter = not args.label_id
            pass_project_filter = not args.project_id
            if not pass_project_filter and task.project_id == args.project_id:
                pass_project_filter = True
            for lab in task.label_ids:
                if not pass_label_filter and lab == args.label_id:
                    pass_label_filter = True
                label_list.append(label_dict[lab].name)
            label_string = ','.join(label_list)
            due_date = task.due.date if task.due else ""
            project_path_string = parent_project(task.project_id, project_dict)
            if pass_label_filter and pass_project_filter:
                row = [
                    task.id,
                    colored(task.content, 'white', attrs=['bold']),
                    project_path_string,
                    status,
                    due_date,
                    label_string
                ]
                task_list.append(row)
        task_list.sort(key=lambda x: x[4])  # sort rows by the due-date column
        print(tabulate(task_list, headers=table_header))
    elif args.subcommand == "delete":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        is_success = await api.delete_task(task_id=task_id)
        if is_success:
            print(f"Successfully deleted task {task_id}")
    elif args.subcommand == "done":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        is_success = await api.close_task(task_id=task_id)
        if is_success:
            print(f"Successfully marked task {task_id} as done")
    elif args.subcommand == "view":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        task = await api.get_task(task_id=task_id)
        task_dict = task.to_dict()
        view_list = [["Name", task_dict['content']]]
        project_path_string = parent_project(task.project_id, project_dict)
        view_list.append(["Project", project_path_string])
        due_dict = task_dict['due']
        if due_dict:
            view_list.append(["Due", due_dict['date']])
            view_list.append(["Recurring", due_dict['recurring']])
        view_list.append(["Priority", task_dict['priority']])
        label_list = [label_dict[lab].name for lab in task_dict['label_ids']]
        if label_list:
            view_list.append(["Labels", ','.join(label_list)])
        print(tabulate(view_list))
    elif args.subcommand == "label":
        task_id = args.task_id if args.task_id else int(input("Enter task ID: "))
        label_id = args.label_id if args.label_id else int(input("Enter label ID: "))
        task = await api.get_task(task_id=task_id)
        new_list = task.label_ids
        new_list.append(label_id)
        is_success = await api.update_task(task_id=task_id, label_ids=new_list)
        if is_success:
            print(f"Successfully added label {label_id} to task {task_id}")
    elif args.subcommand == "create":
        task_name = args.task_name if args.task_name else input("Enter task name: ")
        # Resolve the project name to an ID. The original left project_id
        # unbound when no project matched (NameError); fall back to None,
        # which lets the API use its default project -- TODO confirm against
        # the Todoist API client's add_task handling of project_id=None.
        project_id = None
        for key, val in project_dict.items():
            if val.name == args.project_name:
                project_id = key
        task = await api.add_task(
            content=task_name,
            due_string=args.due,
            project_id=project_id,
            due_lang='en',
            priority=args.priority,
        )
        print(f"Successfully created task \"{task_name}\"")
from pipeline import phot_pipeline
from pipeline import spec_pipeline
from pipeline import analysis
from astropy.io import fits, ascii
import os
import matplotlib.pyplot as plt
import pdb
import numpy as np
def test_binning():
    """Test the binning function.

    Generates unit-variance white noise, bins it with
    ``phot_pipeline.do_binning``, and prints the measured vs expected
    standard deviation of the binned points (expected ~ 1/sqrt(pts per bin)).
    Shows a matplotlib plot of both the raw and binned series.
    """
    x = np.linspace(0, 10, 1024)
    y = np.random.randn(1024)
    plt.plot(x, y, '.')
    xBin, yBin, yErr = phot_pipeline.do_binning(x, y)
    stdevOrig = np.std(y)
    print('Stdev orig = {}, theoretically 1.0'.format(stdevOrig))
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin float is the documented replacement.
    ptsPerBin = float(len(x)) / float(len(xBin))
    print("pts per bin = {}".format(ptsPerBin))
    expectedStd = 1. / np.sqrt(ptsPerBin)
    print("expected stdev of binned = {}".format(expectedStd))
    print("measured stdev of binned = {}".format(np.std(yBin)))
    print("median errorbar = {}".format(np.median(yErr)))
    plt.errorbar(xBin, yBin, yErr, fmt='o')
    plt.show()
def test_allan_variance(doYerr=True):
    """Test the Allan variance function on scaled white noise.

    (The original docstring said "Test the binning function" -- a
    copy-paste error from test_binning.)

    Parameters
    ----------
    doYerr: bool
        If True, pass a constant error-bar array matching the noise
        amplitude; otherwise pass None for the errors.
    """
    yMultiplier = 500.
    x = np.linspace(0, 100, 2048)
    y = np.random.randn(2048) * yMultiplier
    # PEP 8 (E712): test truthiness directly instead of "== True".
    if doYerr:
        yerr = np.ones_like(x) * yMultiplier
    else:
        yerr = None
    phot_pipeline.allan_variance(x, y, yerr)
def test_poly_sub():
    """Run photometry in diagnostic mode on the K2-22 column/row test parameters."""
    param_path = 'parameters/phot_params/test_parameters/phot_param_k2_22_colrow.yaml'
    phot = phot_pipeline.phot(paramFile=param_path)
    phot.param['diagnosticMode'] = True
    phot.do_phot()
def compare_colrow_and_annulus_backsub(recalculate=False):
    """Compare photometry with annulus vs column/row background subtraction.

    Runs (or re-uses cached) photometry for the two K2-22 test parameter
    files, prints photometry statistics, and plots raw source flux,
    background-subtracted flux, and background flux on three shared axes.

    Parameters
    ----------
    recalculate: bool
        If True, redo the photometry even when a photometry file already
        exists on disk.
    """
    descriptions = ['Background Annulus', 'Col-Row Sub']
    fig, axArr = plt.subplots(3, sharex=True)
    for ind, oneName in enumerate(['phot_param_k2_22_annulus.yaml', 'phot_param_k2_22_colrow.yaml']):
        path = os.path.join('parameters', 'phot_params', 'test_parameters', oneName)
        phot = phot_pipeline.phot(paramFile=path)
        # Short-circuit boolean logic; the original used an eager bitwise
        # "|" on "== False" / "== True" comparisons.
        if not os.path.exists(phot.photFile) or recalculate:
            phot.do_phot(useMultiprocessing=True)
        print("***************************")
        print(descriptions[ind])
        print("***************************")
        phot.print_phot_statistics(refCorrect=False)
        HDUList = fits.open(phot.photFile)
        jdArr = HDUList['TIME'].data
        # Time relative to the rounded start of the observation.
        t = jdArr - np.round(np.min(jdArr))
        backData = HDUList['BACKG PHOT'].data
        srcData = HDUList['PHOTOMETRY'].data
        raw_src = srcData + backData
        linestyles = ['-', '-.']
        for oneSrc in np.arange(phot.nsrc):
            thisLabel = "{} src {}".format(descriptions[ind], oneSrc)
            for plot_ind, oneData in enumerate([raw_src, srcData, backData]):
                # Cycle the line styles so more than two sources no longer
                # raises IndexError (the original indexed linestyles[oneSrc]).
                axArr[plot_ind].plot(t, oneData[:, oneSrc], label=thisLabel,
                                     linestyle=linestyles[oneSrc % len(linestyles)])
        HDUList.close()
    axArr[2].legend()
    axArr[0].set_ylabel("Raw Src")
    axArr[1].set_ylabel("Raw - Back")
    axArr[2].set_ylabel("Backg Flux")
    fig.show()
def test_spec_apsweep():
    """
    Test the spectroscopic aperture sweep

    Loads the CoRoT-1 test parameter file into a spectroscopy object and
    runs an aperture-size sweep analysis over it.
    """
    spec = spec_pipeline.spec('parameters/spec_params/test_parameters/corot1_for_ap_sweep.yaml')
    analysis.aperture_size_sweep(spec)
from pipeline import spec_pipeline
from pipeline import analysis
from astropy.io import fits, ascii
import os
import matplotlib.pyplot as plt
import pdb
import numpy as np
def test_binning():
""" Test the binning function"""
x = np.linspace(0,10,1024)
y = np.random.randn(1024)
plt.plot(x,y,'.')
xBin, yBin, yErr = phot_pipeline.do_binning(x,y)
stdevOrig = np.std(y)
print('Stdev orig = {}, theoretically 1.0'.format(stdevOrig))
ptsPerBin = np.float(len(x)) / np.float(len(xBin))
print("pts per bin = {}".format(ptsPerBin))
expectedStd = 1./np.sqrt(ptsPerBin)
print("expected stdev of binned = {}".format(expectedStd))
print("measured stdev of binned = {}".format(np.std(yBin)))
print("median errorbar = {}".format(np.median(yErr)))
plt.errorbar(xBin,yBin,yErr,fmt='o')
plt.show()
def test_allan_variance(doYerr=True):
""" Test the binning function"""
yMultiplier = 500.
x = np.linspace(0,100,2048)
y = np.random.randn(2048) * yMultiplier
if doYerr == True:
yerr = np.ones_like(x) * yMultiplier
else:
yerr = None
phot_pipeline.allan_variance(x,y,yerr)
def test_poly_sub():
phot = phot_pipeline.phot(paramFile='parameters/phot_params/test_parameters/phot_param_k2_22_colrow.yaml')
phot.param['diagnosticMode'] = True
phot.do_phot()
def compare_colrow_and_annulus_backsub(recalculate=False):
descriptions = ['Background Annulus','Col-Row Sub']
fig, axArr = plt.subplots(3,sharex=True)
for ind,oneName in enumerate(['phot_param_k2_22_annulus.yaml','phot_param_k2_22_colrow.yaml']):
path = os.path.join('parameters','phot_params','test_parameters',oneName)
phot = phot_pipeline.phot(paramFile=path)
if (os.path.exists(phot.photFile) == False) | (recalculate == True):
phot.do_phot(useMultiprocessing=True)
print("***************************")
print(descriptions[ind])
print("***************************")
stats = phot.print_phot_statistics(refCorrect=False)
HDUList = fits.open(phot.photFile)
jdHDU = HDUList['TIME']
jdArr = jdHDU.data
t = jdArr - np.round(np.min(jdArr))
jdHDU = HDUList['TIME']
backData = HDUList['BACKG PHOT'].data
srcData = HDUList['PHOTOMETRY'].data
raw_src = srcData + backData
linestyles=['-','-.']
for oneSrc in np.arange(phot.nsrc):
thisLabel = "{} src {}".format(descriptions[ind],oneSrc)
for plot_ind,oneData in enumerate([raw_src,srcData,backData]):
ax = axArr[plot_ind]
ax.plot(t,oneData[:,oneSrc],label=thisLabel,linestyle=linestyles[oneSrc])
HDUList.close()
axArr[2].legend()
axArr[0].set_ylabel("Raw Src")
axArr[1].set_ylabel("Raw - Back")
axArr[2].set_ylabel("Backg Flux")
fig.show()
def test_spec_apsweep():
"""
Test the spectroscopic aperture sweep
"""
spec = spec_pipeline.spec('parameters/spec_params/test_parameters/corot1_for_ap_sweep.yaml')
analysis.aperture_size_sweep(spec) | 0.546254 | 0.387227 |
import os
from pypeline.common.fileutils import missing_files
from pypeline.atomiccmd.builder import apply_options
from pypeline.nodes.adapterremoval import \
SE_AdapterRemovalNode, \
PE_AdapterRemovalNode, \
VERSION_14, \
VERSION_15
from pypeline.nodes.validation import \
ValidateFASTQFilesNode
class Reads(object):
    """Collects the FASTQ read files and processing nodes for one lane.

    Depending on the makefile record "Type", either registers pre-trimmed
    read files and adds a FASTQ validation node ("Trimmed"), or sets up an
    AdapterRemoval node for raw reads ("Raw").

    NOTE(review): Python 2 code (uses dict.iteritems); keep py2 semantics.
    """

    def __init__(self, config, record, quality_offset):
        """Build the file map and node tuple for a single lane record.

        config         -- pipeline configuration (destination,
                          allow_missing_input_files)
        record         -- makefile record for the lane (Tags / Type /
                          Data / Options)
        quality_offset -- FASTQ quality offset (33, 64, or "Solexa")
        """
        self.quality_offset = quality_offset
        # Maps read type ("Single", "Paired", "Collapsed", ...) to an
        # output filename (Paired entries contain a {Pair} placeholder).
        self.files = {}
        # Path of the AdapterRemoval settings file (set for raw reads only).
        self.stats = None
        self.nodes = ()

        tags = record["Tags"]
        self.folder = os.path.join(config.destination, tags["Target"], "reads",
                                   tags["SM"], tags["LB"], tags["PU_cur"])

        lane_type = record.get("Type")
        if lane_type == "Raw":
            self._init_raw_reads(record)
        elif lane_type == "Trimmed":
            self._init_pretrimmed_reads(record)
        else:
            assert False, "Unexpected data type in Reads(): %s" \
                % (repr(lane_type))

        # Drop any read types the user asked to exclude.
        for name in record["Options"]["ExcludeReads"]:
            self.files.pop(name, None)

        # If the inputs are missing but all outputs already exist, the
        # node need not (and cannot) be re-run -- drop it.
        if config.allow_missing_input_files and self.nodes:
            input_missing = missing_files(self.nodes[0].input_files)
            output_missing = missing_files(self.nodes[0].output_files)
            if input_missing and not output_missing:
                self.nodes = ()

    def _init_pretrimmed_reads(self, record):
        """Register pre-trimmed input files and add a FASTQ validation node."""
        self.files.update(record["Data"])
        output_file = os.path.join(self.folder, "reads.pretrimmed.validated")
        input_files = set()
        for (read_type, filename) in self.files.iteritems():
            if read_type == "Paired":
                # Paired templates contain a {Pair} placeholder for mates 1/2.
                input_files.add(filename.format(Pair=1))
                input_files.add(filename.format(Pair=2))
            else:
                input_files.add(filename)
        node = ValidateFASTQFilesNode(input_files=input_files,
                                      output_file=output_file,
                                      offset=self.quality_offset)
        self.nodes = (node,)

    def _init_raw_reads(self, record):
        """Configure an AdapterRemoval node (SE or PE) for raw reads."""
        # Support for older versions of the pipeline, which used ARv1.0 - 1.4
        version = VERSION_14
        if record["Options"]["AdapterRemoval"]["Version"] == "v1.5+":
            version = VERSION_15

        # AdapterRemoval has no "Solexa" mode; Solexa scores use offset 64.
        quality_offset = self.quality_offset
        if quality_offset == "Solexa":
            quality_offset = 64

        ar_options = dict(record["Options"]["AdapterRemoval"])
        # Setup of "--collapsed" is handled by the node itself;
        # an unset (None) value means collapsing defaults to enabled.
        collapse_reads = ar_options.pop("--collapse")
        collapse_reads = collapse_reads or collapse_reads is None

        init_args = {"output_prefix": os.path.join(self.folder, "reads"),
                     "output_format": record["Options"]["CompressionFormat"],
                     "quality_offset": quality_offset,
                     "version": version}
        # Template with a single %s slot for the read-type infix.
        output_tmpl = "{output_prefix}.%s.{output_format}".format(**init_args)

        if ("SE" in record["Data"]):
            self.files["Single"] = output_tmpl % ("truncated",)
            init_args["input_files"] = record["Data"]["SE"]
            command = SE_AdapterRemovalNode.customize(**init_args)
        else:
            # Output filenames differ between AdapterRemoval v1.4 and v1.5+.
            if version is VERSION_14:
                self._set_adapterrm_v14_files(self.files, output_tmpl)
            else:
                self._set_adapterrm_v15_files(self.files, output_tmpl,
                                              collapse_reads)

            init_args["collapse"] = collapse_reads
            init_args["input_files_1"] = record["Data"]["PE_1"]
            init_args["input_files_2"] = record["Data"]["PE_2"]
            command = PE_AdapterRemovalNode.customize(**init_args)

        # Forward any remaining user-specified AdapterRemoval options.
        apply_options(command.command, ar_options)

        self.stats = os.path.join(self.folder, "reads.settings")
        self.nodes = (command.build_node(),)

    @classmethod
    def _set_adapterrm_v14_files(cls, files, output_tmpl):
        """Register the output filename layout used by AdapterRemoval v1.0-1.4."""
        files["Single"] = output_tmpl % ("singleton.unaln.truncated",)
        files["Collapsed"] = output_tmpl % ("singleton.aln.truncated",)
        files["Paired"] = output_tmpl % ("pair{Pair}.truncated",)

    @classmethod
    def _set_adapterrm_v15_files(cls, files, output_tmpl, collapse_reads):
        """Register the output filename layout used by AdapterRemoval v1.5+."""
        files["Single"] = output_tmpl % ("singleton.truncated",)
        files["Paired"] = output_tmpl % ("pair{Pair}.truncated",)
        if collapse_reads:
            files["Collapsed"] = output_tmpl % ("collapsed",)
            files["CollapsedTruncated"] = output_tmpl % ("collapsed.truncated",)
from pypeline.common.fileutils import missing_files
from pypeline.atomiccmd.builder import apply_options
from pypeline.nodes.adapterremoval import \
SE_AdapterRemovalNode, \
PE_AdapterRemovalNode, \
VERSION_14, \
VERSION_15
from pypeline.nodes.validation import \
ValidateFASTQFilesNode
class Reads(object):
def __init__(self, config, record, quality_offset):
self.quality_offset = quality_offset
self.files = {}
self.stats = None
self.nodes = ()
tags = record["Tags"]
self.folder = os.path.join(config.destination, tags["Target"], "reads",
tags["SM"], tags["LB"], tags["PU_cur"])
lane_type = record.get("Type")
if lane_type == "Raw":
self._init_raw_reads(record)
elif lane_type == "Trimmed":
self._init_pretrimmed_reads(record)
else:
assert False, "Unexpected data type in Reads(): %s" \
% (repr(lane_type))
for name in record["Options"]["ExcludeReads"]:
self.files.pop(name, None)
if config.allow_missing_input_files and self.nodes:
input_missing = missing_files(self.nodes[0].input_files)
output_missing = missing_files(self.nodes[0].output_files)
if input_missing and not output_missing:
self.nodes = ()
def _init_pretrimmed_reads(self, record):
self.files.update(record["Data"])
output_file = os.path.join(self.folder, "reads.pretrimmed.validated")
input_files = set()
for (read_type, filename) in self.files.iteritems():
if read_type == "Paired":
input_files.add(filename.format(Pair=1))
input_files.add(filename.format(Pair=2))
else:
input_files.add(filename)
node = ValidateFASTQFilesNode(input_files=input_files,
output_file=output_file,
offset=self.quality_offset)
self.nodes = (node,)
def _init_raw_reads(self, record):
# Support for older versions of the pipeline, which used ARv1.0 - 1.4
version = VERSION_14
if record["Options"]["AdapterRemoval"]["Version"] == "v1.5+":
version = VERSION_15
quality_offset = self.quality_offset
if quality_offset == "Solexa":
quality_offset = 64
ar_options = dict(record["Options"]["AdapterRemoval"])
# Setup of "--collapsed" is handled by the node itself
collapse_reads = ar_options.pop("--collapse")
collapse_reads = collapse_reads or collapse_reads is None
init_args = {"output_prefix": os.path.join(self.folder, "reads"),
"output_format": record["Options"]["CompressionFormat"],
"quality_offset": quality_offset,
"version": version}
output_tmpl = "{output_prefix}.%s.{output_format}".format(**init_args)
if ("SE" in record["Data"]):
self.files["Single"] = output_tmpl % ("truncated",)
init_args["input_files"] = record["Data"]["SE"]
command = SE_AdapterRemovalNode.customize(**init_args)
else:
if version is VERSION_14:
self._set_adapterrm_v14_files(self.files, output_tmpl)
else:
self._set_adapterrm_v15_files(self.files, output_tmpl,
collapse_reads)
init_args["collapse"] = collapse_reads
init_args["input_files_1"] = record["Data"]["PE_1"]
init_args["input_files_2"] = record["Data"]["PE_2"]
command = PE_AdapterRemovalNode.customize(**init_args)
apply_options(command.command, ar_options)
self.stats = os.path.join(self.folder, "reads.settings")
self.nodes = (command.build_node(),)
@classmethod
def _set_adapterrm_v14_files(cls, files, output_tmpl):
files["Single"] = output_tmpl % ("singleton.unaln.truncated",)
files["Collapsed"] = output_tmpl % ("singleton.aln.truncated",)
files["Paired"] = output_tmpl % ("pair{Pair}.truncated",)
@classmethod
def _set_adapterrm_v15_files(cls, files, output_tmpl, collapse_reads):
files["Single"] = output_tmpl % ("singleton.truncated",)
files["Paired"] = output_tmpl % ("pair{Pair}.truncated",)
if collapse_reads:
files["Collapsed"] = output_tmpl % ("collapsed",)
files["CollapsedTruncated"] = output_tmpl % ("collapsed.truncated",) | 0.41561 | 0.255077 |
import sys
import queue
import numbers
import math
# Require exactly one command-line argument: the puzzle input filename.
if len(sys.argv) != 2:
    print("Help: {} <filename>".format(sys.argv[0]))
    sys.exit(0)
class Number:
    """A snailfish number (AoC 2021 day 18) stored as a flat token list.

    Tokens are the characters '[', ']', ',' and ints for the values.
    """

    def __init__(self, number_string):
        """Parse a snailfish number string into its flat token list."""
        self.arr = [int(ch) if ch.isnumeric() else ch for ch in number_string]

    def process(self, split):
        """Perform one reduction step, returning True if anything changed.

        With split=False, explode the first pair nested inside four pairs;
        with split=True, split the first value greater than 9.
        """
        on_right = False
        side_stack = []  # False = left side of a pair, True = right side
        for i, tok in enumerate(self.arr):
            if tok == "[":
                side_stack.append(on_right)
                on_right = False
            elif tok == "]":
                side_stack.pop()
            elif tok == ",":
                on_right = True
            else:
                if not split and on_right and len(side_stack) > 4:
                    # Explode: add the pair's left value to the nearest
                    # number on its left (index 0 must be '[', so stop at 1)
                    # and its right value to the nearest number on its right,
                    # then replace the whole pair with 0.
                    for j in range(i - 3, 0, -1):
                        if isinstance(self.arr[j], numbers.Number):
                            self.arr[j] += self.arr[i - 2]
                            break
                    for j in range(i + 1, len(self.arr)):
                        if isinstance(self.arr[j], numbers.Number):
                            self.arr[j] += self.arr[i]
                            break
                    self.arr = self.arr[:i - 3] + [0] + self.arr[i + 2:]
                    return True
                if split and tok > 9:
                    # Split: replace the value with a [floor(v/2), ceil(v/2)] pair.
                    half = tok / 2.0
                    self.arr = (self.arr[:i]
                                + ["[", math.floor(half), ",", math.ceil(half), "]"]
                                + self.arr[i + 1:])
                    return True
                on_right = True
        return False

    def reduce(self):
        """Apply one reduction step, preferring explodes over splits."""
        if self.process(False):
            return True
        return self.process(True)

    def add(self, number):
        """Append another number to this one and fully reduce the result."""
        self.arr = ["["] + self.arr + [","] + number.arr + ["]"]
        while self.reduce():
            pass

    def get_string(self):
        """Render the token list back into snailfish notation."""
        return "".join(str(tok) if isinstance(tok, numbers.Number) else tok
                       for tok in self.arr)

    def get_magnitude(self):
        """Collapse the number to its magnitude (3*left + 2*right, recursively).

        Note: this consumes the token list; afterwards only the final
        magnitude value remains in self.arr.
        """
        while len(self.arr) > 1:
            for i, tok in enumerate(self.arr):
                is_simple_pair = (isinstance(tok, numbers.Number) and i > 1
                                  and self.arr[i - 1] == ","
                                  and isinstance(self.arr[i - 2], numbers.Number))
                if is_simple_pair:
                    magnitude = 3 * self.arr[i - 2] + 2 * tok
                    self.arr = self.arr[:i - 3] + [magnitude] + self.arr[i + 2:]
                    break
        return self.arr[0]
with open(sys.argv[1]) as file:
    lines = file.readlines()

# Part 1: sum all numbers in input order and report the final magnitude.
# (get_string is evaluated before get_magnitude, which consumes the tokens.)
base = None
for line in lines:
    if base is None:
        base = Number(line.rstrip())
    else:
        base.add(Number(line.rstrip()))
print("{} has magnitude {}".format(base.get_string(), base.get_magnitude()))

# Part 2: largest magnitude from adding any two distinct numbers.
# Snailfish addition is not commutative, so both (i, j) and (j, i) are tried.
max_magnitude = 0
for i in range(len(lines)):
    for j in range(len(lines)):
        if i != j:
            trial = Number(lines[i].rstrip())
            trial.add(Number(lines[j].rstrip()))
            # Compute once: get_magnitude() consumes the token list, so the
            # original's two calls per pair did redundant work.
            magnitude = trial.get_magnitude()
            if magnitude > max_magnitude:
                max_magnitude = magnitude
print("Max magnitude = {}".format(max_magnitude))
import sys
import queue
import numbers
import math
if len(sys.argv) != 2:
print("Help: {} <filename>".format(sys.argv[0]))
sys.exit(0)
class Number:
def __init__(self, number_string):
self.arr = [int(x) if x.isnumeric() else x for x in list(number_string)]
def process(self, split):
rhs = False
stack = queue.LifoQueue() # stack of bools, False=left side, True=right side
for i in range(len(self.arr)):
if self.arr[i]=="[":
stack.put(rhs)
rhs = False
elif self.arr[i]=="]":
stack.get()
elif self.arr[i]==",":
rhs = True
else:
if not split and rhs and stack.qsize()>4:
for j in range(i-3,0,-1): # don't need to check 0 as it must be a "["
if isinstance(self.arr[j], numbers.Number):
self.arr[j] += self.arr[i-2]
break
for j in range(i+1,len(self.arr)):
if isinstance(self.arr[j], numbers.Number):
self.arr[j] += self.arr[i]
break
self.arr = self.arr[:i-3]+[0]+self.arr[i+2:]
return True
elif split and self.arr[i]>9:
new_pair = ["[", math.floor(self.arr[i]/2.0), ",", math.ceil(self.arr[i]/2.0), "]"]
self.arr = self.arr[:i]+new_pair+self.arr[i+1:]
return True
rhs = True
return False
def reduce(self):
return (self.process(False) or self.process(True))
def add(self, number):
self.arr.insert(0,"[")
self.arr.append(",")
self.arr.extend(number.arr)
self.arr.append("]")
while self.reduce():
pass
def get_string(self):
to_string = ""
for char in self.arr:
to_string+=str(char) if isinstance(char, numbers.Number) else char
return to_string
def get_magnitude(self): # pop pop
while len(self.arr) > 1:
for i in range(len(self.arr)):
if isinstance(self.arr[i], numbers.Number) and i>1 and self.arr[i-1]=="," and isinstance(self.arr[i-2], numbers.Number):
self.arr = self.arr[:i-3]+[(3*self.arr[i-2])+(2*self.arr[i])]+self.arr[i+2:]
break
return self.arr[0]
with open(sys.argv[1]) as file:
lines = file.readlines()
base = None
for line in lines:
if base is None:
base = Number(line.rstrip())
else:
base.add(Number(line.rstrip()))
print("{} has magnitude {}".format(base.get_string(), base.get_magnitude()))
max_magnitude = 0
for i in range(len(lines)):
for j in range(len(lines)):
if i!=j:
trial = Number(lines[i].rstrip())
trial.add(Number(lines[j].rstrip()))
if trial.get_magnitude() > max_magnitude:
max_magnitude = trial.get_magnitude()
print("Max magnitude = {}".format(max_magnitude)) | 0.127925 | 0.125065 |
import os
import time
import json
import argparse
from deeprob.utils.data import DataStandardizer
from deeprob.spn.utils.statistics import compute_statistics
from deeprob.spn.structure.leaf import Bernoulli, Gaussian
from deeprob.spn.learning.wrappers import learn_estimator
from experiments.datasets import load_binary_dataset, load_continuous_dataset
from experiments.datasets import BINARY_DATASETS, CONTINUOUS_DATASETS
from experiments.utils import evaluate_log_likelihoods
if __name__ == '__main__':
    # Parse the arguments
    parser = argparse.ArgumentParser(
        description="Vanilla Sum-Product Networks (SPNs) experiments"
    )
    parser.add_argument(
        'dataset', choices=BINARY_DATASETS + CONTINUOUS_DATASETS, help="The dataset."
    )
    parser.add_argument(
        '--learn-leaf', choices=['mle', 'isotonic', 'binary-clt'], default='mle',
        help="The method for leaf learning."
    )
    parser.add_argument(
        '--split-rows', choices=['kmeans', 'kmeans_mb', 'gmm', 'dbscan', 'wald', 'rdc', 'random'], default='gmm',
        help="The splitting rows method."
    )
    parser.add_argument(
        '--split-cols', choices=['gvs', 'rgvs', 'wrgvs', 'ebvs', 'ebvs_ae', 'gbvs', 'gbvs_ag', 'rdc', 'random'],
        default='gvs', help="The splitting columns method."
    )
    parser.add_argument(
        '--min-rows-slice', type=int, default=256, help="The minimum number of rows for splitting."
    )
    parser.add_argument(
        '--min-cols-slice', type=int, default=2, help="The minimum number of columns for splitting."
    )
    parser.add_argument(
        '--n-clusters', type=int, default=2, help="The number of clusters for rows splitting."
    )
    parser.add_argument(
        '--gtest-threshold', type=float, default=5.0, help="The threshold for the G-Test independence test."
    )
    parser.add_argument(
        '--rdc-threshold', type=float, default=0.3, help="The threshold for the RDC independence test."
    )
    parser.add_argument(
        '--ebvs-threshold', type=float, default=0.3, help='The threshold for the Entropy/Gini column splitting'
    )
    parser.add_argument(
        '--smoothing', type=float, default=0.1, help="The Laplace smoothing value."
    )
    parser.add_argument(
        '--seed', type=int, default=42, help="The seed value to use."
    )
    parser.add_argument(
        '--no-verbose', dest='verbose', action='store_false', help="Whether to disable verbose mode."
    )
    args = parser.parse_args()

    # Load the dataset
    if args.dataset in BINARY_DATASETS:
        data_train, data_valid, data_test = load_binary_dataset(
            'datasets', args.dataset, raw=True
        )
    else:
        # Continuous data are standardized using statistics fitted on the
        # training split only, then applied to all three splits.
        transform = DataStandardizer()
        data_train, data_valid, data_test = load_continuous_dataset(
            'datasets', args.dataset, raw=True, random_state=args.seed
        )
        transform.fit(data_train)
        data_train = transform.forward(data_train)
        data_valid = transform.forward(data_valid)
        data_test = transform.forward(data_test)
    _, n_features = data_train.shape

    # Set the data distributions and domains at leaves
    if args.dataset in BINARY_DATASETS:
        distributions = [Bernoulli] * n_features
        domains = [[0, 1]] * n_features
    else:
        distributions = [Gaussian] * n_features
        domains = None  # Automatically detect domains for continuous data

    # Create the results directory (timestamped so repeated runs never collide)
    identifier = time.strftime("%Y%m%d-%H%M%S")
    directory = os.path.join('spn', args.dataset, identifier)
    os.makedirs(directory, exist_ok=True)
    results_filepath = os.path.join(directory, 'results.json')

    # Set the learn leaf method parameters
    # NOTE(review): the --learn-leaf choices are ['mle', 'isotonic',
    # 'binary-clt'], but this membership test uses 'cltree', which can never
    # match -- so the smoothing alpha is never forwarded for 'binary-clt'.
    # Confirm the intended spelling against deeprob's learn_estimator API.
    learn_leaf_kwargs = dict()
    if args.learn_leaf in ['mle', 'isotonic', 'cltree']:
        learn_leaf_kwargs['alpha'] = args.smoothing

    # Set the split rows method parameters
    split_rows_kwargs = dict()
    if args.split_rows in ['kmeans', 'gmm', 'wald', 'kmeans_mb']:
        split_rows_kwargs['n'] = args.n_clusters

    # Set the split columns method parameters (each method takes its own
    # threshold keyword; the *_ae / *_ag variants additionally need the
    # training set size)
    split_cols_kwargs = dict()
    if args.split_cols in ['gvs', 'rgvs', 'wrgvs']:
        split_cols_kwargs['p'] = args.gtest_threshold
    elif args.split_cols == 'rdc':
        split_cols_kwargs['d'] = args.rdc_threshold
    elif args.split_cols in ['ebvs', 'gbvs']:
        split_cols_kwargs['alpha'] = args.smoothing
        split_cols_kwargs['e'] = args.ebvs_threshold
    elif args.split_cols in ['ebvs_ae', 'gbvs_ag']:
        split_cols_kwargs['alpha'] = args.smoothing
        split_cols_kwargs['e'] = args.ebvs_threshold
        split_cols_kwargs['size'] = len(data_train)

    # Learn a SPN density estimator
    start_time = time.perf_counter()
    spn = learn_estimator(
        data=data_train,
        distributions=distributions,
        domains=domains,
        learn_leaf=args.learn_leaf,
        split_rows=args.split_rows,
        split_cols=args.split_cols,
        min_rows_slice=args.min_rows_slice,
        min_cols_slice=args.min_cols_slice,
        learn_leaf_kwargs=learn_leaf_kwargs,
        split_rows_kwargs=split_rows_kwargs,
        split_cols_kwargs=split_cols_kwargs,
        random_state=args.seed,
        verbose=args.verbose
    )
    learning_time = time.perf_counter() - start_time

    # Compute the log-likelihoods for the validation and test datasets
    valid_mean_ll, valid_stddev_ll = evaluate_log_likelihoods(spn, data_valid)
    test_mean_ll, test_stddev_ll = evaluate_log_likelihoods(spn, data_test)

    # Save the results, the timing, the structure statistics and the full
    # experiment settings for later comparison.
    results = {
        'log_likelihood': {
            'valid': {'mean': valid_mean_ll, 'stddev': valid_stddev_ll},
            'test': {'mean': test_mean_ll, 'stddev': test_stddev_ll}
        },
        'learning_time': learning_time,
        'statistics': compute_statistics(spn),
        'settings': args.__dict__
    }
    with open(results_filepath, 'w') as f:
        json.dump(results, f, indent=4)
import time
import json
import argparse
from deeprob.utils.data import DataStandardizer
from deeprob.spn.utils.statistics import compute_statistics
from deeprob.spn.structure.leaf import Bernoulli, Gaussian
from deeprob.spn.learning.wrappers import learn_estimator
from experiments.datasets import load_binary_dataset, load_continuous_dataset
from experiments.datasets import BINARY_DATASETS, CONTINUOUS_DATASETS
from experiments.utils import evaluate_log_likelihoods
if __name__ == '__main__':
# Parse the arguments
parser = argparse.ArgumentParser(
description="Vanilla Sum-Product Networks (SPNs) experiments"
)
parser.add_argument(
'dataset', choices=BINARY_DATASETS + CONTINUOUS_DATASETS, help="The dataset."
)
parser.add_argument(
'--learn-leaf', choices=['mle', 'isotonic', 'binary-clt'], default='mle',
help="The method for leaf learning."
)
parser.add_argument(
'--split-rows', choices=['kmeans', 'kmeans_mb', 'gmm', 'dbscan', 'wald', 'rdc', 'random'], default='gmm',
help="The splitting rows method."
)
parser.add_argument(
'--split-cols', choices=['gvs', 'rgvs', 'wrgvs', 'ebvs', 'ebvs_ae', 'gbvs', 'gbvs_ag', 'rdc', 'random'],
default='gvs', help="The splitting columns method."
)
parser.add_argument(
'--min-rows-slice', type=int, default=256, help="The minimum number of rows for splitting."
)
parser.add_argument(
'--min-cols-slice', type=int, default=2, help="The minimum number of columns for splitting."
)
parser.add_argument(
'--n-clusters', type=int, default=2, help="The number of clusters for rows splitting."
)
parser.add_argument(
'--gtest-threshold', type=float, default=5.0, help="The threshold for the G-Test independence test."
)
parser.add_argument(
'--rdc-threshold', type=float, default=0.3, help="The threshold for the RDC independence test."
)
parser.add_argument(
'--ebvs-threshold', type=float, default=0.3, help='The threshold for the Entropy/Gini column splitting'
)
parser.add_argument(
'--smoothing', type=float, default=0.1, help="The Laplace smoothing value."
)
parser.add_argument(
'--seed', type=int, default=42, help="The seed value to use."
)
parser.add_argument(
'--no-verbose', dest='verbose', action='store_false', help="Whether to disable verbose mode."
)
args = parser.parse_args()
# Load the dataset
if args.dataset in BINARY_DATASETS:
data_train, data_valid, data_test = load_binary_dataset(
'datasets', args.dataset, raw=True
)
else:
transform = DataStandardizer()
data_train, data_valid, data_test = load_continuous_dataset(
'datasets', args.dataset, raw=True, random_state=args.seed
)
transform.fit(data_train)
data_train = transform.forward(data_train)
data_valid = transform.forward(data_valid)
data_test = transform.forward(data_test)
_, n_features = data_train.shape
# Set the data distributions and domains at leaves
if args.dataset in BINARY_DATASETS:
distributions = [Bernoulli] * n_features
domains = [[0, 1]] * n_features
else:
distributions = [Gaussian] * n_features
domains = None # Automatically detect domains for continuous data
# Create the results directory
identifier = time.strftime("%Y%m%d-%H%M%S")
directory = os.path.join('spn', args.dataset, identifier)
os.makedirs(directory, exist_ok=True)
results_filepath = os.path.join(directory, 'results.json')
# Set the learn leaf method parameters
learn_leaf_kwargs = dict()
if args.learn_leaf in ['mle', 'isotonic', 'cltree']:
learn_leaf_kwargs['alpha'] = args.smoothing
# Set the split rows method parameters
split_rows_kwargs = dict()
if args.split_rows in ['kmeans', 'gmm', 'wald', 'kmeans_mb']:
split_rows_kwargs['n'] = args.n_clusters
# Set the split columns method parameters
split_cols_kwargs = dict()
if args.split_cols in ['gvs', 'rgvs', 'wrgvs']:
split_cols_kwargs['p'] = args.gtest_threshold
elif args.split_cols == 'rdc':
split_cols_kwargs['d'] = args.rdc_threshold
elif args.split_cols in ['ebvs', 'gbvs']:
split_cols_kwargs['alpha'] = args.smoothing
split_cols_kwargs['e'] = args.ebvs_threshold
elif args.split_cols in ['ebvs_ae', 'gbvs_ag']:
split_cols_kwargs['alpha'] = args.smoothing
split_cols_kwargs['e'] = args.ebvs_threshold
split_cols_kwargs['size'] = len(data_train)
# Learn a SPN density estimator
start_time = time.perf_counter()
spn = learn_estimator(
data=data_train,
distributions=distributions,
domains=domains,
learn_leaf=args.learn_leaf,
split_rows=args.split_rows,
split_cols=args.split_cols,
min_rows_slice=args.min_rows_slice,
min_cols_slice=args.min_cols_slice,
learn_leaf_kwargs=learn_leaf_kwargs,
split_rows_kwargs=split_rows_kwargs,
split_cols_kwargs=split_cols_kwargs,
random_state=args.seed,
verbose=args.verbose
)
learning_time = time.perf_counter() - start_time
# Compute the log-likelihoods for the validation and test datasets
valid_mean_ll, valid_stddev_ll = evaluate_log_likelihoods(spn, data_valid)
test_mean_ll, test_stddev_ll = evaluate_log_likelihoods(spn, data_test)
# Save the results
results = {
'log_likelihood': {
'valid': {'mean': valid_mean_ll, 'stddev': valid_stddev_ll},
'test': {'mean': test_mean_ll, 'stddev': test_stddev_ll}
},
'learning_time': learning_time,
'statistics': compute_statistics(spn),
'settings': args.__dict__
}
with open(results_filepath, 'w') as f:
json.dump(results, f, indent=4) | 0.750918 | 0.26514 |
import os

# Project root: the directory containing this settings module.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): this key is committed to source control ('<KEY>' looks like a
# redaction placeholder) — rotate it and load from the environment in production.
SECRET_KEY = '<KEY>#@(*%7!*9#q%pgyedotv%lp@9nfbj'

# NOTE(review): the trailing '*' wildcard makes the preceding host entries
# redundant and accepts any Host header — tighten before production use.
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'rogue.iplantcollaborative.org', 'data.cyverse.org', '*']

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# Django contrib apps plus DRF and the two local apps.
INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'rest_framework',
    'apps.file_data',
    'apps.importer'
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [
            os.path.join(BASE_DIR, 'templates')
        ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'wsgi.application'

# Two SQLite databases: 'default' for Django bookkeeping and 'file_data' for
# the file-index tables; FileDataRouter below decides which app hits which DB.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'main.sqlite3'),
    },
    'file_data': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'file_data.sqlite3'),
    }
}

DATABASE_ROUTERS = ['apps.file_data.routers.FileDataRouter']

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

CSRF_COOKIE_NAME = "csrftoken"

LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
# NOTE(review): USE_TZ = False stores naive local datetimes — confirm this is
# intentional given TIME_ZONE is set to UTC.
USE_TZ = False

STATIC_URL = '/static/'
STATICFILES_DIRS = [
    os.path.join(BASE_DIR, 'static')
]
# Media files are served from the project root itself.
MEDIA_ROOT = os.path.join(BASE_DIR)
FIXTURE_DIRS = [
    os.path.join(BASE_DIR, 'fixtures')
] | django/settings.py | import os
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '<KEY>#@(*%7!*9#q%pgyedotv%lp@9nfbj'
ALLOWED_HOSTS = ['localhost', '127.0.0.1', 'rogue.iplantcollaborative.org', 'data.cyverse.org', '*']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'apps.file_data',
'apps.importer'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates')
],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'main.sqlite3'),
},
'file_data': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'file_data.sqlite3'),
}
}
DATABASE_ROUTERS = ['apps.file_data.routers.FileDataRouter']
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
CSRF_COOKIE_NAME = "csrftoken"
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = False
STATIC_URL = '/static/'
STATICFILES_DIRS = [
os.path.join(BASE_DIR, 'static')
]
MEDIA_ROOT = os.path.join(BASE_DIR)
FIXTURE_DIRS = [
os.path.join(BASE_DIR, 'fixtures')
] | 0.196518 | 0.097176 |
import collections
import collections.abc
import os

import numpy as np
import torch
import torch.nn.functional as F

import geometry
def parse_intrinsics_hdf5(raw_data, trgt_sidelength=None, invert_y=False):
    """Parse camera intrinsics stored as a raw byte blob in an HDF5 dataset.

    The blob is UTF-8 text with the layout:
        line 0: f cx cy <unused>
        line 1: grid barycenter (3 floats)
        line 3: height width
        line 4: optional world2cam flag (int); absent/invalid means False

    Args:
        raw_data: h5py dataset (or numpy array) whose bytes hold the text above.
        trgt_sidelength: if given, rescale intrinsics to this square resolution.
        invert_y: if True, negate the y focal length.

    Returns:
        (full_intrinsic 4x4 np.ndarray, grid_barycenter torch.Tensor,
         world2cam_poses bool)
    """
    # tobytes() replaces the deprecated tostring(), which was removed in NumPy 2.0.
    s = raw_data[...].tobytes()
    s = s.decode('utf-8')
    lines = s.split('\n')
    f, cx, cy, _ = map(float, lines[0].split())
    grid_barycenter = torch.Tensor(list(map(float, lines[1].split())))
    height, width = map(float, lines[3].split())
    try:
        world2cam_poses = int(lines[4])
    except ValueError:
        # Flag line absent or non-numeric: default to camera-to-world poses.
        world2cam_poses = None
    if world2cam_poses is None:
        world2cam_poses = False
    world2cam_poses = bool(world2cam_poses)
    if trgt_sidelength is not None:
        # Rescale principal point and focal length to the target resolution.
        cx = cx/width * trgt_sidelength
        cy = cy/height * trgt_sidelength
        f = trgt_sidelength / height * f
    fx = f
    if invert_y:
        fy = -f
    else:
        fy = f
    full_intrinsic = np.array([[fx, 0., cx, 0.],
                               [0., fy, cy, 0],
                               [0., 0, 1, 0],
                               [0, 0, 0, 1]])
    return full_intrinsic, grid_barycenter, world2cam_poses
def light_field_point_cloud(light_field_fn, num_samples=64**2, outlier_rejection=True):
    """Sample a 3D point cloud from a light field network.

    Random rays are perturbed along an auxiliary direction; depth along each
    ray is recovered from the gradient of the predicted color w.r.t. the two
    perturbation parameters (s, t), averaged over 5 random draws.

    NOTE(review): requires CUDA (tensors are created with .cuda()), and the
    `outlier_rejection` flag is never read — TODO confirm intent.
    """
    # Random unit directions and random origins in [-1, 1]^3.
    dirs = torch.normal(torch.zeros(1, num_samples, 3), torch.ones(1, num_samples, 3)).cuda()
    dirs = F.normalize(dirs, dim=-1)
    x = (torch.rand_like(dirs) - 0.5) * 2
    D = 1
    # Second point on each ray, at distance D along its direction.
    x_prim = x + D * dirs
    st = torch.zeros(1, num_samples, 2).requires_grad_(True).cuda()
    max_norm_dcdst = torch.ones_like(st) * 0
    dcdsts = []
    for i in range(5):
        # Fresh random auxiliary direction shared by both ray endpoints.
        d_prim = torch.normal(torch.zeros(1, num_samples, 3), torch.ones(1, num_samples, 3)).cuda()
        d_prim = F.normalize(d_prim, dim=-1)
        a = x + st[..., :1] * d_prim
        b = x_prim + st[..., 1:] * d_prim
        # Normalized Pluecker coordinates (direction, moment) of the perturbed ray.
        v_dir = b - a
        v_mom = torch.cross(a, b, dim=-1)
        v_norm = torch.cat((v_dir, v_mom), dim=-1) / v_dir.norm(dim=-1, keepdim=True)
        with torch.enable_grad():
            c = light_field_fn(v_norm)
            dcdst = gradient(c, st)
        dcdsts.append(dcdst)
        # Track, per sample, the gradient with the largest norm seen so far.
        criterion = max_norm_dcdst.norm(dim=-1, keepdim=True)<dcdst.norm(dim=-1, keepdim=True)
        max_norm_dcdst = torch.where(criterion, dcdst, max_norm_dcdst)
    dcdsts = torch.stack(dcdsts, dim=0)
    dcdt = dcdsts[..., 1:]
    dcds = dcdsts[..., :1]
    # Depth from the ratio of the two partials (two-plane parameterization).
    d = D * dcdt / (dcds + dcdt)
    # Reject samples whose depth estimate varies too much across the 5 draws,
    # or whose strongest observed gradient is too weak to be trusted.
    mask = d.std(dim=0) > 1e-2
    d = d.mean(0)
    d[mask] = 0.
    d[max_norm_dcdst.norm(dim=-1)<1] = 0.
    return {'depth':d, 'points':x + d * dirs, 'colors':c}
def gradient(y, x, grad_outputs=None, create_graph=True):
    """Return d(y)/d(x) via autograd, seeding with ones unless grad_outputs is given."""
    seed = torch.ones_like(y) if grad_outputs is None else grad_outputs
    (result,) = torch.autograd.grad(y, [x], grad_outputs=seed, create_graph=create_graph)
    return result
def parse_comma_separated_integers(string):
    """Parse a string like '1,2,3' into the list [1, 2, 3]."""
    return [int(token) for token in string.split(',')]
def convert_image(img, type):
    '''Convert a batched model output into a displayable uint8 image.

    Expects a single batch dimension; `type` selects the post-processing
    ('rgb'/'normal' are mapped from [-1, 1], 'depth' is min-max normalized).
    '''
    img = img.squeeze(0)
    if not 'normal' in type:
        # Non-normal outputs come in flat (N, C): lay out as HxWxC numpy.
        # NOTE(review): 'normal' images skip this conversion and stay tensors
        # through np.clip below — confirm callers rely on that.
        img = detach_all(lin2img(img, mode='np'))
    if 'rgb' in type or 'normal' in type:
        # Map values from [-1, 1] to [0, 1].
        img += 1.
        img /= 2.
    elif type == 'depth':
        # Min-max normalize depth to [0, 1].
        img = (img - np.amin(img)) / (np.amax(img) - np.amin(img))
    img *= 255.
    img = np.clip(img, 0., 255.).astype(np.uint8)
    return img
def flatten_first_two(tensor):
    """Merge the first two dimensions of *tensor* into a single one."""
    first, second, *trailing = tensor.shape
    return tensor.view(first * second, *trailing)
def parse_intrinsics(filepath, trgt_sidelength=None, invert_y=False):
    """Parse camera intrinsics from a whitespace-separated text file.

    Expected layout:
        line 1: f cx cy <unused>
        line 2: grid barycenter (3 floats)
        line 3: scale
        line 4: height width
        line 5: optional world2cam flag (int); absent/invalid means False

    Args:
        filepath: path to the intrinsics text file.
        trgt_sidelength: if given, rescale intrinsics to this square resolution.
        invert_y: if True, negate the y focal length.

    Returns:
        (full_intrinsic 4x4 np.ndarray, grid_barycenter torch.Tensor,
         scale float, world2cam_poses bool)
    """
    # Get camera intrinsics
    with open(filepath, 'r') as file:
        f, cx, cy, _ = map(float, file.readline().split())
        grid_barycenter = torch.Tensor(list(map(float, file.readline().split())))
        scale = float(file.readline())
        height, width = map(float, file.readline().split())
        try:
            world2cam_poses = int(file.readline())
        except ValueError:
            # Flag line missing or non-numeric: default to camera-to-world poses.
            world2cam_poses = None
    if world2cam_poses is None:
        world2cam_poses = False
    world2cam_poses = bool(world2cam_poses)
    if trgt_sidelength is not None:
        # Rescale principal point and focal length to the target resolution.
        cx = cx / width * trgt_sidelength
        cy = cy / height * trgt_sidelength
        f = trgt_sidelength / height * f
    fx = f
    if invert_y:
        fy = -f
    else:
        fy = f
    # Build the intrinsic matrices
    full_intrinsic = np.array([[fx, 0., cx, 0.],
                               [0., fy, cy, 0],
                               [0., 0, 1, 0],
                               [0, 0, 0, 1]])
    return full_intrinsic, grid_barycenter, scale, world2cam_poses
def num_divisible_by_2(number):
    """Count how many times *number* can be evenly halved.

    NOTE: like the original, this never terminates for number == 0.
    """
    count = 0
    while number % 2 == 0:
        number //= 2
        count += 1
    return count
def cond_mkdir(path):
    """Create *path* (including parents) if it does not already exist.

    Uses exist_ok=True instead of the original os.path.exists() guard,
    which had a check-then-create race when run concurrently.
    """
    os.makedirs(path, exist_ok=True)
def normalize(img):
    """Linearly rescale *img* so its values span [0, 1]."""
    lo = img.min()
    span = img.max() - lo
    return (img - lo) / span
def print_network(net):
    """Print the total number of trainable parameters in *net*."""
    total = sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
    print("%d" % total)
def add_batch_dim_to_dict(ob):
    """Recursively prepend a batch dimension to every array-like leaf in *ob*.

    Mappings, tuples and lists are traversed; leaves that support
    ``ob[None, ...]`` (numpy arrays, torch tensors) gain a leading axis;
    anything else (scalars, strings, ...) is returned unchanged.
    """
    # collections.Mapping was removed in Python 3.10; collections.abc.Mapping
    # is the supported spelling.
    if isinstance(ob, collections.abc.Mapping):
        return {k: add_batch_dim_to_dict(v) for k, v in ob.items()}
    elif isinstance(ob, tuple):
        return tuple(add_batch_dim_to_dict(k) for k in ob)
    elif isinstance(ob, list):
        return [add_batch_dim_to_dict(k) for k in ob]
    else:
        try:
            return ob[None, ...]
        except Exception:
            # Leaf does not support fancy indexing: pass it through untouched.
            return ob
def detach_all(tensor):
    """Return *tensor* as a numpy array, detached from autograd and moved to CPU."""
    detached = tensor.detach()
    return detached.cpu().numpy()
def lin2img(tensor, image_resolution=None, mode='torch'):
    """Reshape a flat pixel tensor into image layout.

    A (B, N, C) tensor becomes (B, C, H, W) in 'torch' mode or (B, H, W, C)
    in 'np' mode; a (N, C) tensor gets the same treatment without the batch
    dimension. When no resolution is given the image is assumed square.
    An unrecognized mode returns the tensor unchanged.
    """
    rank = len(tensor.shape)
    if rank == 3:
        batch_size, num_samples, channels = tensor.shape
    elif rank == 2:
        num_samples, channels = tensor.shape
    if image_resolution is None:
        # Square image inferred from the sample count.
        width = np.sqrt(num_samples).astype(int)
        height = width
    else:
        height, width = image_resolution[0], image_resolution[1]
    if rank == 3:
        if mode == 'torch':
            return tensor.permute(0, 2, 1).view(batch_size, channels, height, width)
        if mode == 'np':
            return tensor.view(batch_size, height, width, channels)
    elif rank == 2:
        if mode == 'torch':
            return tensor.permute(1, 0).view(channels, height, width)
        if mode == 'np':
            return tensor.view(height, width, channels)
    return tensor
def light_field_depth_map(plucker_coords, cam2world, light_field_fn):
    """Estimate per-ray depth from a light field network.

    Each input ray (Pluecker coordinates) is perturbed along a random
    auxiliary direction; depth is recovered from the gradients of the
    predicted color w.r.t. the two perturbation parameters (s, t), with
    unstable estimates zeroed out.

    Returns a dict with 'depth' and the corresponding 3D 'points'.
    """
    # Ray origins from the camera pose; second ray point at distance D.
    x = geometry.get_ray_origin(cam2world)
    D = 1
    x_prim = x + D * plucker_coords[..., :3]
    d_prim = torch.normal(torch.zeros_like(plucker_coords[..., :3]), torch.ones_like(plucker_coords[..., :3])).to(
        plucker_coords.device)
    d_prim = F.normalize(d_prim, dim=-1)
    dcdsts = []
    # Five independent small perturbations to measure estimate stability.
    for i in range(5):
        st = ((torch.rand_like(plucker_coords[..., :2]) - 0.5) * 1e-2).requires_grad_(True).to(plucker_coords.device)
        a = x + st[..., :1] * d_prim
        b = x_prim + st[..., 1:] * d_prim
        # Normalized Pluecker coordinates (direction, moment) of the perturbed ray.
        v_dir = b - a
        v_mom = torch.cross(a, b, dim=-1)
        v_norm = torch.cat((v_dir, v_mom), dim=-1) / v_dir.norm(dim=-1, keepdim=True)
        with torch.enable_grad():
            c = light_field_fn(v_norm)
            dcdst = gradient(c, st, create_graph=False)
        dcdsts.append(dcdst)
        # Drop per-iteration graph tensors eagerly to limit peak memory.
        del dcdst
        del c
    dcdsts = torch.stack(dcdsts, dim=0)
    # Final depth uses the first draw; the rest estimate variance.
    dcdt = dcdsts[0, ..., 1:]
    dcds = dcdsts[0, ..., :1]
    all_depth_estimates = D * dcdsts[..., 1:] / (dcdsts.sum(dim=-1, keepdim=True))
    # Zero out unstable estimates: weak gradients or negative depth.
    all_depth_estimates[torch.abs(dcdsts.sum(dim=-1)) < 5] = 0
    all_depth_estimates[all_depth_estimates<0] = 0.
    dcdsts_var = torch.std(dcdsts.norm(dim=-1, keepdim=True), dim=0, keepdim=True)  # NOTE(review): computed but unused
    depth_var = torch.std(all_depth_estimates, dim=0, keepdim=True)
    d = D * dcdt / (dcds + dcdt)
    # Apply the same stability criteria plus a cap on cross-draw variance.
    d[torch.abs(dcds + dcdt) < 5] = 0.
    d[d<0] = 0.
    d[depth_var[0, ..., 0] > 0.01] = 0.
    return {'depth':d, 'points':x + d * plucker_coords[..., :3]}
def pick(list, item_idcs):
    """Select the elements of *list* at positions *item_idcs*.

    An empty (or otherwise falsy) *list* is returned as-is.
    NOTE: the parameter name shadows the builtin ``list``; kept for
    interface compatibility.
    """
    if not list:
        return list
    selected = []
    for idx in item_idcs:
        selected.append(list[idx])
    return selected
def get_mgrid(sidelen, dim=2, flatten=False):
    '''Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.'''
    if isinstance(sidelen, int):
        sidelen = (sidelen,) * dim
    if dim == 2:
        coords = np.stack(np.mgrid[:sidelen[0], :sidelen[1]], axis=-1)[None, ...].astype(np.float32)
        # Normalize each axis independently to [0, 1].
        coords[0, :, :, 0] /= (sidelen[0] - 1)
        coords[0, :, :, 1] /= (sidelen[1] - 1)
    elif dim == 3:
        coords = np.stack(np.mgrid[:sidelen[0], :sidelen[1], :sidelen[2]], axis=-1)[None, ...].astype(np.float32)
        coords[..., 0] /= max(sidelen[0] - 1, 1)
        coords[..., 1] /= (sidelen[1] - 1)
        coords[..., 2] /= (sidelen[2] - 1)
    else:
        raise NotImplementedError('Not implemented for dim=%d' % dim)
    # Map [0, 1] to [-1, 1].
    coords = (coords - 0.5) * 2.
    grid = torch.from_numpy(coords)
    if flatten:
        grid = grid.view(-1, dim)
    return grid
def dict_to_gpu(ob):
    """Recursively move every tensor leaf in *ob* to the GPU.

    Mappings, tuples and lists are traversed; leaves that expose .cuda()
    (torch tensors/modules) are moved, everything else — or anything whose
    .cuda() call fails (e.g. no CUDA device) — is returned unchanged.
    """
    # collections.Mapping was removed in Python 3.10; collections.abc.Mapping
    # is the supported spelling.
    if isinstance(ob, collections.abc.Mapping):
        return {k: dict_to_gpu(v) for k, v in ob.items()}
    elif isinstance(ob, tuple):
        return tuple(dict_to_gpu(k) for k in ob)
    elif isinstance(ob, list):
        return [dict_to_gpu(k) for k in ob]
    else:
        try:
            return ob.cuda()
        except Exception:
            # Non-tensor leaf or CUDA unavailable: best-effort pass-through.
            return ob
def assemble_model_input(context, query, gpu=True):
    """Bundle context/query observation dicts into a model input dict.

    Adds a unit 'mask' entry to both dicts, prepends batch dimensions, and
    optionally moves everything to the GPU. (Also repairs the corrupted
    trailing line of the original, which had dataset-dump text fused onto
    the return statement.)

    NOTE(review): add_batch_dim_to_dict is applied twice to each dict,
    producing leading (1, 1, ...) axes — presumably (batch, num_observations);
    confirm against the model's expected layout.
    """
    context['mask'] = torch.Tensor([1.])
    query['mask'] = torch.Tensor([1.])
    context = add_batch_dim_to_dict(context)
    context = add_batch_dim_to_dict(context)
    query = add_batch_dim_to_dict(query)
    query = add_batch_dim_to_dict(query)
    # 'post_input' intentionally aliases the (batched) query dict.
    model_input = {'context': context, 'query': query, 'post_input': query}
    if gpu:
        model_input = dict_to_gpu(model_input)
    return model_input
import geometry
import os
import numpy as np
import torch
import collections
def parse_intrinsics_hdf5(raw_data, trgt_sidelength=None, invert_y=False):
s = raw_data[...].tostring()
s = s.decode('utf-8')
lines = s.split('\n')
f, cx, cy, _ = map(float, lines[0].split())
grid_barycenter = torch.Tensor(list(map(float, lines[1].split())))
height, width = map(float, lines[3].split())
try:
world2cam_poses = int(lines[4])
except ValueError:
world2cam_poses = None
if world2cam_poses is None:
world2cam_poses = False
world2cam_poses = bool(world2cam_poses)
if trgt_sidelength is not None:
cx = cx/width * trgt_sidelength
cy = cy/height * trgt_sidelength
f = trgt_sidelength / height * f
fx = f
if invert_y:
fy = -f
else:
fy = f
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic, grid_barycenter, world2cam_poses
def light_field_point_cloud(light_field_fn, num_samples=64**2, outlier_rejection=True):
dirs = torch.normal(torch.zeros(1, num_samples, 3), torch.ones(1, num_samples, 3)).cuda()
dirs = F.normalize(dirs, dim=-1)
x = (torch.rand_like(dirs) - 0.5) * 2
D = 1
x_prim = x + D * dirs
st = torch.zeros(1, num_samples, 2).requires_grad_(True).cuda()
max_norm_dcdst = torch.ones_like(st) * 0
dcdsts = []
for i in range(5):
d_prim = torch.normal(torch.zeros(1, num_samples, 3), torch.ones(1, num_samples, 3)).cuda()
d_prim = F.normalize(d_prim, dim=-1)
a = x + st[..., :1] * d_prim
b = x_prim + st[..., 1:] * d_prim
v_dir = b - a
v_mom = torch.cross(a, b, dim=-1)
v_norm = torch.cat((v_dir, v_mom), dim=-1) / v_dir.norm(dim=-1, keepdim=True)
with torch.enable_grad():
c = light_field_fn(v_norm)
dcdst = gradient(c, st)
dcdsts.append(dcdst)
criterion = max_norm_dcdst.norm(dim=-1, keepdim=True)<dcdst.norm(dim=-1, keepdim=True)
max_norm_dcdst = torch.where(criterion, dcdst, max_norm_dcdst)
dcdsts = torch.stack(dcdsts, dim=0)
dcdt = dcdsts[..., 1:]
dcds = dcdsts[..., :1]
d = D * dcdt / (dcds + dcdt)
mask = d.std(dim=0) > 1e-2
d = d.mean(0)
d[mask] = 0.
d[max_norm_dcdst.norm(dim=-1)<1] = 0.
return {'depth':d, 'points':x + d * dirs, 'colors':c}
def gradient(y, x, grad_outputs=None, create_graph=True):
if grad_outputs is None:
grad_outputs = torch.ones_like(y)
grad = torch.autograd.grad(y, [x], grad_outputs=grad_outputs, create_graph=create_graph)[0]
return grad
def parse_comma_separated_integers(string):
return list(map(int, string.split(',')))
def convert_image(img, type):
'''Expects single batch dimesion'''
img = img.squeeze(0)
if not 'normal' in type:
img = detach_all(lin2img(img, mode='np'))
if 'rgb' in type or 'normal' in type:
img += 1.
img /= 2.
elif type == 'depth':
img = (img - np.amin(img)) / (np.amax(img) - np.amin(img))
img *= 255.
img = np.clip(img, 0., 255.).astype(np.uint8)
return img
def flatten_first_two(tensor):
b, s, *rest = tensor.shape
return tensor.view(b * s, *rest)
def parse_intrinsics(filepath, trgt_sidelength=None, invert_y=False):
# Get camera intrinsics
with open(filepath, 'r') as file:
f, cx, cy, _ = map(float, file.readline().split())
grid_barycenter = torch.Tensor(list(map(float, file.readline().split())))
scale = float(file.readline())
height, width = map(float, file.readline().split())
try:
world2cam_poses = int(file.readline())
except ValueError:
world2cam_poses = None
if world2cam_poses is None:
world2cam_poses = False
world2cam_poses = bool(world2cam_poses)
if trgt_sidelength is not None:
cx = cx / width * trgt_sidelength
cy = cy / height * trgt_sidelength
f = trgt_sidelength / height * f
fx = f
if invert_y:
fy = -f
else:
fy = f
# Build the intrinsic matrices
full_intrinsic = np.array([[fx, 0., cx, 0.],
[0., fy, cy, 0],
[0., 0, 1, 0],
[0, 0, 0, 1]])
return full_intrinsic, grid_barycenter, scale, world2cam_poses
def num_divisible_by_2(number):
i = 0
while not number % 2:
number = number // 2
i += 1
return i
def cond_mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def normalize(img):
return (img - img.min()) / (img.max() - img.min())
def print_network(net):
model_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in model_parameters])
print("%d" % params)
def add_batch_dim_to_dict(ob):
if isinstance(ob, collections.Mapping):
return {k: add_batch_dim_to_dict(v) for k, v in ob.items()}
elif isinstance(ob, tuple):
return tuple(add_batch_dim_to_dict(k) for k in ob)
elif isinstance(ob, list):
return [add_batch_dim_to_dict(k) for k in ob]
else:
try:
return ob[None, ...]
except:
return ob
def detach_all(tensor):
return tensor.detach().cpu().numpy()
def lin2img(tensor, image_resolution=None, mode='torch'):
if len(tensor.shape) == 3:
batch_size, num_samples, channels = tensor.shape
elif len(tensor.shape) == 2:
num_samples, channels = tensor.shape
if image_resolution is None:
width = np.sqrt(num_samples).astype(int)
height = width
else:
height = image_resolution[0]
width = image_resolution[1]
if len(tensor.shape) == 3:
if mode == 'torch':
tensor = tensor.permute(0, 2, 1).view(batch_size, channels, height, width)
elif mode == 'np':
tensor = tensor.view(batch_size, height, width, channels)
elif len(tensor.shape) == 2:
if mode == 'torch':
tensor = tensor.permute(1, 0).view(channels, height, width)
elif mode == 'np':
tensor = tensor.view(height, width, channels)
return tensor
def light_field_depth_map(plucker_coords, cam2world, light_field_fn):
x = geometry.get_ray_origin(cam2world)
D = 1
x_prim = x + D * plucker_coords[..., :3]
d_prim = torch.normal(torch.zeros_like(plucker_coords[..., :3]), torch.ones_like(plucker_coords[..., :3])).to(
plucker_coords.device)
d_prim = F.normalize(d_prim, dim=-1)
dcdsts = []
for i in range(5):
st = ((torch.rand_like(plucker_coords[..., :2]) - 0.5) * 1e-2).requires_grad_(True).to(plucker_coords.device)
a = x + st[..., :1] * d_prim
b = x_prim + st[..., 1:] * d_prim
v_dir = b - a
v_mom = torch.cross(a, b, dim=-1)
v_norm = torch.cat((v_dir, v_mom), dim=-1) / v_dir.norm(dim=-1, keepdim=True)
with torch.enable_grad():
c = light_field_fn(v_norm)
dcdst = gradient(c, st, create_graph=False)
dcdsts.append(dcdst)
del dcdst
del c
dcdsts = torch.stack(dcdsts, dim=0)
dcdt = dcdsts[0, ..., 1:]
dcds = dcdsts[0, ..., :1]
all_depth_estimates = D * dcdsts[..., 1:] / (dcdsts.sum(dim=-1, keepdim=True))
all_depth_estimates[torch.abs(dcdsts.sum(dim=-1)) < 5] = 0
all_depth_estimates[all_depth_estimates<0] = 0.
dcdsts_var = torch.std(dcdsts.norm(dim=-1, keepdim=True), dim=0, keepdim=True)
depth_var = torch.std(all_depth_estimates, dim=0, keepdim=True)
d = D * dcdt / (dcds + dcdt)
d[torch.abs(dcds + dcdt) < 5] = 0.
d[d<0] = 0.
d[depth_var[0, ..., 0] > 0.01] = 0.
return {'depth':d, 'points':x + d * plucker_coords[..., :3]}
def pick(list, item_idcs):
if not list:
return list
return [list[i] for i in item_idcs]
def get_mgrid(sidelen, dim=2, flatten=False):
'''Generates a flattened grid of (x,y,...) coordinates in a range of -1 to 1.'''
if isinstance(sidelen, int):
sidelen = dim * (sidelen,)
if dim == 2:
pixel_coords = np.stack(np.mgrid[:sidelen[0], :sidelen[1]], axis=-1)[None, ...].astype(np.float32)
pixel_coords[0, :, :, 0] = pixel_coords[0, :, :, 0] / (sidelen[0] - 1)
pixel_coords[0, :, :, 1] = pixel_coords[0, :, :, 1] / (sidelen[1] - 1)
elif dim == 3:
pixel_coords = np.stack(np.mgrid[:sidelen[0], :sidelen[1], :sidelen[2]], axis=-1)[None, ...].astype(np.float32)
pixel_coords[..., 0] = pixel_coords[..., 0] / max(sidelen[0] - 1, 1)
pixel_coords[..., 1] = pixel_coords[..., 1] / (sidelen[1] - 1)
pixel_coords[..., 2] = pixel_coords[..., 2] / (sidelen[2] - 1)
else:
raise NotImplementedError('Not implemented for dim=%d' % dim)
pixel_coords -= 0.5
pixel_coords *= 2.
pixel_coords = torch.from_numpy(pixel_coords)
if flatten:
pixel_coords = pixel_coords.view(-1, dim)
return pixel_coords
def dict_to_gpu(ob):
if isinstance(ob, collections.Mapping):
return {k: dict_to_gpu(v) for k, v in ob.items()}
elif isinstance(ob, tuple):
return tuple(dict_to_gpu(k) for k in ob)
elif isinstance(ob, list):
return [dict_to_gpu(k) for k in ob]
else:
try:
return ob.cuda()
except:
return ob
def assemble_model_input(context, query, gpu=True):
context['mask'] = torch.Tensor([1.])
query['mask'] = torch.Tensor([1.])
context = add_batch_dim_to_dict(context)
context = add_batch_dim_to_dict(context)
query = add_batch_dim_to_dict(query)
query = add_batch_dim_to_dict(query)
model_input = {'context': context, 'query': query, 'post_input': query}
if gpu:
model_input = dict_to_gpu(model_input)
return model_input | 0.557845 | 0.511595 |
from unittest.mock import Mock
import pytest
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.utils.state import State
from airflow.utils.timezone import convert_to_utc, datetime
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
class TestPrevDagrunDep:
    """DB-backed tests for PrevDagrunDep using real DagRun/TaskInstance rows."""

    def teardown_method(self):
        # Each test creates DagRuns in the metadata DB; wipe them between tests.
        clear_db_runs()

    def test_first_task_run_of_new_task(self):
        """
        The first task run of a new task in an old DAG should pass if the task has
        ignore_first_depends_on_past set to True.
        """
        dag = DAG('test_dag')
        old_task = BaseOperator(
            task_id='test_task',
            dag=dag,
            depends_on_past=True,
            start_date=convert_to_utc(datetime(2016, 1, 1)),
            wait_for_downstream=False,
        )
        # Old DAG run will include only TaskInstance of old_task
        dag.create_dagrun(
            run_id='old_run',
            state=State.SUCCESS,
            execution_date=old_task.start_date,
            run_type=DagRunType.SCHEDULED,
        )
        new_task = BaseOperator(
            task_id='new_task',
            dag=dag,
            depends_on_past=True,
            ignore_first_depends_on_past=True,
            start_date=old_task.start_date,
        )
        # New DAG run will include 1st TaskInstance of new_task
        dr = dag.create_dagrun(
            run_id='new_run',
            state=State.RUNNING,
            execution_date=convert_to_utc(datetime(2016, 1, 2)),
            run_type=DagRunType.SCHEDULED,
        )
        ti = dr.get_task_instance(new_task.task_id)
        ti.task = new_task
        # this is important, we need to assert there is no previous_ti of this ti
        assert ti.previous_ti is None
        dep_context = DepContext(ignore_depends_on_past=False)
        # ignore_first_depends_on_past must let the very first TI pass the dep.
        assert PrevDagrunDep().is_met(ti=ti, dep_context=dep_context)
@pytest.mark.parametrize(
    "depends_on_past, wait_for_downstream, prev_ti, context_ignore_depends_on_past, dep_met",
    [
        # If the task does not set depends_on_past, the previous dagrun should
        # be ignored, even though previous_ti would otherwise fail the dep.
        pytest.param(
            False,
            False,  # wait_for_downstream=True overrides depends_on_past=False.
            Mock(
                state=State.NONE,
                **{"are_dependents_done.return_value": False},
            ),
            False,
            True,
            id="not_depends_on_past",
        ),
        # If the context overrides depends_on_past, the dep should be met even
        # though there is no previous_ti which would normally fail the dep.
        pytest.param(
            True,
            False,
            Mock(
                state=State.SUCCESS,
                **{"are_dependents_done.return_value": True},
            ),
            True,
            True,
            id="context_ignore_depends_on_past",
        ),
        # The first task run should pass since it has no previous dagrun.
        pytest.param(True, False, None, False, True, id="first_task_run"),
        # Previous TI did not complete execution. This dep should fail.
        pytest.param(
            True,
            False,
            Mock(
                state=State.NONE,
                **{"are_dependents_done.return_value": True},
            ),
            False,
            False,
            id="prev_ti_bad_state",
        ),
        # Previous TI specified to wait for the downstream tasks of the previous
        # dagrun. It should fail this dep if the previous TI's downstream TIs
        # are not done.
        pytest.param(
            True,
            True,
            Mock(
                state=State.SUCCESS,
                **{"are_dependents_done.return_value": False},
            ),
            False,
            False,
            id="failed_wait_for_downstream",
        ),
        # All the conditions for the dep are met.
        pytest.param(
            True,
            True,
            Mock(
                state=State.SUCCESS,
                **{"are_dependents_done.return_value": True},
            ),
            False,
            True,
            id="all_met",
        ),
    ],
)
def test_dagrun_dep(
    depends_on_past,
    wait_for_downstream,
    prev_ti,
    context_ignore_depends_on_past,
    dep_met,
):
    """Exercise PrevDagrunDep against fully mocked DagRun/TaskInstance objects.

    NOTE(review): defined at module level (no ``self``); if it was meant to be
    a TestPrevDagrunDep method it is missing the self parameter — confirm.
    """
    task = BaseOperator(
        task_id="test_task",
        dag=DAG("test_dag"),
        depends_on_past=depends_on_past,
        start_date=datetime(2016, 1, 1),
        wait_for_downstream=wait_for_downstream,
    )
    if prev_ti:
        # Previous dagrun hands back the mocked previous TaskInstance.
        prev_dagrun = Mock(
            execution_date=datetime(2016, 1, 2),
            **{"get_task_instance.return_value": prev_ti},
        )
    else:
        prev_dagrun = None
    dagrun = Mock(
        **{
            "get_previous_scheduled_dagrun.return_value": prev_dagrun,
            "get_previous_dagrun.return_value": prev_dagrun,
        },
    )
    ti = Mock(task=task, **{"get_dagrun.return_value": dagrun})
    dep_context = DepContext(ignore_depends_on_past=context_ignore_depends_on_past)
    assert PrevDagrunDep().is_met(ti=ti, dep_context=dep_context) == dep_met
from unittest.mock import Mock
import pytest
from airflow.models import DAG
from airflow.models.baseoperator import BaseOperator
from airflow.ti_deps.dep_context import DepContext
from airflow.ti_deps.deps.prev_dagrun_dep import PrevDagrunDep
from airflow.utils.state import State
from airflow.utils.timezone import convert_to_utc, datetime
from airflow.utils.types import DagRunType
from tests.test_utils.db import clear_db_runs
class TestPrevDagrunDep:
def teardown_method(self):
clear_db_runs()
def test_first_task_run_of_new_task(self):
"""
The first task run of a new task in an old DAG should pass if the task has
ignore_first_depends_on_past set to True.
"""
dag = DAG('test_dag')
old_task = BaseOperator(
task_id='test_task',
dag=dag,
depends_on_past=True,
start_date=convert_to_utc(datetime(2016, 1, 1)),
wait_for_downstream=False,
)
# Old DAG run will include only TaskInstance of old_task
dag.create_dagrun(
run_id='old_run',
state=State.SUCCESS,
execution_date=old_task.start_date,
run_type=DagRunType.SCHEDULED,
)
new_task = BaseOperator(
task_id='new_task',
dag=dag,
depends_on_past=True,
ignore_first_depends_on_past=True,
start_date=old_task.start_date,
)
# New DAG run will include 1st TaskInstance of new_task
dr = dag.create_dagrun(
run_id='new_run',
state=State.RUNNING,
execution_date=convert_to_utc(datetime(2016, 1, 2)),
run_type=DagRunType.SCHEDULED,
)
ti = dr.get_task_instance(new_task.task_id)
ti.task = new_task
# this is important, we need to assert there is no previous_ti of this ti
assert ti.previous_ti is None
dep_context = DepContext(ignore_depends_on_past=False)
assert PrevDagrunDep().is_met(ti=ti, dep_context=dep_context)
@pytest.mark.parametrize(
"depends_on_past, wait_for_downstream, prev_ti, context_ignore_depends_on_past, dep_met",
[
# If the task does not set depends_on_past, the previous dagrun should
# be ignored, even though previous_ti would otherwise fail the dep.
pytest.param(
False,
False, # wait_for_downstream=True overrides depends_on_past=False.
Mock(
state=State.NONE,
**{"are_dependents_done.return_value": False},
),
False,
True,
id="not_depends_on_past",
),
# If the context overrides depends_on_past, the dep should be met even
# though there is no previous_ti which would normally fail the dep.
pytest.param(
True,
False,
Mock(
state=State.SUCCESS,
**{"are_dependents_done.return_value": True},
),
True,
True,
id="context_ignore_depends_on_past",
),
# The first task run should pass since it has no previous dagrun.
pytest.param(True, False, None, False, True, id="first_task_run"),
# Previous TI did not complete execution. This dep should fail.
pytest.param(
True,
False,
Mock(
state=State.NONE,
**{"are_dependents_done.return_value": True},
),
False,
False,
id="prev_ti_bad_state",
),
# Previous TI specified to wait for the downstream tasks of the previous
# dagrun. It should fail this dep if the previous TI's downstream TIs
# are not done.
pytest.param(
True,
True,
Mock(
state=State.SUCCESS,
**{"are_dependents_done.return_value": False},
),
False,
False,
id="failed_wait_for_downstream",
),
# All the conditions for the dep are met.
pytest.param(
True,
True,
Mock(
state=State.SUCCESS,
**{"are_dependents_done.return_value": True},
),
False,
True,
id="all_met",
),
],
)
def test_dagrun_dep(
depends_on_past,
wait_for_downstream,
prev_ti,
context_ignore_depends_on_past,
dep_met,
):
task = BaseOperator(
task_id="test_task",
dag=DAG("test_dag"),
depends_on_past=depends_on_past,
start_date=datetime(2016, 1, 1),
wait_for_downstream=wait_for_downstream,
)
if prev_ti:
prev_dagrun = Mock(
execution_date=datetime(2016, 1, 2),
**{"get_task_instance.return_value": prev_ti},
)
else:
prev_dagrun = None
dagrun = Mock(
**{
"get_previous_scheduled_dagrun.return_value": prev_dagrun,
"get_previous_dagrun.return_value": prev_dagrun,
},
)
ti = Mock(task=task, **{"get_dagrun.return_value": dagrun})
dep_context = DepContext(ignore_depends_on_past=context_ignore_depends_on_past)
assert PrevDagrunDep().is_met(ti=ti, dep_context=dep_context) == dep_met | 0.618089 | 0.462352 |
from server.custom_exceptions.input_missing import InputMissing
from server.custom_exceptions.input_not_int import InputNotInteger
from server.custom_exceptions.paper_trade_id_missing import PaperTradeIdMissing
from server.custom_exceptions.paper_trade_id_not_int import PaperTradeIdNotInt
from server.custom_exceptions.sell_price_missing import SellPriceMissing
from server.custom_exceptions.sell_price_negative import SellPriceNegative
from server.custom_exceptions.sell_price_not_float import SellPriceNotFloat
from server.custom_exceptions.user_id_must_be_string import UserIdMustBeString
from server.custom_exceptions.user_id_not_provided import MissingUserId
from server.custom_exceptions.paper_trade_exception import PaperTradeException
from server.custom_exceptions.input_not_string import InputNotString
from server.data_access_layer.implementation_classes.paper_trade_dao import PaperTradeDAOImp
from server.entities.paper_trade import PaperTrade
from server.service_layer.abstract_classes.paper_trade_service_abs import PaperTradeService
user_id_must_be_string: str = "The user id must be a string."
user_id_not_provided: str = "A user id must be provided."
paper_trade_id_must_be_int: str = "The paper trade id must be an integer."
paper_trade_id_not_provided: str = "A paper trade id must be provided."
paper_trade_index_must_be_int: str = "The paper trade index must be a integer."
paper_trade_value_must_be_string: str = "The paper trade object value must be a string."
paper_trade_value_must_be_float: str = "The paper trade object value must be a float."
paper_trade_value_must_be_int: str = "The paper trade object value must be an integer."
paper_trade_value_not_provided: str = "The paper trade object value must be provided."
paper_trade_index_not_provided: str = "A paper trade index must be provided."
sell_price_must_be_float: str = "The sell price must be a float."
sell_price_not_provided: str = "A sell price must be provided."
sell_price_negative: str = "A sell price must be a positive number."
class PaperTradeServiceImp(PaperTradeService):
def __init__(self, paper_trade_dao):
self.paper_trade_dao: PaperTradeDAOImp = paper_trade_dao
def add_paper_trade(self, user_id: str, pending_option: dict) -> dict:
    """Validate the pending option payload and persist it as a paper trade.

    Raises:
        UserIdMustBeString: user_id is not a string.
        MissingUserId: user_id is blank.
        InputNotString: a text field of the payload is not a str.
        PaperTradeException: a required field is missing/blank, or a
            price field is not a float.
        InputNotInteger: tradeId or netProfitPercentage is not an int.
    """
    if not isinstance(user_id, str):
        raise UserIdMustBeString(user_id_must_be_string)
    if not user_id.strip():
        raise MissingUserId(user_id_not_provided)
    # Field groups, in the same order the checks originally ran.
    string_keys = ("ticker", "expirationDate", "strategyType")
    required_keys = ("tradeId", "strikePrice", "contracts", "callPrice",
                     "putPrice", "callBreakevenAmount", "callBreakevenPercent",
                     "putBreakevenAmount", "putBreakevenPercent",
                     "straddleCallBreakevenAmount",
                     "straddleCallBreakevenPercent",
                     "straddlePutBreakevenAmount",
                     "straddlePutBreakevenPercent", "sellPrice")
    float_keys = ("callPrice", "putPrice", "callBreakevenAmount",
                  "callBreakevenPercent", "putBreakevenAmount",
                  "putBreakevenPercent", "straddleCallBreakevenAmount",
                  "straddleCallBreakevenPercent", "straddlePutBreakevenAmount",
                  "straddlePutBreakevenPercent", "sellPrice", "strikePrice")
    # Text fields must be strings.
    if any(not isinstance(pending_option[key], str) for key in string_keys):
        raise InputNotString(paper_trade_value_must_be_string)
    # Every field must be present: None for scalars, non-blank for text.
    if any(pending_option[key] is None for key in required_keys) \
            or any(not pending_option[key].strip() for key in string_keys):
        raise PaperTradeException(paper_trade_value_not_provided)
    # Price fields must be floats.
    if any(not isinstance(pending_option[key], float) for key in float_keys):
        raise PaperTradeException(paper_trade_value_must_be_float)
    # Id fields must be integers.
    if not isinstance(pending_option["tradeId"], int) \
            or not isinstance(pending_option["netProfitPercentage"], int):
        raise InputNotInteger(paper_trade_value_must_be_int)
    # Positional order of the PaperTrade constructor.
    constructor_order = ("tradeId", "ticker", "strikePrice", "expirationDate",
                         "contracts", "strategyType", "callPrice", "putPrice",
                         "callBreakevenAmount", "callBreakevenPercent",
                         "putBreakevenAmount", "putBreakevenPercent",
                         "straddleCallBreakevenAmount",
                         "straddleCallBreakevenPercent",
                         "straddlePutBreakevenAmount",
                         "straddlePutBreakevenPercent", "sellPrice")
    paper_trade = PaperTrade(*(pending_option[key] for key in constructor_order))
    return self.paper_trade_dao.add_paper_trade(user_id, paper_trade)
def get_paper_trades(self, user_id: str) -> list:
    """Return every stored paper trade for *user_id*.

    Raises:
        UserIdMustBeString: user_id is not a string.
        MissingUserId: user_id is blank.
    """
    if not isinstance(user_id, str):
        raise UserIdMustBeString(user_id_must_be_string)
    if not user_id.strip():
        raise MissingUserId(user_id_not_provided)
    return self.paper_trade_dao.get_paper_trades(user_id)
def update_paper_trade_sell_price(self, user_id: str, paper_trade_index: int,
                                  sell_price: float) -> bool:
    """Set the sell price of the trade at *paper_trade_index* for *user_id*.

    Raises:
        UserIdMustBeString / MissingUserId: bad or blank user id.
        InputMissing / InputNotInteger: bad paper_trade_index.
        SellPriceMissing / SellPriceNotFloat / SellPriceNegative: bad
            sell_price.
    """
    if not isinstance(user_id, str):
        raise UserIdMustBeString(user_id_must_be_string)
    if not user_id.strip():
        raise MissingUserId(user_id_not_provided)
    if paper_trade_index is None:
        raise InputMissing(paper_trade_index_not_provided)
    if not isinstance(paper_trade_index, int):
        raise InputNotInteger(paper_trade_index_must_be_int)
    if sell_price is None:
        raise SellPriceMissing(sell_price_not_provided)
    if not isinstance(sell_price, float):
        raise SellPriceNotFloat(sell_price_must_be_float)
    if sell_price < 0:
        raise SellPriceNegative(sell_price_negative)
    return self.paper_trade_dao.update_paper_trade_sell_price(
        user_id, paper_trade_index, sell_price)
def delete_paper_trade(self, user_id: str, paper_trade_id: int) -> int:
# check user_id is a string
if isinstance(user_id, str) is False:
raise UserIdMustBeString(user_id_must_be_string)
# check user_id not empty
if len(user_id.strip()) == 0:
raise MissingUserId(user_id_not_provided)
# check paper_trade_id is missing
if paper_trade_id is None:
raise PaperTradeIdMissing(paper_trade_id_not_provided)
# check paper_trade_id is an int
if isinstance(paper_trade_id, int) is False:
raise PaperTradeIdNotInt(paper_trade_id_must_be_int)
return self.paper_trade_dao.delete_paper_trade(user_id, paper_trade_id) | server/service_layer/implementation_classes/paper_trade_service.py | from server.custom_exceptions.input_missing import InputMissing
from server.custom_exceptions.input_not_int import InputNotInteger
from server.custom_exceptions.paper_trade_id_missing import PaperTradeIdMissing
from server.custom_exceptions.paper_trade_id_not_int import PaperTradeIdNotInt
from server.custom_exceptions.sell_price_missing import SellPriceMissing
from server.custom_exceptions.sell_price_negative import SellPriceNegative
from server.custom_exceptions.sell_price_not_float import SellPriceNotFloat
from server.custom_exceptions.user_id_must_be_string import UserIdMustBeString
from server.custom_exceptions.user_id_not_provided import MissingUserId
from server.custom_exceptions.paper_trade_exception import PaperTradeException
from server.custom_exceptions.input_not_string import InputNotString
from server.data_access_layer.implementation_classes.paper_trade_dao import PaperTradeDAOImp
from server.entities.paper_trade import PaperTrade
from server.service_layer.abstract_classes.paper_trade_service_abs import PaperTradeService
# User-facing validation messages raised by PaperTradeServiceImp.
user_id_must_be_string: str = "The user id must be a string."
user_id_not_provided: str = "A user id must be provided."
paper_trade_id_must_be_int: str = "The paper trade id must be an integer."
paper_trade_id_not_provided: str = "A paper trade id must be provided."
# Grammar fix: "a integer" -> "an integer" (user-facing text).
paper_trade_index_must_be_int: str = "The paper trade index must be an integer."
paper_trade_value_must_be_string: str = "The paper trade object value must be a string."
paper_trade_value_must_be_float: str = "The paper trade object value must be a float."
paper_trade_value_must_be_int: str = "The paper trade object value must be an integer."
paper_trade_value_not_provided: str = "The paper trade object value must be provided."
paper_trade_index_not_provided: str = "A paper trade index must be provided."
sell_price_must_be_float: str = "The sell price must be a float."
sell_price_not_provided: str = "A sell price must be provided."
sell_price_negative: str = "A sell price must be a positive number."
class PaperTradeServiceImp(PaperTradeService):
def __init__(self, paper_trade_dao):
self.paper_trade_dao: PaperTradeDAOImp = paper_trade_dao
def add_paper_trade(self, user_id: str, pending_option: dict) -> dict:
# check user_id is a string
if isinstance(user_id, str) is False:
raise UserIdMustBeString(user_id_must_be_string)
# check user_id not empty
if len(user_id.strip()) == 0:
raise MissingUserId(user_id_not_provided)
# check if value is a string
if isinstance(pending_option["ticker"], str) is False \
or isinstance(pending_option["expirationDate"], str) is False \
or isinstance(pending_option["strategyType"], str) is False:
raise InputNotString(paper_trade_value_must_be_string)
# check if value is not empty
if pending_option["tradeId"] is None or len(pending_option["ticker"].strip()) == 0 \
or pending_option["strikePrice"] is None \
or len(pending_option["expirationDate"].strip()) == 0 \
or len(pending_option["strategyType"].strip()) == 0 \
or pending_option["contracts"] is None \
or pending_option["callPrice"] is None \
or pending_option["putPrice"] is None \
or pending_option["callBreakevenAmount"] is None \
or pending_option["callBreakevenPercent"] is None \
or pending_option["putBreakevenAmount"] is None \
or pending_option["putBreakevenPercent"] is None \
or pending_option["straddleCallBreakevenAmount"] is None \
or pending_option["straddleCallBreakevenPercent"] is None \
or pending_option["straddlePutBreakevenAmount"] is None \
or pending_option["straddlePutBreakevenPercent"] is None \
or pending_option["sellPrice"] is None:
raise PaperTradeException(paper_trade_value_not_provided)
# check if value is a float
if isinstance(pending_option["callPrice"], float) is False \
or isinstance(pending_option["putPrice"], float) is False \
or isinstance(pending_option["callBreakevenAmount"], float) is False \
or isinstance(pending_option["callBreakevenPercent"], float) is False \
or isinstance(pending_option["putBreakevenAmount"], float) is False \
or isinstance(pending_option["putBreakevenPercent"], float) is False \
or isinstance(pending_option["straddleCallBreakevenAmount"], float) is False \
or isinstance(pending_option["straddleCallBreakevenPercent"], float) is False \
or isinstance(pending_option["straddlePutBreakevenAmount"], float) is False \
or isinstance(pending_option["straddlePutBreakevenPercent"], float) is False \
or isinstance(pending_option["sellPrice"], float) is False \
or isinstance(pending_option["strikePrice"], float) is False:
raise PaperTradeException(paper_trade_value_must_be_float)
# check if value is an integer
if isinstance(pending_option["tradeId"], int) is False \
or isinstance(pending_option["netProfitPercentage"], int) is False:
raise InputNotInteger(paper_trade_value_must_be_int)
paper_trade = PaperTrade(pending_option["tradeId"], pending_option["ticker"], pending_option["strikePrice"],
pending_option["expirationDate"], pending_option["contracts"],
pending_option["strategyType"], pending_option["callPrice"],
pending_option["putPrice"], pending_option["callBreakevenAmount"],
pending_option["callBreakevenPercent"], pending_option["putBreakevenAmount"],
pending_option["putBreakevenPercent"], pending_option["straddleCallBreakevenAmount"],
pending_option["straddleCallBreakevenPercent"],
pending_option["straddlePutBreakevenAmount"],
pending_option["straddlePutBreakevenPercent"], pending_option["sellPrice"])
return self.paper_trade_dao.add_paper_trade(user_id, paper_trade)
def get_paper_trades(self, user_id: str) -> list:
# check user_id is a string
if isinstance(user_id, str) is False:
raise UserIdMustBeString(user_id_must_be_string)
# check user_id not empty
if len(user_id.strip()) == 0:
raise MissingUserId(user_id_not_provided)
return self.paper_trade_dao.get_paper_trades(user_id)
def update_paper_trade_sell_price(self, user_id: str, paper_trade_index: int, sell_price: float) -> bool:
# check user_id is a string
if isinstance(user_id, str) is False:
raise UserIdMustBeString(user_id_must_be_string)
# check user_id not empty
if len(user_id.strip()) == 0:
raise MissingUserId(user_id_not_provided)
# check paper_trade_index is missing
if paper_trade_index is None:
raise InputMissing(paper_trade_index_not_provided)
# check paper_trade_index is an int
if isinstance(paper_trade_index, int) is False:
raise InputNotInteger(paper_trade_index_must_be_int)
# check sell_price is missing
if sell_price is None:
raise SellPriceMissing(sell_price_not_provided)
# check sell_price is a float
if isinstance(sell_price, float) is False:
raise SellPriceNotFloat(sell_price_must_be_float)
# check if sell_price is a negative number
if sell_price < 0:
raise SellPriceNegative(sell_price_negative)
return self.paper_trade_dao.update_paper_trade_sell_price(user_id, paper_trade_index, sell_price)
def delete_paper_trade(self, user_id: str, paper_trade_id: int) -> int:
# check user_id is a string
if isinstance(user_id, str) is False:
raise UserIdMustBeString(user_id_must_be_string)
# check user_id not empty
if len(user_id.strip()) == 0:
raise MissingUserId(user_id_not_provided)
# check paper_trade_id is missing
if paper_trade_id is None:
raise PaperTradeIdMissing(paper_trade_id_not_provided)
# check paper_trade_id is an int
if isinstance(paper_trade_id, int) is False:
raise PaperTradeIdNotInt(paper_trade_id_must_be_int)
return self.paper_trade_dao.delete_paper_trade(user_id, paper_trade_id) | 0.760651 | 0.177205 |
import time
from pathlib import Path
import os
import numpy as np
from py_diff_pd.env.env_base import EnvBase
from py_diff_pd.common.common import create_folder, ndarray
from py_diff_pd.common.hex_mesh import generate_hex_mesh, hex2obj
from py_diff_pd.common.tet_mesh import generate_tet_mesh, tet2obj, tetrahedralize
from py_diff_pd.common.hex_mesh import get_contact_vertex as get_hex_contact_vertex
from py_diff_pd.common.tet_mesh import get_contact_vertex as get_tet_contact_vertex
from py_diff_pd.core.py_diff_pd_core import HexMesh3d, HexDeformable, StdRealVector
from py_diff_pd.core.py_diff_pd_core import TetMesh3d, TetDeformable
from py_diff_pd.common.renderer import PbrtRenderer
from py_diff_pd.common.project_path import root_path
class BunnyEnv3d(EnvBase):
def __init__(self, seed, folder, options):
    """Bunny drop environment.

    Builds a hex or tet bunny mesh, attaches corotated + volume PD
    energies, gravity, and a planar frictional ground, then records the
    rest state as the initial condition.

    Args:
        seed: Seed for np.random.
        folder: Output folder (created if missing).
        options: Dict of overrides. Recognized keys: 'youngs_modulus',
            'poissons_ratio', 'state_force_parameters' (gravity vector),
            'mesh_type' ('hex' or 'tet'), 'target_com', 'spp'
            (samples per pixel for rendering).
    """
    EnvBase.__init__(self, folder)
    np.random.seed(seed)
    create_folder(folder, exist_ok=True)
    # Material/simulation options with defaults.
    youngs_modulus = options['youngs_modulus'] if 'youngs_modulus' in options else 1e6
    poissons_ratio = options['poissons_ratio'] if 'poissons_ratio' in options else 0.49
    state_force_parameters = options['state_force_parameters'] if 'state_force_parameters' in options else ndarray([0.0, 0.0, -9.81])
    mesh_type = options['mesh_type'] if 'mesh_type' in options else 'hex'
    assert mesh_type in ['hex', 'tet']
    # Mesh parameters: Lame parameters derived from (E, nu).
    la = youngs_modulus * poissons_ratio / ((1 + poissons_ratio) * (1 - 2 * poissons_ratio))
    mu = youngs_modulus / (2 * (1 + poissons_ratio))
    # NOTE(review): density presumably in kg/m^3 — confirm against solver units.
    density = 1e3
    bunny_size = 0.1
    tmp_bin_file_name = '.tmp.bin'
    # Load the mesh: precomputed binary for hex, tetrahedralized OBJ for tet.
    if mesh_type == 'hex':
        bin_file_name = Path(root_path) / 'asset' / 'mesh' / 'bunny_watertight.bin'
        mesh = HexMesh3d()
        mesh.Initialize(str(bin_file_name))
        deformable = HexDeformable()
    elif mesh_type == 'tet':
        obj_file_name = Path(root_path) / 'asset' / 'mesh' / 'bunny_watertight_simplified2.obj'
        verts, eles = tetrahedralize(obj_file_name)
        generate_tet_mesh(verts, eles, tmp_bin_file_name)
        mesh = TetMesh3d()
        mesh.Initialize(str(tmp_bin_file_name))
        deformable = TetDeformable()
    else:
        raise NotImplementedError
    # Rescale the mesh, then round-trip through a temp file to initialize
    # the deformable body with the scaled geometry.
    mesh.Scale(bunny_size)
    mesh.SaveToFile(tmp_bin_file_name)
    deformable.Initialize(tmp_bin_file_name, density, 'none', youngs_modulus, poissons_ratio)
    os.remove(tmp_bin_file_name)
    # Elasticity: the two PD energies whose coefficients (2*mu, la) are
    # differentiated in material_stiffness_differential.
    deformable.AddPdEnergy('corotated', [2 * mu,], [])
    deformable.AddPdEnergy('volume', [la,], [])
    # State-based forces.
    deformable.AddStateForce('gravity', state_force_parameters)
    # Collisions: pick the vertex set that may contact the ground.
    if mesh_type == 'hex':
        friction_node_idx = get_hex_contact_vertex(mesh)
    elif mesh_type == 'tet':
        # NOTE(review): threshold presumably a dihedral-angle cutoff — confirm.
        friction_node_idx = get_tet_contact_vertex(mesh, threshold=np.pi * 1.2)
    else:
        raise NotImplementedError
    # Uncomment the code below if you would like to display the contact set for a sanity check:
    '''
    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(111, projection='3d')
    v = ndarray([ndarray(mesh.py_vertex(idx)) for idx in friction_node_idx])
    ax.scatter(v[:, 0], v[:, 1], v[:, 2])
    plt.show()
    '''
    # Ground plane z = 0 with friction applied to the contact vertices.
    deformable.SetFrictionalBoundary('planar', [0.0, 0.0, 1.0, 0.0], friction_node_idx)
    # Initial states: rest pose, zero velocity, no external force.
    dofs = deformable.dofs()
    act_dofs = deformable.act_dofs()
    q0 = ndarray(mesh.py_vertices())
    v0 = np.zeros(dofs)
    f_ext = np.zeros(dofs)
    # Data members.
    self._deformable = deformable
    self._q0 = q0
    self._v0 = v0
    self._f_ext = f_ext
    self._youngs_modulus = youngs_modulus
    self._poissons_ratio = poissons_ratio
    self._state_force_parameters = state_force_parameters
    # Loss is evaluated only on the final state (see _loss_and_grad).
    self._stepwise_loss = False
    self._target_com = ndarray(options['target_com']) if 'target_com' in options else ndarray([0.15, 0.15, 0.15])
    self._bunny_size = bunny_size
    self._mesh_type = mesh_type
    # Samples per pixel for the PBRT renderer.
    self.__spp = options['spp'] if 'spp' in options else 4
def material_stiffness_differential(self, youngs_modulus, poissons_ratio):
    """Jacobian of the PD energy coefficients w.r.t. (E, nu).

    The 'corotated' energy uses coefficient 2 * mu and the 'volume'
    energy uses la (see the AddPdEnergy calls in __init__), so the
    (la, mu) material Jacobian rows are swapped and scaled accordingly.
    """
    material_jac = self._material_jacobian(youngs_modulus, poissons_ratio)
    stiffness_jac = np.zeros((2, 2))
    stiffness_jac[0], stiffness_jac[1] = 2 * material_jac[1], material_jac[0]
    return stiffness_jac
def is_dirichlet_dof(self, dof):
    # No DOF is pinned in this environment: the bunny is free-falling
    # and interacts with the ground only through the planar frictional
    # boundary set up in __init__.
    return False
def _display_mesh(self, mesh_file, file_name):
    """Render the mesh stored in *mesh_file* to the image *file_name*.

    Draws the bunny as a wireframe over a checkerboard ground, plus two
    marker spheres: blue at the target center of mass and red at the
    mesh's current center of mass.
    """
    # PBRT scene options; the camera placement frames the drop area.
    options = {
        'file_name': file_name,
        'light_map': 'uffizi-large.exr',
        'sample': self.__spp,
        'max_depth': 2,
        'camera_pos': (0.15, -1.75, 0.6),
        'camera_lookat': (0, .15, .4)
    }
    renderer = PbrtRenderer(options)
    # Load the mesh and pick the per-face edge list for the wireframe:
    # quad edges for hex surfaces, triangle edges for tet surfaces.
    if self._mesh_type == 'hex':
        mesh = HexMesh3d()
        mesh.Initialize(mesh_file)
        vertices, faces = hex2obj(mesh)
        fij = [(0, 1), (1, 2), (2, 3), (3, 0)]
    elif self._mesh_type == 'tet':
        mesh = TetMesh3d()
        mesh.Initialize(mesh_file)
        vertices, faces = tet2obj(mesh)
        fij = [(0, 1), (1, 2), (2, 0)]
    else:
        raise NotImplementedError
    # Uniform scale applied to all rendered shapes.
    scale = 3
    # Draw wireframe of the bunny.
    for f in faces:
        for i, j in fij:
            vi = vertices[f[i]]
            vj = vertices[f[j]]
            # Draw line vi to vj as a curve with 4 control points.
            renderer.add_shape_mesh({
                'name': 'curve',
                'point': ndarray([vi, (2 * vi + vj) / 3, (vi + 2 * vj) / 3, vj]),
                'width': 0.001
            },
            color=(0.7, .5, 0.7),
            transforms=[
                ('s', scale)
            ])
    renderer.add_tri_mesh(Path(root_path) / 'asset/mesh/curved_ground.obj',
        texture_img='chkbd_24_0.7', transforms=[('s', 2)])
    # Add target CoM (blue sphere) and mesh CoM (red sphere).
    renderer.add_shape_mesh({ 'name': 'sphere', 'center': self._target_com, 'radius': 0.0075 },
        transforms=[('s', scale)], color=(0.1, 0.1, 0.9))
    com = np.mean(ndarray(mesh.py_vertices()).reshape((-1, 3)), axis=0)
    renderer.add_shape_mesh({ 'name': 'sphere', 'center': com, 'radius': 0.0075 },
        transforms=[('s', scale) ], color=(0.9, 0.1, 0.1))
    renderer.render()
def _loss_and_grad(self, q, v):
# Compute the center of mass.
com = np.mean(q.reshape((-1, 3)), axis=0)
# Compute loss.
com_diff = com - self._target_com
loss = 0.5 * com_diff.dot(com_diff) / (self._bunny_size ** 2)
# Compute grad.
grad_q = np.zeros(q.size)
vertex_num = int(q.size // 3)
for i in range(3):
grad_q[i::3] = com_diff[i] / vertex_num / (self._bunny_size ** 2)
grad_v = np.zeros(v.size) / (self._bunny_size ** 2)
return loss, grad_q, grad_v | python/py_diff_pd/env/bunny_env_3d.py | import time
from pathlib import Path
import os
import numpy as np
from py_diff_pd.env.env_base import EnvBase
from py_diff_pd.common.common import create_folder, ndarray
from py_diff_pd.common.hex_mesh import generate_hex_mesh, hex2obj
from py_diff_pd.common.tet_mesh import generate_tet_mesh, tet2obj, tetrahedralize
from py_diff_pd.common.hex_mesh import get_contact_vertex as get_hex_contact_vertex
from py_diff_pd.common.tet_mesh import get_contact_vertex as get_tet_contact_vertex
from py_diff_pd.core.py_diff_pd_core import HexMesh3d, HexDeformable, StdRealVector
from py_diff_pd.core.py_diff_pd_core import TetMesh3d, TetDeformable
from py_diff_pd.common.renderer import PbrtRenderer
from py_diff_pd.common.project_path import root_path
class BunnyEnv3d(EnvBase):
def __init__(self, seed, folder, options):
EnvBase.__init__(self, folder)
np.random.seed(seed)
create_folder(folder, exist_ok=True)
youngs_modulus = options['youngs_modulus'] if 'youngs_modulus' in options else 1e6
poissons_ratio = options['poissons_ratio'] if 'poissons_ratio' in options else 0.49
state_force_parameters = options['state_force_parameters'] if 'state_force_parameters' in options else ndarray([0.0, 0.0, -9.81])
mesh_type = options['mesh_type'] if 'mesh_type' in options else 'hex'
assert mesh_type in ['hex', 'tet']
# Mesh parameters.
la = youngs_modulus * poissons_ratio / ((1 + poissons_ratio) * (1 - 2 * poissons_ratio))
mu = youngs_modulus / (2 * (1 + poissons_ratio))
density = 1e3
bunny_size = 0.1
tmp_bin_file_name = '.tmp.bin'
if mesh_type == 'hex':
bin_file_name = Path(root_path) / 'asset' / 'mesh' / 'bunny_watertight.bin'
mesh = HexMesh3d()
mesh.Initialize(str(bin_file_name))
deformable = HexDeformable()
elif mesh_type == 'tet':
obj_file_name = Path(root_path) / 'asset' / 'mesh' / 'bunny_watertight_simplified2.obj'
verts, eles = tetrahedralize(obj_file_name)
generate_tet_mesh(verts, eles, tmp_bin_file_name)
mesh = TetMesh3d()
mesh.Initialize(str(tmp_bin_file_name))
deformable = TetDeformable()
else:
raise NotImplementedError
# Rescale the mesh.
mesh.Scale(bunny_size)
mesh.SaveToFile(tmp_bin_file_name)
deformable.Initialize(tmp_bin_file_name, density, 'none', youngs_modulus, poissons_ratio)
os.remove(tmp_bin_file_name)
# Elasticity.
deformable.AddPdEnergy('corotated', [2 * mu,], [])
deformable.AddPdEnergy('volume', [la,], [])
# State-based forces.
deformable.AddStateForce('gravity', state_force_parameters)
# Collisions.
if mesh_type == 'hex':
friction_node_idx = get_hex_contact_vertex(mesh)
elif mesh_type == 'tet':
friction_node_idx = get_tet_contact_vertex(mesh, threshold=np.pi * 1.2)
else:
raise NotImplementedError
# Uncomment the code below if you would like to display the contact set for a sanity check:
'''
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
v = ndarray([ndarray(mesh.py_vertex(idx)) for idx in friction_node_idx])
ax.scatter(v[:, 0], v[:, 1], v[:, 2])
plt.show()
'''
# Friction_node_idx = all vertices on the edge.
deformable.SetFrictionalBoundary('planar', [0.0, 0.0, 1.0, 0.0], friction_node_idx)
# Initial states.
dofs = deformable.dofs()
act_dofs = deformable.act_dofs()
q0 = ndarray(mesh.py_vertices())
v0 = np.zeros(dofs)
f_ext = np.zeros(dofs)
# Data members.
self._deformable = deformable
self._q0 = q0
self._v0 = v0
self._f_ext = f_ext
self._youngs_modulus = youngs_modulus
self._poissons_ratio = poissons_ratio
self._state_force_parameters = state_force_parameters
self._stepwise_loss = False
self._target_com = ndarray(options['target_com']) if 'target_com' in options else ndarray([0.15, 0.15, 0.15])
self._bunny_size = bunny_size
self._mesh_type = mesh_type
self.__spp = options['spp'] if 'spp' in options else 4
def material_stiffness_differential(self, youngs_modulus, poissons_ratio):
jac = self._material_jacobian(youngs_modulus, poissons_ratio)
jac_total = np.zeros((2, 2))
jac_total[0] = 2 * jac[1]
jac_total[1] = jac[0]
return jac_total
def is_dirichlet_dof(self, dof):
return False
def _display_mesh(self, mesh_file, file_name):
options = {
'file_name': file_name,
'light_map': 'uffizi-large.exr',
'sample': self.__spp,
'max_depth': 2,
'camera_pos': (0.15, -1.75, 0.6),
'camera_lookat': (0, .15, .4)
}
renderer = PbrtRenderer(options)
if self._mesh_type == 'hex':
mesh = HexMesh3d()
mesh.Initialize(mesh_file)
vertices, faces = hex2obj(mesh)
fij = [(0, 1), (1, 2), (2, 3), (3, 0)]
elif self._mesh_type == 'tet':
mesh = TetMesh3d()
mesh.Initialize(mesh_file)
vertices, faces = tet2obj(mesh)
fij = [(0, 1), (1, 2), (2, 0)]
else:
raise NotImplementedError
scale = 3
# Draw wireframe of the bunny.
for f in faces:
for i, j in fij:
vi = vertices[f[i]]
vj = vertices[f[j]]
# Draw line vi to vj.
renderer.add_shape_mesh({
'name': 'curve',
'point': ndarray([vi, (2 * vi + vj) / 3, (vi + 2 * vj) / 3, vj]),
'width': 0.001
},
color=(0.7, .5, 0.7),
transforms=[
('s', scale)
])
renderer.add_tri_mesh(Path(root_path) / 'asset/mesh/curved_ground.obj',
texture_img='chkbd_24_0.7', transforms=[('s', 2)])
# Add target CoM and mesh CoM.
renderer.add_shape_mesh({ 'name': 'sphere', 'center': self._target_com, 'radius': 0.0075 },
transforms=[('s', scale)], color=(0.1, 0.1, 0.9))
com = np.mean(ndarray(mesh.py_vertices()).reshape((-1, 3)), axis=0)
renderer.add_shape_mesh({ 'name': 'sphere', 'center': com, 'radius': 0.0075 },
transforms=[('s', scale) ], color=(0.9, 0.1, 0.1))
renderer.render()
def _loss_and_grad(self, q, v):
# Compute the center of mass.
com = np.mean(q.reshape((-1, 3)), axis=0)
# Compute loss.
com_diff = com - self._target_com
loss = 0.5 * com_diff.dot(com_diff) / (self._bunny_size ** 2)
# Compute grad.
grad_q = np.zeros(q.size)
vertex_num = int(q.size // 3)
for i in range(3):
grad_q[i::3] = com_diff[i] / vertex_num / (self._bunny_size ** 2)
grad_v = np.zeros(v.size) / (self._bunny_size ** 2)
return loss, grad_q, grad_v | 0.555918 | 0.325668 |
import asyncio
import disnake
from typing import Dict, List
from disnake import RawMessageDeleteEvent, RawMessageUpdateEvent
from disnake.ext.commands import Bot
import utilities.random
from models.database.message import Message
from services.database.message_db import retrieve_copy_messages
from services.database.portal_db import load_channels, load_portals, add_portal, add_channel, remove_channel
from services.portal.chain import Chain
class Transmission:
    """Routes messages between channels linked together by a portal.

    Attributes:
        channels: Maps channel_id -> portal_id.
        portals: Maps portal_id -> Chain of linked channels.
    """
    channels: Dict[int, int]  # key=channel_id, value=portal_id
    portals: Dict[int, Chain]

    async def initialize(self, bot: Bot):
        """Load the channel map and portal chains from the database."""
        # NOTE(review): load_channels is not awaited while load_portals is;
        # presumably load_channels is synchronous — confirm.
        self.channels = load_channels(bot)
        self.portals = await load_portals(bot)

    async def add_channel_to_portal(self, channel: disnake.TextChannel,
                                    portal_id: int):
        """Link *channel* into an existing portal and persist the link."""
        await self.portals[portal_id].add(channel)
        self.channels[channel.id] = portal_id
        await add_channel(portal_id, channel.id)

    async def remove_channel_from_portal(self, channel_id: int) -> int:
        """Unlink *channel_id* from its portal; return that portal's id."""
        portal_id: int = self.channels[channel_id]
        await self.portals[portal_id].remove(channel_id)
        del self.channels[channel_id]
        await remove_channel(channel_id)
        return portal_id

    async def create_portal(self, primary_channel: disnake.TextChannel) -> int:
        """Create a new portal seeded with *primary_channel*; return its id."""
        portal_id = utilities.random.generate_random_int()
        self.portals[portal_id] = await Chain.new([])
        await add_portal(portal_id, primary_channel.id)
        # Register the primary channel as the portal's first member.
        await self.add_channel_to_portal(primary_channel, portal_id)
        return portal_id

    def portal_id_exists(self, portal_id: int) -> bool:
        """Return True if a portal with *portal_id* is registered.

        Fixed: the previous ``self.portals[portal_id]`` lookup raised
        KeyError for unknown ids instead of returning False (and returned
        False for an existing portal whose Chain evaluated falsy).
        """
        return portal_id in self.portals

    def channel_in_portal(self, channel_id: int) -> bool:
        """Return True if *channel_id* is linked to any portal.

        Fixed: membership test instead of ``.get`` truthiness, which
        reported False for a channel mapped to a falsy portal id.
        """
        if self.channels is None:
            return False
        return channel_id in self.channels

    async def handle_message(self, message: disnake.Message):
        """Forward *message* to the chain of the portal its channel is in."""
        if self.channels is None:
            return
        portal_id = self.channels.get(message.channel.id)
        # Explicit None check: `if not portal_id` would also drop messages
        # for a portal whose id is falsy.
        if portal_id is None:
            return
        chain: Chain = self.portals[portal_id]
        await chain.send(message)

    async def handle_update(self, updated_message: RawMessageUpdateEvent,
                            bot: Bot):
        """Propagate a content edit of an original message to its copies."""
        original_message = bot.get_message(updated_message.message_id)
        # Only content edits carry a "content" key in the raw payload.
        if updated_message.data.get("content") is None:
            return
        try:
            copy_messages_db: List[Message] = await retrieve_copy_messages(
                original_message.id)
        except AttributeError:
            # original_message is None: the edited message is not cached.
            print("Original message is not in the database")
            return
        copy_messages: List[disnake.Message] = []
        for copy_message in copy_messages_db:
            message = bot.get_message(copy_message.copy_message_id)
            # Fixed: guard against uncached copies (as handle_delete does)
            # before touching .author, which previously raised
            # AttributeError on None.
            if message is None:
                continue
            if message.author.bot:
                copy_messages.append(message)
        await asyncio.gather(*[
            self.portals[self.channels[copy_message.channel.id]].links[
                copy_message.channel.id].update(copy_message, updated_message)
            for copy_message in copy_messages
        ])

    async def handle_delete(self, payload: RawMessageDeleteEvent, bot: Bot):
        """Delete every cached copy of a message deleted at its source."""
        copy_messages_db: List[Message] = await retrieve_copy_messages(
            payload.message_id)
        copy_messages: List[disnake.Message] = []
        for copy_message in copy_messages_db:
            message = bot.get_message(copy_message.copy_message_id)
            if not message:
                continue
            copy_messages.append(message)
        await asyncio.gather(
            *[copy_message.delete() for copy_message in copy_messages])
transmission_service = Transmission() | services/portal/transmission.py | import asyncio
import disnake
from typing import Dict, List
from disnake import RawMessageDeleteEvent, RawMessageUpdateEvent
from disnake.ext.commands import Bot
import utilities.random
from models.database.message import Message
from services.database.message_db import retrieve_copy_messages
from services.database.portal_db import load_channels, load_portals, add_portal, add_channel, remove_channel
from services.portal.chain import Chain
class Transmission:
    """Routes messages between channels linked together by a portal.

    Attributes:
        channels: Maps channel_id -> portal_id.
        portals: Maps portal_id -> Chain of linked channels.
    """
    channels: Dict[int, int]  # key=channel_id, value=portal_id
    portals: Dict[int, Chain]

    async def initialize(self, bot: Bot):
        """Load the channel map and portal chains from the database."""
        # NOTE(review): load_channels is not awaited while load_portals is;
        # presumably load_channels is synchronous — confirm.
        self.channels = load_channels(bot)
        self.portals = await load_portals(bot)

    async def add_channel_to_portal(self, channel: disnake.TextChannel,
                                    portal_id: int):
        """Link *channel* into an existing portal and persist the link."""
        await self.portals[portal_id].add(channel)
        self.channels[channel.id] = portal_id
        await add_channel(portal_id, channel.id)

    async def remove_channel_from_portal(self, channel_id: int) -> int:
        """Unlink *channel_id* from its portal; return that portal's id."""
        portal_id: int = self.channels[channel_id]
        await self.portals[portal_id].remove(channel_id)
        del self.channels[channel_id]
        await remove_channel(channel_id)
        return portal_id

    async def create_portal(self, primary_channel: disnake.TextChannel) -> int:
        """Create a new portal seeded with *primary_channel*; return its id."""
        portal_id = utilities.random.generate_random_int()
        self.portals[portal_id] = await Chain.new([])
        await add_portal(portal_id, primary_channel.id)
        # Register the primary channel as the portal's first member.
        await self.add_channel_to_portal(primary_channel, portal_id)
        return portal_id

    def portal_id_exists(self, portal_id: int) -> bool:
        """Return True if a portal with *portal_id* is registered.

        Fixed: the previous ``self.portals[portal_id]`` lookup raised
        KeyError for unknown ids instead of returning False (and returned
        False for an existing portal whose Chain evaluated falsy).
        """
        return portal_id in self.portals

    def channel_in_portal(self, channel_id: int) -> bool:
        """Return True if *channel_id* is linked to any portal.

        Fixed: membership test instead of ``.get`` truthiness, which
        reported False for a channel mapped to a falsy portal id.
        """
        if self.channels is None:
            return False
        return channel_id in self.channels

    async def handle_message(self, message: disnake.Message):
        """Forward *message* to the chain of the portal its channel is in."""
        if self.channels is None:
            return
        portal_id = self.channels.get(message.channel.id)
        # Explicit None check: `if not portal_id` would also drop messages
        # for a portal whose id is falsy.
        if portal_id is None:
            return
        chain: Chain = self.portals[portal_id]
        await chain.send(message)

    async def handle_update(self, updated_message: RawMessageUpdateEvent,
                            bot: Bot):
        """Propagate a content edit of an original message to its copies."""
        original_message = bot.get_message(updated_message.message_id)
        # Only content edits carry a "content" key in the raw payload.
        if updated_message.data.get("content") is None:
            return
        try:
            copy_messages_db: List[Message] = await retrieve_copy_messages(
                original_message.id)
        except AttributeError:
            # original_message is None: the edited message is not cached.
            print("Original message is not in the database")
            return
        copy_messages: List[disnake.Message] = []
        for copy_message in copy_messages_db:
            message = bot.get_message(copy_message.copy_message_id)
            # Fixed: guard against uncached copies (as handle_delete does)
            # before touching .author, which previously raised
            # AttributeError on None.
            if message is None:
                continue
            if message.author.bot:
                copy_messages.append(message)
        await asyncio.gather(*[
            self.portals[self.channels[copy_message.channel.id]].links[
                copy_message.channel.id].update(copy_message, updated_message)
            for copy_message in copy_messages
        ])

    async def handle_delete(self, payload: RawMessageDeleteEvent, bot: Bot):
        """Delete every cached copy of a message deleted at its source."""
        copy_messages_db: List[Message] = await retrieve_copy_messages(
            payload.message_id)
        copy_messages: List[disnake.Message] = []
        for copy_message in copy_messages_db:
            message = bot.get_message(copy_message.copy_message_id)
            if not message:
                continue
            copy_messages.append(message)
        await asyncio.gather(
            *[copy_message.delete() for copy_message in copy_messages])
transmission_service = Transmission() | 0.72487 | 0.064535 |
"""E(x)hentai components."""
import os
import re
import requests
from bs4 import BeautifulSoup
import modules.misc as misc
from modules import exception
_LOGIN_URL = 'https://forums.e-hentai.org/index.php'
_ACCOUNT_URL = 'https://e-hentai.org/home.php'
_EXHENTAI_URL = 'https://exhentai.org/'
def _ban_checker(html: BeautifulSoup):
    """Raise IPBannedError when *html* is an e-hentai IP-ban notice page.

    A ban page has no <head>; the remaining ban duration is scraped from
    the notice paragraph. Units absent from the message default to int 0
    while matched values stay strings (preserved from the original
    implementation).
    """
    if html.head:
        return
    if 'Your IP address has been' not in html.body.p.string:
        return
    message = html.body.p.string
    remaining = []
    for unit in ('hours', 'minutes', 'seconds'):
        matched = re.match(r'.* (\d{1,2}) ' + unit, message)
        remaining.append(matched.group(1) if matched else 0)
    raise exception.IPBannedError(*remaining)
def login(se, proxy: dict, uid: str, pw: str) -> bool:
"""
Login and set cookies for exhentai.
Exceptions:
globj.ValidationError: Raised when username/pw is wrong, or have no permission to get into exhentai.
globj.ResponseError: Raised when server sends abnormal response(include AttributeError).
"""
try:
with se.post(_LOGIN_URL,
params={'act': 'Login', 'CODE': '01'},
data={'CookieDate': '1', 'UserName': uid, 'PassWord': pw},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as login_res:
login_html = BeautifulSoup(login_res.text, 'lxml')
se.cookies.update(login_res.cookies) # Set cookies
if login_html.head.title.string == 'Please stand by...':
with se.get(_EXHENTAI_URL,
proxies=proxy,
headers={'User-Agent': misc.USER_AGENT},
timeout=5) as ex_res:
ex_html = BeautifulSoup(ex_res.text, 'lxml')
if ex_html.head.title.string == 'ExHentai.org':
se.cookies.update(ex_res.cookies) # Set cookies for exhentai
return True
else:
raise exception.ValidationError('Login: Cannot get into exhentai.')
elif login_html.head.title.string == 'Log In':
raise exception.ValidationError('Login: Incorrect username or password.')
else:
raise exception.ResponseError('Login: Abnormal response.')
except requests.Timeout:
raise requests.Timeout('Login: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Login: ' + repr(e))
def account_info(se, proxy: dict) -> tuple:
"""
Get download limitation(used/all).
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
"""
try:
with se.get(_ACCOUNT_URL,
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as info_res:
info_html = BeautifulSoup(info_res.text, 'lxml')
_ban_checker(info_html)
info_node = info_html.find('div', class_='homebox')
if info_node:
limit = info_node('strong')
return limit[0].string, limit[1].string
else:
raise exception.ResponseError('Account_info: Abnormal response.')
except requests.Timeout:
raise requests.Timeout('Account_info: Timeout.')
def information(se, proxy: dict, addr: str) -> dict:
"""
Fetch gallery information, include misc info and thumbnail.
Args:
se: Session instance.
proxy: (Optional) The proxy used.
addr: Gallery address.
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
"""
re_thumb = re.compile(r'.*url\((.*)\).*')
try:
with se.get(addr,
params={'inline_set': 'ts_m'},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as gallery_res:
gallery_html = BeautifulSoup(gallery_res.text, 'lxml')
_ban_checker(gallery_html)
if 'Gallery not found.' in gallery_html.body.get_text() or 'Key missing' in gallery_html.body.get_text():
raise exception.WrongAddressError('Wrong address provided.')
name: str = gallery_html.find('h1', id='gj').string # Japanese name is prior
if not name:
name = gallery_html.find('h1', id='gn').string
info = gallery_html.find_all('td', class_='gdt2')
thumb = re_thumb.match(gallery_html.find('div', id='gd1').div['style']).group(1)
if name and info and thumb:
return {
'addr': addr,
'name': name,
'size': info[4].string,
'page': info[5].string[:-6],
'thumb': thumb
}
else:
raise exception.ResponseError('Information: Abnormal response.')
except requests.Timeout:
raise requests.Timeout('Information: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Information: ' + repr(e))
def fetch_keys(se, proxy: dict, info: dict) -> dict:
"""
Fetch keys(imgkeys and showkey) from gallery.
Args:
se: Session instance.
proxy: (Optional) The proxy used.
info: Information of the gallery.
Return:
A dictionary. {'page': imgkey, '0': showkey}
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
"""
re_imgkey = re.compile(r'https://exhentai\.org/s/(\w{10})/\d*-(\d{1,4})')
re_showkey = re.compile(r'[\S\s]*showkey="(\w{11})"[\S\s]*')
gid = info['addr'].split('/')[-3]
pn = int(info['page']) // 40 + 1 # range(0) has no element
keys = dict()
try:
for p in range(pn):
with se.get(info['addr'],
params={'inline_set': 'ts_m', 'p': p},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as gallery_res:
gallery_html = BeautifulSoup(gallery_res.text, 'lxml')
_ban_checker(gallery_html)
# Fetch imgkey from every picture
pics = gallery_html.find_all('div', class_='gdtm')
for item in pics:
match = re_imgkey.match(item.a['href'])
keys[match.group(2)] = match.group(1)
# Fetch showkey from first picture
showkey_url = '/'.join(['https://exhentai.org/s', keys['1'], gid + '-1'])
with se.get(showkey_url,
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as showkey_res:
showkey_html = BeautifulSoup(showkey_res.text, 'lxml')
_ban_checker(showkey_html)
keys['0'] = re_showkey.match(showkey_html('script')[1].string).group(1)
return keys
except requests.Timeout:
raise requests.Timeout('Fetch_keys: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Fetch_keys: ' + repr(e))
def download(se, proxy: dict, info: dict, keys: dict, page: int, path: str, rename=False, rewrite=False):
"""
Download one picture.
Args:
se: Session instance.
proxy: (Optional) The proxy used.
info: Information of the gallery.
keys: Keys include imgkeys and showkey.
page: Page number.
path: Save root path.
rename: Control whether rename to origin name/image number.
rewrite: Overwrite image instead of skipping it.
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
globj.LimitationReachedError: Raised when reach view limitation.
"""
gid = info['addr'].split('/')[-3]
try:
with se.post(_EXHENTAI_URL + 'api.php',
json={'method': 'showpage',
'gid': int(gid),
'page': int(page),
'imgkey': keys[str(page)],
'showkey': keys['0']},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as dl_res: # Fetch original url of picture
dl_json = dl_res.json()
if dl_json.get('error'): # Wrong imgkey or showkey
raise exception.ResponseError('Download: ' + dl_json['error'])
if dl_json.get('i3'): # Whether Reach limitation
url_html = BeautifulSoup(dl_json['i3'], 'lxml')
if url_html.a.img['src'] == 'https://exhentai.org/img/509.gif':
raise exception.LimitationReachedError(page)
if dl_json.get('i7'):
url_html = BeautifulSoup(dl_json['i7'], 'lxml') # Origin image
origin = url_html.a['href']
elif dl_json.get('i3'):
url_html = BeautifulSoup(dl_json['i3'], 'lxml') # Showing image is original
origin = url_html.a.img['src']
else:
raise exception.ResponseError('Download: No plenty elements.')
folder_name = misc.name_verify(info['name'])
folder_path = os.path.join(path, folder_name)
try: # Prevent threads starting at same time
os.makedirs(folder_path)
print('mkdir:', folder_path)
except FileExistsError:
pass
with se.get(origin,
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
stream=True,
timeout=5) as pic_res:
url = pic_res.url
if url.split('/')[2] == 'exhentai.org': # If response cannot redirect(302), raise exception
raise exception.LimitationReachedError(page)
file_name = os.path.split(pic_res.url)[-1].rstrip('?dl=1') # Get file name from url
if rename:
file_name = str(page) + os.path.splitext(file_name)[1]
real_path = os.path.join(folder_path, file_name)
if not os.path.exists(real_path) or rewrite: # If file exists or not rewrite, skip it
if os.path.exists(real_path):
os.remove(real_path)
print('Downloading page {0} to {1}'.format(page, real_path))
with open(real_path, 'ab') as data:
for chunk in pic_res.iter_content():
data.write(chunk)
else:
print('Skip:', file_name)
except requests.Timeout:
raise requests.Timeout('Download: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Download: ' + repr(e))
if __name__ == '__main__':
pass | modules/ehentai/core.py | """E(x)hentai components."""
import os
import re
import requests
from bs4 import BeautifulSoup
import modules.misc as misc
from modules import exception
_LOGIN_URL = 'https://forums.e-hentai.org/index.php'
_ACCOUNT_URL = 'https://e-hentai.org/home.php'
_EXHENTAI_URL = 'https://exhentai.org/'
def _ban_checker(html: BeautifulSoup):
if not html.head and 'Your IP address has been' in html.body.p.string:
msg = html.body.p.string
match_h = re.match(r'.* (\d{1,2}) hours', msg)
match_m = re.match(r'.* (\d{1,2}) minutes', msg)
match_s = re.match(r'.* (\d{1,2}) seconds', msg)
h = match_h.group(1) if match_h else 0
m = match_m.group(1) if match_m else 0
s = match_s.group(1) if match_s else 0
raise exception.IPBannedError(h, m, s)
def login(se, proxy: dict, uid: str, pw: str) -> bool:
"""
Login and set cookies for exhentai.
Exceptions:
globj.ValidationError: Raised when username/pw is wrong, or have no permission to get into exhentai.
globj.ResponseError: Raised when server sends abnormal response(include AttributeError).
"""
try:
with se.post(_LOGIN_URL,
params={'act': 'Login', 'CODE': '01'},
data={'CookieDate': '1', 'UserName': uid, 'PassWord': pw},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as login_res:
login_html = BeautifulSoup(login_res.text, 'lxml')
se.cookies.update(login_res.cookies) # Set cookies
if login_html.head.title.string == 'Please stand by...':
with se.get(_EXHENTAI_URL,
proxies=proxy,
headers={'User-Agent': misc.USER_AGENT},
timeout=5) as ex_res:
ex_html = BeautifulSoup(ex_res.text, 'lxml')
if ex_html.head.title.string == 'ExHentai.org':
se.cookies.update(ex_res.cookies) # Set cookies for exhentai
return True
else:
raise exception.ValidationError('Login: Cannot get into exhentai.')
elif login_html.head.title.string == 'Log In':
raise exception.ValidationError('Login: Incorrect username or password.')
else:
raise exception.ResponseError('Login: Abnormal response.')
except requests.Timeout:
raise requests.Timeout('Login: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Login: ' + repr(e))
def account_info(se, proxy: dict) -> tuple:
"""
Get download limitation(used/all).
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
"""
try:
with se.get(_ACCOUNT_URL,
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as info_res:
info_html = BeautifulSoup(info_res.text, 'lxml')
_ban_checker(info_html)
info_node = info_html.find('div', class_='homebox')
if info_node:
limit = info_node('strong')
return limit[0].string, limit[1].string
else:
raise exception.ResponseError('Account_info: Abnormal response.')
except requests.Timeout:
raise requests.Timeout('Account_info: Timeout.')
def information(se, proxy: dict, addr: str) -> dict:
"""
Fetch gallery information, include misc info and thumbnail.
Args:
se: Session instance.
proxy: (Optional) The proxy used.
addr: Gallery address.
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
"""
re_thumb = re.compile(r'.*url\((.*)\).*')
try:
with se.get(addr,
params={'inline_set': 'ts_m'},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as gallery_res:
gallery_html = BeautifulSoup(gallery_res.text, 'lxml')
_ban_checker(gallery_html)
if 'Gallery not found.' in gallery_html.body.get_text() or 'Key missing' in gallery_html.body.get_text():
raise exception.WrongAddressError('Wrong address provided.')
name: str = gallery_html.find('h1', id='gj').string # Japanese name is prior
if not name:
name = gallery_html.find('h1', id='gn').string
info = gallery_html.find_all('td', class_='gdt2')
thumb = re_thumb.match(gallery_html.find('div', id='gd1').div['style']).group(1)
if name and info and thumb:
return {
'addr': addr,
'name': name,
'size': info[4].string,
'page': info[5].string[:-6],
'thumb': thumb
}
else:
raise exception.ResponseError('Information: Abnormal response.')
except requests.Timeout:
raise requests.Timeout('Information: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Information: ' + repr(e))
def fetch_keys(se, proxy: dict, info: dict) -> dict:
"""
Fetch keys(imgkeys and showkey) from gallery.
Args:
se: Session instance.
proxy: (Optional) The proxy used.
info: Information of the gallery.
Return:
A dictionary. {'page': imgkey, '0': showkey}
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
"""
re_imgkey = re.compile(r'https://exhentai\.org/s/(\w{10})/\d*-(\d{1,4})')
re_showkey = re.compile(r'[\S\s]*showkey="(\w{11})"[\S\s]*')
gid = info['addr'].split('/')[-3]
pn = int(info['page']) // 40 + 1 # range(0) has no element
keys = dict()
try:
for p in range(pn):
with se.get(info['addr'],
params={'inline_set': 'ts_m', 'p': p},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as gallery_res:
gallery_html = BeautifulSoup(gallery_res.text, 'lxml')
_ban_checker(gallery_html)
# Fetch imgkey from every picture
pics = gallery_html.find_all('div', class_='gdtm')
for item in pics:
match = re_imgkey.match(item.a['href'])
keys[match.group(2)] = match.group(1)
# Fetch showkey from first picture
showkey_url = '/'.join(['https://exhentai.org/s', keys['1'], gid + '-1'])
with se.get(showkey_url,
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as showkey_res:
showkey_html = BeautifulSoup(showkey_res.text, 'lxml')
_ban_checker(showkey_html)
keys['0'] = re_showkey.match(showkey_html('script')[1].string).group(1)
return keys
except requests.Timeout:
raise requests.Timeout('Fetch_keys: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Fetch_keys: ' + repr(e))
def download(se, proxy: dict, info: dict, keys: dict, page: int, path: str, rename=False, rewrite=False):
"""
Download one picture.
Args:
se: Session instance.
proxy: (Optional) The proxy used.
info: Information of the gallery.
keys: Keys include imgkeys and showkey.
page: Page number.
path: Save root path.
rename: Control whether rename to origin name/image number.
rewrite: Overwrite image instead of skipping it.
Exceptions:
globj.ResponseError: Raised when server sends abnormal response.
globj.LimitationReachedError: Raised when reach view limitation.
"""
gid = info['addr'].split('/')[-3]
try:
with se.post(_EXHENTAI_URL + 'api.php',
json={'method': 'showpage',
'gid': int(gid),
'page': int(page),
'imgkey': keys[str(page)],
'showkey': keys['0']},
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
timeout=5) as dl_res: # Fetch original url of picture
dl_json = dl_res.json()
if dl_json.get('error'): # Wrong imgkey or showkey
raise exception.ResponseError('Download: ' + dl_json['error'])
if dl_json.get('i3'): # Whether Reach limitation
url_html = BeautifulSoup(dl_json['i3'], 'lxml')
if url_html.a.img['src'] == 'https://exhentai.org/img/509.gif':
raise exception.LimitationReachedError(page)
if dl_json.get('i7'):
url_html = BeautifulSoup(dl_json['i7'], 'lxml') # Origin image
origin = url_html.a['href']
elif dl_json.get('i3'):
url_html = BeautifulSoup(dl_json['i3'], 'lxml') # Showing image is original
origin = url_html.a.img['src']
else:
raise exception.ResponseError('Download: No plenty elements.')
folder_name = misc.name_verify(info['name'])
folder_path = os.path.join(path, folder_name)
try: # Prevent threads starting at same time
os.makedirs(folder_path)
print('mkdir:', folder_path)
except FileExistsError:
pass
with se.get(origin,
headers={'User-Agent': misc.USER_AGENT},
proxies=proxy,
stream=True,
timeout=5) as pic_res:
url = pic_res.url
if url.split('/')[2] == 'exhentai.org': # If response cannot redirect(302), raise exception
raise exception.LimitationReachedError(page)
file_name = os.path.split(pic_res.url)[-1].rstrip('?dl=1') # Get file name from url
if rename:
file_name = str(page) + os.path.splitext(file_name)[1]
real_path = os.path.join(folder_path, file_name)
if not os.path.exists(real_path) or rewrite: # If file exists or not rewrite, skip it
if os.path.exists(real_path):
os.remove(real_path)
print('Downloading page {0} to {1}'.format(page, real_path))
with open(real_path, 'ab') as data:
for chunk in pic_res.iter_content():
data.write(chunk)
else:
print('Skip:', file_name)
except requests.Timeout:
raise requests.Timeout('Download: Timeout.')
except AttributeError as e:
raise exception.ResponseError('Download: ' + repr(e))
if __name__ == '__main__':
pass | 0.412412 | 0.132178 |
import c4d
from c4d import utils
from c4d.modules import bodypaint
def main():
# Retrieves active UVSet
handle = bodypaint.GetActiveUVSet(doc, c4d.GETACTIVEUVSET_ALL)
if not handle:
print "No active UVSet!"
return
# Prints UVSet information
print "UV Handle Data:"
print "Handle:", handle
print "Handle Mode:", handle.GetMode()
print "Handle Points:", handle.GetPoints()
print "Handle Polygons:", handle.GetPolys()
print "Handle Polygon Selection:", handle.GetPolySel()
print "Handle Hidden Polygons:", handle.GetPolyHid()
print "Handle Point Selection:", handle.GetUVPointSel()
print "Handle Point Count:", handle.GetPointCount()
print "Handle Polygon Count:", handle.GetPolyCount()
print "Handle Object:", handle.GetBaseObject()
print "Handle Editable:", handle.IsEditable()
print "Handle UVW:", handle.GetUVW()
# Builds UVCOMMAND_TRANSFORM container for the command settings
settings = c4d.BaseContainer()
settings[c4d.UVCOMMAND_TRANSFORM_MOVE_X] = 0
settings[c4d.UVCOMMAND_TRANSFORM_MOVE_Y] = 0
settings[c4d.UVCOMMAND_TRANSFORM_SCALE_X] = 1
settings[c4d.UVCOMMAND_TRANSFORM_SCALE_Y] = 1
settings[c4d.UVCOMMAND_TRANSFORM_ANGLE] = utils.DegToRad(90)
# Retrieves UVW list
uvw = handle.GetUVW()
if uvw is None:
return
# Calls UVCOMMAND_TRANSFORM to change UVW list
ret = bodypaint.CallUVCommand(handle.GetPoints(), handle.GetPointCount(), handle.GetPolys(), handle.GetPolyCount(), uvw,
handle.GetPolySel(), handle.GetUVPointSel(), op, handle.GetMode(), c4d.UVCOMMAND_TRANSFORM, settings)
if not ret:
print "CallUVCommand() failed!"
return
print "CallUVCommand() successfully called"
# Sets the transformedUVW from Texture View
if handle.SetUVWFromTextureView(uvw, True, True, True):
print "UVW from Texture View successfully set"
else:
print "UVW from Texture View failed to be set!"
# Releases active UVSet
bodypaint.FreeActiveUVSet(handle)
if __name__=='__main__':
main() | scripts/release18/CallUVCommand.py |
import c4d
from c4d import utils
from c4d.modules import bodypaint
def main():
# Retrieves active UVSet
handle = bodypaint.GetActiveUVSet(doc, c4d.GETACTIVEUVSET_ALL)
if not handle:
print "No active UVSet!"
return
# Prints UVSet information
print "UV Handle Data:"
print "Handle:", handle
print "Handle Mode:", handle.GetMode()
print "Handle Points:", handle.GetPoints()
print "Handle Polygons:", handle.GetPolys()
print "Handle Polygon Selection:", handle.GetPolySel()
print "Handle Hidden Polygons:", handle.GetPolyHid()
print "Handle Point Selection:", handle.GetUVPointSel()
print "Handle Point Count:", handle.GetPointCount()
print "Handle Polygon Count:", handle.GetPolyCount()
print "Handle Object:", handle.GetBaseObject()
print "Handle Editable:", handle.IsEditable()
print "Handle UVW:", handle.GetUVW()
# Builds UVCOMMAND_TRANSFORM container for the command settings
settings = c4d.BaseContainer()
settings[c4d.UVCOMMAND_TRANSFORM_MOVE_X] = 0
settings[c4d.UVCOMMAND_TRANSFORM_MOVE_Y] = 0
settings[c4d.UVCOMMAND_TRANSFORM_SCALE_X] = 1
settings[c4d.UVCOMMAND_TRANSFORM_SCALE_Y] = 1
settings[c4d.UVCOMMAND_TRANSFORM_ANGLE] = utils.DegToRad(90)
# Retrieves UVW list
uvw = handle.GetUVW()
if uvw is None:
return
# Calls UVCOMMAND_TRANSFORM to change UVW list
ret = bodypaint.CallUVCommand(handle.GetPoints(), handle.GetPointCount(), handle.GetPolys(), handle.GetPolyCount(), uvw,
handle.GetPolySel(), handle.GetUVPointSel(), op, handle.GetMode(), c4d.UVCOMMAND_TRANSFORM, settings)
if not ret:
print "CallUVCommand() failed!"
return
print "CallUVCommand() successfully called"
# Sets the transformedUVW from Texture View
if handle.SetUVWFromTextureView(uvw, True, True, True):
print "UVW from Texture View successfully set"
else:
print "UVW from Texture View failed to be set!"
# Releases active UVSet
bodypaint.FreeActiveUVSet(handle)
if __name__=='__main__':
main() | 0.441914 | 0.09611 |
from biokbase.workspace.client import Workspace
import biokbase.auth
import os
from getpass import getpass
import json
import time
prod_ws = 'https://kbase.us/services/ws'
ci_ws = 'https://ci.kbase.us/services/ws'
ws_metadata = {
'is_temporary': False,
'narrative_nice_name': None
}
def fetch_narrative(nar_id, auth_token, url=ci_ws, file_name=None):
"""
Fetches a Narrative object with the given reference id (of the form ##/##).
If a file_name is given, then it is printed to that file.
If the narrative is found, the jsonized string of it is returned.
If nothing is found, an empty Dict is returned.
"""
ws_client = Workspace(url=url, token=auth_token)
nar_data = ws_client.get_objects([{'ref':nar_id}])
if len(nar_data) > 0:
nar_json = json.dumps(nar_data[0])
if file_name is not None:
f = open(file_name, 'w')
f.write(nar_json)
f.close()
return nar_json
return {}
def upload_narrative(nar_file, auth_token, url=ci_ws, set_public=False):
"""
Uploads a Narrative from a downloaded object file.
This file needs to be in JSON format, and it expects all
data and info that is usually returned by the Workspace.get_objects
method.
Returns a dict of three elements:
ws: the id of the workspace that was created
obj: the id of the narrative object
ref: the above two joined together into an object ref (for convenience)
"""
# read the file
f = open(nar_file, 'r')
nar = json.loads(f.read())
f.close()
# do some setup.
current_nar_metadata = ws_metadata
current_nar_metadata['narrative_nice_name'] = nar['data']['metadata']['name']
ws_client = Workspace(url=url, token=auth_token.token)
# create the new workspace for the narrative
ws_info = ws_client.create_workspace({
'workspace': '{}:{}'.format(auth_token.user_id, str(time.time()).replace('.', '')),
'meta': current_nar_metadata,
'globalread': 'r' if set_public else 'n'
})
ws_id = ws_info[0]
# setup and save the narrative object
metadata = nar['info'][10]
ws_save_obj = {
'type': 'KBaseNarrative.Narrative',
'data': nar['data'],
'name': nar['info'][1],
'meta': nar['info'][10],
'provenance': [{
'script': 'upload_narrative_test.py',
'description': 'Temporary Narrative uploaded for automated testing'
}]
}
obj_info = ws_client.save_objects({'id': ws_id,
'objects': [ws_save_obj]})
# tweak the workspace's metadata to properly present its narrative
ws_client.alter_workspace_metadata({'wsi': {'id': ws_id}, 'new':{'narrative':obj_info[0][0]}})
return {
'ws': ws_info[0],
'obj': obj_info[0][0],
'ref': '{}/{}'.format(ws_info[0], obj_info[0][0])
}
def delete_narrative(ws_id, auth_token, url=ci_ws):
"""
Deletes a workspace with the given id. Throws a ServerError if the user given
by auth_token isn't allowed to do so.
"""
ws_client = Workspace(url=url, token=auth_token.token)
ws_client.delete_workspace({'id': ws_id})
if __name__ == '__main__':
test_user_id = 'wjriehl'
password = getpass('Password for {}: '.format(test_user_id))
t = biokbase.auth.Token(user_id=test_user_id, password=password)
fetch_narrative('8245/32', t.token, file_name='updater_test_poplar.json') | src/biokbase/narrative/tests/narrative_test_helper.py | from biokbase.workspace.client import Workspace
import biokbase.auth
import os
from getpass import getpass
import json
import time
prod_ws = 'https://kbase.us/services/ws'
ci_ws = 'https://ci.kbase.us/services/ws'
ws_metadata = {
'is_temporary': False,
'narrative_nice_name': None
}
def fetch_narrative(nar_id, auth_token, url=ci_ws, file_name=None):
"""
Fetches a Narrative object with the given reference id (of the form ##/##).
If a file_name is given, then it is printed to that file.
If the narrative is found, the jsonized string of it is returned.
If nothing is found, an empty Dict is returned.
"""
ws_client = Workspace(url=url, token=auth_token)
nar_data = ws_client.get_objects([{'ref':nar_id}])
if len(nar_data) > 0:
nar_json = json.dumps(nar_data[0])
if file_name is not None:
f = open(file_name, 'w')
f.write(nar_json)
f.close()
return nar_json
return {}
def upload_narrative(nar_file, auth_token, url=ci_ws, set_public=False):
"""
Uploads a Narrative from a downloaded object file.
This file needs to be in JSON format, and it expects all
data and info that is usually returned by the Workspace.get_objects
method.
Returns a dict of three elements:
ws: the id of the workspace that was created
obj: the id of the narrative object
ref: the above two joined together into an object ref (for convenience)
"""
# read the file
f = open(nar_file, 'r')
nar = json.loads(f.read())
f.close()
# do some setup.
current_nar_metadata = ws_metadata
current_nar_metadata['narrative_nice_name'] = nar['data']['metadata']['name']
ws_client = Workspace(url=url, token=auth_token.token)
# create the new workspace for the narrative
ws_info = ws_client.create_workspace({
'workspace': '{}:{}'.format(auth_token.user_id, str(time.time()).replace('.', '')),
'meta': current_nar_metadata,
'globalread': 'r' if set_public else 'n'
})
ws_id = ws_info[0]
# setup and save the narrative object
metadata = nar['info'][10]
ws_save_obj = {
'type': 'KBaseNarrative.Narrative',
'data': nar['data'],
'name': nar['info'][1],
'meta': nar['info'][10],
'provenance': [{
'script': 'upload_narrative_test.py',
'description': 'Temporary Narrative uploaded for automated testing'
}]
}
obj_info = ws_client.save_objects({'id': ws_id,
'objects': [ws_save_obj]})
# tweak the workspace's metadata to properly present its narrative
ws_client.alter_workspace_metadata({'wsi': {'id': ws_id}, 'new':{'narrative':obj_info[0][0]}})
return {
'ws': ws_info[0],
'obj': obj_info[0][0],
'ref': '{}/{}'.format(ws_info[0], obj_info[0][0])
}
def delete_narrative(ws_id, auth_token, url=ci_ws):
"""
Deletes a workspace with the given id. Throws a ServerError if the user given
by auth_token isn't allowed to do so.
"""
ws_client = Workspace(url=url, token=auth_token.token)
ws_client.delete_workspace({'id': ws_id})
if __name__ == '__main__':
test_user_id = 'wjriehl'
password = getpass('Password for {}: '.format(test_user_id))
t = biokbase.auth.Token(user_id=test_user_id, password=password)
fetch_narrative('8245/32', t.token, file_name='updater_test_poplar.json') | 0.494873 | 0.188324 |
import os
import unittest
import json
import trebek
import entities
import fakeredis
import time
import datetime
# Reference this SO post on getting distances between strings:
# http://stackoverflow.com/a/1471603/98562
def get_clue_json():
with open('test-json-output.json') as json_data:
clue = json.load(json_data)
return clue
def fake_fetch_random_clue():
return entities.Question(**get_clue_json())
def fake_get_year_month():
now = datetime.datetime.now()
year, month = divmod(now.month + 1, 12)
if month == 0:
month = 12
year = year -1
next_month = datetime.datetime(now.year + year, month, 1)
return "{0}-{1}".format(next_month.year, str(next_month.month).zfill(2))
_fetch_count = 0
_invalid_clue = None
def fetch_invalid_clue():
global _fetch_count, _invalid_clue
clue = get_clue_json()
if _fetch_count == 0:
clue = _invalid_clue
_fetch_count += 1
return entities.Question(**clue)
class TestTrebek(unittest.TestCase):
def setUp(self):
d = self.get_setup_json()
self.room_message = entities.HipChatRoomMessage(**d)
self.trebek_bot = self.create_bot_with_dictionary(d)
def tearDown(self):
self.trebek_bot.redis.flushall()
def get_setup_json(self):
with open('test-room-message.json') as data:
d = json.load(data)
return d
def create_bot_with_dictionary(self, room_dictionary):
bot = trebek.Trebek(entities.HipChatRoomMessage(**room_dictionary))
bot.redis = fakeredis.FakeStrictRedis()
bot.fetch_random_clue = fake_fetch_random_clue
return bot
def create_user_scores(self, bot = None):
if bot != None:
r = bot.redis
else:
r = self.trebek_bot.redis
bot = self.trebek_bot
hipchat = trebek.Trebek.hipchat_user_key
r.set(hipchat.format(1), 'Aaron')
r.set(hipchat.format(2), 'Allen')
r.set(hipchat.format(3), 'Cordarrell')
r.set(hipchat.format(4), 'Melvin')
r.set(hipchat.format(5), 'Mark')
r.set(hipchat.format(6), 'Richard')
r.set(hipchat.format(7), '<NAME>')
r.set(hipchat.format(8), 'Arian')
r.set(hipchat.format(9), 'Zach')
r.set(hipchat.format(10), '<NAME>')
r.set(hipchat.format(11), 'Alex')
r.set(hipchat.format(12), 'Michael')
r.set(hipchat.format(13), 'Reggie')
r.set(hipchat.format(14), 'Legacy Score')
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
# Regression test old score keys will still appear in lifetime loserboard
r.set("user_score:{0}".format(14), 5)
bot.get_year_month = fake_get_year_month
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
def test_when_value_not_included_default_to_200(self):
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.value, 200)
def test_when_answer_includes_html_answer_is_sanitized(self):
# example answer: <i>Let\\'s Make a Deal</i>
self.trebek_bot.fetch_random_clue = fake_fetch_random_clue
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.answer, "Let's Make a Deal")
def test_when_response_doesNot_begin_with_question_return_none(self):
response = "some test response"
assert self.trebek_bot.response_is_a_question(response) == None
def test_when_response_is_question_return_true(self):
response = "what is some test response"
assert self.trebek_bot.response_is_a_question(response)
def test_fuzzy_matching_of_answer(self):
test_clue = fake_fetch_random_clue()
self.assertFalse(self.trebek_bot.is_correct_answer("polygamist", "polyamourus"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is let's make a deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Lets Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Dela"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Mae a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is elt's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer("a ukulele", "a ukelele"))
self.assertTrue(self.trebek_bot.is_correct_answer("Scrabble", "Scrablle"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Aristotle) Onassis", "Onassis"))
self.assertTrue(self.trebek_bot.is_correct_answer("(William) Blake", "blake"))
self.assertTrue(self.trebek_bot.is_correct_answer("wings (or feathers)", "feathers"))
self.assertTrue(self.trebek_bot.is_correct_answer("A.D. (Anno Domini)", "AD"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Little Orphan) Annie", "annie"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "turtle"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "tortoise"))
# self.assertTrue(self.trebek_bot.is_correct_answer("ben affleck and matt damon", "<NAME> & <NAME>"))
def test_given_json_dictionary_hipchat_object_is_parsed(self):
with open ('test-room-message.json') as data:
d = json.load(data)
t = entities.HipChatRoomMessage(**d)
self.assertEqual(t.item.message.message, "jeopardy")
self.assertEqual(t.item.message.user_from.name, "<NAME>")
def test_message_object_trims_leading_slash_command(self):
p = {}
p['from'] = { 'id':None, 'links': None, 'mention_name':None, 'name': None, 'version': None}
p['message'] = '/trebek jeopardy me'
msg = entities.HipChatMessage(p)
self.assertEqual(msg.message, "jeopardy me")
def test_when_get_response_message_is_called_user_name_is_saved(self):
self.trebek_bot.get_response_message()
key = trebek.Trebek.hipchat_user_key.format('582174')
self.assertTrue(self.trebek_bot.redis.exists(key))
user_name = self.trebek_bot.redis.get(trebek.Trebek.hipchat_user_key.format('582174')).decode()
self.assertEqual("<NAME>", user_name)
def test_number_is_formatted_as_currency(self):
currency = self.trebek_bot.format_currency("100")
self.assertEqual("$100", currency)
currency = self.trebek_bot.format_currency("1000")
self.assertEqual("$1,000", currency)
currency = self.trebek_bot.format_currency("1000000000")
self.assertEqual("$1,000,000,000", currency)
currency = self.trebek_bot.format_currency("-100")
self.assertEqual("<span style='color: red;'>-$100</span>", currency)
currency = self.trebek_bot.format_currency("-1000000000")
self.assertEqual("<span style='color: red;'>-$1,000,000,000</span>", currency)
def test_user_requests_score_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek score"
bot = self.create_bot_with_dictionary(d)
key = "{0}:{1}".format(bot.user_score_prefix,
bot.room_message.item.message.user_from.id)
bot.redis.set(key, 500)
response = bot.get_response_message()
self.assertEqual("$500", response)
def test_user_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Leaderboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Arian: $5,430</li>"
expected += "<li><NAME>: $500</li>"
expected += "<li>Zach: $412</li>"
expected += "<li>Alex: $225</li>"
expected += "<li>Richard: $200</li></ol>"
self.assertEqual(expected, response)
def test_user_loserboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Loserboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Allen: $20</li>"
expected += "<li>Mark: $30</li>"
expected += "<li>Melvin: $50</li>"
expected += "<li>Cordarrell: $70</li>"
expected += "<li>Reggie: $87</li></ol>"
self.assertEqual(expected, response)
def test_jeopardy_round_can_start_from_nothing(self):
response = self.trebek_bot.get_response_message()
expected = "The category is <b>CLASSIC GAME SHOW TAGLINES</b> for $200: "
expected += "<b>\"CAVEAT EMPTOR. LET THE BUYER BEWARE\"</b> (Air Date: 18-Oct-2001)"
self.assertEqual(expected, response)
def test_user_cannot_answer_same_question_twice(self):
# Arrange
clue = self.trebek_bot.get_jeopardy_clue()
d = self.get_setup_json()
user_answer_key = trebek.Trebek.user_answer_key.format(
self.trebek_bot.room_id, clue.id, d['item']['message']['from']['id'])
self.trebek_bot.redis.set(user_answer_key, 'true')
self.trebek_bot.get_question()
d['item']['message']['message'] = '/trebek this is an answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = self.trebek_bot.redis
# Act
response = bot.get_response_message()
# Assert
self.assertEqual("You have already answered <NAME>. Let someone else respond.", response)
def test_given_incorrect_answer_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = '/trebek some test answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is incorrect, <NAME>. Your score is now {0}".format(score_string), response)
def test_given_correct_answer_user_score_increased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertEqual("$200", bot.format_currency(score))
self.assertEqual("That is correct, <NAME>. Your score is now $200 (Expected Answer: Let's Make a Deal)", response)
def test_given_correct_answer_nonQuestion_form_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is correct <NAME>, however responses should be in the form of a question. Your score is now {0}".format(score_string), response)
def test_given_incorrect_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek foobar"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "Time is up! The correct answer was: <b>Let's Make a Deal</b>")
def test_given_correct_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "That is correct James A, however time is up. (Expected Answer: Let's Make a Deal)")
def test_when_asked_for_answer_bot_responds_with_answer(self):
d = self.get_setup_json()
bot = self.create_bot_with_dictionary(d)
bot.get_question()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
response = bot.get_response_message()
self.assertEqual("The answer was: Let's Make a Deal", response)
def test_when_no_question_exists_answer_returns_no_active_clue(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
bot.redis.flushdb()
response = bot.get_response_message()
self.assertEqual("No active clue. Type '/trebek jeopardy' to start a round", response)
def test_when_answer_contains_HTML_word_is_filtered(self):
# e.g.: ANSWER: the <i>Stegosaurus</i>
c = {'id':1, 'title': 'foo', 'created_at': 'bar', 'updated_at': 'foobar', 'clues_count':1}
q = entities.Question(1, answer= "the <i>Stegosaurus</i>", category = c)
self.assertEqual("the Stegosaurus", q.answer)
# e.g.: ANSWER: <i>the Seagull</i>
q = entities.Question(1, answer= "<i>the Seagull</i>", category = c)
self.assertEqual("the Seagull", q.answer)
q = entities.Question(1, answer= "Theodore Roosevelt", category = c)
self.assertEqual("Theodore Roosevelt", q.answer)
def test_when_fetched_clue_is_invalid_get_new_clue(self):
global _invalid_clue, _fetch_count
_fetch_count = 0
clue = get_clue_json()
clue['invalid_count'] = 1
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertEqual(clue.invalid_count, None)
def test_when_fetched_clue_is_missing_question_get_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = ""
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertNotEqual(clue.question.strip(), "")
def test_when_fetched_clue_contains_visual_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the picture seen here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("seen here" in clue.question)
def test_when_fetched_clue_contains_audio_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the audio heard here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("heard here" in clue.question)
def test_when_new_month_arrives_score_resets_to_zero(self):
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.assertEqual("$0", self.trebek_bot.get_user_score())
def test_lifetimescore_includes_multiple_months(self):
# Seed other user's data (to reproduce bug)
self.create_user_scores()
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.trebek_bot.update_score(200)
self.assertEqual("$400", self.trebek_bot.get_user_score(True))
def test_user_lifetime_loserboard_value_includes_multiple_months(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the lifetime loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Legacy Score: $5</li>"
expected += "<li>Allen: $40</li>"
expected += "<li>Mark: $60</li>"
expected += "<li>Melvin: $100</li>"
expected += "<li>Cordarrell: $140</li></ol>"
self.assertEqual(expected, response)
def test_user_lifetime_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek lifetime leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Arian: $10,860</li>"
expected += "<li><NAME>: $1,000</li>"
expected += "<li>Zach: $824</li>"
expected += "<li>Alex: $450</li>"
expected += "<li>Richard: $400</li></ol>"
self.assertEqual(expected, response)
def main():
unittest.main()
if __name__ == '__main__':
main() | test_trebek.py | import os
import unittest
import json
import trebek
import entities
import fakeredis
import time
import datetime
# Reference this SO post on getting distances between strings:
# http://stackoverflow.com/a/1471603/98562
def get_clue_json():
with open('test-json-output.json') as json_data:
clue = json.load(json_data)
return clue
def fake_fetch_random_clue():
return entities.Question(**get_clue_json())
def fake_get_year_month():
now = datetime.datetime.now()
year, month = divmod(now.month + 1, 12)
if month == 0:
month = 12
year = year -1
next_month = datetime.datetime(now.year + year, month, 1)
return "{0}-{1}".format(next_month.year, str(next_month.month).zfill(2))
_fetch_count = 0
_invalid_clue = None
def fetch_invalid_clue():
global _fetch_count, _invalid_clue
clue = get_clue_json()
if _fetch_count == 0:
clue = _invalid_clue
_fetch_count += 1
return entities.Question(**clue)
class TestTrebek(unittest.TestCase):
def setUp(self):
d = self.get_setup_json()
self.room_message = entities.HipChatRoomMessage(**d)
self.trebek_bot = self.create_bot_with_dictionary(d)
def tearDown(self):
self.trebek_bot.redis.flushall()
def get_setup_json(self):
with open('test-room-message.json') as data:
d = json.load(data)
return d
def create_bot_with_dictionary(self, room_dictionary):
bot = trebek.Trebek(entities.HipChatRoomMessage(**room_dictionary))
bot.redis = fakeredis.FakeStrictRedis()
bot.fetch_random_clue = fake_fetch_random_clue
return bot
def create_user_scores(self, bot = None):
if bot != None:
r = bot.redis
else:
r = self.trebek_bot.redis
bot = self.trebek_bot
hipchat = trebek.Trebek.hipchat_user_key
r.set(hipchat.format(1), 'Aaron')
r.set(hipchat.format(2), 'Allen')
r.set(hipchat.format(3), 'Cordarrell')
r.set(hipchat.format(4), 'Melvin')
r.set(hipchat.format(5), 'Mark')
r.set(hipchat.format(6), 'Richard')
r.set(hipchat.format(7), '<NAME>')
r.set(hipchat.format(8), 'Arian')
r.set(hipchat.format(9), 'Zach')
r.set(hipchat.format(10), '<NAME>')
r.set(hipchat.format(11), 'Alex')
r.set(hipchat.format(12), 'Michael')
r.set(hipchat.format(13), 'Reggie')
r.set(hipchat.format(14), 'Legacy Score')
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
# Regression test old score keys will still appear in lifetime loserboard
r.set("user_score:{0}".format(14), 5)
bot.get_year_month = fake_get_year_month
user = bot.user_score_prefix + ":{0}"
r.set(user.format(1), 100)
r.set(user.format(2), 20)
r.set(user.format(3), 70)
r.set(user.format(4), 50)
r.set(user.format(5), 30)
r.set(user.format(6), 200)
r.set(user.format(7), 500)
r.set(user.format(8), 5430)
r.set(user.format(9), 412)
r.set(user.format(10), 123)
r.set(user.format(11), 225)
r.set(user.format(12), 94)
r.set(user.format(13), 87)
def test_when_value_not_included_default_to_200(self):
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.value, 200)
def test_when_answer_includes_html_answer_is_sanitized(self):
# example answer: <i>Let\\'s Make a Deal</i>
self.trebek_bot.fetch_random_clue = fake_fetch_random_clue
test_clue = self.trebek_bot.fetch_random_clue()
self.assertEqual(test_clue.answer, "Let's Make a Deal")
def test_when_response_doesNot_begin_with_question_return_none(self):
response = "some test response"
assert self.trebek_bot.response_is_a_question(response) == None
def test_when_response_is_question_return_true(self):
response = "what is some test response"
assert self.trebek_bot.response_is_a_question(response)
def test_fuzzy_matching_of_answer(self):
test_clue = fake_fetch_random_clue()
self.assertFalse(self.trebek_bot.is_correct_answer("polygamist", "polyamourus"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is let's make a deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Lets Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Dela"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Mae a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is Let's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer(test_clue.answer, "what is elt's Make a Deal"))
self.assertTrue(self.trebek_bot.is_correct_answer("a ukulele", "a ukelele"))
self.assertTrue(self.trebek_bot.is_correct_answer("Scrabble", "Scrablle"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Aristotle) Onassis", "Onassis"))
self.assertTrue(self.trebek_bot.is_correct_answer("(William) Blake", "blake"))
self.assertTrue(self.trebek_bot.is_correct_answer("wings (or feathers)", "feathers"))
self.assertTrue(self.trebek_bot.is_correct_answer("A.D. (Anno Domini)", "AD"))
self.assertTrue(self.trebek_bot.is_correct_answer("(Little Orphan) Annie", "annie"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "turtle"))
self.assertTrue(self.trebek_bot.is_correct_answer("a turtle (or a tortoise)", "tortoise"))
# self.assertTrue(self.trebek_bot.is_correct_answer("ben affleck and matt damon", "<NAME> & <NAME>"))
def test_given_json_dictionary_hipchat_object_is_parsed(self):
with open ('test-room-message.json') as data:
d = json.load(data)
t = entities.HipChatRoomMessage(**d)
self.assertEqual(t.item.message.message, "jeopardy")
self.assertEqual(t.item.message.user_from.name, "<NAME>")
def test_message_object_trims_leading_slash_command(self):
p = {}
p['from'] = { 'id':None, 'links': None, 'mention_name':None, 'name': None, 'version': None}
p['message'] = '/trebek jeopardy me'
msg = entities.HipChatMessage(p)
self.assertEqual(msg.message, "jeopardy me")
def test_when_get_response_message_is_called_user_name_is_saved(self):
self.trebek_bot.get_response_message()
key = trebek.Trebek.hipchat_user_key.format('582174')
self.assertTrue(self.trebek_bot.redis.exists(key))
user_name = self.trebek_bot.redis.get(trebek.Trebek.hipchat_user_key.format('582174')).decode()
self.assertEqual("<NAME>", user_name)
def test_number_is_formatted_as_currency(self):
currency = self.trebek_bot.format_currency("100")
self.assertEqual("$100", currency)
currency = self.trebek_bot.format_currency("1000")
self.assertEqual("$1,000", currency)
currency = self.trebek_bot.format_currency("1000000000")
self.assertEqual("$1,000,000,000", currency)
currency = self.trebek_bot.format_currency("-100")
self.assertEqual("<span style='color: red;'>-$100</span>", currency)
currency = self.trebek_bot.format_currency("-1000000000")
self.assertEqual("<span style='color: red;'>-$1,000,000,000</span>", currency)
def test_user_requests_score_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek score"
bot = self.create_bot_with_dictionary(d)
key = "{0}:{1}".format(bot.user_score_prefix,
bot.room_message.item.message.user_from.id)
bot.redis.set(key, 500)
response = bot.get_response_message()
self.assertEqual("$500", response)
def test_user_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Leaderboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Arian: $5,430</li>"
expected += "<li><NAME>: $500</li>"
expected += "<li>Zach: $412</li>"
expected += "<li>Alex: $225</li>"
expected += "<li>Richard: $200</li></ol>"
self.assertEqual(expected, response)
def test_user_loserboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
year, month = [int(x) for x in bot.get_year_month().split('-')]
dt = datetime.datetime(year, month, 1)
expected = "<p>Loserboard for {0} {1}:</p>".format(dt.strftime("%B"), dt.year)
expected += "<ol><li>Allen: $20</li>"
expected += "<li>Mark: $30</li>"
expected += "<li>Melvin: $50</li>"
expected += "<li>Cordarrell: $70</li>"
expected += "<li>Reggie: $87</li></ol>"
self.assertEqual(expected, response)
def test_jeopardy_round_can_start_from_nothing(self):
response = self.trebek_bot.get_response_message()
expected = "The category is <b>CLASSIC GAME SHOW TAGLINES</b> for $200: "
expected += "<b>\"CAVEAT EMPTOR. LET THE BUYER BEWARE\"</b> (Air Date: 18-Oct-2001)"
self.assertEqual(expected, response)
def test_user_cannot_answer_same_question_twice(self):
# Arrange
clue = self.trebek_bot.get_jeopardy_clue()
d = self.get_setup_json()
user_answer_key = trebek.Trebek.user_answer_key.format(
self.trebek_bot.room_id, clue.id, d['item']['message']['from']['id'])
self.trebek_bot.redis.set(user_answer_key, 'true')
self.trebek_bot.get_question()
d['item']['message']['message'] = '/trebek this is an answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = self.trebek_bot.redis
# Act
response = bot.get_response_message()
# Assert
self.assertEqual("You have already answered <NAME>. Let someone else respond.", response)
def test_given_incorrect_answer_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = '/trebek some test answer'
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is incorrect, <NAME>. Your score is now {0}".format(score_string), response)
def test_given_correct_answer_user_score_increased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertEqual("$200", bot.format_currency(score))
self.assertEqual("That is correct, <NAME>. Your score is now $200 (Expected Answer: Let's Make a Deal)", response)
def test_given_correct_answer_nonQuestion_form_user_score_decreased(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
score_string = "<span style='color: red;'>-$200</span>"
self.assertEqual(score_string, bot.format_currency(score))
self.assertEqual("That is correct <NAME>, however responses should be in the form of a question. Your score is now {0}".format(score_string), response)
def test_given_incorrect_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek foobar"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "Time is up! The correct answer was: <b>Let's Make a Deal</b>")
def test_given_correct_answer_time_is_up_response(self):
# Arrange
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek what is Let's Make a deal"
bot = self.create_bot_with_dictionary(d)
bot.redis = fakeredis.FakeStrictRedis()
bot.get_question()
clue = bot.get_active_clue()
clue.expiration = time.time() - (bot.seconds_to_expire + 1)
key = bot.clue_key.format(bot.room_id)
bot.redis.set(key, json.dumps(clue, cls = entities.QuestionEncoder))
response = bot.get_response_message()
user_score_key = "{0}:{1}".format(bot.user_score_prefix,
self.trebek_bot.room_message.item.message.user_from.id)
# Act
score = bot.redis.get(user_score_key)
bot.redis.flushdb()
# Assert
self.assertFalse(score)
self.assertEqual(response, "That is correct James A, however time is up. (Expected Answer: Let's Make a Deal)")
def test_when_asked_for_answer_bot_responds_with_answer(self):
d = self.get_setup_json()
bot = self.create_bot_with_dictionary(d)
bot.get_question()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
response = bot.get_response_message()
self.assertEqual("The answer was: Let's Make a Deal", response)
def test_when_no_question_exists_answer_returns_no_active_clue(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek answer"
bot = self.create_bot_with_dictionary(d)
bot.redis.flushdb()
response = bot.get_response_message()
self.assertEqual("No active clue. Type '/trebek jeopardy' to start a round", response)
def test_when_answer_contains_HTML_word_is_filtered(self):
# e.g.: ANSWER: the <i>Stegosaurus</i>
c = {'id':1, 'title': 'foo', 'created_at': 'bar', 'updated_at': 'foobar', 'clues_count':1}
q = entities.Question(1, answer= "the <i>Stegosaurus</i>", category = c)
self.assertEqual("the Stegosaurus", q.answer)
# e.g.: ANSWER: <i>the Seagull</i>
q = entities.Question(1, answer= "<i>the Seagull</i>", category = c)
self.assertEqual("the Seagull", q.answer)
q = entities.Question(1, answer= "Theodore Roosevelt", category = c)
self.assertEqual("Theodore Roosevelt", q.answer)
def test_when_fetched_clue_is_invalid_get_new_clue(self):
global _invalid_clue, _fetch_count
_fetch_count = 0
clue = get_clue_json()
clue['invalid_count'] = 1
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertEqual(clue.invalid_count, None)
def test_when_fetched_clue_is_missing_question_get_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = ""
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertNotEqual(clue.question.strip(), "")
def test_when_fetched_clue_contains_visual_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the picture seen here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("seen here" in clue.question)
def test_when_fetched_clue_contains_audio_clue_request_new_clue(self):
global _fetch_count, _invalid_clue
_fetch_count = 0
clue = get_clue_json()
clue['question'] = "the audio heard here, contains some test data"
_invalid_clue = clue
self.trebek_bot.fetch_random_clue = fetch_invalid_clue
clue = self.trebek_bot.get_jeopardy_clue()
self.assertFalse("heard here" in clue.question)
def test_when_new_month_arrives_score_resets_to_zero(self):
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.assertEqual("$0", self.trebek_bot.get_user_score())
def test_lifetimescore_includes_multiple_months(self):
# Seed other user's data (to reproduce bug)
self.create_user_scores()
self.trebek_bot.update_score(200)
self.trebek_bot.get_year_month = fake_get_year_month
self.trebek_bot.update_score(200)
self.assertEqual("$400", self.trebek_bot.get_user_score(True))
def test_user_lifetime_loserboard_value_includes_multiple_months(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek show me the lifetime loserboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Legacy Score: $5</li>"
expected += "<li>Allen: $40</li>"
expected += "<li>Mark: $60</li>"
expected += "<li>Melvin: $100</li>"
expected += "<li>Cordarrell: $140</li></ol>"
self.assertEqual(expected, response)
def test_user_lifetime_leaderboard_value_returned(self):
d = self.get_setup_json()
d['item']['message']['message'] = "/trebek lifetime leaderboard"
bot = self.create_bot_with_dictionary(d)
self.create_user_scores(bot)
response = bot.get_response_message()
expected = "<ol><li>Arian: $10,860</li>"
expected += "<li><NAME>: $1,000</li>"
expected += "<li>Zach: $824</li>"
expected += "<li>Alex: $450</li>"
expected += "<li>Richard: $400</li></ol>"
self.assertEqual(expected, response)
def main():
unittest.main()
if __name__ == '__main__':
main() | 0.443359 | 0.208703 |
import numpy as np
from nose.plugins.attrib import attr
from ion_functions.data.perf.test_performance import PerformanceTestCase
from ion_functions.data import adcp_functions as af
# Note, the VADCP related data products use the same internal functions as the
# family of beam wrapper functions (e.g. adcp_beam_eastward). Thus, those
# functions won't be added to this test. The only way to really speed this
# process up any further is to set the wrapper functions to return all the data
# products for an instrument at once rather than singly. That way the functions
# only need to be run once rather than 4 times for each instrument (a factor of
# four improvement in performance).
@attr('PERF', group='func')
class TestADCPPerformance(PerformanceTestCase):
def setUp(self):
# set test inputs -- values from DPS
self.b1 = np.array([-0.0300, -0.2950, -0.5140, -0.2340, -0.1880,
0.2030, -0.3250, 0.3050, -0.2040, -0.2940]) * 1000
self.b2 = np.array([0.1800, -0.1320, 0.2130, 0.3090, 0.2910,
0.0490, 0.1880, 0.3730, -0.0020, 0.1720]) * 1000
self.b3 = np.array([-0.3980, -0.4360, -0.1310, -0.4730, -0.4430,
0.1880, -0.1680, 0.2910, -0.1790, 0.0080]) * 1000
self.b4 = np.array([-0.2160, -0.6050, -0.0920, -0.0580, 0.4840,
-0.0050, 0.3380, 0.1750, -0.0800, -0.5490]) * 1000
self.echo = np.array([0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250])
self.sfactor = 0.45
self.heading = 98.4100 / 100.
self.pitch = 0.6900 / 100.
self.roll = -2.5400 / 100.
self.orient = 1
self.lat = 50.0000
self.lon = -145.0000
self.depth = 0.0
self.ntp = 3545769600.0 # May 12, 2012
# set expected results -- velocity profiles in earth coordinates
# (values in DPS)
self.uu = np.array([0.2175, -0.2814, -0.1002, 0.4831, 1.2380,
-0.2455, 0.6218, -0.1807, 0.0992, -0.9063])
self.vv = np.array([-0.3367, -0.1815, -1.0522, -0.8676, -0.8919,
0.2585, -0.8497, -0.0873, -0.3073, -0.5461])
self.ww = np.array([0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966])
def test_adcp_backscatter(self):
stats = []
echo = np.tile(self.echo, (10000, 1))
sfactor = np.repeat(self.sfactor, 10000)
self.profile(stats, af.adcp_backscatter, echo, sfactor)
def test_adcp_beam_eastward(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
h = np.repeat(self.heading, 10000)
p = np.repeat(self.pitch, 10000)
r = np.repeat(self.roll, 10000)
vf = np.repeat(self.orient, 10000)
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_beam_eastward, b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
def test_adcp_beam_northward(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
h = np.repeat(self.heading, 10000)
p = np.repeat(self.pitch, 10000)
r = np.repeat(self.roll, 10000)
vf = np.repeat(self.orient, 10000)
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_beam_northward, b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
def test_adcp_beam_vertical(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
h = np.repeat(self.heading, 10000)
p = np.repeat(self.pitch, 10000)
r = np.repeat(self.roll, 10000)
vf = np.repeat(self.orient, 10000)
self.profile(stats, af.adcp_beam_vertical, b1, b2, b3, b4, h, p, r, vf)
def test_adcp_beam_error(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
self.profile(stats, af.adcp_beam_error, b1, b2, b3, b4)
def test_adcp_earth_eastward(self):
stats = []
u = np.tile(self.uu, (10000, 1))
v = np.tile(self.vv, (10000, 1))
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_earth_eastward, u, v, z, lat, lon, dt)
def test_adcp_earth_northward(self):
stats = []
u = np.tile(self.uu, (10000, 1))
v = np.tile(self.vv, (10000, 1))
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_earth_northward, u, v, z, lat, lon, dt)
def test_adcp_earth_vertical(self):
stats = []
w = np.tile(self.ww, (10000, 1))
self.profile(stats, af.adcp_earth_vertical, w)
# adcp_earth_error is the same transform, so this test applies to both | ion_functions/data/perf/test_adcp_performance.py | import numpy as np
from nose.plugins.attrib import attr
from ion_functions.data.perf.test_performance import PerformanceTestCase
from ion_functions.data import adcp_functions as af
# Note, the VADCP related data products use the same internal functions as the
# family of beam wrapper functions (e.g. adcp_beam_eastward). Thus, those
# functions won't be added to this test. The only way to really speed this
# process up any further is to set the wrapper functions to return all the data
# products for an instrument at once rather than singly. That way the functions
# only need to be run once rather than 4 times for each instrument (a factor of
# four improvement in performance).
@attr('PERF', group='func')
class TestADCPPerformance(PerformanceTestCase):
def setUp(self):
# set test inputs -- values from DPS
self.b1 = np.array([-0.0300, -0.2950, -0.5140, -0.2340, -0.1880,
0.2030, -0.3250, 0.3050, -0.2040, -0.2940]) * 1000
self.b2 = np.array([0.1800, -0.1320, 0.2130, 0.3090, 0.2910,
0.0490, 0.1880, 0.3730, -0.0020, 0.1720]) * 1000
self.b3 = np.array([-0.3980, -0.4360, -0.1310, -0.4730, -0.4430,
0.1880, -0.1680, 0.2910, -0.1790, 0.0080]) * 1000
self.b4 = np.array([-0.2160, -0.6050, -0.0920, -0.0580, 0.4840,
-0.0050, 0.3380, 0.1750, -0.0800, -0.5490]) * 1000
self.echo = np.array([0, 25, 50, 75, 100, 125, 150, 175, 200, 225, 250])
self.sfactor = 0.45
self.heading = 98.4100 / 100.
self.pitch = 0.6900 / 100.
self.roll = -2.5400 / 100.
self.orient = 1
self.lat = 50.0000
self.lon = -145.0000
self.depth = 0.0
self.ntp = 3545769600.0 # May 12, 2012
# set expected results -- velocity profiles in earth coordinates
# (values in DPS)
self.uu = np.array([0.2175, -0.2814, -0.1002, 0.4831, 1.2380,
-0.2455, 0.6218, -0.1807, 0.0992, -0.9063])
self.vv = np.array([-0.3367, -0.1815, -1.0522, -0.8676, -0.8919,
0.2585, -0.8497, -0.0873, -0.3073, -0.5461])
self.ww = np.array([0.1401, 0.3977, 0.1870, 0.1637, 0.0091,
-0.1290, 0.0334, -0.3017, 0.1384, 0.1966])
def test_adcp_backscatter(self):
stats = []
echo = np.tile(self.echo, (10000, 1))
sfactor = np.repeat(self.sfactor, 10000)
self.profile(stats, af.adcp_backscatter, echo, sfactor)
def test_adcp_beam_eastward(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
h = np.repeat(self.heading, 10000)
p = np.repeat(self.pitch, 10000)
r = np.repeat(self.roll, 10000)
vf = np.repeat(self.orient, 10000)
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_beam_eastward, b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
def test_adcp_beam_northward(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
h = np.repeat(self.heading, 10000)
p = np.repeat(self.pitch, 10000)
r = np.repeat(self.roll, 10000)
vf = np.repeat(self.orient, 10000)
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_beam_northward, b1, b2, b3, b4, h, p, r, vf, lat, lon, z, dt)
def test_adcp_beam_vertical(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
h = np.repeat(self.heading, 10000)
p = np.repeat(self.pitch, 10000)
r = np.repeat(self.roll, 10000)
vf = np.repeat(self.orient, 10000)
self.profile(stats, af.adcp_beam_vertical, b1, b2, b3, b4, h, p, r, vf)
def test_adcp_beam_error(self):
stats = []
b1 = np.tile(self.b1, (10000, 1))
b2 = np.tile(self.b2, (10000, 1))
b3 = np.tile(self.b3, (10000, 1))
b4 = np.tile(self.b4, (10000, 1))
self.profile(stats, af.adcp_beam_error, b1, b2, b3, b4)
def test_adcp_earth_eastward(self):
stats = []
u = np.tile(self.uu, (10000, 1))
v = np.tile(self.vv, (10000, 1))
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_earth_eastward, u, v, z, lat, lon, dt)
def test_adcp_earth_northward(self):
stats = []
u = np.tile(self.uu, (10000, 1))
v = np.tile(self.vv, (10000, 1))
lat = np.repeat(self.lat, 10000)
lon = np.repeat(self.lon, 10000)
z = np.repeat(self.depth, 10000)
dt = np.repeat(self.ntp, 10000)
self.profile(stats, af.adcp_earth_northward, u, v, z, lat, lon, dt)
def test_adcp_earth_vertical(self):
stats = []
w = np.tile(self.ww, (10000, 1))
self.profile(stats, af.adcp_earth_vertical, w)
# adcp_earth_error is the same transform, so this test applies to both | 0.654453 | 0.540014 |
import copy
import logging
import os
import subprocess
import time
import traceback
from functools import wraps
from thundra import constants
from thundra.application.global_application_info_provider import GlobalApplicationInfoProvider
from thundra.compat import PY2, TimeoutError
from thundra.config import config_names
from thundra.config.config_provider import ConfigProvider
from thundra.context.execution_context_manager import ExecutionContextManager
from thundra.context.global_execution_context_provider import GlobalExecutionContextProvider
from thundra.context.plugin_context import PluginContext
from thundra.integrations import handler_wrappers
from thundra.plugins.log.thundra_logger import debug_logger
from thundra.timeout import Timeout
from thundra.wrappers import wrapper_utils
from thundra.wrappers.aws_lambda import LambdaApplicationInfoProvider
from thundra.wrappers.aws_lambda import lambda_executor
from thundra.wrappers.base_wrapper import BaseWrapper
logger = logging.getLogger(__name__)
class LambdaWrapper(BaseWrapper):
def __init__(self, api_key=None, disable_trace=False, disable_metric=True, disable_log=True, opts=None):
super(LambdaWrapper, self).__init__(api_key, disable_trace, disable_metric, disable_log, opts)
self.application_info_provider = GlobalApplicationInfoProvider(LambdaApplicationInfoProvider())
self.plugin_context = PluginContext(application_info=self.application_info_provider.get_application_info(),
request_count=0,
executor=lambda_executor,
api_key=self.api_key)
ExecutionContextManager.set_provider(GlobalExecutionContextProvider())
self.plugins = wrapper_utils.initialize_plugins(self.plugin_context, disable_trace, disable_metric, disable_log,
self.config)
self.timeout_margin = ConfigProvider.get(config_names.THUNDRA_LAMBDA_TIMEOUT_MARGIN,
constants.DEFAULT_LAMBDA_TIMEOUT_MARGIN)
if not ConfigProvider.get(config_names.THUNDRA_TRACE_INSTRUMENT_DISABLE):
# Pass thundra instance to integration for wrapping handler wrappers
handler_wrappers.patch_modules(self)
self.ptvsd_imported = False
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_ENABLE,
ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN)):
self.initialize_debugger()
def __call__(self, original_func):
if hasattr(original_func, "_thundra_wrapped") or ConfigProvider.get(config_names.THUNDRA_DISABLE, False):
return original_func
@wraps(original_func)
def wrapper(event, context):
application_name = self.plugin_context.application_info.get('applicationName')
self.application_info_provider.update({
'applicationId': LambdaApplicationInfoProvider.get_application_id(context,
application_name=application_name)
})
# Execution context initialization
execution_context = wrapper_utils.create_execution_context()
try:
execution_context.platform_data['originalEvent'] = copy.deepcopy(event)
except:
execution_context.platform_data['originalEvent'] = event
execution_context.platform_data['originalContext'] = context
ExecutionContextManager.set(execution_context)
# Before running user's handler
try:
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_WARMUP_WARMUPAWARE,
False) and self.check_and_handle_warmup_request(event):
return None
self.plugin_context.request_count += 1
self.execute_hook('before:invocation', execution_context)
timeout_duration = self.get_timeout_duration(context)
except Exception as e:
logger.error("Error during the before part of Thundra: {}".format(e))
return original_func(event, context)
# Invoke user handler
try:
response = None
with Timeout(timeout_duration, self.timeout_handler, execution_context):
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_ENABLE,
ConfigProvider.get(
config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN)) and self.ptvsd_imported:
self.start_debugger_tracing(context)
response = original_func(event, context)
execution_context.response = response
except Exception as e:
try:
execution_context.error = {
'type': type(e).__name__,
'message': str(e),
'traceback': traceback.format_exc()
}
self.prepare_and_send_reports(execution_context)
except Exception as e_in:
logger.error("Error during the after part of Thundra: {}".format(e_in))
pass
raise e
finally:
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_ENABLE,
ConfigProvider.get(
config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN)) and self.ptvsd_imported:
self.stop_debugger_tracing()
# After having run the user's handler
try:
self.prepare_and_send_reports(execution_context)
except Exception as e:
logger.error("Error during the after part of Thundra: {}".format(e))
ExecutionContextManager.clear()
return response
setattr(wrapper, '_thundra_wrapped', True)
return wrapper
call = __call__
def initialize_debugger(self):
if PY2:
logger.error("Online debugging not supported in python2.7. Supported versions: 3.6, 3.7, 3.8")
return
try:
import ptvsd
self.ptvsd_imported = True
except Exception as e:
logger.error("Could not import ptvsd. Thundra ptvsd layer must be added")
def start_debugger_tracing(self, context):
try:
import ptvsd
ptvsd.tracing(True)
ptvsd.enable_attach(address=("localhost", ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_PORT)))
if not self.debugger_process:
env = os.environ.copy()
env['BROKER_HOST'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_BROKER_HOST))
env['BROKER_PORT'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_BROKER_PORT))
env['DEBUGGER_PORT'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_PORT))
env['AUTH_TOKEN'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN))
env['SESSION_NAME'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_SESSION_NAME))
if hasattr(context, 'get_remaining_time_in_millis'):
env['SESSION_TIMEOUT'] = str(context.get_remaining_time_in_millis() + int(time.time() * 1000.0))
debug_bridge_file_path = os.path.join(os.path.dirname(__file__), '../../debug/bridge.py')
self.debugger_process = subprocess.Popen(["python", debug_bridge_file_path], stdout=subprocess.PIPE,
stdin=subprocess.PIPE, env=env)
start_time = time.time()
debug_process_running = True
while time.time() < (start_time + ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_WAIT_MAX) / 1000) \
and not ptvsd.is_attached():
if self.debugger_process.poll() is None:
ptvsd.wait_for_attach(0.01)
else:
debug_process_running = False
break
if not ptvsd.is_attached():
if debug_process_running:
logger.error('Couldn\'t complete debugger handshake in {} milliseconds.' \
.format(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_WAIT_MAX)))
ptvsd.tracing(False)
else:
ptvsd.tracing(True)
except Exception as e:
logger.error("error while setting tracing true to debugger using ptvsd: {}".format(e))
def stop_debugger_tracing(self):
try:
import ptvsd
ptvsd.tracing(False)
from ptvsd.attach_server import debugger_attached
debugger_attached.clear()
except Exception as e:
logger.error("error while setting tracing false to debugger using ptvsd: {}".format(e))
try:
if self.debugger_process:
o, e = self.debugger_process.communicate(b"fin\n")
debug_logger("Thundra debugger process output: {}".format(o.decode("utf-8")))
self.debugger_process = None
except Exception as e:
self.debugger_process = None
logger.error("error while killing proxy process for debug: {}".format(e))
def check_and_handle_warmup_request(self, event):
# Check whether it is empty request which is used as default warmup request
if not event:
print("Received warmup request as empty message. " +
"Handling with 90 milliseconds delay ...")
time.sleep(0.1)
return True
else:
if isinstance(event, str):
# Check whether it is warmup request
if event.startswith('#warmup'):
delayTime = 90
args = event[len('#warmup'):].strip().split()
# Warmup messages are in '#warmup wait=<waitTime>' format
# Iterate over all warmup arguments
for arg in args:
argParts = arg.split('=')
# Check whether argument is in key=value format
if len(argParts) == 2:
argName = argParts[0]
argValue = argParts[1]
# Check whether argument is "wait" argument
# which specifies extra wait time before returning from request
if argName == 'wait':
waitTime = int(argValue)
delayTime += waitTime
print("Received warmup request as warmup message. " +
"Handling with " + str(delayTime) + " milliseconds delay ...")
time.sleep(delayTime / 1000)
return True
return False
def get_timeout_duration(self, context):
timeout_duration = 0
if hasattr(context, 'get_remaining_time_in_millis'):
timeout_duration = context.get_remaining_time_in_millis() - self.timeout_margin
if timeout_duration <= 0:
timeout_duration = context.get_remaining_time_in_millis() - \
constants.DEFAULT_LAMBDA_TIMEOUT_MARGIN
logger.warning('Given timeout margin is bigger than lambda timeout duration and '
'since the difference is negative, it is set to default value (' +
str(constants.DEFAULT_LAMBDA_TIMEOUT_MARGIN) + ')')
return timeout_duration / 1000.0
def timeout_handler(self, execution_context):
execution_context.timeout = True
execution_context.error = TimeoutError('Task timed out')
self.prepare_and_send_reports(execution_context) | thundra/wrappers/aws_lambda/lambda_wrapper.py | import copy
import logging
import os
import subprocess
import time
import traceback
from functools import wraps
from thundra import constants
from thundra.application.global_application_info_provider import GlobalApplicationInfoProvider
from thundra.compat import PY2, TimeoutError
from thundra.config import config_names
from thundra.config.config_provider import ConfigProvider
from thundra.context.execution_context_manager import ExecutionContextManager
from thundra.context.global_execution_context_provider import GlobalExecutionContextProvider
from thundra.context.plugin_context import PluginContext
from thundra.integrations import handler_wrappers
from thundra.plugins.log.thundra_logger import debug_logger
from thundra.timeout import Timeout
from thundra.wrappers import wrapper_utils
from thundra.wrappers.aws_lambda import LambdaApplicationInfoProvider
from thundra.wrappers.aws_lambda import lambda_executor
from thundra.wrappers.base_wrapper import BaseWrapper
logger = logging.getLogger(__name__)
class LambdaWrapper(BaseWrapper):
def __init__(self, api_key=None, disable_trace=False, disable_metric=True, disable_log=True, opts=None):
super(LambdaWrapper, self).__init__(api_key, disable_trace, disable_metric, disable_log, opts)
self.application_info_provider = GlobalApplicationInfoProvider(LambdaApplicationInfoProvider())
self.plugin_context = PluginContext(application_info=self.application_info_provider.get_application_info(),
request_count=0,
executor=lambda_executor,
api_key=self.api_key)
ExecutionContextManager.set_provider(GlobalExecutionContextProvider())
self.plugins = wrapper_utils.initialize_plugins(self.plugin_context, disable_trace, disable_metric, disable_log,
self.config)
self.timeout_margin = ConfigProvider.get(config_names.THUNDRA_LAMBDA_TIMEOUT_MARGIN,
constants.DEFAULT_LAMBDA_TIMEOUT_MARGIN)
if not ConfigProvider.get(config_names.THUNDRA_TRACE_INSTRUMENT_DISABLE):
# Pass thundra instance to integration for wrapping handler wrappers
handler_wrappers.patch_modules(self)
self.ptvsd_imported = False
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_ENABLE,
ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN)):
self.initialize_debugger()
def __call__(self, original_func):
if hasattr(original_func, "_thundra_wrapped") or ConfigProvider.get(config_names.THUNDRA_DISABLE, False):
return original_func
@wraps(original_func)
def wrapper(event, context):
application_name = self.plugin_context.application_info.get('applicationName')
self.application_info_provider.update({
'applicationId': LambdaApplicationInfoProvider.get_application_id(context,
application_name=application_name)
})
# Execution context initialization
execution_context = wrapper_utils.create_execution_context()
try:
execution_context.platform_data['originalEvent'] = copy.deepcopy(event)
except:
execution_context.platform_data['originalEvent'] = event
execution_context.platform_data['originalContext'] = context
ExecutionContextManager.set(execution_context)
# Before running user's handler
try:
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_WARMUP_WARMUPAWARE,
False) and self.check_and_handle_warmup_request(event):
return None
self.plugin_context.request_count += 1
self.execute_hook('before:invocation', execution_context)
timeout_duration = self.get_timeout_duration(context)
except Exception as e:
logger.error("Error during the before part of Thundra: {}".format(e))
return original_func(event, context)
# Invoke user handler
try:
response = None
with Timeout(timeout_duration, self.timeout_handler, execution_context):
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_ENABLE,
ConfigProvider.get(
config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN)) and self.ptvsd_imported:
self.start_debugger_tracing(context)
response = original_func(event, context)
execution_context.response = response
except Exception as e:
try:
execution_context.error = {
'type': type(e).__name__,
'message': str(e),
'traceback': traceback.format_exc()
}
self.prepare_and_send_reports(execution_context)
except Exception as e_in:
logger.error("Error during the after part of Thundra: {}".format(e_in))
pass
raise e
finally:
if ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_ENABLE,
ConfigProvider.get(
config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN)) and self.ptvsd_imported:
self.stop_debugger_tracing()
# After having run the user's handler
try:
self.prepare_and_send_reports(execution_context)
except Exception as e:
logger.error("Error during the after part of Thundra: {}".format(e))
ExecutionContextManager.clear()
return response
setattr(wrapper, '_thundra_wrapped', True)
return wrapper
call = __call__
def initialize_debugger(self):
if PY2:
logger.error("Online debugging not supported in python2.7. Supported versions: 3.6, 3.7, 3.8")
return
try:
import ptvsd
self.ptvsd_imported = True
except Exception as e:
logger.error("Could not import ptvsd. Thundra ptvsd layer must be added")
def start_debugger_tracing(self, context):
try:
import ptvsd
ptvsd.tracing(True)
ptvsd.enable_attach(address=("localhost", ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_PORT)))
if not self.debugger_process:
env = os.environ.copy()
env['BROKER_HOST'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_BROKER_HOST))
env['BROKER_PORT'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_BROKER_PORT))
env['DEBUGGER_PORT'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_PORT))
env['AUTH_TOKEN'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_AUTH_TOKEN))
env['SESSION_NAME'] = str(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_SESSION_NAME))
if hasattr(context, 'get_remaining_time_in_millis'):
env['SESSION_TIMEOUT'] = str(context.get_remaining_time_in_millis() + int(time.time() * 1000.0))
debug_bridge_file_path = os.path.join(os.path.dirname(__file__), '../../debug/bridge.py')
self.debugger_process = subprocess.Popen(["python", debug_bridge_file_path], stdout=subprocess.PIPE,
stdin=subprocess.PIPE, env=env)
start_time = time.time()
debug_process_running = True
while time.time() < (start_time + ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_WAIT_MAX) / 1000) \
and not ptvsd.is_attached():
if self.debugger_process.poll() is None:
ptvsd.wait_for_attach(0.01)
else:
debug_process_running = False
break
if not ptvsd.is_attached():
if debug_process_running:
logger.error('Couldn\'t complete debugger handshake in {} milliseconds.' \
.format(ConfigProvider.get(config_names.THUNDRA_LAMBDA_DEBUGGER_WAIT_MAX)))
ptvsd.tracing(False)
else:
ptvsd.tracing(True)
except Exception as e:
logger.error("error while setting tracing true to debugger using ptvsd: {}".format(e))
def stop_debugger_tracing(self):
try:
import ptvsd
ptvsd.tracing(False)
from ptvsd.attach_server import debugger_attached
debugger_attached.clear()
except Exception as e:
logger.error("error while setting tracing false to debugger using ptvsd: {}".format(e))
try:
if self.debugger_process:
o, e = self.debugger_process.communicate(b"fin\n")
debug_logger("Thundra debugger process output: {}".format(o.decode("utf-8")))
self.debugger_process = None
except Exception as e:
self.debugger_process = None
logger.error("error while killing proxy process for debug: {}".format(e))
def check_and_handle_warmup_request(self, event):
# Check whether it is empty request which is used as default warmup request
if not event:
print("Received warmup request as empty message. " +
"Handling with 90 milliseconds delay ...")
time.sleep(0.1)
return True
else:
if isinstance(event, str):
# Check whether it is warmup request
if event.startswith('#warmup'):
delayTime = 90
args = event[len('#warmup'):].strip().split()
# Warmup messages are in '#warmup wait=<waitTime>' format
# Iterate over all warmup arguments
for arg in args:
argParts = arg.split('=')
# Check whether argument is in key=value format
if len(argParts) == 2:
argName = argParts[0]
argValue = argParts[1]
# Check whether argument is "wait" argument
# which specifies extra wait time before returning from request
if argName == 'wait':
waitTime = int(argValue)
delayTime += waitTime
print("Received warmup request as warmup message. " +
"Handling with " + str(delayTime) + " milliseconds delay ...")
time.sleep(delayTime / 1000)
return True
return False
def get_timeout_duration(self, context):
timeout_duration = 0
if hasattr(context, 'get_remaining_time_in_millis'):
timeout_duration = context.get_remaining_time_in_millis() - self.timeout_margin
if timeout_duration <= 0:
timeout_duration = context.get_remaining_time_in_millis() - \
constants.DEFAULT_LAMBDA_TIMEOUT_MARGIN
logger.warning('Given timeout margin is bigger than lambda timeout duration and '
'since the difference is negative, it is set to default value (' +
str(constants.DEFAULT_LAMBDA_TIMEOUT_MARGIN) + ')')
return timeout_duration / 1000.0
def timeout_handler(self, execution_context):
execution_context.timeout = True
execution_context.error = TimeoutError('Task timed out')
self.prepare_and_send_reports(execution_context) | 0.336222 | 0.042245 |
import time
from odoo.tests.common import TransactionCase
class TestHrAttendance(TransactionCase):
"""Tests for attendance date ranges validity"""
def setUp(self):
super(TestHrAttendance, self).setUp()
self.attendance = self.env["res.partner.attendance"]
self.test_partner = self.env.ref("base.partner_demo")
def test_attendance_in_before_out(self):
# Make sure check_out is before check_in
with self.assertRaises(Exception):
self.my_attend = self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 12:00"),
"check_out": time.strftime("%Y-%m-10 11:00"),
}
)
def test_attendance_no_check_out(self):
# Make sure no second attendance without check_out can be created
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 10:00"),
}
)
with self.assertRaises(Exception):
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 11:00"),
}
)
def test_check_in_while_attendance(self):
# Make sure attendance no check in while attendance is on
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 08:00"),
"check_out": time.strftime("%Y-%m-10 09:30"),
}
)
with self.assertRaises(Exception):
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 08:30"),
"check_out": time.strftime("%Y-%m-10 09:30"),
}
)
def test_new_attendances(self):
# Make sure attendance modification raises an error when it causes an overlap
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 11:00"),
"check_out": time.strftime("%Y-%m-10 12:00"),
}
)
open_attendance = self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 10:00"),
}
)
with self.assertRaises(Exception):
open_attendance.write({"check_out": time.strftime("%Y-%m-10 11:30")}) | base_attendance/tests/test_hr_attendance_constraints.py |
import time
from odoo.tests.common import TransactionCase
class TestHrAttendance(TransactionCase):
"""Tests for attendance date ranges validity"""
def setUp(self):
super(TestHrAttendance, self).setUp()
self.attendance = self.env["res.partner.attendance"]
self.test_partner = self.env.ref("base.partner_demo")
def test_attendance_in_before_out(self):
# Make sure check_out is before check_in
with self.assertRaises(Exception):
self.my_attend = self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 12:00"),
"check_out": time.strftime("%Y-%m-10 11:00"),
}
)
def test_attendance_no_check_out(self):
# Make sure no second attendance without check_out can be created
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 10:00"),
}
)
with self.assertRaises(Exception):
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 11:00"),
}
)
def test_check_in_while_attendance(self):
# Make sure attendance no check in while attendance is on
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 08:00"),
"check_out": time.strftime("%Y-%m-10 09:30"),
}
)
with self.assertRaises(Exception):
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 08:30"),
"check_out": time.strftime("%Y-%m-10 09:30"),
}
)
def test_new_attendances(self):
# Make sure attendance modification raises an error when it causes an overlap
self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 11:00"),
"check_out": time.strftime("%Y-%m-10 12:00"),
}
)
open_attendance = self.attendance.create(
{
"partner_id": self.test_partner.id,
"check_in": time.strftime("%Y-%m-10 10:00"),
}
)
with self.assertRaises(Exception):
open_attendance.write({"check_out": time.strftime("%Y-%m-10 11:30")}) | 0.496338 | 0.295573 |
from django.db import models
from apps.ventas.producto.models import Producto
from datetime import datetime
# Create your models here.
date = datetime.now()
class Proveedor(models.Model):
"""[summary]
Args:
models ([Proveedor]): [Contiene la informacion de los proveedores]
"""
nombre_proveedor = models.CharField(max_length=500, help_text="Ingrese nombre del proveedor")
direccion = models.CharField(max_length=500, help_text="Ingrese la direccion")
ruc_proveedor = models.CharField(max_length=500, default="-", help_text="Ingrese el ruc del proveedor")
telefono = models.CharField(max_length = 500, help_text="Ingrese el telefono del proveedor")
email = models.EmailField(max_length = 500, help_text = "Ingrese email del proveedor", null=True, blank=True, default="-")
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
class Meta:
verbose_name = "Proveedor"
verbose_name_plural = "Proveedores"
default_permissions = ()
permissions = (
('add_proveedor', 'Agregar Proveedor'),
('change_proveedor', 'Editar Proveedor'),
('delete_proveedor', 'Eliminar Proveedor'),
('view_proveedor', 'Listar Proveedores'))
def __str__(self):
return 'Proveedor: %s - ruc: %s' % (self.nombre_proveedor, self.ruc_proveedor)
class Pedido(models.Model):
"""[summary]
Args:
models ([Pedido]): [Contiene la informacion de los pedidos]
"""
cantidad_pedido = models.CharField(max_length=500, blank=True, null=True, default="-")
fecha_alta = models.CharField(max_length = 200, default = date.strftime("%d/%m/%Y %H:%M:%S hs"), editable = False)
pedido_cargado = models.CharField(max_length=2, default="N", blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
class Meta:
verbose_name = "Proveedor"
verbose_name_plural = "Proveedores"
default_permissions = ()
permissions = (
('add_pedido', 'Agregar Pedido'),
('change_pedido', 'Editar Pedido'),
('delete_pedido', 'Eliminar Pedido'),
('view_pedido', 'Listar Pedido'))
def obtener_dict(self):
dict = {}
dict['codigo_producto'] = self.id
dict['codigo_real'] = self.id_producto.id
dict['nombre'] = self.id_producto.nombre_producto
dict['description'] = self.id_producto.descripcion
dict['precio'] = self.id_producto.precio_compra
dict['cantidad_pedido'] = self.cantidad_pedido
return dict
def __str__(self):
return self.id_producto.nombre_producto
class PedidoCabecera(models.Model):
fecha_alta = models.CharField(max_length = 200, default = date.strftime("%d/%m/%Y"), editable = False)
pedido_cargado = models.CharField(max_length=2, default="N", blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
class Meta:
verbose_name = "Pedido Cabecera"
verbose_name_plural = "Pedido Cabeceras"
default_permissions = ()
permissions = (
('add_pedidocabecera', 'Agregar Pedido'),
('change_pedidocabecera', 'Editar Pedido'),
('delete_pedidocabecera', 'Eliminar Pedido'),
('view_pedidocabecera', 'Listar Pedido'))
def __str__(self):
return self.fecha_alta
class PedidoDetalle(models.Model):
"""Model definition for Pedido Detalle."""
id_pedido_cabecera = models.ForeignKey('PedidoCabecera', on_delete=models.CASCADE)
id_pedido = models.ForeignKey('Pedido', on_delete=models.CASCADE, null=True)
cantidad = models.IntegerField()
descripcion = models.CharField(max_length=800, blank=True)
id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
class Meta:
"""Meta definition for Pedido Detalle"""
verbose_name = 'Pedido Detalle'
verbose_name_plural = 'Pedido Detalle'
default_permissions = ()
permissions = (
('add_pedidodetalle', 'Agregar Pedido'),
('change_pedidodetalle', 'Editar Pedido'),
('delete_pedidodetalle', 'Eliminar Pedido'),
('view_pedidodetalle', 'Listar Pedido'))
def __str__(self):
"""Unicode representation of Pedido Detalle."""
pass
class Pago(models.Model):
"""[summary]
Args:
models ([Pedido]): [Contiene la informacion de los pedidos]
"""
metodo_pago = models.CharField(max_length=100)
descripcion = models.TextField()
class Meta:
verbose_name = "Pago"
verbose_name_plural = "Plural"
ESTADOS_FACTURA = [
('PENDIENTE', 'Pendiente'),
('CANCELADO', 'Cancelado'),
('FINALIZADO', 'Finalizado'),
]
class FacturaCompra(models.Model):
nro_factura = models.CharField(max_length=500, null=True)
nro_timbrado = models.CharField(max_length=500, null=True)
fecha_alta = models.CharField(max_length=500, default = date.strftime("%d/%m/%Y"), null=True)
fecha_emision_factura = models.CharField(max_length=500, null=True)
fecha_emision = models.CharField(max_length=500, null=True)
fecha_vencimiento = models.CharField(max_length=500, null=True)
tipo_factura = models.BooleanField(default=True)
estado = models.CharField(max_length=500, choices=ESTADOS_FACTURA, default=ESTADOS_FACTURA[0])
total_iva = models.IntegerField(default=0)
total = models.FloatField(default=0)
factura_cargada_producto = models.CharField(max_length=2, default="N", blank=True, null=True)
factura_cargada_pedido = models.CharField(max_length=2, default="N", blank=True, null=True)
pedidod_to_factura = models.CharField(max_length=2, default="N", blank=True, null=True)
facturado = models.CharField(max_length=2, default="N", blank=True, null=True)
factura_caja = models.CharField(max_length=2, default="N", blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
id_proveedor = models.ForeignKey('Proveedor', on_delete=models.CASCADE, null=True)
id_pedido_cabecera = models.ForeignKey('PedidoCabecera', on_delete=models.CASCADE, null=True)
def __str__(self):
return 'Factura Compra: %s - Proveedor: %s' % (self.nro_factura, self.id_proveedor)
class Meta:
verbose_name = 'Factura Compra'
verbose_name_plural = 'Facturas Compras'
default_permissions = ()
permissions = (
('add_facturacompra', 'Agregar Factura Compra'),
('change_facturacompra', 'Editar Factura Compra'),
('delete_facturacompra', 'Eliminar Factura Compra'),
('view_facturacompra', 'Listar Factura Compra'))
class FacturaDet(models.Model):
id_factura = models.ForeignKey('FacturaCompra', on_delete=models.CASCADE)
id_pedido = models.ForeignKey('Pedido', on_delete=models.CASCADE, null=True)
cantidad = models.IntegerField()
precio_compra = models.CharField(max_length=800, blank=True, null=True)
detalle_cargado_reporte = models.CharField(max_length=2, default="N", blank=True, null=True)
detalle_cargado_mes = models.CharField(max_length=2, default="N", blank=True, null=True)
descripcion = models.CharField(max_length=800, blank=True)
id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
class Meta:
ordering = ['id']
default_permissions = ()
permissions = (
('add_facturadet', 'Agregar Factura Compra'),
('change_facturadet', 'Editar Factura Compra'),
('delete_facturadet', 'Eliminar Factura Compra'),
('view_facturadet', 'Listar Factura Compra')) | sysvet/apps/compras/models.py | from django.db import models
from apps.ventas.producto.models import Producto
from datetime import datetime
# Create your models here.
date = datetime.now()
class Proveedor(models.Model):
"""[summary]
Args:
models ([Proveedor]): [Contiene la informacion de los proveedores]
"""
nombre_proveedor = models.CharField(max_length=500, help_text="Ingrese nombre del proveedor")
direccion = models.CharField(max_length=500, help_text="Ingrese la direccion")
ruc_proveedor = models.CharField(max_length=500, default="-", help_text="Ingrese el ruc del proveedor")
telefono = models.CharField(max_length = 500, help_text="Ingrese el telefono del proveedor")
email = models.EmailField(max_length = 500, help_text = "Ingrese email del proveedor", null=True, blank=True, default="-")
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
class Meta:
verbose_name = "Proveedor"
verbose_name_plural = "Proveedores"
default_permissions = ()
permissions = (
('add_proveedor', 'Agregar Proveedor'),
('change_proveedor', 'Editar Proveedor'),
('delete_proveedor', 'Eliminar Proveedor'),
('view_proveedor', 'Listar Proveedores'))
def __str__(self):
return 'Proveedor: %s - ruc: %s' % (self.nombre_proveedor, self.ruc_proveedor)
class Pedido(models.Model):
"""[summary]
Args:
models ([Pedido]): [Contiene la informacion de los pedidos]
"""
cantidad_pedido = models.CharField(max_length=500, blank=True, null=True, default="-")
fecha_alta = models.CharField(max_length = 200, default = date.strftime("%d/%m/%Y %H:%M:%S hs"), editable = False)
pedido_cargado = models.CharField(max_length=2, default="N", blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
class Meta:
verbose_name = "Proveedor"
verbose_name_plural = "Proveedores"
default_permissions = ()
permissions = (
('add_pedido', 'Agregar Pedido'),
('change_pedido', 'Editar Pedido'),
('delete_pedido', 'Eliminar Pedido'),
('view_pedido', 'Listar Pedido'))
def obtener_dict(self):
dict = {}
dict['codigo_producto'] = self.id
dict['codigo_real'] = self.id_producto.id
dict['nombre'] = self.id_producto.nombre_producto
dict['description'] = self.id_producto.descripcion
dict['precio'] = self.id_producto.precio_compra
dict['cantidad_pedido'] = self.cantidad_pedido
return dict
def __str__(self):
return self.id_producto.nombre_producto
class PedidoCabecera(models.Model):
fecha_alta = models.CharField(max_length = 200, default = date.strftime("%d/%m/%Y"), editable = False)
pedido_cargado = models.CharField(max_length=2, default="N", blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
class Meta:
verbose_name = "Pedido Cabecera"
verbose_name_plural = "Pedido Cabeceras"
default_permissions = ()
permissions = (
('add_pedidocabecera', 'Agregar Pedido'),
('change_pedidocabecera', 'Editar Pedido'),
('delete_pedidocabecera', 'Eliminar Pedido'),
('view_pedidocabecera', 'Listar Pedido'))
def __str__(self):
return self.fecha_alta
class PedidoDetalle(models.Model):
"""Model definition for Pedido Detalle."""
id_pedido_cabecera = models.ForeignKey('PedidoCabecera', on_delete=models.CASCADE)
id_pedido = models.ForeignKey('Pedido', on_delete=models.CASCADE, null=True)
cantidad = models.IntegerField()
descripcion = models.CharField(max_length=800, blank=True)
id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
class Meta:
"""Meta definition for Pedido Detalle"""
verbose_name = 'Pedido Detalle'
verbose_name_plural = 'Pedido Detalle'
default_permissions = ()
permissions = (
('add_pedidodetalle', 'Agregar Pedido'),
('change_pedidodetalle', 'Editar Pedido'),
('delete_pedidodetalle', 'Eliminar Pedido'),
('view_pedidodetalle', 'Listar Pedido'))
def __str__(self):
"""Unicode representation of Pedido Detalle."""
pass
class Pago(models.Model):
"""[summary]
Args:
models ([Pedido]): [Contiene la informacion de los pedidos]
"""
metodo_pago = models.CharField(max_length=100)
descripcion = models.TextField()
class Meta:
verbose_name = "Pago"
verbose_name_plural = "Plural"
ESTADOS_FACTURA = [
('PENDIENTE', 'Pendiente'),
('CANCELADO', 'Cancelado'),
('FINALIZADO', 'Finalizado'),
]
class FacturaCompra(models.Model):
nro_factura = models.CharField(max_length=500, null=True)
nro_timbrado = models.CharField(max_length=500, null=True)
fecha_alta = models.CharField(max_length=500, default = date.strftime("%d/%m/%Y"), null=True)
fecha_emision_factura = models.CharField(max_length=500, null=True)
fecha_emision = models.CharField(max_length=500, null=True)
fecha_vencimiento = models.CharField(max_length=500, null=True)
tipo_factura = models.BooleanField(default=True)
estado = models.CharField(max_length=500, choices=ESTADOS_FACTURA, default=ESTADOS_FACTURA[0])
total_iva = models.IntegerField(default=0)
total = models.FloatField(default=0)
factura_cargada_producto = models.CharField(max_length=2, default="N", blank=True, null=True)
factura_cargada_pedido = models.CharField(max_length=2, default="N", blank=True, null=True)
pedidod_to_factura = models.CharField(max_length=2, default="N", blank=True, null=True)
facturado = models.CharField(max_length=2, default="N", blank=True, null=True)
factura_caja = models.CharField(max_length=2, default="N", blank=True, null=True)
last_modified = models.DateTimeField(auto_now=True, blank=True)
is_active = models.CharField(max_length=2, default="S", blank=True, null=True)
id_proveedor = models.ForeignKey('Proveedor', on_delete=models.CASCADE, null=True)
id_pedido_cabecera = models.ForeignKey('PedidoCabecera', on_delete=models.CASCADE, null=True)
def __str__(self):
return 'Factura Compra: %s - Proveedor: %s' % (self.nro_factura, self.id_proveedor)
class Meta:
verbose_name = 'Factura Compra'
verbose_name_plural = 'Facturas Compras'
default_permissions = ()
permissions = (
('add_facturacompra', 'Agregar Factura Compra'),
('change_facturacompra', 'Editar Factura Compra'),
('delete_facturacompra', 'Eliminar Factura Compra'),
('view_facturacompra', 'Listar Factura Compra'))
class FacturaDet(models.Model):
id_factura = models.ForeignKey('FacturaCompra', on_delete=models.CASCADE)
id_pedido = models.ForeignKey('Pedido', on_delete=models.CASCADE, null=True)
cantidad = models.IntegerField()
precio_compra = models.CharField(max_length=800, blank=True, null=True)
detalle_cargado_reporte = models.CharField(max_length=2, default="N", blank=True, null=True)
detalle_cargado_mes = models.CharField(max_length=2, default="N", blank=True, null=True)
descripcion = models.CharField(max_length=800, blank=True)
id_producto = models.ForeignKey(Producto, on_delete=models.PROTECT, null=True)
class Meta:
ordering = ['id']
default_permissions = ()
permissions = (
('add_facturadet', 'Agregar Factura Compra'),
('change_facturadet', 'Editar Factura Compra'),
('delete_facturadet', 'Eliminar Factura Compra'),
('view_facturadet', 'Listar Factura Compra')) | 0.60964 | 0.207135 |
import numpy as np
def forward(Observation, Emission, Transition, Initial):
"""
Performs the forward algorithm for a hidden markov model:
Observation is a numpy.ndarray of shape (T,) that contains the index of
the observation
T is the number of observations
Emission is a numpy.ndarray of shape (N, M) containing the emission
probability of a specific observation given a hidden state
Emission[i, j] is the probability of observing j given the hidden
state i
N is the number of hidden states
M is the number of all possible observations
Transition is a 2D numpy.ndarray of shape (N, N) containing the transition
probabilities
Transition[i, j] is the probability of transitioning from the hidden
state i to j
Initial a numpy.ndarray of shape (N, 1) containing the probability of
starting in a particular hidden state
Returns: P, F, or None, None on failure
P is the likelihood of the observations given the model
F is a numpy.ndarray of shape (N, T) containing the forward path
probabilities
F[i, j] is the probability of being in hidden state i at time j
given the previous observations
"""
if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:
return None, None
if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
return None, None
if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:
return None, None
if Transition.shape != (Emission.shape[0], Emission.shape[0]):
return None, None
if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
return None, None
if Initial.shape != (Emission.shape[0], 1):
return None, None
if not np.sum(Emission, axis=1).all():
return None, None
if not np.sum(Transition, axis=1).all():
return None, None
if not np.sum(Initial) == 1:
return None, None
F = np.zeros((Emission.shape[0], Observation.shape[0]))
F[:, 0] = Initial.T * Emission[:, Observation[0]]
for t in range(1, Observation.shape[0]):
F[:, t] = (F[:, t - 1].dot(Transition[:, :])) * \
Emission[:, Observation[t]]
P = np.sum(F[:, -1])
return (P, F)
def backward(Observation, Emission, Transition, Initial):
"""
Performs the backward algorithm for a hidden markov model:
Observation is a numpy.ndarray of shape (T,) that contains the index of
the observation
T is the number of observations
Emission is a numpy.ndarray of shape (N, M) containing the emission
probability of a specific observation given a hidden state
Emission[i, j] is the probability of observing j given the hidden
state i
N is the number of hidden states
M is the number of all possible observations
Transition is a 2D numpy.ndarray of shape (N, N) containing the transition
probabilities
Transition[i, j] is the probability of transitioning from the hidden
state i to j
Initial a numpy.ndarray of shape (N, 1) containing the probability of
starting in a particular hidden state
Returns: P, B, or None, None on failure
Pis the likelihood of the observations given the model
B is a numpy.ndarray of shape (N, T) containing the backward path
probabilities
B[i, j] is the probability of generating the future observations
from hidden state i at time j
"""
if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:
return None, None
if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
return None, None
if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:
return None, None
if Transition.shape != (Emission.shape[0], Emission.shape[0]):
return None, None
if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
return None, None
if Initial.shape != (Emission.shape[0], 1):
return None, None
if not np.sum(Emission, axis=1).all():
return None, None
if not np.sum(Transition, axis=1).all():
return None, None
if not np.sum(Initial) == 1:
return None, None
if Transition.shape != (Emission.shape[0], Emission.shape[0]):
return None, None
if Initial.shape != (Emission.shape[0], 1):
return None, None
B = np.zeros((Emission.shape[0], Observation.shape[0]))
B[:, Observation.shape[0] - 1] += 1
for t in range(Observation.shape[0] - 2, -1, -1):
B[:, t] = (B[:, t + 1] * (Transition[:, :])
).dot(Emission[:, Observation[t + 1]])
P = np.sum(B[:, 0] * Initial.T * Emission[:, Observation[0]])
return (P, B)
def baum_welch(Observations, Transition, Emission, Initial, iterations=1000):
"""
Performs the Baum-Welch algorithm for a hidden markov model:
Observations is a numpy.ndarray of shape (T,) that contains the index of
the observation
T is the number of observations
Transition is a numpy.ndarray of shape (M, M) that contains the
initialized transition probabilities
M is the number of hidden states
Emission is a numpy.ndarray of shape (M, N) that contains the initialized
emission probabilities
N is the number of output states
Initial is a numpy.ndarray of shape (M, 1) that contains the initialized
starting probabilities
iterations is the number of times expectation-maximization should be
performed
Returns: the converged Transition, Emission, or None, None on failure
"""
N, _ = Transition.shape
T = Observations.shape[0]
for i in range(iterations):
P1, alpha = forward(Observations, Emission, Transition, Initial)
P2, beta = backward(Observations, Emission, Transition, Initial)
xi = np.zeros((N, N, T - 1))
for t in range(T - 1):
ems = Emission[:, Observations[t + 1]].T
den = np.dot(np.multiply(np.dot(alpha[:, t].T, Transition), ems),
beta[:, t + 1])
for i in range(N):
a = Transition[i]
num = alpha[i, t] * a * ems * beta[:, t + 1].T
xi[i, :, t] = num / den
gamma = np.sum(xi, axis=1)
Transition = np.sum(xi, 2) / np.sum(gamma, axis=1).reshape((-1, 1))
gamma = np.hstack((gamma, np.sum(xi[:, :, T - 2],
axis=0).reshape((-1, 1))))
den = np.sum(gamma, axis=1)
for i in range(Emission.shape[1]):
Emission[:, i] = np.sum(gamma[:, Observations == i], axis=1)
Emission = np.divide(Emission, den.reshape((-1, 1)))
return Transition, Emission | unsupervised_learning/0x02-hmm/6-baum_welch.py | import numpy as np
def forward(Observation, Emission, Transition, Initial):
"""
Performs the forward algorithm for a hidden markov model:
Observation is a numpy.ndarray of shape (T,) that contains the index of
the observation
T is the number of observations
Emission is a numpy.ndarray of shape (N, M) containing the emission
probability of a specific observation given a hidden state
Emission[i, j] is the probability of observing j given the hidden
state i
N is the number of hidden states
M is the number of all possible observations
Transition is a 2D numpy.ndarray of shape (N, N) containing the transition
probabilities
Transition[i, j] is the probability of transitioning from the hidden
state i to j
Initial a numpy.ndarray of shape (N, 1) containing the probability of
starting in a particular hidden state
Returns: P, F, or None, None on failure
P is the likelihood of the observations given the model
F is a numpy.ndarray of shape (N, T) containing the forward path
probabilities
F[i, j] is the probability of being in hidden state i at time j
given the previous observations
"""
if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:
return None, None
if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
return None, None
if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:
return None, None
if Transition.shape != (Emission.shape[0], Emission.shape[0]):
return None, None
if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
return None, None
if Initial.shape != (Emission.shape[0], 1):
return None, None
if not np.sum(Emission, axis=1).all():
return None, None
if not np.sum(Transition, axis=1).all():
return None, None
if not np.sum(Initial) == 1:
return None, None
F = np.zeros((Emission.shape[0], Observation.shape[0]))
F[:, 0] = Initial.T * Emission[:, Observation[0]]
for t in range(1, Observation.shape[0]):
F[:, t] = (F[:, t - 1].dot(Transition[:, :])) * \
Emission[:, Observation[t]]
P = np.sum(F[:, -1])
return (P, F)
def backward(Observation, Emission, Transition, Initial):
"""
Performs the backward algorithm for a hidden markov model:
Observation is a numpy.ndarray of shape (T,) that contains the index of
the observation
T is the number of observations
Emission is a numpy.ndarray of shape (N, M) containing the emission
probability of a specific observation given a hidden state
Emission[i, j] is the probability of observing j given the hidden
state i
N is the number of hidden states
M is the number of all possible observations
Transition is a 2D numpy.ndarray of shape (N, N) containing the transition
probabilities
Transition[i, j] is the probability of transitioning from the hidden
state i to j
Initial a numpy.ndarray of shape (N, 1) containing the probability of
starting in a particular hidden state
Returns: P, B, or None, None on failure
Pis the likelihood of the observations given the model
B is a numpy.ndarray of shape (N, T) containing the backward path
probabilities
B[i, j] is the probability of generating the future observations
from hidden state i at time j
"""
if not isinstance(Observation, np.ndarray) or len(Observation.shape) != 1:
return None, None
if not isinstance(Emission, np.ndarray) or len(Emission.shape) != 2:
return None, None
if not isinstance(Transition, np.ndarray) or len(Transition.shape) != 2:
return None, None
if Transition.shape != (Emission.shape[0], Emission.shape[0]):
return None, None
if not isinstance(Initial, np.ndarray) or len(Initial.shape) != 2:
return None, None
if Initial.shape != (Emission.shape[0], 1):
return None, None
if not np.sum(Emission, axis=1).all():
return None, None
if not np.sum(Transition, axis=1).all():
return None, None
if not np.sum(Initial) == 1:
return None, None
if Transition.shape != (Emission.shape[0], Emission.shape[0]):
return None, None
if Initial.shape != (Emission.shape[0], 1):
return None, None
B = np.zeros((Emission.shape[0], Observation.shape[0]))
B[:, Observation.shape[0] - 1] += 1
for t in range(Observation.shape[0] - 2, -1, -1):
B[:, t] = (B[:, t + 1] * (Transition[:, :])
).dot(Emission[:, Observation[t + 1]])
P = np.sum(B[:, 0] * Initial.T * Emission[:, Observation[0]])
return (P, B)
def baum_welch(Observations, Transition, Emission, Initial, iterations=1000):
"""
Performs the Baum-Welch algorithm for a hidden markov model:
Observations is a numpy.ndarray of shape (T,) that contains the index of
the observation
T is the number of observations
Transition is a numpy.ndarray of shape (M, M) that contains the
initialized transition probabilities
M is the number of hidden states
Emission is a numpy.ndarray of shape (M, N) that contains the initialized
emission probabilities
N is the number of output states
Initial is a numpy.ndarray of shape (M, 1) that contains the initialized
starting probabilities
iterations is the number of times expectation-maximization should be
performed
Returns: the converged Transition, Emission, or None, None on failure
"""
N, _ = Transition.shape
T = Observations.shape[0]
for i in range(iterations):
P1, alpha = forward(Observations, Emission, Transition, Initial)
P2, beta = backward(Observations, Emission, Transition, Initial)
xi = np.zeros((N, N, T - 1))
for t in range(T - 1):
ems = Emission[:, Observations[t + 1]].T
den = np.dot(np.multiply(np.dot(alpha[:, t].T, Transition), ems),
beta[:, t + 1])
for i in range(N):
a = Transition[i]
num = alpha[i, t] * a * ems * beta[:, t + 1].T
xi[i, :, t] = num / den
gamma = np.sum(xi, axis=1)
Transition = np.sum(xi, 2) / np.sum(gamma, axis=1).reshape((-1, 1))
gamma = np.hstack((gamma, np.sum(xi[:, :, T - 2],
axis=0).reshape((-1, 1))))
den = np.sum(gamma, axis=1)
for i in range(Emission.shape[1]):
Emission[:, i] = np.sum(gamma[:, Observations == i], axis=1)
Emission = np.divide(Emission, den.reshape((-1, 1)))
return Transition, Emission | 0.899723 | 0.930046 |
from pprint import pformat
from six import iteritems
import re
class V1ServiceSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {
'cluster_ip': 'str',
'external_i_ps': 'list[str]',
'external_name': 'str',
'external_traffic_policy': 'str',
'health_check_node_port': 'int',
'load_balancer_ip': 'str',
'load_balancer_source_ranges': 'list[str]',
'ports': 'list[V1ServicePort]',
'publish_not_ready_addresses': 'bool',
'selector': 'dict(str, str)',
'session_affinity': 'str',
'session_affinity_config': 'V1SessionAffinityConfig',
'type': 'str'
}
attribute_map = {
'cluster_ip': 'clusterIP',
'external_i_ps': 'externalIPs',
'external_name': 'externalName',
'external_traffic_policy': 'externalTrafficPolicy',
'health_check_node_port': 'healthCheckNodePort',
'load_balancer_ip': 'loadBalancerIP',
'load_balancer_source_ranges': 'loadBalancerSourceRanges',
'ports': 'ports',
'publish_not_ready_addresses': 'publishNotReadyAddresses',
'selector': 'selector',
'session_affinity': 'sessionAffinity',
'session_affinity_config': 'sessionAffinityConfig',
'type': 'type'
}
def __init__(self,
cluster_ip=None,
external_i_ps=None,
external_name=None,
external_traffic_policy=None,
health_check_node_port=None,
load_balancer_ip=None,
load_balancer_source_ranges=None,
ports=None,
publish_not_ready_addresses=None,
selector=None,
session_affinity=None,
session_affinity_config=None,
type=None):
"""
V1ServiceSpec - a model defined in Swagger
"""
self._cluster_ip = None
self._external_i_ps = None
self._external_name = None
self._external_traffic_policy = None
self._health_check_node_port = None
self._load_balancer_ip = None
self._load_balancer_source_ranges = None
self._ports = None
self._publish_not_ready_addresses = None
self._selector = None
self._session_affinity = None
self._session_affinity_config = None
self._type = None
self.discriminator = None
if cluster_ip is not None:
self.cluster_ip = cluster_ip
if external_i_ps is not None:
self.external_i_ps = external_i_ps
if external_name is not None:
self.external_name = external_name
if external_traffic_policy is not None:
self.external_traffic_policy = external_traffic_policy
if health_check_node_port is not None:
self.health_check_node_port = health_check_node_port
if load_balancer_ip is not None:
self.load_balancer_ip = load_balancer_ip
if load_balancer_source_ranges is not None:
self.load_balancer_source_ranges = load_balancer_source_ranges
if ports is not None:
self.ports = ports
if publish_not_ready_addresses is not None:
self.publish_not_ready_addresses = publish_not_ready_addresses
if selector is not None:
self.selector = selector
if session_affinity is not None:
self.session_affinity = session_affinity
if session_affinity_config is not None:
self.session_affinity_config = session_affinity_config
if type is not None:
self.type = type
@property
def cluster_ip(self):
"""
Gets the cluster_ip of this V1ServiceSpec.
clusterIP is the IP address of the service and is usually assigned
randomly by the master. If an address is specified manually and is not
in use by others, it will be allocated to the service; otherwise,
creation of the service will fail. This field can not be changed through
updates. Valid values are \"None\", empty string (\"\"), or a valid IP
address. \"None\" can be specified for headless services when proxying
is not required. Only applies to types ClusterIP, NodePort, and
LoadBalancer. Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:return: The cluster_ip of this V1ServiceSpec.
:rtype: str
"""
return self._cluster_ip
@cluster_ip.setter
def cluster_ip(self, cluster_ip):
"""
Sets the cluster_ip of this V1ServiceSpec.
clusterIP is the IP address of the service and is usually assigned
randomly by the master. If an address is specified manually and is not
in use by others, it will be allocated to the service; otherwise,
creation of the service will fail. This field can not be changed through
updates. Valid values are \"None\", empty string (\"\"), or a valid IP
address. \"None\" can be specified for headless services when proxying
is not required. Only applies to types ClusterIP, NodePort, and
LoadBalancer. Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:param cluster_ip: The cluster_ip of this V1ServiceSpec.
:type: str
"""
self._cluster_ip = cluster_ip
@property
def external_i_ps(self):
"""
Gets the external_i_ps of this V1ServiceSpec.
externalIPs is a list of IP addresses for which nodes in the cluster
will also accept traffic for this service. These IPs are not managed by
Kubernetes. The user is responsible for ensuring that traffic arrives
at a node with this IP. A common example is external load-balancers
that are not part of the Kubernetes system.
:return: The external_i_ps of this V1ServiceSpec.
:rtype: list[str]
"""
return self._external_i_ps
@external_i_ps.setter
def external_i_ps(self, external_i_ps):
"""
Sets the external_i_ps of this V1ServiceSpec.
externalIPs is a list of IP addresses for which nodes in the cluster
will also accept traffic for this service. These IPs are not managed by
Kubernetes. The user is responsible for ensuring that traffic arrives
at a node with this IP. A common example is external load-balancers
that are not part of the Kubernetes system.
:param external_i_ps: The external_i_ps of this V1ServiceSpec.
:type: list[str]
"""
self._external_i_ps = external_i_ps
@property
def external_name(self):
"""
Gets the external_name of this V1ServiceSpec.
externalName is the external reference that kubedns or equivalent will
return as a CNAME record for this service. No proxying will be involved.
Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
and requires Type to be ExternalName.
:return: The external_name of this V1ServiceSpec.
:rtype: str
"""
return self._external_name
@external_name.setter
def external_name(self, external_name):
"""
Sets the external_name of this V1ServiceSpec.
externalName is the external reference that kubedns or equivalent will
return as a CNAME record for this service. No proxying will be involved.
Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
and requires Type to be ExternalName.
:param external_name: The external_name of this V1ServiceSpec.
:type: str
"""
self._external_name = external_name
@property
def external_traffic_policy(self):
"""
Gets the external_traffic_policy of this V1ServiceSpec.
externalTrafficPolicy denotes if this Service desires to route external
traffic to node-local or cluster-wide endpoints. \"Local\" preserves the
client source IP and avoids a second hop for LoadBalancer and Nodeport
type services, but risks potentially imbalanced traffic spreading.
\"Cluster\" obscures the client source IP and may cause a second hop to
another node, but should have good overall load-spreading.
:return: The external_traffic_policy of this V1ServiceSpec.
:rtype: str
"""
return self._external_traffic_policy
@external_traffic_policy.setter
def external_traffic_policy(self, external_traffic_policy):
"""
Sets the external_traffic_policy of this V1ServiceSpec.
externalTrafficPolicy denotes if this Service desires to route external
traffic to node-local or cluster-wide endpoints. \"Local\" preserves the
client source IP and avoids a second hop for LoadBalancer and Nodeport
type services, but risks potentially imbalanced traffic spreading.
\"Cluster\" obscures the client source IP and may cause a second hop to
another node, but should have good overall load-spreading.
:param external_traffic_policy: The external_traffic_policy of this
V1ServiceSpec.
:type: str
"""
self._external_traffic_policy = external_traffic_policy
@property
def health_check_node_port(self):
"""
Gets the health_check_node_port of this V1ServiceSpec.
healthCheckNodePort specifies the healthcheck nodePort for the service.
If not specified, HealthCheckNodePort is created by the service api
backend with the allocated nodePort. Will use user-specified nodePort
value if specified by the client. Only effects when Type is set to
LoadBalancer and ExternalTrafficPolicy is set to Local.
:return: The health_check_node_port of this V1ServiceSpec.
:rtype: int
"""
return self._health_check_node_port
@health_check_node_port.setter
def health_check_node_port(self, health_check_node_port):
"""
Sets the health_check_node_port of this V1ServiceSpec.
healthCheckNodePort specifies the healthcheck nodePort for the service.
If not specified, HealthCheckNodePort is created by the service api
backend with the allocated nodePort. Will use user-specified nodePort
value if specified by the client. Only effects when Type is set to
LoadBalancer and ExternalTrafficPolicy is set to Local.
:param health_check_node_port: The health_check_node_port of this
V1ServiceSpec.
:type: int
"""
self._health_check_node_port = health_check_node_port
@property
def load_balancer_ip(self):
"""
Gets the load_balancer_ip of this V1ServiceSpec.
Only applies to Service Type: LoadBalancer LoadBalancer will get created
with the IP specified in this field. This feature depends on whether the
underlying cloud-provider supports specifying the loadBalancerIP when a
load balancer is created. This field will be ignored if the
cloud-provider does not support the feature.
:return: The load_balancer_ip of this V1ServiceSpec.
:rtype: str
"""
return self._load_balancer_ip
@load_balancer_ip.setter
def load_balancer_ip(self, load_balancer_ip):
"""
Sets the load_balancer_ip of this V1ServiceSpec.
Only applies to Service Type: LoadBalancer LoadBalancer will get created
with the IP specified in this field. This feature depends on whether the
underlying cloud-provider supports specifying the loadBalancerIP when a
load balancer is created. This field will be ignored if the
cloud-provider does not support the feature.
:param load_balancer_ip: The load_balancer_ip of this V1ServiceSpec.
:type: str
"""
self._load_balancer_ip = load_balancer_ip
@property
def load_balancer_source_ranges(self):
"""
Gets the load_balancer_source_ranges of this V1ServiceSpec.
If specified and supported by the platform, this will restrict traffic
through the cloud-provider load-balancer will be restricted to the
specified client IPs. This field will be ignored if the cloud-provider
does not support the feature.\" More info:
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
:return: The load_balancer_source_ranges of this V1ServiceSpec.
:rtype: list[str]
"""
return self._load_balancer_source_ranges
@load_balancer_source_ranges.setter
def load_balancer_source_ranges(self, load_balancer_source_ranges):
"""
Sets the load_balancer_source_ranges of this V1ServiceSpec.
If specified and supported by the platform, this will restrict traffic
through the cloud-provider load-balancer will be restricted to the
specified client IPs. This field will be ignored if the cloud-provider
does not support the feature.\" More info:
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
:param load_balancer_source_ranges: The load_balancer_source_ranges of
this V1ServiceSpec.
:type: list[str]
"""
self._load_balancer_source_ranges = load_balancer_source_ranges
@property
def ports(self):
"""
Gets the ports of this V1ServiceSpec.
The list of ports that are exposed by this service. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:return: The ports of this V1ServiceSpec.
:rtype: list[V1ServicePort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1ServiceSpec.
The list of ports that are exposed by this service. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:param ports: The ports of this V1ServiceSpec.
:type: list[V1ServicePort]
"""
self._ports = ports
@property
def publish_not_ready_addresses(self):
"""
Gets the publish_not_ready_addresses of this V1ServiceSpec.
publishNotReadyAddresses, when set to true, indicates that DNS
implementations must publish the notReadyAddresses of subsets for the
Endpoints associated with the Service. The default value is false. The
primary use case for setting this field is to use a StatefulSet's
Headless Service to propagate SRV records for its Pods without respect
to their readiness for purpose of peer discovery.
:return: The publish_not_ready_addresses of this V1ServiceSpec.
:rtype: bool
"""
return self._publish_not_ready_addresses
@publish_not_ready_addresses.setter
def publish_not_ready_addresses(self, publish_not_ready_addresses):
"""
Sets the publish_not_ready_addresses of this V1ServiceSpec.
publishNotReadyAddresses, when set to true, indicates that DNS
implementations must publish the notReadyAddresses of subsets for the
Endpoints associated with the Service. The default value is false. The
primary use case for setting this field is to use a StatefulSet's
Headless Service to propagate SRV records for its Pods without respect
to their readiness for purpose of peer discovery.
:param publish_not_ready_addresses: The publish_not_ready_addresses of
this V1ServiceSpec.
:type: bool
"""
self._publish_not_ready_addresses = publish_not_ready_addresses
@property
def selector(self):
"""
Gets the selector of this V1ServiceSpec.
Route service traffic to pods with label keys and values matching this
selector. If empty or not present, the service is assumed to have an
external process managing its endpoints, which Kubernetes will not
modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/
:return: The selector of this V1ServiceSpec.
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this V1ServiceSpec.
Route service traffic to pods with label keys and values matching this
selector. If empty or not present, the service is assumed to have an
external process managing its endpoints, which Kubernetes will not
modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/
:param selector: The selector of this V1ServiceSpec.
:type: dict(str, str)
"""
self._selector = selector
@property
def session_affinity(self):
"""
Gets the session_affinity of this V1ServiceSpec.
Supports \"ClientIP\" and \"None\". Used to maintain session affinity.
Enable client IP based session affinity. Must be ClientIP or None.
Defaults to None. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:return: The session_affinity of this V1ServiceSpec.
:rtype: str
"""
return self._session_affinity
@session_affinity.setter
def session_affinity(self, session_affinity):
"""
Sets the session_affinity of this V1ServiceSpec.
Supports \"ClientIP\" and \"None\". Used to maintain session affinity.
Enable client IP based session affinity. Must be ClientIP or None.
Defaults to None. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:param session_affinity: The session_affinity of this V1ServiceSpec.
:type: str
"""
self._session_affinity = session_affinity
@property
def session_affinity_config(self):
"""
Gets the session_affinity_config of this V1ServiceSpec.
sessionAffinityConfig contains the configurations of session affinity.
:return: The session_affinity_config of this V1ServiceSpec.
:rtype: V1SessionAffinityConfig
"""
return self._session_affinity_config
@session_affinity_config.setter
def session_affinity_config(self, session_affinity_config):
"""
Sets the session_affinity_config of this V1ServiceSpec.
sessionAffinityConfig contains the configurations of session affinity.
:param session_affinity_config: The session_affinity_config of this
V1ServiceSpec.
:type: V1SessionAffinityConfig
"""
self._session_affinity_config = session_affinity_config
@property
def type(self):
"""
Gets the type of this V1ServiceSpec.
type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
\"ExternalName\" maps to the specified externalName. \"ClusterIP\"
allocates a cluster-internal IP address for load-balancing to endpoints.
Endpoints are determined by the selector or if that is not specified, by
manual construction of an Endpoints object. If clusterIP is \"None\", no
virtual IP is allocated and the endpoints are published as a set of
endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and
allocates a port on every node which routes to the clusterIP.
\"LoadBalancer\" builds on NodePort and creates an external
load-balancer (if supported in the current cloud) which routes to the
clusterIP. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
:return: The type of this V1ServiceSpec.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1ServiceSpec.
type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
\"ExternalName\" maps to the specified externalName. \"ClusterIP\"
allocates a cluster-internal IP address for load-balancing to endpoints.
Endpoints are determined by the selector or if that is not specified, by
manual construction of an Endpoints object. If clusterIP is \"None\", no
virtual IP is allocated and the endpoints are published as a set of
endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and
allocates a port on every node which routes to the clusterIP.
\"LoadBalancer\" builds on NodePort and creates an external
load-balancer (if supported in the current cloud) which routes to the
clusterIP. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
:param type: The type of this V1ServiceSpec.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ServiceSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | mac/google-cloud-sdk/lib/third_party/kubernetes/client/models/v1_service_spec.py | from pprint import pformat
from six import iteritems
import re
class V1ServiceSpec(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name and the value is attribute
type.
attribute_map (dict): The key is attribute name and the value is json key
in definition.
"""
swagger_types = {
'cluster_ip': 'str',
'external_i_ps': 'list[str]',
'external_name': 'str',
'external_traffic_policy': 'str',
'health_check_node_port': 'int',
'load_balancer_ip': 'str',
'load_balancer_source_ranges': 'list[str]',
'ports': 'list[V1ServicePort]',
'publish_not_ready_addresses': 'bool',
'selector': 'dict(str, str)',
'session_affinity': 'str',
'session_affinity_config': 'V1SessionAffinityConfig',
'type': 'str'
}
attribute_map = {
'cluster_ip': 'clusterIP',
'external_i_ps': 'externalIPs',
'external_name': 'externalName',
'external_traffic_policy': 'externalTrafficPolicy',
'health_check_node_port': 'healthCheckNodePort',
'load_balancer_ip': 'loadBalancerIP',
'load_balancer_source_ranges': 'loadBalancerSourceRanges',
'ports': 'ports',
'publish_not_ready_addresses': 'publishNotReadyAddresses',
'selector': 'selector',
'session_affinity': 'sessionAffinity',
'session_affinity_config': 'sessionAffinityConfig',
'type': 'type'
}
def __init__(self,
cluster_ip=None,
external_i_ps=None,
external_name=None,
external_traffic_policy=None,
health_check_node_port=None,
load_balancer_ip=None,
load_balancer_source_ranges=None,
ports=None,
publish_not_ready_addresses=None,
selector=None,
session_affinity=None,
session_affinity_config=None,
type=None):
"""
V1ServiceSpec - a model defined in Swagger
"""
self._cluster_ip = None
self._external_i_ps = None
self._external_name = None
self._external_traffic_policy = None
self._health_check_node_port = None
self._load_balancer_ip = None
self._load_balancer_source_ranges = None
self._ports = None
self._publish_not_ready_addresses = None
self._selector = None
self._session_affinity = None
self._session_affinity_config = None
self._type = None
self.discriminator = None
if cluster_ip is not None:
self.cluster_ip = cluster_ip
if external_i_ps is not None:
self.external_i_ps = external_i_ps
if external_name is not None:
self.external_name = external_name
if external_traffic_policy is not None:
self.external_traffic_policy = external_traffic_policy
if health_check_node_port is not None:
self.health_check_node_port = health_check_node_port
if load_balancer_ip is not None:
self.load_balancer_ip = load_balancer_ip
if load_balancer_source_ranges is not None:
self.load_balancer_source_ranges = load_balancer_source_ranges
if ports is not None:
self.ports = ports
if publish_not_ready_addresses is not None:
self.publish_not_ready_addresses = publish_not_ready_addresses
if selector is not None:
self.selector = selector
if session_affinity is not None:
self.session_affinity = session_affinity
if session_affinity_config is not None:
self.session_affinity_config = session_affinity_config
if type is not None:
self.type = type
@property
def cluster_ip(self):
"""
Gets the cluster_ip of this V1ServiceSpec.
clusterIP is the IP address of the service and is usually assigned
randomly by the master. If an address is specified manually and is not
in use by others, it will be allocated to the service; otherwise,
creation of the service will fail. This field can not be changed through
updates. Valid values are \"None\", empty string (\"\"), or a valid IP
address. \"None\" can be specified for headless services when proxying
is not required. Only applies to types ClusterIP, NodePort, and
LoadBalancer. Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:return: The cluster_ip of this V1ServiceSpec.
:rtype: str
"""
return self._cluster_ip
@cluster_ip.setter
def cluster_ip(self, cluster_ip):
"""
Sets the cluster_ip of this V1ServiceSpec.
clusterIP is the IP address of the service and is usually assigned
randomly by the master. If an address is specified manually and is not
in use by others, it will be allocated to the service; otherwise,
creation of the service will fail. This field can not be changed through
updates. Valid values are \"None\", empty string (\"\"), or a valid IP
address. \"None\" can be specified for headless services when proxying
is not required. Only applies to types ClusterIP, NodePort, and
LoadBalancer. Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:param cluster_ip: The cluster_ip of this V1ServiceSpec.
:type: str
"""
self._cluster_ip = cluster_ip
@property
def external_i_ps(self):
"""
Gets the external_i_ps of this V1ServiceSpec.
externalIPs is a list of IP addresses for which nodes in the cluster
will also accept traffic for this service. These IPs are not managed by
Kubernetes. The user is responsible for ensuring that traffic arrives
at a node with this IP. A common example is external load-balancers
that are not part of the Kubernetes system.
:return: The external_i_ps of this V1ServiceSpec.
:rtype: list[str]
"""
return self._external_i_ps
@external_i_ps.setter
def external_i_ps(self, external_i_ps):
"""
Sets the external_i_ps of this V1ServiceSpec.
externalIPs is a list of IP addresses for which nodes in the cluster
will also accept traffic for this service. These IPs are not managed by
Kubernetes. The user is responsible for ensuring that traffic arrives
at a node with this IP. A common example is external load-balancers
that are not part of the Kubernetes system.
:param external_i_ps: The external_i_ps of this V1ServiceSpec.
:type: list[str]
"""
self._external_i_ps = external_i_ps
@property
def external_name(self):
"""
Gets the external_name of this V1ServiceSpec.
externalName is the external reference that kubedns or equivalent will
return as a CNAME record for this service. No proxying will be involved.
Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
and requires Type to be ExternalName.
:return: The external_name of this V1ServiceSpec.
:rtype: str
"""
return self._external_name
@external_name.setter
def external_name(self, external_name):
"""
Sets the external_name of this V1ServiceSpec.
externalName is the external reference that kubedns or equivalent will
return as a CNAME record for this service. No proxying will be involved.
Must be a valid RFC-1123 hostname (https://tools.ietf.org/html/rfc1123)
and requires Type to be ExternalName.
:param external_name: The external_name of this V1ServiceSpec.
:type: str
"""
self._external_name = external_name
@property
def external_traffic_policy(self):
"""
Gets the external_traffic_policy of this V1ServiceSpec.
externalTrafficPolicy denotes if this Service desires to route external
traffic to node-local or cluster-wide endpoints. \"Local\" preserves the
client source IP and avoids a second hop for LoadBalancer and Nodeport
type services, but risks potentially imbalanced traffic spreading.
\"Cluster\" obscures the client source IP and may cause a second hop to
another node, but should have good overall load-spreading.
:return: The external_traffic_policy of this V1ServiceSpec.
:rtype: str
"""
return self._external_traffic_policy
@external_traffic_policy.setter
def external_traffic_policy(self, external_traffic_policy):
"""
Sets the external_traffic_policy of this V1ServiceSpec.
externalTrafficPolicy denotes if this Service desires to route external
traffic to node-local or cluster-wide endpoints. \"Local\" preserves the
client source IP and avoids a second hop for LoadBalancer and Nodeport
type services, but risks potentially imbalanced traffic spreading.
\"Cluster\" obscures the client source IP and may cause a second hop to
another node, but should have good overall load-spreading.
:param external_traffic_policy: The external_traffic_policy of this
V1ServiceSpec.
:type: str
"""
self._external_traffic_policy = external_traffic_policy
@property
def health_check_node_port(self):
"""
Gets the health_check_node_port of this V1ServiceSpec.
healthCheckNodePort specifies the healthcheck nodePort for the service.
If not specified, HealthCheckNodePort is created by the service api
backend with the allocated nodePort. Will use user-specified nodePort
value if specified by the client. Only effects when Type is set to
LoadBalancer and ExternalTrafficPolicy is set to Local.
:return: The health_check_node_port of this V1ServiceSpec.
:rtype: int
"""
return self._health_check_node_port
@health_check_node_port.setter
def health_check_node_port(self, health_check_node_port):
"""
Sets the health_check_node_port of this V1ServiceSpec.
healthCheckNodePort specifies the healthcheck nodePort for the service.
If not specified, HealthCheckNodePort is created by the service api
backend with the allocated nodePort. Will use user-specified nodePort
value if specified by the client. Only effects when Type is set to
LoadBalancer and ExternalTrafficPolicy is set to Local.
:param health_check_node_port: The health_check_node_port of this
V1ServiceSpec.
:type: int
"""
self._health_check_node_port = health_check_node_port
@property
def load_balancer_ip(self):
"""
Gets the load_balancer_ip of this V1ServiceSpec.
Only applies to Service Type: LoadBalancer LoadBalancer will get created
with the IP specified in this field. This feature depends on whether the
underlying cloud-provider supports specifying the loadBalancerIP when a
load balancer is created. This field will be ignored if the
cloud-provider does not support the feature.
:return: The load_balancer_ip of this V1ServiceSpec.
:rtype: str
"""
return self._load_balancer_ip
@load_balancer_ip.setter
def load_balancer_ip(self, load_balancer_ip):
"""
Sets the load_balancer_ip of this V1ServiceSpec.
Only applies to Service Type: LoadBalancer LoadBalancer will get created
with the IP specified in this field. This feature depends on whether the
underlying cloud-provider supports specifying the loadBalancerIP when a
load balancer is created. This field will be ignored if the
cloud-provider does not support the feature.
:param load_balancer_ip: The load_balancer_ip of this V1ServiceSpec.
:type: str
"""
self._load_balancer_ip = load_balancer_ip
@property
def load_balancer_source_ranges(self):
"""
Gets the load_balancer_source_ranges of this V1ServiceSpec.
If specified and supported by the platform, this will restrict traffic
through the cloud-provider load-balancer will be restricted to the
specified client IPs. This field will be ignored if the cloud-provider
does not support the feature.\" More info:
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
:return: The load_balancer_source_ranges of this V1ServiceSpec.
:rtype: list[str]
"""
return self._load_balancer_source_ranges
@load_balancer_source_ranges.setter
def load_balancer_source_ranges(self, load_balancer_source_ranges):
"""
Sets the load_balancer_source_ranges of this V1ServiceSpec.
If specified and supported by the platform, this will restrict traffic
through the cloud-provider load-balancer will be restricted to the
specified client IPs. This field will be ignored if the cloud-provider
does not support the feature.\" More info:
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/
:param load_balancer_source_ranges: The load_balancer_source_ranges of
this V1ServiceSpec.
:type: list[str]
"""
self._load_balancer_source_ranges = load_balancer_source_ranges
@property
def ports(self):
"""
Gets the ports of this V1ServiceSpec.
The list of ports that are exposed by this service. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:return: The ports of this V1ServiceSpec.
:rtype: list[V1ServicePort]
"""
return self._ports
@ports.setter
def ports(self, ports):
"""
Sets the ports of this V1ServiceSpec.
The list of ports that are exposed by this service. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:param ports: The ports of this V1ServiceSpec.
:type: list[V1ServicePort]
"""
self._ports = ports
@property
def publish_not_ready_addresses(self):
"""
Gets the publish_not_ready_addresses of this V1ServiceSpec.
publishNotReadyAddresses, when set to true, indicates that DNS
implementations must publish the notReadyAddresses of subsets for the
Endpoints associated with the Service. The default value is false. The
primary use case for setting this field is to use a StatefulSet's
Headless Service to propagate SRV records for its Pods without respect
to their readiness for purpose of peer discovery.
:return: The publish_not_ready_addresses of this V1ServiceSpec.
:rtype: bool
"""
return self._publish_not_ready_addresses
@publish_not_ready_addresses.setter
def publish_not_ready_addresses(self, publish_not_ready_addresses):
"""
Sets the publish_not_ready_addresses of this V1ServiceSpec.
publishNotReadyAddresses, when set to true, indicates that DNS
implementations must publish the notReadyAddresses of subsets for the
Endpoints associated with the Service. The default value is false. The
primary use case for setting this field is to use a StatefulSet's
Headless Service to propagate SRV records for its Pods without respect
to their readiness for purpose of peer discovery.
:param publish_not_ready_addresses: The publish_not_ready_addresses of
this V1ServiceSpec.
:type: bool
"""
self._publish_not_ready_addresses = publish_not_ready_addresses
@property
def selector(self):
"""
Gets the selector of this V1ServiceSpec.
Route service traffic to pods with label keys and values matching this
selector. If empty or not present, the service is assumed to have an
external process managing its endpoints, which Kubernetes will not
modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/
:return: The selector of this V1ServiceSpec.
:rtype: dict(str, str)
"""
return self._selector
@selector.setter
def selector(self, selector):
"""
Sets the selector of this V1ServiceSpec.
Route service traffic to pods with label keys and values matching this
selector. If empty or not present, the service is assumed to have an
external process managing its endpoints, which Kubernetes will not
modify. Only applies to types ClusterIP, NodePort, and LoadBalancer.
Ignored if type is ExternalName. More info:
https://kubernetes.io/docs/concepts/services-networking/service/
:param selector: The selector of this V1ServiceSpec.
:type: dict(str, str)
"""
self._selector = selector
@property
def session_affinity(self):
"""
Gets the session_affinity of this V1ServiceSpec.
Supports \"ClientIP\" and \"None\". Used to maintain session affinity.
Enable client IP based session affinity. Must be ClientIP or None.
Defaults to None. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:return: The session_affinity of this V1ServiceSpec.
:rtype: str
"""
return self._session_affinity
@session_affinity.setter
def session_affinity(self, session_affinity):
"""
Sets the session_affinity of this V1ServiceSpec.
Supports \"ClientIP\" and \"None\". Used to maintain session affinity.
Enable client IP based session affinity. Must be ClientIP or None.
Defaults to None. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies
:param session_affinity: The session_affinity of this V1ServiceSpec.
:type: str
"""
self._session_affinity = session_affinity
@property
def session_affinity_config(self):
"""
Gets the session_affinity_config of this V1ServiceSpec.
sessionAffinityConfig contains the configurations of session affinity.
:return: The session_affinity_config of this V1ServiceSpec.
:rtype: V1SessionAffinityConfig
"""
return self._session_affinity_config
@session_affinity_config.setter
def session_affinity_config(self, session_affinity_config):
"""
Sets the session_affinity_config of this V1ServiceSpec.
sessionAffinityConfig contains the configurations of session affinity.
:param session_affinity_config: The session_affinity_config of this
V1ServiceSpec.
:type: V1SessionAffinityConfig
"""
self._session_affinity_config = session_affinity_config
@property
def type(self):
"""
Gets the type of this V1ServiceSpec.
type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
\"ExternalName\" maps to the specified externalName. \"ClusterIP\"
allocates a cluster-internal IP address for load-balancing to endpoints.
Endpoints are determined by the selector or if that is not specified, by
manual construction of an Endpoints object. If clusterIP is \"None\", no
virtual IP is allocated and the endpoints are published as a set of
endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and
allocates a port on every node which routes to the clusterIP.
\"LoadBalancer\" builds on NodePort and creates an external
load-balancer (if supported in the current cloud) which routes to the
clusterIP. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
:return: The type of this V1ServiceSpec.
:rtype: str
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this V1ServiceSpec.
type determines how the Service is exposed. Defaults to ClusterIP. Valid
options are ExternalName, ClusterIP, NodePort, and LoadBalancer.
\"ExternalName\" maps to the specified externalName. \"ClusterIP\"
allocates a cluster-internal IP address for load-balancing to endpoints.
Endpoints are determined by the selector or if that is not specified, by
manual construction of an Endpoints object. If clusterIP is \"None\", no
virtual IP is allocated and the endpoints are published as a set of
endpoints rather than a stable IP. \"NodePort\" builds on ClusterIP and
allocates a port on every node which routes to the clusterIP.
\"LoadBalancer\" builds on NodePort and creates an external
load-balancer (if supported in the current cloud) which routes to the
clusterIP. More info:
https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types
:param type: The type of this V1ServiceSpec.
:type: str
"""
self._type = type
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(
map(lambda x: x.to_dict() if hasattr(x, 'to_dict') else x, value))
elif hasattr(value, 'to_dict'):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(
map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], 'to_dict') else item, value.items()))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, V1ServiceSpec):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other | 0.680135 | 0.115761 |
from __future__ import print_function

import copy
import functools
import os
import shutil
import sys

import mock

from chromite.cli import command_unittest
from chromite.cli.cros import cros_chrome_sdk
from chromite.lib import cache
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import partial_mock

from gn_helpers import gn_helpers
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# pylint: disable=protected-access
class MockChromeSDKCommand(command_unittest.MockCommand):
  """Mock out the build command."""
  TARGET = 'chromite.cli.cros.cros_chrome_sdk.ChromeSDKCommand'
  TARGET_CLASS = cros_chrome_sdk.ChromeSDKCommand
  COMMAND = 'chrome-sdk'
  ATTRS = (('_GOMA_DOWNLOAD_URL', '_SetupEnvironment') +
           command_unittest.MockCommand.ATTRS)
  _GOMA_DOWNLOAD_URL = 'Invalid URL'

  def __init__(self, *args, **kwargs):
    command_unittest.MockCommand.__init__(self, *args, **kwargs)
    # Snapshot of the environment captured by the last _SetupEnvironment call.
    self.env = None

  def _SetupEnvironment(self, *args, **kwargs):
    """Delegates to the real implementation, capturing the resulting env."""
    result = self.backup['_SetupEnvironment'](*args, **kwargs)
    # Deep-copy so later mutations by the caller don't alter the snapshot.
    self.env = copy.deepcopy(result)
    return result
class ParserTest(cros_test_lib.MockTempDirTestCase):
  """Test the parser."""

  def testNormal(self):
    """Tests that our example parser works normally."""
    cmd_args = ['--board', SDKFetcherMock.BOARD]
    base = ['--cache-dir', self.tempdir]
    with MockChromeSDKCommand(cmd_args, base_args=base) as cmd_mock:
      opts = cmd_mock.inst.options
      self.assertEqual(opts.cache_dir, self.tempdir)
      self.assertEqual(opts.board, SDKFetcherMock.BOARD)

  def testVersion(self):
    """Tests that a platform version is allowed."""
    platform_version = '1234.0.0'
    cmd_args = ['--board', SDKFetcherMock.BOARD, '--version', platform_version]
    with MockChromeSDKCommand(cmd_args) as cmd_mock:
      self.assertEqual(cmd_mock.inst.options.version, platform_version)

  def testFullVersion(self):
    """Tests that a full version is allowed."""
    full_version = 'R56-1234.0.0'
    cmd_args = ['--board', SDKFetcherMock.BOARD, '--version', full_version]
    with MockChromeSDKCommand(cmd_args) as cmd_mock:
      self.assertEqual(cmd_mock.inst.options.version, full_version)
def _GSCopyMock(_self, path, dest, **_kwargs):
  """Used to simulate a GS Copy operation."""
  # Stage an empty file with the source's basename, then move it to |dest|.
  with osutils.TempDir() as staging_dir:
    staged_file = os.path.join(staging_dir, os.path.basename(path))
    osutils.Touch(staged_file)
    shutil.move(staged_file, dest)
def _DependencyMockCtx(f):
"""Attribute that ensures dependency PartialMocks are started.
Since PartialMock does not support nested mocking, we need to first call
stop() on the outer level PartialMock (which is passed in to us). We then
re-start() the outer level upon exiting the context.
"""
def new_f(self, *args, **kwargs):
if not self.entered:
try:
self.entered = True
# Temporarily disable outer GSContext mock before starting our mock.
# TODO(rcui): Generalize this attribute and include in partial_mock.py.
for emock in self.external_mocks:
emock.stop()
with self.gs_mock:
return f(self, *args, **kwargs)
finally:
self.entered = False
for emock in self.external_mocks:
emock.start()
else:
return f(self, *args, **kwargs)
return new_f
class SDKFetcherMock(partial_mock.PartialMock):
  """Provides mocking functionality for SDKFetcher."""

  TARGET = 'chromite.cli.cros.cros_chrome_sdk.SDKFetcher'
  # Methods replaced on the target.  Each mock below is wrapped with
  # _DependencyMockCtx so the external mocks are paused while it runs.
  ATTRS = ('__init__', 'GetFullVersion', '_GetMetadata', '_UpdateTarball',
           '_GetManifest', 'UpdateDefaultVersion', '_GetTarballCacheKey')

  # Canned metadata JSON served by _GetMetadata.
  FAKE_METADATA = """
{
  "boards": ["eve"],
  "cros-version": "25.3543.2",
  "metadata-version": "1",
  "bot-hostname": "build82-m2.golo.chromium.org",
  "bot-config": "eve-release",
  "toolchain-tuple": ["i686-pc-linux-gnu"],
  "toolchain-url": "2013/01/%(target)s-2013.01.23.003823.tar.xz",
  "sdk-version": "2013.01.23.003823"
}"""

  BOARD = 'eve'
  BOARDS = ['amd64-generic', 'arm-generic']
  VERSION = '4567.8.9'

  def __init__(self, external_mocks=None):
    """Initializes the mock.

    Args:
      external_mocks: A list of already started PartialMock/patcher instances.
        stop() will be called on each element every time execution enters one
        of our mocked-out methods, and start() called on it once execution
        leaves the mocked-out method.
    """
    partial_mock.PartialMock.__init__(self)
    self.external_mocks = external_mocks or []
    # Re-entrancy guard used by _DependencyMockCtx.
    self.entered = False
    self.gs_mock = gs_unittest.GSContextMock()
    self.gs_mock.SetDefaultCmdResult()
    self.env = None
    # Maps component name -> fake cache-key hash; see _GetTarballCacheKey.
    self.tarball_cache_key_map = {}

  @_DependencyMockCtx
  def _target__init__(self, inst, *args, **kwargs):
    # Run the real __init__, then sanity-check that tests never point the
    # SDK cache at a location outside /tmp.
    self.backup['__init__'](inst, *args, **kwargs)
    if not inst.cache_base.startswith('/tmp'):
      raise AssertionError('For testing, SDKFetcher cache_dir needs to be a '
                           'dir under /tmp')

  @_DependencyMockCtx
  def UpdateDefaultVersion(self, inst, *_args, **_kwargs):
    """Pins the default SDK version to self.VERSION without hitting GS."""
    inst._SetDefaultVersion(self.VERSION)
    return self.VERSION, True

  @_DependencyMockCtx
  def _UpdateTarball(self, inst, *args, **kwargs):
    """Runs the real tarball update with GS copies and untarring stubbed."""
    with mock.patch.object(gs.GSContext, 'Copy', autospec=True,
                           side_effect=_GSCopyMock):
      with mock.patch.object(cache, 'Untar'):
        return self.backup['_UpdateTarball'](inst, *args, **kwargs)

  @_DependencyMockCtx
  def GetFullVersion(self, _inst, version):
    """Returns a canned full version ('R26-<version>') for any input."""
    return 'R26-%s' % version

  @_DependencyMockCtx
  def _GetMetadata(self, inst, *args, **kwargs):
    """Runs the real metadata fetch against canned FAKE_METADATA output."""
    self.gs_mock.SetDefaultCmdResult()
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/%s' % constants.METADATA_JSON),
        output=self.FAKE_METADATA)
    return self.backup['_GetMetadata'](inst, *args, **kwargs)

  @_DependencyMockCtx
  def _GetManifest(self, _inst, _version):
    """Returns a fixed manifest covering the packages the SDK cares about."""
    return {
        'packages': {
            'app-emulation/qemu': [['3.0.0', {}]],
            'chromeos-base/tast-cmd': [['1.2.3', {}]],
            'chromeos-base/tast-remote-tests-cros': [['7.8.9', {}]],
            'sys-firmware/seabios': [['1.11.0', {}]]
        }
    }

  @_DependencyMockCtx
  def _GetTarballCacheKey(self, _inst, component, _url):
    """Returns a deterministic cache key, overridable per component.

    Tests may seed self.tarball_cache_key_map to control the hash portion.
    """
    return (os.path.join(
        component,
        self.tarball_cache_key_map.get(component, 'some-fake-hash')),)
class RunThroughTest(cros_test_lib.MockTempDirTestCase,
                     cros_test_lib.LoggingTestCase):
  """Run the script with most things mocked out."""

  # (board, version, component) identifying the sysroot cache entry.
  VERSION_KEY = (SDKFetcherMock.BOARD, SDKFetcherMock.VERSION,
                 constants.CHROME_SYSROOT_TAR)

  # Canned environment returned by SourceEnvironmentMock below.
  FAKE_ENV = {
      'GN_ARGS': 'target_sysroot="/path/to/sysroot" is_clang=false',
      'AR': 'x86_64-cros-linux-gnu-ar',
      'AS': 'x86_64-cros-linux-gnu-as',
      'CXX': 'x86_64-cros-linux-gnu-clang++',
      'CC': 'x86_64-cros-linux-gnu-clang',
      'LD': 'x86_64-cros-linux-gnu-clang++',
      'NM': 'x86_64-cros-linux-gnu-nm',
      'RANLIB': 'x86_64-cros-linux-gnu-ranlib',
      'READELF': 'x86_64-cros-linux-gnu-readelf',
      'CFLAGS': '-O2',
      'CXXFLAGS': '-O2',
  }

  def SetupCommandMock(self, many_boards=False, extra_args=None,
                       default_cache_dir=False):
    """Creates and starts the mocked-out 'cros chrome-sdk' command.

    Args:
      many_boards: If True, pass --boards with multiple boards and --no-shell.
      extra_args: Extra command-line arguments to append.
      default_cache_dir: If True, let the command pick its default cache dir
        rather than forcing it under self.tempdir.
    """
    cmd_args = ['--chrome-src', self.chrome_src_dir, 'true']
    if many_boards:
      cmd_args += ['--boards', ':'.join(SDKFetcherMock.BOARDS), '--no-shell']
      # --no-shell drops gni files in //build/args/chromeos/.
      osutils.SafeMakedirs(
          os.path.join(self.chrome_root, 'src', 'build', 'args', 'chromeos'))
    else:
      cmd_args += ['--board', SDKFetcherMock.BOARD]
    if extra_args:
      cmd_args.extend(extra_args)
    base_args = None if default_cache_dir else ['--cache-dir', self.tempdir]
    self.cmd_mock = MockChromeSDKCommand(cmd_args, base_args=base_args)
    self.StartPatcher(self.cmd_mock)
    # Let the real Run() execute; everything underneath it stays mocked.
    self.cmd_mock.UnMockAttr('Run')

  def SourceEnvironmentMock(self, path, *_args, **_kwargs):
    """Stub for osutils.SourceEnvironment; serves FAKE_ENV for sysroot envs."""
    if path.endswith('environment'):
      return copy.deepcopy(self.FAKE_ENV)
    return {}

  def setUp(self):
    """Stubs run commands, the SDK fetcher, and a fake Chrome checkout."""
    self.rc_mock = cros_test_lib.RunCommandMock()
    self.rc_mock.SetDefaultCmdResult()
    self.StartPatcher(self.rc_mock)
    self.sdk_mock = self.StartPatcher(SDKFetcherMock(
        external_mocks=[self.rc_mock]))

    # This needs to occur before initializing MockChromeSDKCommand.
    self.bashrc = os.path.join(self.tempdir, 'bashrc')
    self.PatchObject(constants, 'CHROME_SDK_BASHRC', new=self.bashrc)

    self.PatchObject(osutils, 'SourceEnvironment',
                     autospec=True, side_effect=self.SourceEnvironmentMock)
    self.rc_mock.AddCmdResult(cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD,
                              output='8088')

    # Initialized by SetupCommandMock.
    self.cmd_mock = None

    # Set up a fake Chrome src/ directory
    self.chrome_root = os.path.join(self.tempdir, 'chrome_root')
    self.chrome_src_dir = os.path.join(self.chrome_root, 'src')
    osutils.SafeMakedirs(self.chrome_src_dir)
    osutils.Touch(os.path.join(self.chrome_root, '.gclient'))

  @property
  def cache(self):
    """The tarball cache of the mocked command's SDKFetcher."""
    return self.cmd_mock.inst.sdk.tarball_cache

  def testIt(self):
    """Test a runthrough of the script."""
    self.SetupCommandMock()
    with cros_test_lib.LoggingCapturer() as logs:
      self.cmd_mock.inst.Run()
      self.AssertLogsContain(logs, 'Goma:', inverted=True)

  def testManyBoards(self):
    """Test a runthrough when multiple boards are specified via --boards."""
    self.SetupCommandMock(many_boards=True)
    self.cmd_mock.inst.Run()
    for board in SDKFetcherMock.BOARDS:
      board_arg_file = os.path.join(
          self.chrome_src_dir, 'build/args/chromeos/%s.gni' % board)
      self.assertExists(board_arg_file)

  def testManyBoardsBrokenArgs(self):
    """Tests that malformed args.gn files will be fixed in --boards."""
    self.SetupCommandMock(many_boards=True)
    for board in SDKFetcherMock.BOARDS:
      gn_args_file = os.path.join(
          self.chrome_src_dir, 'out_%s' % board, 'Release', 'args.gn')
      osutils.WriteFile(gn_args_file, 'foo\nbar', makedirs=True)

    self.cmd_mock.inst.Run()
    for board in SDKFetcherMock.BOARDS:
      gn_args_file = os.path.join(
          self.chrome_src_dir, 'out_%s' % board, 'Release', 'args.gn')
      # A regenerated args.gn starts by importing the board's .gni file.
      self.assertTrue(osutils.ReadFile(gn_args_file).startswith('import'))

  def testErrorCodePassthrough(self):
    """Test that error codes are passed through."""
    self.SetupCommandMock()
    with cros_test_lib.LoggingCapturer():
      self.rc_mock.AddCmdResult(partial_mock.ListRegex('-- true'),
                                returncode=5)
      returncode = self.cmd_mock.inst.Run()
      self.assertEqual(returncode, 5)

  def testLocalSDKPath(self):
    """Fetch components from a local --sdk-path."""
    sdk_dir = os.path.join(self.tempdir, 'sdk_dir')
    osutils.SafeMakedirs(sdk_dir)
    osutils.WriteFile(os.path.join(sdk_dir, constants.METADATA_JSON),
                      SDKFetcherMock.FAKE_METADATA)
    self.SetupCommandMock(extra_args=['--sdk-path', sdk_dir])
    with cros_test_lib.LoggingCapturer():
      self.cmd_mock.inst.Run()

  def testGomaError(self):
    """We print an error message when GomaError is raised."""
    self.SetupCommandMock()
    with cros_test_lib.LoggingCapturer() as logs:
      self.PatchObject(cros_chrome_sdk.ChromeSDKCommand, '_FetchGoma',
                       side_effect=cros_chrome_sdk.GomaError())
      self.cmd_mock.inst.Run()
      self.AssertLogsContain(logs, 'Goma:')

  def testSpecificComponent(self):
    """Tests that SDKFetcher.Prepare() handles |components| param properly."""
    sdk = cros_chrome_sdk.SDKFetcher(os.path.join(self.tempdir),
                                     SDKFetcherMock.BOARD)
    components = [constants.BASE_IMAGE_TAR, constants.CHROME_SYSROOT_TAR]
    with sdk.Prepare(components=components) as ctx:
      # Requested components are fetched; others stay out of the key map.
      for c in components:
        self.assertExists(ctx.key_map[c].path)
      for c in [constants.IMAGE_SCRIPTS_TAR, constants.CHROME_ENV_TAR]:
        self.assertFalse(c in ctx.key_map)

  @staticmethod
  def FindInPath(paths, endswith):
    """Returns whether any entry of colon-separated |paths| ends with
    |endswith|."""
    for path in paths.split(':'):
      if path.endswith(endswith):
        return True
    return False

  def testGomaInPath(self):
    """Verify that Goma is enabled (use_goma in GN_ARGS) by default."""
    self.SetupCommandMock()
    self.cmd_mock.inst.Run()
    self.assertIn('use_goma = true', self.cmd_mock.env['GN_ARGS'])

  def testNoGoma(self):
    """Verify that Goma is disabled in GN_ARGS with --nogoma."""
    self.SetupCommandMock(extra_args=['--nogoma'])
    self.cmd_mock.inst.Run()
    self.assertIn('use_goma = false', self.cmd_mock.env['GN_ARGS'])

  def testGnArgsStalenessCheckNoMatch(self):
    """Verifies the GN args are checked for staleness with a mismatch."""
    with cros_test_lib.LoggingCapturer() as logs:
      out_dir = 'out_%s' % SDKFetcherMock.BOARD
      build_label = 'Release'
      gn_args_file_dir = os.path.join(self.chrome_src_dir, out_dir, build_label)
      gn_args_file_path = os.path.join(gn_args_file_dir, 'args.gn')
      osutils.SafeMakedirs(gn_args_file_dir)
      osutils.WriteFile(gn_args_file_path, 'foo = "no match"')

      self.SetupCommandMock()
      self.cmd_mock.inst.Run()

      self.AssertLogsContain(logs, 'Stale args.gn file')

  def testGnArgsStalenessCheckMatch(self):
    """Verifies the GN args are checked for staleness with a match."""
    with cros_test_lib.LoggingCapturer() as logs:
      self.SetupCommandMock()
      self.cmd_mock.inst.Run()

      # Rewrite args.gn with exactly the GN args of the first run; the second
      # run should then not flag the file as stale.
      out_dir = 'out_%s' % SDKFetcherMock.BOARD
      build_label = 'Release'
      gn_args_file_dir = os.path.join(self.chrome_src_dir, out_dir, build_label)
      gn_args_file_path = os.path.join(gn_args_file_dir, 'args.gn')
      osutils.SafeMakedirs(gn_args_file_dir)
      osutils.WriteFile(gn_args_file_path, self.cmd_mock.env['GN_ARGS'])

      self.cmd_mock.inst.Run()

      self.AssertLogsContain(logs, 'Stale args.gn file', inverted=True)

  def testGnArgsStalenessExtraArgs(self):
    """Verifies the GN extra args regenerate gn."""
    with cros_test_lib.LoggingCapturer() as logs:
      self.SetupCommandMock(
          extra_args=['--gn-extra-args=dcheck_always_on=true'])
      self.cmd_mock.inst.Run()

      out_dir = 'out_%s' % SDKFetcherMock.BOARD
      build_label = 'Release'
      gn_args_file_dir = os.path.join(self.chrome_src_dir, out_dir, build_label)
      gn_args_file_path = os.path.join(gn_args_file_dir, 'args.gn')
      osutils.SafeMakedirs(gn_args_file_dir)
      # Round-trip the env's GN args through gn_helpers so the written file is
      # normalized before the second run.
      gn_args_dict = gn_helpers.FromGNArgs(self.cmd_mock.env['GN_ARGS'])
      osutils.WriteFile(gn_args_file_path, gn_helpers.ToGNString(gn_args_dict))

      self.cmd_mock.inst.Run()

      self.AssertLogsContain(logs, 'Stale args.gn file', inverted=True)

  def testChromiumOutDirSet(self):
    """Verify that CHROMIUM_OUT_DIR is set."""
    self.SetupCommandMock()
    self.cmd_mock.inst.Run()
    out_dir = os.path.join(self.chrome_src_dir, 'out_%s' % SDKFetcherMock.BOARD)
    self.assertEqual(out_dir, self.cmd_mock.env['CHROMIUM_OUT_DIR'])

  @mock.patch('chromite.lib.gclient.LoadGclientFile')
  def testInternalGclientSpec(self, mock_gclient_load):
    """Verify that the SDK exits with an error if the gclient spec is wrong."""
    self.SetupCommandMock(extra_args=['--internal'])
    # Simple Chrome should exit with an error if "--internal" is passed and
    # "checkout_src_internal" isn't present in the .gclient file.
    mock_gclient_load.return_value = [{
        'url': 'https://chromium.googlesource.com/chromium/src.git',
        'custom_deps': {},
        'custom_vars': {},
    }]
    with self.assertRaises(cros_build_lib.DieSystemExit):
      self.cmd_mock.inst.Run()

    # With "checkout_src_internal" set, Simple Chrome should run without error.
    mock_gclient_load.return_value = [{
        'url': 'https://chromium.googlesource.com/chromium/src.git',
        'custom_deps': {},
        'custom_vars': {
            'checkout_src_internal': True
        },
    }]
    self.cmd_mock.inst.Run()

  def testClearSDKCache(self):
    """Verifies cache directories are removed with --clear-sdk-cache."""
    # Ensure we have checkout type GCLIENT.
    self.PatchObject(os, 'getcwd', return_value=self.chrome_root)

    # Use the default cache location.
    self.SetupCommandMock(extra_args=['--clear-sdk-cache'],
                          default_cache_dir=True)
    chrome_cache = os.path.join(self.chrome_src_dir, 'build/cros_cache')
    self.assertNotExists(chrome_cache)

    self.cmd_mock.inst.Run()
    self.assertExists(chrome_cache)

  def testSeabiosDownload(self):
    """Verify _CreateSeabiosFWSymlinks.

    Create qemu/seabios directory structure with expected symlinks,
    break the symlinks, and verify that they get fixed.
    """
    qemu_share = os.path.join(
        self.tempdir,
        'chrome-sdk/tarballs/app-emulation/qemu/some-fake-hash/usr/share')
    seabios_share = os.path.join(
        self.tempdir,
        'chrome-sdk/tarballs/sys-firmware/seabios/some-fake-hash/usr/share')

    # Create qemu subdirectories.
    for share_dir in ['qemu', 'seabios', 'seavgabios']:
      os.makedirs(os.path.join(qemu_share, share_dir))

    def _CreateLink(share, bios_dir, bios):
      """Creates share/qemu/<bios> as a relative link to share/<bios_dir>/<bios>."""
      src_file = os.path.join(share, bios_dir, bios)
      dest_file = os.path.join(share, 'qemu', bios)
      osutils.Touch(src_file, makedirs=True)
      rel_path = os.path.relpath(src_file, os.path.dirname(dest_file))
      os.symlink(rel_path, dest_file)

    def _VerifyLinks(broken):
      """Verifies that the links are |broken|."""
      qemu_share_dir = os.path.join(qemu_share, 'qemu')
      for link in os.listdir(qemu_share_dir):
        full_link = os.path.join(qemu_share_dir, link)
        self.assertTrue(os.path.islink(full_link))
        self.assertNotEqual(os.path.exists(full_link), broken)

    # Create qemu links.
    for bios in ['bios.bin', 'bios256k.bin']:
      _CreateLink(qemu_share, 'seabios', bios)
    for bios in ['vgabios-vmware.bin', 'vgabios-virtio.bin',
                 'vgabios-stdvga.bin', 'vgabios-qxl.bin',
                 'vgabios-cirrus.bin', 'vgabios.bin']:
      _CreateLink(qemu_share, 'seavgabios', bios)

    # Move the seabios/seavgabios directories into the seabios package, which
    # breaks the links.
    for bios_dir in ['seabios', 'seavgabios']:
      shutil.move(os.path.join(qemu_share, bios_dir),
                  os.path.join(seabios_share, bios_dir))
    _VerifyLinks(broken=True)

    # Run the command and verify the links get fixed.
    self.SetupCommandMock(extra_args=['--download-vm'])
    self.cmd_mock.inst.Run()
    _VerifyLinks(broken=False)

  def testSymlinkCache(self):
    """Ensures the symlink cache contains valid links to the tarball cache."""
    self.SetupCommandMock()
    self.cmd_mock.inst.Run()
    board, version, _ = self.VERSION_KEY

    toolchain_dir = os.path.join(
        self.tempdir,
        'chrome-sdk/tarballs/target_toolchain/some-fake-hash')
    sysroot_dir = os.path.join(
        self.tempdir,
        'chrome-sdk/tarballs/sysroot_chromeos-base_chromeos-chrome.tar.xz/'
        'some-fake-hash')
    self.assertExists(toolchain_dir)
    self.assertExists(sysroot_dir)

    toolchain_link = os.path.join(
        self.tempdir,
        'chrome-sdk/symlinks/%s+%s+target_toolchain' % (board, version))
    sysroot_link = os.path.join(
        self.tempdir,
        'chrome-sdk/symlinks/%s+%s+sysroot_chromeos-base_chromeos-'
        'chrome.tar.xz' % (board, version))
    self.assertTrue(os.path.islink(toolchain_link))
    self.assertTrue(os.path.islink(sysroot_link))
    self.assertEqual(os.path.realpath(toolchain_link), toolchain_dir)
    self.assertEqual(os.path.realpath(sysroot_link), sysroot_dir)

  def testSymlinkCacheToolchainOverride(self):
    """Ensures that the SDK picks up an overridden component."""
    sdk = cros_chrome_sdk.SDKFetcher(os.path.join(self.tempdir),
                                     SDKFetcherMock.BOARD)
    board, version, _ = self.VERSION_KEY
    toolchain_link = os.path.join(
        self.tempdir,
        'chrome-sdk/symlinks/%s+%s+target_toolchain' % (board, version))
    components = [sdk.TARGET_TOOLCHAIN_KEY]

    toolchain_url_1 = 'some-fake-gs-path-1'
    toolchain_dir_1 = os.path.join(
        self.tempdir,
        'chrome-sdk/tarballs/target_toolchain/',
        toolchain_url_1)
    toolchain_url_2 = 'some-fake-gs-path-2'
    toolchain_dir_2 = os.path.join(
        self.tempdir,
        'chrome-sdk/tarballs/target_toolchain/',
        toolchain_url_2)

    # Prepare the cache using 'toolchain_url_1'.
    self.sdk_mock.tarball_cache_key_map = {
        sdk.TARGET_TOOLCHAIN_KEY: toolchain_url_1
    }
    with sdk.Prepare(components, toolchain_url=toolchain_url_1):
      self.assertEqual(toolchain_dir_1, os.path.realpath(toolchain_link))
      self.assertExists(toolchain_dir_1)
      self.assertNotExists(toolchain_dir_2)

    # Prepare the cache with 'toolchain_url_2' and make sure the active symlink
    # points to it and that 'toolchain_url_1' is still present.
    self.sdk_mock.tarball_cache_key_map = {
        sdk.TARGET_TOOLCHAIN_KEY: toolchain_url_2
    }
    with sdk.Prepare(components, toolchain_url=toolchain_url_2):
      self.assertEqual(toolchain_dir_2, os.path.realpath(toolchain_link))
      self.assertExists(toolchain_dir_2)
      self.assertExists(toolchain_dir_1)
class GomaTest(cros_test_lib.MockTempDirTestCase,
               cros_test_lib.LoggingTestCase):
  """Test Goma setup functionality."""

  def setUp(self):
    """Stubs out run commands and creates a mocked chrome-sdk command."""
    self.rc_mock = cros_test_lib.RunCommandMock()
    self.rc_mock.SetDefaultCmdResult()
    self.StartPatcher(self.rc_mock)

    self.cmd_mock = MockChromeSDKCommand(
        ['--board', SDKFetcherMock.BOARD, 'true'],
        base_args=['--cache-dir', self.tempdir])
    self.StartPatcher(self.cmd_mock)

  def VerifyGomaError(self):
    """Asserts that _FetchGoma raises GomaError."""
    self.assertRaises(cros_chrome_sdk.GomaError, self.cmd_mock.inst._FetchGoma)

  def testNoGomaPort(self):
    """We print an error when gomacc is not returning a port."""
    self.rc_mock.AddCmdResult(
        cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD)
    self.VerifyGomaError()

  def testGomaccError(self):
    """We print an error when gomacc exits with nonzero returncode."""
    self.rc_mock.AddCmdResult(
        cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD, returncode=1)
    self.VerifyGomaError()

  def testFetchError(self):
    """We print an error when we can't fetch Goma."""
    # NOTE(review): setup is identical to testGomaccError above; presumably
    # this was meant to make the fetch step fail instead -- confirm intent.
    self.rc_mock.AddCmdResult(
        cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD, returncode=1)
    self.VerifyGomaError()

  def testGomaStart(self):
    """Test that we start Goma if it's not already started."""
    # Duplicate return values.
    self.PatchObject(cros_chrome_sdk.ChromeSDKCommand, '_GomaPort',
                     side_effect=['XXXX', 'XXXX'])
    # Run it twice to exercise caching.
    for _ in range(2):
      goma_dir, goma_port = self.cmd_mock.inst._FetchGoma()
      self.assertEqual(goma_port, 'XXXX')
      self.assertTrue(bool(goma_dir))
class VersionTest(cros_test_lib.MockTempDirTestCase,
                  cros_test_lib.LoggingTestCase):
  """Tests the determination of which SDK version to use."""

  VERSION = '3543.0.0'
  FULL_VERSION = 'R55-%s' % VERSION
  RECENT_VERSION_MISSING = '3542.0.0'
  RECENT_VERSION_FOUND = '3541.0.0'
  FULL_VERSION_RECENT = 'R55-%s' % RECENT_VERSION_FOUND
  NON_CANARY_VERSION = '3543.2.1'
  FULL_VERSION_NON_CANARY = 'R55-%s' % NON_CANARY_VERSION
  BOARD = 'eve'

  VERSION_BASE = ('gs://chromeos-image-archive/%s-release/LATEST-%s'
                  % (BOARD, VERSION))

  # Canned gsutil error output for the missing-file cases.
  CAT_ERROR = 'CommandException: No URLs matched %s' % VERSION_BASE
  LS_ERROR = 'CommandException: One or more URLs matched no objects.'

  def setUp(self):
    """Creates an SDKFetcher with GS access and SDK internals mocked out."""
    self.gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
    self.gs_mock.SetDefaultCmdResult()
    self.sdk_mock = self.StartPatcher(SDKFetcherMock(
        external_mocks=[self.gs_mock]))

    # Ensure a version left in the environment by another test doesn't leak in.
    os.environ.pop(cros_chrome_sdk.SDKFetcher.SDK_VERSION_ENV, None)
    self.sdk = cros_chrome_sdk.SDKFetcher(
        os.path.join(self.tempdir, 'cache'), self.BOARD)

  def testUpdateDefaultChromeVersion(self):
    """We pick up the right LKGM version from the Chrome tree."""
    dir_struct = [
        'gclient_root/.gclient'
    ]
    cros_test_lib.CreateOnDiskHierarchy(self.tempdir, dir_struct)
    gclient_root = os.path.join(self.tempdir, 'gclient_root')
    self.PatchObject(os, 'getcwd', return_value=gclient_root)

    lkgm_file = os.path.join(gclient_root, 'src', constants.PATH_TO_CHROME_LKGM)
    osutils.Touch(lkgm_file, makedirs=True)
    osutils.WriteFile(lkgm_file, self.VERSION)
    self.sdk_mock.UnMockAttr('UpdateDefaultVersion')
    self.sdk.UpdateDefaultVersion()
    self.assertEqual(self.sdk.GetDefaultVersion(),
                     self.VERSION)

  def testFullVersionFromFullVersion(self):
    """Test that a fully specified version is allowed."""
    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
        output=self.FULL_VERSION)
    self.assertEqual(
        self.FULL_VERSION,
        self.sdk.GetFullVersion(self.FULL_VERSION))

  def testFullVersionFromPlatformVersion(self):
    """Test full version calculation from the platform version."""
    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
        output=self.FULL_VERSION)
    self.assertEqual(
        self.FULL_VERSION,
        self.sdk.GetFullVersion(self.VERSION))

  def _SetupMissingVersions(self):
    """Version & Version-1 are missing, but Version-2 exists."""
    def _RaiseGSNoSuchKey(*_args, **_kwargs):
      raise gs.GSNoSuchKey('file does not exist')

    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
        side_effect=_RaiseGSNoSuchKey)
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex(
            'cat .*/LATEST-%s' % self.RECENT_VERSION_MISSING),
        side_effect=_RaiseGSNoSuchKey)
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.RECENT_VERSION_FOUND),
        output=self.FULL_VERSION_RECENT)

  def testNoFallbackVersion(self):
    """Test that all versions are checked before raising an exception."""
    def _RaiseGSNoSuchKey(*_args, **_kwargs):
      raise gs.GSNoSuchKey('file does not exist')

    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-*'),
        side_effect=_RaiseGSNoSuchKey)
    # Absurdly large fallback count: the walk must stop at version 1.0.0 and
    # never probe negative versions.
    self.sdk.fallback_versions = 2000000
    with cros_test_lib.LoggingCapturer() as logs:
      self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
                        self.VERSION)
    self.AssertLogsContain(logs, 'LATEST-1.0.0')
    self.AssertLogsContain(logs, 'LATEST--1.0.0', inverted=True)

  def testFallbackVersions(self):
    """Test full version calculation with various fallback version counts."""
    self._SetupMissingVersions()
    for version in range(6):
      self.sdk.fallback_versions = version
      # _SetupMissingVersions mocks the result of 3 files.
      # The file ending with LATEST-3.0.0 is the only one that would pass.
      if version < 3:
        self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
                          self.VERSION)
      else:
        self.assertEqual(
            self.FULL_VERSION_RECENT,
            self.sdk.GetFullVersion(self.VERSION))

  def testFullVersionCaching(self):
    """Test full version calculation and caching."""
    def RaiseException(*_args, **_kwargs):
      raise Exception('boom')

    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
        output=self.FULL_VERSION)
    self.assertEqual(
        self.FULL_VERSION,
        self.sdk.GetFullVersion(self.VERSION))
    # Test that we access the cache on the next call, rather than checking GS.
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
        side_effect=RaiseException)
    self.assertEqual(
        self.FULL_VERSION,
        self.sdk.GetFullVersion(self.VERSION))
    # Test that we access GS again if the board is changed.
    self.sdk.board += '2'
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
        output=self.FULL_VERSION + '2')
    self.assertEqual(
        self.FULL_VERSION + '2',
        self.sdk.GetFullVersion(self.VERSION))

  def testNoLatestVersion(self):
    """We raise an exception when there is no recent latest version."""
    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-*'),
        output='', error=self.CAT_ERROR, returncode=1)
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('ls .*%s' % self.VERSION),
        output='', error=self.LS_ERROR, returncode=1)
    self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
                      self.VERSION)

  def testNonCanaryFullVersion(self):
    """Test full version calculation for a non canary version."""
    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.NON_CANARY_VERSION),
        output=self.FULL_VERSION_NON_CANARY)
    self.assertEqual(
        self.FULL_VERSION_NON_CANARY,
        self.sdk.GetFullVersion(self.NON_CANARY_VERSION))

  def testNonCanaryNoLatestVersion(self):
    """We raise an exception when there is no matching latest non canary."""
    self.sdk_mock.UnMockAttr('GetFullVersion')
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/LATEST-%s' % self.NON_CANARY_VERSION),
        output='', error=self.CAT_ERROR, returncode=1)
    # Set any other query to return a valid version, but we don't expect that
    # to occur for non canary versions.
    self.gs_mock.SetDefaultCmdResult(output=self.FULL_VERSION_NON_CANARY)
    self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
                      self.NON_CANARY_VERSION)

  def testDefaultEnvBadBoard(self):
    """We don't use the version in the environment if board doesn't match."""
    os.environ[cros_chrome_sdk.SDKFetcher.SDK_VERSION_ENV] = self.VERSION
    self.assertNotEqual(self.VERSION, self.sdk_mock.VERSION)
    self.assertEqual(self.sdk.GetDefaultVersion(), None)

  def testDefaultEnvGoodBoard(self):
    """We use the version in the environment if board matches."""
    sdk_version_env = cros_chrome_sdk.SDKFetcher.SDK_VERSION_ENV
    os.environ[sdk_version_env] = self.VERSION
    os.environ[cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV] = self.BOARD
    self.assertEqual(self.sdk.GetDefaultVersion(), self.VERSION)
class PathVerifyTest(cros_test_lib.MockTempDirTestCase,
                     cros_test_lib.LoggingTestCase):
  """Tests user_rc PATH validation and warnings."""

  def testPathVerifyWarnings(self):
    """Test the user rc PATH verification codepath."""
    def SourceEnvironmentMock(*_args, **_kwargs):
      """Returns a PATH made of the directories of the touched files below."""
      return {
          'PATH': ':'.join([os.path.dirname(p) for p in abs_paths]),
      }

    self.PatchObject(osutils, 'SourceEnvironment',
                     side_effect=SourceEnvironmentMock)
    # Tools that shadow the managed Goma/Chromite ones when first in PATH.
    file_list = (
        'goma/goma_ctl.py',
        'clang/clang',
        'chromite/parallel_emerge',
    )
    abs_paths = [os.path.join(self.tempdir, relpath) for relpath in file_list]
    for p in abs_paths:
      osutils.Touch(p, makedirs=True, mode=0o755)

    with cros_test_lib.LoggingCapturer() as logs:
      cros_chrome_sdk.ChromeSDKCommand._VerifyGoma(None)
      cros_chrome_sdk.ChromeSDKCommand._VerifyChromiteBin(None)

    # Both verifiers should have warned about the shadowed managed tools.
    for msg in ['managed Goma', 'default Chromite']:
      self.AssertLogsMatch(logs, msg)
class ClearOldItemsTest(cros_test_lib.MockTempDirTestCase,
                        cros_test_lib.LoggingTestCase):
  """Tests SDKFetcher.ClearOldItems() behavior."""

  def setUp(self):
    """Sets up a temporary symlink & tarball cache."""
    self.gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
    self.gs_mock.SetDefaultCmdResult()
    self.sdk_fetcher = cros_chrome_sdk.SDKFetcher(self.tempdir, None)

  def testBrokenSymlinkCleared(self):
    """Adds a broken symlink and ensures it gets removed."""
    osutils.Touch(os.path.join(self.tempdir, 'some-file'))

    # One link to a real file, one dangling link to a nonexistent path.
    valid_link_ref = self.sdk_fetcher.symlink_cache.Lookup('some-valid-link')
    with valid_link_ref:
      self.sdk_fetcher._UpdateCacheSymlink(
          valid_link_ref, os.path.join(self.tempdir, 'some-file'))

    broken_link_ref = self.sdk_fetcher.symlink_cache.Lookup('some-broken-link')
    with broken_link_ref:
      self.sdk_fetcher._UpdateCacheSymlink(
          broken_link_ref, '/some/invalid/file')

    # Broken symlink should exist before the ClearOldItems() call, and be
    # removed after.
    self.assertTrue(valid_link_ref.Exists())
    self.assertTrue(broken_link_ref.Exists())
    cros_chrome_sdk.SDKFetcher.ClearOldItems(self.tempdir)
    self.assertTrue(valid_link_ref.Exists())
    self.assertFalse(broken_link_ref.Exists())
import copy
import os
import shutil
import sys
import mock
from chromite.lib import constants
from chromite.cli import command_unittest
from chromite.cli.cros import cros_chrome_sdk
from chromite.lib import cache
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import gs
from chromite.lib import gs_unittest
from chromite.lib import osutils
from chromite.lib import partial_mock
from gn_helpers import gn_helpers
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
# pylint: disable=protected-access
class MockChromeSDKCommand(command_unittest.MockCommand):
  """Mocked-out `cros chrome-sdk` command for tests.

  Records a deep copy of the environment produced by _SetupEnvironment in
  |self.env| so tests can inspect it after a run.
  """
  TARGET = 'chromite.cli.cros.cros_chrome_sdk.ChromeSDKCommand'
  TARGET_CLASS = cros_chrome_sdk.ChromeSDKCommand
  COMMAND = 'chrome-sdk'
  ATTRS = (('_GOMA_DOWNLOAD_URL', '_SetupEnvironment') +
           command_unittest.MockCommand.ATTRS)
  _GOMA_DOWNLOAD_URL = 'Invalid URL'

  def __init__(self, *args, **kwargs):
    super().__init__(*args, **kwargs)
    # Snapshot of the env dict from the most recent _SetupEnvironment call.
    self.env = None

  def _SetupEnvironment(self, *args, **kwargs):
    """Delegates to the real implementation, keeping a copy of the result."""
    result = self.backup['_SetupEnvironment'](*args, **kwargs)
    self.env = copy.deepcopy(result)
    return result
class ParserTest(cros_test_lib.MockTempDirTestCase):
  """Test the parser."""

  def testNormal(self):
    """Tests that our example parser works normally."""
    cmd_args = ['--board', SDKFetcherMock.BOARD]
    base = ['--cache-dir', self.tempdir]
    with MockChromeSDKCommand(cmd_args, base_args=base) as cmd:
      self.assertEqual(cmd.inst.options.board, SDKFetcherMock.BOARD)
      self.assertEqual(cmd.inst.options.cache_dir, self.tempdir)

  def testVersion(self):
    """Tests that a platform version is allowed."""
    version = '1234.0.0'
    cmd_args = ['--board', SDKFetcherMock.BOARD, '--version', version]
    with MockChromeSDKCommand(cmd_args) as cmd:
      self.assertEqual(cmd.inst.options.version, version)

  def testFullVersion(self):
    """Tests that a full version is allowed."""
    full_version = 'R56-1234.0.0'
    cmd_args = ['--board', SDKFetcherMock.BOARD, '--version', full_version]
    with MockChromeSDKCommand(cmd_args) as cmd:
      self.assertEqual(cmd.inst.options.version, full_version)
def _GSCopyMock(_self, path, dest, **_kwargs):
  """Simulates a GS Copy by materializing an empty placeholder at |dest|."""
  with osutils.TempDir() as staging_dir:
    placeholder = os.path.join(staging_dir, os.path.basename(path))
    osutils.Touch(placeholder)
    shutil.move(placeholder, dest)
def _DependencyMockCtx(f):
"""Attribute that ensures dependency PartialMocks are started.
Since PartialMock does not support nested mocking, we need to first call
stop() on the outer level PartialMock (which is passed in to us). We then
re-start() the outer level upon exiting the context.
"""
def new_f(self, *args, **kwargs):
if not self.entered:
try:
self.entered = True
# Temporarily disable outer GSContext mock before starting our mock.
# TODO(rcui): Generalize this attribute and include in partial_mock.py.
for emock in self.external_mocks:
emock.stop()
with self.gs_mock:
return f(self, *args, **kwargs)
finally:
self.entered = False
for emock in self.external_mocks:
emock.start()
else:
return f(self, *args, **kwargs)
return new_f
class SDKFetcherMock(partial_mock.PartialMock):
  """Provides mocking functionality for SDKFetcher."""

  TARGET = 'chromite.cli.cros.cros_chrome_sdk.SDKFetcher'
  # Methods replaced on the target.  Each mock below is wrapped with
  # _DependencyMockCtx so the external mocks are paused while it runs.
  ATTRS = ('__init__', 'GetFullVersion', '_GetMetadata', '_UpdateTarball',
           '_GetManifest', 'UpdateDefaultVersion', '_GetTarballCacheKey')

  # Canned metadata JSON served by _GetMetadata.
  FAKE_METADATA = """
{
  "boards": ["eve"],
  "cros-version": "25.3543.2",
  "metadata-version": "1",
  "bot-hostname": "build82-m2.golo.chromium.org",
  "bot-config": "eve-release",
  "toolchain-tuple": ["i686-pc-linux-gnu"],
  "toolchain-url": "2013/01/%(target)s-2013.01.23.003823.tar.xz",
  "sdk-version": "2013.01.23.003823"
}"""

  BOARD = 'eve'
  BOARDS = ['amd64-generic', 'arm-generic']
  VERSION = '4567.8.9'

  def __init__(self, external_mocks=None):
    """Initializes the mock.

    Args:
      external_mocks: A list of already started PartialMock/patcher instances.
        stop() will be called on each element every time execution enters one
        of our mocked-out methods, and start() called on it once execution
        leaves the mocked-out method.
    """
    partial_mock.PartialMock.__init__(self)
    self.external_mocks = external_mocks or []
    # Re-entrancy guard used by _DependencyMockCtx.
    self.entered = False
    self.gs_mock = gs_unittest.GSContextMock()
    self.gs_mock.SetDefaultCmdResult()
    self.env = None
    # Maps component name -> fake cache-key hash; see _GetTarballCacheKey.
    self.tarball_cache_key_map = {}

  @_DependencyMockCtx
  def _target__init__(self, inst, *args, **kwargs):
    # Run the real __init__, then sanity-check that tests never point the
    # SDK cache at a location outside /tmp.
    self.backup['__init__'](inst, *args, **kwargs)
    if not inst.cache_base.startswith('/tmp'):
      raise AssertionError('For testing, SDKFetcher cache_dir needs to be a '
                           'dir under /tmp')

  @_DependencyMockCtx
  def UpdateDefaultVersion(self, inst, *_args, **_kwargs):
    """Pins the default SDK version to self.VERSION without hitting GS."""
    inst._SetDefaultVersion(self.VERSION)
    return self.VERSION, True

  @_DependencyMockCtx
  def _UpdateTarball(self, inst, *args, **kwargs):
    """Runs the real tarball update with GS copies and untarring stubbed."""
    with mock.patch.object(gs.GSContext, 'Copy', autospec=True,
                           side_effect=_GSCopyMock):
      with mock.patch.object(cache, 'Untar'):
        return self.backup['_UpdateTarball'](inst, *args, **kwargs)

  @_DependencyMockCtx
  def GetFullVersion(self, _inst, version):
    """Returns a canned full version ('R26-<version>') for any input."""
    return 'R26-%s' % version

  @_DependencyMockCtx
  def _GetMetadata(self, inst, *args, **kwargs):
    """Runs the real metadata fetch against canned FAKE_METADATA output."""
    self.gs_mock.SetDefaultCmdResult()
    self.gs_mock.AddCmdResult(
        partial_mock.ListRegex('cat .*/%s' % constants.METADATA_JSON),
        output=self.FAKE_METADATA)
    return self.backup['_GetMetadata'](inst, *args, **kwargs)

  @_DependencyMockCtx
  def _GetManifest(self, _inst, _version):
    """Returns a fixed manifest covering the packages the SDK cares about."""
    return {
        'packages': {
            'app-emulation/qemu': [['3.0.0', {}]],
            'chromeos-base/tast-cmd': [['1.2.3', {}]],
            'chromeos-base/tast-remote-tests-cros': [['7.8.9', {}]],
            'sys-firmware/seabios': [['1.11.0', {}]]
        }
    }

  @_DependencyMockCtx
  def _GetTarballCacheKey(self, _inst, component, _url):
    """Returns a deterministic cache key, overridable per component.

    Tests may seed self.tarball_cache_key_map to control the hash portion.
    """
    return (os.path.join(
        component,
        self.tarball_cache_key_map.get(component, 'some-fake-hash')),)
class RunThroughTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.LoggingTestCase):
"""Run the script with most things mocked out."""
VERSION_KEY = (SDKFetcherMock.BOARD, SDKFetcherMock.VERSION,
constants.CHROME_SYSROOT_TAR)
FAKE_ENV = {
'GN_ARGS': 'target_sysroot="/path/to/sysroot" is_clang=false',
'AR': 'x86_64-cros-linux-gnu-ar',
'AS': 'x86_64-cros-linux-gnu-as',
'CXX': 'x86_64-cros-linux-gnu-clang++',
'CC': 'x86_64-cros-linux-gnu-clang',
'LD': 'x86_64-cros-linux-gnu-clang++',
'NM': 'x86_64-cros-linux-gnu-nm',
'RANLIB': 'x86_64-cros-linux-gnu-ranlib',
'READELF': 'x86_64-cros-linux-gnu-readelf',
'CFLAGS': '-O2',
'CXXFLAGS': '-O2',
}
def SetupCommandMock(self, many_boards=False, extra_args=None,
default_cache_dir=False):
cmd_args = ['--chrome-src', self.chrome_src_dir, 'true']
if many_boards:
cmd_args += ['--boards', ':'.join(SDKFetcherMock.BOARDS), '--no-shell']
# --no-shell drops gni files in //build/args/chromeos/.
osutils.SafeMakedirs(
os.path.join(self.chrome_root, 'src', 'build', 'args', 'chromeos'))
else:
cmd_args += ['--board', SDKFetcherMock.BOARD]
if extra_args:
cmd_args.extend(extra_args)
base_args = None if default_cache_dir else ['--cache-dir', self.tempdir]
self.cmd_mock = MockChromeSDKCommand(cmd_args, base_args=base_args)
self.StartPatcher(self.cmd_mock)
self.cmd_mock.UnMockAttr('Run')
def SourceEnvironmentMock(self, path, *_args, **_kwargs):
if path.endswith('environment'):
return copy.deepcopy(self.FAKE_ENV)
return {}
def setUp(self):
self.rc_mock = cros_test_lib.RunCommandMock()
self.rc_mock.SetDefaultCmdResult()
self.StartPatcher(self.rc_mock)
self.sdk_mock = self.StartPatcher(SDKFetcherMock(
external_mocks=[self.rc_mock]))
# This needs to occur before initializing MockChromeSDKCommand.
self.bashrc = os.path.join(self.tempdir, 'bashrc')
self.PatchObject(constants, 'CHROME_SDK_BASHRC', new=self.bashrc)
self.PatchObject(osutils, 'SourceEnvironment',
autospec=True, side_effect=self.SourceEnvironmentMock)
self.rc_mock.AddCmdResult(cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD,
output='8088')
# Initialized by SetupCommandMock.
self.cmd_mock = None
# Set up a fake Chrome src/ directory
self.chrome_root = os.path.join(self.tempdir, 'chrome_root')
self.chrome_src_dir = os.path.join(self.chrome_root, 'src')
osutils.SafeMakedirs(self.chrome_src_dir)
osutils.Touch(os.path.join(self.chrome_root, '.gclient'))
@property
def cache(self):
return self.cmd_mock.inst.sdk.tarball_cache
def testIt(self):
"""Test a runthrough of the script."""
self.SetupCommandMock()
with cros_test_lib.LoggingCapturer() as logs:
self.cmd_mock.inst.Run()
self.AssertLogsContain(logs, 'Goma:', inverted=True)
def testManyBoards(self):
"""Test a runthrough when multiple boards are specified via --boards."""
self.SetupCommandMock(many_boards=True)
self.cmd_mock.inst.Run()
for board in SDKFetcherMock.BOARDS:
board_arg_file = os.path.join(
self.chrome_src_dir, 'build/args/chromeos/%s.gni' % board)
self.assertExists(board_arg_file)
def testManyBoardsBrokenArgs(self):
"""Tests that malformed args.gn files will be fixed in --boards."""
self.SetupCommandMock(many_boards=True)
for board in SDKFetcherMock.BOARDS:
gn_args_file = os.path.join(
self.chrome_src_dir, 'out_%s' % board, 'Release', 'args.gn')
osutils.WriteFile(gn_args_file, 'foo\nbar', makedirs=True)
self.cmd_mock.inst.Run()
for board in SDKFetcherMock.BOARDS:
gn_args_file = os.path.join(
self.chrome_src_dir, 'out_%s' % board, 'Release', 'args.gn')
self.assertTrue(osutils.ReadFile(gn_args_file).startswith('import'))
def testErrorCodePassthrough(self):
"""Test that error codes are passed through."""
self.SetupCommandMock()
with cros_test_lib.LoggingCapturer():
self.rc_mock.AddCmdResult(partial_mock.ListRegex('-- true'),
returncode=5)
returncode = self.cmd_mock.inst.Run()
self.assertEqual(returncode, 5)
def testLocalSDKPath(self):
"""Fetch components from a local --sdk-path."""
sdk_dir = os.path.join(self.tempdir, 'sdk_dir')
osutils.SafeMakedirs(sdk_dir)
osutils.WriteFile(os.path.join(sdk_dir, constants.METADATA_JSON),
SDKFetcherMock.FAKE_METADATA)
self.SetupCommandMock(extra_args=['--sdk-path', sdk_dir])
with cros_test_lib.LoggingCapturer():
self.cmd_mock.inst.Run()
def testGomaError(self):
"""We print an error message when GomaError is raised."""
self.SetupCommandMock()
with cros_test_lib.LoggingCapturer() as logs:
self.PatchObject(cros_chrome_sdk.ChromeSDKCommand, '_FetchGoma',
side_effect=cros_chrome_sdk.GomaError())
self.cmd_mock.inst.Run()
self.AssertLogsContain(logs, 'Goma:')
def testSpecificComponent(self):
"""Tests that SDKFetcher.Prepare() handles |components| param properly."""
sdk = cros_chrome_sdk.SDKFetcher(os.path.join(self.tempdir),
SDKFetcherMock.BOARD)
components = [constants.BASE_IMAGE_TAR, constants.CHROME_SYSROOT_TAR]
with sdk.Prepare(components=components) as ctx:
for c in components:
self.assertExists(ctx.key_map[c].path)
for c in [constants.IMAGE_SCRIPTS_TAR, constants.CHROME_ENV_TAR]:
self.assertFalse(c in ctx.key_map)
@staticmethod
def FindInPath(paths, endswith):
for path in paths.split(':'):
if path.endswith(endswith):
return True
return False
def testGomaInPath(self):
"""Verify that we do indeed add Goma to the PATH."""
self.SetupCommandMock()
self.cmd_mock.inst.Run()
self.assertIn('use_goma = true', self.cmd_mock.env['GN_ARGS'])
def testNoGoma(self):
"""Verify that we do not add Goma to the PATH."""
self.SetupCommandMock(extra_args=['--nogoma'])
self.cmd_mock.inst.Run()
self.assertIn('use_goma = false', self.cmd_mock.env['GN_ARGS'])
def testGnArgsStalenessCheckNoMatch(self):
"""Verifies the GN args are checked for staleness with a mismatch."""
with cros_test_lib.LoggingCapturer() as logs:
out_dir = 'out_%s' % SDKFetcherMock.BOARD
build_label = 'Release'
gn_args_file_dir = os.path.join(self.chrome_src_dir, out_dir, build_label)
gn_args_file_path = os.path.join(gn_args_file_dir, 'args.gn')
osutils.SafeMakedirs(gn_args_file_dir)
osutils.WriteFile(gn_args_file_path, 'foo = "no match"')
self.SetupCommandMock()
self.cmd_mock.inst.Run()
self.AssertLogsContain(logs, 'Stale args.gn file')
def testGnArgsStalenessCheckMatch(self):
"""Verifies the GN args are checked for staleness with a match."""
with cros_test_lib.LoggingCapturer() as logs:
self.SetupCommandMock()
self.cmd_mock.inst.Run()
out_dir = 'out_%s' % SDKFetcherMock.BOARD
build_label = 'Release'
gn_args_file_dir = os.path.join(self.chrome_src_dir, out_dir, build_label)
gn_args_file_path = os.path.join(gn_args_file_dir, 'args.gn')
osutils.SafeMakedirs(gn_args_file_dir)
osutils.WriteFile(gn_args_file_path, self.cmd_mock.env['GN_ARGS'])
self.cmd_mock.inst.Run()
self.AssertLogsContain(logs, 'Stale args.gn file', inverted=True)
def testGnArgsStalenessExtraArgs(self):
"""Verifies the GN extra args regenerate gn."""
with cros_test_lib.LoggingCapturer() as logs:
self.SetupCommandMock(
extra_args=['--gn-extra-args=dcheck_always_on=true'])
self.cmd_mock.inst.Run()
out_dir = 'out_%s' % SDKFetcherMock.BOARD
build_label = 'Release'
gn_args_file_dir = os.path.join(self.chrome_src_dir, out_dir, build_label)
gn_args_file_path = os.path.join(gn_args_file_dir, 'args.gn')
osutils.SafeMakedirs(gn_args_file_dir)
gn_args_dict = gn_helpers.FromGNArgs(self.cmd_mock.env['GN_ARGS'])
osutils.WriteFile(gn_args_file_path, gn_helpers.ToGNString(gn_args_dict))
self.cmd_mock.inst.Run()
self.AssertLogsContain(logs, 'Stale args.gn file', inverted=True)
def testChromiumOutDirSet(self):
"""Verify that CHROMIUM_OUT_DIR is set."""
self.SetupCommandMock()
self.cmd_mock.inst.Run()
out_dir = os.path.join(self.chrome_src_dir, 'out_%s' % SDKFetcherMock.BOARD)
self.assertEqual(out_dir, self.cmd_mock.env['CHROMIUM_OUT_DIR'])
@mock.patch('chromite.lib.gclient.LoadGclientFile')
def testInternalGclientSpec(self, mock_gclient_load):
"""Verify that the SDK exits with an error if the gclient spec is wrong."""
self.SetupCommandMock(extra_args=['--internal'])
# Simple Chrome should exit with an error if "--internal" is passed and
# "checkout_src_internal" isn't present in the .gclient file.
mock_gclient_load.return_value = [{
'url': 'https://chromium.googlesource.com/chromium/src.git',
'custom_deps': {},
'custom_vars': {},
}]
with self.assertRaises(cros_build_lib.DieSystemExit):
self.cmd_mock.inst.Run()
# With "checkout_src_internal" set, Simple Chrome should run without error.
mock_gclient_load.return_value = [{
'url': 'https://chromium.googlesource.com/chromium/src.git',
'custom_deps': {},
'custom_vars': {
'checkout_src_internal': True
},
}]
self.cmd_mock.inst.Run()
def testClearSDKCache(self):
"""Verifies cache directories are removed with --clear-sdk-cache."""
# Ensure we have checkout type GCLIENT.
self.PatchObject(os, 'getcwd', return_value=self.chrome_root)
# Use the default cache location.
self.SetupCommandMock(extra_args=['--clear-sdk-cache'],
default_cache_dir=True)
chrome_cache = os.path.join(self.chrome_src_dir, 'build/cros_cache')
self.assertNotExists(chrome_cache)
self.cmd_mock.inst.Run()
self.assertExists(chrome_cache)
def testSeabiosDownload(self):
"""Verify _CreateSeabiosFWSymlinks.
Create qemu/seabios directory structure with expected symlinks,
break the symlinks, and verify that they get fixed.
"""
qemu_share = os.path.join(
self.tempdir,
'chrome-sdk/tarballs/app-emulation/qemu/some-fake-hash/usr/share')
seabios_share = os.path.join(
self.tempdir,
'chrome-sdk/tarballs/sys-firmware/seabios/some-fake-hash/usr/share')
# Create qemu subdirectories.
for share_dir in ['qemu', 'seabios', 'seavgabios']:
os.makedirs(os.path.join(qemu_share, share_dir))
def _CreateLink(share, bios_dir, bios):
src_file = os.path.join(share, bios_dir, bios)
dest_file = os.path.join(share, 'qemu', bios)
osutils.Touch(src_file, makedirs=True)
rel_path = os.path.relpath(src_file, os.path.dirname(dest_file))
os.symlink(rel_path, dest_file)
def _VerifyLinks(broken):
"""Verfies that the links are |broken|."""
qemu_share_dir = os.path.join(qemu_share, 'qemu')
for link in os.listdir(qemu_share_dir):
full_link = os.path.join(qemu_share_dir, link)
self.assertTrue(os.path.islink(full_link))
self.assertNotEqual(os.path.exists(full_link), broken)
# Create qemu links.
for bios in ['bios.bin', 'bios256k.bin']:
_CreateLink(qemu_share, 'seabios', bios)
for bios in ['vgabios-vmware.bin', 'vgabios-virtio.bin',
'vgabios-stdvga.bin', 'vgabios-qxl.bin',
'vgabios-cirrus.bin', 'vgabios.bin']:
_CreateLink(qemu_share, 'seavgabios', bios)
# Move the seabios/seavgabios directories into the seabios package, which
# breaks the links.
for bios_dir in ['seabios', 'seavgabios']:
shutil.move(os.path.join(qemu_share, bios_dir),
os.path.join(seabios_share, bios_dir))
_VerifyLinks(broken=True)
# Run the command and verify the links get fixed.
self.SetupCommandMock(extra_args=['--download-vm'])
self.cmd_mock.inst.Run()
_VerifyLinks(broken=False)
def testSymlinkCache(self):
"""Ensures the symlink cache contains valid links to the tarball cache."""
self.SetupCommandMock()
self.cmd_mock.inst.Run()
board, version, _ = self.VERSION_KEY
toolchain_dir = os.path.join(
self.tempdir,
'chrome-sdk/tarballs/target_toolchain/some-fake-hash')
sysroot_dir = os.path.join(
self.tempdir,
'chrome-sdk/tarballs/sysroot_chromeos-base_chromeos-chrome.tar.xz/'
'some-fake-hash')
self.assertExists(toolchain_dir)
self.assertExists(sysroot_dir)
toolchain_link = os.path.join(
self.tempdir,
'chrome-sdk/symlinks/%s+%s+target_toolchain' % (board, version))
sysroot_link = os.path.join(
self.tempdir,
'chrome-sdk/symlinks/%s+%s+sysroot_chromeos-base_chromeos-'
'chrome.tar.xz' % (board, version))
self.assertTrue(os.path.islink(toolchain_link))
self.assertTrue(os.path.islink(sysroot_link))
self.assertEqual(os.path.realpath(toolchain_link), toolchain_dir)
self.assertEqual(os.path.realpath(sysroot_link), sysroot_dir)
def testSymlinkCacheToolchainOverride(self):
"""Ensures that the SDK picks up an overridden component."""
sdk = cros_chrome_sdk.SDKFetcher(os.path.join(self.tempdir),
SDKFetcherMock.BOARD)
board, version, _ = self.VERSION_KEY
toolchain_link = os.path.join(
self.tempdir,
'chrome-sdk/symlinks/%s+%s+target_toolchain' % (board, version))
components = [sdk.TARGET_TOOLCHAIN_KEY]
toolchain_url_1 = 'some-fake-gs-path-1'
toolchain_dir_1 = os.path.join(
self.tempdir,
'chrome-sdk/tarballs/target_toolchain/',
toolchain_url_1)
toolchain_url_2 = 'some-fake-gs-path-2'
toolchain_dir_2 = os.path.join(
self.tempdir,
'chrome-sdk/tarballs/target_toolchain/',
toolchain_url_2)
# Prepare the cache using 'toolchain_url_1'.
self.sdk_mock.tarball_cache_key_map = {
sdk.TARGET_TOOLCHAIN_KEY: toolchain_url_1
}
with sdk.Prepare(components, toolchain_url=toolchain_url_1):
self.assertEqual(toolchain_dir_1, os.path.realpath(toolchain_link))
self.assertExists(toolchain_dir_1)
self.assertNotExists(toolchain_dir_2)
# Prepare the cache with 'toolchain_url_2' and make sure the active symlink
# points to it and that 'toolchain_url_1' is still present.
self.sdk_mock.tarball_cache_key_map = {
sdk.TARGET_TOOLCHAIN_KEY: toolchain_url_2
}
with sdk.Prepare(components, toolchain_url=toolchain_url_2):
self.assertEqual(toolchain_dir_2, os.path.realpath(toolchain_link))
self.assertExists(toolchain_dir_2)
self.assertExists(toolchain_dir_1)
class GomaTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.LoggingTestCase):
"""Test Goma setup functionality."""
def setUp(self):
self.rc_mock = cros_test_lib.RunCommandMock()
self.rc_mock.SetDefaultCmdResult()
self.StartPatcher(self.rc_mock)
self.cmd_mock = MockChromeSDKCommand(
['--board', SDKFetcherMock.BOARD, 'true'],
base_args=['--cache-dir', self.tempdir])
self.StartPatcher(self.cmd_mock)
def VerifyGomaError(self):
self.assertRaises(cros_chrome_sdk.GomaError, self.cmd_mock.inst._FetchGoma)
def testNoGomaPort(self):
"""We print an error when gomacc is not returning a port."""
self.rc_mock.AddCmdResult(
cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD)
self.VerifyGomaError()
def testGomaccError(self):
"""We print an error when gomacc exits with nonzero returncode."""
self.rc_mock.AddCmdResult(
cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD, returncode=1)
self.VerifyGomaError()
def testFetchError(self):
"""We print an error when we can't fetch Goma."""
self.rc_mock.AddCmdResult(
cros_chrome_sdk.ChromeSDKCommand.GOMACC_PORT_CMD, returncode=1)
self.VerifyGomaError()
def testGomaStart(self):
"""Test that we start Goma if it's not already started."""
# Duplicate return values.
self.PatchObject(cros_chrome_sdk.ChromeSDKCommand, '_GomaPort',
side_effect=['XXXX', 'XXXX'])
# Run it twice to exercise caching.
for _ in range(2):
goma_dir, goma_port = self.cmd_mock.inst._FetchGoma()
self.assertEqual(goma_port, 'XXXX')
self.assertTrue(bool(goma_dir))
class VersionTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.LoggingTestCase):
"""Tests the determination of which SDK version to use."""
VERSION = '3543.0.0'
FULL_VERSION = 'R55-%s' % VERSION
RECENT_VERSION_MISSING = '3542.0.0'
RECENT_VERSION_FOUND = '3541.0.0'
FULL_VERSION_RECENT = 'R55-%s' % RECENT_VERSION_FOUND
NON_CANARY_VERSION = '3543.2.1'
FULL_VERSION_NON_CANARY = 'R55-%s' % NON_CANARY_VERSION
BOARD = 'eve'
VERSION_BASE = ('gs://chromeos-image-archive/%s-release/LATEST-%s'
% (BOARD, VERSION))
CAT_ERROR = 'CommandException: No URLs matched %s' % VERSION_BASE
LS_ERROR = 'CommandException: One or more URLs matched no objects.'
def setUp(self):
self.gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
self.gs_mock.SetDefaultCmdResult()
self.sdk_mock = self.StartPatcher(SDKFetcherMock(
external_mocks=[self.gs_mock]))
os.environ.pop(cros_chrome_sdk.SDKFetcher.SDK_VERSION_ENV, None)
self.sdk = cros_chrome_sdk.SDKFetcher(
os.path.join(self.tempdir, 'cache'), self.BOARD)
def testUpdateDefaultChromeVersion(self):
"""We pick up the right LKGM version from the Chrome tree."""
dir_struct = [
'gclient_root/.gclient'
]
cros_test_lib.CreateOnDiskHierarchy(self.tempdir, dir_struct)
gclient_root = os.path.join(self.tempdir, 'gclient_root')
self.PatchObject(os, 'getcwd', return_value=gclient_root)
lkgm_file = os.path.join(gclient_root, 'src', constants.PATH_TO_CHROME_LKGM)
osutils.Touch(lkgm_file, makedirs=True)
osutils.WriteFile(lkgm_file, self.VERSION)
self.sdk_mock.UnMockAttr('UpdateDefaultVersion')
self.sdk.UpdateDefaultVersion()
self.assertEqual(self.sdk.GetDefaultVersion(),
self.VERSION)
def testFullVersionFromFullVersion(self):
"""Test that a fully specified version is allowed."""
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
output=self.FULL_VERSION)
self.assertEqual(
self.FULL_VERSION,
self.sdk.GetFullVersion(self.FULL_VERSION))
def testFullVersionFromPlatformVersion(self):
"""Test full version calculation from the platform version."""
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
output=self.FULL_VERSION)
self.assertEqual(
self.FULL_VERSION,
self.sdk.GetFullVersion(self.VERSION))
def _SetupMissingVersions(self):
"""Version & Version-1 are missing, but Version-2 exists."""
def _RaiseGSNoSuchKey(*_args, **_kwargs):
raise gs.GSNoSuchKey('file does not exist')
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
side_effect=_RaiseGSNoSuchKey)
self.gs_mock.AddCmdResult(
partial_mock.ListRegex(
'cat .*/LATEST-%s' % self.RECENT_VERSION_MISSING),
side_effect=_RaiseGSNoSuchKey)
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.RECENT_VERSION_FOUND),
output=self.FULL_VERSION_RECENT)
def testNoFallbackVersion(self):
"""Test that all versions are checked before raising an exception."""
def _RaiseGSNoSuchKey(*_args, **_kwargs):
raise gs.GSNoSuchKey('file does not exist')
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-*'),
side_effect=_RaiseGSNoSuchKey)
self.sdk.fallback_versions = 2000000
with cros_test_lib.LoggingCapturer() as logs:
self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
self.VERSION)
self.AssertLogsContain(logs, 'LATEST-1.0.0')
self.AssertLogsContain(logs, 'LATEST--1.0.0', inverted=True)
def testFallbackVersions(self):
"""Test full version calculation with various fallback version counts."""
self._SetupMissingVersions()
for version in range(6):
self.sdk.fallback_versions = version
# _SetupMissingVersions mocks the result of 3 files.
# The file ending with LATEST-3.0.0 is the only one that would pass.
if version < 3:
self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
self.VERSION)
else:
self.assertEqual(
self.FULL_VERSION_RECENT,
self.sdk.GetFullVersion(self.VERSION))
def testFullVersionCaching(self):
"""Test full version calculation and caching."""
def RaiseException(*_args, **_kwargs):
raise Exception('boom')
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
output=self.FULL_VERSION)
self.assertEqual(
self.FULL_VERSION,
self.sdk.GetFullVersion(self.VERSION))
# Test that we access the cache on the next call, rather than checking GS.
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
side_effect=RaiseException)
self.assertEqual(
self.FULL_VERSION,
self.sdk.GetFullVersion(self.VERSION))
# Test that we access GS again if the board is changed.
self.sdk.board += '2'
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.VERSION),
output=self.FULL_VERSION + '2')
self.assertEqual(
self.FULL_VERSION + '2',
self.sdk.GetFullVersion(self.VERSION))
def testNoLatestVersion(self):
"""We raise an exception when there is no recent latest version."""
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-*'),
output='', error=self.CAT_ERROR, returncode=1)
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('ls .*%s' % self.VERSION),
output='', error=self.LS_ERROR, returncode=1)
self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
self.VERSION)
def testNonCanaryFullVersion(self):
"""Test full version calculation for a non canary version."""
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.NON_CANARY_VERSION),
output=self.FULL_VERSION_NON_CANARY)
self.assertEqual(
self.FULL_VERSION_NON_CANARY,
self.sdk.GetFullVersion(self.NON_CANARY_VERSION))
def testNonCanaryNoLatestVersion(self):
"""We raise an exception when there is no matching latest non canary."""
self.sdk_mock.UnMockAttr('GetFullVersion')
self.gs_mock.AddCmdResult(
partial_mock.ListRegex('cat .*/LATEST-%s' % self.NON_CANARY_VERSION),
output='', error=self.CAT_ERROR, returncode=1)
# Set any other query to return a valid version, but we don't expect that
# to occur for non canary versions.
self.gs_mock.SetDefaultCmdResult(output=self.FULL_VERSION_NON_CANARY)
self.assertRaises(cros_chrome_sdk.MissingSDK, self.sdk.GetFullVersion,
self.NON_CANARY_VERSION)
def testDefaultEnvBadBoard(self):
"""We don't use the version in the environment if board doesn't match."""
os.environ[cros_chrome_sdk.SDKFetcher.SDK_VERSION_ENV] = self.VERSION
self.assertNotEqual(self.VERSION, self.sdk_mock.VERSION)
self.assertEqual(self.sdk.GetDefaultVersion(), None)
def testDefaultEnvGoodBoard(self):
"""We use the version in the environment if board matches."""
sdk_version_env = cros_chrome_sdk.SDKFetcher.SDK_VERSION_ENV
os.environ[sdk_version_env] = self.VERSION
os.environ[cros_chrome_sdk.SDKFetcher.SDK_BOARD_ENV] = self.BOARD
self.assertEqual(self.sdk.GetDefaultVersion(), self.VERSION)
class PathVerifyTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.LoggingTestCase):
"""Tests user_rc PATH validation and warnings."""
def testPathVerifyWarnings(self):
"""Test the user rc PATH verification codepath."""
def SourceEnvironmentMock(*_args, **_kwargs):
return {
'PATH': ':'.join([os.path.dirname(p) for p in abs_paths]),
}
self.PatchObject(osutils, 'SourceEnvironment',
side_effect=SourceEnvironmentMock)
file_list = (
'goma/goma_ctl.py',
'clang/clang',
'chromite/parallel_emerge',
)
abs_paths = [os.path.join(self.tempdir, relpath) for relpath in file_list]
for p in abs_paths:
osutils.Touch(p, makedirs=True, mode=0o755)
with cros_test_lib.LoggingCapturer() as logs:
cros_chrome_sdk.ChromeSDKCommand._VerifyGoma(None)
cros_chrome_sdk.ChromeSDKCommand._VerifyChromiteBin(None)
for msg in ['managed Goma', 'default Chromite']:
self.AssertLogsMatch(logs, msg)
class ClearOldItemsTest(cros_test_lib.MockTempDirTestCase,
cros_test_lib.LoggingTestCase):
"""Tests SDKFetcher.ClearOldItems() behavior."""
def setUp(self):
"""Sets up a temporary symlink & tarball cache."""
self.gs_mock = self.StartPatcher(gs_unittest.GSContextMock())
self.gs_mock.SetDefaultCmdResult()
self.sdk_fetcher = cros_chrome_sdk.SDKFetcher(self.tempdir, None)
def testBrokenSymlinkCleared(self):
"""Adds a broken symlink and ensures it gets removed."""
osutils.Touch(os.path.join(self.tempdir, 'some-file'))
valid_link_ref = self.sdk_fetcher.symlink_cache.Lookup('some-valid-link')
with valid_link_ref:
self.sdk_fetcher._UpdateCacheSymlink(
valid_link_ref, os.path.join(self.tempdir, 'some-file'))
broken_link_ref = self.sdk_fetcher.symlink_cache.Lookup('some-broken-link')
with broken_link_ref:
self.sdk_fetcher._UpdateCacheSymlink(
broken_link_ref, '/some/invalid/file')
# Broken symlink should exist before the ClearOldItems() call, and be
# removed after.
self.assertTrue(valid_link_ref.Exists())
self.assertTrue(broken_link_ref.Exists())
cros_chrome_sdk.SDKFetcher.ClearOldItems(self.tempdir)
self.assertTrue(valid_link_ref.Exists())
self.assertFalse(broken_link_ref.Exists()) | 0.525856 | 0.107531 |
from __clrclasses__.System.Runtime.CompilerServices import AccessedThroughPropertyAttribute
from __clrclasses__.System.Runtime.CompilerServices import AsyncStateMachineAttribute
from __clrclasses__.System.Runtime.CompilerServices import AsyncTaskMethodBuilder
from __clrclasses__.System.Runtime.CompilerServices import AsyncVoidMethodBuilder
from __clrclasses__.System.Runtime.CompilerServices import CallConvCdecl
from __clrclasses__.System.Runtime.CompilerServices import CallConvFastcall
from __clrclasses__.System.Runtime.CompilerServices import CallConvStdcall
from __clrclasses__.System.Runtime.CompilerServices import CallConvThiscall
from __clrclasses__.System.Runtime.CompilerServices import CallerFilePathAttribute
from __clrclasses__.System.Runtime.CompilerServices import CallerLineNumberAttribute
from __clrclasses__.System.Runtime.CompilerServices import CallerMemberNameAttribute
from __clrclasses__.System.Runtime.CompilerServices import CallSite
from __clrclasses__.System.Runtime.CompilerServices import CallSiteBinder
from __clrclasses__.System.Runtime.CompilerServices import CallSiteHelpers
from __clrclasses__.System.Runtime.CompilerServices import CallSiteOps
from __clrclasses__.System.Runtime.CompilerServices import Closure
from __clrclasses__.System.Runtime.CompilerServices import CompilationRelaxations
from __clrclasses__.System.Runtime.CompilerServices import CompilationRelaxationsAttribute
from __clrclasses__.System.Runtime.CompilerServices import CompilerGeneratedAttribute
from __clrclasses__.System.Runtime.CompilerServices import CompilerGlobalScopeAttribute
from __clrclasses__.System.Runtime.CompilerServices import CompilerMarshalOverride
from __clrclasses__.System.Runtime.CompilerServices import ConditionalWeakTable
from __clrclasses__.System.Runtime.CompilerServices import ConfiguredTaskAwaitable
from __clrclasses__.System.Runtime.CompilerServices import ContractHelper
from __clrclasses__.System.Runtime.CompilerServices import CustomConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import DateTimeConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import DebugInfoGenerator
from __clrclasses__.System.Runtime.CompilerServices import DecimalConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import DefaultDependencyAttribute
from __clrclasses__.System.Runtime.CompilerServices import DependencyAttribute
from __clrclasses__.System.Runtime.CompilerServices import DisablePrivateReflectionAttribute
from __clrclasses__.System.Runtime.CompilerServices import DiscardableAttribute
from __clrclasses__.System.Runtime.CompilerServices import DynamicAttribute
from __clrclasses__.System.Runtime.CompilerServices import ExecutionScope
from __clrclasses__.System.Runtime.CompilerServices import ExtensionAttribute
from __clrclasses__.System.Runtime.CompilerServices import FixedAddressValueTypeAttribute
from __clrclasses__.System.Runtime.CompilerServices import FixedBufferAttribute
from __clrclasses__.System.Runtime.CompilerServices import FormattableStringFactory
from __clrclasses__.System.Runtime.CompilerServices import HasCopySemanticsAttribute
from __clrclasses__.System.Runtime.CompilerServices import IAsyncStateMachine
from __clrclasses__.System.Runtime.CompilerServices import ICriticalNotifyCompletion
from __clrclasses__.System.Runtime.CompilerServices import IDispatchConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import IndexerNameAttribute
from __clrclasses__.System.Runtime.CompilerServices import INotifyCompletion
from __clrclasses__.System.Runtime.CompilerServices import InternalsVisibleToAttribute
from __clrclasses__.System.Runtime.CompilerServices import IRuntimeVariables
from __clrclasses__.System.Runtime.CompilerServices import IsBoxed
from __clrclasses__.System.Runtime.CompilerServices import IsByRefLikeAttribute
from __clrclasses__.System.Runtime.CompilerServices import IsByValue
from __clrclasses__.System.Runtime.CompilerServices import IsConst
from __clrclasses__.System.Runtime.CompilerServices import IsCopyConstructed
from __clrclasses__.System.Runtime.CompilerServices import IsExplicitlyDereferenced
from __clrclasses__.System.Runtime.CompilerServices import IsImplicitlyDereferenced
from __clrclasses__.System.Runtime.CompilerServices import IsJitIntrinsic
from __clrclasses__.System.Runtime.CompilerServices import IsLong
from __clrclasses__.System.Runtime.CompilerServices import IsPinned
from __clrclasses__.System.Runtime.CompilerServices import IsReadOnlyAttribute
from __clrclasses__.System.Runtime.CompilerServices import IsSignUnspecifiedByte
from __clrclasses__.System.Runtime.CompilerServices import IStrongBox
from __clrclasses__.System.Runtime.CompilerServices import IsUdtReturn
from __clrclasses__.System.Runtime.CompilerServices import IsVolatile
from __clrclasses__.System.Runtime.CompilerServices import IteratorStateMachineAttribute
from __clrclasses__.System.Runtime.CompilerServices import ITuple
from __clrclasses__.System.Runtime.CompilerServices import IUnknownConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import LoadHint
from __clrclasses__.System.Runtime.CompilerServices import MethodCodeType
from __clrclasses__.System.Runtime.CompilerServices import MethodImplAttribute
from __clrclasses__.System.Runtime.CompilerServices import MethodImplOptions
from __clrclasses__.System.Runtime.CompilerServices import NativeCppClassAttribute
from __clrclasses__.System.Runtime.CompilerServices import ReadOnlyCollectionBuilder
from __clrclasses__.System.Runtime.CompilerServices import ReferenceAssemblyAttribute
from __clrclasses__.System.Runtime.CompilerServices import RequiredAttributeAttribute
from __clrclasses__.System.Runtime.CompilerServices import RuleCache
from __clrclasses__.System.Runtime.CompilerServices import RuntimeCompatibilityAttribute
from __clrclasses__.System.Runtime.CompilerServices import RuntimeFeature
from __clrclasses__.System.Runtime.CompilerServices import RuntimeHelpers
from __clrclasses__.System.Runtime.CompilerServices import RuntimeOps
from __clrclasses__.System.Runtime.CompilerServices import RuntimeWrappedException
from __clrclasses__.System.Runtime.CompilerServices import ScopelessEnumAttribute
from __clrclasses__.System.Runtime.CompilerServices import SpecialNameAttribute
from __clrclasses__.System.Runtime.CompilerServices import StateMachineAttribute
from __clrclasses__.System.Runtime.CompilerServices import StringFreezingAttribute
from __clrclasses__.System.Runtime.CompilerServices import StrongBox
from __clrclasses__.System.Runtime.CompilerServices import SuppressIldasmAttribute
from __clrclasses__.System.Runtime.CompilerServices import TaskAwaiter
from __clrclasses__.System.Runtime.CompilerServices import TupleElementNamesAttribute
from __clrclasses__.System.Runtime.CompilerServices import TypeForwardedFromAttribute
from __clrclasses__.System.Runtime.CompilerServices import TypeForwardedToAttribute
from __clrclasses__.System.Runtime.CompilerServices import UnsafeValueTypeAttribute
from __clrclasses__.System.Runtime.CompilerServices import YieldAwaitable | extensions/.stubs/clrclasses/System/Runtime/CompilerServices/__init__.py | from __clrclasses__.System.Runtime.CompilerServices import AccessedThroughPropertyAttribute
from __clrclasses__.System.Runtime.CompilerServices import AsyncStateMachineAttribute
from __clrclasses__.System.Runtime.CompilerServices import AsyncTaskMethodBuilder
from __clrclasses__.System.Runtime.CompilerServices import AsyncVoidMethodBuilder
from __clrclasses__.System.Runtime.CompilerServices import CallConvCdecl
from __clrclasses__.System.Runtime.CompilerServices import CallConvFastcall
from __clrclasses__.System.Runtime.CompilerServices import CallConvStdcall
from __clrclasses__.System.Runtime.CompilerServices import CallConvThiscall
from __clrclasses__.System.Runtime.CompilerServices import CallerFilePathAttribute
from __clrclasses__.System.Runtime.CompilerServices import CallerLineNumberAttribute
from __clrclasses__.System.Runtime.CompilerServices import CallerMemberNameAttribute
from __clrclasses__.System.Runtime.CompilerServices import CallSite
from __clrclasses__.System.Runtime.CompilerServices import CallSiteBinder
from __clrclasses__.System.Runtime.CompilerServices import CallSiteHelpers
from __clrclasses__.System.Runtime.CompilerServices import CallSiteOps
from __clrclasses__.System.Runtime.CompilerServices import Closure
from __clrclasses__.System.Runtime.CompilerServices import CompilationRelaxations
from __clrclasses__.System.Runtime.CompilerServices import CompilationRelaxationsAttribute
from __clrclasses__.System.Runtime.CompilerServices import CompilerGeneratedAttribute
from __clrclasses__.System.Runtime.CompilerServices import CompilerGlobalScopeAttribute
from __clrclasses__.System.Runtime.CompilerServices import CompilerMarshalOverride
from __clrclasses__.System.Runtime.CompilerServices import ConditionalWeakTable
from __clrclasses__.System.Runtime.CompilerServices import ConfiguredTaskAwaitable
from __clrclasses__.System.Runtime.CompilerServices import ContractHelper
from __clrclasses__.System.Runtime.CompilerServices import CustomConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import DateTimeConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import DebugInfoGenerator
from __clrclasses__.System.Runtime.CompilerServices import DecimalConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import DefaultDependencyAttribute
from __clrclasses__.System.Runtime.CompilerServices import DependencyAttribute
from __clrclasses__.System.Runtime.CompilerServices import DisablePrivateReflectionAttribute
from __clrclasses__.System.Runtime.CompilerServices import DiscardableAttribute
from __clrclasses__.System.Runtime.CompilerServices import DynamicAttribute
from __clrclasses__.System.Runtime.CompilerServices import ExecutionScope
from __clrclasses__.System.Runtime.CompilerServices import ExtensionAttribute
from __clrclasses__.System.Runtime.CompilerServices import FixedAddressValueTypeAttribute
from __clrclasses__.System.Runtime.CompilerServices import FixedBufferAttribute
from __clrclasses__.System.Runtime.CompilerServices import FormattableStringFactory
from __clrclasses__.System.Runtime.CompilerServices import HasCopySemanticsAttribute
from __clrclasses__.System.Runtime.CompilerServices import IAsyncStateMachine
from __clrclasses__.System.Runtime.CompilerServices import ICriticalNotifyCompletion
from __clrclasses__.System.Runtime.CompilerServices import IDispatchConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import IndexerNameAttribute
from __clrclasses__.System.Runtime.CompilerServices import INotifyCompletion
from __clrclasses__.System.Runtime.CompilerServices import InternalsVisibleToAttribute
from __clrclasses__.System.Runtime.CompilerServices import IRuntimeVariables
from __clrclasses__.System.Runtime.CompilerServices import IsBoxed
from __clrclasses__.System.Runtime.CompilerServices import IsByRefLikeAttribute
from __clrclasses__.System.Runtime.CompilerServices import IsByValue
from __clrclasses__.System.Runtime.CompilerServices import IsConst
from __clrclasses__.System.Runtime.CompilerServices import IsCopyConstructed
from __clrclasses__.System.Runtime.CompilerServices import IsExplicitlyDereferenced
from __clrclasses__.System.Runtime.CompilerServices import IsImplicitlyDereferenced
from __clrclasses__.System.Runtime.CompilerServices import IsJitIntrinsic
from __clrclasses__.System.Runtime.CompilerServices import IsLong
from __clrclasses__.System.Runtime.CompilerServices import IsPinned
from __clrclasses__.System.Runtime.CompilerServices import IsReadOnlyAttribute
from __clrclasses__.System.Runtime.CompilerServices import IsSignUnspecifiedByte
from __clrclasses__.System.Runtime.CompilerServices import IStrongBox
from __clrclasses__.System.Runtime.CompilerServices import IsUdtReturn
from __clrclasses__.System.Runtime.CompilerServices import IsVolatile
from __clrclasses__.System.Runtime.CompilerServices import IteratorStateMachineAttribute
from __clrclasses__.System.Runtime.CompilerServices import ITuple
from __clrclasses__.System.Runtime.CompilerServices import IUnknownConstantAttribute
from __clrclasses__.System.Runtime.CompilerServices import LoadHint
from __clrclasses__.System.Runtime.CompilerServices import MethodCodeType
from __clrclasses__.System.Runtime.CompilerServices import MethodImplAttribute
from __clrclasses__.System.Runtime.CompilerServices import MethodImplOptions
from __clrclasses__.System.Runtime.CompilerServices import NativeCppClassAttribute
from __clrclasses__.System.Runtime.CompilerServices import ReadOnlyCollectionBuilder
from __clrclasses__.System.Runtime.CompilerServices import ReferenceAssemblyAttribute
from __clrclasses__.System.Runtime.CompilerServices import RequiredAttributeAttribute
from __clrclasses__.System.Runtime.CompilerServices import RuleCache
from __clrclasses__.System.Runtime.CompilerServices import RuntimeCompatibilityAttribute
from __clrclasses__.System.Runtime.CompilerServices import RuntimeFeature
from __clrclasses__.System.Runtime.CompilerServices import RuntimeHelpers
from __clrclasses__.System.Runtime.CompilerServices import RuntimeOps
from __clrclasses__.System.Runtime.CompilerServices import RuntimeWrappedException
from __clrclasses__.System.Runtime.CompilerServices import ScopelessEnumAttribute
from __clrclasses__.System.Runtime.CompilerServices import SpecialNameAttribute
from __clrclasses__.System.Runtime.CompilerServices import StateMachineAttribute
from __clrclasses__.System.Runtime.CompilerServices import StringFreezingAttribute
from __clrclasses__.System.Runtime.CompilerServices import StrongBox
from __clrclasses__.System.Runtime.CompilerServices import SuppressIldasmAttribute
from __clrclasses__.System.Runtime.CompilerServices import TaskAwaiter
from __clrclasses__.System.Runtime.CompilerServices import TupleElementNamesAttribute
from __clrclasses__.System.Runtime.CompilerServices import TypeForwardedFromAttribute
from __clrclasses__.System.Runtime.CompilerServices import TypeForwardedToAttribute
from __clrclasses__.System.Runtime.CompilerServices import UnsafeValueTypeAttribute
from __clrclasses__.System.Runtime.CompilerServices import YieldAwaitable | 0.613121 | 0.031232 |
import os
import torch
import torch.nn.functional as F
import yaml
import copy
from ast import literal_eval
from typing import Callable, Iterable, List, TypeVar
import torch.distributed as dist
from typing import Tuple
import argparse
A = TypeVar("A")
B = TypeVar("B")
class TimeDistributed(torch.nn.Module):
"""
Given an input shaped like ``(batch_size, time_steps, [rest])`` and a ``Module`` that takes
inputs like ``(batch_size, [rest])``, ``TimeDistributed`` reshapes the input to be
``(batch_size * time_steps, [rest])``, applies the contained ``Module``, then reshapes it back.
Note that while the above gives shapes with ``batch_size`` first, this ``Module`` also works if
``batch_size`` is second - we always just combine the first two dimensions, then split them.
It also reshapes keyword arguments unless they are not tensors or their name is specified in
the optional ``pass_through`` iterable.
"""
def __init__(self, module):
super().__init__()
self._module = module
def forward(self, *inputs, pass_through: List[str] = None, **kwargs):
# pylint: disable=arguments-differ
pass_through = pass_through or []
reshaped_inputs = [self._reshape_tensor(input_tensor) for input_tensor in inputs]
# Need some input to then get the batch_size and time_steps.
some_input = None
if inputs:
some_input = inputs[-1]
reshaped_kwargs = {}
for key, value in kwargs.items():
if isinstance(value, torch.Tensor) and key not in pass_through:
if some_input is None:
some_input = value
value = self._reshape_tensor(value)
reshaped_kwargs[key] = value
reshaped_outputs = self._module(*reshaped_inputs, **reshaped_kwargs)
if some_input is None:
raise RuntimeError("No input tensor to time-distribute")
# Now get the output back into the right shape.
# (batch_size, time_steps, **output_size)
new_size = some_input.size()[:2] + reshaped_outputs.size()[1:]
outputs = reshaped_outputs.contiguous().view(new_size)
return outputs
@staticmethod
def _reshape_tensor(input_tensor):
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError(f"No dimension to distribute: {input_size}")
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, **input_size).
squashed_shape = [-1] + list(input_size[2:])
return input_tensor.contiguous().view(*squashed_shape)
def main_process(args: argparse.Namespace) -> bool:
if args.distributed:
rank = dist.get_rank()
if rank == 0:
return True
else:
return False
else:
return True
def setup(args: argparse.Namespace,
rank: int,
world_size: int) -> None:
"""
Used for distributed learning
"""
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(args.port)
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size)
def cleanup() -> None:
"""
Used for distributed learning
"""
dist.destroy_process_group()
def find_free_port() -> int:
"""
Used for distributed learning
"""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def map_(fn: Callable[[A], B], iter: Iterable[A]) -> List[B]:
"""
Used for multiprocessing
"""
return list(map(fn, iter))
def get_model_dir(args: argparse.Namespace) -> str:
"""
Obtain the directory to save/load the model
"""
path = os.path.join('model_ckpt',
args.train_name,
f'split={args.train_split}',
'model',
f'pspnet_{args.arch}{args.layers}',
f'smoothing={args.smoothing}',
f'mixup={args.mixup}')
return path
def to_one_hot(mask: torch.tensor,
num_classes: int) -> torch.tensor:
"""
inputs:
mask : shape [n_task, shot, h, w]
num_classes : Number of classes
returns :
one_hot_mask : shape [n_task, shot, num_class, h, w]
"""
n_tasks, shot, h, w = mask.size()
one_hot_mask = torch.zeros(n_tasks, shot, num_classes, h, w).to(dist.get_rank())
new_mask = mask.unsqueeze(2).clone()
new_mask[torch.where(new_mask == 255)] = 0 # Ignore_pixels are anyways filtered out in the losses
one_hot_mask.scatter_(2, new_mask, 1).long()
return one_hot_mask
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def batch_intersectionAndUnionGPU(logits: torch.Tensor,
target: torch.Tensor,
num_classes: int,
ignore_index=255) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:
"""
inputs:
logits : shape [n_task, shot, num_class, h, w]
target : shape [n_task, shot, H, W]
num_classes : Number of classes
returns :
area_intersection : shape [n_task, shot, num_class]
area_union : shape [n_task, shot, num_class]
area_target : shape [n_task, shot, num_class]
"""
n_task, shots, num_classes, h, w = logits.size()
H, W = target.size()[-2:]
logits = F.interpolate(logits.view(n_task * shots, num_classes, h, w),
size=(H, W), mode='bilinear', align_corners=True).view(n_task, shots, num_classes, H, W)
preds = logits.argmax(2) # [n_task, shot, H, W]
n_tasks, shot, num_classes, H, W = logits.size()
area_intersection = torch.zeros(n_tasks, shot, num_classes)
area_union = torch.zeros(n_tasks, shot, num_classes)
area_target = torch.zeros(n_tasks, shot, num_classes)
for task in range(n_tasks):
for shot in range(shots):
i, u, t = intersectionAndUnionGPU(preds[task][shot], target[task][shot],
num_classes, ignore_index=ignore_index) # i,u, t are of size()
area_intersection[task, shot, :] = i
area_union[task, shot, :] = u
area_target[task, shot, :] = t
return area_intersection, area_union, area_target
def intersectionAndUnionGPU(preds: torch.tensor,
target: torch.tensor,
num_classes: int,
ignore_index=255) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:
"""
inputs:
preds : shape [H, W]
target : shape [H, W]
num_classes : Number of classes
returns :
area_intersection : shape [num_class]
area_union : shape [num_class]
area_target : shape [num_class]
"""
assert (preds.dim() in [1, 2, 3])
assert preds.shape == target.shape
preds = preds.view(-1)
target = target.view(-1)
preds[target == ignore_index] = ignore_index
intersection = preds[preds == target]
# Addind .float() becausue histc not working with long() on CPU
area_intersection = torch.histc(intersection.float(), bins=num_classes, min=0, max=num_classes-1)
area_output = torch.histc(preds.float(), bins=num_classes, min=0, max=num_classes-1)
area_target = torch.histc(target.float(), bins=num_classes, min=0, max=num_classes-1)
area_union = area_output + area_target - area_intersection
# print(torch.unique(intersection))
return area_intersection, area_union, area_target
# ======================================================================================================================
# ======== All following helper functions have been borrowed from from https://github.com/Jia-Research-Lab/PFENet ======
# ======================================================================================================================
class CfgNode(dict):
"""
CfgNode represents an internal node in the configuration tree. It's a simple
dict-like container that allows for attribute-based access to keys.
"""
def __init__(self, init_dict=None, key_list=None, new_allowed=False):
# Recursively convert nested dictionaries in init_dict into CfgNodes
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
for k, v in init_dict.items():
if type(v) is dict:
# Convert dict to CfgNode
init_dict[k] = CfgNode(v, key_list=key_list + [k])
super(CfgNode, self).__init__(init_dict)
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
for k, v in sorted(self.items()):
seperator = "\n" if isinstance(v, CfgNode) else " "
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 2)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def _decode_cfg_value(v):
if not isinstance(v, str):
return v
try:
v = literal_eval(v)
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
original_type = type(original)
replacement_type = type(replacement)
# The types must match (with some exceptions)
if replacement_type == original_type:
return replacement
def conditional_cast(from_type, to_type):
if replacement_type == from_type and original_type == to_type:
return True, to_type(replacement)
else:
return False, None
casts = [(tuple, list), (list, tuple)]
try:
casts.append((str, unicode)) # noqa: F821
except Exception:
pass
for (from_type, to_type) in casts:
converted, converted_value = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError(
"Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
"key: {}".format(
original_type, replacement_type, original, replacement, full_key
)
)
def load_cfg_from_cfg_file(file: str):
cfg = {}
assert os.path.isfile(file) and file.endswith('.yaml'), \
'{} is not a yaml file'.format(file)
with open(file, 'r') as f:
cfg_from_file = yaml.safe_load(f)
for key in cfg_from_file:
for k, v in cfg_from_file[key].items():
cfg[k] = v
cfg = CfgNode(cfg)
return cfg
class ClassIoUNew():
def __init__(self, class_size):
self.class_size = class_size
self.cls_iou = torch.zeros(self.class_size)
self.cls_counts = torch.zeros(self.class_size)
def update(self, intersection: torch.Tensor, union: torch.Tensor, classes: torch.Tensor):
for i, task_cls in enumerate(classes):
self.cls_iou[(task_cls - 1) % 5] += intersection[i, 0, 1] / union[i, 0, 1]
self.cls_counts[(task_cls - 1) % 5] += 1
def compute(self):
return torch.mean(self.cls_iou[self.cls_counts != 0] / self.cls_counts[self.cls_counts != 0])
def reset(self):
self.cls_iou = torch.zeros(self.class_size)
self.cls_counts = torch.zeros(self.class_size)
def merge_cfg_from_list(cfg: CfgNode,
cfg_list: List[str]):
new_cfg = copy.deepcopy(cfg)
assert len(cfg_list) % 2 == 0, cfg_list
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
subkey = full_key.split('.')[-1]
assert subkey in cfg, 'Non-existent key: {}'.format(full_key)
value = _decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(
value, cfg[subkey], subkey, full_key
)
setattr(new_cfg, subkey, value)
return new_cfg | src/util.py | import os
import torch
import torch.nn.functional as F
import yaml
import copy
from ast import literal_eval
from typing import Callable, Iterable, List, TypeVar
import torch.distributed as dist
from typing import Tuple
import argparse
A = TypeVar("A")
B = TypeVar("B")
class TimeDistributed(torch.nn.Module):
"""
Given an input shaped like ``(batch_size, time_steps, [rest])`` and a ``Module`` that takes
inputs like ``(batch_size, [rest])``, ``TimeDistributed`` reshapes the input to be
``(batch_size * time_steps, [rest])``, applies the contained ``Module``, then reshapes it back.
Note that while the above gives shapes with ``batch_size`` first, this ``Module`` also works if
``batch_size`` is second - we always just combine the first two dimensions, then split them.
It also reshapes keyword arguments unless they are not tensors or their name is specified in
the optional ``pass_through`` iterable.
"""
def __init__(self, module):
super().__init__()
self._module = module
def forward(self, *inputs, pass_through: List[str] = None, **kwargs):
# pylint: disable=arguments-differ
pass_through = pass_through or []
reshaped_inputs = [self._reshape_tensor(input_tensor) for input_tensor in inputs]
# Need some input to then get the batch_size and time_steps.
some_input = None
if inputs:
some_input = inputs[-1]
reshaped_kwargs = {}
for key, value in kwargs.items():
if isinstance(value, torch.Tensor) and key not in pass_through:
if some_input is None:
some_input = value
value = self._reshape_tensor(value)
reshaped_kwargs[key] = value
reshaped_outputs = self._module(*reshaped_inputs, **reshaped_kwargs)
if some_input is None:
raise RuntimeError("No input tensor to time-distribute")
# Now get the output back into the right shape.
# (batch_size, time_steps, **output_size)
new_size = some_input.size()[:2] + reshaped_outputs.size()[1:]
outputs = reshaped_outputs.contiguous().view(new_size)
return outputs
@staticmethod
def _reshape_tensor(input_tensor):
input_size = input_tensor.size()
if len(input_size) <= 2:
raise RuntimeError(f"No dimension to distribute: {input_size}")
# Squash batch_size and time_steps into a single axis; result has shape
# (batch_size * time_steps, **input_size).
squashed_shape = [-1] + list(input_size[2:])
return input_tensor.contiguous().view(*squashed_shape)
def main_process(args: argparse.Namespace) -> bool:
if args.distributed:
rank = dist.get_rank()
if rank == 0:
return True
else:
return False
else:
return True
def setup(args: argparse.Namespace,
rank: int,
world_size: int) -> None:
"""
Used for distributed learning
"""
os.environ['MASTER_ADDR'] = 'localhost'
os.environ['MASTER_PORT'] = str(args.port)
# initialize the process group
dist.init_process_group("nccl", rank=rank, world_size=world_size)
def cleanup() -> None:
"""
Used for distributed learning
"""
dist.destroy_process_group()
def find_free_port() -> int:
"""
Used for distributed learning
"""
import socket
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.bind(("", 0))
port = sock.getsockname()[1]
sock.close()
return port
def map_(fn: Callable[[A], B], iter: Iterable[A]) -> List[B]:
"""
Used for multiprocessing
"""
return list(map(fn, iter))
def get_model_dir(args: argparse.Namespace) -> str:
"""
Obtain the directory to save/load the model
"""
path = os.path.join('model_ckpt',
args.train_name,
f'split={args.train_split}',
'model',
f'pspnet_{args.arch}{args.layers}',
f'smoothing={args.smoothing}',
f'mixup={args.mixup}')
return path
def to_one_hot(mask: torch.tensor,
num_classes: int) -> torch.tensor:
"""
inputs:
mask : shape [n_task, shot, h, w]
num_classes : Number of classes
returns :
one_hot_mask : shape [n_task, shot, num_class, h, w]
"""
n_tasks, shot, h, w = mask.size()
one_hot_mask = torch.zeros(n_tasks, shot, num_classes, h, w).to(dist.get_rank())
new_mask = mask.unsqueeze(2).clone()
new_mask[torch.where(new_mask == 255)] = 0 # Ignore_pixels are anyways filtered out in the losses
one_hot_mask.scatter_(2, new_mask, 1).long()
return one_hot_mask
class AverageMeter(object):
"""Computes and stores the average and current value"""
def __init__(self):
self.reset()
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
def batch_intersectionAndUnionGPU(logits: torch.Tensor,
target: torch.Tensor,
num_classes: int,
ignore_index=255) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:
"""
inputs:
logits : shape [n_task, shot, num_class, h, w]
target : shape [n_task, shot, H, W]
num_classes : Number of classes
returns :
area_intersection : shape [n_task, shot, num_class]
area_union : shape [n_task, shot, num_class]
area_target : shape [n_task, shot, num_class]
"""
n_task, shots, num_classes, h, w = logits.size()
H, W = target.size()[-2:]
logits = F.interpolate(logits.view(n_task * shots, num_classes, h, w),
size=(H, W), mode='bilinear', align_corners=True).view(n_task, shots, num_classes, H, W)
preds = logits.argmax(2) # [n_task, shot, H, W]
n_tasks, shot, num_classes, H, W = logits.size()
area_intersection = torch.zeros(n_tasks, shot, num_classes)
area_union = torch.zeros(n_tasks, shot, num_classes)
area_target = torch.zeros(n_tasks, shot, num_classes)
for task in range(n_tasks):
for shot in range(shots):
i, u, t = intersectionAndUnionGPU(preds[task][shot], target[task][shot],
num_classes, ignore_index=ignore_index) # i,u, t are of size()
area_intersection[task, shot, :] = i
area_union[task, shot, :] = u
area_target[task, shot, :] = t
return area_intersection, area_union, area_target
def intersectionAndUnionGPU(preds: torch.tensor,
target: torch.tensor,
num_classes: int,
ignore_index=255) -> Tuple[torch.tensor, torch.tensor, torch.tensor]:
"""
inputs:
preds : shape [H, W]
target : shape [H, W]
num_classes : Number of classes
returns :
area_intersection : shape [num_class]
area_union : shape [num_class]
area_target : shape [num_class]
"""
assert (preds.dim() in [1, 2, 3])
assert preds.shape == target.shape
preds = preds.view(-1)
target = target.view(-1)
preds[target == ignore_index] = ignore_index
intersection = preds[preds == target]
# Addind .float() becausue histc not working with long() on CPU
area_intersection = torch.histc(intersection.float(), bins=num_classes, min=0, max=num_classes-1)
area_output = torch.histc(preds.float(), bins=num_classes, min=0, max=num_classes-1)
area_target = torch.histc(target.float(), bins=num_classes, min=0, max=num_classes-1)
area_union = area_output + area_target - area_intersection
# print(torch.unique(intersection))
return area_intersection, area_union, area_target
# ======================================================================================================================
# ======== All following helper functions have been borrowed from from https://github.com/Jia-Research-Lab/PFENet ======
# ======================================================================================================================
class CfgNode(dict):
"""
CfgNode represents an internal node in the configuration tree. It's a simple
dict-like container that allows for attribute-based access to keys.
"""
def __init__(self, init_dict=None, key_list=None, new_allowed=False):
# Recursively convert nested dictionaries in init_dict into CfgNodes
init_dict = {} if init_dict is None else init_dict
key_list = [] if key_list is None else key_list
for k, v in init_dict.items():
if type(v) is dict:
# Convert dict to CfgNode
init_dict[k] = CfgNode(v, key_list=key_list + [k])
super(CfgNode, self).__init__(init_dict)
def __getattr__(self, name):
if name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
self[name] = value
def __str__(self):
def _indent(s_, num_spaces):
s = s_.split("\n")
if len(s) == 1:
return s_
first = s.pop(0)
s = [(num_spaces * " ") + line for line in s]
s = "\n".join(s)
s = first + "\n" + s
return s
r = ""
s = []
for k, v in sorted(self.items()):
seperator = "\n" if isinstance(v, CfgNode) else " "
attr_str = "{}:{}{}".format(str(k), seperator, str(v))
attr_str = _indent(attr_str, 2)
s.append(attr_str)
r += "\n".join(s)
return r
def __repr__(self):
return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
def _decode_cfg_value(v):
if not isinstance(v, str):
return v
try:
v = literal_eval(v)
except ValueError:
pass
except SyntaxError:
pass
return v
def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
original_type = type(original)
replacement_type = type(replacement)
# The types must match (with some exceptions)
if replacement_type == original_type:
return replacement
def conditional_cast(from_type, to_type):
if replacement_type == from_type and original_type == to_type:
return True, to_type(replacement)
else:
return False, None
casts = [(tuple, list), (list, tuple)]
try:
casts.append((str, unicode)) # noqa: F821
except Exception:
pass
for (from_type, to_type) in casts:
converted, converted_value = conditional_cast(from_type, to_type)
if converted:
return converted_value
raise ValueError(
"Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
"key: {}".format(
original_type, replacement_type, original, replacement, full_key
)
)
def load_cfg_from_cfg_file(file: str):
cfg = {}
assert os.path.isfile(file) and file.endswith('.yaml'), \
'{} is not a yaml file'.format(file)
with open(file, 'r') as f:
cfg_from_file = yaml.safe_load(f)
for key in cfg_from_file:
for k, v in cfg_from_file[key].items():
cfg[k] = v
cfg = CfgNode(cfg)
return cfg
class ClassIoUNew():
def __init__(self, class_size):
self.class_size = class_size
self.cls_iou = torch.zeros(self.class_size)
self.cls_counts = torch.zeros(self.class_size)
def update(self, intersection: torch.Tensor, union: torch.Tensor, classes: torch.Tensor):
for i, task_cls in enumerate(classes):
self.cls_iou[(task_cls - 1) % 5] += intersection[i, 0, 1] / union[i, 0, 1]
self.cls_counts[(task_cls - 1) % 5] += 1
def compute(self):
return torch.mean(self.cls_iou[self.cls_counts != 0] / self.cls_counts[self.cls_counts != 0])
def reset(self):
self.cls_iou = torch.zeros(self.class_size)
self.cls_counts = torch.zeros(self.class_size)
def merge_cfg_from_list(cfg: CfgNode,
cfg_list: List[str]):
new_cfg = copy.deepcopy(cfg)
assert len(cfg_list) % 2 == 0, cfg_list
for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
subkey = full_key.split('.')[-1]
assert subkey in cfg, 'Non-existent key: {}'.format(full_key)
value = _decode_cfg_value(v)
value = _check_and_coerce_cfg_value_type(
value, cfg[subkey], subkey, full_key
)
setattr(new_cfg, subkey, value)
return new_cfg | 0.883845 | 0.498901 |
import pandas as pd
import numpy as np
import argparse
import sys
import os
import pdb
import collections
import glob
import rpy2
from multiprocessing import Pool
sys.path.append('../common/')
import utilities as util
import analysis
import mutation_base
def get_options():
parser = argparse.ArgumentParser(description='Get mutation, cnv, and clinical directories. optional output dir')
parser.add_argument('-i', action='store', dest='cnv_directory')
parser.add_argument('-c', action='store', dest='clinical')
parser.add_argument('-f', action='store', dest='interesting_genes_file')
parser.add_argument('-o', action='store', dest='output_directory', default='.')
ns = parser.parse_args()
return (ns.cnv_directory, ns.clinical, ns.interesting_genes_file, ns.output_directory)
def make_cn_zscores(copy_number, clinical, interesting_genes=None, outdir='.'):
clinical_data = util.get_clinical_data(clinical)
cnv = pd.read_csv(copy_number, index_col=0)
cnv_by_patient = cnv.transpose()
cancer_type = util.get_cancer_type(copy_number)
relevant_genes = '\'' + interesting_genes.index
relevant_genes = list(relevant_genes)
cnv = cnv_by_patient[relevant_genes]
cnv = cnv.join(clinical_data, how='inner')
results = []
for gene in cnv:
if gene in ('time', 'censor'): # skip metadata
continue
if cnv[gene].count() > 10:
cnv[gene + '_split'] = np.nan
cnv.loc[cnv[gene] <= -0.3, gene + '_split'] = -1
cnv.loc[cnv[gene].between(-0.3, 0.3), gene + '_split'] = 0
cnv.loc[cnv[gene] >= 0.3, gene + '_split'] = 1
cox_dict = analysis.do_cox(cnv.time,
cnv.censor,
cnv[gene + '_split'])
cox_dict['gene'] = gene
cox_dict['cancer_type'] = cancer_type
results.append(cox_dict)
cnv.to_csv(os.path.join(outdir, cancer_type + '_trichotomized.csv'))
return results
def main(argv=None):
cnv_dir, clinical, interesting_genes_file, outdir = get_options()
cnv_files = os.listdir(cnv_dir)
cnv_files = util.remove_extraneous_files(cnv_files)
cnv_files = [os.path.join(cnv_dir, i) for i in cnv_files]
interesting_genes = pd.read_csv(interesting_genes_file, index_col=0, header=None)
results = []
for cnv in cnv_files:
cancer_type = util.get_cancer_type(cnv)
clinical_file = glob.glob(os.path.join(clinical, '*' + cancer_type + '*'))[0]
results += make_cn_zscores(cnv, clinical_file, interesting_genes, outdir)
results_df = pd.DataFrame(results)
results_df = results_df.set_index(['cancer_type', 'gene'])
results_df.to_csv(os.path.join(outdir, 'trichotomized_copy_number_zscores.csv'))
if __name__ == "__main__":
main() | copy_number/interesting_genes_trichotomized_zscores.py | import pandas as pd
import numpy as np
import argparse
import sys
import os
import pdb
import collections
import glob
import rpy2
from multiprocessing import Pool
sys.path.append('../common/')
import utilities as util
import analysis
import mutation_base
def get_options():
parser = argparse.ArgumentParser(description='Get mutation, cnv, and clinical directories. optional output dir')
parser.add_argument('-i', action='store', dest='cnv_directory')
parser.add_argument('-c', action='store', dest='clinical')
parser.add_argument('-f', action='store', dest='interesting_genes_file')
parser.add_argument('-o', action='store', dest='output_directory', default='.')
ns = parser.parse_args()
return (ns.cnv_directory, ns.clinical, ns.interesting_genes_file, ns.output_directory)
def make_cn_zscores(copy_number, clinical, interesting_genes=None, outdir='.'):
clinical_data = util.get_clinical_data(clinical)
cnv = pd.read_csv(copy_number, index_col=0)
cnv_by_patient = cnv.transpose()
cancer_type = util.get_cancer_type(copy_number)
relevant_genes = '\'' + interesting_genes.index
relevant_genes = list(relevant_genes)
cnv = cnv_by_patient[relevant_genes]
cnv = cnv.join(clinical_data, how='inner')
results = []
for gene in cnv:
if gene in ('time', 'censor'): # skip metadata
continue
if cnv[gene].count() > 10:
cnv[gene + '_split'] = np.nan
cnv.loc[cnv[gene] <= -0.3, gene + '_split'] = -1
cnv.loc[cnv[gene].between(-0.3, 0.3), gene + '_split'] = 0
cnv.loc[cnv[gene] >= 0.3, gene + '_split'] = 1
cox_dict = analysis.do_cox(cnv.time,
cnv.censor,
cnv[gene + '_split'])
cox_dict['gene'] = gene
cox_dict['cancer_type'] = cancer_type
results.append(cox_dict)
cnv.to_csv(os.path.join(outdir, cancer_type + '_trichotomized.csv'))
return results
def main(argv=None):
    """Run the trichotomized CNA survival analysis over every cancer type.

    Reads options from the command line, pairs each copy-number file with
    its clinical file, accumulates per-gene Cox results, and writes the
    combined table to 'trichotomized_copy_number_zscores.csv'.
    """
    cnv_dir, clinical, interesting_genes_file, outdir = get_options()
    # Collect the per-cancer-type copy-number CSVs from the input directory.
    file_names = util.remove_extraneous_files(os.listdir(cnv_dir))
    cnv_paths = [os.path.join(cnv_dir, name) for name in file_names]
    interesting_genes = pd.read_csv(interesting_genes_file, index_col=0, header=None)
    results = []
    for cnv_path in cnv_paths:
        cancer_type = util.get_cancer_type(cnv_path)
        # Pair the CNA file with its clinical file by cancer-type substring.
        clinical_file = glob.glob(os.path.join(clinical, '*' + cancer_type + '*'))[0]
        results.extend(make_cn_zscores(cnv_path, clinical_file, interesting_genes, outdir))
    summary = pd.DataFrame(results).set_index(['cancer_type', 'gene'])
    summary.to_csv(os.path.join(outdir, 'trichotomized_copy_number_zscores.csv'))


if __name__ == "__main__":
    main()
from footmark.ecs.connection import ECSConnection
from tests.unit import ACSMockServiceTestCase
import json
# Canned Aliyun ECS API responses used by the test cases below.
DESCRIBE_INSTANCE = '''
{
    "Instances": {
        "Instance": [
            {
                "CreationTime": "2016-06-20T21:37Z",
                "DeviceAvailable": true,
                "EipAddress": {},
                "ExpiredTime": "2016-10-22T16:00Z",
                "HostName": "xiaozhu_test",
                "ImageId": "centos6u5_64_40G_cloudinit_20160427.raw",
                "InnerIpAddress": {
                    "IpAddress": [
                        "10.170.106.80"
                    ]
                },
                "InstanceChargeType": "PostPaid",
                "InstanceId": "i-94dehop6n",
                "InstanceNetworkType": "classic",
                "InstanceType": "ecs.s2.large",
                "InternetChargeType": "PayByTraffic",
                "InternetMaxBandwidthIn": -1,
                "InternetMaxBandwidthOut": 1,
                "IoOptimized": false,
                "OperationLocks": {
                    "LockReason": []
                },
                "PublicIpAddress": {
                    "IpAddress": [
                        "192.168.127.12"
                    ]
                },
                "RegionId": "cn-shenzhen",
                "SecurityGroupIds": {
                    "SecurityGroupId": [
                        "sg-94kd0cyg0"
                    ]
                },
                "SerialNumber": "51d1353b-22bf-4567-a176-8b3e12e43135",
                "Status": "Running",
                "Tags": {
                    "Tag": [
                        {
                            "TagValue": "1.20",
                            "TagKey": "xz_test"
                        },
                        {
                            "TagValue": "1.20",
                            "TagKey": "xz_test_2"
                        }
                    ]
                },
                "VpcAttributes": {
                    "PrivateIpAddress": {
                        "IpAddress": []
                    }
                },
                "ZoneId": "cn-shenzhen-a"
            }
        ]
    },
    "PageNumber": 1,
    "PageSize": 10,
    "RequestId": "14A07460-EBE7-47CA-9757-12CC4761D47A",
    "TotalCount": 1
}
'''

# BUG FIX: the original fixture had a trailing comma after "RequestId",
# which is invalid JSON and breaks any json.loads() of this body.
MANAGE_INSTANCE = '''
{
    "RequestId": "14A07460-EBE7-47CA-9757-12CC4761D47A"
}
'''

CREATE_INSTANCE = '''
{
    "InstanceId": "i-2zeg0900kzwn7dpo7zrb",
    "RequestId": "9206E7A7-BFD5-457F-9173-91CF4525DE21"
}
'''

MODIFY_INSTANCE = '''
{
    "RequestId": "0C7EFCF3-1517-44CD-B61B-60FA49FEF04E"
}
'''

QUERYING_INSTANCE = '''
{
    "PageNumber": 1,
    "InstanceStatuses": {
        "InstanceStatus": [
            {"Status": "Running", "InstanceId": "i-2zehcagr3vt06iyir7hc"},
            {"Status": "Running", "InstanceId": "i-2zedup3d5p01daky1622"},
            {"Status": "Stopped", "InstanceId": "i-2zei2zq55lx87st85x2j"},
            {"Status": "Running", "InstanceId": "i-2zeaoq67u62vmkbo71o7"},
            {"Status": "Running", "InstanceId": "i-2ze5wl5aeq8kbblmjsx1"}
        ]
    },
    "TotalCount": 9,
    "PageSize": 5,
    "RequestId": "5D464158-D291-4C69-AA9E-84839A669B9D"
}
'''

JOIN_GROUP = '''
{
    "RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''

LEAVE_GROUP = '''
{
    "RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''

ATTACH_DISK = '''
{
    "RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
class TestDescribeInstances(ACSMockServiceTestCase):
    """DescribeInstances: attribute parsing plus state-based management calls."""
    connection_class = ECSConnection

    def default_body(self):
        return DESCRIBE_INSTANCE

    def test_instance_attribute(self):
        """Filter by tag and verify every parsed attribute of the instance."""
        self.set_http_response(status_code=200, body=DESCRIBE_INSTANCE)
        filters = {}
        instance_ids = ["i-94dehop6n"]
        tag_key = 'xz_test'
        tag_value = '1.20'
        filters['tag:' + tag_key] = tag_value
        instances = self.service_connection.get_all_instances(instance_ids=instance_ids, filters=filters)
        self.assertEqual(len(instances), 1)
        instance = instances[0]
        self.assertEqual(instance.id, 'i-94dehop6n')
        # BUG FIX: removed a leftover Python-2 debug `print` statement here;
        # it was noise under Python 2 and a SyntaxError under Python 3.
        self.assertEqual(instance.group_id, 'sg-94kd0cyg0')
        self.assertEqual(instance.public_ip, '192.168.127.12')
        self.assertEqual(instance.tags, {"xz_test": "1.20", "xz_test_2": "1.20"})
        self.assertFalse(instance.io_optimized)
        # Status is asserted lowercased relative to the raw payload ("Running").
        self.assertEqual(instance.status, 'running')
        self.assertEqual(instance.image_id, 'centos6u5_64_40G_cloudinit_20160427.raw')
        return instances  # reused by test_manage_instances

    def test_manage_instances(self):
        """Dispatch stop/start/reboot according to each instance's state."""
        self.set_http_response(status_code=200, body=MANAGE_INSTANCE)
        instances = self.test_instance_attribute()
        for inst in instances:
            if inst.state == 'running':
                inst.stop()
            elif inst.state == 'stopped':
                inst.start()
            else:
                inst.reboot()
class TestManageInstances(ACSMockServiceTestCase):
    """Bulk start/stop/reboot/terminate against a canned response."""
    connection_class = ECSConnection
    instance_ids = ['i-94dehop6n', 'i-95dertop6m']

    def default_body(self):
        return MANAGE_INSTANCE

    def _assert_bulk_result(self, result):
        # Each bulk operation reports one entry per requested instance id.
        self.assertEqual(len(result), len(self.instance_ids))
        self.assertIn(result[0], self.instance_ids)

    def test_start_instance(self):
        self.set_http_response(status_code=200)
        self._assert_bulk_result(
            self.service_connection.start_instances(instance_ids=self.instance_ids))

    def test_stop_instance(self):
        self.set_http_response(status_code=200)
        self._assert_bulk_result(
            self.service_connection.stop_instances(instance_ids=self.instance_ids, force=True))

    def test_reboot_instance(self):
        self.set_http_response(status_code=200)
        self._assert_bulk_result(
            self.service_connection.reboot_instances(instance_ids=self.instance_ids, force=True))

    def test_terminate_instance(self):
        self.set_http_response(status_code=200)
        self._assert_bulk_result(
            self.service_connection.terminate_instances(instance_ids=self.instance_ids, force=True))
# C2C : Unit Test For CreateInstance Method
class TestCreateInstance(ACSMockServiceTestCase):
    """CreateInstance with a fully-populated parameter set."""
    connection_class = ECSConnection
    acs_access_key_id = "<KEY>"
    acs_secret_access_key = "fqbuZIKPxOdu36yhFvaBtihNqD2qQ2"
    region_id = "cn-beijing"
    image_id = "ubuntu1404_64_40G_cloudinit_20160727.raw"
    instance_type = "ecs.n1.small"
    group_id = "sg-25y6ag32b"
    zone_id = "cn-beijing-b"
    io_optimized = "optimized"
    instance_name = "MyInstance"
    description = None
    internet_data = {
        'charge_type': 'PayByBandwidth',
        'max_bandwidth_in': 200,
        'max_bandwidth_out': 0
    }
    host_name = None
    # BUG FIX: the credential-redaction placeholder was left as a bare token
    # (`password = <PASSWORD>`), which is a SyntaxError; make it a string.
    password = "<PASSWORD>"
    system_disk = {"disk_category": "cloud_efficiency", "disk_size": 50}
    volumes = [
        {
            "device_category": "cloud_efficiency",
            "device_size": 20,
            "device_name": "volume1",
            "device_description": "volume 1 description comes here"
        },
        {
            "device_category": "cloud_efficiency",
            "device_size": 20,
            "device_name": "volume2",
            "device_description": "volume 2 description comes here"
        }
    ]
    vswitch_id = None
    instance_tags = [
        {
            "tag_key": "create_test_1",
            "tag_value": "0.01"
        },
        {
            "tag_key": "create_test_2",
            "tag_value": "0.02"
        }
    ]
    allocate_public_ip = True
    bind_eip = False
    instance_charge_type = None
    period = None
    auto_renew = False
    ids = None
    count = 1

    def default_body(self):
        return CREATE_INSTANCE

    def test_create_instance(self):
        self.set_http_response(status_code=200)
        result = self.service_connection.create_instance(
            region_id=self.region_id, image_id=self.image_id,
            instance_type=self.instance_type, group_id=self.group_id,
            zone_id=self.zone_id, instance_name=self.instance_name,
            description=self.description, internet_data=self.internet_data,
            host_name=self.host_name, password=self.password,
            io_optimized=self.io_optimized, system_disk=self.system_disk,
            volumes=self.volumes, vswitch_id=self.vswitch_id,
            instance_tags=self.instance_tags,
            allocate_public_ip=self.allocate_public_ip,
            bind_eip=self.bind_eip, count=self.count,
            instance_charge_type=self.instance_charge_type,
            period=self.period, auto_renew=self.auto_renew, ids=self.ids)
        self.assertEqual(len(result), self.count)
        # BUG FIX: assertIn(x, "i-2zeg...") only checked substring
        # containment; assert the exact instance id instead.
        self.assertEqual(result[0], "i-2zeg0900kzwn7dpo7zrb")
class TestModifyInstance(ACSMockServiceTestCase):
    """ModifyInstanceAttribute for a batch of instances."""
    connection_class = ECSConnection
    attributes = [
        {
            "id": "i-2zebgzk74po3gx1dwvuo",
            "name": "new_once_again",
            "description": "volumedecsription",
            "password": "<PASSWORD>",
            "host_name": "hostingAdmin"
        },
        {
            "id": "i-2zeaoq67u62vmkbo71o7",
            "host_name": "adminhostadmin"
        }
    ]

    def default_body(self):
        return MODIFY_INSTANCE

    def test_modify_instance(self):
        self.set_http_response(status_code=200)
        result = self.service_connection.modify_instance(attributes=self.attributes)
        self.assertEqual(len(result), len(self.attributes))
        # BUG FIX: assertIn(x, "success") passed for any substring of
        # "success" (e.g. "succ"); assert the exact status string instead.
        self.assertEqual(result[0], "success")
class TestQueryingInstance(ACSMockServiceTestCase):
    """Paged instance-status query echoes the requested page geometry."""
    connection_class = ECSConnection
    region_id = "cn-beijing"
    page_number = 1
    page_size = 5

    def default_body(self):
        return QUERYING_INSTANCE

    def test_querying_instance(self):
        self.set_http_response(status_code=200)
        response = self.service_connection.querying_instance(
            region_id=self.region_id, zone_id=None,
            page_number=self.page_number, page_size=self.page_size)
        self.assertEqual(response[u'PageNumber'], self.page_number)
        self.assertEqual(response[u'PageSize'], self.page_size)
class TestJoinSecGrp(ACSMockServiceTestCase):
    """JoinSecurityGroup: add an instance to a security group."""
    connection_class = ECSConnection
    acs_access_key = '<KEY>'
    acs_secret_access_key = '<KEY>'
    instance_ids = ["i-j6c5txh3q0wivxt5m807"]
    group_id = 'sg-j6c34iujuqbw29zpd53u'
    region = 'cn-hongkong'
    state = 'join'

    def default_body(self):
        return JOIN_GROUP

    def test_join_grp(self):
        self.set_http_response(status_code=200)
        outcome = self.service_connection.join_security_group(
            instance_id=self.instance_ids, security_group_id=self.group_id)
        self.assertEqual(outcome[0], "success")
class TestLeaveSecGrp(ACSMockServiceTestCase):
    """LeaveSecurityGroup: remove an instance from a security group."""
    connection_class = ECSConnection
    acs_access_key = '<KEY>'
    acs_secret_access_key = '<KEY>'
    instance_ids = ["i-j6c5txh3q0wivxt5m807"]
    group_id = 'sg-j6c34iujuqbw29zpd53u'
    region = 'cn-hongkong'
    state = 'remove'

    def default_body(self):
        return LEAVE_GROUP

    def test_leave_grp(self):
        self.set_http_response(status_code=200)
        outcome = self.service_connection.leave_security_group(
            instance_id=self.instance_ids, security_group_id=self.group_id)
        self.assertEqual(outcome[0], "success")
class TestAttachDisk(ACSMockServiceTestCase):
    """AttachDisk: attach a data disk to an instance."""
    connection_class = ECSConnection
    acs_access_key = '<KEY>'
    acs_secret_access_key = '<KEY>'
    instance_ids = ["i-j6c5txh3q0wivxt5m807"]
    disk_id = 'd-j6cc9ssgxbkjdf55w8p7'
    region = 'cn-hongkong'
    device = None
    delete_with_instance = None
    state = 'attach'

    def default_body(self):
        return ATTACH_DISK

    def attach_disk(self):
        self.set_http_response(status_code=200)
        result = self.service_connection.attach_disk_to_instance(
            disk_id=self.disk_id, instance_id=self.instance_ids,
            region_id=self.region, device=self.device,
            delete_with_instance=self.delete_with_instance)
        self.assertEqual(result[0], "success")

    # BUG FIX: without a "test_" prefix unittest never discovered this case,
    # so it silently never ran.  Alias it under a discoverable name while
    # keeping the original method callable for compatibility.
    test_attach_disk = attach_disk
from tests.unit import ACSMockServiceTestCase
import json
DESCRIBE_INSTANCE = '''
{
"Instances": {
"Instance": [
{
"CreationTime": "2016-06-20T21:37Z",
"DeviceAvailable": true,
"EipAddress": {},
"ExpiredTime": "2016-10-22T16:00Z",
"HostName": "xiaozhu_test",
"ImageId": "centos6u5_64_40G_cloudinit_20160427.raw",
"InnerIpAddress": {
"IpAddress": [
"10.170.106.80"
]
},
"InstanceChargeType": "PostPaid",
"InstanceId": "i-94dehop6n",
"InstanceNetworkType": "classic",
"InstanceType": "ecs.s2.large",
"InternetChargeType": "PayByTraffic",
"InternetMaxBandwidthIn": -1,
"InternetMaxBandwidthOut": 1,
"IoOptimized": false,
"OperationLocks": {
"LockReason": []
},
"PublicIpAddress": {
"IpAddress": [
"192.168.127.12"
]
},
"RegionId": "cn-shenzhen",
"SecurityGroupIds": {
"SecurityGroupId": [
"sg-94kd0cyg0"
]
},
"SerialNumber": "51d1353b-22bf-4567-a176-8b3e12e43135",
"Status": "Running",
"Tags":{
"Tag":[
{
"TagValue":"1.20",
"TagKey":"xz_test"
},
{
"TagValue":"1.20",
"TagKey":"xz_test_2"
}
]
},
"VpcAttributes": {
"PrivateIpAddress": {
"IpAddress": []
}
},
"ZoneId": "cn-shenzhen-a"
}
]
},
"PageNumber": 1,
"PageSize": 10,
"RequestId": "14A07460-EBE7-47CA-9757-12CC4761D47A",
"TotalCount": 1
}
'''
# BUG FIX: removed the trailing comma after "RequestId" — it made this
# fixture invalid JSON (json.loads would raise).
MANAGE_INSTANCE = '''
{
    "RequestId": "14A07460-EBE7-47CA-9757-12CC4761D47A"
}
'''
CREATE_INSTANCE = '''
{
"InstanceId":"i-2zeg0900kzwn7dpo7zrb",
"RequestId":"9206E7A7-BFD5-457F-9173-91CF4525DE21"
}
'''
MODIFY_INSTANCE= '''
{
"RequestId":"0C7EFCF3-1517-44CD-B61B-60FA49FEF04E"
}
'''
QUERYING_INSTANCE='''
{
"PageNumber": 1,
"InstanceStatuses":
{"InstanceStatus": [
{"Status": "Running", "InstanceId": "i-2zehcagr3vt06iyir7hc"},
{"Status": "Running", "InstanceId": "i-2zedup3d5p01daky1622"},
{"Status": "Stopped", "InstanceId": "i-2zei2zq55lx87st85x2j"},
{"Status": "Running", "InstanceId": "i-2zeaoq67u62vmkbo71o7"},
{"Status": "Running", "InstanceId": "i-2ze5wl5aeq8kbblmjsx1"}
]},
"TotalCount": 9,
"PageSize": 5,
"RequestId": "5D464158-D291-4C69-AA9E-84839A669B9D"
}
'''
JOIN_GROUP='''
{
"RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
LEAVE_GROUP='''
{
"RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
ATTACH_DISK='''
{
"RequestId": "AF3991A3-5203-4F83-8FAD-FDC1253AF15D"
}
'''
class TestDescribeInstances(ACSMockServiceTestCase):
    """DescribeInstances: attribute parsing plus state-based management calls."""
    connection_class = ECSConnection

    def default_body(self):
        return DESCRIBE_INSTANCE

    def test_instance_attribute(self):
        """Filter by tag and verify every parsed attribute of the instance."""
        self.set_http_response(status_code=200, body=DESCRIBE_INSTANCE)
        filters = {}
        instance_ids = ["i-94dehop6n"]
        tag_key = 'xz_test'
        tag_value = '1.20'
        filters['tag:' + tag_key] = tag_value
        instances = self.service_connection.get_all_instances(instance_ids=instance_ids, filters=filters)
        self.assertEqual(len(instances), 1)
        instance = instances[0]
        self.assertEqual(instance.id, 'i-94dehop6n')
        # BUG FIX: removed a leftover Python-2 debug `print` statement here;
        # it was noise under Python 2 and a SyntaxError under Python 3.
        self.assertEqual(instance.group_id, 'sg-94kd0cyg0')
        self.assertEqual(instance.public_ip, '192.168.127.12')
        self.assertEqual(instance.tags, {"xz_test": "1.20", "xz_test_2": "1.20"})
        self.assertFalse(instance.io_optimized)
        # Status is asserted lowercased relative to the raw payload ("Running").
        self.assertEqual(instance.status, 'running')
        self.assertEqual(instance.image_id, 'centos6u5_64_40G_cloudinit_20160427.raw')
        return instances  # reused by test_manage_instances

    def test_manage_instances(self):
        """Dispatch stop/start/reboot according to each instance's state."""
        self.set_http_response(status_code=200, body=MANAGE_INSTANCE)
        instances = self.test_instance_attribute()
        for inst in instances:
            if inst.state == 'running':
                inst.stop()
            elif inst.state == 'stopped':
                inst.start()
            else:
                inst.reboot()
class TestManageInstances(ACSMockServiceTestCase):
connection_class = ECSConnection
instance_ids = ['i-94dehop6n', 'i-95dertop6m']
def default_body(self):
return MANAGE_INSTANCE
def test_start_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.start_instances(instance_ids=self.instance_ids)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
def test_stop_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.stop_instances(instance_ids=self.instance_ids, force=True)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
def test_reboot_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.reboot_instances(instance_ids=self.instance_ids, force=True)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
def test_terminate_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.terminate_instances(instance_ids=self.instance_ids, force=True)
self.assertEqual(len(result), len(self.instance_ids))
self.assertIn(result[0], self.instance_ids)
# C2C : Unit Test For CreateInstance Method
class TestCreateInstance(ACSMockServiceTestCase):
    """CreateInstance with a fully-populated parameter set."""
    connection_class = ECSConnection
    acs_access_key_id = "<KEY>"
    acs_secret_access_key = "fqbuZIKPxOdu36yhFvaBtihNqD2qQ2"
    region_id = "cn-beijing"
    image_id = "ubuntu1404_64_40G_cloudinit_20160727.raw"
    instance_type = "ecs.n1.small"
    group_id = "sg-25y6ag32b"
    zone_id = "cn-beijing-b"
    io_optimized = "optimized"
    instance_name = "MyInstance"
    description = None
    internet_data = {
        'charge_type': 'PayByBandwidth',
        'max_bandwidth_in': 200,
        'max_bandwidth_out': 0
    }
    host_name = None
    # BUG FIX: the credential-redaction placeholder was left as a bare token
    # (`password = <PASSWORD>`), which is a SyntaxError; make it a string.
    password = "<PASSWORD>"
    system_disk = {"disk_category": "cloud_efficiency", "disk_size": 50}
    volumes = [
        {
            "device_category": "cloud_efficiency",
            "device_size": 20,
            "device_name": "volume1",
            "device_description": "volume 1 description comes here"
        },
        {
            "device_category": "cloud_efficiency",
            "device_size": 20,
            "device_name": "volume2",
            "device_description": "volume 2 description comes here"
        }
    ]
    vswitch_id = None
    instance_tags = [
        {
            "tag_key": "create_test_1",
            "tag_value": "0.01"
        },
        {
            "tag_key": "create_test_2",
            "tag_value": "0.02"
        }
    ]
    allocate_public_ip = True
    bind_eip = False
    instance_charge_type = None
    period = None
    auto_renew = False
    ids = None
    count = 1

    def default_body(self):
        return CREATE_INSTANCE

    def test_create_instance(self):
        self.set_http_response(status_code=200)
        result = self.service_connection.create_instance(
            region_id=self.region_id, image_id=self.image_id,
            instance_type=self.instance_type, group_id=self.group_id,
            zone_id=self.zone_id, instance_name=self.instance_name,
            description=self.description, internet_data=self.internet_data,
            host_name=self.host_name, password=self.password,
            io_optimized=self.io_optimized, system_disk=self.system_disk,
            volumes=self.volumes, vswitch_id=self.vswitch_id,
            instance_tags=self.instance_tags,
            allocate_public_ip=self.allocate_public_ip,
            bind_eip=self.bind_eip, count=self.count,
            instance_charge_type=self.instance_charge_type,
            period=self.period, auto_renew=self.auto_renew, ids=self.ids)
        self.assertEqual(len(result), self.count)
        # BUG FIX: assertIn(x, "i-2zeg...") only checked substring
        # containment; assert the exact instance id instead.
        self.assertEqual(result[0], "i-2zeg0900kzwn7dpo7zrb")
class TestModifyInstance(ACSMockServiceTestCase):
connection_class = ECSConnection
attributes = [
{
"id": "i-2zebgzk74po3gx1dwvuo",
"name": "new_once_again",
"description": "volumedecsription",
"password": "<PASSWORD>",
"host_name": "hostingAdmin"
},
{
"id": "i-2zeaoq67u62vmkbo71o7",
"host_name": "adminhostadmin"
}
]
def default_body(self):
return MODIFY_INSTANCE
def test_modify_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.modify_instance(attributes=self.attributes)
self.assertEqual(len(result), len(self.attributes))
self.assertIn(result[0], "success")
class TestQueryingInstance(ACSMockServiceTestCase):
connection_class = ECSConnection
region_id="cn-beijing"
page_number=1
page_size=5
def default_body(self):
return QUERYING_INSTANCE
def test_querying_instance(self):
self.set_http_response(status_code=200)
result = self.service_connection.querying_instance(region_id=self.region_id, zone_id=None,
page_number=self.page_number,
page_size=self.page_size)
self.assertEqual(result[u'PageNumber'], self.page_number)
self.assertEqual(result[u'PageSize'], self.page_size)
class TestJoinSecGrp(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key = '<KEY>'
acs_secret_access_key = '<KEY>'
instance_ids = ["i-j6c5txh3q0wivxt5m807"]
group_id = 'sg-j6c34iujuqbw29zpd53u'
region = 'cn-hongkong'
state = 'join'
def default_body(self):
return JOIN_GROUP
def test_join_grp(self):
self.set_http_response(status_code=200)
result = self.service_connection.join_security_group(instance_id = self.instance_ids, security_group_id = self.group_id)
###self.assertEqual(len(result), len(self.attributes))
self.assertEqual(result[0], "success")
class TestLeaveSecGrp(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key = '<KEY>'
acs_secret_access_key = '<KEY>'
instance_ids = ["i-j6c5txh3q0wivxt5m807"]
group_id = 'sg-j6c34iujuqbw29zpd53u'
region = 'cn-hongkong'
state = 'remove'
def default_body(self):
return LEAVE_GROUP
def test_leave_grp(self):
self.set_http_response(status_code=200)
result = self.service_connection.leave_security_group(instance_id = self.instance_ids, security_group_id = self.group_id)
###self.assertEqual(len(result), len(self.attributes))
self.assertEqual(result[0], "success")
class TestAttachDisk(ACSMockServiceTestCase):
connection_class = ECSConnection
acs_access_key = '<KEY>'
acs_secret_access_key = '<KEY>'
instance_ids = ["i-j6c5txh3q0wivxt5m807"]
disk_id = 'd-j6cc9ssgxbkjdf55w8p7'
region = 'cn-hongkong'
device = None
delete_with_instance = None
state = 'attach'
def default_body(self):
return ATTACH_DISK
def attach_disk(self):
self.set_http_response(status_code=200)
result = self.service_connection.attach_disk_to_instance(disk_id = self.disk_id, instance_id = self.instance_ids,region_id = self.region, device = self.device,delete_with_instance = self.delete_with_instance)
###self.assertEqual(len(result), len(self.attributes))
self.assertEqual(result[0], "success") | 0.464416 | 0.27506 |
import argparse
import tempfile
import hashlib
from bioconverters import convert
import shutil
import urllib.request as request
from contextlib import closing
import time
import gzip
import sys
import string
import re
import os
from datetime import datetime
from dbutils import saveDocumentsToDatabase
def download_file(url, local_filename):
    """Stream *url* to *local_filename* in binary mode (single attempt)."""
    with closing(request.urlopen(url)) as response:
        with open(local_filename, 'wb') as out:
            shutil.copyfileobj(response, out)


def download_file_and_check_md5sum(url, local_filename):
    """Download *url* and verify it against its "<url>.md5" sidecar.

    The sidecar is expected to look like ``MD5(name)= <hexdigest>``.

    Raises:
        AssertionError: if the sidecar is not in the expected format.
        RuntimeError: if the digests disagree.
    """
    with tempfile.NamedTemporaryFile() as tf:
        md5_url = "%s.md5" % url
        download_file(md5_url, tf.name)
        with open(tf.name) as f:
            expected_md5 = f.read().strip()
        assert expected_md5.startswith('MD5(') and '=' in expected_md5
        expected_md5 = expected_md5.split('=')[1].strip()
        download_file(url, local_filename)
        with open(local_filename, 'rb') as f:
            got_md5 = hashlib.md5(f.read()).hexdigest()
        if expected_md5 != got_md5:
            raise RuntimeError("MD5 of downloaded file doesn't match expected: %s != %s" % (expected_md5, got_md5))


def download_file_with_retries(url, local_filename, check_md5=False, retries=10):
    """Download with linearly-growing backoff (5s, 10s, ...) between attempts.

    Raises RuntimeError after *retries* failed attempts.
    """
    for tryno in range(retries):
        try:
            if check_md5:
                download_file_and_check_md5sum(url, local_filename)
            else:
                download_file(url, local_filename)
            return
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception only.
        except Exception:
            print("Unexpected error:", sys.exc_info()[0], sys.exc_info()[1])
            # BUG FIX: don't sleep after the final failed attempt —
            # previously the last try slept before raising.
            if tryno + 1 < retries:
                time.sleep(5 * (tryno + 1))
    raise RuntimeError("Unable to download %s" % url)
def get_pubmed_timestamp(url):
    """Scrape the directory listing page for *url*'s listed modification time.

    Returns a Unix timestamp.  The listed time is parsed as a naive
    datetime, so datetime.timestamp interprets it in the local timezone.
    """
    assert url.startswith('ftp://ftp.ncbi.nlm.nih.gov/pubmed/')
    # Fetch the HTTP mirror of the directory listing containing the file.
    listing_url = os.path.dirname(url).replace('ftp://', 'http://')
    filename = os.path.basename(url)
    with tempfile.NamedTemporaryFile() as listing_file:
        download_file_with_retries(listing_url, listing_file.name)
        with open(listing_file.name) as handle:
            listing_page = handle.read()
    match = re.search('<a href="%s">%s</a>\s+(\d+-\d+-\d+\s+\d+:\d+)' % (filename, filename), listing_page)
    assert match, "Could not find timestamp for url: %s" % url
    listed_time = datetime.strptime(match.groups()[0], '%Y-%m-%d %H:%M')
    return int(datetime.timestamp(listed_time))
def get_pubmed_fileindex(url):
    """Extract the 6-digit file index encoded in a PubMed archive filename.

    e.g. ".../pubmed21n0123.xml.gz" -> 210123.

    Raises AssertionError when the basename does not contain exactly six
    digits (kept as an assert to preserve the original exception type).
    """
    filename = os.path.basename(url)
    digits_in_filename = [c for c in filename if c in string.digits]
    assert len(digits_in_filename) == 6, "Expected exactly 6 digits in filename: %s" % filename
    # FIX: the original converted to int twice (int(...) then int(file_index)).
    return int("".join(digits_in_filename))
accepted_out_formats = ['biocxml','txt']  # formats `convert` may emit here
def main():
    """CLI entry point: download one PubMed .xml.gz archive and convert it.

    Flags: --url (source archive), --o (output path, or the SQLite DB path
    when --db is set), --oFormat (biocxml/txt), --db (store converted docs
    into an SQLite database instead of a plain file).
    """
    parser = argparse.ArgumentParser(description='Tool to convert corpus between different formats')
    parser.add_argument('--url',type=str,required=True,help="URL to PubMed GZipped XML file")
    parser.add_argument('--o',type=str,required=True,help="Where to store resulting converted docs")
    parser.add_argument('--oFormat',type=str,required=True,help="Format for output corpus. Options: %s" % "/".join(accepted_out_formats))
    parser.add_argument('--db',action='store_true',help="Whether to output as an SQLite database")
    args = parser.parse_args()

    in_format = 'pubmedxml'
    out_format = args.oFormat.lower()
    if args.db:
        # Database mode only accepts BioC XML documents.
        assert out_format == 'biocxml', "Output format must be biocxml when storing to the database"
    assert out_format in accepted_out_formats, "%s is not an accepted output format. Options are: %s" % (out_format, "/".join(accepted_out_formats))

    # 6-digit index parsed from the archive filename; recorded with DB rows.
    file_index = get_pubmed_fileindex(args.url)
    with tempfile.NamedTemporaryFile() as tf_pubmed, tempfile.NamedTemporaryFile() as tf_out:
        print("Downloading...")
        download_file_with_retries(args.url, tf_pubmed.name, check_md5=True)
        # In DB mode, convert into a temp file first, then load into SQLite.
        out_file = tf_out.name if args.db else args.o
        print("Converting...")
        with gzip.open(tf_pubmed.name) as f:
            convert([f],in_format,out_file,out_format)
        if args.db:
            saveDocumentsToDatabase(args.o,tf_out.name,is_fulltext=False,file_index=file_index)
    print("Output to %s complete" % args.o)

if __name__ == '__main__':
    main()
import tempfile
import hashlib
from bioconverters import convert
import shutil
import urllib.request as request
from contextlib import closing
import time
import gzip
import sys
import string
import re
import os
from datetime import datetime
from dbutils import saveDocumentsToDatabase
def download_file(url, local_filename):
    """Stream *url* to *local_filename* in binary mode (single attempt)."""
    with closing(request.urlopen(url)) as response:
        with open(local_filename, 'wb') as out:
            shutil.copyfileobj(response, out)


def download_file_and_check_md5sum(url, local_filename):
    """Download *url* and verify it against its "<url>.md5" sidecar.

    The sidecar is expected to look like ``MD5(name)= <hexdigest>``.

    Raises:
        AssertionError: if the sidecar is not in the expected format.
        RuntimeError: if the digests disagree.
    """
    with tempfile.NamedTemporaryFile() as tf:
        md5_url = "%s.md5" % url
        download_file(md5_url, tf.name)
        with open(tf.name) as f:
            expected_md5 = f.read().strip()
        assert expected_md5.startswith('MD5(') and '=' in expected_md5
        expected_md5 = expected_md5.split('=')[1].strip()
        download_file(url, local_filename)
        with open(local_filename, 'rb') as f:
            got_md5 = hashlib.md5(f.read()).hexdigest()
        if expected_md5 != got_md5:
            raise RuntimeError("MD5 of downloaded file doesn't match expected: %s != %s" % (expected_md5, got_md5))


def download_file_with_retries(url, local_filename, check_md5=False, retries=10):
    """Download with linearly-growing backoff (5s, 10s, ...) between attempts.

    Raises RuntimeError after *retries* failed attempts.
    """
    for tryno in range(retries):
        try:
            if check_md5:
                download_file_and_check_md5sum(url, local_filename)
            else:
                download_file(url, local_filename)
            return
        # BUG FIX: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit; catch Exception only.
        except Exception:
            print("Unexpected error:", sys.exc_info()[0], sys.exc_info()[1])
            # BUG FIX: don't sleep after the final failed attempt.
            if tryno + 1 < retries:
                time.sleep(5 * (tryno + 1))
    raise RuntimeError("Unable to download %s" % url)
def get_pubmed_timestamp(url):
assert url.startswith('ftp://ftp.ncbi.nlm.nih.gov/pubmed/')
listing_url = os.path.dirname(url).replace('ftp://','http://')
filename = os.path.basename(url)
with tempfile.NamedTemporaryFile() as tf:
download_file_with_retries(listing_url,tf.name)
with open(tf.name) as f:
listing_page = f.read()
match = re.search('<a href="%s">%s</a>\s+(\d+-\d+-\d+\s+\d+:\d+)' % (filename,filename), listing_page)
assert match, "Could not find timestamp for url: %s" % url
found_date = match.groups()[0]
date_obj = datetime.strptime(found_date, '%Y-%m-%d %H:%M')
timestamp = int(datetime.timestamp(date_obj))
return timestamp
def get_pubmed_fileindex(url):
    """Extract the 6-digit file index encoded in a PubMed archive filename.

    e.g. ".../pubmed21n0123.xml.gz" -> 210123.

    Raises AssertionError when the basename does not contain exactly six
    digits (kept as an assert to preserve the original exception type).
    """
    filename = os.path.basename(url)
    digits_in_filename = [c for c in filename if c in string.digits]
    assert len(digits_in_filename) == 6, "Expected exactly 6 digits in filename: %s" % filename
    # FIX: the original converted to int twice (int(...) then int(file_index)).
    return int("".join(digits_in_filename))
accepted_out_formats = ['biocxml','txt']
def main():
parser = argparse.ArgumentParser(description='Tool to convert corpus between different formats')
parser.add_argument('--url',type=str,required=True,help="URL to PubMed GZipped XML file")
parser.add_argument('--o',type=str,required=True,help="Where to store resulting converted docs")
parser.add_argument('--oFormat',type=str,required=True,help="Format for output corpus. Options: %s" % "/".join(accepted_out_formats))
parser.add_argument('--db',action='store_true',help="Whether to output as an SQLite database")
args = parser.parse_args()
in_format = 'pubmedxml'
out_format = args.oFormat.lower()
if args.db:
assert out_format == 'biocxml', "Output format must be biocxml when storing to the database"
assert out_format in accepted_out_formats, "%s is not an accepted output format. Options are: %s" % (out_format, "/".join(accepted_out_formats))
file_index = get_pubmed_fileindex(args.url)
with tempfile.NamedTemporaryFile() as tf_pubmed, tempfile.NamedTemporaryFile() as tf_out:
print("Downloading...")
download_file_with_retries(args.url, tf_pubmed.name, check_md5=True)
out_file = tf_out.name if args.db else args.o
print("Converting...")
with gzip.open(tf_pubmed.name) as f:
convert([f],in_format,out_file,out_format)
if args.db:
saveDocumentsToDatabase(args.o,tf_out.name,is_fulltext=False,file_index=file_index)
print("Output to %s complete" % args.o)
if __name__ == '__main__':
main() | 0.217836 | 0.29438 |
from decapod_common import log
from decapod_common import playbook_plugin
from decapod_common import playbook_plugin_hints
from decapod_common.models import cluster_data
DESCRIPTION = "Add RBD Mirroring host"
"""Plugin description."""
HINTS_SCHEMA = {
"remote_username": {
"description": "Remote user keyring to use",
"typename": "string",
"type": "string",
"default_value": "admin"
},
"remote_clustername": {
"description": "Name of the remote cluster to use",
"typename": "string",
"type": "string",
"default_value": ""
},
"poolname": {
"description": "Name of the pool to setup mirroring",
"typename": "string",
"type": "string",
"default_value": ""
},
"add_peers": {
"description": "Add peers",
"typename": "boolean",
"type": "boolean",
"default_value": True
},
"ceph_version_verify": {
"description": "Verify Ceph version consistency on install",
"typename": "boolean",
"type": "boolean",
"default_value": True
}
}
"""Schema for playbook hints."""
LOG = log.getLogger(__name__)
"""Logger."""
class AddRbdmirror(playbook_plugin.CephAnsibleNewWithVerification):
    """Decapod playbook plugin that adds RBD mirroring hosts to a cluster."""

    NAME = "Add RBD Mirroring host"
    DESCRIPTION = DESCRIPTION
    HINTS = playbook_plugin_hints.Hints(HINTS_SCHEMA)

    def on_pre_execute(self, task):
        """Persist the configuration's per-host vars into stored cluster data.

        Runs the base-class pre-execute for the "rbdmirrors" group, then
        copies every hostvar from the playbook configuration's inventory
        into the ClusterData model and saves it.
        """
        super().on_pre_execute(["rbdmirrors"], task)
        playbook_config = self.get_playbook_configuration(task)
        config = playbook_config.configuration["inventory"]
        cluster = playbook_config.cluster
        data = cluster_data.ClusterData.find_one(cluster.model_id)
        hostvars = config.get("_meta", {}).get("hostvars", {})
        for hostname, values in hostvars.items():
            data.update_host_vars(hostname, values)
        data.save()

    def get_dynamic_inventory(self):
        """Return the base inventory with each host's "rbd_mirrors" re-keyed.

        Stored hostvars use "user@cluster" -> [pools]; this reworks them
        into the nested form user -> {cluster: [pools]}.  Also forces
        ceph_rbd_mirror_configure to False for every host.
        """
        inventory = super().get_dynamic_inventory()
        hostvars = inventory["_meta"]["hostvars"]
        for data in hostvars.values():
            data["ceph_rbd_mirror_configure"] = False
            if "rbd_mirrors" not in data:
                continue
            reworked = {}
            for usercluster, pools in data["rbd_mirrors"].items():
                user, cluster = usercluster.split("@")
                pool_list = reworked.setdefault(user, {})
                for pool in pools:
                    pool_list.setdefault(cluster, []).append(pool)
            data["rbd_mirrors"] = reworked
        return inventory

    def get_extra_vars(self, task):
        """Build extra vars: drop the per-host configure flag, default the user."""
        extra_vars = super().get_extra_vars(task)
        extra_vars.pop("ceph_rbd_mirror_configure", None)
        extra_vars.setdefault("ceph_rbd_mirror_local_user", "admin")
        return extra_vars

    def make_global_vars(self, cluster, data, servers, hints):
        """Extend the base global vars with the boolean "add_peers" hint."""
        base = super().make_global_vars(cluster, data, servers, hints)
        base["add_peers"] = bool(hints["add_peers"])
        return base

    def make_inventory(self, cluster, data, servers, hints):
        """Build an Ansible inventory mapping group names to server IPs.

        Every server gets its stored host vars plus ansible_user; servers
        in the "rbdmirrors" group additionally get the mirror pool mapping
        derived from the hints (see update_hostvars).
        """
        groups = self.get_inventory_groups(cluster, servers, hints)
        inventory = {"_meta": {"hostvars": {}}}
        for name, group_servers in groups.items():
            for srv in group_servers:
                inventory.setdefault(name, []).append(srv.ip)
                hostvars = inventory["_meta"]["hostvars"].setdefault(
                    srv.ip, {})
                hostvars.update(data.get_host_vars(srv.ip))
                hostvars["ansible_user"] = srv.username
                if name == "rbdmirrors":
                    self.update_hostvars(hostvars, srv, hints)
        return inventory

    def get_inventory_groups(self, cluster, servers, hints):
        """All given servers are placed in the "rbdmirrors" group."""
        base = super().get_inventory_groups(cluster, servers, hints)
        base["rbdmirrors"] = servers
        return base

    def update_hostvars(self, hostvars, srv, hints):
        """Merge the hinted pool into hostvars["rbd_mirrors"].

        Pools are keyed by "remote_username@remote_clustername"; each pool
        list is kept sorted and duplicate-free.
        """
        pools = hostvars.setdefault("rbd_mirrors", {})
        mirror_for = "{0}@{1}".format(
            hints["remote_username"], hints["remote_clustername"])
        pool_list = set(pools.get(mirror_for, []))
        pool_list.add(hints["poolname"])
        pools[mirror_for] = sorted(pool_list)
        # NOTE(review): `pools` is the same dict setdefault stored above,
        # so this reassignment is redundant but harmless.
        hostvars["rbd_mirrors"] = pools
from decapod_common import playbook_plugin
from decapod_common import playbook_plugin_hints
from decapod_common.models import cluster_data
# Human-readable plugin name shown to API users.
DESCRIPTION = "Add RBD Mirroring host"
"""Plugin description."""
# Hints accepted from the API caller; each entry declares a description,
# a type (string/boolean) and a default value.
HINTS_SCHEMA = {
    "remote_username": {
        "description": "Remote user keyring to use",
        "typename": "string",
        "type": "string",
        "default_value": "admin"
    },
    "remote_clustername": {
        "description": "Name of the remote cluster to use",
        "typename": "string",
        "type": "string",
        "default_value": ""
    },
    "poolname": {
        "description": "Name of the pool to setup mirroring",
        "typename": "string",
        "type": "string",
        "default_value": ""
    },
    "add_peers": {
        "description": "Add peers",
        "typename": "boolean",
        "type": "boolean",
        "default_value": True
    },
    "ceph_version_verify": {
        "description": "Verify Ceph version consistency on install",
        "typename": "boolean",
        "type": "boolean",
        "default_value": True
    }
}
"""Schema for playbook hints."""
# Module-level logger from decapod_common.log.
LOG = log.getLogger(__name__)
"""Logger."""
class AddRbdmirror(playbook_plugin.CephAnsibleNewWithVerification):
    """Playbook plugin that adds an RBD mirroring host to a cluster."""

    NAME = "Add RBD Mirroring host"
    DESCRIPTION = DESCRIPTION
    HINTS = playbook_plugin_hints.Hints(HINTS_SCHEMA)

    def on_pre_execute(self, task):
        """Persist per-host vars from the playbook configuration, then save."""
        super().on_pre_execute(["rbdmirrors"], task)
        playbook_config = self.get_playbook_configuration(task)
        inventory = playbook_config.configuration["inventory"]
        cluster_model = playbook_config.cluster
        data = cluster_data.ClusterData.find_one(cluster_model.model_id)
        hostvars = inventory.get("_meta", {}).get("hostvars", {})
        for hostname, host_values in hostvars.items():
            data.update_host_vars(hostname, host_values)
        data.save()

    def get_dynamic_inventory(self):
        """Return the inventory with each host's ``rbd_mirrors`` regrouped.

        Converts ``{"user@cluster": [pool, ...]}`` into the nested form
        ``{user: {cluster: [pool, ...]}}`` and disables
        ``ceph_rbd_mirror_configure`` on every host.
        """
        inventory = super().get_dynamic_inventory()
        for host_data in inventory["_meta"]["hostvars"].values():
            host_data["ceph_rbd_mirror_configure"] = False
            if "rbd_mirrors" not in host_data:
                continue
            regrouped = {}
            for user_at_cluster, pools in host_data["rbd_mirrors"].items():
                user, cluster_name = user_at_cluster.split("@")
                by_cluster = regrouped.setdefault(user, {})
                for pool in pools:
                    by_cluster.setdefault(cluster_name, []).append(pool)
            host_data["rbd_mirrors"] = regrouped
        return inventory

    def get_extra_vars(self, task):
        """Drop the configure flag and default the local user to ``admin``."""
        extra_vars = super().get_extra_vars(task)
        extra_vars.pop("ceph_rbd_mirror_configure", None)
        extra_vars.setdefault("ceph_rbd_mirror_local_user", "admin")
        return extra_vars

    def make_global_vars(self, cluster, data, servers, hints):
        """Add the ``add_peers`` hint (coerced to bool) to the parent vars."""
        global_vars = super().make_global_vars(cluster, data, servers, hints)
        global_vars["add_peers"] = bool(hints["add_peers"])
        return global_vars

    def make_inventory(self, cluster, data, servers, hints):
        """Build the Ansible inventory mapping for all groups."""
        inventory = {"_meta": {"hostvars": {}}}
        meta_hostvars = inventory["_meta"]["hostvars"]
        groups = self.get_inventory_groups(cluster, servers, hints)
        for group_name, group_servers in groups.items():
            for server in group_servers:
                inventory.setdefault(group_name, []).append(server.ip)
                host_entry = meta_hostvars.setdefault(server.ip, {})
                host_entry.update(data.get_host_vars(server.ip))
                host_entry["ansible_user"] = server.username
                if group_name == "rbdmirrors":
                    self.update_hostvars(host_entry, server, hints)
        return inventory

    def get_inventory_groups(self, cluster, servers, hints):
        """Extend parent groups with an ``rbdmirrors`` group of all servers."""
        groups = super().get_inventory_groups(cluster, servers, hints)
        groups["rbdmirrors"] = servers
        return groups

    def update_hostvars(self, hostvars, srv, hints):
        """Merge the hinted pool into the host's ``rbd_mirrors`` mapping,
        keyed by ``remote_user@remote_cluster`` with sorted, unique pools."""
        mirrors = hostvars.setdefault("rbd_mirrors", {})
        peer_key = "{0}@{1}".format(
            hints["remote_username"], hints["remote_clustername"])
        pool_set = set(mirrors.get(peer_key, []))
        pool_set.add(hints["poolname"])
        mirrors[peer_key] = sorted(pool_set)
hostvars["rbd_mirrors"] = pools | 0.719088 | 0.259126 |
from univers.gem import GemRequirement
from univers.gem import GemVersion
from univers.gem import InvalidVersionError
def assert_bumped_version_equal(expected, unbumped):
    """Assert that bumping ``unbumped`` yields ``expected``."""
    assert_version_eql(expected, GemVersion(unbumped).bump())


def test_bump():
    assert_bumped_version_equal("5.3", "5.2.4")


def test_bump_alpha():
    assert_bumped_version_equal("5.3", "5.2.4.a")


def test_bump_alphanumeric():
    assert_bumped_version_equal("5.3", "5.2.4.a10")


def test_bump_trailing_zeros():
    assert_bumped_version_equal("5.1", "5.0.0")


def test_bump_one_level():
    assert_bumped_version_equal("6", "5")


def test_eql_is_same():
    assert_version_eql("1.2", "1.2")
    assert_version_strict_equal("1.2", "1.2")
    refute_version_eql("1.2", "1.3")
    refute_version_strict_equal("1.2", "1.3")
    # "1.2" and "1.2.0" compare equal, but are not *strictly* equal.
    refute_version_strict_equal("1.2", "1.2.0")
    assert_version_eql("1.2", "1.2.0")
    assert_version_eql("1.2.b1", "1.2.b.1")
    refute_version_strict_equal("1.2.b1", "1.2.b.1")
    refute_version_strict_equal("1.2.pre.1", "1.2.0.pre.1.0")
    assert_version_eql("1.2.pre.1", "1.2.0.pre.1.0")


def test_initialize():
    # Surrounding whitespace is ignored; plain ints are accepted too.
    for good in ["1.0", "1.0 ", " 1.0 ", "1.0\n", "\n1.0\n", "1.0"]:
        assert_version_eql("1.0", good)
    assert_version_eql("1", 1)


def test_initialize_invalid():
    invalid_versions = [
        "whatever",
        "junk",
        # BUG FIX: a missing comma had fused the next two entries into one
        # implicitly-concatenated literal ("1.0\n2.0" "1..2"); upstream
        # rubygems tests list them as two separate invalid versions.
        "1.0\n2.0",
        "1..2",
        "1.2\ 3.4",
    ]
    # DON'T TOUCH THIS WITHOUT CHECKING CVE-2013-4287
    invalid_versions += ["2.3422222.222.222222222.22222.ads0as.dasd0.ddd2222.2.qd3e."]
    for invalid in invalid_versions:
        try:
            GemVersion(invalid)
            # Not caught below: Exception is not an InvalidVersionError.
            raise Exception(f"exception not raised for: {invalid!r}")
        except InvalidVersionError:
            pass


def test_empty_version():
    # Blank or whitespace-only input collapses to version "0".
    for blank in ["", " ", "   "]:
        assert GemVersion(blank).version == "0"


def test_prerelease():
    for pre in ["1.2.0.a", "2.9.b", "192.168.3.11.d", "1.2.d.42", "1.A", "1-1", "1-a"]:
        assert_prerelease(pre)
    for final in ["1.2.0", "2.9", "192.168.3.11"]:
        refute_prerelease(final)


def test_release():
    assert_release_equal("1.2.0", "1.2.0.a")
    assert_release_equal("1.1", "1.1.rc10")
    assert_release_equal("1.9.3", "1.9.3.alpha.5")
    assert_release_equal("1.9.3", "1.9.3")


def test_spaceship_cmp():
    def cmp(a, b):
        # Ruby's spaceship <=> is the same as Python 2's legacy cmp().
        return a.__cmp__(b)

    expectations = [
        ("1.0", "1.0.0", 0),
        ("1.0", "1.0.a", 1),
        ("1.8.2", "0.0.0", 1),
        ("1.8.2", "1.8.2.a", 1),
        ("1.8.2.b", "1.8.2.a", 1),
        ("1.8.2.a", "1.8.2", -1),
        ("1.8.2.a10", "1.8.2.a9", 1),
        ("", "0", 0),
        ("0.beta.1", "0.0.beta.1", 0),
        ("0.0.beta", "0.0.beta.1", -1),
        ("0.0.beta", "0.beta.1", -1),
        ("5.a", "5.0.0.rc2", -1),
        ("5.x", "5.0.0.rc2", 1),
    ]
    for left, right, expected in expectations:
        assert cmp(GemVersion(left), GemVersion(right)) == expected
def assert_version_satisfies_requirement(requirement, version):
    """Assert that ``version`` satisfies the requirement string (e.g. "~> 1.2")."""
    assert GemRequirement.create(requirement).satisfied_by(GemVersion(version))


def test_satisfies_requirement():
    cases = [
        ("~> 1.0", "1"),
        ("~> 1.0", "1.0"),
        ("~> 1.2", "1.2"),
        ("~> 1.2", "1.2.0"),
        ("~> 1.2", "1.2.3"),
        ("~> 1.2.a", "1.2.3.a.4"),
        ("~> 1.9.a", "1.9.0.dev"),
    ]
    for requirement, version in cases:
        assert_version_satisfies_requirement(requirement, version)


def test_to_s():
    assert GemVersion("5.2.4").to_string() == "5.2.4"


def test_compare():
    bigger = GemVersion("0.0.1.0")
    smaller = GemVersion("0.0.0.1")
    assert bigger > smaller
    assert not bigger < smaller
    assert bigger >= smaller
    assert not bigger <= smaller


def test_semver():
    # Ordering chain from the semver.org precedence example.
    ordering = [
        "1.0.0-alpha",
        "1.0.0-alpha.1",
        "1.0.0-beta.2",
        "1.0.0-beta.11",
        "1.0.0-rc.1",
    ]
    for lower, higher in zip(ordering, ordering[1:]):
        assert_less_than(lower, higher)
    assert_less_than("1.0.0-rc1", "1.0.0")
    assert_less_than("1.0.0-1", "1")


def test_segments():
    # Modifying the segments of a version should not affect the segments of
    # the cached version object.
    ver = GemVersion("9.8.7")
    secondseg = ver.segments[2]
    # NOTE(review): this rebinds a local int only -- it never mutates
    # ver.segments, so the test is weaker than the comment implies; confirm.
    secondseg += 1
    refute_version_eql("9.8.8", "9.8.7")
    assert GemVersion("9.8.7").segments == [9, 8, 7]


def test_split_segments():
    assert GemVersion("3.2.4-2").split_segments() == ([3, 2, 4], ["pre", 2])


def test_canonical_segments():
    assert GemVersion("1.0.0").canonical_segments == [1]
    assert GemVersion("1.0.0.a.1.0").canonical_segments == [1, "a", 1]
    assert GemVersion("1.2.3-1").canonical_segments == [1, 2, 3, "pre", 1]


def test_frozen_version():
    ver = GemVersion("1.test")
    assert_less_than(ver, GemVersion("1"))
    assert_version_eql(GemVersion("1"), ver.release())
    assert_version_eql(GemVersion("2"), ver.bump())
def assert_prerelease(version):
    """Assert that ``version`` is a prerelease."""
    # BUG FIX: the failure message used Ruby interpolation ("#{version}"),
    # which Python never expands; an f-string reports the actual version.
    assert GemVersion(version).prerelease(), f"{version} is a prerelease"


def assert_release_equal(release, version):
    """Assert that ``release`` is the correct non-prerelease form of ``version``."""
    assert_version_eql(release, GemVersion(version).release())


def assert_version_eql(first, second):
    """Assert two versions compare equal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert lhs == rhs
    assert rhs == lhs


def refute_version_eql(first, second):
    """Assert two versions compare unequal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert lhs != rhs
    assert rhs != lhs


def assert_version_strict_equal(first, second):
    """Assert two versions are strictly equal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert lhs.equal_strictly(rhs)
    assert rhs.equal_strictly(lhs)


def refute_version_strict_equal(first, second):
    """Assert two versions are NOT strictly equal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert not lhs.equal_strictly(rhs)
    assert not rhs.equal_strictly(lhs)


def assert_less_than(left, right):
    """Assert that ``left`` sorts strictly before ``right``."""
    assert GemVersion(left) < GemVersion(right)


def refute_prerelease(version):
    """Assert that ``version`` is not a prerelease."""
    assert not GemVersion(version).prerelease()
from univers.gem import GemRequirement
from univers.gem import GemVersion
from univers.gem import InvalidVersionError
def assert_bumped_version_equal(expected, unbumped):
    """Assert that bumping ``unbumped`` yields ``expected``."""
    assert_version_eql(expected, GemVersion(unbumped).bump())


def test_bump():
    assert_bumped_version_equal("5.3", "5.2.4")


def test_bump_alpha():
    assert_bumped_version_equal("5.3", "5.2.4.a")


def test_bump_alphanumeric():
    assert_bumped_version_equal("5.3", "5.2.4.a10")


def test_bump_trailing_zeros():
    assert_bumped_version_equal("5.1", "5.0.0")


def test_bump_one_level():
    assert_bumped_version_equal("6", "5")


def test_eql_is_same():
    assert_version_eql("1.2", "1.2")
    assert_version_strict_equal("1.2", "1.2")
    refute_version_eql("1.2", "1.3")
    refute_version_strict_equal("1.2", "1.3")
    # "1.2" and "1.2.0" compare equal, but are not *strictly* equal.
    refute_version_strict_equal("1.2", "1.2.0")
    assert_version_eql("1.2", "1.2.0")
    assert_version_eql("1.2.b1", "1.2.b.1")
    refute_version_strict_equal("1.2.b1", "1.2.b.1")
    refute_version_strict_equal("1.2.pre.1", "1.2.0.pre.1.0")
    assert_version_eql("1.2.pre.1", "1.2.0.pre.1.0")


def test_initialize():
    # Surrounding whitespace is ignored; plain ints are accepted too.
    for good in ["1.0", "1.0 ", " 1.0 ", "1.0\n", "\n1.0\n", "1.0"]:
        assert_version_eql("1.0", good)
    assert_version_eql("1", 1)


def test_initialize_invalid():
    invalid_versions = [
        "whatever",
        "junk",
        # BUG FIX: a missing comma had fused the next two entries into one
        # implicitly-concatenated literal ("1.0\n2.0" "1..2"); upstream
        # rubygems tests list them as two separate invalid versions.
        "1.0\n2.0",
        "1..2",
        "1.2\ 3.4",
    ]
    # DON'T TOUCH THIS WITHOUT CHECKING CVE-2013-4287
    invalid_versions += ["2.3422222.222.222222222.22222.ads0as.dasd0.ddd2222.2.qd3e."]
    for invalid in invalid_versions:
        try:
            GemVersion(invalid)
            # Not caught below: Exception is not an InvalidVersionError.
            raise Exception(f"exception not raised for: {invalid!r}")
        except InvalidVersionError:
            pass


def test_empty_version():
    # Blank or whitespace-only input collapses to version "0".
    for blank in ["", " ", "   "]:
        assert GemVersion(blank).version == "0"


def test_prerelease():
    for pre in ["1.2.0.a", "2.9.b", "192.168.3.11.d", "1.2.d.42", "1.A", "1-1", "1-a"]:
        assert_prerelease(pre)
    for final in ["1.2.0", "2.9", "192.168.3.11"]:
        refute_prerelease(final)


def test_release():
    assert_release_equal("1.2.0", "1.2.0.a")
    assert_release_equal("1.1", "1.1.rc10")
    assert_release_equal("1.9.3", "1.9.3.alpha.5")
    assert_release_equal("1.9.3", "1.9.3")


def test_spaceship_cmp():
    def cmp(a, b):
        # Ruby's spaceship <=> is the same as Python 2's legacy cmp().
        return a.__cmp__(b)

    expectations = [
        ("1.0", "1.0.0", 0),
        ("1.0", "1.0.a", 1),
        ("1.8.2", "0.0.0", 1),
        ("1.8.2", "1.8.2.a", 1),
        ("1.8.2.b", "1.8.2.a", 1),
        ("1.8.2.a", "1.8.2", -1),
        ("1.8.2.a10", "1.8.2.a9", 1),
        ("", "0", 0),
        ("0.beta.1", "0.0.beta.1", 0),
        ("0.0.beta", "0.0.beta.1", -1),
        ("0.0.beta", "0.beta.1", -1),
        ("5.a", "5.0.0.rc2", -1),
        ("5.x", "5.0.0.rc2", 1),
    ]
    for left, right, expected in expectations:
        assert cmp(GemVersion(left), GemVersion(right)) == expected
def assert_version_satisfies_requirement(requirement, version):
    """Assert that ``version`` satisfies the requirement string (e.g. "~> 1.2")."""
    assert GemRequirement.create(requirement).satisfied_by(GemVersion(version))


def test_satisfies_requirement():
    cases = [
        ("~> 1.0", "1"),
        ("~> 1.0", "1.0"),
        ("~> 1.2", "1.2"),
        ("~> 1.2", "1.2.0"),
        ("~> 1.2", "1.2.3"),
        ("~> 1.2.a", "1.2.3.a.4"),
        ("~> 1.9.a", "1.9.0.dev"),
    ]
    for requirement, version in cases:
        assert_version_satisfies_requirement(requirement, version)


def test_to_s():
    assert GemVersion("5.2.4").to_string() == "5.2.4"


def test_compare():
    bigger = GemVersion("0.0.1.0")
    smaller = GemVersion("0.0.0.1")
    assert bigger > smaller
    assert not bigger < smaller
    assert bigger >= smaller
    assert not bigger <= smaller


def test_semver():
    # Ordering chain from the semver.org precedence example.
    ordering = [
        "1.0.0-alpha",
        "1.0.0-alpha.1",
        "1.0.0-beta.2",
        "1.0.0-beta.11",
        "1.0.0-rc.1",
    ]
    for lower, higher in zip(ordering, ordering[1:]):
        assert_less_than(lower, higher)
    assert_less_than("1.0.0-rc1", "1.0.0")
    assert_less_than("1.0.0-1", "1")


def test_segments():
    # Modifying the segments of a version should not affect the segments of
    # the cached version object.
    ver = GemVersion("9.8.7")
    secondseg = ver.segments[2]
    # NOTE(review): this rebinds a local int only -- it never mutates
    # ver.segments, so the test is weaker than the comment implies; confirm.
    secondseg += 1
    refute_version_eql("9.8.8", "9.8.7")
    assert GemVersion("9.8.7").segments == [9, 8, 7]


def test_split_segments():
    assert GemVersion("3.2.4-2").split_segments() == ([3, 2, 4], ["pre", 2])


def test_canonical_segments():
    assert GemVersion("1.0.0").canonical_segments == [1]
    assert GemVersion("1.0.0.a.1.0").canonical_segments == [1, "a", 1]
    assert GemVersion("1.2.3-1").canonical_segments == [1, 2, 3, "pre", 1]


def test_frozen_version():
    ver = GemVersion("1.test")
    assert_less_than(ver, GemVersion("1"))
    assert_version_eql(GemVersion("1"), ver.release())
    assert_version_eql(GemVersion("2"), ver.bump())
def assert_prerelease(version):
    """Assert that ``version`` is a prerelease."""
    # BUG FIX: the failure message used Ruby interpolation ("#{version}"),
    # which Python never expands; an f-string reports the actual version.
    assert GemVersion(version).prerelease(), f"{version} is a prerelease"


def assert_release_equal(release, version):
    """Assert that ``release`` is the correct non-prerelease form of ``version``."""
    assert_version_eql(release, GemVersion(version).release())


def assert_version_eql(first, second):
    """Assert two versions compare equal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert lhs == rhs
    assert rhs == lhs


def refute_version_eql(first, second):
    """Assert two versions compare unequal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert lhs != rhs
    assert rhs != lhs


def assert_version_strict_equal(first, second):
    """Assert two versions are strictly equal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert lhs.equal_strictly(rhs)
    assert rhs.equal_strictly(lhs)


def refute_version_strict_equal(first, second):
    """Assert two versions are NOT strictly equal, checking both directions."""
    lhs, rhs = GemVersion(first), GemVersion(second)
    assert lhs is not rhs
    assert not lhs.equal_strictly(rhs)
    assert not rhs.equal_strictly(lhs)


def assert_less_than(left, right):
    """Assert that ``left`` sorts strictly before ``right``."""
    assert GemVersion(left) < GemVersion(right)


def refute_prerelease(version):
    """Assert that ``version`` is not a prerelease."""
    assert not GemVersion(version).prerelease()
"""Check a PR description for correctly formatted "field - field - field" lines.

Each line is split on " - " and classified as a feature or bugfix entry;
the parsed result (or a failure report) is printed and written to JSON.
"""
import argparse
import json

import requests  # NOTE(review): unused in this script -- confirm before removing.

#### Gather CLI arguments
# Requires a PR message to be passed in as a text file to --prmessage
parser = argparse.ArgumentParser()
parser.add_argument("--prmessage", help="File path to a newline separated PR description", required=True)
parser.add_argument("--outputfile", help="Name of JSON file to output results", default="output.json")
args = parser.parse_args()

# BUG FIX: close the input file instead of leaking the open handle.
with open(args.prmessage) as pr_message_file:
    pr_input_file = pr_message_file.read().splitlines()

#### Set up variables for format check
bug_formats = ['bugfix', 'Bugfix', 'bug', 'Bug']
feature_formats = ['feature', 'Feature']

# Track failure count and tracker
fail_count = 0
fail_list = []

# Set up Output format
parsed_pr = dict()
parsed_pr['featureorbugfix'] = ""
parsed_pr['featurenew'] = []
parsed_pr['bugfixnew'] = []
parsed_pr['ticketcustomer'] = []
parsed_pr['successorfailure'] = "Success"

print('Line check results:')

#### Check the PR Message
for raw_line in pr_input_file:
    fields = raw_line.split(' - ')
    if len(fields) == 3:
        # "ticket - kind - description"
        print(len(fields))
        for check in bug_formats:
            if check in fields:
                parsed_pr['featureorbugfix'] = 'Bugfix'
                parsed_pr['bugfixnew'].append(fields[2])
                parsed_pr['ticketcustomer'].append(fields[0])
        for check in feature_formats:
            if check in fields:
                parsed_pr['featureorbugfix'] = 'Feature'
                parsed_pr['featurenew'].append(fields[2])
                parsed_pr['ticketcustomer'].append(fields[0])
    elif len(fields) == 2:
        # "kind - description" (no ticket reference)
        print(len(fields))
        for check in bug_formats:
            if check in fields:
                parsed_pr['featureorbugfix'] = 'Bugfix'
                parsed_pr['bugfixnew'].append(fields[1])
        for check in feature_formats:
            if check in fields:
                parsed_pr['featureorbugfix'] = 'Feature'
                parsed_pr['featurenew'].append(fields[1])
    elif len(fields) >= 4:
        print(len(fields))
        fail_count = fail_count + 1
        fails = dict()
        fails['reason'] = "Too many fields supplied"
        fails['offending_entry'] = fields
        fail_list.append(fails)
    elif len(fields) == 1 and not fields[0].strip():
        print('Skipping empty line')
    elif len(fields) == 1:
        print(len(fields))
        fail_count = fail_count + 1
        fails = dict()
        fails['reason'] = "Only one field supplied"
        fails['offending_entry'] = fields
        fail_list.append(fails)
    else:
        # BUG FIX: the original did ``print(y + '...')`` which raises
        # TypeError (list + str). This branch is unreachable in practice
        # because split() always yields at least one field.
        print(str(fields) + ' is not a valid line, but we wont count it as a failure')

# NOTE(review): in the indentation-mangled original this blank print sat
# between the loop and the output section; assumed to be a post-loop
# separator -- confirm.
print()

#### Output
if fail_count > 0:
    print('Number of failures:' + str(fail_count))
    fail_response = dict()
    fail_response['successorfailure'] = "Failure"
    fail_response['failure_list'] = fail_list
    print(json.dumps(fail_response))
    with open(args.outputfile, 'w') as f:
        json.dump(fail_response, f)
else:
    print(json.dumps(parsed_pr))
    with open(args.outputfile, 'w') as f:
        json.dump(parsed_pr, f)

#print(parsed_pr)
#print(type(pr_input_file))
#print(pr_input_file) | tryodaf-check-pr.py | import argparse
import json
import requests
#### Gather CLI arguments
# Requres a PR message to be passed in as a text file to --prmessage
parser = argparse.ArgumentParser()
parser.add_argument("--prmessage", help="File path to a newline separated PR description", required=True)
parser.add_argument("--outputfile", help="Name of JSON file to output results", default="output.json")
args = parser.parse_args()
pr_input_file = open(args.prmessage).read().splitlines()
#### Set up variables for format check
bug_formats = ['bugfix', 'Bugfix', 'bug', 'Bug']
feature_formats = ['feature', 'Feature']
# Track failure count and tracker
fail_count = 0
fail_list = []
# Set up Output format
parsed_pr = dict()
parsed_pr['featureorbugfix'] = ""
parsed_pr['featurenew'] = []
parsed_pr['bugfixnew'] = []
parsed_pr['ticketcustomer'] = []
parsed_pr['successorfailure'] = "Success"
print('Line check results:')
#### Check the PR Message
for x in pr_input_file:
y = x.split(' - ')
# print(y)
# print(len(y))
# Checking for which format is used
if len(y)==3:
print(len(y))
for check in bug_formats:
if check in y:
parsed_pr['featureorbugfix'] = 'Bugfix'
parsed_pr['bugfixnew'].append(y[2])
parsed_pr['ticketcustomer'].append(y[0])
for check in feature_formats:
if check in y:
parsed_pr['featureorbugfix'] = 'Feature'
parsed_pr['featurenew'].append(y[2])
parsed_pr['ticketcustomer'].append(y[0])
elif len(y)==2:
print(len(y))
for check in bug_formats:
if check in y:
parsed_pr['featureorbugfix'] = 'Bugfix'
parsed_pr['bugfixnew'].append(y[1])
for check in feature_formats:
if check in y:
parsed_pr['featureorbugfix'] = 'Feature'
parsed_pr['featurenew'].append(y[1])
elif len(y)>=4:
print(len(y))
fail_count = fail_count + 1
fails = dict()
fails['reason'] = "Too many fields supplied"
fails['offending_entry'] = y
fail_list.append(fails)
elif len(y)==1 and not y[0].strip():
print('Skipping empty line')
elif len(y)==1:
print(len(y))
fail_count = fail_count + 1
fails = dict()
fails['reason'] = "Only one field supplied"
fails['offending_entry'] = y
fail_list.append(fails)
else:
print(y + 'is not a valid line, but we wont count it as a failure')
print()
#### Output
if fail_count>0:
print('Number of failures:' + str(fail_count))
fail_response = dict()
fail_response['successorfailure'] = "Failure"
fail_response['failure_list'] = fail_list
print(json.dumps(fail_response))
with open(args.outputfile, 'w') as f:
json.dump(fail_response, f)
else:
print(json.dumps(parsed_pr))
with open(args.outputfile, 'w') as f:
json.dump(parsed_pr, f)
#print(parsed_pr)
#print(type(pr_input_file))
#print(pr_input_file) | 0.051 | 0.164684 |
import json
import logging
import logging.config
import os
import sys
from pathlib import Path
import pretty_errors # NOQA: F401 (imported but unused)
from rich.logging import RichHandler
# Configuration
# Feature toggles and log level come from the environment; NOCACHE/NOSTORE
# accept "true"/"1"/"t" (case-insensitive).
NOCACHE = os.environ.get("SOCCERDATA_NOCACHE", 'False').lower() in ('true', '1', 't')
NOSTORE = os.environ.get("SOCCERDATA_NOSTORE", 'False').lower() in ('true', '1', 't')
LOGLEVEL = os.environ.get('SOCCERDATA_LOGLEVEL', 'INFO').upper()
# Directories
# Everything lives under SOCCERDATA_DIR (default: ~/soccerdata).
BASE_DIR = Path(os.environ.get("SOCCERDATA_DIR", Path.home() / "soccerdata"))
LOGS_DIR = Path(BASE_DIR, "logs")
DATA_DIR = Path(BASE_DIR, "data")
CONFIG_DIR = Path(BASE_DIR, "config")
# Create dirs
LOGS_DIR.mkdir(parents=True, exist_ok=True)
DATA_DIR.mkdir(parents=True, exist_ok=True)
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
# Logger
# dictConfig schema: the console handler uses the minimal formatter; the
# rotating file handlers (info.log / error.log) use the detailed one.
logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "minimal": {"format": "%(message)s"},
        "detailed": {
            "format": "%(levelname)s %(asctime)s [%(filename)s:%(funcName)s:%(lineno)d]\n%(message)s\n"  # noqa: E501
        },
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "stream": sys.stdout,
            "formatter": "minimal",
            "level": logging.DEBUG,
        },
        "info": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": Path(LOGS_DIR, "info.log"),
            "maxBytes": 10485760,  # 10 MB (comment fixed: 10485760 bytes is 10 MB, not 1 MB)
            "backupCount": 10,
            "formatter": "detailed",
            "level": logging.INFO,
        },
        "error": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": Path(LOGS_DIR, "error.log"),
            "maxBytes": 10485760,  # 10 MB (comment fixed)
            "backupCount": 10,
            "formatter": "detailed",
            "level": logging.ERROR,
        },
    },
    "loggers": {
        "root": {
            "handlers": ["console", "info", "error"],
            "level": LOGLEVEL,
            "propagate": True,
        },
    },
}
logging.config.dictConfig(logging_config)
logger = logging.getLogger("root")
# Swap the plain console StreamHandler for a RichHandler (pretty output).
logger.handlers[0] = RichHandler(markup=True)
# Team name replacements
# Optional user config: {canonical_team: [aliases...]} is inverted into a
# flat alias -> canonical lookup table.
TEAMNAME_REPLACEMENTS = {}
_f_custom_teamname_replacements = CONFIG_DIR / "teamname_replacements.json"
if _f_custom_teamname_replacements.is_file():
    with open(_f_custom_teamname_replacements, encoding='utf8') as json_file:
        replacements = json.load(json_file)
    for team, to_replace_list in replacements.items():
        for to_replace in to_replace_list:
            TEAMNAME_REPLACEMENTS[to_replace] = team
    logger.info("Custom team name replacements loaded from %s.", _f_custom_teamname_replacements)
else:
    logger.info(
        "No custom team name replacements found. You can configure these in %s.",
        _f_custom_teamname_replacements,
    )
# League dict
# Maps a canonical "<COUNTRY>-<League>" ID to the identifier each data
# source uses for that league, plus approximate season start/end months.
LEAGUE_DICT = {
    "ENG-Premier League": {
        "ClubElo": "ENG_1",
        "MatchHistory": "E0",
        "FiveThirtyEight": "premier-league",
        "FBref": "Premier League",
        "ESPN": "eng.1",
        "SoFIFA": "English Premier League (1)",
        "WhoScored": "England - Premier League",
        "season_start": "Aug",
        "season_end": "May",
    },
    "ESP-La Liga": {
        "ClubElo": "ESP_1",
        "MatchHistory": "SP1",
        "FiveThirtyEight": "la-liga",
        "FBref": "La Liga",
        "ESPN": "esp.1",
        "SoFIFA": "Spain Primera Division (1)",
        "WhoScored": "Spain - LaLiga",
        "season_start": "Aug",
        "season_end": "May",
    },
    "ITA-Serie A": {
        "ClubElo": "ITA_1",
        "MatchHistory": "I1",
        "FiveThirtyEight": "serie-a",
        "FBref": "Serie A",
        "ESPN": "ita.1",
        "SoFIFA": " Italian Serie A (1)",  # NOTE(review): leading space looks accidental -- confirm against SoFIFA naming
        "WhoScored": "Italy - Serie A",
        "season_start": "Aug",
        "season_end": "May",
    },
    "GER-Bundesliga": {
        "ClubElo": "GER_1",
        "MatchHistory": "D1",
        "FiveThirtyEight": "bundesliga",
        "FBref": "Fußball-Bundesliga",
        "ESPN": "ger.1",
        "SoFIFA": "German 1. Bundesliga (1)",
        "WhoScored": "Germany - Bundesliga",
        "season_start": "Aug",
        "season_end": "May",
    },
    "FRA-Ligue 1": {
        "ClubElo": "FRA_1",
        "MatchHistory": "F1",
        "FiveThirtyEight": "ligue-1",
        "FBref": "Ligue 1",
        "ESPN": "fra.1",
        "SoFIFA": "French Ligue 1 (1)",
        "WhoScored": "France - Ligue 1",
        "season_start": "Aug",
        "season_end": "May",
    },
}
# Optional user config: entries in league_dict.json extend or override the
# built-in LEAGUE_DICT.
_f_custom_league_dict = CONFIG_DIR / "league_dict.json"
if _f_custom_league_dict.is_file():
    with open(_f_custom_league_dict, encoding='utf8') as json_file:
        custom_leagues = json.load(json_file)
    LEAGUE_DICT = {**LEAGUE_DICT, **custom_leagues}
    logger.info("Custom league dict loaded from %s.", _f_custom_league_dict)
else:
    logger.info(
        "No custom league dict found. You can configure additional leagues in %s.",
        _f_custom_league_dict,
    )
import json
import logging
import logging.config
import os
import sys
from pathlib import Path
import pretty_errors # NOQA: F401 (imported but unused)
from rich.logging import RichHandler
# Configuration
# Feature toggles and log level come from the environment; NOCACHE/NOSTORE
# accept "true"/"1"/"t" (case-insensitive).
NOCACHE = os.environ.get("SOCCERDATA_NOCACHE", 'False').lower() in ('true', '1', 't')
NOSTORE = os.environ.get("SOCCERDATA_NOSTORE", 'False').lower() in ('true', '1', 't')
LOGLEVEL = os.environ.get('SOCCERDATA_LOGLEVEL', 'INFO').upper()
# Directories
# Everything lives under SOCCERDATA_DIR (default: ~/soccerdata).
BASE_DIR = Path(os.environ.get("SOCCERDATA_DIR", Path.home() / "soccerdata"))
LOGS_DIR = Path(BASE_DIR, "logs")
DATA_DIR = Path(BASE_DIR, "data")
CONFIG_DIR = Path(BASE_DIR, "config")
# Create dirs
LOGS_DIR.mkdir(parents=True, exist_ok=True)
DATA_DIR.mkdir(parents=True, exist_ok=True)
CONFIG_DIR.mkdir(parents=True, exist_ok=True)
# Logger
# dictConfig schema: the console handler uses the minimal formatter; the
# rotating file handlers (info.log / error.log) use the detailed one.
logging_config = {
    "version": 1,
    "disable_existing_loggers": False,
    "formatters": {
        "minimal": {"format": "%(message)s"},
        "detailed": {
            "format": "%(levelname)s %(asctime)s [%(filename)s:%(funcName)s:%(lineno)d]\n%(message)s\n"  # noqa: E501
        },
    },
    "handlers": {
        "console": {
            "class": "logging.StreamHandler",
            "stream": sys.stdout,
            "formatter": "minimal",
            "level": logging.DEBUG,
        },
        "info": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": Path(LOGS_DIR, "info.log"),
            "maxBytes": 10485760,  # 10 MB (comment fixed: 10485760 bytes is 10 MB, not 1 MB)
            "backupCount": 10,
            "formatter": "detailed",
            "level": logging.INFO,
        },
        "error": {
            "class": "logging.handlers.RotatingFileHandler",
            "filename": Path(LOGS_DIR, "error.log"),
            "maxBytes": 10485760,  # 10 MB (comment fixed)
            "backupCount": 10,
            "formatter": "detailed",
            "level": logging.ERROR,
        },
    },
    "loggers": {
        "root": {
            "handlers": ["console", "info", "error"],
            "level": LOGLEVEL,
            "propagate": True,
        },
    },
}
logging.config.dictConfig(logging_config)
logger = logging.getLogger("root")
# Swap the plain console StreamHandler for a RichHandler (pretty output).
logger.handlers[0] = RichHandler(markup=True)
# Team name replacements
# Optional user config: {canonical_team: [aliases...]} is inverted into a
# flat alias -> canonical lookup table.
TEAMNAME_REPLACEMENTS = {}
_f_custom_teamname_replacements = CONFIG_DIR / "teamname_replacements.json"
if _f_custom_teamname_replacements.is_file():
    with open(_f_custom_teamname_replacements, encoding='utf8') as json_file:
        replacements = json.load(json_file)
    for team, to_replace_list in replacements.items():
        for to_replace in to_replace_list:
            TEAMNAME_REPLACEMENTS[to_replace] = team
    logger.info("Custom team name replacements loaded from %s.", _f_custom_teamname_replacements)
else:
    logger.info(
        "No custom team name replacements found. You can configure these in %s.",
        _f_custom_teamname_replacements,
    )
# League dict
# Maps a canonical "<COUNTRY>-<League>" ID to the identifier each data
# source uses for that league, plus approximate season start/end months.
LEAGUE_DICT = {
    "ENG-Premier League": {
        "ClubElo": "ENG_1",
        "MatchHistory": "E0",
        "FiveThirtyEight": "premier-league",
        "FBref": "Premier League",
        "ESPN": "eng.1",
        "SoFIFA": "English Premier League (1)",
        "WhoScored": "England - Premier League",
        "season_start": "Aug",
        "season_end": "May",
    },
    "ESP-La Liga": {
        "ClubElo": "ESP_1",
        "MatchHistory": "SP1",
        "FiveThirtyEight": "la-liga",
        "FBref": "La Liga",
        "ESPN": "esp.1",
        "SoFIFA": "Spain Primera Division (1)",
        "WhoScored": "Spain - LaLiga",
        "season_start": "Aug",
        "season_end": "May",
    },
    "ITA-Serie A": {
        "ClubElo": "ITA_1",
        "MatchHistory": "I1",
        "FiveThirtyEight": "serie-a",
        "FBref": "Serie A",
        "ESPN": "ita.1",
        "SoFIFA": " Italian Serie A (1)",  # NOTE(review): leading space looks accidental -- confirm against SoFIFA naming
        "WhoScored": "Italy - Serie A",
        "season_start": "Aug",
        "season_end": "May",
    },
    "GER-Bundesliga": {
        "ClubElo": "GER_1",
        "MatchHistory": "D1",
        "FiveThirtyEight": "bundesliga",
        "FBref": "Fußball-Bundesliga",
        "ESPN": "ger.1",
        "SoFIFA": "German 1. Bundesliga (1)",
        "WhoScored": "Germany - Bundesliga",
        "season_start": "Aug",
        "season_end": "May",
    },
    "FRA-Ligue 1": {
        "ClubElo": "FRA_1",
        "MatchHistory": "F1",
        "FiveThirtyEight": "ligue-1",
        "FBref": "Ligue 1",
        "ESPN": "fra.1",
        "SoFIFA": "French Ligue 1 (1)",
        "WhoScored": "France - Ligue 1",
        "season_start": "Aug",
        "season_end": "May",
    },
}
# Optional user config: entries in league_dict.json extend or override the
# built-in LEAGUE_DICT.
_f_custom_league_dict = CONFIG_DIR / "league_dict.json"
if _f_custom_league_dict.is_file():
    with open(_f_custom_league_dict, encoding='utf8') as json_file:
        custom_leagues = json.load(json_file)
    LEAGUE_DICT = {**LEAGUE_DICT, **custom_leagues}
    logger.info("Custom league dict loaded from %s.", _f_custom_league_dict)
else:
    logger.info(
        "No custom league dict found. You can configure additional leagues in %s.",
        _f_custom_league_dict,
    )
import hashlib
import unittest
from binascii import hexlify, unhexlify
from context import bitcoinutils
from bitcoinutils.setup import setup
from bitcoinutils.keys import PrivateKey, P2pkhAddress, P2shAddress, P2wpkhAddress
from bitcoinutils.constants import SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE, \
SIGHASH_ANYONECANPAY, TYPE_RELATIVE_TIMELOCK
from bitcoinutils.transactions import TxInput, TxOutput, Transaction, Sequence
from bitcoinutils.script import Script
class TestCreateP2wpkhTransaction(unittest.TestCase):
def setUp(self):
setup('testnet')
self.sk = PrivateKey.from_wif("<KEY>")
# n4bkvTyU1dVdzsrhWBqBw8fEMbHjJvtmJR
self.p2pkh_addr = self.sk.get_public_key().get_address()
# tb1ql5eh45als8sgdkt2drsl344q55g03sj2krzqe3
self.p2wpkh_addr = self.sk.get_public_key().get_segwit_address()
# P2PKH to P2WPKH
self.txin1 = TxInput("5a7b3aaa66d6b7b7abcdc9f1d05db4eee94a700297a319e19454e143875e1078", 0)
self.txout1 = TxOutput(0.0099, self.p2wpkh_addr.to_script_pub_key())
# P2WPKH to P2PKH
self.txin_spend = TxInput("b3ca1c4cc778380d1e5376a5517445104e46e97176e40741508a3b07a6483ad3", 0)
self.txin_spend_amount = 0.0099
self.txout2 = TxOutput(0.0098, self.p2pkh_addr.to_script_pub_key())
self.p2pkh_redeem_script = Script(['OP_DUP', 'OP_HASH160', self.p2pkh_addr.to_hash160(), 'OP_EQUALVERIFY',
'OP_CHECKSIG'])
# P2WPKH P2PKH to P2PKH
self.txin_spend_p2pkh = TxInput("1e2a5279c868d61fb2ff0b1c2b04aa3eff02cd74952a8b4e799532635a9132cc", 0)
self.txin_spend_p2pkh_amount = 0.01
self.txin_spend_p2wpkh = TxInput("fff39047310fbf04bdd0e0bc75dde4267ae4d25219d8ad95e0ca1cee907a60da", 0)
self.txin_spend_p2wpkh_amount = 0.0095
self.txout3 = TxOutput(0.0194, self.p2pkh_addr.to_script_pub_key())
# SIGHASH NONE type send
self.txin1_signone = TxInput("fb4c338a00a75d73f9a6bd203ed4bd8884edeb111fac25a7946d5df6562f1942", 0)
self.txin1_signone_amount = 0.01
self.txout1_signone = TxOutput(0.0080, self.p2pkh_addr.to_script_pub_key())
self.txout2_signone = TxOutput(0.0019, self.p2pkh_addr.to_script_pub_key())
# SIGHASH SINGLE type send
self.txin1_sigsingle = TxInput("b04909d4b5239a56d676c1d9d722f325a86878c9aa535915aa0df97df47cedeb", 0)
self.txin1_sigsingle_amount = 0.0193
self.txout1_sigsingle = TxOutput(0.01, self.p2pkh_addr.to_script_pub_key())
self.txout2_sigsingle = TxOutput(0.0092, self.p2pkh_addr.to_script_pub_key())
# SIGHASH_ALL | SIGHASH_ANYONECANPAY type send
self.txin1_siganyonecanpay_all = TxInput("f67e97a2564dceed405e214843e3c954b47dd4f8b26ea48f82382f51f7626036", 0)
self.txin1_siganyonecanpay_all_amount = 0.0018
self.txin2_siganyonecanpay_all = TxInput("f4afddb77cd11a79bed059463085382c50d60c7f9e4075d8469cfe60040f68eb", 0)
self.txin2_siganyonecanpay_all_amount = 0.0018
self.txout1_siganyonecanpay_all = TxOutput(0.0018, self.p2pkh_addr.to_script_pub_key())
self.txout2_siganyonecanpay_all = TxOutput(0.0017, self.p2pkh_addr.to_script_pub_key())
# SIGHASH_NONE | SIGHASH_ANYONECANPAY type send
self.txin1_siganyonecanpay_none = TxInput("d2ae5d4a3f390f108769139c9b5757846be6693b785c4e21eab777eec7289095", 0)
self.txin1_siganyonecanpay_none_amount = 0.009
self.txin2_siganyonecanpay_none = TxInput("ee5062d426677372e6de96e2eb47d572af5deaaef3ef225f3179dfa1ece3f4f5", 0)
self.txin2_siganyonecanpay_none_amount = 0.007
self.txout1_siganyonecanpay_none = TxOutput(0.008, self.p2pkh_addr.to_script_pub_key())
self.txout2_siganyonecanpay_none = TxOutput(0.007, self.p2pkh_addr.to_script_pub_key())
# SIGHASH_SINGLE | SIGHASH_ANYONECANPAY type send
self.txin1_siganyonecanpay_single = TxInput("c7bb5672266c8a5b64fe91e953a9e23e3206e3b1a2ddc8e5999b607b82485042", 0)
self.txin1_siganyonecanpay_single_amount = 0.01
self.txout1_siganyonecanpay_single = TxOutput(0.005, self.p2pkh_addr.to_script_pub_key())
self.txout2_siganyonecanpay_single = TxOutput(0.0049, self.p2pkh_addr.to_script_pub_key())
# result
self.create_send_to_p2wpkh_result = "020000000178105e8743e15494e119a39702704ae9eeb45dd0f1c9cdabb7b7d666aa3a7b5a000000006b4830450221009ad68e1ecdd38d6abe515a52582a441a56f0fedb21816eb2f583183685da2eb502203c4fc7522ad7ab0c1854180cfd337e484ad3ba70d23bcf4380c6e2ff4e6e7985012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a546ffffffff01301b0f0000000000160014fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a00000000"
self.spend_p2pkh_result = "02000000000101d33a48a6073b8a504107e47671e9464e10457451a576531e0d3878c74c1ccab30000000000ffffffff0120f40e00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac0247304402201c7ec9b049daa99c78675810b5e36b0b61add3f84180eaeaa613f8525904bdc302204854830d463a4699b6d69e37c08b8d3c6158185d46499170cfcc24d4a9e9a37f012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.p2pkh_and_p2wpkh_to_p2pkh_result = "02000000000102cc32915a633295794e8b2a9574cd02ff3eaa042b1c0bffb21fd668c879522a1e000000006a47304402200fe842622e656a6780093f60b0597a36a57481611543a2e9576f9e8f1b34edb8022008ba063961c600834760037be20f45bbe077541c533b3fd257eae8e08d0de3b3012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a546ffffffffda607a90ee1ccae095add81952d2e47a26e4dd75bce0d0bd04bf0f314790f3ff0000000000ffffffff01209a1d00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac00024730440220274bb5445294033a36c360c48cc5e441ba8cc2bc1554dcb7d367088ec40a0d0302202a36f6e03f969e1b0c582f006257eec8fa2ada8cd34fe41ae2aa90d6728999d1012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_signone_send_result = "0200000000010142192f56f65d6d94a725ac1f11ebed8488bdd43e20bda6f9735da7008a334cfb0000000000ffffffff0200350c00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac30e60200000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100d3e7d4fceb7cded91f5d09ef192b5308d325ead1047ee5972a62747b8a937da502205e6bdeebe048f7923be75e36b6d39a78891fccbf0084ac1445f27a77261a13c2022102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_sigsingle_send_result = "02000000000101ebed7cf47df90daa155953aac97868a825f322d7d9c176d6569a23b5d40949b00000000000ffffffff0240420f00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88acc0090e00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100e315efea11d21b0819425f164751e4bbdd20f7fee8b0ee949da466ee013b73b7022048cb056d4823272518023222b39cdead68dc3a9b1e60aae37a8dd5a5108d2a62032102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_siganyonecanpay_all_send_result = "02000000000102366062f7512f38828fa46eb2f8d47db454c9e34348215e40edce4d56a2977ef60000000000ffffffffeb680f0460fe9c46d875409e7f0cd6502c3885304659d0be791ad17cb7ddaff40000000000ffffffff0220bf0200000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac10980200000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100b963e68c5d133c16c0bb9cdf82c2ace5acd5c03fc03a4572699ac2712bbe772202202075cf8e35d4093e71635c49844a009a16ff08b9ee2ff5876ef2f3bd17b93c63812102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a5460247304402206fb60dc79b5ca6c699d04ec96c4f196938332c2909fd17c04023ebcc7408f36e02202b071771a58c84e20b7bf1fcec05c0ef55c1100436a055bfcb2bf7ed1c0683a9012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_siganyonecanpay_none_send_result = "02000000000102959028c7ee77b7ea214e5c783b69e66b8457579b9c136987100f393f4a5daed20000000000fffffffff5f4e3eca1df79315f22eff3aeea5daf72d547ebe296dee672736726d46250ee0000000000ffffffff0200350c00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac60ae0a00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac0247304402203bbcbd2003244e9ccde7f705d3017f3baa2cb2d47efb63ede7e39704eff3987702206932aa4b402de898ff2fd3b2182f344dc9051b4c326dacc07b1e59059042f3ad822102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54602483045022100bd00b75aaefc0ab139f44b5347a8159d7158ae911bbf5a76f7fe1b93e2b0f1d50220724189279c0c497e15b4e3e1d1291f0b15e3dc460e8b2adf8597dbbd3af32440012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_siganyonecanpay_single_send_result = "02000000000101425048827b609b99e5c8dda2b1e306323ee2a953e991fe645b8a6c267256bbc70000000000ffffffff0220a10700000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac107a0700000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100ff22bf77115243a01f1c39eca2d3a222e1e176d272b3eab561b6d625af0ee21a0220520b07b72ba5ab11f33a0ed921aac29a05ad09cc65107f3931a25711679562b0832102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
def test_signed_send_to_p2wpkh(self):
# Non-segregated witness transaction
tx = Transaction([self.txin1], [self.txout1],witnesses = [])
sig = self.sk.sign_input(tx, 0, self.p2pkh_addr.to_script_pub_key())
pk = self.sk.get_public_key().to_hex()
self.txin1.script_sig = Script([sig, pk])
self.assertEqual(tx.serialize(), self.create_send_to_p2wpkh_result)
def test_spend_p2wpkh(self):
tx = Transaction([self.txin_spend], [self.txout2], has_segwit=True,witnesses = [])
sig = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin_spend_amount)
pk = self.sk.get_public_key().to_hex()
tx.witnesses.append(Script([sig, pk]))
self.assertEqual(tx.serialize(), self.spend_p2pkh_result)
def test_p2pkh_and_p2wpkh_to_p2pkh(self):
tx = Transaction([self.txin_spend_p2pkh, self.txin_spend_p2wpkh], [self.txout3], has_segwit=True,witnesses = [])
# spend_p2pkh
sig1 = self.sk.sign_input(tx, 0, self.p2pkh_addr.to_script_pub_key())
pk1 = self.sk.get_public_key().to_hex()
self.txin_spend_p2pkh.script_sig = Script([sig1, pk1])
tx.witnesses.append(Script([]))
# spend_p2wpkh
sig2 = self.sk.sign_segwit_input(tx, 1, self.p2pkh_redeem_script, self.txin_spend_p2wpkh_amount)
pk2 = self.sk.get_public_key().to_hex()
tx.witnesses.append(Script([sig2, pk2]))
self.assertEqual(tx.serialize(), self.p2pkh_and_p2wpkh_to_p2pkh_result)
def test_signone_send(self):
"""
SIGHASH_NONE:signs all of the inputs
"""
# First, only txin1 and txout1 are added to the transaction.
tx = Transaction([self.txin1_signone], [self.txout1_signone], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_signone_amount,
SIGHASH_NONE)
tx.witnesses.append(Script([sig_signone, pk]))
# Adding additional output signatures will not be affected
tx.outputs.append(self.txout2_signone)
self.assertEqual(tx.serialize(), self.test_signone_send_result)
def test_sigsingle_send(self):
"""
SIGHASH_SINGLE:signs all inputs but only txin_index output
"""
tx = Transaction([self.txin1_sigsingle], [self.txout1_sigsingle], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_sigsingle_amount,
SIGHASH_SINGLE)
tx.witnesses.append(Script([sig_signone, pk]))
tx.outputs.append(self.txout2_sigsingle)
self.assertEqual(tx.serialize(), self.test_sigsingle_send_result)
def test_siganyonecanpay_all_send(self):
"""
SIGHASH_ALL | SIGHASH_ANYONECANPAY:signs all outputs but only txin_index input
"""
tx = Transaction([self.txin1_siganyonecanpay_all], [self.txout1_siganyonecanpay_all,self.txout2_siganyonecanpay_all], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_siganyonecanpay_all_amount,
SIGHASH_ALL | SIGHASH_ANYONECANPAY)
tx.witnesses.append(Script([sig_signone, pk]))
tx.inputs.append(self.txin2_siganyonecanpay_all)
sig = self.sk.sign_segwit_input(tx, 1, self.p2pkh_redeem_script, self.txin2_siganyonecanpay_all_amount,
SIGHASH_ALL)
tx.witnesses.append(Script([sig, pk]))
self.assertEqual(tx.serialize(), self.test_siganyonecanpay_all_send_result)
def test_siganyonecanpay_none_send(self):
"""
SIGHASH_NONE | SIGHASH_ANYONECANPAY:signs only the txin_index input
"""
tx = Transaction([self.txin1_siganyonecanpay_none], [self.txout1_siganyonecanpay_none], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_siganyonecanpay_none_amount,
SIGHASH_NONE | SIGHASH_ANYONECANPAY)
tx.witnesses.append(Script([sig_signone, pk]))
tx.inputs.append(self.txin2_siganyonecanpay_none)
tx.outputs.append(self.txout2_siganyonecanpay_none)
sig = self.sk.sign_segwit_input(tx, 1, self.p2pkh_redeem_script, self.txin2_siganyonecanpay_none_amount,
SIGHASH_ALL)
tx.witnesses.append(Script([sig, pk]))
self.assertEqual(tx.serialize(), self.test_siganyonecanpay_none_send_result)
def test_siganyonecanpay_single_send(self):
"""
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY:signs txin_index input and output
"""
tx = Transaction([self.txin1_siganyonecanpay_single], [self.txout1_siganyonecanpay_single], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_siganyonecanpay_single_amount,
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY)
tx.witnesses.append(Script([sig_signone, pk]))
tx.outputs.append(self.txout2_siganyonecanpay_single)
self.assertEqual(tx.serialize(), self.test_siganyonecanpay_single_send_result)
if __name__ == '__main__':
unittest.main() | tests/test_p2wpkh_txs.py | import hashlib
import unittest
from binascii import hexlify, unhexlify
from context import bitcoinutils
from bitcoinutils.setup import setup
from bitcoinutils.keys import PrivateKey, P2pkhAddress, P2shAddress, P2wpkhAddress
from bitcoinutils.constants import SIGHASH_ALL, SIGHASH_NONE, SIGHASH_SINGLE, \
SIGHASH_ANYONECANPAY, TYPE_RELATIVE_TIMELOCK
from bitcoinutils.transactions import TxInput, TxOutput, Transaction, Sequence
from bitcoinutils.script import Script
class TestCreateP2wpkhTransaction(unittest.TestCase):
def setUp(self):
setup('testnet')
self.sk = PrivateKey.from_wif("<KEY>")
# n4bkvTyU1dVdzsrhWBqBw8fEMbHjJvtmJR
self.p2pkh_addr = self.sk.get_public_key().get_address()
# tb1ql5eh45als8sgdkt2drsl344q55g03sj2krzqe3
self.p2wpkh_addr = self.sk.get_public_key().get_segwit_address()
# P2PKH to P2WPKH
self.txin1 = TxInput("5a7b3aaa66d6b7b7abcdc9f1d05db4eee94a700297a319e19454e143875e1078", 0)
self.txout1 = TxOutput(0.0099, self.p2wpkh_addr.to_script_pub_key())
# P2WPKH to P2PKH
self.txin_spend = TxInput("b3ca1c4cc778380d1e5376a5517445104e46e97176e40741508a3b07a6483ad3", 0)
self.txin_spend_amount = 0.0099
self.txout2 = TxOutput(0.0098, self.p2pkh_addr.to_script_pub_key())
self.p2pkh_redeem_script = Script(['OP_DUP', 'OP_HASH160', self.p2pkh_addr.to_hash160(), 'OP_EQUALVERIFY',
'OP_CHECKSIG'])
# P2WPKH P2PKH to P2PKH
self.txin_spend_p2pkh = TxInput("1e2a5279c868d61fb2ff0b1c2b04aa3eff02cd74952a8b4e799532635a9132cc", 0)
self.txin_spend_p2pkh_amount = 0.01
self.txin_spend_p2wpkh = TxInput("fff39047310fbf04bdd0e0bc75dde4267ae4d25219d8ad95e0ca1cee907a60da", 0)
self.txin_spend_p2wpkh_amount = 0.0095
self.txout3 = TxOutput(0.0194, self.p2pkh_addr.to_script_pub_key())
# SIGHASH NONE type send
self.txin1_signone = TxInput("fb4c338a00a75d73f9a6bd203ed4bd8884edeb111fac25a7946d5df6562f1942", 0)
self.txin1_signone_amount = 0.01
self.txout1_signone = TxOutput(0.0080, self.p2pkh_addr.to_script_pub_key())
self.txout2_signone = TxOutput(0.0019, self.p2pkh_addr.to_script_pub_key())
# SIGHASH SINGLE type send
self.txin1_sigsingle = TxInput("b04909d4b5239a56d676c1d9d722f325a86878c9aa535915aa0df97df47cedeb", 0)
self.txin1_sigsingle_amount = 0.0193
self.txout1_sigsingle = TxOutput(0.01, self.p2pkh_addr.to_script_pub_key())
self.txout2_sigsingle = TxOutput(0.0092, self.p2pkh_addr.to_script_pub_key())
# SIGHASH_ALL | SIGHASH_ANYONECANPAY type send
self.txin1_siganyonecanpay_all = TxInput("f67e97a2564dceed405e214843e3c954b47dd4f8b26ea48f82382f51f7626036", 0)
self.txin1_siganyonecanpay_all_amount = 0.0018
self.txin2_siganyonecanpay_all = TxInput("f4afddb77cd11a79bed059463085382c50d60c7f9e4075d8469cfe60040f68eb", 0)
self.txin2_siganyonecanpay_all_amount = 0.0018
self.txout1_siganyonecanpay_all = TxOutput(0.0018, self.p2pkh_addr.to_script_pub_key())
self.txout2_siganyonecanpay_all = TxOutput(0.0017, self.p2pkh_addr.to_script_pub_key())
# SIGHASH_NONE | SIGHASH_ANYONECANPAY type send
self.txin1_siganyonecanpay_none = TxInput("d2ae5d4a3f390f108769139c9b5757846be6693b785c4e21eab777eec7289095", 0)
self.txin1_siganyonecanpay_none_amount = 0.009
self.txin2_siganyonecanpay_none = TxInput("ee5062d426677372e6de96e2eb47d572af5deaaef3ef225f3179dfa1ece3f4f5", 0)
self.txin2_siganyonecanpay_none_amount = 0.007
self.txout1_siganyonecanpay_none = TxOutput(0.008, self.p2pkh_addr.to_script_pub_key())
self.txout2_siganyonecanpay_none = TxOutput(0.007, self.p2pkh_addr.to_script_pub_key())
# SIGHASH_SINGLE | SIGHASH_ANYONECANPAY type send
self.txin1_siganyonecanpay_single = TxInput("c7bb5672266c8a5b64fe91e953a9e23e3206e3b1a2ddc8e5999b607b82485042", 0)
self.txin1_siganyonecanpay_single_amount = 0.01
self.txout1_siganyonecanpay_single = TxOutput(0.005, self.p2pkh_addr.to_script_pub_key())
self.txout2_siganyonecanpay_single = TxOutput(0.0049, self.p2pkh_addr.to_script_pub_key())
# result
self.create_send_to_p2wpkh_result = "020000000178105e8743e15494e119a39702704ae9eeb45dd0f1c9cdabb7b7d666aa3a7b5a000000006b4830450221009ad68e1ecdd38d6abe515a52582a441a56f0fedb21816eb2f583183685da2eb502203c4fc7522ad7ab0c1854180cfd337e484ad3ba70d23bcf4380c6e2ff4e6e7985012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a546ffffffff01301b0f0000000000160014fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a00000000"
self.spend_p2pkh_result = "02000000000101d33a48a6073b8a504107e47671e9464e10457451a576531e0d3878c74c1ccab30000000000ffffffff0120f40e00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac0247304402201c7ec9b049daa99c78675810b5e36b0b61add3f84180eaeaa613f8525904bdc302204854830d463a4699b6d69e37c08b8d3c6158185d46499170cfcc24d4a9e9a37f012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.p2pkh_and_p2wpkh_to_p2pkh_result = "02000000000102cc32915a633295794e8b2a9574cd02ff3eaa042b1c0bffb21fd668c879522a1e000000006a47304402200fe842622e656a6780093f60b0597a36a57481611543a2e9576f9e8f1b34edb8022008ba063961c600834760037be20f45bbe077541c533b3fd257eae8e08d0de3b3012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a546ffffffffda607a90ee1ccae095add81952d2e47a26e4dd75bce0d0bd04bf0f314790f3ff0000000000ffffffff01209a1d00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac00024730440220274bb5445294033a36c360c48cc5e441ba8cc2bc1554dcb7d367088ec40a0d0302202a36f6e03f969e1b0c582f006257eec8fa2ada8cd34fe41ae2aa90d6728999d1012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_signone_send_result = "0200000000010142192f56f65d6d94a725ac1f11ebed8488bdd43e20bda6f9735da7008a334cfb0000000000ffffffff0200350c00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac30e60200000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100d3e7d4fceb7cded91f5d09ef192b5308d325ead1047ee5972a62747b8a937da502205e6bdeebe048f7923be75e36b6d39a78891fccbf0084ac1445f27a77261a13c2022102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_sigsingle_send_result = "02000000000101ebed7cf47df90daa155953aac97868a825f322d7d9c176d6569a23b5d40949b00000000000ffffffff0240420f00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88acc0090e00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100e315efea11d21b0819425f164751e4bbdd20f7fee8b0ee949da466ee013b73b7022048cb056d4823272518023222b39cdead68dc3a9b1e60aae37a8dd5a5108d2a62032102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_siganyonecanpay_all_send_result = "02000000000102366062f7512f38828fa46eb2f8d47db454c9e34348215e40edce4d56a2977ef60000000000ffffffffeb680f0460fe9c46d875409e7f0cd6502c3885304659d0be791ad17cb7ddaff40000000000ffffffff0220bf0200000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac10980200000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100b963e68c5d133c16c0bb9cdf82c2ace5acd5c03fc03a4572699ac2712bbe772202202075cf8e35d4093e71635c49844a009a16ff08b9ee2ff5876ef2f3bd17b93c63812102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a5460247304402206fb60dc79b5ca6c699d04ec96c4f196938332c2909fd17c04023ebcc7408f36e02202b071771a58c84e20b7bf1fcec05c0ef55c1100436a055bfcb2bf7ed1c0683a9012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_siganyonecanpay_none_send_result = "02000000000102959028c7ee77b7ea214e5c783b69e66b8457579b9c136987100f393f4a5daed20000000000fffffffff5f4e3eca1df79315f22eff3aeea5daf72d547ebe296dee672736726d46250ee0000000000ffffffff0200350c00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac60ae0a00000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac0247304402203bbcbd2003244e9ccde7f705d3017f3baa2cb2d47efb63ede7e39704eff3987702206932aa4b402de898ff2fd3b2182f344dc9051b4c326dacc07b1e59059042f3ad822102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54602483045022100bd00b75aaefc0ab139f44b5347a8159d7158ae911bbf5a76f7fe1b93e2b0f1d50220724189279c0c497e15b4e3e1d1291f0b15e3dc460e8b2adf8597dbbd3af32440012102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
self.test_siganyonecanpay_single_send_result = "02000000000101425048827b609b99e5c8dda2b1e306323ee2a953e991fe645b8a6c267256bbc70000000000ffffffff0220a10700000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac107a0700000000001976a914fd337ad3bf81e086d96a68e1f8d6a0a510f8c24a88ac02483045022100ff22bf77115243a01f1c39eca2d3a222e1e176d272b3eab561b6d625af0ee21a0220520b07b72ba5ab11f33a0ed921aac29a05ad09cc65107f3931a25711679562b0832102d82c9860e36f15d7b72aa59e29347f951277c21cd4d34822acdeeadbcff8a54600000000"
def test_signed_send_to_p2wpkh(self):
# Non-segregated witness transaction
tx = Transaction([self.txin1], [self.txout1],witnesses = [])
sig = self.sk.sign_input(tx, 0, self.p2pkh_addr.to_script_pub_key())
pk = self.sk.get_public_key().to_hex()
self.txin1.script_sig = Script([sig, pk])
self.assertEqual(tx.serialize(), self.create_send_to_p2wpkh_result)
def test_spend_p2wpkh(self):
tx = Transaction([self.txin_spend], [self.txout2], has_segwit=True,witnesses = [])
sig = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin_spend_amount)
pk = self.sk.get_public_key().to_hex()
tx.witnesses.append(Script([sig, pk]))
self.assertEqual(tx.serialize(), self.spend_p2pkh_result)
def test_p2pkh_and_p2wpkh_to_p2pkh(self):
tx = Transaction([self.txin_spend_p2pkh, self.txin_spend_p2wpkh], [self.txout3], has_segwit=True,witnesses = [])
# spend_p2pkh
sig1 = self.sk.sign_input(tx, 0, self.p2pkh_addr.to_script_pub_key())
pk1 = self.sk.get_public_key().to_hex()
self.txin_spend_p2pkh.script_sig = Script([sig1, pk1])
tx.witnesses.append(Script([]))
# spend_p2wpkh
sig2 = self.sk.sign_segwit_input(tx, 1, self.p2pkh_redeem_script, self.txin_spend_p2wpkh_amount)
pk2 = self.sk.get_public_key().to_hex()
tx.witnesses.append(Script([sig2, pk2]))
self.assertEqual(tx.serialize(), self.p2pkh_and_p2wpkh_to_p2pkh_result)
def test_signone_send(self):
"""
SIGHASH_NONE:signs all of the inputs
"""
# First, only txin1 and txout1 are added to the transaction.
tx = Transaction([self.txin1_signone], [self.txout1_signone], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_signone_amount,
SIGHASH_NONE)
tx.witnesses.append(Script([sig_signone, pk]))
# Adding additional output signatures will not be affected
tx.outputs.append(self.txout2_signone)
self.assertEqual(tx.serialize(), self.test_signone_send_result)
def test_sigsingle_send(self):
"""
SIGHASH_SINGLE:signs all inputs but only txin_index output
"""
tx = Transaction([self.txin1_sigsingle], [self.txout1_sigsingle], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_sigsingle_amount,
SIGHASH_SINGLE)
tx.witnesses.append(Script([sig_signone, pk]))
tx.outputs.append(self.txout2_sigsingle)
self.assertEqual(tx.serialize(), self.test_sigsingle_send_result)
def test_siganyonecanpay_all_send(self):
"""
SIGHASH_ALL | SIGHASH_ANYONECANPAY:signs all outputs but only txin_index input
"""
tx = Transaction([self.txin1_siganyonecanpay_all], [self.txout1_siganyonecanpay_all,self.txout2_siganyonecanpay_all], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_siganyonecanpay_all_amount,
SIGHASH_ALL | SIGHASH_ANYONECANPAY)
tx.witnesses.append(Script([sig_signone, pk]))
tx.inputs.append(self.txin2_siganyonecanpay_all)
sig = self.sk.sign_segwit_input(tx, 1, self.p2pkh_redeem_script, self.txin2_siganyonecanpay_all_amount,
SIGHASH_ALL)
tx.witnesses.append(Script([sig, pk]))
self.assertEqual(tx.serialize(), self.test_siganyonecanpay_all_send_result)
def test_siganyonecanpay_none_send(self):
"""
SIGHASH_NONE | SIGHASH_ANYONECANPAY:signs only the txin_index input
"""
tx = Transaction([self.txin1_siganyonecanpay_none], [self.txout1_siganyonecanpay_none], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_siganyonecanpay_none_amount,
SIGHASH_NONE | SIGHASH_ANYONECANPAY)
tx.witnesses.append(Script([sig_signone, pk]))
tx.inputs.append(self.txin2_siganyonecanpay_none)
tx.outputs.append(self.txout2_siganyonecanpay_none)
sig = self.sk.sign_segwit_input(tx, 1, self.p2pkh_redeem_script, self.txin2_siganyonecanpay_none_amount,
SIGHASH_ALL)
tx.witnesses.append(Script([sig, pk]))
self.assertEqual(tx.serialize(), self.test_siganyonecanpay_none_send_result)
def test_siganyonecanpay_single_send(self):
"""
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY:signs txin_index input and output
"""
tx = Transaction([self.txin1_siganyonecanpay_single], [self.txout1_siganyonecanpay_single], has_segwit=True,witnesses = [])
pk = self.sk.get_public_key().to_hex()
sig_signone = self.sk.sign_segwit_input(tx, 0, self.p2pkh_redeem_script, self.txin1_siganyonecanpay_single_amount,
SIGHASH_SINGLE | SIGHASH_ANYONECANPAY)
tx.witnesses.append(Script([sig_signone, pk]))
tx.outputs.append(self.txout2_siganyonecanpay_single)
self.assertEqual(tx.serialize(), self.test_siganyonecanpay_single_send_result)
if __name__ == '__main__':
unittest.main() | 0.341692 | 0.266906 |
UNO_CARDS = [
":R1:549406471633371147", ":R2:549406503602356245", ":R3:549406530298970124", ":R4:549406528642220033", ":R5:549406529602846742", ":R6:549406531347808284", ":R7:549406528470253579", ":R8:549406531079372815", ":R9:549406531700129792", ":RR:549406530437644289", ":RS:549406531679158272", ":RP:549406530932310017",
":G1:549406885946589185", ":G2:549406888127889409", ":G3:549406890812112896", ":G4:549406892787630080", ":G5:549406894956216320", ":G6:549406897053368352", ":G7:549406899460898838", ":G8:549406901763309569", ":G9:549406903969513493", ":GR:549406910944641054", ":GS:549406911171395604", ":GP:549406911410208768",
":B1:549406714248822786", ":B2:549406716912074772", ":B3:549406755847798787", ":B4:549406720854720533", ":B5:549406732485394433", ":B6:549406755722100736", ":B7:549406754254094376", ":B8:549406755701129216", ":B9:549406755680157716", ":BR:549406755218653194", ":BS:549406755596009476", ":BP:549406755755393064",
":Y1:549407012996513803", ":Y2:549407014808453120", ":Y3:549407016607809536", ":Y4:549407018298114060", ":Y5:549407020231688193", ":Y6:549407022215593984", ":Y7:549407023603777547", ":Y8:549407025394745345", ":Y9:549407027911196683", ":YR:549407033720569857", ":YS:549407029656289283", ":YP:549407031757635624",
":R1:549406471633371147", ":R2:549406503602356245", ":R3:549406530298970124", ":R4:549406528642220033", ":R5:549406529602846742", ":R6:549406531347808284", ":R7:549406528470253579", ":R8:549406531079372815", ":R9:549406531700129792", ":RR:549406530437644289", ":RS:549406531679158272", ":RP:549406530932310017",
":G1:549406885946589185", ":G2:549406888127889409", ":G3:549406890812112896", ":G4:549406892787630080", ":G5:549406894956216320", ":G6:549406897053368352", ":G7:549406899460898838", ":G8:549406901763309569", ":G9:549406903969513493", ":GR:549406910944641054", ":GS:549406911171395604", ":GP:549406911410208768",
":B1:549406714248822786", ":B2:549406716912074772", ":B3:549406755847798787", ":B4:549406720854720533", ":B5:549406732485394433", ":B6:549406755722100736", ":B7:549406754254094376", ":B8:549406755701129216", ":B9:549406755680157716", ":BR:549406755218653194", ":BS:549406755596009476", ":BP:549406755755393064",
":Y1:549407012996513803", ":Y2:549407014808453120", ":Y3:549407016607809536", ":Y4:549407018298114060", ":Y5:549407020231688193", ":Y6:549407022215593984", ":Y7:549407023603777547", ":Y8:549407025394745345", ":Y9:549407027911196683", ":YR:549407033720569857", ":YS:549407029656289283", ":YP:549407031757635624",
":W4:549407153736253460", ":WR:549407154118066189", ":W4:549407153736253460", ":WR:549407154118066189"
]
COLOR_CARDS = [
UNO_CARDS[0],
UNO_CARDS[12],
UNO_CARDS[24],
UNO_CARDS[36]
]
RED_CARD = COLOR_CARDS[0]
GREEN_CARD = COLOR_CARDS[1]
BLUE_CARD = COLOR_CARDS[2]
YELLOW_CARD = COLOR_CARDS[3]
REVERSE_CARDS = [
UNO_CARDS[9],
UNO_CARDS[21],
UNO_CARDS[33],
UNO_CARDS[45]
]
SKIP_CARDS = [
UNO_CARDS[10],
UNO_CARDS[22],
UNO_CARDS[34],
UNO_CARDS[46]
]
ADD_2_CARDS = [
UNO_CARDS[11],
UNO_CARDS[23],
UNO_CARDS[35],
UNO_CARDS[47]
]
WILD_CARDS = [
":W4:549407153736253460", ":WR:549407154118066189"
]
ADD_4_CARD = WILD_CARDS[0]
DRAW_UNO = "❓"
QUIT = "❌"
CHALLENGE = "✅"
NO_CHALLENGE = "❎" | cogs/game/minigames/uno/variables.py | UNO_CARDS = [
":R1:549406471633371147", ":R2:549406503602356245", ":R3:549406530298970124", ":R4:549406528642220033", ":R5:549406529602846742", ":R6:549406531347808284", ":R7:549406528470253579", ":R8:549406531079372815", ":R9:549406531700129792", ":RR:549406530437644289", ":RS:549406531679158272", ":RP:549406530932310017",
":G1:549406885946589185", ":G2:549406888127889409", ":G3:549406890812112896", ":G4:549406892787630080", ":G5:549406894956216320", ":G6:549406897053368352", ":G7:549406899460898838", ":G8:549406901763309569", ":G9:549406903969513493", ":GR:549406910944641054", ":GS:549406911171395604", ":GP:549406911410208768",
":B1:549406714248822786", ":B2:549406716912074772", ":B3:549406755847798787", ":B4:549406720854720533", ":B5:549406732485394433", ":B6:549406755722100736", ":B7:549406754254094376", ":B8:549406755701129216", ":B9:549406755680157716", ":BR:549406755218653194", ":BS:549406755596009476", ":BP:549406755755393064",
":Y1:549407012996513803", ":Y2:549407014808453120", ":Y3:549407016607809536", ":Y4:549407018298114060", ":Y5:549407020231688193", ":Y6:549407022215593984", ":Y7:549407023603777547", ":Y8:549407025394745345", ":Y9:549407027911196683", ":YR:549407033720569857", ":YS:549407029656289283", ":YP:549407031757635624",
":R1:549406471633371147", ":R2:549406503602356245", ":R3:549406530298970124", ":R4:549406528642220033", ":R5:549406529602846742", ":R6:549406531347808284", ":R7:549406528470253579", ":R8:549406531079372815", ":R9:549406531700129792", ":RR:549406530437644289", ":RS:549406531679158272", ":RP:549406530932310017",
":G1:549406885946589185", ":G2:549406888127889409", ":G3:549406890812112896", ":G4:549406892787630080", ":G5:549406894956216320", ":G6:549406897053368352", ":G7:549406899460898838", ":G8:549406901763309569", ":G9:549406903969513493", ":GR:549406910944641054", ":GS:549406911171395604", ":GP:549406911410208768",
":B1:549406714248822786", ":B2:549406716912074772", ":B3:549406755847798787", ":B4:549406720854720533", ":B5:549406732485394433", ":B6:549406755722100736", ":B7:549406754254094376", ":B8:549406755701129216", ":B9:549406755680157716", ":BR:549406755218653194", ":BS:549406755596009476", ":BP:549406755755393064",
":Y1:549407012996513803", ":Y2:549407014808453120", ":Y3:549407016607809536", ":Y4:549407018298114060", ":Y5:549407020231688193", ":Y6:549407022215593984", ":Y7:549407023603777547", ":Y8:549407025394745345", ":Y9:549407027911196683", ":YR:549407033720569857", ":YS:549407029656289283", ":YP:549407031757635624",
":W4:549407153736253460", ":WR:549407154118066189", ":W4:549407153736253460", ":WR:549407154118066189"
]
COLOR_CARDS = [
UNO_CARDS[0],
UNO_CARDS[12],
UNO_CARDS[24],
UNO_CARDS[36]
]
RED_CARD = COLOR_CARDS[0]
GREEN_CARD = COLOR_CARDS[1]
BLUE_CARD = COLOR_CARDS[2]
YELLOW_CARD = COLOR_CARDS[3]
REVERSE_CARDS = [
UNO_CARDS[9],
UNO_CARDS[21],
UNO_CARDS[33],
UNO_CARDS[45]
]
SKIP_CARDS = [
UNO_CARDS[10],
UNO_CARDS[22],
UNO_CARDS[34],
UNO_CARDS[46]
]
ADD_2_CARDS = [
UNO_CARDS[11],
UNO_CARDS[23],
UNO_CARDS[35],
UNO_CARDS[47]
]
WILD_CARDS = [
":W4:549407153736253460", ":WR:549407154118066189"
]
ADD_4_CARD = WILD_CARDS[0]
DRAW_UNO = "❓"
QUIT = "❌"
CHALLENGE = "✅"
NO_CHALLENGE = "❎" | 0.177098 | 0.482917 |
import torch
from torch.optim.optimizer import Optimizer
from pytorch_optimizer.base_optimizer import BaseOptimizer
from pytorch_optimizer.types import CLOSURE, DEFAULTS, LOSS, PARAMETERS
from pytorch_optimizer.utils import neuron_mean, neuron_norm
class Nero(Optimizer, BaseOptimizer):
"""
Reference : https://github.com/jxbz/nero
Example :
from pytorch_optimizer import Nero
...
model = YourModel()
optimizer = Nero(model.parameters())
...
for input, output in data:
optimizer.zero_grad()
loss = loss_function(output, model(input))
loss.backward()
optimizer.step()
"""
def __init__(self, params: PARAMETERS, lr: float = 0.01, beta: float = 0.999, constraints: bool = True):
"""AdamP optimizer
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups
:param lr: float. learning rate
:param beta: float. coefficients used for computing running averages of gradient and the squared hessian trace
:param constraints: bool.
"""
self.lr = lr
self.beta = beta
self.validate_parameters()
defaults: DEFAULTS = dict(lr=lr, constraints=constraints)
super().__init__(params, defaults)
def validate_parameters(self):
self.validate_learning_rate(self.lr)
self.validate_beta(self.beta)
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p))
state = self.state[p]
state['step'] = 0
state['exp_avg_sq'] = torch.zeros_like(neuron_norm(p))
state['scale'] = neuron_norm(p).mean()
if state['scale'] == 0.0:
state['scale'] = 0.01
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Nero does not support sparse gradients')
state = self.state[p]
if len(state) == 0:
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p))
state['step'] = 0
state['exp_avg_sq'] = torch.zeros_like(neuron_norm(p))
state['scale'] = neuron_norm(p).mean()
if state['scale'] == 0.0:
state['scale'] = 0.01
state['step'] += 1
bias_correction: float = 1.0 - self.beta ** state['step']
state['exp_avg_sq'] = self.beta * state['exp_avg_sq'] + (1.0 - self.beta) * neuron_norm(grad) ** 2
grad_normed = grad / (state['exp_avg_sq'] / bias_correction).sqrt()
grad_normed[torch.isnan(grad_normed)] = 0.0
p.sub_(group['lr'] * state['scale'] * grad_normed)
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p))
return loss | pytorch_optimizer/nero.py | import torch
from torch.optim.optimizer import Optimizer
from pytorch_optimizer.base_optimizer import BaseOptimizer
from pytorch_optimizer.types import CLOSURE, DEFAULTS, LOSS, PARAMETERS
from pytorch_optimizer.utils import neuron_mean, neuron_norm
class Nero(Optimizer, BaseOptimizer):
"""
Reference : https://github.com/jxbz/nero
Example :
from pytorch_optimizer import Nero
...
model = YourModel()
optimizer = Nero(model.parameters())
...
for input, output in data:
optimizer.zero_grad()
loss = loss_function(output, model(input))
loss.backward()
optimizer.step()
"""
def __init__(self, params: PARAMETERS, lr: float = 0.01, beta: float = 0.999, constraints: bool = True):
"""AdamP optimizer
:param params: PARAMETERS. iterable of parameters to optimize or dicts defining parameter groups
:param lr: float. learning rate
:param beta: float. coefficients used for computing running averages of gradient and the squared hessian trace
:param constraints: bool.
"""
self.lr = lr
self.beta = beta
self.validate_parameters()
defaults: DEFAULTS = dict(lr=lr, constraints=constraints)
super().__init__(params, defaults)
def validate_parameters(self):
self.validate_learning_rate(self.lr)
self.validate_beta(self.beta)
@torch.no_grad()
def reset(self):
for group in self.param_groups:
for p in group['params']:
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p))
state = self.state[p]
state['step'] = 0
state['exp_avg_sq'] = torch.zeros_like(neuron_norm(p))
state['scale'] = neuron_norm(p).mean()
if state['scale'] == 0.0:
state['scale'] = 0.01
@torch.no_grad()
def step(self, closure: CLOSURE = None) -> LOSS:
loss: LOSS = None
if closure is not None:
with torch.enable_grad():
loss = closure()
for group in self.param_groups:
for p in group['params']:
if p.grad is None:
continue
grad = p.grad
if grad.is_sparse:
raise RuntimeError('Nero does not support sparse gradients')
state = self.state[p]
if len(state) == 0:
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p))
state['step'] = 0
state['exp_avg_sq'] = torch.zeros_like(neuron_norm(p))
state['scale'] = neuron_norm(p).mean()
if state['scale'] == 0.0:
state['scale'] = 0.01
state['step'] += 1
bias_correction: float = 1.0 - self.beta ** state['step']
state['exp_avg_sq'] = self.beta * state['exp_avg_sq'] + (1.0 - self.beta) * neuron_norm(grad) ** 2
grad_normed = grad / (state['exp_avg_sq'] / bias_correction).sqrt()
grad_normed[torch.isnan(grad_normed)] = 0.0
p.sub_(group['lr'] * state['scale'] * grad_normed)
if group['constraints'] and p.dim() > 1:
p.sub_(neuron_mean(p))
p.div_(neuron_norm(p))
return loss | 0.90226 | 0.412441 |
from __future__ import absolute_import
import fileinfo
import os
import numpy as np
def ie_filename(theoryname, R = 1.0, oper=False, tag="", fullpath = False, clusteronly = False, theta = 0.0, absh=1.0):
if fullpath:
dirname = fileinfo.XARPATH
else:
dirname = ""
if oper:
if clusteronly:
ending = "xarcluster"
hstring = "absh%0.8fth%0.4f" % (absh,theta)
else:
ending = "xar"
hstring = ""
basename = theoryname + hstring + tag + "oper" + "." + ending
else:
if clusteronly:
ending = "xarcluster"
anglestring = "th%0.4f" % theta
else:
ending = "xar"
anglestring = ""
basename = theoryname + ("R%0.10f" % R) + anglestring + tag + "." + ending
return os.path.join(dirname,basename)
def fd_filename(theoryname, R=1.0, theta=0.0, oper=False, tag = "", fullpath = False, absh = 1.0, pde_nmesh=None):
if fullpath:
dirname= fileinfo.FRAMEPATH
else:
dirname = ""
if oper:
basename = theoryname + ("absh%0.8fth%0.4f" % (absh,theta)) + tag + "oper.frames"
else:
basename = theoryname + ("N%dR%0.10fth%0.4f" % (pde_nmesh,R,theta)) + tag + ".frames"
return os.path.join(dirname, basename)
def ieq_metric_filename(c, Lambda, fullpath = False):
if fullpath:
dirname= fileinfo.HKMETRICPATH
else:
dirname = ""
basename = ("c%0.8fLambda%0.8f" % (c,Lambda)) + ".ieqmetric"
return os.path.join(dirname, basename)
def fd_metric_filename(c, Lambda, fullpath = False):
if fullpath:
dirname= fileinfo.HKMETRICPATH
else:
dirname = ""
basename = ("c%0.8fLambda%0.8f" % (c,Lambda)) + ".fdmetric"
return os.path.join(dirname, basename)
# Is not currently used; usually we need both the check and the filename
def ie_file_exists(theoryname, R, oper=False, tag="", clusteronly = False, theta = 0.0, absh=1.0):
return os.path.isfile(ie_filename(theoryname, R, oper=oper, tag=tag, fullpath=True, clusteronly=clusteronly, theta = theta, absh = absh))
# Is not currently used; usually we need both the check and the filename
def fd_file_exists(theoryname, R, theta = 0.0, oper=False, tag="", absh=1.0, pde_nmesh=None):
return os.path.isfile(fd_filename(theoryname, R, oper=oper, theta=theta, tag=tag, fullpath=True, absh=absh, ode_thresh=ode_thresh, pde_nmesh=pde_nmesh)) | namegen.py |
from __future__ import absolute_import
import fileinfo
import os
import numpy as np
def ie_filename(theoryname, R = 1.0, oper=False, tag="", fullpath = False, clusteronly = False, theta = 0.0, absh=1.0):
if fullpath:
dirname = fileinfo.XARPATH
else:
dirname = ""
if oper:
if clusteronly:
ending = "xarcluster"
hstring = "absh%0.8fth%0.4f" % (absh,theta)
else:
ending = "xar"
hstring = ""
basename = theoryname + hstring + tag + "oper" + "." + ending
else:
if clusteronly:
ending = "xarcluster"
anglestring = "th%0.4f" % theta
else:
ending = "xar"
anglestring = ""
basename = theoryname + ("R%0.10f" % R) + anglestring + tag + "." + ending
return os.path.join(dirname,basename)
def fd_filename(theoryname, R=1.0, theta=0.0, oper=False, tag = "", fullpath = False, absh = 1.0, pde_nmesh=None):
if fullpath:
dirname= fileinfo.FRAMEPATH
else:
dirname = ""
if oper:
basename = theoryname + ("absh%0.8fth%0.4f" % (absh,theta)) + tag + "oper.frames"
else:
basename = theoryname + ("N%dR%0.10fth%0.4f" % (pde_nmesh,R,theta)) + tag + ".frames"
return os.path.join(dirname, basename)
def ieq_metric_filename(c, Lambda, fullpath = False):
if fullpath:
dirname= fileinfo.HKMETRICPATH
else:
dirname = ""
basename = ("c%0.8fLambda%0.8f" % (c,Lambda)) + ".ieqmetric"
return os.path.join(dirname, basename)
def fd_metric_filename(c, Lambda, fullpath = False):
if fullpath:
dirname= fileinfo.HKMETRICPATH
else:
dirname = ""
basename = ("c%0.8fLambda%0.8f" % (c,Lambda)) + ".fdmetric"
return os.path.join(dirname, basename)
# Is not currently used; usually we need both the check and the filename
def ie_file_exists(theoryname, R, oper=False, tag="", clusteronly = False, theta = 0.0, absh=1.0):
return os.path.isfile(ie_filename(theoryname, R, oper=oper, tag=tag, fullpath=True, clusteronly=clusteronly, theta = theta, absh = absh))
# Is not currently used; usually we need both the check and the filename
def fd_file_exists(theoryname, R, theta = 0.0, oper=False, tag="", absh=1.0, pde_nmesh=None):
return os.path.isfile(fd_filename(theoryname, R, oper=oper, theta=theta, tag=tag, fullpath=True, absh=absh, ode_thresh=ode_thresh, pde_nmesh=pde_nmesh)) | 0.401805 | 0.065995 |
from unittest.mock import AsyncMock
import pytest
from nano_magic.adapters.client_channel import ClientChannel
from nano_magic.adapters.messages import END_DECK
from nano_magic.adapters.messages import POSITIVES
@pytest.fixture
def cards():
return [str(i) for i in range(7)]
@pytest.mark.asyncio
async def test_request_player_id():
expected_id = '0'
channel = AsyncMock()
channel.receive.return_value = expected_id
client = ClientChannel(channel)
player_id = await client.request_player_id()
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(player_id) is str
assert expected_id == player_id
@pytest.mark.asyncio
async def test_request_deck(cards):
channel = AsyncMock()
cards.append(END_DECK)
channel.receive.side_effect = cards
client = ClientChannel(channel)
deck_entries = [i async for i in client.request_deck()]
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert len(deck_entries) == len(cards) - 1
assert type(deck_entries) is list
assert type(deck_entries[0]) is str
assert deck_entries == cards[:-1]
@pytest.mark.asyncio
async def test_request_match_id():
channel = AsyncMock()
expected_id = '0'
channel.receive.return_value = expected_id
client = ClientChannel(channel)
match_id = await client.request_match_id()
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(match_id) is str
assert expected_id == match_id
@pytest.mark.asyncio
async def test_request_match_password():
channel = AsyncMock()
expected_password = '0'
channel.receive.return_value = expected_password
client = ClientChannel(channel)
password = await client.request_match_password()
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(password) is str
assert expected_password == password
@pytest.mark.asyncio
async def test_prompt_mulligan_positive(cards):
channel = AsyncMock()
expected_answer = POSITIVES[0]
channel.receive.return_value = expected_answer
client = ClientChannel(channel)
mulligan = await client.prompt_mulligan(cards)
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(mulligan) is bool
assert mulligan
@pytest.mark.asyncio
async def test_prompt_mulligan_negative(cards):
channel = AsyncMock()
# the sum of all elements in a set does not belong to the set.
# sum(range(10)) not in range(10)
expected_answer = ''.join(POSITIVES)
channel.receive.return_value = expected_answer
client = ClientChannel(channel)
mulligan = await client.prompt_mulligan(cards)
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(mulligan) is bool
assert not mulligan
@pytest.mark.asyncio
async def test_request_card_in_hand(cards):
channel = AsyncMock()
client = ClientChannel(channel)
expected_index = 0
channel.receive.return_value = str(expected_index)
index = await client.request_card_in_hand(cards)
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(index) is int
assert index == expected_index
@pytest.mark.asyncio
async def test_set_hand(cards):
channel = AsyncMock()
client = ClientChannel(channel)
await client.set_hand(cards)
channel.send.assert_awaited()
@pytest.mark.asyncio
async def test_send_wait():
channel = AsyncMock()
client = ClientChannel(channel)
await client.send_wait()
channel.send.assert_awaited()
@pytest.mark.asyncio
async def test_set_board(cards):
channel = AsyncMock()
client = ClientChannel(channel)
await client.set_board(cards)
channel.send.assert_awaited() | tests/unit/nano_tcg/adapters/test_client_channel.py | from unittest.mock import AsyncMock
import pytest
from nano_magic.adapters.client_channel import ClientChannel
from nano_magic.adapters.messages import END_DECK
from nano_magic.adapters.messages import POSITIVES
@pytest.fixture
def cards():
return [str(i) for i in range(7)]
@pytest.mark.asyncio
async def test_request_player_id():
expected_id = '0'
channel = AsyncMock()
channel.receive.return_value = expected_id
client = ClientChannel(channel)
player_id = await client.request_player_id()
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(player_id) is str
assert expected_id == player_id
@pytest.mark.asyncio
async def test_request_deck(cards):
channel = AsyncMock()
cards.append(END_DECK)
channel.receive.side_effect = cards
client = ClientChannel(channel)
deck_entries = [i async for i in client.request_deck()]
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert len(deck_entries) == len(cards) - 1
assert type(deck_entries) is list
assert type(deck_entries[0]) is str
assert deck_entries == cards[:-1]
@pytest.mark.asyncio
async def test_request_match_id():
channel = AsyncMock()
expected_id = '0'
channel.receive.return_value = expected_id
client = ClientChannel(channel)
match_id = await client.request_match_id()
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(match_id) is str
assert expected_id == match_id
@pytest.mark.asyncio
async def test_request_match_password():
channel = AsyncMock()
expected_password = '0'
channel.receive.return_value = expected_password
client = ClientChannel(channel)
password = await client.request_match_password()
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(password) is str
assert expected_password == password
@pytest.mark.asyncio
async def test_prompt_mulligan_positive(cards):
channel = AsyncMock()
expected_answer = POSITIVES[0]
channel.receive.return_value = expected_answer
client = ClientChannel(channel)
mulligan = await client.prompt_mulligan(cards)
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(mulligan) is bool
assert mulligan
@pytest.mark.asyncio
async def test_prompt_mulligan_negative(cards):
channel = AsyncMock()
# the sum of all elements in a set does not belong to the set.
# sum(range(10)) not in range(10)
expected_answer = ''.join(POSITIVES)
channel.receive.return_value = expected_answer
client = ClientChannel(channel)
mulligan = await client.prompt_mulligan(cards)
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(mulligan) is bool
assert not mulligan
@pytest.mark.asyncio
async def test_request_card_in_hand(cards):
channel = AsyncMock()
client = ClientChannel(channel)
expected_index = 0
channel.receive.return_value = str(expected_index)
index = await client.request_card_in_hand(cards)
channel.send.assert_awaited()
channel.receive.assert_awaited()
assert type(index) is int
assert index == expected_index
@pytest.mark.asyncio
async def test_set_hand(cards):
channel = AsyncMock()
client = ClientChannel(channel)
await client.set_hand(cards)
channel.send.assert_awaited()
@pytest.mark.asyncio
async def test_send_wait():
channel = AsyncMock()
client = ClientChannel(channel)
await client.send_wait()
channel.send.assert_awaited()
@pytest.mark.asyncio
async def test_set_board(cards):
channel = AsyncMock()
client = ClientChannel(channel)
await client.set_board(cards)
channel.send.assert_awaited() | 0.744099 | 0.601067 |
import wx
import time
import analyse as m
# Define the tab content as classes:
class tabGather(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.text = wx.StaticText(self, -1, "Please enter a #hashtag to search on Twitter", (45,20))
self.hashtagTextBox=wx.TextCtrl(self, -1, pos=(40,50),size= (300,-1))
self.button = wx.Button(self, id=wx.ID_ANY, label="Get Tweets", pos=(150,100))
self.button.Bind(wx.EVT_BUTTON, self.onClick)
self.workingText=wx.StaticText(self,-1,'Working on it...',((150,140)))
self.workingText.SetForegroundColour((69,139,0))
self.workingText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.doneText=wx.StaticText(self,-1,"Done!",((150,140)))
self.doneText.SetForegroundColour((69,139,0))
self.doneText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.errText=wx.StaticText(self,-1,'Please enter a valid hashtag',((100,140)))
self.errText.SetForegroundColour((255,0,0))
self.errText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.workingText.Hide()
self.errText.Hide()
self.doneText.Hide()
def onClick(self, event):
"""
This method is fired when its corresponding button is pressed
"""
hashtag = str(self.hashtagTextBox.GetValue())
self.doneText.Hide()
if hashtag is '':
self.errText.Show()
else:
self.doneText.Hide()
self.errText.Hide()
self.button.Disable()
self.workingText.Show()
wx.Yield()
m.fillTweets(hashtag)
self.hashtagTextBox.ChangeValue('')
self.button.Enable()
self.workingText.Hide()
self.doneText.Show()
class tabCompare(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
t = wx.StaticText(self, -1, "Select a search key to compare", (45,20))
search_keys = m.getSearchKeys()
self.combo = wx.ComboBox(self,-1, "All",choices = search_keys,pos=(40,50),size= (300,-1),style=wx.CB_READONLY)
self.button = wx.Button(self, id=wx.ID_ANY, label="Compare", pos=(150,100))
self.button.Bind(wx.EVT_BUTTON, self.onClick)
self.workingText=wx.StaticText(self,-1,'Working on it...',((150,140)))
self.workingText.SetForegroundColour((69,139,0))
self.workingText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.workingText.Hide()
self.doneText=wx.StaticText(self,-1,"Done!",((150,140)))
self.doneText.SetForegroundColour((69,139,0))
self.doneText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
def onClick(self, event):
self.searchkey = str(self.combo.GetValue())
self.doneText.Hide()
self.workingText.Show()
wx.Yield()
m.compare(self.searchkey)
self.workingText.Hide()
self.doneText.Show()
class tabAbout(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
t = wx.StaticText(self, -1, "This software has been developed by <NAME>", (20,20))
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="TweetSentimental",size=(400,250), style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX)
# Create a panel and notebook (tabs holder)
p = wx.Panel(self)
nb = wx.Notebook(p)
# Create the tab windows
tab1 = tabGather(nb)
tab2 = tabCompare(nb)
tab3 = tabAbout(nb)
# Add the windows to tabs and name them.
nb.AddPage(tab1, "Get Tweets")
nb.AddPage(tab2, "Compare")
nb.AddPage(tab3, "About")
# Set noteboook in a sizer to create the layout
sizer = wx.BoxSizer()
sizer.Add(nb, 1, wx.EXPAND)
p.SetSizer(sizer)
if __name__ == "__main__":
app = wx.App()
frame = MainFrame()
frame.Center()
frame.Show()
app.MainLoop() | main.py | import wx
import time
import analyse as m
# Define the tab content as classes:
class tabGather(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
self.text = wx.StaticText(self, -1, "Please enter a #hashtag to search on Twitter", (45,20))
self.hashtagTextBox=wx.TextCtrl(self, -1, pos=(40,50),size= (300,-1))
self.button = wx.Button(self, id=wx.ID_ANY, label="Get Tweets", pos=(150,100))
self.button.Bind(wx.EVT_BUTTON, self.onClick)
self.workingText=wx.StaticText(self,-1,'Working on it...',((150,140)))
self.workingText.SetForegroundColour((69,139,0))
self.workingText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.doneText=wx.StaticText(self,-1,"Done!",((150,140)))
self.doneText.SetForegroundColour((69,139,0))
self.doneText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.errText=wx.StaticText(self,-1,'Please enter a valid hashtag',((100,140)))
self.errText.SetForegroundColour((255,0,0))
self.errText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.workingText.Hide()
self.errText.Hide()
self.doneText.Hide()
def onClick(self, event):
"""
This method is fired when its corresponding button is pressed
"""
hashtag = str(self.hashtagTextBox.GetValue())
self.doneText.Hide()
if hashtag is '':
self.errText.Show()
else:
self.doneText.Hide()
self.errText.Hide()
self.button.Disable()
self.workingText.Show()
wx.Yield()
m.fillTweets(hashtag)
self.hashtagTextBox.ChangeValue('')
self.button.Enable()
self.workingText.Hide()
self.doneText.Show()
class tabCompare(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
t = wx.StaticText(self, -1, "Select a search key to compare", (45,20))
search_keys = m.getSearchKeys()
self.combo = wx.ComboBox(self,-1, "All",choices = search_keys,pos=(40,50),size= (300,-1),style=wx.CB_READONLY)
self.button = wx.Button(self, id=wx.ID_ANY, label="Compare", pos=(150,100))
self.button.Bind(wx.EVT_BUTTON, self.onClick)
self.workingText=wx.StaticText(self,-1,'Working on it...',((150,140)))
self.workingText.SetForegroundColour((69,139,0))
self.workingText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
self.workingText.Hide()
self.doneText=wx.StaticText(self,-1,"Done!",((150,140)))
self.doneText.SetForegroundColour((69,139,0))
self.doneText.SetFont(wx.Font(10, wx.DEFAULT, wx.ITALIC, wx.NORMAL))
def onClick(self, event):
self.searchkey = str(self.combo.GetValue())
self.doneText.Hide()
self.workingText.Show()
wx.Yield()
m.compare(self.searchkey)
self.workingText.Hide()
self.doneText.Show()
class tabAbout(wx.Panel):
def __init__(self, parent):
wx.Panel.__init__(self, parent)
t = wx.StaticText(self, -1, "This software has been developed by <NAME>", (20,20))
class MainFrame(wx.Frame):
def __init__(self):
wx.Frame.__init__(self, None, title="TweetSentimental",size=(400,250), style= wx.SYSTEM_MENU | wx.CAPTION | wx.CLOSE_BOX)
# Create a panel and notebook (tabs holder)
p = wx.Panel(self)
nb = wx.Notebook(p)
# Create the tab windows
tab1 = tabGather(nb)
tab2 = tabCompare(nb)
tab3 = tabAbout(nb)
# Add the windows to tabs and name them.
nb.AddPage(tab1, "Get Tweets")
nb.AddPage(tab2, "Compare")
nb.AddPage(tab3, "About")
# Set noteboook in a sizer to create the layout
sizer = wx.BoxSizer()
sizer.Add(nb, 1, wx.EXPAND)
p.SetSizer(sizer)
if __name__ == "__main__":
app = wx.App()
frame = MainFrame()
frame.Center()
frame.Show()
app.MainLoop() | 0.251096 | 0.058588 |
import mxnet as mx
import json
import os
import logging
class MXNetVisionServiceBatching(object):
def __init__(self):
"""
Initialization for MXNet Vision Service supporting batch inference
"""
self.mxnet_ctx = None
self.mx_model = None
self.labels = None
self.epoch = 0
self._context = None
self._batch_size = 0
self.initialized = False
self.erroneous_reqs = set()
def initialize(self, context):
"""
Initialize model. This will be called during model loading time
:param context: Initial context contains model server system properties.
:return:
"""
self._context = context
self._batch_size = context.system_properties["batch_size"] if context is not None else "1"
self.initialized = True
properties = context.system_properties if context is not None \
else {"model_dir": os.getcwd()}
model_dir = properties.get("model_dir")
gpu_id = properties.get("gpu_id")
model_files_prefix = context.manifest["model"]["modelName"] if context is not None else "mnist_cnn"
data_names = ["/conv2d_1_input1"]
data_shapes = [(data_names[0], (1, 28, 28, 1))]
checkpoint_prefix = "{}/{}".format(model_dir, model_files_prefix)
# Load MXNet module
self.mxnet_ctx = mx.cpu() if gpu_id is None else mx.gpu(gpu_id)
sym, arg_params, aux_params = mx.model.load_checkpoint(checkpoint_prefix, self.epoch)
# noinspection PyTypeChecker
self.mx_model = mx.mod.Module(symbol=sym, context=self.mxnet_ctx,
data_names=data_names, label_names=None)
self.mx_model.bind(for_training=False, data_shapes=data_shapes)
self.mx_model.set_params(arg_params, aux_params, allow_missing=True, allow_extra=True)
def inference(self, model_input):
"""
Internal inference methods for MXNet. Run forward computation and
return output.
:param model_input: list of NDArray
Preprocessed inputs in NDArray format.
:return: list of NDArray
Inference output.
"""
data_iter = mx.io.NDArrayIter(model_input, None, 1)
outputs = self.mx_model.predict(data_iter)
res = mx.ndarray.split(outputs[0], axis=0, num_outputs=outputs[0].shape[0])
return res
def preprocess(self, request):
"""
Decode all input images into ndarray.
Note: This implementation doesn't properly handle error cases in batch mode,
If one of the input images is corrupted, all requests in the batch will fail.
:param request:
:return:
"""
img_list = []
param_name = "/conv2d_1_input1"
input_shape = [128, 28, 28, 1] # Channels last
h = input_shape[1]
w = input_shape[2]
for idx, data in enumerate(request):
img = data.get(param_name)
if img is None:
img = data.get("body")
if img is None:
img = data.get("data")
if img is None or len(img) == 0:
logging.error("Error processing request")
self.erroneous_reqs.add(idx)
continue
try:
img_arr = mx.image.imdecode(img, 0, True, None)
except Exception as e:
logging.error(e, exc_info=True)
raise
img_arr = mx.image.imresize(img_arr, w, h)
img_arr = img_arr.astype("float32")
img_arr /= 255
img_list.append(img_arr)
reqs = mx.nd.stack(*img_list)
reqs = reqs.as_in_context(self.mxnet_ctx)
return reqs
def postprocess(self, data):
m = max(data)
val = [i for i, j in enumerate(data) if j == m]
return ["Prediction is {} with probability of {}%".format(val, m.asscalar()*100)]
_service = MXNetVisionServiceBatching()
def handle(data, context):
if not _service.initialized:
_service.initialize(context)
if data is None:
return None
try:
data = _service.preprocess(data)
data = _service.inference(data)
data = _service.postprocess(data)
return data
except Exception as e:
logging.error(e, exc_info=True)
raise
if __name__ == "__main__":
f = open("utils/9.png", "rb")
img = f.read()
d_in = [{"data": img}]
print(handle(d_in, None)) | samples/mnist/inference/mxnet/mnist_cnn_inference.py |
import mxnet as mx
import json
import os
import logging
class MXNetVisionServiceBatching(object):
def __init__(self):
"""
Initialization for MXNet Vision Service supporting batch inference
"""
self.mxnet_ctx = None
self.mx_model = None
self.labels = None
self.epoch = 0
self._context = None
self._batch_size = 0
self.initialized = False
self.erroneous_reqs = set()
def initialize(self, context):
"""
Initialize model. This will be called during model loading time
:param context: Initial context contains model server system properties.
:return:
"""
self._context = context
self._batch_size = context.system_properties["batch_size"] if context is not None else "1"
self.initialized = True
properties = context.system_properties if context is not None \
else {"model_dir": os.getcwd()}
model_dir = properties.get("model_dir")
gpu_id = properties.get("gpu_id")
model_files_prefix = context.manifest["model"]["modelName"] if context is not None else "mnist_cnn"
data_names = ["/conv2d_1_input1"]
data_shapes = [(data_names[0], (1, 28, 28, 1))]
checkpoint_prefix = "{}/{}".format(model_dir, model_files_prefix)
# Load MXNet module
self.mxnet_ctx = mx.cpu() if gpu_id is None else mx.gpu(gpu_id)
sym, arg_params, aux_params = mx.model.load_checkpoint(checkpoint_prefix, self.epoch)
# noinspection PyTypeChecker
self.mx_model = mx.mod.Module(symbol=sym, context=self.mxnet_ctx,
data_names=data_names, label_names=None)
self.mx_model.bind(for_training=False, data_shapes=data_shapes)
self.mx_model.set_params(arg_params, aux_params, allow_missing=True, allow_extra=True)
def inference(self, model_input):
"""
Internal inference methods for MXNet. Run forward computation and
return output.
:param model_input: list of NDArray
Preprocessed inputs in NDArray format.
:return: list of NDArray
Inference output.
"""
data_iter = mx.io.NDArrayIter(model_input, None, 1)
outputs = self.mx_model.predict(data_iter)
res = mx.ndarray.split(outputs[0], axis=0, num_outputs=outputs[0].shape[0])
return res
def preprocess(self, request):
"""
Decode all input images into ndarray.
Note: This implementation doesn't properly handle error cases in batch mode,
If one of the input images is corrupted, all requests in the batch will fail.
:param request:
:return:
"""
img_list = []
param_name = "/conv2d_1_input1"
input_shape = [128, 28, 28, 1] # Channels last
h = input_shape[1]
w = input_shape[2]
for idx, data in enumerate(request):
img = data.get(param_name)
if img is None:
img = data.get("body")
if img is None:
img = data.get("data")
if img is None or len(img) == 0:
logging.error("Error processing request")
self.erroneous_reqs.add(idx)
continue
try:
img_arr = mx.image.imdecode(img, 0, True, None)
except Exception as e:
logging.error(e, exc_info=True)
raise
img_arr = mx.image.imresize(img_arr, w, h)
img_arr = img_arr.astype("float32")
img_arr /= 255
img_list.append(img_arr)
reqs = mx.nd.stack(*img_list)
reqs = reqs.as_in_context(self.mxnet_ctx)
return reqs
def postprocess(self, data):
m = max(data)
val = [i for i, j in enumerate(data) if j == m]
return ["Prediction is {} with probability of {}%".format(val, m.asscalar()*100)]
_service = MXNetVisionServiceBatching()
def handle(data, context):
if not _service.initialized:
_service.initialize(context)
if data is None:
return None
try:
data = _service.preprocess(data)
data = _service.inference(data)
data = _service.postprocess(data)
return data
except Exception as e:
logging.error(e, exc_info=True)
raise
if __name__ == "__main__":
f = open("utils/9.png", "rb")
img = f.read()
d_in = [{"data": img}]
print(handle(d_in, None)) | 0.705379 | 0.211335 |
import sys
import time
import sunspec2.modbus.client as client
import sunspec2.file.client as file_client
from optparse import OptionParser
"""
Original suns options:
-o: output mode for data (text, xml)
-x: export model description (slang, xml)
-t: transport type: tcp or rtu (default: tcp)
-a: modbus slave address (default: 1)
-i: ip address to use for modbus tcp (default: localhost)
-P: port number for modbus tcp (default: 502)
-p: serial port for modbus rtu (default: /dev/ttyUSB0)
-R: parity for modbus rtu: None, E (default: None)
-b: baud rate for modbus rtu (default: 9600)
-T: timeout, in seconds (can be fractional, such as 1.5; default: 2.0)
-r: number of retries attempted for each modbus read
-m: specify model file
-M: specify directory containing model files
-s: run as a test server
-I: logger id (for sunspec logger xml output)
-N: logger id namespace (for sunspec logger xml output, defaults to 'mac')
-l: limit number of registers requested in a single read (max is 125)
-c: check models for internal consistency then exit
-v: verbose level (up to -vvvv for most verbose)
-V: print current release number and exit
"""
if __name__ == "__main__":
    # Command-line front end: build the option parser, open the requested
    # SunSpec device (TCP, RTU, or file), then scan and print its models.
    usage = 'usage: %prog [options]'
    parser = OptionParser(usage=usage)
    parser.add_option('-t', metavar=' ',
                      default='tcp',
                      help='transport type: rtu, tcp, file [default: tcp]')
    parser.add_option('-a', metavar=' ', type='int',
                      default=1,
                      help='modbus slave address [default: 1]')
    parser.add_option('-i', metavar=' ',
                      default='localhost',
                      help='ip address to use for modbus tcp [default: localhost]')
    parser.add_option('-P', metavar=' ', type='int',
                      default=502,
                      help='port number for modbus tcp [default: 502]')
    parser.add_option('-p', metavar=' ',
                      default='/dev/ttyUSB0',
                      help='serial port for modbus rtu [default: /dev/ttyUSB0]')
    # Bug fix: '-b' had no type, so a user-supplied baud rate reached the RTU
    # client as a string (only the default 9600 was an int). Parse it as an
    # int, consistent with '-a' and '-P'.
    parser.add_option('-b', metavar=' ', type='int',
                      default=9600,
                      help='baud rate for modbus rtu [default: 9600]')
    parser.add_option('-R', metavar=' ',
                      default=None,
                      help='parity for modbus rtu: None, E [default: None]')
    parser.add_option('-T', metavar=' ', type='float',
                      default=2.0,
                      help='timeout, in seconds (can be fractional, such as 1.5) [default: 2.0]')
    parser.add_option('-m', metavar=' ',
                      help='modbus map file')
    options, args = parser.parse_args()

    sd = None  # defensive: keep the name bound for the final check below
    try:
        if options.t == 'tcp':
            sd = client.SunSpecModbusClientDeviceTCP(slave_id=options.a, ipaddr=options.i, ipport=options.P,
                                                     timeout=options.T)
        elif options.t == 'rtu':
            sd = client.SunSpecModbusClientDeviceRTU(slave_id=options.a, name=options.p, baudrate=options.b,
                                                     parity=options.R, timeout=options.T)
        elif options.t == 'file':
            sd = file_client.FileClientDevice(filename=options.m)
        else:
            print('Unknown -t option: %s' % (options.t))
            sys.exit(1)
    except client.SunSpecModbusClientError as e:
        print('Error: %s' % e)
        sys.exit(1)
    except file_client.FileClientError as e:
        print('Error: %s' % e)
        sys.exit(1)

    if sd is not None:
        print('\nTimestamp: %s' % (time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())))
        # read all models in the device
        sd.scan()
        print(sd.get_text())
import time
import sunspec2.modbus.client as client
import sunspec2.file.client as file_client
from optparse import OptionParser
"""
Original suns options:
-o: output mode for data (text, xml)
-x: export model description (slang, xml)
-t: transport type: tcp or rtu (default: tcp)
-a: modbus slave address (default: 1)
-i: ip address to use for modbus tcp (default: localhost)
-P: port number for modbus tcp (default: 502)
-p: serial port for modbus rtu (default: /dev/ttyUSB0)
-R: parity for modbus rtu: None, E (default: None)
-b: baud rate for modbus rtu (default: 9600)
-T: timeout, in seconds (can be fractional, such as 1.5; default: 2.0)
-r: number of retries attempted for each modbus read
-m: specify model file
-M: specify directory containing model files
-s: run as a test server
-I: logger id (for sunspec logger xml output)
-N: logger id namespace (for sunspec logger xml output, defaults to 'mac')
-l: limit number of registers requested in a single read (max is 125)
-c: check models for internal consistency then exit
-v: verbose level (up to -vvvv for most verbose)
-V: print current release number and exit
"""
if __name__ == "__main__":
usage = 'usage: %prog [options]'
parser = OptionParser(usage=usage)
parser.add_option('-t', metavar=' ',
default='tcp',
help='transport type: rtu, tcp, file [default: tcp]')
parser.add_option('-a', metavar=' ', type='int',
default=1,
help='modbus slave address [default: 1]')
parser.add_option('-i', metavar=' ',
default='localhost',
help='ip address to use for modbus tcp [default: localhost]')
parser.add_option('-P', metavar=' ', type='int',
default=502,
help='port number for modbus tcp [default: 502]')
parser.add_option('-p', metavar=' ',
default='/dev/ttyUSB0',
help='serial port for modbus rtu [default: /dev/ttyUSB0]')
parser.add_option('-b', metavar=' ',
default=9600,
help='baud rate for modbus rtu [default: 9600]')
parser.add_option('-R', metavar=' ',
default=None,
help='parity for modbus rtu: None, E [default: None]')
parser.add_option('-T', metavar=' ', type='float',
default=2.0,
help='timeout, in seconds (can be fractional, such as 1.5) [default: 2.0]')
parser.add_option('-m', metavar=' ',
help='modbus map file')
options, args = parser.parse_args()
try:
if options.t == 'tcp':
sd = client.SunSpecModbusClientDeviceTCP(slave_id=options.a, ipaddr=options.i, ipport=options.P,
timeout=options.T)
elif options.t == 'rtu':
sd = client.SunSpecModbusClientDeviceRTU(slave_id=options.a, name=options.p, baudrate=options.b,
parity=options.R, timeout=options.T)
elif options.t == 'file':
sd = file_client.FileClientDevice(filename=options.m)
else:
print('Unknown -t option: %s' % (options.t))
sys.exit(1)
except client.SunSpecModbusClientError as e:
print('Error: %s' % e)
sys.exit(1)
except file_client.FileClientError as e:
print('Error: %s' % e)
sys.exit(1)
if sd is not None:
print( '\nTimestamp: %s' % (time.strftime('%Y-%m-%dT%H:%M:%SZ', time.gmtime())))
# read all models in the device
sd.scan()
print(sd.get_text()) | 0.282196 | 0.125172 |
from typing import Any, List, Optional, Union

import ee


def getTimeSeriesByRegion(
    x: ee.ImageCollection,
    reducer: Any,
    bands: Optional[Union[str, List[str]]] = None,
    geometry: Optional[Union[ee.Geometry, ee.Feature, ee.FeatureCollection]] = None,
    scale: Optional[Union[int, float]] = None,
    crs: Optional[Any] = None,
    crsTransform: Optional[Any] = None,
    bestEffort: bool = False,
    maxPixels: Union[int, float] = 1e12,
    tileScale: int = 1,
    dateColumn: str = "date",
    dateFormat: str = "ISO",
    naValue: Union[int, float] = -9999,
):
    """Gets the time series by region for the given image collection and geometry (feature
    or feature collection are also supported) according to the specified reducer (or
    reducers).

    Args:
        x : Image collection to get the time series from.
        reducer : Reducer or list of reducers to use for region reduction.
        bands : Selection of bands to get the time series from. Defaults to all bands in
            the image collection.
        geometry : Geometry to perform the region reduction. If ee.Feature or
            ee.FeatureCollection, the geometry() method is called. In order to get
            reductions by each feature please see the getTimeSeriesByRegions() method.
            Defaults to the footprint of the first band for each image in the collection.
        scale : Nominal scale in meters.
        crs : The projection to work in. If unspecified, the projection of the image's
            first band is used. If specified in addition to scale, rescaled to the
            specified scale.
        crsTransform : The list of CRS transform values. This is a row-major ordering of
            the 3x2 transform matrix. This option is mutually exclusive with 'scale', and
            replaces any transform already set on the projection.
        bestEffort : If the polygon would contain too many pixels at the given scale,
            compute and use a larger scale which would allow the operation to succeed.
        maxPixels : The maximum number of pixels to reduce.
        tileScale : A scaling factor used to reduce aggregation tile size; using a larger
            tileScale (e.g. 2 or 4) may enable computations that run out of memory with
            the default.
        dateColumn : Output name of the date column.
        dateFormat : Output format of the date column. Defaults to ISO. Available options:
            'ms' (for milliseconds), 'ISO' (for ISO Standard Format) or a custom format
            pattern.
        naValue : Value to use as NA when the region reduction doesn't retrieve a value
            due to masked pixels.

    Returns:
        Time series by region retrieved as a Feature Collection.

    Examples:
        >>> import ee
        >>> from ee_extra.TimeSeries.core import getTimeSeriesByRegion
        >>> ee.Initialize()
        >>> f1 = ee.Feature(ee.Geometry.Point([3.984770,48.767221]).buffer(50),{'ID':'A'})
        >>> f2 = ee.Feature(ee.Geometry.Point([4.101367,48.748076]).buffer(50),{'ID':'B'})
        >>> fc = ee.FeatureCollection([f1,f2])
        >>> S2 = (ee.ImageCollection('COPERNICUS/S2_SR')
        ...       .filterBounds(fc)
        ...       .filterDate('2020-01-01','2021-01-01'))
        >>> ts = getTimeSeriesByRegion(S2,
        ...                            reducer = [ee.Reducer.mean(),ee.Reducer.median()],
        ...                            geometry = fc,
        ...                            bands = ['B4','B8'],
        ...                            scale = 10)
    """
    if bands is not None:
        if not isinstance(bands, list):
            bands = [bands]
        x = x.select(bands)
    else:
        bands = x.first().bandNames().getInfo()
    if not isinstance(reducer, list):
        reducer = [reducer]
    # Bug fix: a None geometry used to raise AttributeError on .geometry().
    # Leave it as None so reduceRegion falls back to each image's footprint,
    # as the docstring promises.
    if geometry is not None and not isinstance(geometry, ee.geometry.Geometry):
        geometry = geometry.geometry()
    collections = []
    for red in reducer:
        reducerName = red.getOutputs().get(0)

        # red/reducerName are bound as default arguments so the mapped
        # function does not depend on late-binding closure semantics.
        def reduceImageCollectionByRegion(img, red=red, reducerName=reducerName):
            dictionary = img.reduceRegion(
                red,
                geometry,
                scale,
                crs,
                crsTransform,
                bestEffort,
                maxPixels,
                tileScale,
            )
            if dateFormat == "ms":
                date = ee.Date(img.get("system:time_start")).millis()
            elif dateFormat == "ISO":
                date = ee.Date(img.get("system:time_start")).format()
            else:
                date = ee.Date(img.get("system:time_start")).format(dateFormat)
            return ee.Feature(None, dictionary).set(
                {dateColumn: date, "reducer": reducerName}
            )

        collections.append(ee.FeatureCollection(x.map(reduceImageCollectionByRegion)))
    flattenfc = ee.FeatureCollection(collections).flatten()

    def setNA(feature):
        # A fully masked reduction produces a feature carrying only its three
        # system properties; fill the missing band values with naValue.
        feature = ee.Algorithms.If(
            condition=feature.propertyNames().size().eq(3),
            trueCase=feature.set(
                ee.Dictionary.fromLists(bands, [naValue] * len(bands))
            ),
            falseCase=feature,
        )
        return ee.Feature(feature)

    return flattenfc.map(setNA)
def getTimeSeriesByRegions(
    x: ee.ImageCollection,
    reducer: Any,
    collection: ee.FeatureCollection,
    bands: Optional[Union[str, List[str]]] = None,
    scale: Optional[Union[int, float]] = None,
    crs: Optional[Any] = None,
    crsTransform: Optional[Any] = None,
    tileScale: int = 1,
    dateColumn: str = "date",
    dateFormat: str = "ISO",
    naValue: Union[int, float] = -9999,
):
    """Gets the time series by regions for the given image collection and feature
    collection according to the specified reducer (or reducers).

    Args:
        x : Image collection to get the time series from.
        reducer : Reducer or list of reducers to use for region reduction.
        collection : Feature Collection to perform the reductions on. Image reductions are
            applied to each feature in the collection.
        bands : Selection of bands to get the time series from. Defaults to all bands in
            the image collection.
        scale : Nominal scale in meters.
        crs : The projection to work in. If unspecified, the projection of the image's
            first band is used. If specified in addition to scale, rescaled to the
            specified scale.
        crsTransform : The list of CRS transform values. This is a row-major ordering of
            the 3x2 transform matrix. This option is mutually exclusive with 'scale', and
            replaces any transform already set on the projection.
        tileScale : A scaling factor used to reduce aggregation tile size; using a larger
            tileScale (e.g. 2 or 4) may enable computations that run out of memory with
            the default.
        dateColumn : Output name of the date column.
        dateFormat : Output format of the date column. Defaults to ISO. Available options:
            'ms' (for milliseconds), 'ISO' (for ISO Standard Format) or a custom format
            pattern.
        naValue : Value to use as NA when the region reduction doesn't retrieve a value
            due to masked pixels.

    Returns:
        Time series by regions retrieved as a Feature Collection.

    Raises:
        TypeError: If collection is not an ee.FeatureCollection.

    Examples:
        >>> import ee
        >>> from ee_extra.TimeSeries.core import getTimeSeriesByRegions
        >>> ee.Initialize()
        >>> f1 = ee.Feature(ee.Geometry.Point([3.984770,48.767221]).buffer(50),{'ID':'A'})
        >>> f2 = ee.Feature(ee.Geometry.Point([4.101367,48.748076]).buffer(50),{'ID':'B'})
        >>> fc = ee.FeatureCollection([f1,f2])
        >>> S2 = (ee.ImageCollection('COPERNICUS/S2_SR')
        ...       .filterBounds(fc)
        ...       .filterDate('2020-01-01','2021-01-01'))
        >>> ts = getTimeSeriesByRegions(S2,
        ...                             reducer = [ee.Reducer.mean(),ee.Reducer.median()],
        ...                             collection = fc,
        ...                             bands = ['B3','B8'],
        ...                             scale = 10)
    """
    if bands is not None:
        if not isinstance(bands, list):
            bands = [bands]
        x = x.select(bands)
    else:
        bands = x.first().bandNames().getInfo()
    if not isinstance(reducer, list):
        reducer = [reducer]
    if not isinstance(collection, ee.featurecollection.FeatureCollection):
        # TypeError is a subclass of Exception, so callers catching the old
        # bare Exception keep working.
        raise TypeError("Parameter collection must be an ee.FeatureCollection!")
    props = collection.first().propertyNames()
    collections = []
    # (removed an unused local: imgList = x.toList(x.size()))
    for red in reducer:
        reducerName = red.getOutputs().get(0)

        # red/reducerName are bound as default arguments so the mapped
        # function does not depend on late-binding closure semantics.
        def reduceImageCollectionByRegions(img, red=red, reducerName=reducerName):
            img = ee.Image(img)
            if len(bands) == 1:
                # Presumably a workaround so single-band reductions keep named
                # output columns in reduceRegions — TODO confirm.
                img = img.addBands(ee.Image(naValue).rename("eemontTemporal"))
            fc = img.reduceRegions(collection, red, scale, crs, crsTransform, tileScale)
            if dateFormat == "ms":
                date = ee.Date(img.get("system:time_start")).millis()
            elif dateFormat == "ISO":
                date = ee.Date(img.get("system:time_start")).format()
            else:
                date = ee.Date(img.get("system:time_start")).format(dateFormat)

            def setProperties(feature):
                return feature.set({dateColumn: date, "reducer": reducerName})

            return fc.map(setProperties)

        collections.append(x.map(reduceImageCollectionByRegions).flatten())
    flattenfc = ee.FeatureCollection(collections).flatten()

    def setNA(feature):
        # A feature that gained only the reducer/date properties (props + 2)
        # received no band values; fill them with naValue.
        feature = ee.Algorithms.If(
            condition=feature.propertyNames().size().eq(props.size().add(2)),
            trueCase=feature.set(
                ee.Dictionary.fromLists(bands, [naValue] * len(bands))
            ),
            falseCase=feature,
        )
        return ee.Feature(feature)

    flattenfc = flattenfc.map(setNA)
    flattenfc = flattenfc.select(props.cat(["reducer", dateColumn]).cat(bands))
    return flattenfc
import ee
def getTimeSeriesByRegion(
x: ee.ImageCollection,
reducer: Any,
bands: Optional[Union[str, List[str]]] = None,
geometry: Optional[Union[ee.Geometry, ee.Feature, ee.FeatureCollection]] = None,
scale: Optional[Union[int, float]] = None,
crs: Optional[Any] = None,
crsTransform: Optional[Any] = None,
bestEffort: bool = False,
maxPixels: Union[int, float] = 1e12,
tileScale: int = 1,
dateColumn: str = "date",
dateFormat: str = "ISO",
naValue: Union[int, float] = -9999,
):
"""Gets the time series by region for the given image collection and geometry (feature
or feature collection are also supported) according to the specified reducer (or
reducers).
Args:
x : Image collection to get the time series from.
reducer : Reducer or list of reducers to use for region reduction.
bands : Selection of bands to get the time series from. Defaults to all bands in
the image collection.
geometry : Geometry to perform the region reduction. If ee.Feature or
ee.FeatureCollection, the geometry() method is called. In order to get
reductions by each feature please see the getTimeSeriesByRegions() method.
Defaults to the footprint of the first band for each image in the collection.
scale : Nomical scale in meters.
crs : The projection to work in. If unspecified, the projection of the image's
first band is used. If specified in addition to scale, rescaled to the
specified scale.
crsTransform : The list of CRS transform values. This is a row-major ordering of
the 3x2 transform matrix. This option is mutually exclusive with 'scale', and
replaces any transform already set on the projection.
bestEffort : If the polygon would contain too many pixels at the given scale,
compute and use a larger scale which would allow the operation to succeed.
maxPixels : The maximum number of pixels to reduce.
tileScale : A scaling factor used to reduce aggregation tile size; using a larger
tileScale (e.g. 2 or 4) may enable computations that run out of memory with
the default.
dateColumn : Output name of the date column.
dateFormat : Output format of the date column. Defaults to ISO. Available options:
'ms' (for milliseconds), 'ISO' (for ISO Standard Format) or a custom format
pattern.
naValue : Value to use as NA when the region reduction doesn't retrieve a value
due to masked pixels.
Returns:
Time series by region retrieved as a Feature Collection.
Examples:
>>> import ee
>>> from ee_extra.TimeSeries.core import getTimeSeriesByRegion
>>> ee.Initialize()
>>> f1 = ee.Feature(ee.Geometry.Point([3.984770,48.767221]).buffer(50),{'ID':'A'})
>>> f2 = ee.Feature(ee.Geometry.Point([4.101367,48.748076]).buffer(50),{'ID':'B'})
>>> fc = ee.FeatureCollection([f1,f2])
>>> S2 = (ee.ImageCollection('COPERNICUS/S2_SR')
... .filterBounds(fc)
... .filterDate('2020-01-01','2021-01-01'))
>>> ts = getTimeSeriesByRegion(S2,
... reducer = [ee.Reducer.mean(),ee.Reducer.median()],
... geometry = fc,
... bands = ['B4','B8'],
... scale = 10)
"""
if bands != None:
if not isinstance(bands, list):
bands = [bands]
x = x.select(bands)
else:
bands = x.first().bandNames().getInfo()
if not isinstance(reducer, list):
reducer = [reducer]
if not isinstance(geometry, ee.geometry.Geometry):
geometry = geometry.geometry()
collections = []
for red in reducer:
reducerName = red.getOutputs().get(0)
def reduceImageCollectionByRegion(img):
dictionary = img.reduceRegion(
red,
geometry,
scale,
crs,
crsTransform,
bestEffort,
maxPixels,
tileScale,
)
if dateFormat == "ms":
date = ee.Date(img.get("system:time_start")).millis()
elif dateFormat == "ISO":
date = ee.Date(img.get("system:time_start")).format()
else:
date = ee.Date(img.get("system:time_start")).format(dateFormat)
return ee.Feature(None, dictionary).set(
{dateColumn: date, "reducer": reducerName}
)
collections.append(ee.FeatureCollection(x.map(reduceImageCollectionByRegion)))
flattenfc = ee.FeatureCollection(collections).flatten()
def setNA(feature):
feature = ee.Algorithms.If(
condition=feature.propertyNames().size().eq(3),
trueCase=feature.set(
ee.Dictionary.fromLists(bands, [naValue] * len(bands))
),
falseCase=feature,
)
feature = ee.Feature(feature)
return feature
return flattenfc.map(setNA)
def getTimeSeriesByRegions(
x: ee.ImageCollection,
reducer: Any,
collection: ee.FeatureCollection,
bands: Optional[Union[str, List[str]]] = None,
scale: Optional[Union[int, float]] = None,
crs: Optional[Any] = None,
crsTransform: Optional[Any] = None,
tileScale: int = 1,
dateColumn: str = "date",
dateFormat: str = "ISO",
naValue: Union[int, float] = -9999,
):
"""Gets the time series by regions for the given image collection and feature
collection according to the specified reducer (or reducers).
Args:
x : Image collection to get the time series from.
reducer : Reducer or list of reducers to use for region reduction.
collection : Feature Collection to perform the reductions on. Image reductions are
applied to each feature in the collection.
bands : Selection of bands to get the time series from. Defaults to all bands in
the image collection.
scale : Nomical scale in meters.
crs : The projection to work in. If unspecified, the projection of the image's
first band is used. If specified in addition to scale, rescaled to the
specified scale.
crsTransform : The list of CRS transform values. This is a row-major ordering of
the 3x2 transform matrix. This option is mutually exclusive with 'scale', and
replaces any transform already set on the projection.
tileScale : A scaling factor used to reduce aggregation tile size; using a larger
tileScale (e.g. 2 or 4) may enable computations that run out of memory with
the default.
dateColumn : Output name of the date column.
dateFormat : Output format of the date column. Defaults to ISO. Available options:
'ms' (for milliseconds), 'ISO' (for ISO Standard Format) or a custom format
pattern.
naValue : Value to use as NA when the region reduction doesn't retrieve a value
due to masked pixels.
Returns:
Time series by regions retrieved as a Feature Collection.
Examples:
>>> import ee
>>> from ee_extra.TimeSeries.core import getTimeSeriesByRegions
>>> ee.Initialize()
>>> f1 = ee.Feature(ee.Geometry.Point([3.984770,48.767221]).buffer(50),{'ID':'A'})
>>> f2 = ee.Feature(ee.Geometry.Point([4.101367,48.748076]).buffer(50),{'ID':'B'})
>>> fc = ee.FeatureCollection([f1,f2])
>>> S2 = (ee.ImageCollection('COPERNICUS/S2_SR')
... .filterBounds(fc)
... .filterDate('2020-01-01','2021-01-01'))
>>> ts = getTimeSeriesByRegions(S2,
... reducer = [ee.Reducer.mean(),ee.Reducer.median()],
... collection = fc,
... bands = ['B3','B8'],
... scale = 10)
"""
if bands != None:
if not isinstance(bands, list):
bands = [bands]
x = x.select(bands)
else:
bands = x.first().bandNames().getInfo()
if not isinstance(reducer, list):
reducer = [reducer]
if not isinstance(collection, ee.featurecollection.FeatureCollection):
raise Exception("Parameter collection must be an ee.FeatureCollection!")
props = collection.first().propertyNames()
collections = []
imgList = x.toList(x.size())
for red in reducer:
reducerName = red.getOutputs().get(0)
def reduceImageCollectionByRegions(img):
img = ee.Image(img)
if len(bands) == 1:
img = img.addBands(ee.Image(naValue).rename("eemontTemporal"))
fc = img.reduceRegions(collection, red, scale, crs, crsTransform, tileScale)
if dateFormat == "ms":
date = ee.Date(img.get("system:time_start")).millis()
elif dateFormat == "ISO":
date = ee.Date(img.get("system:time_start")).format()
else:
date = ee.Date(img.get("system:time_start")).format(dateFormat)
def setProperties(feature):
return feature.set({dateColumn: date, "reducer": reducerName})
return fc.map(setProperties)
collections.append(x.map(reduceImageCollectionByRegions).flatten())
flattenfc = ee.FeatureCollection(collections).flatten()
def setNA(feature):
feature = ee.Algorithms.If(
condition=feature.propertyNames().size().eq(props.size().add(2)),
trueCase=feature.set(
ee.Dictionary.fromLists(bands, [naValue] * len(bands))
),
falseCase=feature,
)
feature = ee.Feature(feature)
return feature
flattenfc = flattenfc.map(setNA)
flattenfc = flattenfc.select(props.cat(["reducer", dateColumn]).cat(bands))
return flattenfc | 0.968827 | 0.632786 |
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from udpa.annotations import status_pb2 as udpa_dot_annotations_dot_status__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='xds/core/v3/authority.proto',
package='xds.core.v3',
syntax='proto3',
serialized_options=b'\n\033com.github.udpa.xds.core.v3B\016AuthorityProtoP\001\272\200\310\321\006\002\010\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bxds/core/v3/authority.proto\x12\x0bxds.core.v3\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\"\n\tAuthority\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x42\x37\n\x1b\x63om.github.udpa.xds.core.v3B\x0e\x41uthorityProtoP\x01\xba\x80\xc8\xd1\x06\x02\x08\x01\x62\x06proto3'
,
dependencies=[udpa_dot_annotations_dot_status__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,])
_AUTHORITY = _descriptor.Descriptor(
name='Authority',
full_name='xds.core.v3.Authority',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='xds.core.v3.Authority.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\004r\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=134,
)
DESCRIPTOR.message_types_by_name['Authority'] = _AUTHORITY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Authority = _reflection.GeneratedProtocolMessageType('Authority', (_message.Message,), {
'DESCRIPTOR' : _AUTHORITY,
'__module__' : 'xds.core.v3.authority_pb2'
# @@protoc_insertion_point(class_scope:xds.core.v3.Authority)
})
_sym_db.RegisterMessage(Authority)
DESCRIPTOR._options = None
_AUTHORITY.fields_by_name['name']._options = None
# @@protoc_insertion_point(module_scope) | python/pb/xds/core/v3/authority_pb2.py | """Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from udpa.annotations import status_pb2 as udpa_dot_annotations_dot_status__pb2
from validate import validate_pb2 as validate_dot_validate__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='xds/core/v3/authority.proto',
package='xds.core.v3',
syntax='proto3',
serialized_options=b'\n\033com.github.udpa.xds.core.v3B\016AuthorityProtoP\001\272\200\310\321\006\002\010\001',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n\x1bxds/core/v3/authority.proto\x12\x0bxds.core.v3\x1a\x1dudpa/annotations/status.proto\x1a\x17validate/validate.proto\"\"\n\tAuthority\x12\x15\n\x04name\x18\x01 \x01(\tB\x07\xfa\x42\x04r\x02\x10\x01\x42\x37\n\x1b\x63om.github.udpa.xds.core.v3B\x0e\x41uthorityProtoP\x01\xba\x80\xc8\xd1\x06\x02\x08\x01\x62\x06proto3'
,
dependencies=[udpa_dot_annotations_dot_status__pb2.DESCRIPTOR,validate_dot_validate__pb2.DESCRIPTOR,])
_AUTHORITY = _descriptor.Descriptor(
name='Authority',
full_name='xds.core.v3.Authority',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='xds.core.v3.Authority.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\372B\004r\002\020\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=100,
serialized_end=134,
)
DESCRIPTOR.message_types_by_name['Authority'] = _AUTHORITY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Authority = _reflection.GeneratedProtocolMessageType('Authority', (_message.Message,), {
'DESCRIPTOR' : _AUTHORITY,
'__module__' : 'xds.core.v3.authority_pb2'
# @@protoc_insertion_point(class_scope:xds.core.v3.Authority)
})
_sym_db.RegisterMessage(Authority)
DESCRIPTOR._options = None
_AUTHORITY.fields_by_name['name']._options = None
# @@protoc_insertion_point(module_scope) | 0.317744 | 0.10683 |
from abc import ABC, abstractmethod
from wsqluse.wsqluse import Wsqluse
from gc_qdk.main import GCoreQDK
class WTAS(ABC, GCoreQDK):
    """WServer To AR Sender.

    Abstract base class that all classes responsible for sending data to AR
    inherit from.
    """

    def __init__(self, polygon_ip, polygon_port, *args, **kwargs):
        """Initialize the sender and open the connection to the polygon.

        :param polygon_ip: IP address of the polygon host running AR.
        :param polygon_port: Port AR listens on.
        """
        super().__init__(polygon_ip, polygon_port, *args, **kwargs)
        self.make_connection()

    @abstractmethod
    def send(self, *args, **kwargs):
        """Send data to AR. Must be implemented by subclasses."""
        pass

    def get(self, *args, **kwargs):
        """Fetch and return the response from AR."""
        response = self.get_data()
        return response
class WTADB(ABC, Wsqluse):
    """WServer To AR Data Base.

    Abstract base class that classes handling AR responses in the database
    inherit from. (Split out of WTAS to follow the Single Responsibility
    Principle.)
    """

    # NOTE(review): every query below is built with str.format, so values are
    # interpolated into the SQL unescaped. Safe only for trusted integer IDs;
    # consider parameterized queries if wsqluse supports them — confirm.

    def __init__(self, polygon_id, table_name, column_name,
                 *args, **kwargs):
        """Initialize the report-table accessor.

        :param polygon_id: ID of the polygon the reports belong to.
        :param table_name: Name of the send-report table.
        :param column_name: Column holding the GDB record ID.
        """
        super().__init__(*args, **kwargs)
        self.table_name = table_name
        self.column_name = column_name
        self.polygon_id = polygon_id

    def fetch_polygon_info(self):
        """Return all information needed to connect to the polygon by its ID.

        :return: first matching row dict on success, otherwise None (implicit).
        """
        command = "SELECT * FROM wta_connection_info WHERE polygon={}"
        command = command.format(self.polygon_id)
        response = self.get_table_dict(command)
        if response['status'] == 'success':
            return response['info'][0]

    def mark_get(self, wdb_id, report_id):
        """Record that AR acknowledged the data.

        :param wdb_id: ID in WDB, on the polygon side.
        :param report_id: ID of the send report (usually in a table named
            {tablename}_send_reports).
        :return: Result of the UPDATE.
        """
        command = "UPDATE {} SET get_time=now(), wdb_id={} WHERE id={}"
        command = command.format(self.table_name, wdb_id, report_id)
        response = self.try_execute(command)
        return response

    def mark_fail(self, info, report_id):
        """Record a failed attempt to send data to AR.

        :param info: Python traceback returned by AR.
        :param report_id: Report ID.
        :return: Result of the UPDATE.
        """
        # NOTE(review): a traceback containing a single quote will break this
        # statement because info is spliced into the quoted literal.
        command = "UPDATE {} SET get_time=now(), additional='{}' WHERE id={}"
        command = command.format(self.table_name, info, report_id)
        response = self.try_execute(command)
        return response

    def mark_send(self, gdb_id):
        """Record that the data was sent.

        :param gdb_id: GDB record ID that was sent.
        :return: ID of the new report row on success, otherwise None (implicit).
        """
        command = "INSERT INTO {} ({}, polygon, send_time) VALUES ({}, {}, " \
                  "now())"
        command = command.format(self.table_name, self.column_name, gdb_id,
                                 self.polygon_id)
        response = self.try_execute(command)
        if response['status'] == 'success':
            return response['info'][0][0]
from wsqluse.wsqluse import Wsqluse
from gc_qdk.main import GCoreQDK
class WTAS(ABC, GCoreQDK):
"""
WServer To AR Sender.
Абстрактный, основной класс, с которого наследуют иные классы, занимающиеся
отправкой данных на AR.
"""
def __init__(self, polygon_ip, polygon_port, *args, **kwargs):
"""
Инициализация.
:param name: Имя обработчика.
:param table_name: Название таблицы в базе данных GDB.
:param ar_method: Какой метод AR должен вернуть ответ.
"""
super().__init__(polygon_ip, polygon_port, *args, **kwargs)
self.make_connection()
@abstractmethod
def send(self, *args, **kwargs):
""" Отправить данные на AR. """
pass
def get(self, *args, **kwargs):
""" Обработать ответ от AR. """
response = self.get_data()
return response
class WTADB(ABC, Wsqluse):
    """
    WServer To AR Data Base.

    Abstract base class for classes that record and process AR responses.
    (Split out of WTAS to follow the Single Responsibility Principle.
    Docstrings translated from the original Russian.)
    """

    def __init__(self, polygon_id, table_name, column_name,
                 *args, **kwargs):
        """
        Initialize the response recorder.

        :param polygon_id: ID of the polygon (AR installation) rows refer to.
        :param table_name: send-report table name (typically
            ``{tablename}_send_reports``).
        :param column_name: column that stores the GDB record ID.
        """
        super().__init__(*args, **kwargs)
        self.table_name = table_name
        self.column_name = column_name
        self.polygon_id = polygon_id

    def fetch_polygon_info(self):
        """
        Return all information needed to connect to the polygon, by its ID.

        :return: first matching row on success; None (implicitly) otherwise.
        """
        # NOTE(review): polygon_id is interpolated into the SQL via
        # str.format — safe only while it is always a trusted integer;
        # parameterize if Wsqluse supports bound parameters.
        command = "SELECT * FROM wta_connection_info WHERE polygon={}"
        command = command.format(self.polygon_id)
        response = self.get_table_dict(command)
        if response['status'] == 'success':
            return response['info'][0]

    def mark_get(self, wdb_id, report_id):
        """
        Record that AR acknowledged the data.

        :param wdb_id: ID in WDB, on the polygon side.
        :param report_id: ID of the send report (usually in the
            ``{tablename}_send_reports`` table).
        :return: Wsqluse response of the UPDATE.
        """
        command = "UPDATE {} SET get_time=now(), wdb_id={} WHERE id={}"
        command = command.format(self.table_name, wdb_id, report_id)
        response = self.try_execute(command)
        return response

    def mark_fail(self, info, report_id):
        """
        Record a failed send to AR.

        :param info: Python traceback text returned by AR.
        :param report_id: report ID.
        :return: Wsqluse response of the UPDATE.
        """
        # NOTE(review): `info` is wrapped in plain single quotes — a
        # traceback containing a quote breaks the statement and is an SQL
        # injection vector; escape or parameterize if possible.
        command = "UPDATE {} SET get_time=now(), additional='{}' WHERE id={}"
        command = command.format(self.table_name, info, report_id)
        response = self.try_execute(command)
        return response

    def mark_send(self, gdb_id):
        """
        Record that data was sent to the polygon.

        :param gdb_id: GDB record ID that was sent.
        :return: new report row ID on success; None (implicitly) on failure.
        """
        command = "INSERT INTO {} ({}, polygon, send_time) VALUES ({}, {}, " \
                  "now())"
        command = command.format(self.table_name, self.column_name, gdb_id,
                                 self.polygon_id)
        response = self.try_execute(command)
        if response['status'] == 'success':
return response['info'][0][0] | 0.605916 | 0.253145 |
import sqlalchemy
from sqlalchemy.orm import sessionmaker
from pathlib import Path
from typing import Optional
from sqlalchemy.orm import Session
from sqlalchemy.future.engine import Engine
from models.model_base import ModelBase
__engine: Optional[Engine] = None
def create_engine(sqlite: bool = False) -> Engine:
    """Create (or return the already-created) process-wide database engine.

    Args:
        sqlite (bool, optional): if True, use a local SQLite file as the
            backend; otherwise connect to PostgreSQL. Defaults to False.

    Returns:
        Engine: the shared SQLAlchemy engine (created once and reused).
    """
    global __engine
    if __engine:
        # BUGFIX: the original did a bare ``return`` here, yielding None
        # despite the ``-> Engine`` annotation; return the cached engine.
        return __engine
    if sqlite:
        file_db = 'db/picoles.sqlite'
        folder = Path(file_db).parent
        folder.mkdir(parents=True, exist_ok=True)
        conn_str = f'sqlite:///{file_db}'
        # check_same_thread=False allows the connection to be used from
        # threads other than the creating one.
        __engine = sqlalchemy.create_engine(
            url=conn_str, echo=False, connect_args={'check_same_thread': False}
        )
    else:
        # SECURITY NOTE(review): credentials are hard-coded; move this
        # connection string into an environment variable / .env file.
        # "admin" is the DB user, "1234" the password, 5432 the DB port.
        conn_str = 'postgresql://admin:1234@localhost:5432/picoles'
        __engine = sqlalchemy.create_engine(url=conn_str, echo=False)
    return __engine
def create_session() -> Session:
    """Open and return a new database session.

    Lazily initialises the module-wide engine on first use (PostgreSQL by
    default; call ``create_engine(sqlite=True)`` beforehand to use SQLite).

    Returns:
        Session: a fresh session bound to the shared engine.
    """
    global __engine
    if not __engine:
        # Default backend is PostgreSQL; to use SQLite instead, call
        # create_engine(sqlite=True) before the first session is requested.
        create_engine()
    session_factory = sessionmaker(
        __engine,
        expire_on_commit=False,
        class_=Session,
    )
    return session_factory()
def create_tables() -> None:
    """(Re)create the database schema.

    WARNING: ``drop_all`` below deletes every existing table (and all its
    data) before the schema is rebuilt — destructive; dev/test use only.
    """
    global __engine
    if not __engine:
        # To use SQLite as the default backend, call create_engine(sqlite=True).
        # As written, PostgreSQL is the default.
        create_engine()
    # Imported for its side effect: registers every model on ModelBase.metadata.
    import models.__all_models
    ModelBase.metadata.drop_all(__engine)
ModelBase.metadata.create_all(__engine) | src/sqlalchemy/03sqla_sync/conf/db_session.py | import sqlalchemy
from sqlalchemy.orm import sessionmaker
from pathlib import Path
from typing import Optional
from sqlalchemy.orm import Session
from sqlalchemy.future.engine import Engine
from models.model_base import ModelBase
__engine: Optional[Engine] = None
def create_engine(sqlite: bool = False) -> Engine:
"""This function will create the engine for our database
Args:
sqlite (bool, optional): if sqlite = True,
means the user want to use SQLite as default. Defaults to False.
"""
global __engine
if __engine:
return
if sqlite:
file_db = 'db/picoles.sqlite'
folder = Path(file_db).parent
folder.mkdir(parents=True, exist_ok=True)
conn_str = f'sqlite:///{file_db}'
__engine = sqlalchemy.create_engine(
url=conn_str, echo=False, connect_args={'check_same_thread': False}
)
else:
# Essa conn_str pode colocar em arquivo .env para não ocorrer ataques
# admin é usuario do banco, 1234 é a senha
# o número 5432 é a porta do banco de dados
conn_str = 'postgresql://admin:1234@localhost:5432/picoles'
__engine = sqlalchemy.create_engine(url=conn_str, echo=False)
return __engine
def create_session() -> Session:
"""This function will create a session in our database
Returns:
Session: database session
"""
global __engine
if not __engine:
# Caso for usar o SQLite como padrão, chamar a função dessa forma:
# create_engine(sqlite=True)
# Do jeto que está, estamos usando o postgresql como padrão.
create_engine()
__session = sessionmaker(
__engine,
expire_on_commit=False,
class_=Session,
)
session: Session = __session()
return session
def create_tables() -> None:
global __engine
if not __engine:
# Caso for usar o SQLite como padrão, chamar a função dessa forma:
# create_engine(sqlite=True)
# Do jeto que está, estamos usando o postgresql como padrão.
create_engine()
import models.__all_models
ModelBase.metadata.drop_all(__engine)
ModelBase.metadata.create_all(__engine) | 0.781997 | 0.197444 |
import sys
import time
from django.db.backends.base.creation import BaseDatabaseCreation
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = '<PASSWORD>'
class DatabaseCreation(BaseDatabaseCreation):
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': <PASSWORD>,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
self.connection.settings_dict['PASSWORD'] = <PASSWORD>
return self.connection.settings_dict['NAME']
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': <PASSWORD>,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
    def _execute_test_db_creation(self, cursor, parameters, verbosity):
        """Create the permanent and temporary tablespaces backing the test DB."""
        if verbosity >= 2:
            print("_create_test_db(): dbname = %s" % parameters['dbname'])
        statements = [
            """CREATE TABLESPACE %(tblspace)s
            DATAFILE '%(tblspace)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
            """,
            """CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
            TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
            REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
            """,
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _create_test_user(self, cursor, parameters, verbosity):
        """Create the test user with quota on the test tablespace and basic grants."""
        if verbosity >= 2:
            print("_create_test_user(): username = %s" % parameters['user'])
        statements = [
            """CREATE USER %(user)s
            IDENTIFIED BY %(password)s
            DEFAULT TABLESPACE %(tblspace)s
            TEMPORARY TABLESPACE %(tblspace_temp)s
            QUOTA UNLIMITED ON %(tblspace)s
            """,
            """GRANT CONNECT, RESOURCE TO %(user)s""",
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _execute_test_db_destruction(self, cursor, parameters, verbosity):
        """Drop both test tablespaces, including their datafiles."""
        if verbosity >= 2:
            print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
        statements = [
            'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
            'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
    def _destroy_test_user(self, cursor, parameters, verbosity):
        """Drop the test user and everything it owns (CASCADE)."""
        if verbosity >= 2:
            print("_destroy_test_user(): user=%s" % parameters['user'])
            print("Be patient. This can take some time...")
        statements = [
            'DROP USER %(user)s CASCADE',
        ]
        self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except AttributeError:
pass
return name
    def _test_database_create(self):
        """Whether to create/destroy the test tablespaces (TEST_CREATE, default True)."""
        return self.connection.settings_dict.get('TEST_CREATE', True)
    def _test_user_create(self):
        """Whether to create/destroy the test user (TEST_USER_CREATE, default True)."""
        return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except KeyError:
pass
return name
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except KeyError:
pass
return name
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except KeyError:
pass
return name
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except KeyError:
pass
return name
    def _get_test_db_name(self):
        """
        Return the 'production' DB name so Django's test-database creation
        machinery works. This is harmless here because database names as
        handled by Django have no real counterpart in Oracle-style backends.
        """
        return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
    def set_autocommit(self):
        # Enable autocommit on the underlying DB-API connection object.
        self.connection.connection.autocommit = True
    def sql_create_model(self, model, style, known_models=set()):
        """
        Returns the SQL required to create a single model, as a tuple of:
            (list_of_sql, pending_references_dict)

        NOTE(review): the mutable default ``known_models=set()`` is shared
        across calls — safe only while no caller mutates it; confirm.
        """
        opts = model._meta
        # Unmanaged, proxy and swapped models get no DDL at all.
        if not opts.managed or opts.proxy or opts.swapped:
            return [], {}
        final_output = []
        table_output = []
        pending_references = {}
        qn = self.connection.ops.quote_name
        for f in opts.local_fields:
            col_type = f.db_type(connection=self.connection)
            tablespace = f.db_tablespace or opts.db_tablespace
            if col_type is None:
                # Skip ManyToManyFields, because they're not represented as
                # database columns in this table.
                continue
            # Make the definition (e.g. 'foo VARCHAR(30)') for this field.
            field_output = [style.SQL_FIELD(qn(f.column)),
                            style.SQL_COLTYPE(col_type)]
            # Oracle treats the empty string ('') as null, so coerce the null
            # option whenever '' is a possible value.
            null = f.null
            if (f.empty_strings_allowed and not f.primary_key and
                    self.connection.features.interprets_empty_strings_as_nulls):
                null = True
            if not null:
                field_output.append(style.SQL_KEYWORD('NOT NULL'))
            if f.primary_key:
                field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
            elif f.unique:
                field_output.append(style.SQL_KEYWORD('UNIQUE'))
            elif f.has_default():
                field_output.append(style.SQL_KEYWORD('DEFAULT'))
                field_output.append("'%s'" % f.get_default())
            if tablespace and f.unique:
                # We must specify the index tablespace inline, because we
                # won't be generating a CREATE INDEX statement for this field.
                tablespace_sql = self.connection.ops.tablespace_sql(
                    tablespace, inline=True)
                if tablespace_sql:
                    field_output.append(tablespace_sql)
            if f.rel:
                ref_output, pending = self.sql_for_inline_foreign_key_references(
                    f, known_models, style)
                if pending:
                    # Referenced model not created yet: defer its REFERENCES
                    # clause until that table exists.
                    pending_references.setdefault(f.rel.to, []).append(
                        (model, f))
                else:
                    field_output.extend(ref_output)
            table_output.append(' '.join(field_output))
        # Table-level UNIQUE constraints from Meta.unique_together.
        for field_constraints in opts.unique_together:
            table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
                                ", ".join(
                                    [style.SQL_FIELD(qn(opts.get_field(f).column))
                                     for f in field_constraints]))
        full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
                          style.SQL_TABLE(qn(opts.db_table)) + ' (']
        for i, line in enumerate(table_output):  # Combine and add commas.
            full_statement.append(
                ' %s%s' % (line, i < len(table_output) - 1 and ',' or ''))
        full_statement.append(')')
        if opts.db_tablespace:
            tablespace_sql = self.connection.ops.tablespace_sql(
                opts.db_tablespace)
            if tablespace_sql:
                full_statement.append(tablespace_sql)
        full_statement.append(';')
        final_output.append('\n'.join(full_statement))
        if opts.has_auto_field:
            # Add any extra SQL needed to support auto-incrementing primary
            # keys.
            auto_column = opts.auto_field.db_column or opts.auto_field.name
            autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
                                                          auto_column)
            if autoinc_sql:
                for stmt in autoinc_sql:
                    final_output.append(stmt)
return final_output, pending_references | django_dmPython/src/django_dmPython/creation.py | import sys
import time
from django.db.backends.base.creation import BaseDatabaseCreation
TEST_DATABASE_PREFIX = 'test_'
PASSWORD = '<PASSWORD>'
class DatabaseCreation(BaseDatabaseCreation):
def _create_test_db(self, verbosity=1, autoclobber=False):
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': <PASSWORD>,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
if self._test_database_create():
try:
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test database: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test database, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_NAME)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test database '%s'..." % self.connection.alias)
self._execute_test_db_destruction(cursor, parameters, verbosity)
self._execute_test_db_creation(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test database: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
if self._test_user_create():
if verbosity >= 1:
print("Creating test user...")
try:
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error creating the test user: %s\n" % e)
if not autoclobber:
confirm = input("It appears the test user, %s, already exists. Type 'yes' to delete it, or 'no' to cancel: " % TEST_USER)
if autoclobber or confirm == 'yes':
try:
if verbosity >= 1:
print("Destroying old test user...")
self._destroy_test_user(cursor, parameters, verbosity)
if verbosity >= 1:
print("Creating test user...")
self._create_test_user(cursor, parameters, verbosity)
except Exception as e:
sys.stderr.write("Got an error recreating the test user: %s\n" % e)
sys.exit(2)
else:
print("Tests cancelled.")
sys.exit(1)
self.connection.settings_dict['SAVED_USER'] = self.connection.settings_dict['USER']
self.connection.settings_dict['SAVED_PASSWORD'] = self.connection.settings_dict['PASSWORD']
self.connection.settings_dict['TEST_USER'] = self.connection.settings_dict['USER'] = TEST_USER
self.connection.settings_dict['PASSWORD'] = <PASSWORD>
return self.connection.settings_dict['NAME']
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
"""
TEST_NAME = self._test_database_name()
TEST_USER = self._test_database_user()
TEST_PASSWD = self._test_database_passwd()
TEST_TBLSPACE = self._test_database_tblspace()
TEST_TBLSPACE_TMP = self._test_database_tblspace_tmp()
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
parameters = {
'dbname': TEST_NAME,
'user': TEST_USER,
'password': <PASSWORD>,
'tblspace': TEST_TBLSPACE,
'tblspace_temp': TEST_TBLSPACE_TMP,
}
cursor = self.connection.cursor()
time.sleep(1) # To avoid "database is being accessed by other users" errors.
if self._test_user_create():
if verbosity >= 1:
print('Destroying test user...')
self._destroy_test_user(cursor, parameters, verbosity)
if self._test_database_create():
if verbosity >= 1:
print('Destroying test database tables...')
self._execute_test_db_destruction(cursor, parameters, verbosity)
self.connection.close()
def _execute_test_db_creation(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_db(): dbname = %s" % parameters['dbname'])
statements = [
"""CREATE TABLESPACE %(tblspace)s
DATAFILE '%(tblspace)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 200M
""",
"""CREATE TEMPORARY TABLESPACE %(tblspace_temp)s
TEMPFILE '%(tblspace_temp)s.dbf' SIZE 20M
REUSE AUTOEXTEND ON NEXT 10M MAXSIZE 100M
""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _create_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_create_test_user(): username = %s" % parameters['user'])
statements = [
"""CREATE USER %(user)s
IDENTIFIED BY %(password)s
DEFAULT TABLESPACE %(tblspace)s
TEMPORARY TABLESPACE %(tblspace_temp)s
QUOTA UNLIMITED ON %(tblspace)s
""",
"""GRANT CONNECT, RESOURCE TO %(user)s""",
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_test_db_destruction(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_execute_test_db_destruction(): dbname=%s" % parameters['dbname'])
statements = [
'DROP TABLESPACE %(tblspace)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
'DROP TABLESPACE %(tblspace_temp)s INCLUDING CONTENTS AND DATAFILES CASCADE CONSTRAINTS',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _destroy_test_user(self, cursor, parameters, verbosity):
if verbosity >= 2:
print("_destroy_test_user(): user=%s" % parameters['user'])
print("Be patient. This can take some time...")
statements = [
'DROP USER %(user)s CASCADE',
]
self._execute_statements(cursor, statements, parameters, verbosity)
def _execute_statements(self, cursor, statements, parameters, verbosity):
for template in statements:
stmt = template % parameters
if verbosity >= 2:
print(stmt)
try:
cursor.execute(stmt)
except Exception as err:
sys.stderr.write("Failed (%s)\n" % (err))
raise
def _test_database_name(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_NAME']:
name = self.connection.settings_dict['TEST_NAME']
except AttributeError:
pass
return name
def _test_database_create(self):
return self.connection.settings_dict.get('TEST_CREATE', True)
def _test_user_create(self):
return self.connection.settings_dict.get('TEST_USER_CREATE', True)
def _test_database_user(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['USER']
try:
if self.connection.settings_dict['TEST_USER']:
name = self.connection.settings_dict['TEST_USER']
except KeyError:
pass
return name
def _test_database_passwd(self):
name = PASSWORD
try:
if self.connection.settings_dict['TEST_PASSWD']:
name = self.connection.settings_dict['TEST_PASSWD']
except KeyError:
pass
return name
def _test_database_tblspace(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME']
try:
if self.connection.settings_dict['TEST_TBLSPACE']:
name = self.connection.settings_dict['TEST_TBLSPACE']
except KeyError:
pass
return name
def _test_database_tblspace_tmp(self):
name = TEST_DATABASE_PREFIX + self.connection.settings_dict['NAME'] + '_temp'
try:
if self.connection.settings_dict['TEST_TBLSPACE_TMP']:
name = self.connection.settings_dict['TEST_TBLSPACE_TMP']
except KeyError:
pass
return name
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
names as handled by Django haven't real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']
def test_db_signature(self):
settings_dict = self.connection.settings_dict
return (
settings_dict['HOST'],
settings_dict['PORT'],
settings_dict['ENGINE'],
settings_dict['NAME'],
self._test_database_user(),
)
def set_autocommit(self):
self.connection.connection.autocommit = True
def sql_create_model(self, model, style, known_models=set()):
"""
Returns the SQL required to create a single model, as a tuple of:
(list_of_sql, pending_references_dict)
"""
opts = model._meta
if not opts.managed or opts.proxy or opts.swapped:
return [], {}
final_output = []
table_output = []
pending_references = {}
qn = self.connection.ops.quote_name
for f in opts.local_fields:
col_type = f.db_type(connection=self.connection)
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
# Skip ManyToManyFields, because they're not represented as
# database columns in this table.
continue
# Make the definition (e.g. 'foo VARCHAR(30)') for this field.
field_output = [style.SQL_FIELD(qn(f.column)),
style.SQL_COLTYPE(col_type)]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
null = f.null
if (f.empty_strings_allowed and not f.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if not null:
field_output.append(style.SQL_KEYWORD('NOT NULL'))
if f.primary_key:
field_output.append(style.SQL_KEYWORD('PRIMARY KEY'))
elif f.unique:
field_output.append(style.SQL_KEYWORD('UNIQUE'))
elif f.has_default():
field_output.append(style.SQL_KEYWORD('DEFAULT'))
field_output.append("'%s'" % f.get_default())
if tablespace and f.unique:
# We must specify the index tablespace inline, because we
# won't be generating a CREATE INDEX statement for this field.
tablespace_sql = self.connection.ops.tablespace_sql(
tablespace, inline=True)
if tablespace_sql:
field_output.append(tablespace_sql)
if f.rel:
ref_output, pending = self.sql_for_inline_foreign_key_references(
f, known_models, style)
if pending:
pending_references.setdefault(f.rel.to, []).append(
(model, f))
else:
field_output.extend(ref_output)
table_output.append(' '.join(field_output))
for field_constraints in opts.unique_together:
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' %
", ".join(
[style.SQL_FIELD(qn(opts.get_field(f).column))
for f in field_constraints]))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' +
style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
full_statement.append(
' %s%s' % (line, i < len(table_output) - 1 and ',' or ''))
full_statement.append(')')
if opts.db_tablespace:
tablespace_sql = self.connection.ops.tablespace_sql(
opts.db_tablespace)
if tablespace_sql:
full_statement.append(tablespace_sql)
full_statement.append(';')
final_output.append('\n'.join(full_statement))
if opts.has_auto_field:
# Add any extra SQL needed to support auto-incrementing primary
# keys.
auto_column = opts.auto_field.db_column or opts.auto_field.name
autoinc_sql = self.connection.ops.autoinc_sql(opts.db_table,
auto_column)
if autoinc_sql:
for stmt in autoinc_sql:
final_output.append(stmt)
return final_output, pending_references | 0.239705 | 0.111169 |
import asyncio
from datetime import datetime
import logging

from OSMPythonTools.nominatim import Nominatim
from googlemaps import Client

# Default HTTP timeout (seconds) for the Google Maps client.
TIMEOUT = 10

_LOGGER: logging.Logger = logging.getLogger(__package__)

# A (latitude, longitude) pair.
Coordinates = tuple[float, float]
class JourneyApiClient:
"""API client for the OSM Nominatim and Google Travel Time APIs"""
def __init__(self, osm_username: str, gmaps_token: str) -> None:
self._osm_username = osm_username
self._gmaps_token = gmaps_token
self._gmaps_client = Client(gmaps_token, timeout=10)
self.nominatim = Nominatim(
userAgent=f"Journey Home Assistant Integration ({self._osm_username})"
)
def get_address(self, location: Coordinates):
"""
Get the address based on a (lat, long) tuple.
This function is used as a sync wrapper to the Nominatim API
"""
try:
result = self.nominatim.query(*location, reverse=True, zoom=16)
return result
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Failed to perform reverse geocoding - %s", exception)
return None
async def async_get_address(self, location: Coordinates):
return await asyncio.get_event_loop().run_in_executor(
None, self.get_address, location
)
def get_traveltime(self, origin: Coordinates, destination: Coordinates):
try:
result = self._gmaps_client.distance_matrix(
origins=[origin],
destinations=[destination],
mode="driving",
departure_time=datetime.now(),
)
return result["rows"][0]["elements"][0]
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Failed to get distances - %s", exception)
return None
async def async_get_traveltime(self, origin: Coordinates, destination: Coordinates):
return await asyncio.get_event_loop().run_in_executor(
None, self.get_traveltime, origin, destination
)
    async def test_credentials(self) -> bool:
        """Check the Google Maps API credentials"""
        # NOTE(review): annotated ``-> bool`` but test_api returns None, so
        # this coroutine resolves to None on success and raises on failure —
        # callers should rely on the exception, or the annotation/return
        # should be fixed.

        def test_api():
            # Fixed sample coordinates near Greenwich; any valid request
            # suffices to exercise the API key.
            try:
                self._gmaps_client.distance_matrix(
                    origins=[(51.478, 0)], destinations=[(51.748, 0.02)], mode="driving"
                )
            except Exception as ex:
                _LOGGER.error("Failed to validate credentials - %s", ex)
                raise
return await asyncio.get_event_loop().run_in_executor(None, test_api) | custom_components/journey/api.py | import asyncio
from datetime import datetime
import logging
from OSMPythonTools.nominatim import Nominatim
from googlemaps import Client
TIMEOUT = 10
_LOGGER: logging.Logger = logging.getLogger(__package__)
Coordinates = tuple[float, float]
class JourneyApiClient:
"""API client for the OSM Nominatim and Google Travel Time APIs"""
def __init__(self, osm_username: str, gmaps_token: str) -> None:
self._osm_username = osm_username
self._gmaps_token = gmaps_token
self._gmaps_client = Client(gmaps_token, timeout=10)
self.nominatim = Nominatim(
userAgent=f"Journey Home Assistant Integration ({self._osm_username})"
)
def get_address(self, location: Coordinates):
"""
Get the address based on a (lat, long) tuple.
This function is used as a sync wrapper to the Nominatim API
"""
try:
result = self.nominatim.query(*location, reverse=True, zoom=16)
return result
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Failed to perform reverse geocoding - %s", exception)
return None
async def async_get_address(self, location: Coordinates):
return await asyncio.get_event_loop().run_in_executor(
None, self.get_address, location
)
def get_traveltime(self, origin: Coordinates, destination: Coordinates):
try:
result = self._gmaps_client.distance_matrix(
origins=[origin],
destinations=[destination],
mode="driving",
departure_time=datetime.now(),
)
return result["rows"][0]["elements"][0]
except Exception as exception: # pylint: disable=broad-except
_LOGGER.error("Failed to get distances - %s", exception)
return None
async def async_get_traveltime(self, origin: Coordinates, destination: Coordinates):
return await asyncio.get_event_loop().run_in_executor(
None, self.get_traveltime, origin, destination
)
async def test_credentials(self) -> bool:
"""Check the Google Maps API credentials"""
def test_api():
try:
self._gmaps_client.distance_matrix(
origins=[(51.478, 0)], destinations=[(51.748, 0.02)], mode="driving"
)
except Exception as ex:
_LOGGER.error("Failed to validate credentials - %s", ex)
raise
return await asyncio.get_event_loop().run_in_executor(None, test_api) | 0.620277 | 0.208945 |
import json
from pathlib import Path

import bids
from bids import BIDSLayout

# Make pybids include the leading dot in the "extension" entity.
bids.config.set_option("extension_initial_dot", True)

# BIDS filename template for anat (T1w/T2w) and dwi files.
pattern = (
    "sub-{subject}[/ses-{session}]/{datatype<anat|dwi>}/sub-{subject}"
    "[_ses-{session}][_acq-{acquisition}][_dir-{direction}][_run-{run}]"
    "_{suffix<T[12]w|dwi>}{extension<.bval|.bvec|.json|.nii.gz>|.nii.gz}"
)
# BIDS directory template for one subject/modality.
dir_pattern = "sub-{subject}[/ses-{session}]/{datatype<anat|dwi>}/"
# HCP "unprocessed" folder name -> BIDS datatype.
modality_dict = dict(Diffusion="dwi", T1w_MPR1="anat")
# HCP diffusion direction-count label -> BIDS run index.
run_dict = dict(dir95="1", dir96="2", dir97="3")
def _mkdir(layout, subject, modality, include_ses):
    """Create the BIDS output directory for one subject/modality pair.

    Builds ``sub-{subject}[/ses-1]/{anat|dwi}/`` from ``dir_pattern`` and
    creates it (including parents) if it does not already exist.
    """
    entities = {"subject": subject, "datatype": modality_dict[modality]}
    if include_ses:
        entities["session"] = "1"
    target = layout.build_path(entities, dir_pattern, validate=False)
    Path(target).mkdir(parents=True, exist_ok=True)
def convert(input_path, output_path, include_ses=False):
in_path = Path(input_path)
if not in_path.is_dir():
msg = f"{input_path} is not a valid directory."
raise ValueError(msg)
# Make output path
out_path = Path(output_path)
if not out_path.is_dir():
out_path.mkdir(parents=True)
# Generate dataset_description.json
data = dict(Name="hcp", BIDSVersion="1.4.0", DatasetType="raw")
with open(out_path / "dataset_description.json", "w") as f:
json.dump(data, f)
layout = BIDSLayout(out_path.absolute())
# Iterate through each subject folder
subject_folders = [x for x in in_path.iterdir() if x.is_dir()]
for subject_folder in subject_folders:
if not (subject_folder / "unprocessed/3T/").is_dir():
continue
# 6 digit sub id in str form
subject = subject_folder.name
modality_folders = [
x for x in (subject_folder / "unprocessed/3T/").iterdir() if x.is_dir()
]
for modality_folder in modality_folders:
modality = modality_folder.name
# Make bids output folders
_mkdir(layout, subject, modality, include_ses)
if modality == "T1w_MPR1":
entities = dict(
subject=subject,
datatype=modality_dict[modality],
extension=".nii.gz",
suffix="T1w",
)
if include_ses:
entities["session"] = "1"
new_fname = layout.build_path(entities, pattern)
# Rename old files
old_fname = list(modality_folder.iterdir())[0]
old_fname.rename(new_fname)
elif modality == "Diffusion":
for fname in modality_folder.iterdir():
splits = fname.name.split(".")
extension = "." + splits[-1] # Get extension
if extension == ".gz":
extension = ".nii.gz"
splits = splits[0].split("_")
direction = splits[-1] # Direction. RL or LR
run = run_dict[splits[-2]] # Run number
entities = dict(
subject=subject,
datatype=modality_dict[modality],
direction=direction,
run=run,
extension=extension,
suffix="dwi",
)
if include_ses:
entities["session"] = "1"
new_fname = layout.build_path(entities, pattern)
Path(fname).rename(new_fname)
# Make json sidecar
if extension == ".nii.gz":
entities["extension"] = ".json"
if direction == "LR":
phase = "i-"
elif direction == "RL":
phase = "i"
# TotalReadoutTime = EffectiveEchoSpacing * (EPI factor - 1) (which is 144)
sidecar = dict(
EffectiveEchoSpacing=0.00078,
TotalReadoutTime=0.11154,
PhaseEncodingDirection=phase,
)
with open(layout.build_path(entities, pattern), "w") as f:
json.dump(sidecar, f)
# Remove all folders
modality_folder.rmdir()
for folder in list(subject_folder.rglob("*"))[::-1]:
folder.rmdir()
subject_folder.rmdir()
if not input_path == output_path:
in_path.rmdir() | hcp2bids/convert.py | import json
from pathlib import Path
import bids
from bids import BIDSLayout
bids.config.set_option("extension_initial_dot", True)
pattern = (
"sub-{subject}[/ses-{session}]/{datatype<anat|dwi>}/sub-{subject}"
"[_ses-{session}][_acq-{acquisition}][_dir-{direction}][_run-{run}]"
"_{suffix<T[12]w|dwi>}{extension<.bval|.bvec|.json|.nii.gz>|.nii.gz}"
)
dir_pattern = "sub-{subject}[/ses-{session}]/{datatype<anat|dwi>}/"
modality_dict = dict(Diffusion="dwi", T1w_MPR1="anat")
run_dict = dict(dir95="1", dir96="2", dir97="3")
def _mkdir(layout, subject, modality, include_ses):
entities = dict(subject=subject, datatype=modality_dict[modality])
if include_ses:
entities["session"] = "1"
dir_name = Path(layout.build_path(entities, dir_pattern, validate=False))
dir_name.mkdir(parents=True, exist_ok=True)
def convert(input_path, output_path, include_ses=False):
in_path = Path(input_path)
if not in_path.is_dir():
msg = f"{input_path} is not a valid directory."
raise ValueError(msg)
# Make output path
out_path = Path(output_path)
if not out_path.is_dir():
out_path.mkdir(parents=True)
# Generate dataset_description.json
data = dict(Name="hcp", BIDSVersion="1.4.0", DatasetType="raw")
with open(out_path / "dataset_description.json", "w") as f:
json.dump(data, f)
layout = BIDSLayout(out_path.absolute())
# Iterate through each subject folder
subject_folders = [x for x in in_path.iterdir() if x.is_dir()]
for subject_folder in subject_folders:
if not (subject_folder / "unprocessed/3T/").is_dir():
continue
# 6 digit sub id in str form
subject = subject_folder.name
modality_folders = [
x for x in (subject_folder / "unprocessed/3T/").iterdir() if x.is_dir()
]
for modality_folder in modality_folders:
modality = modality_folder.name
# Make bids output folders
_mkdir(layout, subject, modality, include_ses)
if modality == "T1w_MPR1":
entities = dict(
subject=subject,
datatype=modality_dict[modality],
extension=".nii.gz",
suffix="T1w",
)
if include_ses:
entities["session"] = "1"
new_fname = layout.build_path(entities, pattern)
# Rename old files
old_fname = list(modality_folder.iterdir())[0]
old_fname.rename(new_fname)
elif modality == "Diffusion":
for fname in modality_folder.iterdir():
splits = fname.name.split(".")
extension = "." + splits[-1] # Get extension
if extension == ".gz":
extension = ".nii.gz"
splits = splits[0].split("_")
direction = splits[-1] # Direction. RL or LR
run = run_dict[splits[-2]] # Run number
entities = dict(
subject=subject,
datatype=modality_dict[modality],
direction=direction,
run=run,
extension=extension,
suffix="dwi",
)
if include_ses:
entities["session"] = "1"
new_fname = layout.build_path(entities, pattern)
Path(fname).rename(new_fname)
# Make json sidecar
if extension == ".nii.gz":
entities["extension"] = ".json"
if direction == "LR":
phase = "i-"
elif direction == "RL":
phase = "i"
# TotalReadoutTime = EffectiveEchoSpacing * (EPI factor - 1) (which is 144)
sidecar = dict(
EffectiveEchoSpacing=0.00078,
TotalReadoutTime=0.11154,
PhaseEncodingDirection=phase,
)
with open(layout.build_path(entities, pattern), "w") as f:
json.dump(sidecar, f)
# Remove all folders
modality_folder.rmdir()
for folder in list(subject_folder.rglob("*"))[::-1]:
folder.rmdir()
subject_folder.rmdir()
if not input_path == output_path:
in_path.rmdir() | 0.348423 | 0.202226 |
import sys
import pprint
import smtplib
import time
import uuid
from email.mime.text import MIMEText
from threading import Thread, Event
DEBUG = True
class Sender():
# TODO: Private, underscore
args = None
db = {}
dur = 10
emails = 10
def __init__(self, host, port):
self.init_db()
self.host = host
self.port = port
self.stopped = Event()
self.thread = Thread(target = self.send)
self.thread.daemon = True
def send(self):
delay = self.dur / self.emails
while not self.stopped.wait(delay):
avail = filter(lambda x: not x[1]['sent'], self.db.items())
if len(avail) > 0:
(ident, det) = avail.pop()
msg = det['msg']
if DEBUG:
print >>sys.stderr, "DEBUG: Sending email {0}".format(ident)
try:
sender = smtplib.SMTP(self.host, self.port)
sender.sendmail(msg['From'], msg['To'], msg.as_string())
sender.quit()
if DEBUG:
print >>sys.stderr, "SEND SUCCESS: {0}".format(ident)
self.db[ident]['sent'] = True
except:
if DEBUG:
print >>sys.stderr, "SEND FAILURE: {0}".format(ident)
def duration(self, d):
sent = len(filter(lambda x: x['sent'], self.db.values()))
if sent > 0:
return False
try:
self.dur = int(d)
except:
raise ValueError("What the hell is this: {0} of type {1}".format(d, type(d)))
return True
def get_db(self):
return self.db
def get_duration(self):
return self.dur
def get_from(self):
return '<EMAIL>'
def get_limit(self):
return str(len(self.db.items()))
def get_sent(self):
return str(len(filter(lambda x: x[1]['sent'], self.db.items())))
def get_subject(self, ident):
return "Generated test email {0}".format(ident)
def get_to(self):
return '<EMAIL>'
def init_db(self, num = 0):
while num < self.emails:
key = format(uuid.uuid4())
msg = MIMEText(key)
msg['From'] = self.get_from()
msg['To'] = self.get_to()
msg['Subject'] = self.get_subject(num)
value = {
'msg': msg,
'sent': False,
'received': False,
}
num += 1
self.db[key] = value
def limit(self, msgs):
sent = len(filter(lambda x: x['sent'], self.db.values()))
num = len(self.db.values())
if sent > 0:
return False
try:
if int(msgs) > self.emails:
self.emails = int(msgs)
self.init_db(num)
elif int(msgs) < self.emails:
newdb = { k: self.db[k] for k in self.db.keys()[0:int(msgs)] }
self.db = newdb
except:
raise ValueError("What the hell is this: {0} of type {1}".format(msgs, type(msgs)))
return True
# if msgs < len(lambda x: x['sent'], self.db):
# # TODO: Set message feedback
# return
#
#
# # TODO: stop sending and reset db to new limit
# self.emails = msgs
def running(self):
return not self.stopped.is_set()
def start(self):
self.thread.start()
def status(self):
return "running" if self.thread.is_alive() else "stopped"
def stop(self):
self.stopped.set()
if __name__ == "__main__":
s = Sender('localhost', 2255)
s.start()
while s.running():
print "DEBUG: Waiting for sends to finish {0}".format(len(filter(lambda e: not e['sent'], s.get_db().values())))
time.sleep(2)
if len(filter(lambda e: not e['sent'], s.get_db().values())) == 0:
s.stop() | sender.py |
import sys
import pprint
import smtplib
import time
import uuid
from email.mime.text import MIMEText
from threading import Thread, Event
DEBUG = True
class Sender():
# TODO: Private, underscore
args = None
db = {}
dur = 10
emails = 10
def __init__(self, host, port):
self.init_db()
self.host = host
self.port = port
self.stopped = Event()
self.thread = Thread(target = self.send)
self.thread.daemon = True
def send(self):
delay = self.dur / self.emails
while not self.stopped.wait(delay):
avail = filter(lambda x: not x[1]['sent'], self.db.items())
if len(avail) > 0:
(ident, det) = avail.pop()
msg = det['msg']
if DEBUG:
print >>sys.stderr, "DEBUG: Sending email {0}".format(ident)
try:
sender = smtplib.SMTP(self.host, self.port)
sender.sendmail(msg['From'], msg['To'], msg.as_string())
sender.quit()
if DEBUG:
print >>sys.stderr, "SEND SUCCESS: {0}".format(ident)
self.db[ident]['sent'] = True
except:
if DEBUG:
print >>sys.stderr, "SEND FAILURE: {0}".format(ident)
def duration(self, d):
sent = len(filter(lambda x: x['sent'], self.db.values()))
if sent > 0:
return False
try:
self.dur = int(d)
except:
raise ValueError("What the hell is this: {0} of type {1}".format(d, type(d)))
return True
def get_db(self):
return self.db
def get_duration(self):
return self.dur
def get_from(self):
return '<EMAIL>'
def get_limit(self):
return str(len(self.db.items()))
def get_sent(self):
return str(len(filter(lambda x: x[1]['sent'], self.db.items())))
def get_subject(self, ident):
return "Generated test email {0}".format(ident)
def get_to(self):
return '<EMAIL>'
def init_db(self, num = 0):
while num < self.emails:
key = format(uuid.uuid4())
msg = MIMEText(key)
msg['From'] = self.get_from()
msg['To'] = self.get_to()
msg['Subject'] = self.get_subject(num)
value = {
'msg': msg,
'sent': False,
'received': False,
}
num += 1
self.db[key] = value
def limit(self, msgs):
sent = len(filter(lambda x: x['sent'], self.db.values()))
num = len(self.db.values())
if sent > 0:
return False
try:
if int(msgs) > self.emails:
self.emails = int(msgs)
self.init_db(num)
elif int(msgs) < self.emails:
newdb = { k: self.db[k] for k in self.db.keys()[0:int(msgs)] }
self.db = newdb
except:
raise ValueError("What the hell is this: {0} of type {1}".format(msgs, type(msgs)))
return True
# if msgs < len(lambda x: x['sent'], self.db):
# # TODO: Set message feedback
# return
#
#
# # TODO: stop sending and reset db to new limit
# self.emails = msgs
def running(self):
return not self.stopped.is_set()
def start(self):
self.thread.start()
def status(self):
return "running" if self.thread.is_alive() else "stopped"
def stop(self):
self.stopped.set()
if __name__ == "__main__":
s = Sender('localhost', 2255)
s.start()
while s.running():
print "DEBUG: Waiting for sends to finish {0}".format(len(filter(lambda e: not e['sent'], s.get_db().values())))
time.sleep(2)
if len(filter(lambda e: not e['sent'], s.get_db().values())) == 0:
s.stop() | 0.128676 | 0.109825 |
import unittest
import os
import tensorflow as tf
import gpflow
from testing.gpflow_testcase import GPflowTestCase
class TestConfigParsing(GPflowTestCase):
def setUp(self):
directory = os.path.dirname(os.path.realpath(__file__))
f = os.path.join(directory, 'gpflowrc_test.txt')
self.conf = gpflow._settings.read_config_file(f)
self.settings = gpflow._settings.namedtuplify(self.conf._sections)
def test(self):
self.assertTrue(all([
self.settings.first_section.a_bool is False,
self.settings.first_section.a_float == 1e-3,
self.settings.first_section.a_string == 'hello',
self.settings.first_section.a_type is tf.float64,
self.settings.second_section.a_bool is True,
self.settings.second_section.another_bool is True,
self.settings.second_section.yet_another_bool is False]))
def test_config_not_found(self):
"""GPflow config cannot be found."""
filename = "./config_not_found.txt"
self.assertRaises(RuntimeError, gpflow._settings.read_config_file, filename)
def test_parser(self):
with self.assertRaises(ValueError):
gpflow._settings.parse(None)
with self.assertRaises(ValueError):
gpflow._settings.parse(12)
with self.assertRaises(ValueError):
gpflow._settings.parse([])
self.assertTrue(gpflow._settings.parse('false') is False)
self.assertTrue(gpflow._settings.parse('False') is False)
self.assertTrue(gpflow._settings.parse('true') is True)
self.assertTrue(gpflow._settings.parse('True') is True)
self.assertTrue(gpflow._settings.parse('int32') is tf.int32)
self.assertTrue(gpflow._settings.parse('32') is 32)
self.assertTrue(gpflow._settings.parse('32.') == 32.)
self.assertTrue(gpflow._settings.parse('int') == 'int')
self.assertTrue(gpflow._settings.parse('hello') == 'hello')
self.assertTrue(gpflow._settings.parse('1E2') == 1e2)
self.assertTrue(gpflow._settings.parse('1e-9') == 1e-9)
class TestSettingsManager(GPflowTestCase):
def testRaises(self):
with self.assertRaises(AttributeError):
gpflow.settings.undefined_setting_to_raise_error
def testMutability(self):
orig = gpflow.settings.verbosity.hmc_verb
gpflow.settings.verbosity.hmc_verb = False
self.assertTrue(gpflow.settings.verbosity.hmc_verb is False)
gpflow.settings.verbosity.hmc_verb = True
self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
gpflow.settings.verbosity.hmc_verb = orig
def testContextManager(self):
orig = gpflow.settings.verbosity.hmc_verb
gpflow.settings.verbosity.hmc_verb = True
config = gpflow.settings.get_settings()
config.verbosity.hmc_verb = False
self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
with gpflow.settings.temp_settings(config):
self.assertTrue(gpflow.settings.verbosity.hmc_verb is False)
self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
gpflow.settings.verbosity.hmc_verb = orig
if __name__ == "__main__":
unittest.main() | GPflow/testing/test_config.py | import unittest
import os
import tensorflow as tf
import gpflow
from testing.gpflow_testcase import GPflowTestCase
class TestConfigParsing(GPflowTestCase):
def setUp(self):
directory = os.path.dirname(os.path.realpath(__file__))
f = os.path.join(directory, 'gpflowrc_test.txt')
self.conf = gpflow._settings.read_config_file(f)
self.settings = gpflow._settings.namedtuplify(self.conf._sections)
def test(self):
self.assertTrue(all([
self.settings.first_section.a_bool is False,
self.settings.first_section.a_float == 1e-3,
self.settings.first_section.a_string == 'hello',
self.settings.first_section.a_type is tf.float64,
self.settings.second_section.a_bool is True,
self.settings.second_section.another_bool is True,
self.settings.second_section.yet_another_bool is False]))
def test_config_not_found(self):
"""GPflow config cannot be found."""
filename = "./config_not_found.txt"
self.assertRaises(RuntimeError, gpflow._settings.read_config_file, filename)
def test_parser(self):
with self.assertRaises(ValueError):
gpflow._settings.parse(None)
with self.assertRaises(ValueError):
gpflow._settings.parse(12)
with self.assertRaises(ValueError):
gpflow._settings.parse([])
self.assertTrue(gpflow._settings.parse('false') is False)
self.assertTrue(gpflow._settings.parse('False') is False)
self.assertTrue(gpflow._settings.parse('true') is True)
self.assertTrue(gpflow._settings.parse('True') is True)
self.assertTrue(gpflow._settings.parse('int32') is tf.int32)
self.assertTrue(gpflow._settings.parse('32') is 32)
self.assertTrue(gpflow._settings.parse('32.') == 32.)
self.assertTrue(gpflow._settings.parse('int') == 'int')
self.assertTrue(gpflow._settings.parse('hello') == 'hello')
self.assertTrue(gpflow._settings.parse('1E2') == 1e2)
self.assertTrue(gpflow._settings.parse('1e-9') == 1e-9)
class TestSettingsManager(GPflowTestCase):
def testRaises(self):
with self.assertRaises(AttributeError):
gpflow.settings.undefined_setting_to_raise_error
def testMutability(self):
orig = gpflow.settings.verbosity.hmc_verb
gpflow.settings.verbosity.hmc_verb = False
self.assertTrue(gpflow.settings.verbosity.hmc_verb is False)
gpflow.settings.verbosity.hmc_verb = True
self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
gpflow.settings.verbosity.hmc_verb = orig
def testContextManager(self):
orig = gpflow.settings.verbosity.hmc_verb
gpflow.settings.verbosity.hmc_verb = True
config = gpflow.settings.get_settings()
config.verbosity.hmc_verb = False
self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
with gpflow.settings.temp_settings(config):
self.assertTrue(gpflow.settings.verbosity.hmc_verb is False)
self.assertTrue(gpflow.settings.verbosity.hmc_verb is True)
gpflow.settings.verbosity.hmc_verb = orig
if __name__ == "__main__":
unittest.main() | 0.627267 | 0.539711 |
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from mainWindow import Ui_MainWindow
from UDP.UDP_Server import UDP_ServerThread
from UDP.UDP_Client import UDP_ClientThread
from worker import Worker
from TriDisplay import TriModel
from plot import Plot
from Utils.traces.trace import *
from constants import *
import datetime
import time
import queue
import numpy as np
class mainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
setVerbosity("debug")
#Button actions
self.ui.pushButton_en_server.clicked.connect(self.pushButton_serverEnable_onClicked)
self.ui.pushButton_en_client.clicked.connect(self.pushButton_clientEnable_onClicked)
self.ui.pushButton_chart_orientation.clicked.connect(self.pushButton_chartOrientation_onClicked)
self.ui.pushButton_3d_model.clicked.connect(self.pushButton_3D_Model_onClicked)
self.ui.pushButton_angle_set.clicked.connect(self.pushButton_angleSetPID_onClicked)
self.ui.pushButton_speed_set.clicked.connect(self.pushButton_speedSetPID_onClicked)
self.ui.pushButton_angle_zero.clicked.connect(self.pushButton_angleZeroPID_onClicked)
self.ui.pushButton_speed_zero.clicked.connect(self.pushButton_speedZeroPID_onClicked)
self.ui.pushButton_control_set.clicked.connect(self.pushButton_controlSet_onClicked)
#Initial value
self.ui.doubleSpinBox_angle_kp.setValue(ANGLE_KP_CONS)
self.ui.doubleSpinBox_angle_ki.setValue(ANGLE_KI_CONS)
self.ui.doubleSpinBox_angle_kd.setValue(ANGLE_KD_CONS)
self.ui.doubleSpinBox_angle_kp_Aggr.setValue(ANGLE_KP_AGGR)
self.ui.doubleSpinBox_angle_ki_Aggr.setValue(ANGLE_KI_AGGR)
self.ui.doubleSpinBox_angle_kd_Aggr.setValue(ANGLE_KD_AGGR)
self.ui.doubleSpinBox_angle_setpoint.setValue(CALIBRATED_ZERO_ANGLE)
self.ui.doubleSpinBox_angle_max.setValue(ANGLE_LIMIT)
self.ui.doubleSpinBox_speed_kp.setValue(SPEED_KP)
self.ui.doubleSpinBox_speed_ki.setValue(SPEED_KI)
self.ui.doubleSpinBox_speed_kd.setValue(SPEED_KD)
self.serverUDPQueue = queue.Queue(4)
self.threads = []
self.worker = None
self.clientUDP = None
self.serverUDP = None
def pushButton_serverEnable_onClicked(self):
#Create and start UDP server thread
port = int(self.ui.lineEdit_port_server.text())
if self.serverUDP != None and self.worker != None:
self.worker.terminate()
self.serverUDP.join(timeout=1)
self.worker = Worker(self)
self.serverUDP = UDP_ServerThread(name=SERVER_UDP_NAME, queue=self.serverUDPQueue, UDP_PORT=port)
self.serverUDP.daemon = True
self.threads.append(self.serverUDP)
self.serverUDP.start()
self.worker.start()
def pushButton_clientEnable_onClicked(self):
#Create and start UDP client thread
ip = self.ui.lineEdit_ip_client.text()
port = int(self.ui.lineEdit_port_client.text())
if self.clientUDP != None:
self.clientUDP.join(timeout=1)
self.clientUDP = UDP_ClientThread(name=CLIENT_UDP_NAME, UDP_IP=ip, UDP_PORT=port)
self.clientUDP.daemon = True
self.threads.append(self.clientUDP)
self.clientUDP.start()
def pushButton_chartOrientation_onClicked(self):
self.plot = Plot(self)
self.plot.start()
def pushButton_3D_Model_onClicked(self):
self.triModel = TriModel(self)
self.triModel.start()
def pushButton_angleSetPID_onClicked(self):
angleKpCons = self.ui.doubleSpinBox_angle_kp.value()
angleKiCons = self.ui.doubleSpinBox_angle_ki.value()
angleKdCons = self.ui.doubleSpinBox_angle_kd.value()
angleKpAggr = self.ui.doubleSpinBox_angle_kp_Aggr.value()
angleKiAggr = self.ui.doubleSpinBox_angle_ki_Aggr.value()
angleKdAggr = self.ui.doubleSpinBox_angle_kd_Aggr.value()
angleSetpoint = self.ui.doubleSpinBox_angle_setpoint.value()
angleMax = self.ui.doubleSpinBox_angle_max.value()
#(module),(data1)(data2),(data3)(...)(#)
msg = str(ANGLE_PID_CONS) + "," + \
str(angleKpCons) + "," + \
str(angleKiCons) + "," + \
str(angleKdCons) + "," + \
str(angleSetpoint) + "," + \
str(angleMax) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_speedSetPID_onClicked(self):
speedKpCons = self.ui.doubleSpinBox_speed_kp.value()
speedKiCons = self.ui.doubleSpinBox_speed_ki.value()
speedKdCons = self.ui.doubleSpinBox_speed_kd.value()
#(module),(data1)(data2),(data3)(...)(#)
msg = CMD_PID_SPEED + "," + \
str(speedKpCons) + "," + \
str(speedKiCons) + "," + \
str(speedKdCons) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_angleZeroPID_onClicked(self):
#(module),(data1)(data2),(data3)(...)(#)
msg = str(ANGLE_PID_CONS) + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_speedZeroPID_onClicked(self):
#(module),(data1)(data2),(data3)(...)(#)
msg = CMD_PID_SPEED + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_controlSet_onClicked(self):
enableArduino = self.ui.checkBox_en_arduino.checkState()
print(enableArduino)
enableCV = self.ui.checkBox_en_cv.checkState()
print(enableCV)
#(module),(data1)(data2),(data3)(...)(#)
msg = str(STARTED) + "," + \
str(enableArduino) + "," + \
str(enableCV) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
myapp = mainWindow()
myapp.show()
sys.exit(app.exec_()) | GUI/WifiMonitor/UI.py | import sys
from PyQt5 import QtGui, QtCore, QtWidgets
import pyqtgraph as pg
from mainWindow import Ui_MainWindow
from UDP.UDP_Server import UDP_ServerThread
from UDP.UDP_Client import UDP_ClientThread
from worker import Worker
from TriDisplay import TriModel
from plot import Plot
from Utils.traces.trace import *
from constants import *
import datetime
import time
import queue
import numpy as np
class mainWindow(QtWidgets.QMainWindow):
def __init__(self, parent=None):
QtWidgets.QMainWindow.__init__(self, parent)
self.ui = Ui_MainWindow()
self.ui.setupUi(self)
setVerbosity("debug")
#Button actions
self.ui.pushButton_en_server.clicked.connect(self.pushButton_serverEnable_onClicked)
self.ui.pushButton_en_client.clicked.connect(self.pushButton_clientEnable_onClicked)
self.ui.pushButton_chart_orientation.clicked.connect(self.pushButton_chartOrientation_onClicked)
self.ui.pushButton_3d_model.clicked.connect(self.pushButton_3D_Model_onClicked)
self.ui.pushButton_angle_set.clicked.connect(self.pushButton_angleSetPID_onClicked)
self.ui.pushButton_speed_set.clicked.connect(self.pushButton_speedSetPID_onClicked)
self.ui.pushButton_angle_zero.clicked.connect(self.pushButton_angleZeroPID_onClicked)
self.ui.pushButton_speed_zero.clicked.connect(self.pushButton_speedZeroPID_onClicked)
self.ui.pushButton_control_set.clicked.connect(self.pushButton_controlSet_onClicked)
#Initial value
self.ui.doubleSpinBox_angle_kp.setValue(ANGLE_KP_CONS)
self.ui.doubleSpinBox_angle_ki.setValue(ANGLE_KI_CONS)
self.ui.doubleSpinBox_angle_kd.setValue(ANGLE_KD_CONS)
self.ui.doubleSpinBox_angle_kp_Aggr.setValue(ANGLE_KP_AGGR)
self.ui.doubleSpinBox_angle_ki_Aggr.setValue(ANGLE_KI_AGGR)
self.ui.doubleSpinBox_angle_kd_Aggr.setValue(ANGLE_KD_AGGR)
self.ui.doubleSpinBox_angle_setpoint.setValue(CALIBRATED_ZERO_ANGLE)
self.ui.doubleSpinBox_angle_max.setValue(ANGLE_LIMIT)
self.ui.doubleSpinBox_speed_kp.setValue(SPEED_KP)
self.ui.doubleSpinBox_speed_ki.setValue(SPEED_KI)
self.ui.doubleSpinBox_speed_kd.setValue(SPEED_KD)
self.serverUDPQueue = queue.Queue(4)
self.threads = []
self.worker = None
self.clientUDP = None
self.serverUDP = None
def pushButton_serverEnable_onClicked(self):
#Create and start UDP server thread
port = int(self.ui.lineEdit_port_server.text())
if self.serverUDP != None and self.worker != None:
self.worker.terminate()
self.serverUDP.join(timeout=1)
self.worker = Worker(self)
self.serverUDP = UDP_ServerThread(name=SERVER_UDP_NAME, queue=self.serverUDPQueue, UDP_PORT=port)
self.serverUDP.daemon = True
self.threads.append(self.serverUDP)
self.serverUDP.start()
self.worker.start()
def pushButton_clientEnable_onClicked(self):
#Create and start UDP client thread
ip = self.ui.lineEdit_ip_client.text()
port = int(self.ui.lineEdit_port_client.text())
if self.clientUDP != None:
self.clientUDP.join(timeout=1)
self.clientUDP = UDP_ClientThread(name=CLIENT_UDP_NAME, UDP_IP=ip, UDP_PORT=port)
self.clientUDP.daemon = True
self.threads.append(self.clientUDP)
self.clientUDP.start()
def pushButton_chartOrientation_onClicked(self):
self.plot = Plot(self)
self.plot.start()
def pushButton_3D_Model_onClicked(self):
self.triModel = TriModel(self)
self.triModel.start()
def pushButton_angleSetPID_onClicked(self):
angleKpCons = self.ui.doubleSpinBox_angle_kp.value()
angleKiCons = self.ui.doubleSpinBox_angle_ki.value()
angleKdCons = self.ui.doubleSpinBox_angle_kd.value()
angleKpAggr = self.ui.doubleSpinBox_angle_kp_Aggr.value()
angleKiAggr = self.ui.doubleSpinBox_angle_ki_Aggr.value()
angleKdAggr = self.ui.doubleSpinBox_angle_kd_Aggr.value()
angleSetpoint = self.ui.doubleSpinBox_angle_setpoint.value()
angleMax = self.ui.doubleSpinBox_angle_max.value()
#(module),(data1)(data2),(data3)(...)(#)
msg = str(ANGLE_PID_CONS) + "," + \
str(angleKpCons) + "," + \
str(angleKiCons) + "," + \
str(angleKdCons) + "," + \
str(angleSetpoint) + "," + \
str(angleMax) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_speedSetPID_onClicked(self):
speedKpCons = self.ui.doubleSpinBox_speed_kp.value()
speedKiCons = self.ui.doubleSpinBox_speed_ki.value()
speedKdCons = self.ui.doubleSpinBox_speed_kd.value()
#(module),(data1)(data2),(data3)(...)(#)
msg = CMD_PID_SPEED + "," + \
str(speedKpCons) + "," + \
str(speedKiCons) + "," + \
str(speedKdCons) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_angleZeroPID_onClicked(self):
#(module),(data1)(data2),(data3)(...)(#)
msg = str(ANGLE_PID_CONS) + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_speedZeroPID_onClicked(self):
#(module),(data1)(data2),(data3)(...)(#)
msg = CMD_PID_SPEED + "," + \
str(0) + "," + \
str(0) + "," + \
str(0) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
def pushButton_controlSet_onClicked(self):
enableArduino = self.ui.checkBox_en_arduino.checkState()
print(enableArduino)
enableCV = self.ui.checkBox_en_cv.checkState()
print(enableCV)
#(module),(data1)(data2),(data3)(...)(#)
msg = str(STARTED) + "," + \
str(enableArduino) + "," + \
str(enableCV) + "#"
# Sending UDP packets...
if (self.clientUDP != None):
self.clientUDP.putMessage(msg)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
myapp = mainWindow()
myapp.show()
sys.exit(app.exec_()) | 0.227298 | 0.047914 |
from typing import List
from pandas import read_excel, DataFrame, Series, notnull, concat, isnull, \
ExcelFile
from stringcase import snakecase
from survey.constants import CATEGORY_SPLITTER
from survey.surveys.metadata import QuestionMetadata, AttributeMetadata
from survey.surveys.survey_creators import SurveyCreator
class PollfishCreator(SurveyCreator):
def read_survey_data(self):
"""
Read the raw survey data file and do any custom pre-cleaning.
"""
data = read_excel(self.survey_data_fn, sheet_name='Individuals')
# fill in blank columns
new_cols = []
for col in data.columns:
if col.startswith('Unnamed: '):
new_cols.append(new_cols[-1])
else:
new_cols.append(col)
data.columns = new_cols
# replace category values in original survey data
questions = read_excel(self.metadata_fn, sheet_name='questions')
attributes = read_excel(self.metadata_fn, sheet_name='attributes')
questions = concat([questions, attributes])
orders = read_excel(self.metadata_fn, sheet_name='orders')
for _, question_data in questions.iterrows():
category_name = question_data['categories']
if isnull(category_name):
continue
question_cats = orders.loc[orders['category'] == category_name]
question_cats = question_cats.dropna(subset=['replace_value'])
if not len(question_cats):
continue
to_replace = question_cats.set_index('value')[
'replace_value'].to_dict()
type_name = question_data['type_name']
if isnull(question_data['repeat']):
if type_name in (
'SingleChoice', 'SingleChoiceQuestion',
'Likert', 'LikertQuestion',
'SingleCategory', 'SingleCategoryAttribute',
'MultiChoice', 'MultiChoiceQuestion'
):
data[question_data['text']] = data[
question_data['text']].replace(to_replace=to_replace)
else:
raise TypeError(f'Cannot do value replacement '
f'for type {type_name}')
else:
raise TypeError(f'Cannot do value replacement for repeat '
f'questions')
# pre-clean
if self.pre_clean is not None:
data = self.pre_clean(data)
self.survey_data = data
def read_metadata(self):
"""
Read the question, attribute and order metadata from the Excel
metadata file.
"""
metadata = ExcelFile(self.metadata_fn)
# read metadata
questions_metadata = read_excel(metadata, 'questions')
attributes_metadata = read_excel(metadata, 'attributes')
orders_metadata = read_excel(metadata, 'orders')
# replace `value` with `replace_value` where applicable
orders_metadata['value'] = orders_metadata.apply(
lambda row: row['replace_value'] if notnull(row['replace_value'])
else row['value'],
axis=1
)
# filter to unique(category, value)
orders_metadata['value'] = orders_metadata['value'].astype(str)
# convert to strings
orders_metadata = orders_metadata.drop_duplicates(
subset=['category', 'value'])
# filter to specified survey
if None not in (self.survey_id_col, self.survey_id):
questions_metadata = self._filter_to_survey(questions_metadata)
attributes_metadata = self._filter_to_survey(attributes_metadata)
orders_metadata = self._filter_to_survey(orders_metadata)
# check for clashes in question, attribute and category names
category_names = sorted(orders_metadata['category'].unique())
q_name_errors = []
for q_name in sorted(questions_metadata['name'].unique()):
if q_name in category_names:
q_name_errors.append(q_name)
if q_name_errors:
raise ValueError(
f'The following categories clash with question names. '
f'Rename questions or categories.\n{q_name_errors}'
)
a_name_errors = []
for a_name in sorted(attributes_metadata['name'].unique()):
if a_name in category_names:
a_name_errors.append(a_name)
if a_name_errors:
raise ValueError(
f'The following categories clash with attribute names. '
f'Rename attributes or categories.\n{a_name_errors}'
)
# create ordered choices for questions with shared choices
for meta in (attributes_metadata, questions_metadata):
for idx, row in meta.iterrows():
if notnull(row['categories']):
q_name = row['name']
order_value = row['categories']
if q_name == order_value:
continue # already assigned to the question
ordered_choices = orders_metadata[
orders_metadata['category'] == order_value
].copy()
ordered_choices['category'] = q_name
orders_metadata = concat([orders_metadata, ordered_choices])
# set member variables
self.questions_metadata = questions_metadata
self.attributes_metadata = attributes_metadata
self.orders_metadata = orders_metadata
def _get_single_column_data(
self,
question_metadata: QuestionMetadata
) -> Series:
"""
Find a single column using the QuestionMetadata and return as a Series.
"""
data = self.survey_data[question_metadata.text]
if (
question_metadata.type_name in (
'SingleChoice', 'SingleChoiceQuestion'
) and
question_metadata.text in self.questions_metadata[
'text'].to_list()
):
# replace other values
def replace_other(value):
if isnull(value):
return value
else:
if value in categories:
return value
else:
return 'Other'
metadata: Series = self.questions_metadata.loc[
self.questions_metadata['text'] == question_metadata.text
].iloc[0]
if 'other' in metadata.keys() and metadata['other'] == True:
# do replacement
category_name = metadata['categories']
categories = self.orders_metadata.loc[
self.orders_metadata['category'] == category_name,
'value'
].to_list()
data = data.apply(replace_other)
# update categories in orders_metadata
self.orders_metadata = self.orders_metadata.append(
Series({'category': question_metadata.name,
'value': 'Other'}),
ignore_index=True
)
return data
def _get_multi_choice_data(
self, question_metadata: QuestionMetadata
) -> Series:
# get data
data: DataFrame = self.survey_data[question_metadata.text]
# replace other values
if question_metadata.text in self.questions_metadata['text'].to_list():
# this if condition excludes repeated questions that have had their
# text changed
def replace_other(value):
if isnull(value):
return value
else:
if value in categories:
return value
else:
return 'Other'
metadata: Series = self.questions_metadata.loc[
self.questions_metadata['text'] == question_metadata.text
].iloc[0]
if 'other' in metadata.keys() and metadata['other'] == True:
# do replacement
category_name = metadata['categories']
categories = self.orders_metadata.loc[
self.orders_metadata['category'] == category_name,
'value'
].to_list()
data = data.applymap(replace_other)
# update categories in orders_metadata
self.orders_metadata = self.orders_metadata.append(
Series({'category': question_metadata.name,
'value': 'Other'}),
ignore_index=True
)
# merge multi-choice questions to single column
return data.apply(
lambda row: CATEGORY_SPLITTER.join(row.dropna().astype(str)),
axis=1
)
def _get_ranked_choice_data(
self, question_metadata: QuestionMetadata
) -> list:
column = self.survey_data[question_metadata.text]
def rank_choices(value: str):
choices_ranks = value.split(' | ')
choices = [rank_choice.split(':')[0]
for rank_choice in choices_ranks]
ranks = [int(rank_choice.split(':')[1])
for rank_choice in choices_ranks]
ranked_choices = [choice for rank, choice in
sorted(zip(ranks, choices))]
return CATEGORY_SPLITTER.join(
str(choice) for choice in ranked_choices
)
new_answers = column.map(rank_choices)
return new_answers
def convert_metadata_to_objects(self):
"""
Convert DataFrames of metadata to lists of Metadata objects.
"""
self.attribute_metadatas = AttributeMetadata.from_dataframe(
self.attributes_metadata
)
rows: List[Series] = []
for _, row in self.questions_metadata.iterrows():
if notnull(row['repeat']):
repeats = row['repeat'].split('\n')
for repeat in repeats:
if row['type_name'] in (
'Likert', 'LikertQuestion',
'SingleChoice', 'SingleChoiceQuestion',
'MultiChoice', 'MultiChoiceQuestion'
):
new_q_meta = row.copy(deep=True)
new_q_meta['name'] = (
row['name'] + '__' +
snakecase(repeat.title().replace(' ', ''))
)
new_q_meta['text'] = row['text'] + '\n' + repeat
rows.append(new_q_meta)
else:
print(row['name'])
raise TypeError(row['type_name'])
else:
rows.append(row)
self.question_metadatas = QuestionMetadata.from_dataframe(
DataFrame(rows)
)
def _create_repeated_single_column(self, metadata: Series):
text = metadata['text']
column = self.survey_data.loc[:, text]
def create_cols_data(val: str):
data_dict = {}
if notnull(val):
repeats_responses = val.split(' | ')
for repeat_response in repeats_responses:
repeat, response = repeat_response.split(':')
data_dict[f'{text}\n{repeat}'] = response
return data_dict
data = DataFrame(column.map(create_cols_data).to_list())
data.index = self.survey_data.index
self.survey_data = concat([self.survey_data, data], axis=1)
self.survey_data = self.survey_data.drop(text, axis=1)
    def _create_repeated_multi_column(self, metadata: Series):
        """
        Split a repeated multi-response column into one new column per
        (repeat, response) pair, then drop the original column.

        Raw cell format: 'repeat:resp1,resp2 | repeat2:resp3' - one repeat
        per ' | ' segment with comma-separated responses.
        """
        text = metadata['text']
        column: Series = self.survey_data.loc[:, text]
        # one record per (respondent, repeat, response)
        new_datas = []
        for ix, value in column.items():
            if isnull(value):
                continue  # respondent skipped the question
            repeats_responses = value.split(' | ')
            for repeat_responses in repeats_responses:
                repeat, str_responses = repeat_responses.split(':')
                responses = str_responses.split(',')
                for response in responses:
                    new_datas.append({
                        'index': ix,
                        'question': f'{text}\n{repeat}\n{response}',
                        'response': response
                    })
        new_data = DataFrame(new_datas)
        # pivot to one column per 'question\nrepeat\nresponse' key
        pt = new_data.groupby([
            'index', 'question'])['response'].first().unstack('question')
        # drop the trailing response line so columns become 'question\nrepeat'
        pt.columns = [
            '\n'.join(column.split('\n')[: -1])
            for column in pt.columns
        ]
        self.survey_data = concat([self.survey_data, pt], axis=1)
        self.survey_data = self.survey_data.drop(text, axis=1)
    def clean_survey_data(self):
        """
        Build the cleaned survey DataFrame.

        Copies attribute columns, expands repeated questions into
        per-repeat columns, collapses each question into a single column,
        and indexes the result by the respondent 'ID' column.

        :raise TypeError: if a repeated question has an unsupported type.
        """
        survey_data = self.survey_data
        new_survey_data = DataFrame()
        # copy attribute columns to new dataframe
        for amd in self.attribute_metadatas:
            new_survey_data[amd.text] = survey_data[amd.text]
        # expand columns tagged as repeated in the metadata (mutates
        # self.survey_data in place)
        for _, row in self.questions_metadata.iterrows():
            if notnull(row['repeat']):
                if row['type_name'] in (
                    'Likert', 'LikertQuestion',
                    'SingleChoice', 'SingleChoiceQuestion'
                ):
                    self._create_repeated_single_column(row)
                elif row['type_name'] in (
                    'MultiChoice', 'MultiChoiceQuestion'
                ):
                    self._create_repeated_multi_column(row)
                else:
                    raise TypeError(f"Can't clean repeated {row['type_name']}")
        # create one new column per question metadata object
        for qmd in self.question_metadatas:
            if qmd.type_name not in ('MultiChoice', 'RankedChoice'):
                new_survey_data[qmd.text] = self._get_single_column_data(qmd)
            elif qmd.type_name == 'MultiChoice':
                new_survey_data[qmd.text] = self._get_multi_choice_data(qmd)
            elif qmd.type_name == 'RankedChoice':
                new_survey_data[qmd.text] = self._get_ranked_choice_data(qmd)
        # set index of respondent id
        new_survey_data.index = self.survey_data['ID']
        self.survey_data = new_survey_data
from pandas import read_excel, DataFrame, Series, notnull, concat, isnull, \
ExcelFile
from stringcase import snakecase
from survey.constants import CATEGORY_SPLITTER
from survey.surveys.metadata import QuestionMetadata, AttributeMetadata
from survey.surveys.survey_creators import SurveyCreator
class PollfishCreator(SurveyCreator):
    def read_survey_data(self):
        """
        Read the raw survey data file and do any custom pre-cleaning.

        Forward-fills unnamed (merged) header columns, applies the
        value -> replace_value mapping from the 'orders' metadata sheet to
        every mapped question/attribute column, then runs the optional
        pre_clean hook.

        :raise TypeError: if value replacement is requested for a repeated
            question or an unsupported question type.
        """
        data = read_excel(self.survey_data_fn, sheet_name='Individuals')
        # fill in blank columns: pandas names merged/blank header cells
        # 'Unnamed: N'; reuse the previous real header for those
        new_cols = []
        for col in data.columns:
            if col.startswith('Unnamed: '):
                new_cols.append(new_cols[-1])
            else:
                new_cols.append(col)
        data.columns = new_cols
        # replace category values in original survey data
        questions = read_excel(self.metadata_fn, sheet_name='questions')
        attributes = read_excel(self.metadata_fn, sheet_name='attributes')
        questions = concat([questions, attributes])
        orders = read_excel(self.metadata_fn, sheet_name='orders')
        for _, question_data in questions.iterrows():
            category_name = question_data['categories']
            if isnull(category_name):
                continue  # question has no category mapping
            question_cats = orders.loc[orders['category'] == category_name]
            question_cats = question_cats.dropna(subset=['replace_value'])
            if not len(question_cats):
                continue  # nothing to replace for this category
            to_replace = question_cats.set_index('value')[
                'replace_value'].to_dict()
            type_name = question_data['type_name']
            if isnull(question_data['repeat']):
                if type_name in (
                    'SingleChoice', 'SingleChoiceQuestion',
                    'Likert', 'LikertQuestion',
                    'SingleCategory', 'SingleCategoryAttribute',
                    'MultiChoice', 'MultiChoiceQuestion'
                ):
                    data[question_data['text']] = data[
                        question_data['text']].replace(to_replace=to_replace)
                else:
                    raise TypeError(f'Cannot do value replacement '
                                    f'for type {type_name}')
            else:
                raise TypeError(f'Cannot do value replacement for repeat '
                                f'questions')
        # pre-clean hook supplied by the caller, if any
        if self.pre_clean is not None:
            data = self.pre_clean(data)
        self.survey_data = data
    def read_metadata(self):
        """
        Read the question, attribute and order metadata from the Excel
        metadata file.

        Applies replace_value overrides to the order values, filters all
        three sheets to the configured survey, validates that no category
        name clashes with a question or attribute name, and copies shared
        category orderings onto each question/attribute that references
        them.

        :raise ValueError: if a category name clashes with a question or
            attribute name.
        """
        metadata = ExcelFile(self.metadata_fn)
        # read metadata
        questions_metadata = read_excel(metadata, 'questions')
        attributes_metadata = read_excel(metadata, 'attributes')
        orders_metadata = read_excel(metadata, 'orders')
        # replace `value` with `replace_value` where applicable
        orders_metadata['value'] = orders_metadata.apply(
            lambda row: row['replace_value'] if notnull(row['replace_value'])
            else row['value'],
            axis=1
        )
        # convert values to strings
        orders_metadata['value'] = orders_metadata['value'].astype(str)
        # filter to unique (category, value) pairs
        orders_metadata = orders_metadata.drop_duplicates(
            subset=['category', 'value'])
        # filter to specified survey
        if None not in (self.survey_id_col, self.survey_id):
            questions_metadata = self._filter_to_survey(questions_metadata)
            attributes_metadata = self._filter_to_survey(attributes_metadata)
            orders_metadata = self._filter_to_survey(orders_metadata)
        # check for clashes in question, attribute and category names
        category_names = sorted(orders_metadata['category'].unique())
        q_name_errors = []
        for q_name in sorted(questions_metadata['name'].unique()):
            if q_name in category_names:
                q_name_errors.append(q_name)
        if q_name_errors:
            raise ValueError(
                f'The following categories clash with question names. '
                f'Rename questions or categories.\n{q_name_errors}'
            )
        a_name_errors = []
        for a_name in sorted(attributes_metadata['name'].unique()):
            if a_name in category_names:
                a_name_errors.append(a_name)
        if a_name_errors:
            raise ValueError(
                f'The following categories clash with attribute names. '
                f'Rename attributes or categories.\n{a_name_errors}'
            )
        # create ordered choices for questions with shared choices by
        # duplicating the shared ordering under the question's own name
        for meta in (attributes_metadata, questions_metadata):
            for idx, row in meta.iterrows():
                if notnull(row['categories']):
                    q_name = row['name']
                    order_value = row['categories']
                    if q_name == order_value:
                        continue  # already assigned to the question
                    ordered_choices = orders_metadata[
                        orders_metadata['category'] == order_value
                    ].copy()
                    ordered_choices['category'] = q_name
                    orders_metadata = concat([orders_metadata, ordered_choices])
        # set member variables
        self.questions_metadata = questions_metadata
        self.attributes_metadata = attributes_metadata
        self.orders_metadata = orders_metadata
def _get_single_column_data(
self,
question_metadata: QuestionMetadata
) -> Series:
"""
Find a single column using the QuestionMetadata and return as a Series.
"""
data = self.survey_data[question_metadata.text]
if (
question_metadata.type_name in (
'SingleChoice', 'SingleChoiceQuestion'
) and
question_metadata.text in self.questions_metadata[
'text'].to_list()
):
# replace other values
def replace_other(value):
if isnull(value):
return value
else:
if value in categories:
return value
else:
return 'Other'
metadata: Series = self.questions_metadata.loc[
self.questions_metadata['text'] == question_metadata.text
].iloc[0]
if 'other' in metadata.keys() and metadata['other'] == True:
# do replacement
category_name = metadata['categories']
categories = self.orders_metadata.loc[
self.orders_metadata['category'] == category_name,
'value'
].to_list()
data = data.apply(replace_other)
# update categories in orders_metadata
self.orders_metadata = self.orders_metadata.append(
Series({'category': question_metadata.name,
'value': 'Other'}),
ignore_index=True
)
return data
def _get_multi_choice_data(
self, question_metadata: QuestionMetadata
) -> Series:
# get data
data: DataFrame = self.survey_data[question_metadata.text]
# replace other values
if question_metadata.text in self.questions_metadata['text'].to_list():
# this if condition excludes repeated questions that have had their
# text changed
def replace_other(value):
if isnull(value):
return value
else:
if value in categories:
return value
else:
return 'Other'
metadata: Series = self.questions_metadata.loc[
self.questions_metadata['text'] == question_metadata.text
].iloc[0]
if 'other' in metadata.keys() and metadata['other'] == True:
# do replacement
category_name = metadata['categories']
categories = self.orders_metadata.loc[
self.orders_metadata['category'] == category_name,
'value'
].to_list()
data = data.applymap(replace_other)
# update categories in orders_metadata
self.orders_metadata = self.orders_metadata.append(
Series({'category': question_metadata.name,
'value': 'Other'}),
ignore_index=True
)
# merge multi-choice questions to single column
return data.apply(
lambda row: CATEGORY_SPLITTER.join(row.dropna().astype(str)),
axis=1
)
def _get_ranked_choice_data(
self, question_metadata: QuestionMetadata
) -> list:
column = self.survey_data[question_metadata.text]
def rank_choices(value: str):
choices_ranks = value.split(' | ')
choices = [rank_choice.split(':')[0]
for rank_choice in choices_ranks]
ranks = [int(rank_choice.split(':')[1])
for rank_choice in choices_ranks]
ranked_choices = [choice for rank, choice in
sorted(zip(ranks, choices))]
return CATEGORY_SPLITTER.join(
str(choice) for choice in ranked_choices
)
new_answers = column.map(rank_choices)
return new_answers
def convert_metadata_to_objects(self):
"""
Convert DataFrames of metadata to lists of Metadata objects.
"""
self.attribute_metadatas = AttributeMetadata.from_dataframe(
self.attributes_metadata
)
rows: List[Series] = []
for _, row in self.questions_metadata.iterrows():
if notnull(row['repeat']):
repeats = row['repeat'].split('\n')
for repeat in repeats:
if row['type_name'] in (
'Likert', 'LikertQuestion',
'SingleChoice', 'SingleChoiceQuestion',
'MultiChoice', 'MultiChoiceQuestion'
):
new_q_meta = row.copy(deep=True)
new_q_meta['name'] = (
row['name'] + '__' +
snakecase(repeat.title().replace(' ', ''))
)
new_q_meta['text'] = row['text'] + '\n' + repeat
rows.append(new_q_meta)
else:
print(row['name'])
raise TypeError(row['type_name'])
else:
rows.append(row)
self.question_metadatas = QuestionMetadata.from_dataframe(
DataFrame(rows)
)
def _create_repeated_single_column(self, metadata: Series):
text = metadata['text']
column = self.survey_data.loc[:, text]
def create_cols_data(val: str):
data_dict = {}
if notnull(val):
repeats_responses = val.split(' | ')
for repeat_response in repeats_responses:
repeat, response = repeat_response.split(':')
data_dict[f'{text}\n{repeat}'] = response
return data_dict
data = DataFrame(column.map(create_cols_data).to_list())
data.index = self.survey_data.index
self.survey_data = concat([self.survey_data, data], axis=1)
self.survey_data = self.survey_data.drop(text, axis=1)
    def _create_repeated_multi_column(self, metadata: Series):
        """
        Split a repeated multi-response column into one new column per
        (repeat, response) pair, then drop the original column.

        Raw cell format: 'repeat:resp1,resp2 | repeat2:resp3' - one repeat
        per ' | ' segment with comma-separated responses.
        """
        text = metadata['text']
        column: Series = self.survey_data.loc[:, text]
        # one record per (respondent, repeat, response)
        new_datas = []
        for ix, value in column.items():
            if isnull(value):
                continue  # respondent skipped the question
            repeats_responses = value.split(' | ')
            for repeat_responses in repeats_responses:
                repeat, str_responses = repeat_responses.split(':')
                responses = str_responses.split(',')
                for response in responses:
                    new_datas.append({
                        'index': ix,
                        'question': f'{text}\n{repeat}\n{response}',
                        'response': response
                    })
        new_data = DataFrame(new_datas)
        # pivot to one column per 'question\nrepeat\nresponse' key
        pt = new_data.groupby([
            'index', 'question'])['response'].first().unstack('question')
        # drop the trailing response line so columns become 'question\nrepeat'
        pt.columns = [
            '\n'.join(column.split('\n')[: -1])
            for column in pt.columns
        ]
        self.survey_data = concat([self.survey_data, pt], axis=1)
        self.survey_data = self.survey_data.drop(text, axis=1)
    def clean_survey_data(self):
        """
        Build the cleaned survey DataFrame.

        Copies attribute columns, expands repeated questions into
        per-repeat columns, collapses each question into a single column,
        and indexes the result by the respondent 'ID' column.

        :raise TypeError: if a repeated question has an unsupported type.
        """
        survey_data = self.survey_data
        new_survey_data = DataFrame()
        # copy attribute columns to new dataframe
        for amd in self.attribute_metadatas:
            new_survey_data[amd.text] = survey_data[amd.text]
        # expand columns tagged as repeated in the metadata (mutates
        # self.survey_data in place)
        for _, row in self.questions_metadata.iterrows():
            if notnull(row['repeat']):
                if row['type_name'] in (
                    'Likert', 'LikertQuestion',
                    'SingleChoice', 'SingleChoiceQuestion'
                ):
                    self._create_repeated_single_column(row)
                elif row['type_name'] in (
                    'MultiChoice', 'MultiChoiceQuestion'
                ):
                    self._create_repeated_multi_column(row)
                else:
                    raise TypeError(f"Can't clean repeated {row['type_name']}")
        # create one new column per question metadata object
        for qmd in self.question_metadatas:
            if qmd.type_name not in ('MultiChoice', 'RankedChoice'):
                new_survey_data[qmd.text] = self._get_single_column_data(qmd)
            elif qmd.type_name == 'MultiChoice':
                new_survey_data[qmd.text] = self._get_multi_choice_data(qmd)
            elif qmd.type_name == 'RankedChoice':
                new_survey_data[qmd.text] = self._get_ranked_choice_data(qmd)
        # set index of respondent id
        new_survey_data.index = self.survey_data['ID']
        self.survey_data = new_survey_data
from FeatureCloud.app.engine.app import AppState, app_state, Role, LogLevel
from federated_dca.utils import load_params, trainInstince, average_model_params
import bios
@app_state('initial', Role.BOTH)
class InitialState(AppState):
    """Load the config, build the local trainer and sync the initial model."""

    def register(self):
        self.register_transition('train', Role.BOTH)

    def run(self):
        # both roles read the same config file and build a local trainer
        self.config = bios.read('/mnt/input/config.yml')['fc_dca']
        train_instince = trainInstince(self.config)
        if self.is_coordinator:
            self.log('Send initial Model to Clients')
            self.broadcast_data(train_instince.model.state_dict())
        init_model_state = self.await_data()
        if not self.is_coordinator:
            self.log(f'Received initial Model state')
            train_instince.model.load_state_dict(init_model_state)
        self.store('train_instince', train_instince)
        return 'train'
@app_state('train', Role.BOTH)
class TrainState(AppState):
    """Run local training and ship the resulting weights to the coordinator."""

    def register(self):
        self.register_transition('aggregate', Role.COORDINATOR)
        self.register_transition('obtain', Role.PARTICIPANT)
        self.register_transition('terminal')

    def run(self):
        trainer = self.load('train_instince')
        trainer.train(self.update, self.log, self.id)
        model_weights = trainer.get_weights()
        self.log(f'Send Model weights')
        self.send_data_to_coordinator(model_weights)
        if trainer.finished_training:
            trainer.finish()
            return 'terminal'
        # coordinator aggregates; participants wait for the update
        return 'aggregate' if self.is_coordinator else 'obtain'
@app_state('aggregate', Role.COORDINATOR)
class GlobalAggregate(AppState):
    """Average the clients' model weights and broadcast the result."""

    def register(self):
        self.register_transition('obtain', Role.COORDINATOR)

    def run(self):
        model_states = self.gather_data()
        # fixed typo in log message ('Recived')
        self.log('Received Model weights')
        model_state = average_model_params(model_states)
        self.log('Send updated Model weights')
        self.broadcast_data(model_state)
        return 'obtain'
@app_state('obtain', Role.BOTH)
class LocalUpdate(AppState):
    """Apply the coordinator's aggregated weights to the local model."""

    def register(self):
        self.register_transition('train', Role.BOTH)

    def run(self):
        updated_weights = self.await_data()
        self.log(f'{self.id} received updated Model weights')
        trainer = self.load('train_instince')
        trainer.set_weights(updated_weights)
        return 'train'
from federated_dca.utils import load_params, trainInstince, average_model_params
import bios
@app_state('initial', Role.BOTH)
class InitialState(AppState):
    """Load the config, build the local trainer and sync the initial model."""

    def register(self):
        self.register_transition('train', Role.BOTH)

    def run(self):
        # both roles read the same config file and build a local trainer
        self.config = bios.read('/mnt/input/config.yml')['fc_dca']
        train_instince = trainInstince(self.config)
        if self.is_coordinator:
            self.log('Send initial Model to Clients')
            self.broadcast_data(train_instince.model.state_dict())
        init_model_state = self.await_data()
        if not self.is_coordinator:
            self.log(f'Received initial Model state')
            train_instince.model.load_state_dict(init_model_state)
        self.store('train_instince', train_instince)
        return 'train'
@app_state('train', Role.BOTH)
class TrainState(AppState):
    """Run local training and ship the resulting weights to the coordinator."""

    def register(self):
        self.register_transition('aggregate', Role.COORDINATOR)
        self.register_transition('obtain', Role.PARTICIPANT)
        self.register_transition('terminal')

    def run(self):
        trainer = self.load('train_instince')
        trainer.train(self.update, self.log, self.id)
        model_weights = trainer.get_weights()
        self.log(f'Send Model weights')
        self.send_data_to_coordinator(model_weights)
        if trainer.finished_training:
            trainer.finish()
            return 'terminal'
        # coordinator aggregates; participants wait for the update
        return 'aggregate' if self.is_coordinator else 'obtain'
@app_state('aggregate', Role.COORDINATOR)
class GlobalAggregate(AppState):
    """Average the clients' model weights and broadcast the result."""

    def register(self):
        self.register_transition('obtain', Role.COORDINATOR)

    def run(self):
        model_states = self.gather_data()
        # fixed typo in log message ('Recived')
        self.log('Received Model weights')
        model_state = average_model_params(model_states)
        self.log('Send updated Model weights')
        self.broadcast_data(model_state)
        return 'obtain'
@app_state('obtain', Role.BOTH)
class LocalUpdate(AppState):
    """Apply the coordinator's aggregated weights to the local model."""

    def register(self):
        self.register_transition('train', Role.BOTH)

    def run(self):
        updated_weights = self.await_data()
        self.log(f'{self.id} received updated Model weights')
        trainer = self.load('train_instince')
        trainer.set_weights(updated_weights)
        return 'train'
import sys
import numpy as np
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
import scipy.sparse
import math
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from seldon.pipeline.pandas_pipelines import BasePandasEstimator
from collections import OrderedDict
import io
from sklearn.utils import check_X_y
from sklearn.utils import check_array
from sklearn.base import BaseEstimator,ClassifierMixin
import logging
logger = logging.getLogger(__name__)
class XGBoostClassifier(BasePandasEstimator,BaseEstimator,ClassifierMixin):
"""
Wrapper for XGBoost classifier with pandas support
XGBoost specific arguments follow https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py
Parameters
----------
target : str
Target column
target_readable : str
More descriptive version of target variable
included : list str, optional
columns to include
excluded : list str, optional
columns to exclude
id_map : dict (int,str), optional
map of class ids to high level names
num_iterations : int
number of iterations over data to run vw
raw_predictions : str
file to push raw predictions from vw to
max_depth : int
Maximum tree depth for base learners.
learning_rate : float
Boosting learning rate (xgb's "eta")
n_estimators : int
Number of boosted trees to fit.
silent : boolean
Whether to print messages while running boosting.
objective : string
Specify the learning task and the corresponding learning objective.
nthread : int
Number of parallel threads used to run xgboost.
gamma : float
Minimum loss reduction required to make a further partition on a leaf node of the tree.
min_child_weight : int
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : int
Maximum delta step we allow each tree's weight estimation to be.
subsample : float
Subsample ratio of the training instance.
colsample_bytree : float
Subsample ratio of columns when constructing each tree.
colsample_bylevel : float
Subsample ratio of columns for each split, in each level.
    reg_alpha : float (xgb's alpha)
        L1 regularization term on weights
    reg_lambda : float (xgb's lambda)
        L2 regularization term on weights
scale_pos_weight : float
Balancing of positive and negative weights.
base_score:
The initial prediction score of all instances, global bias.
seed : int
Random number seed.
missing : float, optional
Value in the data which needs to be present as a missing value. If
None, defaults to np.nan.
"""
def __init__(self, target=None, target_readable=None,included=None,excluded=None,clf=None,
id_map={},vectorizer=None,svmlight_feature=None,
max_depth=3, learning_rate=0.1, n_estimators=100,
silent=True, objective="reg:linear",
nthread=-1, gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1, colsample_bylevel=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None):
super(XGBoostClassifier, self).__init__(target,target_readable,included,excluded,id_map)
self.vectorizer = vectorizer
self.clf = clf
self.max_depth=max_depth
self.learning_rate=learning_rate
self.n_estimators=n_estimators
self.silent=silent
self.objective=objective
self.nthread=nthread
self.gamma=gamma
self.min_child_weight=min_child_weight
self.max_delta_step=max_delta_step
self.subsample=subsample
self.colsample_bytree=colsample_bytree
self.colsample_bylevel=colsample_bylevel
self.reg_alpha=reg_alpha
self.reg_lambda=reg_lambda
self.scale_pos_weight=scale_pos_weight
self.base_score=base_score
self.seed=seed
self.missing=missing
#self.params = { "max_depth":max_depth,"learning_rate":learning_rate,"n_estimators":n_estimators,
# "silent":silent, "objective":objective,
# "nthread":nthread, "gamma":gamma, "min_child_weight":min_child_weight, "max_delta_step":max_delta_step,
# "subsample":subsample, "colsample_bytree":colsample_bytree, "colsample_bylevel":colsample_bylevel,
# "reg_alpha":reg_alpha, "reg_lambda":reg_lambda, "scale_pos_weight":scale_pos_weight,
# "base_score":base_score, "seed":seed, "missing":missing }
self.svmlight_feature = svmlight_feature
def _to_svmlight(self,row):
"""Convert a dataframe row containing a dict of id:val to svmlight line
"""
if self.target in row:
line = str(row[self.target])
else:
line = "1"
d = row[self.svmlight_feature]
for (k,v) in d:
line += (" "+str(k)+":"+str(v))
return line
def _load_from_svmlight(self,df):
"""Load data from dataframe with dict of id:val into numpy matrix
"""
logger.info("loading from dictionary feature")
df_svm = df.apply(self._to_svmlight,axis=1)
output = io.BytesIO()
df_svm.to_csv(output,index=False,header=False)
output.seek(0)
(X,y) = load_svmlight_file(output,zero_based=False)
output.close()
return (X,y)
    def fit(self,X,y=None):
        """Fit a model:

        Parameters
        ----------

        X : pandas dataframe or array-like
            training samples. If pandas dataframe can handle dict of feature in one column or convert a set of columns
        y : array like, required for array-like X and not used presently for pandas dataframe
            class labels

        Returns
        -------
        self: object
        """
        if isinstance(X,pd.DataFrame):
            df = X
            if not self.svmlight_feature is None:
                # svmlight-style dict feature: build sparse matrix + labels
                if not self.target_readable is None:
                    self.create_class_id_map(df,self.target,self.target_readable)
                (X,y) = self._load_from_svmlight(df)
                num_class = len(np.unique(y))
            else:
                # plain columns: vectorize via the pandas conversion helper
                (X,y,self.vectorizer) = self.convert_numpy(df)
                num_class = len(y.unique())
        else:
            check_X_y(X,y)
            num_class = len(np.unique(y))
        # NOTE(review): num_class is computed in every branch but never
        # passed to the classifier - confirm whether it should be used
        self.clf = xgb.XGBClassifier(max_depth=self.max_depth, learning_rate=self.learning_rate,
                                     n_estimators=self.n_estimators,
                                     silent=self.silent, objective=self.objective,
                                     nthread=self.nthread, gamma=self.gamma,
                                     min_child_weight=self.min_child_weight,
                                     max_delta_step=self.max_delta_step,
                                     subsample=self.subsample, colsample_bytree=self.colsample_bytree,
                                     colsample_bylevel=self.colsample_bylevel,
                                     reg_alpha=self.reg_alpha, reg_lambda=self.reg_lambda,
                                     scale_pos_weight=self.scale_pos_weight,
                                     base_score=self.base_score, seed=self.seed, missing=self.missing)
        logger.info(self.clf.get_params(deep=True))
        self.clf.fit(X,y,verbose=True)
        return self
def predict_proba(self, X):
"""
Returns class probability estimates for the given test data.
X : pandas dataframe or array-like
Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class probability estimates.
"""
if isinstance(X,pd.DataFrame):
df = X
if not self.svmlight_feature is None:
(X,_) = self._load_from_svmlight(df)
else:
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict_proba(X) | python/seldon/xgb.py | import sys
import numpy as np
import xgboost as xgb
from sklearn.datasets import load_svmlight_file
import scipy.sparse
import math
import pandas as pd
from sklearn.feature_extraction import DictVectorizer
from seldon.pipeline.pandas_pipelines import BasePandasEstimator
from collections import OrderedDict
import io
from sklearn.utils import check_X_y
from sklearn.utils import check_array
from sklearn.base import BaseEstimator,ClassifierMixin
import logging
logger = logging.getLogger(__name__)
class XGBoostClassifier(BasePandasEstimator,BaseEstimator,ClassifierMixin):
"""
Wrapper for XGBoost classifier with pandas support
XGBoost specific arguments follow https://github.com/dmlc/xgboost/blob/master/python-package/xgboost/sklearn.py
Parameters
----------
target : str
Target column
target_readable : str
More descriptive version of target variable
included : list str, optional
columns to include
excluded : list str, optional
columns to exclude
id_map : dict (int,str), optional
map of class ids to high level names
num_iterations : int
number of iterations over data to run vw
raw_predictions : str
file to push raw predictions from vw to
max_depth : int
Maximum tree depth for base learners.
learning_rate : float
Boosting learning rate (xgb's "eta")
n_estimators : int
Number of boosted trees to fit.
silent : boolean
Whether to print messages while running boosting.
objective : string
Specify the learning task and the corresponding learning objective.
nthread : int
Number of parallel threads used to run xgboost.
gamma : float
Minimum loss reduction required to make a further partition on a leaf node of the tree.
min_child_weight : int
Minimum sum of instance weight(hessian) needed in a child.
max_delta_step : int
Maximum delta step we allow each tree's weight estimation to be.
subsample : float
Subsample ratio of the training instance.
colsample_bytree : float
Subsample ratio of columns when constructing each tree.
colsample_bylevel : float
Subsample ratio of columns for each split, in each level.
    reg_alpha : float (xgb's alpha)
        L1 regularization term on weights
    reg_lambda : float (xgb's lambda)
        L2 regularization term on weights
scale_pos_weight : float
Balancing of positive and negative weights.
base_score:
The initial prediction score of all instances, global bias.
seed : int
Random number seed.
missing : float, optional
Value in the data which needs to be present as a missing value. If
None, defaults to np.nan.
"""
def __init__(self, target=None, target_readable=None,included=None,excluded=None,clf=None,
id_map={},vectorizer=None,svmlight_feature=None,
max_depth=3, learning_rate=0.1, n_estimators=100,
silent=True, objective="reg:linear",
nthread=-1, gamma=0, min_child_weight=1, max_delta_step=0,
subsample=1, colsample_bytree=1, colsample_bylevel=1,
reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
base_score=0.5, seed=0, missing=None):
super(XGBoostClassifier, self).__init__(target,target_readable,included,excluded,id_map)
self.vectorizer = vectorizer
self.clf = clf
self.max_depth=max_depth
self.learning_rate=learning_rate
self.n_estimators=n_estimators
self.silent=silent
self.objective=objective
self.nthread=nthread
self.gamma=gamma
self.min_child_weight=min_child_weight
self.max_delta_step=max_delta_step
self.subsample=subsample
self.colsample_bytree=colsample_bytree
self.colsample_bylevel=colsample_bylevel
self.reg_alpha=reg_alpha
self.reg_lambda=reg_lambda
self.scale_pos_weight=scale_pos_weight
self.base_score=base_score
self.seed=seed
self.missing=missing
#self.params = { "max_depth":max_depth,"learning_rate":learning_rate,"n_estimators":n_estimators,
# "silent":silent, "objective":objective,
# "nthread":nthread, "gamma":gamma, "min_child_weight":min_child_weight, "max_delta_step":max_delta_step,
# "subsample":subsample, "colsample_bytree":colsample_bytree, "colsample_bylevel":colsample_bylevel,
# "reg_alpha":reg_alpha, "reg_lambda":reg_lambda, "scale_pos_weight":scale_pos_weight,
# "base_score":base_score, "seed":seed, "missing":missing }
self.svmlight_feature = svmlight_feature
def _to_svmlight(self,row):
"""Convert a dataframe row containing a dict of id:val to svmlight line
"""
if self.target in row:
line = str(row[self.target])
else:
line = "1"
d = row[self.svmlight_feature]
for (k,v) in d:
line += (" "+str(k)+":"+str(v))
return line
    def _load_from_svmlight(self, df):
        """Convert a dataframe whose svmlight feature column holds id:val
        pairs into a sparse feature matrix and label vector.

        Parameters
        ----------
        df : pandas dataframe
            Input rows; labels come from the target column when present
            (see _to_svmlight).

        Returns
        -------
        (X, y) : sparse matrix and label array as produced by sklearn's
            load_svmlight_file (feature ids are treated as one-based).
        """
        logger.info("loading from dictionary feature")
        # Render every row as an svmlight text line, then parse the whole
        # buffer back through sklearn's loader.
        df_svm = df.apply(self._to_svmlight,axis=1)
        output = io.BytesIO()
        # NOTE(review): writing to_csv into a BytesIO suggests Python 2
        # heritage; on Python 3 pandas may require a text buffer -- confirm.
        df_svm.to_csv(output,index=False,header=False)
        output.seek(0)
        (X,y) = load_svmlight_file(output,zero_based=False)
        output.close()
        return (X,y)
def fit(self,X,y=None):
"""Fit a model:
Parameters
----------
X : pandas dataframe or array-like
training samples. If pandas dataframe can handle dict of feature in one column or cnvert a set of columns
y : array like, required for array-like X and not used presently for pandas dataframe
class labels
Returns
-------
self: object
"""
if isinstance(X,pd.DataFrame):
df = X
if not self.svmlight_feature is None:
if not self.target_readable is None:
self.create_class_id_map(df,self.target,self.target_readable)
(X,y) = self._load_from_svmlight(df)
num_class = len(np.unique(y))
else:
(X,y,self.vectorizer) = self.convert_numpy(df)
num_class = len(y.unique())
else:
check_X_y(X,y)
num_class = len(np.unique(y))
self.clf = xgb.XGBClassifier(max_depth=self.max_depth, learning_rate=self.learning_rate,
n_estimators=self.n_estimators,
silent=self.silent, objective=self.objective,
nthread=self.nthread, gamma=self.gamma,
min_child_weight=self.min_child_weight,
max_delta_step=self.max_delta_step,
subsample=self.subsample, colsample_bytree=self.colsample_bytree,
colsample_bylevel=self.colsample_bylevel,
reg_alpha=self.reg_alpha, reg_lambda=self.reg_lambda,
scale_pos_weight=self.scale_pos_weight,
base_score=self.base_score, seed=self.seed, missing=self.missing)
logger.info(self.clf.get_params(deep=True))
self.clf.fit(X,y,verbose=True)
return self
def predict_proba(self, X):
"""
Returns class probability estimates for the given test data.
X : pandas dataframe or array-like
Test samples
Returns
-------
proba : array-like, shape = (n_samples, n_outputs)
Class probability estimates.
"""
if isinstance(X,pd.DataFrame):
df = X
if not self.svmlight_feature is None:
(X,_) = self._load_from_svmlight(df)
else:
(X,_,_) = self.convert_numpy(df)
else:
check_array(X)
return self.clf.predict_proba(X) | 0.685002 | 0.445409 |
from __future__ import print_function
import argparse
import sys
import os
import subprocess
import time
import xml.etree.ElementTree as ET
from datetime import datetime
# helpers
def get_substring(s, leader, trailer):
    """Return the portion of s strictly between leader and trailer.

    Raises ValueError (from str.index) when either marker is absent.
    """
    start = s.index(leader) + len(leader)
    end = s.index(trailer, start)
    return s[start:end]
def now():
    """Timestamp prefix for log lines, e.g. '[25.01.31 12:34:56] '."""
    return '[{}] '.format(datetime.now().strftime('%y.%m.%d %H:%M:%S'))
def printunbuff(string):
    """Log a line to stderr, flushing immediately so output is not buffered."""
    sys.stderr.write("%s\n" % (string,))
    sys.stderr.flush()
def check():
    """Scan the downloaded summary report for flaws at or above args.severity.

    Reads the globals `args.severity` (threshold) and `args.summaryreport`
    (report path). Returns 1 when the build should fail, 0 otherwise. The
    XML is matched with plain substring tests rather than being parsed.

    NOTE(review): thresholds 1 and 2 behave like 3 here -- no sev1/sev2
    counters are checked -- confirm this is intended.
    """
    found = 0
    if (args.severity == 0):
        # don't want to break on severity so return.
        return found
    with open(args.summaryreport) as f:
        datafile = f.readlines()
    for line in datafile:
        # Static-analysis flaw counters, e.g. numflawssev5="0".
        if 'numflawssev' in line:
            # print('numflawssev processing')
            # print(line)
            # Severity 5 fails the build whenever checking is enabled at all.
            if not('numflawssev5="0"' in line):
                # print('at least one sev 5')
                # print(line)
                found = 1
            if (not('numflawssev4="0"' in line) and (args.severity <= 4)):
                # print('at least one sev 4')
                # print(line)
                found = 1
            if (not('numflawssev3="0"' in line) and (args.severity <= 3)):
                # print('at least one sev 3')
                # print(line)
                found = 1
        # Software-composition-analysis findings use textual severities.
        elif 'severity_desc' in line:
            if ('severity_desc="Very High"' in line):
                # print('at least one very high sca finding')
                # print(line)
                found = 1
            elif (('severity_desc="High"' in line) and (args.severity <= 4)):
                # print('at least one high sca finding')
                # print(line)
                found = 1
            elif (('severity_desc="Medium"' in line) and (args.severity <= 3)):
                # print('at least one Medium sca finding')
                # print(line)
                found = 1
    return found # Because you finished the search without finding
# args
# CLI: positional jar path and API credentials, plus optional report path,
# build id and severity threshold. Unknown arguments are collected by
# parse_known_args so they can be passed through to the API jar.
parser = argparse.ArgumentParser(description='A Python wrapper to the Veracode Java API jar, '
                'providing "check a build and break by severity" functionality',
                epilog='Any additional arguments will be passed through to the API jar.',
                allow_abbrev=False)
parser.add_argument('apiwrapperjar', help='File path to Veracode API Java wrapper')
parser.add_argument('vid', help='Veracode API credentials ID')
parser.add_argument('vkey', help='Veracode API credentials key')
parser.add_argument('-sr', '--summaryreport', default="./sr3.xml", help='File path to put summary report in')
parser.add_argument('-bid','--build_id', help='Build id for the build to check')
parser.add_argument('-s','--severity', type=int, default=0,
                    help='Severity to break the build on. 0=none, 1=info, 2=low, 3=medium, 4=high, 5=very high')
args, unparsed = parser.parse_known_args()
# Fail fast with a clear usage error instead of passing None to the API jar
# (previously a missing -bid surfaced as an obscure subprocess failure).
if args.build_id is None:
    parser.error('-bid/--build_id is required')
# Resolve the report path relative to this script's directory so check()
# finds it regardless of the current working directory.
path_to_sr = os.path.dirname(os.path.abspath(__file__))
args.summaryreport = os.path.join(path_to_sr, args.summaryreport)
# setup
# Download the summary report for the requested build via the Java wrapper.
base_command = ['java', '-jar', args.apiwrapperjar, '-vid', args.vid, '-vkey', args.vkey]
command = base_command + ['-action', 'SummaryReport', '-outputfilepath', args.summaryreport, '-buildid', args.build_id]
printunbuff(now()+'Calling summary report with: '+str(command))
build_info = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
printunbuff(now()+'reply is: '+str(build_info))
# Exit status doubles as the build verdict: 1 breaks a CI pipeline.
fail = check()
printunbuff(now()+'Checked for flaws severity '+str(args.severity)+' and above. Fail build = '+str(fail))
sys.exit(fail)
import argparse
import sys
import os
import subprocess
import time
import xml.etree.ElementTree as ET
from datetime import datetime
# helpers
def get_substring(s, leader, trailer):
end_of_leader = s.index(leader) + len(leader)
start_of_trailer = s.index(trailer, end_of_leader)
return s[end_of_leader:start_of_trailer]
def now():
return datetime.now().strftime('[%y.%m.%d %H:%M:%S] ')
def printunbuff(string):
print(string, flush=True, file=sys.stderr)
def check():
found = 0
if (args.severity == 0):
# don't want to break on severity so return.
return found
with open(args.summaryreport) as f:
datafile = f.readlines()
for line in datafile:
if 'numflawssev' in line:
# print('numflawssev processing')
# print(line)
if not('numflawssev5="0"' in line):
# print('at least one sev 5')
# print(line)
found = 1
if (not('numflawssev4="0"' in line) and (args.severity <= 4)):
# print('at least one sev 4')
# print(line)
found = 1
if (not('numflawssev3="0"' in line) and (args.severity <= 3)):
# print('at least one sev 3')
# print(line)
found = 1
elif 'severity_desc' in line:
if ('severity_desc="Very High"' in line):
# print('at least one very high sca finding')
# print(line)
found = 1
elif (('severity_desc="High"' in line) and (args.severity <= 4)):
# print('at least one high sca finding')
# print(line)
found = 1
elif (('severity_desc="Medium"' in line) and (args.severity <= 3)):
# print('at least one Medium sca finding')
# print(line)
found = 1
return found # Because you finished the search without finding
# args
parser = argparse.ArgumentParser(description='A Python wrapper to the Veracode Java API jar, '
'providing "check a build and break by severity" functionality',
epilog='Any additional arguments will be passed through to the API jar.',
allow_abbrev=False)
parser.add_argument('apiwrapperjar', help='File path to Veracode API Java wrapper')
parser.add_argument('vid', help='Veracode API credentials ID')
parser.add_argument('vkey', help='Veracode API credentials key')
parser.add_argument('-sr', '--summaryreport', default="./sr3.xml", help='File path to put summary report in')
parser.add_argument('-bid','--build_id', help='Build id for the build to check')
parser.add_argument('-s','--severity', type=int, default=0,
help='Severity to break the build on. 0=none, 1=info, 2=low, 3=medium, 4=high, 5=very high')
args, unparsed = parser.parse_known_args()
#print(args.severity)
#print('build id is: '+args.build_id, file=sys.stderr)
#print('vid is: '+args.vid, file=sys.stderr)
#print(args.summaryreport, file=sys.stderr)
path_to_sr = os.path.dirname(os.path.abspath(__file__))
args.summaryreport= os.path.join(path_to_sr, args.summaryreport)
#print('summary report file is: '+args.summaryreport, file=sys.stderr)
# setup
base_command = ['java', '-jar', args.apiwrapperjar, '-vid', args.vid, '-vkey', args.vkey]
command = base_command + ['-action', 'SummaryReport', '-outputfilepath',args.summaryreport, '-buildid', args.build_id]
printunbuff(now()+'Calling summary report with: '+str(command))
build_info = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
printunbuff(now()+'reply is: '+str(build_info))
fail = check()
printunbuff(now()+'Checked for flaws severity '+str(args.severity)+' and above. Fail build = '+str(fail))
sys.exit(fail) | 0.132234 | 0.094093 |
import os
import re
import numpy as np
import torch
def calc_t_emb(ts, t_emb_dim):
    """
    Embed diffusion time steps into a t_emb_dim-dimensional sinusoidal space.

    Parameters
    ----------
    ts : tensor of shape (B, 1)
        Time-step values (assumed one column per sample -- TODO confirm).
    t_emb_dim : int
        Embedding width; must be even (half sine, half cosine channels).

    Returns
    -------
    Tensor of shape (B, t_emb_dim) on the same device as `ts`.
    """
    assert t_emb_dim % 2 == 0
    half_dim = t_emb_dim // 2
    # Geometric frequency ladder from 1 down to 1/10000, as in Transformer
    # positional encodings.
    scale = np.log(10000) / (half_dim - 1)
    freqs = torch.exp(torch.arange(half_dim) * -scale)
    # Follow the device of the input instead of hard-coding .cuda(), so the
    # embedding also works on CPU; behavior is unchanged for CUDA inputs.
    freqs = freqs.to(ts.device)
    angles = ts * freqs
    return torch.cat((torch.sin(angles), torch.cos(angles)), 1)
def flatten(v):
    """Flatten one level of nesting: a sequence of iterables into one list."""
    out = []
    for seq in v:
        out.extend(seq)
    return out
def rescale(x):
    """Linearly rescale a tensor so its values span [0, 1].

    NOTE: a constant input (max == min) divides by zero, as in the original.
    """
    lo = x.min()
    hi = x.max()
    return (x - lo) / (hi - lo)
def find_max_epoch(path, ckpt_name):
    """
    Return the largest epoch number among checkpoint files in `path`.

    Checkpoints are named ($ckpt_name)_$epoch.pkl, e.g. unet_ckpt_30.pkl.
    Returns -1 when no matching file exists.
    """
    epoch = -1
    for f in os.listdir(path):
        # Shortest valid name is ckpt_name + "_" + one digit + ".pkl".
        if len(f) <= len(ckpt_name) + 5:
            continue
        if f.startswith(ckpt_name) and f.endswith('.pkl'):
            number = f[len(ckpt_name) + 1:-4]
            try:
                epoch = max(epoch, int(number))
            except ValueError:
                # Previously a bare `except:` (which also swallowed
                # KeyboardInterrupt/SystemExit); only a non-integer epoch
                # field should be ignored.
                continue
    return epoch
def print_size(net):
    """Print the number of trainable parameters of a torch module.

    Silently does nothing when `net` is None or not an nn.Module.
    """
    if net is None or not isinstance(net, torch.nn.Module):
        return
    trainable = [p for p in net.parameters() if p.requires_grad]
    params = sum(np.prod(p.size()) for p in trainable)
    print("{} Parameters: {:.6f}M".format(
        net.__class__.__name__, params / 1e6), flush=True)
def std_normal(size, device='cuda'):
    """
    Draw a standard Gaussian tensor of the given size.

    `device` defaults to 'cuda' to preserve the original behavior; pass
    'cpu' (or any torch device) to sample elsewhere.
    """
    return torch.normal(0, 1, size=size).to(device)
def sampling(net, size, T, Alpha, Alpha_bar, Sigma):
    """
    Run the full reverse-diffusion chain, sampling x_0 from pure noise x_T.

    Parameters
    ----------
    net : model mapping (x_t, ts) to the predicted noise epsilon_theta
    size : 4-tuple (B, C, H, W) of the tensor to generate
    T : total number of diffusion steps
    Alpha, Alpha_bar, Sigma : per-step noise schedules, each of length T
        (assumed indexable tensors on the right device -- TODO confirm)

    Returns
    -------
    x : tensor of shape `size`, the generated sample (on CUDA via std_normal).
    """
    assert len(Alpha) == T
    assert len(Alpha_bar) == T
    assert len(Sigma) == T
    assert len(size) == 4
    print('begin sampling, total steps = %s' % T)
    # Start from pure Gaussian noise x_T.
    x = std_normal(size)
    with torch.no_grad():
        for t in range(T-1,-1,-1):
            if t % 100 == 0:
                print('reverse step:', t)
            # Broadcast the current step index to every sample in the batch.
            ts = (t * torch.ones((size[0], 1))).cuda()
            epsilon_theta = net((x,ts,))
            # Posterior mean: subtract the scaled predicted noise, then rescale.
            x = (x - (1-Alpha[t])/torch.sqrt(1-Alpha_bar[t]) * epsilon_theta) / torch.sqrt(Alpha[t])
            if t > 0:
                # Inject fresh noise at every step except the final one.
                x = x + Sigma[t] * std_normal(size)
    return x
def training_loss(net, loss_fn, T, X, Alpha_bar):
    r"""
    Compute loss_fn (default \ell_2) between the true noise epsilon and the
    model prediction epsilon_theta at a uniformly sampled diffusion step.

    Parameters
    ----------
    net : model mapping (x_t, ts) to predicted noise
    loss_fn : callable(pred, target) -> scalar loss
    T : total number of diffusion steps
    X : clean input batch of shape (B, C, H, W), expected on CUDA
    Alpha_bar : cumulative alpha schedule, indexable by step tensor

    Returns
    -------
    The scalar loss value from loss_fn.
    """
    # Raw docstring above: the original plain string made "\ell" an invalid
    # escape sequence.
    B, C, H, W = X.shape
    # One random diffusion step per sample, broadcastable over (C, H, W).
    ts = torch.randint(T, size=(B, 1, 1, 1)).cuda()
    z = std_normal(X.shape)
    # Closed-form forward diffusion: x_t = sqrt(abar_t) x_0 + sqrt(1-abar_t) z.
    xt = torch.sqrt(Alpha_bar[ts]) * X + torch.sqrt(1 - Alpha_bar[ts]) * z
    epsilon_theta = net((xt, ts.view(B, 1),))
    return loss_fn(epsilon_theta, z)
import re
import numpy as np
import torch
def calc_t_emb(ts, t_emb_dim):
"""
Embed time steps into a higher dimension space
"""
assert t_emb_dim % 2 == 0
half_dim = t_emb_dim // 2
t_emb = np.log(10000) / (half_dim - 1)
t_emb = torch.exp(torch.arange(half_dim) * -t_emb)
t_emb = t_emb.cuda()
t_emb = ts * t_emb
t_emb = torch.cat((torch.sin(t_emb), torch.cos(t_emb)), 1)
return t_emb
def flatten(v):
"""
Flatten a list of lists/tuples
"""
return [x for y in v for x in y]
def rescale(x):
"""
Rescale a tensor to 0-1
"""
return (x - x.min()) / (x.max() - x.min())
def find_max_epoch(path, ckpt_name):
"""
Find max epoch in path, formatted ($ckpt_name)_$epoch.pkl, such as unet_ckpt_30.pkl
"""
files = os.listdir(path)
epoch = -1
for f in files:
if len(f) <= len(ckpt_name) + 5:
continue
if f[:len(ckpt_name)] == ckpt_name and f[-4:] == '.pkl':
number = f[len(ckpt_name)+1:-4]
try:
epoch = max(epoch, int(number))
except:
continue
return epoch
def print_size(net):
"""
Print the number of parameters of a network
"""
if net is not None and isinstance(net, torch.nn.Module):
module_parameters = filter(lambda p: p.requires_grad, net.parameters())
params = sum([np.prod(p.size()) for p in module_parameters])
print("{} Parameters: {:.6f}M".format(
net.__class__.__name__, params / 1e6), flush=True)
def std_normal(size):
"""
Generate a standard Gaussian of a given size
"""
return torch.normal(0, 1, size=size).cuda()
def sampling(net, size, T, Alpha, Alpha_bar, Sigma):
"""
Perform the complete sampling step according to p(x_0|x_T)
"""
assert len(Alpha) == T
assert len(Alpha_bar) == T
assert len(Sigma) == T
assert len(size) == 4
print('begin sampling, total steps = %s' % T)
x = std_normal(size)
with torch.no_grad():
for t in range(T-1,-1,-1):
if t % 100 == 0:
print('reverse step:', t)
ts = (t * torch.ones((size[0], 1))).cuda()
epsilon_theta = net((x,ts,))
x = (x - (1-Alpha[t])/torch.sqrt(1-Alpha_bar[t]) * epsilon_theta) / torch.sqrt(Alpha[t])
if t > 0:
x = x + Sigma[t] * std_normal(size)
return x
def training_loss(net, loss_fn, T, X, Alpha_bar):
"""
Compute the loss_fn (default is \ell_2) loss of (epsilon - epsilon_theta)
"""
B, C, H, W = X.shape
ts = torch.randint(T, size=(B,1,1,1)).cuda()
z = std_normal(X.shape)
xt = torch.sqrt(Alpha_bar[ts]) * X + torch.sqrt(1-Alpha_bar[ts]) * z
epsilon_theta = net((xt, ts.view(B,1),))
return loss_fn(epsilon_theta, z) | 0.786008 | 0.561996 |